repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-html2md/src/containers.rs | tpnote-html2md/src/containers.rs | use crate::markup5ever_rcdom;
use super::StructuredPrinter;
use super::TagHandler;
use markup5ever_rcdom::Handle;
/// Handler for generic block-level container tags (e.g. `<div>`).
///
/// Separates the container's children from the surrounding output by
/// emitting a blank line both before and after them.
#[derive(Default)]
pub struct ContainerHandler;

impl TagHandler for ContainerHandler {
    /// Opens the container: two newlines so the content starts on its
    /// own paragraph.
    fn handle(&mut self, _tag: &Handle, printer: &mut StructuredPrinter) {
        for _ in 0..2 {
            printer.insert_newline();
        }
    }

    /// Closes the container: two newlines so whatever follows starts
    /// on a fresh paragraph.
    fn after_handle(&mut self, printer: &mut StructuredPrinter) {
        for _ in 0..2 {
            printer.insert_newline();
        }
    }
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-html2md/src/paragraphs.rs | tpnote-html2md/src/paragraphs.rs | use crate::markup5ever_rcdom;
use super::StructuredPrinter;
use super::TagHandler;
use markup5ever_rcdom::{Handle, NodeData};
/// Handler for paragraph-level tags: `<p>`, `<hr>` and `<br>`.
#[derive(Default)]
pub struct ParagraphHandler {
    // Local name of the element currently being handled ("p", "hr",
    // "br", ...); empty for non-element nodes.
    paragraph_type: String,
}

impl TagHandler for ParagraphHandler {
    fn handle(&mut self, tag: &Handle, printer: &mut StructuredPrinter) {
        // Remember which tag was entered so `after_handle` can emit the
        // matching Markdown when the tag closes.
        self.paragraph_type = match tag.data {
            NodeData::Element { ref name, .. } => name.local.to_string(),
            _ => String::new(),
        };
        // insert newlines at the start of paragraph
        if self.paragraph_type == "p" {
            printer.insert_newline();
            printer.insert_newline();
        }
    }

    fn after_handle(&mut self, printer: &mut StructuredPrinter) {
        // insert newlines at the end of paragraph
        match self.paragraph_type.as_ref() {
            "p" => {
                printer.insert_newline();
                printer.insert_newline();
            }
            "hr" => {
                // Thematic break: "---" on a line of its own.
                printer.insert_newline();
                printer.append_str("---");
                printer.insert_newline();
            }
            // Hard line break.
            "br" => printer.append_str(" \n"),
            _ => {}
        }
    }
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-html2md/src/images.rs | tpnote-html2md/src/images.rs | use super::StructuredPrinter;
use super::TagHandler;
use crate::common::get_tag_attr;
use crate::dummy::IdentityHandler;
use crate::markup5ever_rcdom;
use markup5ever_rcdom::Handle;
use percent_encoding::{utf8_percent_encode, AsciiSet, CONTROLS};
/// Characters that must be percent-encoded when they occur in a Markdown
/// image URL: C0 controls plus delimiters that would break the inline
/// link syntax.
const FRAGMENT: &AsciiSet = &CONTROLS.add(b' ').add(b'"').add(b'<').add(b'>').add(b'`');

/// Handler for `<img>` tag. Depending on circumstances can produce both
/// inline HTML-formatted image and Markdown native one
#[derive(Default)]
pub struct ImgHandler {
    // True when the image is styled `display: block` and must be set
    // apart from the surrounding text by blank lines.
    block_mode: bool,
}

impl TagHandler for ImgHandler {
    fn handle(&mut self, tag: &Handle, printer: &mut StructuredPrinter) {
        // hack: detect if the image has associated style and has display in block mode.
        // BUGFIX: read the `style` attribute — the code previously looked
        // at `src`, and a URL never contains "display: block", which made
        // this branch dead code.
        let style_tag = get_tag_attr(tag, "style");
        if let Some(style) = style_tag {
            if style.contains("display: block") {
                self.block_mode = true
            }
        }
        if self.block_mode {
            // make image on new paragraph
            printer.insert_newline();
            printer.insert_newline();
        }
        // try to extract attrs
        let src = get_tag_attr(tag, "src");
        let alt = get_tag_attr(tag, "alt");
        let title = get_tag_attr(tag, "title");
        let height = get_tag_attr(tag, "height");
        let width = get_tag_attr(tag, "width");
        let align = get_tag_attr(tag, "align");
        if height.is_some() || width.is_some() || align.is_some() {
            // need to handle it as inline html to preserve attributes we support
            let mut identity = IdentityHandler;
            identity.handle(tag, printer);
        } else {
            // need to escape URL if it contains spaces
            // don't have any geometry-controlling attrs, post markdown natively
            let mut img_url = src.unwrap_or_default();
            if img_url.contains(' ') {
                img_url = utf8_percent_encode(&img_url, FRAGMENT).to_string();
            }
            // BUGFIX: the format string was empty (`format!("", ...)`), which
            // does not compile because the arguments are unused. Restore the
            // Markdown inline-image syntax: ![alt](url "title").
            printer.append_str(&format!(
                "![{}]({}{})",
                alt.unwrap_or_default(),
                &img_url,
                title
                    .map(|value| format!(" \"{}\"", value))
                    .unwrap_or_default()
            ));
        }
    }

    fn after_handle(&mut self, printer: &mut StructuredPrinter) {
        // Close the paragraph opened in `handle` for block-mode images.
        if self.block_mode {
            printer.insert_newline();
            printer.insert_newline();
        }
    }
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-html2md/src/bin/html2md.rs | tpnote-html2md/src/bin/html2md.rs | extern crate html2md;
use std::io::{self, Read};
/// Reads HTML from standard input and writes the converted Markdown to
/// standard output.
fn main() {
    let mut html = String::new();
    io::stdin()
        .lock()
        .read_to_string(&mut html)
        .expect("Must be readable HTML!");
    println!("{}", html2md::parse_html(&html));
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-html2md/tests/unit.rs | tpnote-html2md/tests/unit.rs | extern crate html2md;
use html2md::parse_html;
// Plain paragraph text survives the round-trip unchanged.
#[test]
fn test_dumb() {
    let md = parse_html("<p>CARTHAPHILUS</p>");
    assert_eq!(md, "CARTHAPHILUS")
}

// A single anchor becomes an inline Markdown link.
#[test]
fn test_anchor() {
    let md = parse_html(r#"<p><a href="http://ya.ru">APOSIMZ</a></p>"#);
    assert_eq!(md, "[APOSIMZ](http://ya.ru)")
}

// Adjacent anchors stay adjacent — no separator is inserted.
#[test]
fn test_anchor2() {
    let md = parse_html(
        r#"<p><a href="http://ya.ru">APOSIMZ</a><a href="http://yandex.ru">SIDONIA</a></p>"#,
    );
    assert_eq!(md, "[APOSIMZ](http://ya.ru)[SIDONIA](http://yandex.ru)")
}

// A paragraph break between anchors becomes a blank line.
#[test]
fn test_anchor3() {
    let md = parse_html(
        r#"<p><a href="http://ya.ru">APOSIMZ</a><p/><a href="http://yandex.ru">SIDONIA</a></p>"#,
    );
    assert_eq!(md, "[APOSIMZ](http://ya.ru)\n\n[SIDONIA](http://yandex.ru)")
}

// URLs containing spaces (raw or percent-encoded) are wrapped in <...>.
#[test]
fn test_anchor4() {
    // [Commonmark: Example 489](https://spec.commonmark.org/0.31.2/#example-489)
    let md = parse_html(r#"<p><a href="/my uri">link</a><p/><a href="/my%20uri">link</a></p>"#);
    assert_eq!(md, "[link](</my uri>)\n\n[link](</my uri>)")
}

// An anchor carrying only a `name` attribute is preserved as raw HTML.
#[test]
fn test_anchor_with_name_attribute_is_preserved() {
    let md = parse_html(r#"<p><a name="part1"></a></p>"#);
    assert_eq!(md, r#"<a name="part1"></a>"#)
}

// An image wrapped in an anchor renders as a link whose body is the image.
#[test]
fn test_image() {
    let md = parse_html(
        r#"<p><a href="https://gitter.im/MARC-FS/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge"><img src="https://img.shields.io/gitter/room/MARC-FS/MARC-FS.svg" alt="Gitter"></a><br>"#,
    );
    assert_eq!(md, "[](https://gitter.im/MARC-FS/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)")
}

// Literal Markdown metacharacters in text must be backslash-escaped.
#[test]
fn test_escaping() {
    let md = parse_html(r#"<p>*god*'s in his **heaven** - all is right with the __world__</p>"#);
    assert_eq!(
        md,
        "\\*god\\*\'s in his \\*\\*heaven\\*\\* - all is right with the \\_\\_world\\_\\_"
    )
}

// A hyphen inside a word needs no escaping.
#[test]
fn test_escaping_mid_hyphens() {
    let md = parse_html(r#"<h1>This is a header with-hyphen!</h1>"#);
    assert_eq!(md, "# This is a header with-hyphen!")
}

// A single leading hyphen after the header marker needs no escaping.
#[test]
fn test_escaping_start_hyphens() {
    let md = parse_html(r#"<h1>- This is a header with starting hyphen!</h1>"#);
    assert_eq!(md, "# - This is a header with starting hyphen!")
}

// A leading '#' in plain text would form a header and must be escaped.
#[test]
fn test_escaping_start_sharp() {
    let md = parse_html("<html># nothing to worry about</html>");
    assert_eq!(md, "\\# nothing to worry about")
}
/// Note: Also strips multiple spaces
#[test]
fn test_escaping_start_hyphens_space() {
    let md = parse_html(r#"<h1> - This is a header with starting hyphen!</h1>"#);
    assert_eq!(md, "# - This is a header with starting hyphen!")
}

// Unknown tags in running text are escaped rather than dropped.
#[test]
fn test_escaping_html_tags() {
    let md = parse_html(
        r#"xxxxxxx xx xxxxxxxxxxx: <iframe src="xxxxxx_xx_xxxxxxxxxxx/embed/" allowfullscreen="" height="725" width="450"></iframe>"#,
    );
    assert_eq!(
        md,
        r#"xxxxxxx xx xxxxxxxxxxx: \<iframe src="xxxxxx\_xx\_xxxxxxxxxxx/embed/" allowfullscreen="" height="725" width="450"\>\</iframe\>"#
    )
}

// h1/h2 become #/## headers with blank lines between sections.
#[test]
fn test_headers() {
    let md = parse_html(
        r#"<h1 id="marc-fs">MARC-FS</h1><p><a href="http://Mail.ru">Mail.ru</a> Cloud filesystem written for FUSE</p><h2 id="synopsis">Synopsis</h2>"#,
    );
    assert_eq!(
        md,
        "# MARC-FS\n\n[Mail.ru](http://Mail.ru) Cloud filesystem written for FUSE\n\n## Synopsis"
    )
}

// A line of '=' after a break would form a setext header — escape it.
#[test]
fn test_escaping_start_equal() {
    let md = parse_html(r#"<p>This is NOT a header!<br/>===========</p>"#);
    assert_eq!(md, "This is NOT a header! \n\\===========")
}
/// Note: Also strips multiple spaces
#[test]
fn test_escaping_start_equal_space() {
    let md = parse_html(r#"<p>This is NOT a header!<br/> ===========</p>"#);
    assert_eq!(md, "This is NOT a header! \n \\===========")
}

// A line of '-' after a break would form a setext header — escape it.
#[test]
fn test_escaping_start_hyphen() {
    let md = parse_html(r#"<p>This is NOT a header!<br/>-------</p>"#);
    assert_eq!(md, "This is NOT a header! \n\\-------")
}
/// Note: Also strips multiple spaces
#[test]
fn test_escaping_start_hyphen_space() {
    let md = parse_html(r#"<p>This is NOT a header!<br/> -------</p>"#);
    assert_eq!(md, "This is NOT a header! \n \\-------")
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-html2md/tests/styles.rs | tpnote-html2md/tests/styles.rs | extern crate html2md;
use html2md::parse_html;
// <s> with leading spaces: the spaces move outside the ~~...~~ markers.
#[test]
fn test_styles_with_spaces() {
    let md = parse_html(r#"It read:<s> Nobody will ever love you</s>"#);
    assert_eq!(md, r#"It read: ~~Nobody will ever love you~~"#)
}

// Inline styles spanning <br/> line breaks keep their markers around the
// whole styled run; <u> has no Markdown form and styles plain text.
#[test]
fn test_styles_with_newlines() {
    let md = parse_html(
        r#"
And she said:<br/>
<s>We are all just prisoners here<br/>
<u> Of our own device<br/> </s>
And in the master's chambers<br/>
They gathered for the feast<br/>
<em>They stab it with their steely knives</em><br/>
<strong>But they just can't kill the beast<br/></strong>
"#,
    );
    assert_eq!(
        md,
        "\
And she said:
~~We are all just prisoners here
Of our own device~~
And in the master's chambers
They gathered for the feast
*They stab it with their steely knives*
**But they just can't kill the beast**"
    )
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-html2md/tests/quotes.rs | tpnote-html2md/tests/quotes.rs | extern crate html2md;
use html2md::parse_html;
use indoc::indoc;
// A blockquote gets a "> " prefix; inner newlines collapse to one line.
#[test]
fn test_quotes() {
    let md = parse_html(
        "<p><blockquote>here's a quote\n next line of it</blockquote>And some text after it</p>",
    );
    assert_eq!(
        md,
        "\
> here's a quote next line of it
And some text after it"
    )
}

// Nested blockquotes add a second "> " level.
#[test]
fn test_quotes2() {
    let md = parse_html("<p><blockquote>here's<blockquote>nested quote!</blockquote> a quote\n next line of it</blockquote></p>");
    assert_eq!(
        md,
        "\
> here's
> > nested quote!
>
> a quote next line of it"
    )
}

// A quote as the very first element must not crash the parser.
#[test]
fn test_blockquotes() {
    let md = parse_html(
        "<blockquote>Quote at the start of the message</blockquote>Should not crash the parser",
    );
    assert_eq!(
        md,
        "\
> Quote at the start of the message
Should not crash the parser"
    )
}

// <details>/<summary> pass through as HTML; inner markup is still converted.
#[test]
fn test_details() {
    let html = indoc! {"
<details>
<summary>There are more things in heaven and Earth, <b>Horatio</b></summary>
<p>Than are dreamt of in your philosophy</p>
</details>
"};
    let md = parse_html(html);
    assert_eq!(md, "<details> <summary>There are more things in heaven and Earth, **Horatio**</summary>\n\nThan are dreamt of in your philosophy\n\n</details>")
}

// <sub>/<sup> have no Markdown equivalent and are passed through as-is.
#[test]
fn test_subsup() {
    let md = parse_html("X<sub>2</sub>");
    assert_eq!(md, r#"X<sub>2</sub>"#)
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-html2md/tests/iframes.rs | tpnote-html2md/tests/iframes.rs | extern crate html2md;
use html2md::parse_html;
// YouTube embed URLs are rewritten to canonical watch?v= links.
#[test]
fn test_youtube_simple() {
    let md = parse_html("<iframe src='https://www.youtube.com/embed/zE-dmXZp3nU?wmode=opaque' class='fr-draggable' width='640' height='360'></iframe>");
    assert_eq!(md, "[](https://www.youtube.com/watch?v=zE-dmXZp3nU)")
}

// Instagram embeds keep their embed URL.
#[test]
fn test_instagram_simple() {
    let md = parse_html("<iframe src='https://www.instagram.com/p/B1BKr9Wo8YX/embed/' width='600' height='600'></iframe>");
    assert_eq!(md, "[](https://www.instagram.com/p/B1BKr9Wo8YX/embed/)")
}

// VK video_ext.php URLs are rewritten to public video page links.
#[test]
fn test_vkontakte_simple() {
    let md = parse_html("<iframe src='https://vk.com/video_ext.php?oid=-76477496&id=456239454&hash=ebfdc2d386617b97' width='640' height='360' frameborder='0' allowfullscreen></iframe>");
    assert_eq!(md, "[](https://vk.com/video-76477496_456239454)")
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-html2md/tests/lists.rs | tpnote-html2md/tests/lists.rs | extern crate html2md;
use html2md::parse_html;
// A flat <ul> becomes a bulleted list, one item per line.
#[test]
fn test_list_simple() {
    let md = parse_html(
        r#"<p><ul><li>Seven things has lady Lackless</li><li>Keeps them underneath her black dress</li><li>One a thing that's not for wearing</li></ul></p>"#,
    );
    assert_eq!(
        md,
        "\
* Seven things has lady Lackless
* Keeps them underneath her black dress
* One a thing that's not for wearing"
    )
}

// Unclosed <li> tags and a stray <p> must still parse into a list.
#[test]
fn test_list_formatted() {
    // let's use some broken html
    let md = parse_html(
        r#"
<ul><p>
<li>You should NEVER see this error
<ul>
<li>Broken lines, broken strings
<li>Broken threads, broken springs</li>
<li>Broken idols, broken heads
<li>People sleep in broken beds</li>
</ul>
</li>
<li>Ain't no use jiving</li>
<li>Ain't no use joking</li>
<li>EVERYTHING IS BROKEN
"#,
    );
    assert_eq!(
        md,
        "\
* You should NEVER see this error
* Broken lines, broken strings
* Broken threads, broken springs
* Broken idols, broken heads
* People sleep in broken beds
* Ain't no use jiving
* Ain't no use joking
* EVERYTHING IS BROKEN"
    )
}

// Well-formed StackEdit-style markup: one <p> per <li>, nested <ul>.
#[test]
fn test_list_stackedit() {
    let md = parse_html(
        r#"
<ul>
<li>
<p>You should NEVER see this error</p>
<ul>
<li>
<p>Broken lines, broken strings</p>
</li>
<li>
<p>Broken threads, broken springs</p>
</li>
<li>
<p>Broken idols, broken heads</p>
</li>
<li>
<p>People sleep in broken beds</p>
</li>
</ul>
</li>
<li>
<p>Ain’t no use jiving</p>
</li>
<li>
<p>Ain’t no use joking</p>
</li>
<li>
<p>EVERYTHING IS BROKEN</p>
</li>
</ul>"#,
    );
    assert_eq!(
        md,
        "\
* You should NEVER see this error
* Broken lines, broken strings
* Broken threads, broken springs
* Broken idols, broken heads
* People sleep in broken beds
* Ain’t no use jiving
* Ain’t no use joking
* EVERYTHING IS BROKEN"
    )
}

// Trailing <br/> inside a list item must not leak blank lines into the list.
#[test]
fn test_list_stackedit_add_brs() {
    let md = parse_html(
        r#"
<ul>
<li>
<p>You should NEVER see this error</p>
<ul>
<li>
<p>Broken lines, broken strings</p>
</li>
<li>
<p>Broken threads, broken springs</p>
</li>
<li>
<p>Broken idols, broken heads</p>
</li>
<li>
<p>People sleep in broken beds</p>
<br/>
<br/>
</li>
</ul>
</li>
<li>
<p>Ain’t no use jiving</p>
</li>
<li>
<p>Ain’t no use joking</p>
</li>
<li>
<p>EVERYTHING IS BROKEN</p>
</li>
</ul>"#,
    );
    assert_eq!(
        md,
        "\
* You should NEVER see this error
* Broken lines, broken strings
* Broken threads, broken springs
* Broken idols, broken heads
* People sleep in broken beds
* Ain’t no use jiving
* Ain’t no use joking
* EVERYTHING IS BROKEN"
    )
}

// Multiple paragraphs and a <br/> inside one ordered-list item.
#[test]
fn test_list_multiline() {
    let md = parse_html(
        r#"
<ol>
<li>
<p>In the heat and the rains</p>
<p>With whips and chains</p>
<p>Just to see him fly<br/>So many die!</p>
</li>
</ol>
"#,
    );
    assert_eq!(
        md,
        "\
1. In the heat and the rains
With whips and chains
Just to see him fly
So many die!"
    )
}

// Broken html plus multi-paragraph items: paragraphs stay inside the item.
#[test]
fn test_list_multiline_formatted() {
    // let's use some broken html
    let md = parse_html(
        r#"
<ul><p>
<li>You should NEVER see this error
<ul>
<li>Broken lines, broken strings
<li>Broken threads, broken springs</li>
<li>Broken idols, broken heads
<li>People sleep in broken beds</li>
<li>
<p>Ain't no use jiving</p>
<p>Ain't no use joking</p>
<p>EVERYTHING IS BROKEN</p>
</li>
</ul>
</li>
"#,
    );
    assert_eq!(
        md,
        "\
* You should NEVER see this error
* Broken lines, broken strings
* Broken threads, broken springs
* Broken idols, broken heads
* People sleep in broken beds
* Ain't no use jiving
Ain't no use joking
EVERYTHING IS BROKEN"
    )
}

// <ol> items are numbered sequentially starting from 1.
#[test]
fn test_list_ordered() {
    // let's use some broken html
    let md = parse_html(
        r#"
<ol>
<li>Now did you read the news today?</li>
<li>They say the danger's gone away</li>
<li>Well I can see the fire still alight</li>
<li>Burning into the night</li>
</ol>
"#,
    );
    assert_eq!(
        md,
        "\
1. Now did you read the news today?
2. They say the danger's gone away
3. Well I can see the fire still alight
4. Burning into the night"
    )
}

// Text immediately preceding a list stays on its own line above it.
#[test]
fn test_list_text_prevsibling() {
    let md = parse_html(
        r#"
Phrases to describe me:
<ul>
<li>Awesome</li>
<li>Cool</li>
<li>Awesome and cool</li>
<li>Can count to five</li>
<li>Learning to count to six B)</li>
</ul>
"#,
    );
    assert_eq!(
        md,
        "\
Phrases to describe me:
* Awesome
* Cool
* Awesome and cool
* Can count to five
* Learning to count to six B)"
    )
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-html2md/tests/integration.rs | tpnote-html2md/tests/integration.rs | extern crate html2md;
use html2md::parse_html;
use std::fs::File;
use std::io::prelude::*;
use indoc::indoc;
// Smoke test: converting a large real-world README must not panic.
// Ignored by default — requires a local fixture file.
#[test]
#[ignore]
fn test_marcfs() {
    let mut html = String::new();
    let mut html_file = File::open("tests/input/marcfs-readme.html").unwrap();
    html_file
        .read_to_string(&mut html)
        .expect("File must be readable");
    let result = parse_html(&html);
    println!("{}", result);
}

// Smoke test against the Markdown cheatsheet fixture; output is only
// printed, the exact-match assertion is intentionally disabled.
#[test]
#[ignore]
fn test_cheatsheet() {
    let mut html = String::new();
    let mut md = String::new();
    let mut html_file = File::open("tests/input/markdown-cheatsheet.html").unwrap();
    let mut md_file = File::open("tests/input/markdown-cheatsheet.md").unwrap();
    html_file
        .read_to_string(&mut html)
        .expect("File must be readable");
    md_file
        .read_to_string(&mut md)
        .expect("File must be readable");
    let md_parsed = parse_html(&html);
    println!("{}", md_parsed);
    //assert_eq!(md, md_parsed);
}
/// newlines after list shouldn't be converted into text of the last list element
#[test]
fn test_list_newlines() {
    let mut html = String::new();
    // Skips silently when the (masked) fixture is absent.
    if let Ok(mut html_file) = File::open("tests/input/dybr-bug-with-list-newlines.html") {
        html_file
            .read_to_string(&mut html)
            .expect("File must be readable");
        let result = parse_html(&html);
        assert!((result).contains(".\n\nxxx xxxx"));
        assert!((result).contains("xx x.\n\nxxxxx:"));
    }
}

// Plain-text lines starting with "-" must be escaped, not parsed as lists.
#[test]
fn test_lists_from_text() {
    let mut html = String::new();
    if let Ok(mut html_file) = File::open("tests/input/dybr-bug-with-lists-from-text.html") {
        html_file
            .read_to_string(&mut html)
            .expect("File must be readable");
        let result = parse_html(&html);
        assert!((result).contains("\\- x xxxx xxxxx xx xxxxxxxxxx"));
        assert!((result).contains("\\- x xxxx xxxxxxxx xxxxxxxxx xxxxxx xxx x xxxxxxxx xxxx"));
        assert!((result).contains("\\- xxxx xxxxxxxx"));
    }
}

// Bold markers inside a link must stay inside the link text.
#[test]
fn test_strong_inside_link() {
    let mut html = String::new();
    if let Ok(mut html_file) = File::open("tests/input/dybr-bug-with-strong-inside-link.html") {
        html_file
            .read_to_string(&mut html)
            .expect("File must be readable");
        let result = parse_html(&html);
        assert!((result).contains("[**Just God**](http://fanfics.me/ficXXXXXXX)"));
    }
}

// Table rows must never be split across lines by embedded newlines.
#[test]
fn test_tables_with_newlines() {
    let mut html = String::new();
    if let Ok(mut html_file) = File::open("tests/input/dybr-bug-with-tables-masked.html") {
        html_file
            .read_to_string(&mut html)
            .expect("File must be readable");
        let result = parse_html(&html);
        // all lines starting with | should end with | as well
        let invalid_table_lines: Vec<&str> = result
            .lines()
            .filter(|line| line.starts_with("|"))
            .filter(|line| !line.ends_with("|"))
            .collect();
        assert!((invalid_table_lines).is_empty());
    }
}

// A table whose header runs vertically still produces a valid Markdown table.
#[test]
fn test_tables2() {
    let mut html = String::new();
    if let Ok(mut html_file) = File::open("tests/input/dybr-bug-with-tables-2-masked.html") {
        html_file
            .read_to_string(&mut html)
            .expect("File must be readable");
        let table_with_vertical_header = parse_html(&html);
        assert!((table_with_vertical_header).contains(indoc! {"
|Current Conditions:|Open all year. No reservations. No services.|
|-------------------|--------------------------------------------|
| Reservations: | No reservations. |
| Fees | No fee. |
| Water: | No water. |"
        }));
    }
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-html2md/tests/tables.rs | tpnote-html2md/tests/tables.rs | extern crate html2md;
use html2md::parse_html;
// Basic table: header row, separator, centered cell padding.
#[test]
fn test_tables() {
    let md = parse_html(
        r#"<table>
<thead>
<tr>
<th scope='col'>Minor1</th>
<th scope='col'>Minor2</th>
<th scope='col'>Minor3</th>
<th scope='col'>Minor4</th>
</tr>
</thead>
<tbody>
<tr>
<td>col1</td>
<td>col2</td>
<td>col3</td>
<td>col4</td>
</tr>
</tbody>
</table>"#,
    );
    assert_eq!(
        md,
        "\
|Minor1|Minor2|Minor3|Minor4|
|------|------|------|------|
| col1 | col2 | col3 | col4 |"
    );
}

// More header cells than body cells: body rows are padded with empties.
#[test]
fn test_tables_invalid_more_headers() {
    let md = parse_html(
        r#"<table>
<thead>
<tr>
<th scope='col'>Minor1</th>
<th scope='col'>Minor2</th>
<th scope='col'>Minor3</th>
<th scope='col'>Minor4</th>
<th scope='col'>Minor5</th>
<th scope='col'>Minor6</th>
</tr>
</thead>
<tbody>
<tr>
<td>col1</td>
<td>col2</td>
<td>col3</td>
<td>col4</td>
</tr>
</tbody>
</table>"#,
    );
    assert_eq!(
        md,
        "\
|Minor1|Minor2|Minor3|Minor4|Minor5|Minor6|
|------|------|------|------|------|------|
| col1 | col2 | col3 | col4 | | |"
    );
}

// More body cells than header cells: the header is padded with empties.
#[test]
fn test_tables_invalid_more_rows() {
    let md = parse_html(
        r#"<table>
<thead>
<tr>
<th scope='col'>Minor1</th>
<th scope='col'>Minor2</th>
</tr>
</thead>
<tbody>
<tr>
<td>col1</td>
<td>col2</td>
<td>col3</td>
<td>col4</td>
</tr>
</tbody>
</table>"#,
    );
    assert_eq!(
        md,
        "\
|Minor1|Minor2| | |
|------|------|----|----|
| col1 | col2 |col3|col4|"
    );
}

// Column width follows the widest cell in the column.
#[test]
fn test_tables_odd_column_width() {
    let md = parse_html(
        r#"<table>
<thead>
<tr>
<th scope='col'>Minor</th>
<th scope='col'>Major</th>
</tr>
</thead>
<tbody>
<tr>
<td>col1</td>
<td>col2</td>
</tr>
</tbody>
</table>"#,
    );
    assert_eq!(
        md,
        "\
|Minor|Major|
|-----|-----|
|col1 |col2 |"
    );
}

// align attributes map to :---, :---: and ---: separator syntax.
#[test]
fn test_tables_alignment() {
    let md = parse_html(
        r#"<table>
<thead>
<tr>
<th align='right'>Minor1</th>
<th align='center'>Minor2</th>
<th align='right'>Minor3</th>
<th align='left'>Minor4</th>
</tr>
</thead>
<tbody>
<tr>
<td>col1</td>
<td>col2</td>
<td>col3</td>
<td>col4</td>
</tr>
</tbody>
</table>"#,
    );
    assert_eq!(
        md,
        "\
|Minor1|Minor2|Minor3|Minor4|
|-----:|:----:|-----:|:-----|
| col1 | col2 | col3 | col4 |"
    );
}

// Real-world table with inline styles, <br> noise and empty columns.
#[test]
fn test_tables_wild_example() {
    let md = parse_html(
        r#"
<table style="width: 100%;">
<thead>
<tr>
<th>One ring<br></th>
<th>Patterns<br></th>
<th>Titanic<br></th>
<th><br></th>
<th><br></th>
<th><br></th>
</tr>
</thead>
<tbody>
<tr>
<td style="width: 16.6667%;">One ring to rule them all<br></td>
<td style="width: 16.6667%;">There's one for the sorrow <br></td>
<td style="width: 16.6667%;">Roll on, Titanic, roll<br></td>
<td style="width: 16.6667%;"><br></td>
<td style="width: 16.6667%;"><br></td>
<td style="width: 16.6667%;"><br></td>
</tr>
<tr>
<td style="width: 16.6667%;">One ring to find them<br></td>
<td style="width: 16.6667%;">And two for the joy<br></td>
<td style="width: 16.6667%;">You're the pride of White Star Line<br></td>
<td style="width: 16.6667%;"><br></td>
<td style="width: 16.6667%;"><br></td>
<td style="width: 16.6667%;"><br></td>
</tr>
<tr>
<td style="width: 16.6667%;">One ring to bring them all<br></td>
<td style="width: 16.6667%;">And three for the girls<br></td>
<td style="width: 16.6667%;">Roll on, Titanic, roll<br></td>
<td style="width: 16.6667%;"><br></td>
<td style="width: 16.6667%;"><br></td>
<td style="width: 16.6667%;"><br></td>
</tr>
<tr>
<td style="width: 16.6667%;">And in the darkness bind them<br></td>
<td style="width: 16.6667%;">And four for the boys<br></td>
<td style="width: 16.6667%;">Into the mists of time<br></td>
<td style="width: 16.6667%;"><br></td>
<td style="width: 16.6667%;"><br></td>
<td style="width: 16.6667%;"><br></td>
</tr>
</tbody>
</table>"#,
    );
    assert_eq!(md, "\
| One ring | Patterns | Titanic | | | |
|-----------------------------|--------------------------|-----------------------------------|---|---|---|
| One ring to rule them all |There's one for the sorrow| Roll on, Titanic, roll | | | |
| One ring to find them | And two for the joy |You're the pride of White Star Line| | | |
| One ring to bring them all | And three for the girls | Roll on, Titanic, roll | | | |
|And in the darkness bind them| And four for the boys | Into the mists of time | | | |");
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-html2md/tests/images.rs | tpnote-html2md/tests/images.rs | extern crate html2md;
use html2md::parse_html;
// NOTE(review): several expected strings below look truncated in this copy
// (the Markdown image syntax appears to be missing from the literals);
// verify against the original repository before editing.
#[test]
fn test_image_native_simple() {
    let md = parse_html("<img src=\"https://i.redd.it/vesfbmwfkz811.png\" alt=\"image of Linus holding his laptop\" title=\"Daddy Linus\" />");
    assert_eq!(
        md,
        ""
    )
}

// Without a title, the image renders from src and alt only.
#[test]
fn test_image_native_without_title() {
    let md = parse_html("<img src=\"https://i.redd.it/l0ne52x7fh611.png\" alt=\"image of usual kill -9 sequence\" />");
    assert_eq!(
        md,
        ""
    )
}

// Geometry attributes (height/width) force pass-through as inline HTML.
#[test]
fn test_image_embedded_html() {
    let md = parse_html("<img src=\"https://i.redd.it/un4h28uwtp711.png\" alt=\"comics about Mac and GNU/Linux\" title=\"Look at me, brother\" height=\"150\" width=\"150\" />");
    assert_eq!(md, "<img src=\"https://i.redd.it/un4h28uwtp711.png\" alt=\"comics about Mac and GNU/Linux\" title=\"Look at me, brother\" height=\"150\" width=\"150\">")
}

#[test]
fn test_image_embedded_with_unsupported_html() {
    // srcset is unsupported in Markdown
    let md = parse_html("<img src=\"https://i.redd.it/07onlc10x5711.png\" alt=\"HACKERMAN\" title=\"When you reboot instead of exiting vim\" height=\"150\" width=\"150\" srcset=\"image1 image2\" align=\"center\" />");
    assert_eq!(md, "<img src=\"https://i.redd.it/07onlc10x5711.png\" alt=\"HACKERMAN\" title=\"When you reboot instead of exiting vim\" height=\"150\" width=\"150\" srcset=\"image1 image2\" align=\"center\">")
}

// width="auto" must survive the HTML pass-through unchanged.
#[test]
fn test_image_src_issue() {
    let md = parse_html("<img src=\"https://dybr.ru/img/43/1532265494_android-Kanedias\" width=\"auto\" height=\"500\" >");
    assert_eq!(md, "<img src=\"https://dybr.ru/img/43/1532265494_android-Kanedias\" width=\"auto\" height=\"500\">")
}

// Spaces in the src URL must be percent-encoded in the Markdown output.
#[test]
fn test_image_with_space_issue() {
    let md = parse_html("<img src=\"https://i.redd.it/l0ne 52x7f h611.png\" alt=\"image of usual kill -9 sequence\" />");
    assert_eq!(
        md,
        ""
    )
}

// A query string in the src URL must not be mangled.
#[test]
fn test_image_with_query_issue() {
    let md = parse_html("<img src=\"https://instagram.ftll1-1.fna.fbcdn.net/vp/4c753762a3cd58ec2cd55f7e20f87e5c/5D39A8B3/t51.2885-15/sh0.08/e35/p640x640/54511922_267736260775264_8482507773977053160_n.jpg?_nc_ht=instagram.ftll1-1.fna.fbcdn.net\" style=\"width: 494px;\">");
    assert_eq!(md, "")
}

// Unescaped quotes inside alt survive the HTML pass-through verbatim.
#[test]
fn test_image_with_unsupported_html_and_quotes_in_alt() {
    let md = parse_html(r#"<img alt="A "pipe"" src="a.png" width="13" />"#);
    assert_eq!(
        md,
        r#"<img alt="A "pipe"" src="a.png" width="13">"#
    )
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote/build.rs | tpnote/build.rs | extern crate winresource;
use std::env;
use std::error::Error;
use winresource::WindowsResource;
/// Cross compile with icons is a new feature in `winres 0.1.12`:
///
/// * [Adding an icon issues when building from Linux for Windows · Issue #33 · mxre/winres · GitHub](https://github.com/mxre/winres/issues/33)
/// * [Enable cross compiling from unix/macos by moshensky · Pull Request #24 · mxre/winres · GitHub](https://github.com/mxre/winres/pull/24)
/// * [Rust 1.61 in Linux does no add resources to the EXE · Issue #40 · mxre/winres · GitHub](https://github.com/mxre/winres/issues/40#issuecomment-1321141396)
///
/// Embeds `icon_path` as the Windows executable icon when the build
/// target is Windows; does nothing for any other target family.
fn add_icon_to_bin_when_building_for_win(icon_path: &str) -> Result<(), Box<dyn Error>> {
    // Only Windows executables carry an embedded icon resource.
    if env::var("CARGO_CFG_TARGET_FAMILY")? == "windows" {
        let mut res = WindowsResource::new();
        let target_env = std::env::var("CARGO_CFG_TARGET_ENV")?;
        match target_env.as_str() {
            // MinGW cross builds need the mingw-w64 binutils to assemble
            // the resource object file.
            "gnu" => res
                .set_ar_path("x86_64-w64-mingw32-ar")
                .set_windres_path("x86_64-w64-mingw32-windres")
                .set_toolkit_path(".")
                .set_icon(icon_path),
            "msvc" => res.set_icon(icon_path),
            // Panicking in a build script aborts the build with a message.
            _ => panic!("Unsupported env: {}", target_env),
        };
        res.compile()?;
    }
    Ok(())
}
/// Build-script entry point: embeds `tpnote.ico` into the binary when
/// targeting Windows; a no-op for every other target.
fn main() -> Result<(), Box<dyn Error>> {
    add_icon_to_bin_when_building_for_win("tpnote.ico")
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote/src/config.rs | tpnote/src/config.rs | //! Sets configuration defaults, reads, and writes Tp-Note's configuration
//! file and exposes the configuration as `static` variable.
use crate::error::ConfigFileError;
use crate::settings::ClapLevelFilter;
use crate::settings::ARGS;
use crate::settings::DOC_PATH;
use crate::settings::ENV_VAR_TPNOTE_CONFIG;
use directories::ProjectDirs;
use parking_lot::RwLock;
use serde::Deserialize;
use serde::Serialize;
use std::collections::HashMap;
use std::env;
use std::fs;
use std::fs::File;
use std::io;
use std::mem;
use std::path::Path;
use std::path::PathBuf;
use std::str::FromStr;
use std::sync::LazyLock;
use tera::Tera;
use toml::Value;
use tpnote_lib::config::LibCfg;
use tpnote_lib::config::LocalLinkKind;
use tpnote_lib::config::TmplHtml;
use tpnote_lib::config::FILENAME_ROOT_PATH_MARKER;
use tpnote_lib::config::LIB_CFG;
use tpnote_lib::config::LIB_CFG_RAW_FIELD_NAMES;
use tpnote_lib::config::LIB_CONFIG_DEFAULT_TOML;
use tpnote_lib::config_value::CfgVal;
use tpnote_lib::context::Context;
use tpnote_lib::text_reader::read_as_string_with_crlf_suppression;
use tpnote_lib::filename::NotePathBuf;
/// Set the minimum required configuration file version that is compatible with
/// this Tp-Note version.
///
/// Examples how to use this constant. Choose one of the following:
/// 1. Require some minimum version of the configuration file.
/// Abort if not satisfied.
///
/// ```no_run
/// const MIN_CONFIG_FILE_VERSION: Option<&'static str> = Some("1.5.1");
/// ```
///
/// 2. Require the configuration file to be of the same version as this binary.
///
/// ```no_run
/// const MIN_CONFIG_FILE_VERSION: Option<&'static str> = PKG_VERSION;
/// ```
///
/// 3. Disable minimum version check; all configuration file versions are
/// allowed.
///
/// ```no_run
/// const MIN_CONFIG_FILE_VERSION: Option<&'static str> = None;
/// ```
///
pub(crate) const MIN_CONFIG_FILE_VERSION: Option<&'static str> = PKG_VERSION;
/// Authors.
pub(crate) const AUTHOR: Option<&str> = option_env!("CARGO_PKG_AUTHORS");
/// Copyright.
pub(crate) const COPYRIGHT_FROM: &str = "2020";
/// Name of this executable (without the Windows `.exe` extension).
pub(crate) const CARGO_BIN_NAME: &str = env!("CARGO_BIN_NAME");
/// Use the version number defined in `../Cargo.toml`.
pub(crate) const PKG_VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
/// Tp-Note's configuration file filename.
const CONFIG_FILENAME: &str = concat!(env!("CARGO_BIN_NAME"), ".toml");
/// Default configuration.
pub(crate) const GUI_CONFIG_DEFAULT_TOML: &str = include_str!("config_default.toml");
pub(crate) const DO_NOT_COMMENT_IF_LINE_STARTS_WITH: [&str; 3] = ["###", "[", "name ="];
/// Configuration data, deserialized from the configuration file.
#[derive(Debug, Serialize, Deserialize)]
pub struct Cfg {
    /// Version number of the configuration file as String -or- a text message
    /// explaining why we could not load the configuration file.
    pub version: String,
    /// Unrecognized top-level keys are collected here instead of failing
    /// deserialization (tolerates config files from newer versions).
    #[serde(flatten)]
    pub extra_fields: HashMap<String, Value>,
    /// Default values for command line arguments.
    pub arg_default: ArgDefault,
    /// Clipboard read/empty behavior.
    pub clipboard: Clipboard,
    /// External application argument lists, one set per operating system.
    pub app_args: OsType<AppArgs>,
    /// Viewer settings (see `Viewer`, defined elsewhere in this file).
    pub viewer: Viewer,
    /// HTML templates, re-exported from `tpnote_lib::config`.
    pub tmpl_html: TmplHtml,
}
#[derive(Debug, Serialize, Deserialize, Default)]
/// The `OsType` selects operating system specific defaults at runtime.
/// It bundles one value of `T` per supported target family; presumably the
/// target the binary was compiled for decides which field is read — confirm
/// against the call sites of `app_args`.
pub struct OsType<T> {
    /// `#[cfg(all(target_family = "unix", not(target_os = "macos")))]`
    /// Currently this selects the following target operating systems:
    /// aix, android, dragonfly, emscripten, espidf, freebsd, fuchsia, haiku,
    /// horizon, illumos, ios, l4re, linux, netbsd, nto, openbsd, redox,
    /// solaris, tvos, unknown, vita, vxworks, wasi, watchos.
    pub unix: T,
    /// `#[cfg(target_family = "windows")]`
    pub windows: T,
    /// `#[cfg(all(target_family = "unix", target_os = "macos"))]`
    pub macos: T,
}
/// Command line arguments, deserialized from the configuration file.
/// Each field supplies the default for the like-named command line option in
/// `Args` (presumably applied when the flag is absent — verify against the
/// caller).
#[derive(Debug, Serialize, Deserialize)]
pub struct ArgDefault {
    /// Default console log level (cf. `--debug`).
    pub debug: ClapLevelFilter,
    pub edit: bool,
    pub no_filename_sync: bool,
    pub popup: bool,
    /// Name of the scheme used for new notes, e.g. "default" or "zettel".
    pub scheme: String,
    pub tty: bool,
    pub add_header: bool,
    /// How local links are rewritten on export (cf. `--export-link-rewriting`).
    pub export_link_rewriting: LocalLinkKind,
}
/// Built-in default values for command line arguments; used when neither a
/// configuration file nor a command line flag overrides them.
impl ::std::default::Default for ArgDefault {
    fn default() -> Self {
        Self {
            scheme: String::from("default"),
            debug: ClapLevelFilter::Error,
            export_link_rewriting: LocalLinkKind::default(),
            add_header: true,
            edit: false,
            no_filename_sync: false,
            popup: false,
            tty: false,
        }
    }
}
/// Configuration of clipboard behavior, deserialized from the configuration
/// file.
#[derive(Debug, Serialize, Deserialize, Default)]
pub struct Clipboard {
    /// If `true`, the system clipboard is read at startup
    /// (cf. `SYSTEM_CLIPBOARD` in `settings.rs`).
    pub read_enabled: bool,
    /// NOTE(review): presumably controls whether the clipboard is emptied
    /// after it was read — confirm against the clipboard module.
    pub empty_enabled: bool,
}
/// Arguments lists for invoking external applications, deserialized from the
/// configuration file. Every argument may contain a Tera template, which is
/// rendered by `render_tmpl()` in `Cfg::from_files()`. Presumably each inner
/// `Vec<String>` is one complete candidate command line — confirm against
/// the launcher code.
#[derive(Debug, Serialize, Deserialize, Default)]
pub struct AppArgs {
    pub browser: Vec<Vec<String>>,
    pub editor: Vec<Vec<String>>,
    pub editor_console: Vec<Vec<String>>,
}
/// Configuration data for the viewer feature, deserialized from the
/// configuration file.
/// NOTE(review): most field semantics are not visible in this file; the
/// authoritative documentation lives in `config_default.toml`.
#[derive(Debug, Serialize, Deserialize, Default)]
pub struct Viewer {
    pub startup_delay: isize,
    pub missing_header_disables: bool,
    pub notify_period: u64,
    pub tcp_connections_max: usize,
    /// List of `(file extension, MIME type)` pairs served by the viewer
    /// (cf. the unit test `test_cfg_from_file`).
    pub served_mime_types: Vec<(String, String)>,
    pub displayed_tpnote_count_max: usize,
}
/// When no configuration file is found, defaults are set here from built-in
/// constants. These defaults are then serialized into a newly created
/// configuration file on disk.
impl ::std::default::Default for Cfg {
    fn default() -> Self {
        // Make sure that we parse the `LIB_CONFIG_DEFAULT_TOML` first:
        // forcing `LIB_CFG` initializes the library configuration before
        // the default TOML below is deserialized.
        LazyLock::force(&LIB_CFG);
        // `expect()` is acceptable here: the default configuration is a
        // compile-time asset; failing to parse it is a programming error.
        toml::from_str(&Cfg::default_as_toml()).expect(
            "Error in default configuration in source file:\n\
            `tpnote/src/config_default.toml`",
        )
    }
}
impl Cfg {
    /// Emits the default configuration as TOML string with comments.
    /// Concatenates a `version` key, the library defaults from `tpnote_lib`
    /// and the GUI defaults from `config_default.toml`.
    #[inline]
    fn default_as_toml() -> String {
        let config_default_toml = format!(
            "version = \"{}\"\n\n{}\n\n{}",
            PKG_VERSION.unwrap_or_default(),
            LIB_CONFIG_DEFAULT_TOML,
            GUI_CONFIG_DEFAULT_TOML
        );
        config_default_toml
    }
    /// Parse the configuration file if it exists. Otherwise write one with
    /// default values.
    ///
    /// Merges all readable files in `config_paths` on top of the built-in
    /// defaults, publishes the library part into the global `LIB_CFG`, and
    /// returns the GUI part as `Cfg`. Errors on unknown top level keys.
    #[inline]
    fn from_files(config_paths: &[PathBuf]) -> Result<Cfg, ConfigFileError> {
        // Runs through all strings and renders config values as templates.
        // No variables are set in this context. But you can use environment
        // variables in templates: e.g.:
        // `{{ get_env(name="username", default="unknown-user" )}}`.
        // Render errors silently degrade to an empty string
        // (`unwrap_or_default`).
        fn render_tmpl(var: &mut [Vec<String>]) {
            var.iter_mut().for_each(|i| {
                i.iter_mut().for_each(|arg| {
                    let new_arg = Tera::default()
                        .render_str(arg, &tera::Context::new())
                        .unwrap_or_default()
                        .to_string();
                    let _ = mem::replace(arg, new_arg);
                })
            })
        }
        //
        // `from_files()` start
        let mut base_config = CfgVal::from_str(GUI_CONFIG_DEFAULT_TOML)?;
        base_config.extend(CfgVal::from_str(LIB_CONFIG_DEFAULT_TOML)?);
        base_config.insert(
            "version".to_string(),
            Value::String(PKG_VERSION.unwrap_or_default().to_string()),
        );
        // Merge all config files from various locations. Unreadable files
        // are skipped silently (`filter_map` + `File::open(..).ok()`).
        let cfg_val = config_paths
            .iter()
            .filter_map(|path| File::open(path).ok())
            .map(|reader| read_as_string_with_crlf_suppression(reader).map_err(ConfigFileError::from)
                .and_then(|config| toml::from_str(&config).map_err(ConfigFileError::from))
            )
            .collect::<Result<Vec<_>, _>>()?
            .into_iter()
            .fold(base_config, CfgVal::merge);
        // We cannot use the logger here, it is too early.
        if ARGS.debug == Some(ClapLevelFilter::Trace) && ARGS.batch && ARGS.version {
            println!(
                "*** Merged configuration from all config files:\n\n{:#?}",
                cfg_val
            );
        }
        // Parse Values into the `lib_cfg`.
        let lib_cfg = LibCfg::try_from(cfg_val.clone())?;
        {
            // Copy the `lib_cfg` into the global `LIB_CFG`.
            let mut c = LIB_CFG.write();
            *c = lib_cfg;
            // We cannot use the logger here, it is too early.
            if ARGS.debug == Some(ClapLevelFilter::Trace) && ARGS.batch && ARGS.version {
                println!(
                    "\n\n\n\n\n*** Configuration part 1 after merging \
                    `scheme`s into copies of `base_scheme`:\
                    \n\n{:#?}\
                    \n\n\n\n\n",
                    *c
                );
            }
        } // Release lock.
        //
        // Parse the result into the struct `Cfg`.
        let mut cfg: Cfg = cfg_val.to_value().try_into()?;
        // Collect unused field names.
        // We know that all keys collected in `extra_fields` must be
        // top level keys in `LIB_CFG`.
        let unused: Vec<String> = cfg
            .extra_fields
            .into_keys()
            .filter(|k| !LIB_CFG_RAW_FIELD_NAMES.contains(&k.as_str()))
            .collect::<Vec<String>>();
        if !unused.is_empty() {
            return Err(ConfigFileError::ConfigFileUnkownFieldName { error: unused });
        };
        // Remove already processed items (`into_keys()` above moved the map
        // out; reassigning makes `cfg` whole again).
        cfg.extra_fields = HashMap::new();
        // Fill in potential templates.
        render_tmpl(&mut cfg.app_args.unix.browser);
        render_tmpl(&mut cfg.app_args.unix.editor);
        render_tmpl(&mut cfg.app_args.unix.editor_console);
        render_tmpl(&mut cfg.app_args.windows.browser);
        render_tmpl(&mut cfg.app_args.windows.editor);
        render_tmpl(&mut cfg.app_args.windows.editor_console);
        render_tmpl(&mut cfg.app_args.macos.browser);
        render_tmpl(&mut cfg.app_args.macos.editor);
        render_tmpl(&mut cfg.app_args.macos.editor_console);
        let cfg = cfg; // Freeze.
        // We cannot use the logger here, it is too early.
        if ARGS.debug == Some(ClapLevelFilter::Trace) && ARGS.batch && ARGS.version {
            println!(
                "\n\n\n\n\n*** Configuration part 2 after applied templates:\
                \n\n{:#?}\
                \n\n\n\n\n",
                cfg
            );
        }
        // First check passed.
        Ok(cfg)
    }
    /// Writes the default configuration to `Path` or to `stdout` if
    /// `config_path == -`. All lines except blank lines and lines starting
    /// with a token from `DO_NOT_COMMENT_IF_LINE_STARTS_WITH` are commented
    /// out with `# `.
    pub(crate) fn write_default_to_file_or_stdout(
        config_path: &Path,
    ) -> Result<(), ConfigFileError> {
        // These must live longer than `writeable`, and thus are declared first:
        let (mut stdout_write, mut file_write);
        // On-Stack Dynamic Dispatch:
        let writeable: &mut dyn io::Write = if config_path == Path::new("-") {
            stdout_write = io::stdout();
            &mut stdout_write
        } else {
            fs::create_dir_all(config_path.parent().unwrap_or_else(|| Path::new("")))?;
            file_write = File::create(config_path)?;
            &mut file_write
        };
        let mut commented = String::new();
        for l in Self::default_as_toml().lines() {
            if l.is_empty() {
                commented.push('\n');
            } else if DO_NOT_COMMENT_IF_LINE_STARTS_WITH
                .iter()
                .all(|&token| !l.starts_with(token))
            {
                commented.push_str("# ");
                commented.push_str(l);
                commented.push('\n');
            } else {
                commented.push_str(l);
                commented.push('\n');
            }
        }
        writeable.write_all(commented.as_bytes())?;
        Ok(())
    }
    /// Renames (backs up) the last existing configuration file in
    /// `CONFIG_PATHS` to the next unused backup filename and returns the
    /// path of the renamed original.
    /// NOTE(review): despite the former doc comment, this function does not
    /// write a new default configuration; writing happens elsewhere.
    pub(crate) fn backup_and_remove_last() -> Result<PathBuf, ConfigFileError> {
        if let Some(config_path) = CONFIG_PATHS.iter().filter(|p| p.exists()).next_back() {
            let mut config_path_bak = config_path.to_path_buf();
            config_path_bak.set_next_unused()?;
            fs::rename(config_path, &config_path_bak)?;
            Ok(config_path.clone())
        } else {
            Err(ConfigFileError::PathToConfigFileNotFound)
        }
    }
}
/// Reads and parses the configuration file "tpnote.toml". An alternative
/// filename (optionally with absolute path) can be given on the command
/// line with "--config".
///
/// On any load error, the error is recorded in `CFG_FILE_LOADING` and the
/// built-in default configuration is used instead, so this static never
/// fails to initialize.
pub static CFG: LazyLock<Cfg> = LazyLock::new(|| {
    Cfg::from_files(&CONFIG_PATHS).unwrap_or_else(|e| {
        // Remember that something went wrong.
        let mut cfg_file_loading = CFG_FILE_LOADING.write();
        *cfg_file_loading = Err(e);
        // As we could not load the configuration file, we will use
        // the default configuration.
        Cfg::default()
    })
});
/// Variable indicating with `Err` if the loading of the configuration file
/// went wrong. Starts as `Ok(())`; set by the `CFG` initializer above.
pub static CFG_FILE_LOADING: LazyLock<RwLock<Result<(), ConfigFileError>>> =
    LazyLock::new(|| RwLock::new(Ok(())));
/// This is where the Tp-Note searches for its configuration files.
/// The paths are pushed in ascending order of precedence: system wide file,
/// `TPNOTE_CONFIG` environment variable, user configuration directory, a
/// root path marker file and finally `--config`. `Cfg::from_files()` merges
/// them in this order (presumably later values win — cf. `CfgVal::merge`).
pub static CONFIG_PATHS: LazyLock<Vec<PathBuf>> = LazyLock::new(|| {
    let mut config_path: Vec<PathBuf> = vec![];
    // System wide configuration (Unix only).
    #[cfg(unix)]
    config_path.push(PathBuf::from("/etc/tpnote/tpnote.toml"));
    // Config path comes from the environment variable.
    if let Ok(env_config) = env::var(ENV_VAR_TPNOTE_CONFIG) {
        config_path.push(PathBuf::from(env_config));
    };
    // Config comes from the standard configuration file location.
    if let Some(usr_config) = ProjectDirs::from("rs", "", CARGO_BIN_NAME) {
        let mut config = PathBuf::from(usr_config.config_dir());
        config.push(Path::new(CONFIG_FILENAME));
        config_path.push(config);
    };
    // Is there a `FILENAME_ROOT_PATH_MARKER` file?
    // At this point, we ignore a file error silently. Next time,
    // `Context::new()` is executed, we report this error to the user.
    if let Some(root_path) = DOC_PATH.as_deref().ok().map(|doc_path| {
        let mut root_path = if let Ok(context) = Context::from(doc_path) {
            context.get_root_path().to_owned()
        } else {
            PathBuf::new()
        };
        root_path.push(FILENAME_ROOT_PATH_MARKER);
        root_path
    }) {
        config_path.push(root_path);
    };
    if let Some(commandline_path) = &ARGS.config {
        // Config path comes from command line.
        config_path.push(PathBuf::from(commandline_path));
    };
    config_path
});
#[cfg(test)]
mod tests {
    use crate::error::ConfigFileError;
    use tpnote_lib::config::LIB_CFG;
    use super::Cfg;
    use std::env::temp_dir;
    use std::fs;
    /// Exercises `Cfg::from_files()` with several minimal user config files.
    /// NOTE(review): all sub-tests share one `#[test]` function because they
    /// mutate the global `LIB_CFG`; running them in separate tests could
    /// race.
    #[test]
    fn test_cfg_from_file() {
        //
        // Prepare test: some mini config file overriding `arg_default`.
        let raw = "\
[arg_default]
scheme = 'zettel'
";
        let userconfig = temp_dir().join("tpnote.toml");
        fs::write(&userconfig, raw.as_bytes()).unwrap();
        let cfg = Cfg::from_files(&[userconfig]).unwrap();
        assert_eq!(cfg.arg_default.scheme, "zettel");
        //
        // Prepare test: another mini config file overriding `viewer`.
        let raw = "\
[viewer]
served_mime_types = [ ['abc', 'abc/text'], ]
";
        let userconfig = temp_dir().join("tpnote.toml");
        fs::write(&userconfig, raw.as_bytes()).unwrap();
        let cfg = Cfg::from_files(&[userconfig]).unwrap();
        assert_eq!(cfg.viewer.served_mime_types.len(), 1);
        assert_eq!(cfg.viewer.served_mime_types[0].0, "abc");
        //
        // Prepare test: a config file with an unknown top level key;
        // parsing must fail.
        let raw = "\
unknown_field_name = 'aha'
";
        let userconfig = temp_dir().join("tpnote.toml");
        fs::write(&userconfig, raw.as_bytes()).unwrap();
        let cfg = Cfg::from_files(&[userconfig]).unwrap_err();
        assert!(matches!(
            cfg,
            ConfigFileError::ConfigFileUnkownFieldName { .. }
        ));
        //
        // Prepare test: a config file customizing the `default` scheme.
        let raw = "\
[[scheme]]
name = 'default'
[scheme.filename]
sort_tag.separator = '---'
[scheme.tmpl]
fm_var.localization = [ ['fm_foo', 'foofoo'], ]
";
        let userconfig = temp_dir().join("tpnote.toml");
        fs::write(&userconfig, raw.as_bytes()).unwrap();
        let _cfg = Cfg::from_files(&[userconfig]).unwrap();
        {
            let lib_cfg = LIB_CFG.read();
            // The variables come from the `./config_default.toml` `zettel`
            // scheme:
            assert_eq!(lib_cfg.scheme.len(), 2);
            let zidx = lib_cfg.scheme_idx("zettel").unwrap();
            assert_eq!(lib_cfg.scheme[zidx].name, "zettel");
            // This variable is defined in the `zettel` scheme in
            // `./config_default.toml`:
            assert_eq!(lib_cfg.scheme[zidx].filename.sort_tag.separator, "--");
            // These variables are inherited from the `base_scheme` in
            // `./config_default.toml`. They are part of the `zettel` scheme:
            assert_eq!(lib_cfg.scheme[zidx].filename.extension_default, "md");
            assert_eq!(lib_cfg.scheme[zidx].filename.sort_tag.extra_separator, '\'');
            let didx = lib_cfg.scheme_idx("default").unwrap();
            // These variables are inherited from the `base_scheme` in
            // `./config_default.toml`. They are part of the `default` scheme:
            assert_eq!(lib_cfg.scheme[didx].filename.extension_default, "md");
            assert_eq!(lib_cfg.scheme[didx].tmpl.fm_var.localization.len(), 1);
            // These variables originate from `userconfig` (`tpnote.toml`)
            // and are part of the `default` scheme:
            assert_eq!(lib_cfg.scheme[didx].filename.sort_tag.separator, "---");
            assert_eq!(
                lib_cfg.scheme[didx].tmpl.fm_var.localization[0],
                ("fm_foo".to_string(), "foofoo".to_string())
            );
            // This variable is defined in the `default` scheme in
            // `./config_default.toml`:
            assert_eq!(lib_cfg.scheme[didx].name, "default");
        } // Free `LIB_CFG` lock.
    }
}
//! Receives strings by a message channel, queues them and displays them
//! one by one in popup alert windows.
use std::sync::mpsc::sync_channel;
use std::sync::mpsc::Receiver;
use std::sync::mpsc::RecvTimeoutError;
use std::sync::mpsc::SendError;
use std::sync::mpsc::SyncSender;
use std::sync::LazyLock;
use std::sync::Mutex;
use std::thread;
use std::thread::sleep;
use std::time::Duration;
/// The number of messages that will be queued.
/// As error messages can drop in by every thread and we can only
/// show one alert window at the same time, they must be queued.
pub const QUEUE_LEN: usize = 30;
/// The `AlertService` reports to be busy as long as there is a message window
/// open and beyond that also `KEEP_ALIVE` milliseconds after the last message
/// window got closed by the user.
/// NOTE(review): the constant is gated on `message-box` while its consumer
/// `run()` is not — presumably this whole module is only compiled with that
/// feature; confirm in `main.rs`/`lib.rs`.
#[cfg(feature = "message-box")]
const KEEP_ALIVE: u64 = 1000;
/// Extra timeout for the `flush()` method, before it checks if there is still
/// an open popup alert window. We wait a moment just in case that there are
/// pending messages we have not received yet. 1 millisecond is enough, we wait
/// 10 just to be sure.
const FLUSH_TIMEOUT: u64 = 10;
/// Hold `AlertService` in a static variable, that
/// `AlertService::push_str()` can be called easily from everywhere.
static ALERT_SERVICE: LazyLock<AlertService> = LazyLock::new(|| {
AlertService {
// The message queue accepting strings for being shown as
// popup alert windows.
message_channel: {
let (tx, rx) = sync_channel(QUEUE_LEN);
(tx, Mutex::new(rx))
},
// This mutex does not hold any data. When it is locked, it indicates,
// that the `AlertService` is still busy and should not get shut down.
busy_lock: Mutex::new(()),
// We start with no function pointer.
popup_alert: Mutex::new(None),
}
});
/// State of the alert service; the single instance lives in the
/// `ALERT_SERVICE` static above.
pub struct AlertService {
    /// The message queue accepting strings for being shown as
    /// popup alert windows.
    message_channel: (SyncSender<String>, Mutex<Receiver<String>>),
    /// This mutex does not hold any data. When it is locked, it indicates,
    /// that the `AlertService` is still busy and should not get shut down.
    busy_lock: Mutex<()>,
    /// Function pointer to the function that is called when the
    /// popup alert dialog shall appear.
    /// None means no function pointer was registered.
    popup_alert: Mutex<Option<fn(&str)>>,
}
impl AlertService {
    /// Initializes the service. Call once when the application starts.
    /// Drop strings into the `ALERT_SERVICE.message_channel` (via
    /// `push_str()`) to use this service.
    pub fn init(popup_alert: fn(&str)) {
        // Setup the `AlertService`.
        // Set up the channel now.
        LazyLock::force(&ALERT_SERVICE);
        *ALERT_SERVICE.popup_alert.lock().unwrap() = Some(popup_alert);
        // The worker thread runs for the rest of the process lifetime.
        thread::spawn(move || {
            // This will block until the previous message has been received.
            AlertService::run();
        });
    }
    /// Alert service worker loop, receiving Strings to display in a popup
    /// window. Holds `busy_lock` from the first displayed message until
    /// `KEEP_ALIVE` milliseconds have passed without a new one, so that
    /// `flush()` can wait for pending windows.
    fn run() {
        // Get the receiver.
        let (_, rx) = &ALERT_SERVICE.message_channel;
        let rx = rx.lock().unwrap();
        // We start with the lock released.
        let mut opt_guard = None;
        loop {
            let msg = if opt_guard.is_none() {
                // As there is no lock, we block here until the next message comes.
                // `recv()` should never return `Err`. This can only happen when
                // the sending half of a channel (or sync_channel) is disconnected,
                // implying that no further messages will ever be received.
                // As this should never happen, we panic this thread then.
                Some(rx.recv().unwrap())
            } else {
                // There is a lock because we just received another message.
                // If the next `KEEP_ALIVE` milliseconds no
                // other message comes in, we release the lock again.
                match rx.recv_timeout(Duration::from_millis(KEEP_ALIVE)) {
                    Ok(s) => Some(s),
                    Err(RecvTimeoutError::Timeout) => None,
                    // The sending half of a channel (or sync_channel) is `Disconnected`,
                    // implies that no further messages will ever be received.
                    // As this should never happen, we panic this thread then.
                    Err(RecvTimeoutError::Disconnected) => panic!(),
                }
            };
            // We received a message.
            match msg {
                Some(s) => {
                    // If the lock is released, lock it now.
                    if opt_guard.is_none() {
                        opt_guard = ALERT_SERVICE.busy_lock.try_lock().ok();
                    }
                    match *ALERT_SERVICE.popup_alert.lock().unwrap() {
                        // This blocks until the user closes the alert window.
                        Some(popup_alert) => popup_alert(&s),
                        // `init()` installs the pointer before spawning this
                        // thread, so `None` here is a programming error.
                        _ => panic!(
                            "Can not print message \"{}\". \
                            No alert function registered!",
                            &s
                        ),
                    };
                }
                // `KEEP_ALIVE` milliseconds are over and still no
                // new message. We release the lock again.
                None => {
                    // Dropping the guard releases `busy_lock`.
                    opt_guard = None;
                }
            }
        }
    }
    /// The `AlertService` keeps holding a lock until `KEEP_ALIVE` milliseconds
    /// after the user has closed that last error alert window. Only then, it
    /// releases the lock. This function blocks until the lock is released.
    pub fn flush() {
        // See constant documentation why we wait here.
        sleep(Duration::from_millis(FLUSH_TIMEOUT));
        // This might block, if a guard in `run()` holds already a lock.
        let _res = ALERT_SERVICE.busy_lock.lock();
    }
    #[inline]
    /// Pushes `msg` into queue. In case the message queue is full, the method
    /// blocks until there is more free space. Make sure to initialize before
    /// with `AlertService::init()`. Returns a `SendError` if nobody listens on
    /// `rx` of the queue. This can happen, e.g. if `AlertService::init()` has
    /// not been called before.
    pub fn push_str(msg: String) -> Result<(), SendError<String>> {
        let (tx, _) = &ALERT_SERVICE.message_channel;
        tx.send(msg)
    }
}
//! Reads the command line parameters and clipboard and exposes them as `static`
//! variables.
//#[cfg(any(feature = "read-clipboard", feature = "viewer"))]
use crate::clipboard::SystemClipboard;
use crate::config::CFG;
use clap::Parser;
use clap::ValueEnum;
use serde::Deserialize;
use serde::Serialize;
use std::env;
use std::io;
use std::io::IsTerminal;
use std::io::Read;
use std::path::PathBuf;
use std::sync::LazyLock;
use tpnote_lib::config::LocalLinkKind;
use tpnote_lib::config::TMPL_VAR_STDIN;
use tpnote_lib::content::Content;
use tpnote_lib::content::ContentString;
use tpnote_lib::text_reader::CrlfSuppressorExt;
/// The name of the environment variable, that - when set - replaces the default
/// path where Tp-Note loads or stores its configuration file.
pub const ENV_VAR_TPNOTE_CONFIG: &str = "TPNOTE_CONFIG";
/// The name of the environment variable which can be optionally set to launch
/// a different file editor. Setting it to the empty string disables the
/// editor launch (cf. `LAUNCH_EDITOR`).
pub const ENV_VAR_TPNOTE_EDITOR: &str = "TPNOTE_EDITOR";
/// The name of the environment variable which can be optionally used to launch
/// a different web browser. Setting it to the empty string disables the
/// viewer launch (cf. `LAUNCH_VIEWER`).
#[cfg(feature = "viewer")]
pub const ENV_VAR_TPNOTE_BROWSER: &str = "TPNOTE_BROWSER";
/// The name of the environment variable which Tp-Note checks under Unix, if it
/// is invoked as `root` (cf. `RUNS_ON_CONSOLE`).
#[cfg(target_family = "unix")]
const ENV_VAR_USER: &str = "USER";
/// The name of the environment variable which Tp-Note checks under Unix, if it
/// is invoked on a graphical desktop (cf. `RUNS_ON_CONSOLE`).
#[cfg(target_family = "unix")]
const ENV_VAR_DISPLAY: &str = "DISPLAY";
#[derive(Debug, Eq, PartialEq, Parser)]
#[command(
    version,
    name = "Tp-Note",
    about,
    long_about = "Fast note taking with templates and filename synchronization.",
    disable_version_flag = true
)]
/// _Tp-Note_ is a note-taking tool and a template system, that synchronizes the
/// note's metadata with its filename. _Tp-Note_ collects various information
/// about its environment and the clipboard and stores it in variables. New
/// notes are created by filling these variables in predefined and customizable
/// `Tera`-templates. In case `<path>` points to an existing _Tp-Note_ file, the
/// note's metadata is analyzed and, if necessary, its filename is adjusted.
/// For all other filetypes, _Tp-Note_ creates a new note annotating the
/// file `<path>` points to. If `<path>` is a directory (or, when omitted the
/// current working directory), a new note is created in that directory. After
/// creation, _Tp-Note_ launches an external editor of your choice. Although the
/// templates are written for Markdown, _Tp-Note_ is not tied to
/// any specific markup language. However, _Tp-Note_ comes with an optional
/// viewer feature, that currently renders only Markdown, ReStructuredText and
/// HTML. Note, that there is also some limited support for Asciidoc and
/// WikiText. The note's rendition with its hyperlinks is live updated and
/// displayed in the user's web browser.
pub struct Args {
    // NOTE: with clap's derive API the `///` doc comments on this struct and
    // its fields are emitted as the CLI help text. Edit them only when the
    // help output should change.
    /// Prepends a YAML header if missing
    #[arg(long, short = 'a')]
    pub add_header: bool,
    /// Batch mode: does not launch the editor or the viewer
    #[arg(long, short = 'b')]
    pub batch: bool,
    /// Loads (and merges) an additional configuration file
    #[arg(long, short = 'c')]
    pub config: Option<String>,
    /// Dumps the internal default configuration into a file
    /// or stdout if `-`
    #[arg(long, short = 'C')]
    pub config_defaults: Option<String>,
    /// Console debug level:
    #[arg(long, short = 'd', value_enum)]
    pub debug: Option<ClapLevelFilter>,
    /// Shows console debug messages also as popup windows
    #[arg(long, short = 'u')]
    pub popup: bool,
    /// Launches only the editor, no browser
    #[arg(long, short = 'e')]
    pub edit: bool,
    /// Scheme for new notes: "default", "zettel", (cf. `--config-defaults`)
    #[arg(long, short = 's')]
    pub scheme: Option<String>,
    /// Console mode: opens the console editor, no browser
    #[arg(long, short = 't')]
    pub tty: bool,
    /// Lets the web server listen to a specific port
    #[arg(long, short = 'p')]
    pub port: Option<u16>,
    /// Disables filename synchronization
    #[arg(long, short = 'n')]
    pub no_filename_sync: bool,
    /// Disables automatic language detection and uses `<FORCE_LANG>`
    /// instead; or, if '-' use `TPNOTE_LANG` or `LANG`
    #[arg(long, short = 'l')]
    pub force_lang: Option<String>,
    /// Launches only the browser, no editor
    #[arg(long, short = 'v')]
    pub view: bool,
    /// `<DIR>` the new note's location or `<FILE>` to open or to annotate
    #[arg(name = "PATH")]
    pub path: Option<PathBuf>,
    /// Prints the version and exits
    #[arg(long, short = 'V')]
    pub version: bool,
    /// Saves the HTML rendition in the `<EXPORT>` directory,
    /// the note's directory if '.' or standard output if '-'.
    #[arg(long, short = 'x')]
    pub export: Option<PathBuf>,
    /// Exporter local link rewriting: [possible values: off, short, long]
    #[arg(long, value_enum)]
    pub export_link_rewriting: Option<LocalLinkKind>,
}
/// Structure to hold the parsed command line arguments.
pub static ARGS: LazyLock<Args> = LazyLock::new(Args::parse);
/// Shall we launch the external text editor?
/// `false` in batch or export mode, or when `TPNOTE_EDITOR` is set to the
/// empty string; otherwise `true` unless `--view` was given without
/// `--edit`.
pub static LAUNCH_EDITOR: LazyLock<bool> = LazyLock::new(|| {
    !ARGS.batch
        && ARGS.export.is_none()
        && env::var(ENV_VAR_TPNOTE_EDITOR) != Ok(String::new())
        && (ARGS.edit || !ARGS.view)
});
#[cfg(feature = "viewer")]
/// Shall we launch the internal HTTP server and the external browser?
/// Never in batch/export mode or on a console; otherwise on `--view`, or
/// by default unless editing was requested or `TPNOTE_BROWSER` is set to
/// the empty string.
pub static LAUNCH_VIEWER: LazyLock<bool> = LazyLock::new(|| {
    !ARGS.batch
        && ARGS.export.is_none()
        && !*RUNS_ON_CONSOLE
        && (ARGS.view
            || (!ARGS.edit
                && !CFG.arg_default.edit
                && env::var(ENV_VAR_TPNOTE_BROWSER) != Ok(String::new())))
});
#[cfg(not(feature = "viewer"))]
/// Shall we launch the internal HTTP server and the external browser?
/// Always `false` when the binary was built without the `viewer` feature.
pub static LAUNCH_VIEWER: LazyLock<bool> = LazyLock::new(|| false);
/// Do we run on a console?
/// `true` for user `root` on Unix, when `DISPLAY` is unset/empty on Unix,
/// or when `--tty` was given (on the command line or via the configuration).
pub static RUNS_ON_CONSOLE: LazyLock<bool> = LazyLock::new(|| {
    use crate::CFG;
    // User `root` has usually no GUI.
    #[cfg(target_family = "unix")]
    if let Some(user) = std::env::var(ENV_VAR_USER)
        // Map error to `None`.
        .ok()
        // A pattern mapping `Some("")` to `None`.
        .and_then(|s: String| if s.is_empty() { None } else { Some(s) })
    {
        if user == "root" {
            return true;
        }
    }
    // On Linux popup window only if DISPLAY is set.
    #[cfg(target_family = "unix")]
    let display = std::env::var(ENV_VAR_DISPLAY)
        // Map error to `None`.
        .ok()
        // A pattern mapping `Some("")` to `None`.
        .and_then(|s: String| if s.is_empty() { None } else { Some(s) });
    // In non-Linux there is always "Some" display.
    #[cfg(not(target_family = "unix"))]
    let display = Some(String::new());
    display.is_none() || ARGS.tty || CFG.arg_default.tty
});
/// Reads the input stream standard input if there is any.
/// Read or UTF-8 decoding errors silently degrade to empty input
/// (`unwrap_or_default`). If the input looks like HTML, it is converted
/// via `ContentString::from_html`; a failing conversion stores the error
/// text as the content instead.
pub static STDIN: LazyLock<ContentString> = LazyLock::new(|| {
    // Bring new methods into scope.
    use tpnote_lib::html::HtmlStr;
    let mut buffer = String::new();
    // Read the standard input.
    let stdin = io::stdin();
    if !stdin.is_terminal() {
        // There is an input pipe for us to read from.
        let handle = stdin.lock();
        // Normalize CRLF line endings while reading raw bytes.
        let buf = handle.bytes().crlf_suppressor();
        let buf: Result<Vec<u8>, std::io::Error> = buf.collect();
        let buf = buf.unwrap_or_default();
        buffer = String::from_utf8(buf).unwrap_or_default();
    }
    // Guess if this is an HTML stream.
    if buffer.is_html_unchecked() {
        ContentString::from_html(buffer, TMPL_VAR_STDIN.to_string()).unwrap_or_else(|e| {
            ContentString::from_string(e.to_string(), TMPL_VAR_STDIN.to_string())
        })
    } else {
        ContentString::from_string(buffer, TMPL_VAR_STDIN.to_string())
    }
});
/// Reads the clipboard, if there is any and empties it.
/// Reading is skipped - an empty default `SystemClipboard` is used instead -
/// when clipboard reading is disabled in the configuration or in `--batch`
/// mode.
pub static SYSTEM_CLIPBOARD: LazyLock<SystemClipboard> = LazyLock::new(|| {
    if CFG.clipboard.read_enabled && !ARGS.batch {
        SystemClipboard::new()
    } else {
        SystemClipboard::default()
    }
});
/// Read and canonicalize the `<path>` from the command line. If no
/// `<path>` was given, use the current directory instead.
pub static DOC_PATH: LazyLock<Result<PathBuf, std::io::Error>> = LazyLock::new(|| {
    match &ARGS.path {
        Some(p) => p.canonicalize(),
        None => env::current_dir(),
    }
});
/// An enum representing the available verbosity level filters of the logger.
///
/// A local counterpart of the `log` crate's level filter (the variants
/// correspond one-to-one to the `log` levels), defined here so that
/// `clap::ValueEnum` and the `serde` traits can be derived for it.
/// Use `log::max_level()` and `log::set_max_level()` to get and set the
/// global maximum log level.
// NOTE: the `///` comments on the variants below are emitted by
// `clap::ValueEnum` as possible-value help text; do not edit them casually.
#[repr(usize)]
#[derive(
    Clone,
    Copy,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Debug,
    Hash,
    Serialize,
    Deserialize,
    Default,
    ValueEnum,
)]
pub enum ClapLevelFilter {
    /// A level lower than all log levels.
    Off,
    /// Corresponds to the `Error` log level.
    #[default]
    Error,
    /// Corresponds to the `Warn` log level.
    Warn,
    /// Corresponds to the `Info` log level.
    Info,
    /// Corresponds to the `Debug` log level.
    Debug,
    /// Corresponds to the `Trace` log level.
    Trace,
}
//! Prints error messages and exceptional states.
use crate::CONFIG_PATHS;
#[cfg(feature = "message-box")]
use crate::PKG_VERSION;
#[cfg(feature = "message-box")]
use crate::alert_service::AlertService;
use crate::config::CARGO_BIN_NAME;
#[cfg(feature = "message-box")]
use crate::settings::ARGS;
#[cfg(feature = "message-box")]
use crate::settings::RUNS_ON_CONSOLE;
use log::LevelFilter;
use log::{Level, Metadata, Record};
#[cfg(all(unix, not(target_os = "macos")))]
#[cfg(feature = "message-box")]
use notify_rust::Hint;
#[cfg(not(target_os = "windows"))]
#[cfg(feature = "message-box")]
use notify_rust::{Notification, Timeout};
use parking_lot::RwLock;
use std::env;
use std::sync::LazyLock;
#[cfg(target_os = "windows")]
#[cfg(feature = "message-box")]
use win_msgbox::{Okay, information};
#[cfg(feature = "message-box")]
/// Window title of the message alert box.
const ALERT_DIALOG_TITLE: &str = "Tp-Note";
#[cfg(feature = "message-box")]
/// Window title followed by the package version, e.g. `Tp-Note (v1.2.3)`.
pub static ALERT_DIALOG_TITLE_LINE: LazyLock<String> = LazyLock::new(|| {
    format!(
        "{} (v{})",
        &ALERT_DIALOG_TITLE,
        PKG_VERSION.unwrap_or("unknown")
    )
});
/// Pops up an error message notification and prints `msg`.
/// Blocks until the user closes the window.
/// Under Linux no notifications will be shown when
/// `log::max_level=Level::Trace`.
/// Show errors are silently ignored (`if let Ok(..)`).
#[cfg(not(target_os = "windows"))]
#[cfg(feature = "message-box")]
fn popup_alert(msg: &str) {
    if log::max_level() == Level::Trace {
        return;
    }
    let mut n = Notification::new();
    let n = n
        .summary(&ALERT_DIALOG_TITLE_LINE)
        .body(msg)
        .icon("tpnote")
        .appname("tpnote");
    // Ask the notification daemon to keep the message on screen
    // (Linux/BSD only).
    #[cfg(all(unix, not(target_os = "macos")))]
    let n = n.hint(Hint::Resident(true));
    if let Ok(_handle) = n
        // Does not work on Kde.
        .timeout(Timeout::Never) // Works on Kde and Gnome.
        .show()
    {
        // // Only available in Linux.
        // ```rust
        // _handle.wait_for_action(|_action| {
        //     if "__closed" == _action {
        //         println!("the notification was closed")
        //     }
        // })
        // ```
    };
}
/// Pops up an error message box and prints `msg`.
/// Blocks until the user closes the window (Windows variant).
#[cfg(target_os = "windows")]
#[cfg(feature = "message-box")]
fn popup_alert(msg: &str) {
    // Silently ignore `show()` error.
    let _ = information::<Okay>(msg)
        .title(&ALERT_DIALOG_TITLE_LINE)
        .show();
}
/// Some additional debugging information added to the end of error messages:
/// the full command line and the list of configuration files that actually
/// exist on disk.
pub static ERR_MSG_TAIL: LazyLock<String> = LazyLock::new(|| {
    use std::fmt::Write;
    let mut args_str = String::new();
    for argument in env::args() {
        args_str.push_str(argument.as_str());
        args_str.push(' ');
    }
    format!(
        "\n\
        \n\
        Additional technical details:\n\
        * Command line parameters:\n\
        {}\n\
        * Sourced configuration files:\n\
        {}",
        args_str,
        CONFIG_PATHS
            .iter()
            .filter(|p| p.exists())
            .map(|p| p.to_str().unwrap_or_default())
            .fold(String::new(), |mut output, p| {
                let _ = writeln!(output, "{p}");
                output
            })
    )
});
/// If `true`, all future log events will trigger the opening of a popup
/// alert window. Otherwise only `Level::Error` will do.
static APP_LOGGER_ENABLE_POPUP: RwLock<bool> = RwLock::new(false);
/// Zero-sized logger type; all its state lives in module level statics.
pub struct AppLogger;
/// The single logger instance registered with the `log` facade
/// (cf. `AppLogger::init()`).
static APP_LOGGER: AppLogger = AppLogger;
/// All methods here are stateless (without _self_). Instead, their state is
/// stored in a global variable `APP_LOGGER` in order to simplify the API for
/// the caller. As all the methods are stateless, the caller does not need to
/// carry around any (state) struct. For example, just `AppLogger::log(...)`
/// will do.
impl AppLogger {
    #[inline]
    /// Initialize logger. Must be called exactly once:
    /// `log::set_logger()` errors (and the `unwrap()` below panics) when a
    /// logger is already registered.
    pub fn init() {
        // Setup the `AlertService`
        #[cfg(feature = "message-box")]
        if !*RUNS_ON_CONSOLE && !ARGS.batch {
            AlertService::init(popup_alert);
        };
        // Setup console logger.
        log::set_logger(&APP_LOGGER).unwrap();
        log::set_max_level(LevelFilter::Error);
    }
    /// Sets the maximum level debug events must have to be logged.
    #[allow(dead_code)]
    pub fn set_max_level(level: LevelFilter) {
        log::set_max_level(level);
    }
    /// If called with `true`, all debug events will also trigger the appearance of
    /// a popup alert window.
    #[allow(dead_code)]
    pub fn set_popup_always_enabled(popup: bool) {
        // Release lock immediately.
        *APP_LOGGER_ENABLE_POPUP.write() = popup;
    }
    /// Blocks until the `AlertService` is not busy any more. This should be
    /// executed before quitting the application because there might be still
    /// queued error messages the user has not seen yet.
    /// Once flushed, no more logs are recorded.
    pub fn flush() {
        #[cfg(feature = "message-box")]
        if !*RUNS_ON_CONSOLE && !ARGS.batch {
            // If ever there is still a message window open, this will block.
            AlertService::flush();
            log::set_max_level(LevelFilter::Off);
        }
    }
}
/// Trait defining the logging format and destination.
impl log::Log for AppLogger {
    /// Accept all levels; the effective filter is `log::set_max_level()`.
    fn enabled(&self, metadata: &Metadata<'_>) -> bool {
        metadata.level() <= Level::Trace
    }

    /// Formats the record, prints it to `stderr` and - for errors or when
    /// popups are always enabled - forwards it to the `AlertService`.
    fn log(&self, record: &Record<'_>) {
        if self.enabled(record.metadata()) {
            // Only log Tp-Note errors. Silently ignore others.
            // Checking this first avoids formatting the message (and forcing
            // the lazy `ERR_MSG_TAIL`) for foreign log records.
            if !record.metadata().target().starts_with(CARGO_BIN_NAME) {
                return;
            }
            let mut msg = format!("{}:\n{}", record.level(), &record.args().to_string());
            if record.metadata().level() == Level::Error {
                msg.push_str(&ERR_MSG_TAIL);
            };
            // Log this to `stderr`.
            eprintln!("*** {}", msg);
            // Eventually also log as popup alert window.
            #[cfg(feature = "message-box")]
            if !*RUNS_ON_CONSOLE
                && !ARGS.batch
                && ((record.metadata().level() == LevelFilter::Error)
                    // Release lock immediately.
                    || *APP_LOGGER_ENABLE_POPUP.read())
            {
                // We silently ignore failing pushes. We have printed the
                // error message on the console already.
                let _ = AlertService::push_str(msg);
            };
        }
    }

    fn flush(&self) {
        Self::flush();
    }
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote/src/clipboard.rs | tpnote/src/clipboard.rs | //! Abstract the clipboard handling.
#[cfg(feature = "read-clipboard")]
use clipboard_rs::Clipboard;
#[cfg(feature = "read-clipboard")]
use clipboard_rs::ClipboardContext;
use tpnote_lib::config::TMPL_VAR_HTML_CLIPBOARD;
use tpnote_lib::config::TMPL_VAR_TXT_CLIPBOARD;
use tpnote_lib::content::Content;
use tpnote_lib::content::ContentString;
#[cfg(feature = "read-clipboard")]
use tpnote_lib::text_reader::StringExt;
#[cfg(feature = "read-clipboard")]
#[cfg(unix)]
use tpnote_lib::text_reader::read_as_string_with_crlf_suppression;
#[cfg(feature = "read-clipboard")]
#[cfg(unix)]
use wl_clipboard_rs::copy;
#[cfg(feature = "read-clipboard")]
#[cfg(unix)]
use wl_clipboard_rs::paste;
/// Snapshot of the system clipboard content, taken once at program start
/// (see `main()`, which forces `SYSTEM_CLIPBOARD` first).
#[derive(Debug, PartialEq, Eq)]
pub struct SystemClipboard {
    /// HTML flavour of the clipboard content; empty when none was available.
    pub html: ContentString,
    /// Plain-text flavour of the clipboard content; empty when none was
    /// available.
    pub txt: ContentString,
}
impl SystemClipboard {
    /// Get a snapshot of the Markdown representation of the clipboard content.
    /// If the content contains HTML, the marker `<!DOCTYPE html>` is
    /// prepended.
    /// Tries the Wayland clipboard first (Unix only); falls back to the
    /// X11/other-platform clipboard when nothing was found.
    #[cfg(feature = "read-clipboard")]
    pub(crate) fn new() -> Self {
        let mut txt_content = String::new();
        let mut html_content = String::new();
        #[cfg(unix)]
        {
            // Query Wayland clipboard
            // Html clipboard content
            if let Ok((pipe_reader, _)) = paste::get_contents(
                paste::ClipboardType::Regular,
                paste::Seat::Unspecified,
                paste::MimeType::Specific("text/html"),
            ) {
                // BUGFIX: assign to the outer variable. An inner `let`
                // previously shadowed it, so the Wayland content was lost.
                html_content =
                    read_as_string_with_crlf_suppression(pipe_reader).unwrap_or_default();
                if !html_content.is_empty() {
                    log::trace!("Got HTML Wayland clipboard:\n {}", html_content);
                }
            };
            // Plain text clipboard content
            if let Ok((pipe_reader, _)) = paste::get_contents(
                paste::ClipboardType::Regular,
                paste::Seat::Unspecified,
                // BUGFIX: the standard MIME type is `text/plain`,
                // not `plain/text`.
                paste::MimeType::Specific("text/plain"),
            ) {
                // BUGFIX: assign to the outer variable (was shadowed).
                txt_content =
                    read_as_string_with_crlf_suppression(pipe_reader).unwrap_or_default();
                log::trace!("Got text Wayland clipboard:\n {}", txt_content);
            };
        }
        if html_content.is_empty() && txt_content.is_empty() {
            // Query X11 clipboard.
            if let Ok(ctx) = ClipboardContext::new() {
                if let Ok(html) = ctx.get_html() {
                    // As this is HTML what the newline kind does not matter
                    // here.
                    // BUGFIX: log the freshly read `html`, not the stale
                    // (empty) `html_content`.
                    log::trace!("Got HTML non-wayland clipboard:\n {}", html);
                    html_content = html;
                };
                if let Ok(txt) = ctx.get_text() {
                    txt_content = txt.crlf_suppressor_string();
                    log::trace!("Got text non-wayland clipboard:\n {}", txt_content);
                };
            }
        }
        Self {
            html: ContentString::from_html(html_content, TMPL_VAR_HTML_CLIPBOARD.to_string())
                .map_err(|e| {
                    log::error!("Could not interpret HTML clipboard:\n{}", e);
                })
                // Ignore error and continue with empty string.
                .unwrap_or_default(),
            txt: ContentString::from_string(txt_content, TMPL_VAR_TXT_CLIPBOARD.to_string()),
        }
    }
    /// When the `read-clipboard` feature is disabled, always return an empty
    /// snapshot (cf. `Default`).
    #[inline]
    #[cfg(not(feature = "read-clipboard"))]
    pub(crate) fn new() -> Self {
        Self::default()
    }
    /// Empty the clipboard. Errors are silently ignored: clearing is
    /// best-effort.
    #[inline]
    pub(crate) fn empty() {
        // Clear Wayland clipboard.
        #[cfg(feature = "read-clipboard")]
        #[cfg(unix)]
        let _ = copy::clear(copy::ClipboardType::Regular, copy::Seat::All);
        // Clear X11 and other clipboards.
        #[cfg(feature = "read-clipboard")]
        if let Ok(ctx) = clipboard_rs::ClipboardContext::new() {
            let _ = ctx.set_html("".to_string());
            let _ = ctx.set_text("".to_string());
        };
    }
}
/// Register empty HTML and TXT clipboards with the `ContentString` names:
/// * `TMPL_VAR_HTML_CLIPBOARD`,
/// * `TMPL_VAR_TXT_CLIPBOARD`.
impl Default for SystemClipboard {
    fn default() -> Self {
        // Both fields start out as empty content, tagged with their
        // respective template variable names.
        let html = ContentString::from_string(String::new(), TMPL_VAR_HTML_CLIPBOARD.to_string());
        let txt = ContentString::from_string(String::new(), TMPL_VAR_TXT_CLIPBOARD.to_string());
        Self { html, txt }
    }
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote/src/file_editor.rs | tpnote/src/file_editor.rs | //! Launch the user's favorite file editor.
use crate::config::CFG;
use crate::error::ConfigFileError;
use crate::process_ext::ChildExt;
use crate::settings::ENV_VAR_TPNOTE_EDITOR;
use crate::settings::RUNS_ON_CONSOLE;
use percent_encoding::percent_decode_str;
use std::env;
#[cfg(not(target_family = "windows"))]
use std::fs::File;
use std::path::Path;
use std::process::Command;
use std::process::Stdio;
#[inline]
/// Launch some external text editor. The editor can be chosen through
/// Tp-Note's configuration file. This function searches the lists
/// `CFG.app_args.editor_console` or `CFG.app_args.editor` until it finds an
/// installed text editor. Once the editor is launched, the function blocks
/// until the user closes the editor window.
///
/// The environment variable `TPNOTE_EDITOR` (percent-decoded, whitespace
/// separated) takes precedence over the configured lists when set and
/// non-empty. Returns `ConfigFileError::NoApplicationFound` when no listed
/// application could be launched.
pub fn launch_editor(path: &Path) -> Result<(), ConfigFileError> {
    // Both lists have always the same number of items.
    let mut args_list = Vec::new();
    let mut executable_list = Vec::new();
    // Choose the right parameter list.
    let env_var = env::var(ENV_VAR_TPNOTE_EDITOR).ok();
    // Storage for an argument list parsed from the environment variable; must
    // outlive `editor_args` below.
    let vv: Vec<Vec<String>>;
    #[cfg(all(target_family = "unix", not(target_os = "macos")))]
    let app_args = &CFG.app_args.unix;
    #[cfg(target_family = "windows")]
    let app_args = &CFG.app_args.windows;
    #[cfg(all(target_family = "unix", target_os = "macos"))]
    let app_args = &CFG.app_args.macos;
    let editor_args = match (&env_var, *RUNS_ON_CONSOLE) {
        // If the environment variable is defined, it has precedence.
        (Some(s), false) => {
            if s.is_empty() {
                &app_args.editor
            } else {
                // Split on whitespace; each token is percent-decoded so that
                // arguments may contain encoded spaces.
                vv = vec![s
                    .split_ascii_whitespace()
                    .map(|s| percent_decode_str(s).decode_utf8_lossy().to_string())
                    .collect::<Vec<String>>()];
                &vv
            }
        }
        (None, false) => &app_args.editor,
        (_, true) => &app_args.editor_console,
    };
    // Prepare launch of editor/viewer.
    // For every candidate, build `executable + args + <note path>`.
    for app in editor_args {
        executable_list.push(&app[0]);
        let mut args: Vec<&str> = Vec::new();
        for s in app[1..].iter() {
            args.push(s);
        }
        args.push(path.to_str().ok_or(ConfigFileError::PathNotUtf8 {
            path: path.to_path_buf(),
        })?);
        args_list.push(args);
    }
    // Move and make immutable.
    let args_list = args_list;
    let executable_list = executable_list;
    let mut executable_found = false;
    // Try the candidates in order until one launches and exits successfully.
    for i in 0..executable_list.len() {
        log::debug!(
            "Trying to launch the file editor:\n\'{}\' {}",
            executable_list[i],
            args_list[i]
                .iter()
                .map(|p| {
                    let mut s = "'".to_string();
                    s.push_str(p);
                    s.push_str("' ");
                    s
                })
                .collect::<String>()
        );
        // Check if this is a `flatpak run <app>` command.
        #[cfg(target_family = "unix")]
        if executable_list[i].ends_with("flatpak")
            && args_list[i].len() >= 3
            && args_list[i][0] == "run"
        {
            // Check if the Flatpak is installed on this system with
            // `flatpak info <app>`.
            if let Ok(ecode) = Command::new(executable_list[i])
                .args(["info", args_list[i][1]])
                .stderr(Stdio::null())
                .stdout(Stdio::null())
                .status()
            {
                if !ecode.success() {
                    // This is a Flatpak command, but the application is not
                    // installed on this system. Silently ignore this Flatpak
                    // command.
                    log::info!("Flatpak executable \"{}\" not found.", args_list[i][1]);
                    continue;
                };
            };
        };
        // Connect `stdin` of child process to `/dev/tty`.
        // Console editors need the terminal; GUI editors get null streams.
        #[cfg(not(target_family = "windows"))]
        let (config_stdin, config_stdout) = if *RUNS_ON_CONSOLE {
            match File::open("/dev/tty") { Ok(file) => {
                (Stdio::from(file), Stdio::inherit())
            } _ => {
                (Stdio::null(), Stdio::null())
            }}
        } else {
            (Stdio::null(), Stdio::null())
        };
        #[cfg(target_family = "windows")]
        let (config_stdin, config_stdout) = (Stdio::null(), Stdio::null());
        let mut command = Command::new(executable_list[i]);
        command
            .args(&args_list[i])
            .stdin(config_stdin)
            .stdout(config_stdout)
            .stderr(Stdio::null());
        match command.spawn() {
            Ok(mut child) => {
                // Blocks; on Windows also waits for known subprocesses.
                let ecode = child.wait_subprocess()?;
                if !ecode.success() {
                    // Check if this is a console command running in a terminal
                    // emulator.
                    #[cfg(target_family = "unix")]
                    if executable_list[i].ends_with("alacritty")
                        && args_list[i].len() >= 3
                        && (args_list[i][args_list[i].len() - 2] == "-e"
                            || args_list[i][args_list[i].len() - 2] == "--command")
                    {
                        // This is a terminal emulator command, but the
                        // application is not installed on this system.
                        // Silently ignore this command and try the next
                        // candidate.
                        log::info!(
                            "Console file editor executable \"{}\" not found.",
                            args_list[i][args_list[i].len() - 2]
                        );
                        continue;
                    };
                    return Err(ConfigFileError::ApplicationReturn {
                        code: ecode,
                        var_name: if *RUNS_ON_CONSOLE {
                            "app_args.editor_console".to_string()
                        } else {
                            "app_args.editor".to_string()
                        },
                        args: (*editor_args[i]).to_vec(),
                    });
                };
                executable_found = true;
                break;
            }
            Err(e) => {
                // Spawn failed (usually: binary not in `$PATH`); try next.
                log::info!("File editor \"{}\" not found: {}", executable_list[i], e);
            }
        }
    } // All executables in the list are launched, without success.
    if !executable_found {
        // Build a human readable list of all tried applications for the
        // error message.
        let mut app_list = String::new();
        for l in editor_args.iter() {
            app_list.push_str("\n\t");
            for a in l {
                app_list.push_str(a);
                app_list.push(' ');
            }
            app_list.truncate(app_list.len() - " ".len());
        }
        return Err(ConfigFileError::NoApplicationFound {
            app_list,
            // Choose the right parameter list.
            var_name: match (&env_var, *RUNS_ON_CONSOLE) {
                (Some(_), false) => ENV_VAR_TPNOTE_EDITOR.to_string(),
                (_, true) => "app_args.editor_console".to_string(),
                (None, false) => "app_args.editor".to_string(),
            },
        });
    };
    Ok(())
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote/src/template.rs | tpnote/src/template.rs | //! Helper functions dealing with `TemplateKind` variants.
use crate::config::CFG;
use crate::settings::ARGS;
use tpnote_lib::template::TemplateKind;
/// Helper function to inhibit template application according to
/// command line parameters.
///
/// Returns the given `template_kind` unchanged unless one of the
/// inhibitors below downgrades it to `TemplateKind::None`.
pub(crate) fn template_kind_filter(template_kind: TemplateKind) -> TemplateKind {
    // Treat inhibitors:
    match template_kind {
        // All preconditions satisfied: keep the template.
        TemplateKind::FromTextFile
            if (ARGS.add_header || CFG.arg_default.add_header)
                && !CFG.arg_default.no_filename_sync
                && !ARGS.no_filename_sync =>
        {
            template_kind
        }
        // Some precondition failed: inhibit.
        TemplateKind::FromTextFile => {
            log::info!(
                "Not adding header to text file: \
                `add_header` is not enabled or `no_filename_sync`",
            );
            log::debug!("Changing the template to \"TemplateKind::None\"");
            TemplateKind::None
        }
        // Sync disabled on the command line: inhibit.
        TemplateKind::SyncFilename if ARGS.no_filename_sync => {
            log::info!("Filename synchronisation disabled with the flag: `--no-filename-sync`",);
            log::debug!("Changing the template to \"TemplateKind::None\"");
            TemplateKind::None
        }
        // Sync disabled in the configuration file: inhibit.
        TemplateKind::SyncFilename if CFG.arg_default.no_filename_sync => {
            log::info!(
                "Filename synchronisation disabled with the configuration file \
                variable: `arg_default.no_filename_sync = true`",
            );
            log::debug!("Changing the template to \"TemplateKind::None\"");
            TemplateKind::None
        }
        // Otherwise, there are no more inhibitors so far.
        other => other,
    }
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote/src/process_ext.rs | tpnote/src/process_ext.rs | //! Module extending the process handling.
#[cfg(target_family = "windows")]
use std::os::windows::io::AsRawHandle;
use std::process::Child;
use std::process::ExitStatus;
#[cfg(target_family = "windows")]
use std::thread::sleep;
#[cfg(target_family = "windows")]
use std::time::Duration;
#[cfg(target_family = "windows")]
use win32job::Job;
/// Polling interval when waiting for grand children to terminate.
/// The value's unit is milliseconds (it is passed to
/// `Duration::from_millis`).
#[cfg(target_family = "windows")]
const PROCESS_POLLING_INTERVAL: u64 = 1000;
/// Extension trait with a method that waits under Windows not only the started
/// process, but also all subprocesses as far as they are known.
pub trait ChildExt {
    /// Like `Child::wait()`, but on Windows also waits for all known
    /// subprocesses. Returns the exit status of the direct child.
    fn wait_subprocess(&mut self) -> std::io::Result<ExitStatus>;
}
impl ChildExt for Child {
    #[cfg(not(target_family = "windows"))]
    #[inline]
    /// Non-Windows implementation: just executes the usual `wait()` method,
    /// adding debug logging of the child's process id and exit status.
    fn wait_subprocess(&mut self) -> std::io::Result<ExitStatus> {
        // Remember ID for debugging.
        let process_id = self.id();
        log::debug!("Process started: id={}", process_id);
        let exit_status = self.wait();
        log::debug!(
            "Process terminated: id={}, {}",
            process_id,
            match &exit_status {
                Ok(ex_st) => ex_st.to_string(),
                Err(e) => e.to_string(),
            }
        );
        exit_status
    }
    /// Windows: This `wait()` implementation not only waits until the `Child`
    /// process terminates, it also waits until all its subprocesses
    /// terminate, by assigning the child to a Win32 job object and polling
    /// the job's process id list.
    #[cfg(target_family = "windows")]
    fn wait_subprocess(&mut self) -> std::io::Result<ExitStatus> {
        // Initialize the job monitor via a job handle.
        fn wait_init(me: &Child) -> Result<Job, Box<dyn std::error::Error>> {
            // We create a job to monitor the wrapped child.
            let job = Job::create()?;
            let handle = me.as_raw_handle();
            job.assign_process(handle as isize)?;
            Ok(job)
        }
        // At this point, the parent process just terminated. We here we wait
        // for the children and grandchildren also. When all grandchildren
        // terminated, the `process_id_list` will be 0.
        fn wait_more(me: &Child, job: Job) -> Result<(), Box<dyn std::error::Error>> {
            let ids = job.query_process_id_list()?;
            if ids.len() > 0 {
                log::debug!(
                    "Processes id={} launched still running ids:{:?}.",
                    me.id(),
                    ids
                );
            }
            // Wait until all will have terminated.
            while job.query_process_id_list()?.len() > 0 {
                sleep(Duration::from_millis(PROCESS_POLLING_INTERVAL));
            }
            if ids.len() > 0 {
                log::debug!("All processes launched by id={} terminated.", me.id());
            };
            Ok(())
        }
        // Remember ID for debugging.
        let process_id = self.id();
        log::debug!("Process started: id={}", process_id);
        let job = wait_init(&self);
        // For most browsers under Windows, this might most likely returns
        // immediately. The `Child` terminates, after having launched processes
        // himself.
        let exit_status = self.wait();
        if exit_status.is_err() {
            return exit_status;
        };
        log::debug!("Process terminated: id={}, {:?}", process_id, exit_status);
        // Wait for subprocesses to finish.
        // Job errors are only logged: the child's exit status is still valid.
        match job {
            Ok(job) => {
                if let Err(e) = wait_more(&self, job) {
                    log::debug!("Error handling job list: {}", e);
                }
            }
            Err(e) => log::debug!("Error initializing job list: {}", e),
        }
        exit_status
    }
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote/src/workflow.rs | tpnote/src/workflow.rs | //! High level program logic implementing the whole workflow.
use crate::config::CFG;
use crate::error::WorkflowError;
use crate::file_editor::launch_editor;
use crate::settings::ARGS;
use crate::settings::DOC_PATH;
use crate::settings::LAUNCH_EDITOR;
use crate::settings::LAUNCH_VIEWER;
use crate::settings::STDIN;
use crate::settings::SYSTEM_CLIPBOARD;
use crate::template::template_kind_filter;
#[cfg(feature = "viewer")]
use crate::viewer::launch_viewer_thread;
#[cfg(not(target_family = "windows"))]
use std::matches;
use std::path::PathBuf;
#[cfg(feature = "viewer")]
use std::thread;
#[cfg(feature = "viewer")]
use std::time::Duration;
use tpnote_lib::content::ContentString;
use tpnote_lib::error::NoteError;
use tpnote_lib::workflow::WorkflowBuilder;
/// Run Tp-Note and return the (modified) path to the (new) note file.
/// 1. Create a new note by inserting Tp-Note's environment in a template.
/// 2. If the note to be created exists already, open it, read the YAML front
///    matter and synchronize the filename if necessary.
/// 3. Open the new note in an external editor (configurable).
/// 4. Read the front matter again and resynchronize the filename if necessary.
#[inline]
pub fn run_workflow(mut path: PathBuf) -> Result<PathBuf, WorkflowError> {
    // Depending on this we might not show the viewer later or
    // log an error as WARN level instead of ERROR level.
    let launch_viewer;

    // Assemble the workflow, applying all command line overrides.
    let mut workflow_builder = WorkflowBuilder::new(&path).upgrade::<ContentString, _>(
        &CFG.arg_default.scheme,
        vec![&SYSTEM_CLIPBOARD.html, &SYSTEM_CLIPBOARD.txt, &*STDIN],
        template_kind_filter,
    );

    if let Some(scheme) = ARGS.scheme.as_deref() {
        workflow_builder.force_scheme(scheme);
    }

    if let Some(lang) = ARGS.force_lang.as_deref() {
        // `-` on the command line means: no forced language.
        if lang == "-" {
            workflow_builder.force_lang("");
        } else {
            workflow_builder.force_lang(lang);
        }
    }

    if let Some(path) = &ARGS.export {
        workflow_builder.html_export(
            path,
            ARGS.export_link_rewriting
                .unwrap_or(CFG.arg_default.export_link_rewriting),
        );
    }

    let workflow = workflow_builder.build();

    match workflow.run() {
        // Use the new `path` from now on.
        Ok(p) => {
            path = p;
            #[cfg(feature = "viewer")]
            {
                launch_viewer = *LAUNCH_VIEWER;
            }
        }
        Err(e) => {
            // All front matter related errors are considered recoverable:
            // the user can fix the header in the text editor.
            let is_front_matter_err = matches!(
                e,
                NoteError::InvalidFrontMatterYaml { .. }
                    | NoteError::FrontMatterFieldIsCompound { .. }
                    | NoteError::FrontMatterFieldIsDuplicateSortTag { .. }
                    | NoteError::FrontMatterFieldIsEmptyString { .. }
                    | NoteError::FrontMatterFieldIsInvalidSortTag { .. }
                    | NoteError::FrontMatterFieldIsNotBool { .. }
                    | NoteError::FrontMatterFieldIsNotNumber { .. }
                    | NoteError::FrontMatterFieldIsNotString { .. }
                    | NoteError::FrontMatterFieldIsNotTpnoteExtension { .. }
                    | NoteError::FrontMatterFieldMissing { .. }
                    | NoteError::FrontMatterMissing { .. }
            );
            // BUGFIX: group all error variants BEFORE applying the
            // `!ARGS.batch && ARGS.export.is_none()` guard. Previously,
            // operator precedence (`&&` binds tighter than `||`) tied the
            // guard to the last variant (`FrontMatterMissing`) only, so the
            // other front matter errors bypassed the batch/export check.
            if is_front_matter_err && !ARGS.batch && ARGS.export.is_none() {
                // Continue the workflow.
                let missing_header = matches!(e, NoteError::FrontMatterMissing { .. })
                    || matches!(e, NoteError::FrontMatterFieldMissing { .. });

                launch_viewer = *LAUNCH_VIEWER
                    && !(missing_header
                        && CFG.viewer.missing_header_disables
                        && !CFG.arg_default.add_header
                        && !ARGS.add_header
                        && !ARGS.view);

                if launch_viewer || missing_header {
                    // Inform user when `--debug warn`, then continue workflow.
                    log::warn!("{}", e,);
                } else {
                    // Inform user, then continue workflow.
                    log::error!("{}", e,);
                };
            } else {
                // This is a fatal error, so we quit.
                return Err(e.into());
            }
        }
    };

    #[cfg(feature = "viewer")]
    let viewer_join_handle = if launch_viewer {
        Some(launch_viewer_thread(&path))
    } else {
        None
    };

    if *LAUNCH_EDITOR {
        #[cfg(feature = "viewer")]
        if viewer_join_handle.is_some() && CFG.viewer.startup_delay < 0 {
            // A negative `startup_delay` means: delay the editor launch.
            thread::sleep(Duration::from_millis(
                CFG.viewer.startup_delay.unsigned_abs() as u64,
            ));
        };
        // This blocks.
        launch_editor(&path)?;
    };

    if *LAUNCH_EDITOR {
        // The user may have edited the front matter: run the workflow again
        // to resynchronize the filename.
        let workflow = WorkflowBuilder::new(&path).build();
        match workflow.run::<ContentString>() {
            // `path` has changed!
            Ok(p) => path = p,
            Err(e) => {
                let missing_header = matches!(e, NoteError::FrontMatterMissing { .. })
                    || matches!(e, NoteError::FrontMatterFieldMissing { .. });
                if missing_header && *LAUNCH_VIEWER {
                    // Silently ignore error.
                    log::warn!("{}", e);
                } else {
                    // Report all other errors.
                    return Err(e.into());
                }
            }
        };
    } else {
        // No editor was launched: block until the viewer thread finishes.
        #[cfg(feature = "viewer")]
        if let Some(jh) = viewer_join_handle {
            let _ = jh.join();
        };
    };

    Ok(path)
}
#[inline]
/// Resolve the `<path>` command line argument and run the workflow on it.
pub(crate) fn run() -> Result<PathBuf, WorkflowError> {
    // `DOC_PATH` holds the result of the argument parsing; `?` forwards a
    // possible error to the caller.
    run_workflow(DOC_PATH.as_deref()?.to_path_buf())
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote/src/error.rs | tpnote/src/error.rs | //! Custom error types.
use std::path::PathBuf;
use std::process::ExitStatus;
use thiserror::Error;
use tpnote_lib::error::FileError;
use tpnote_lib::error::LibCfgError;
use tpnote_lib::error::NoteError;
#[allow(dead_code)]
#[derive(Debug, Error)]
/// Error arising in the `workflow` and `main` module.
pub enum WorkflowError {
    /// Remedy: check `<path>` to note file.
    #[error("Can not export. No note file found.")]
    ExportNeedsNoteFile,
    /// Remedy: restart with `--debug trace`.
    #[error(
        "Failed to render template (cf. `{tmpl_name}`\
        in configuration file)!\n{source}"
    )]
    Template {
        tmpl_name: String,
        source: NoteError,
    },
    /// Forwarded error from the note core library.
    #[error(transparent)]
    Note(#[from] NoteError),
    /// Forwarded error from configuration file handling.
    #[error(transparent)]
    ConfigFile(#[from] ConfigFileError),
    /// Forwarded I/O error.
    #[error(transparent)]
    Io(#[from] std::io::Error),
    /// Forwarded reference to a statically stored I/O error.
    #[error(transparent)]
    IoRef(#[from] &'static std::io::Error),
}
/// Error related to the filesystem and to invoking external applications.
#[derive(Debug, Error)]
pub enum ConfigFileError {
    /// Remedy: delete or rename the configuration file.
    #[error(
        "Can not backup and delete the erroneous\n\
        configuration file:\n\
        ---\n\
        {error}\n\n\
        Please do it manually."
    )]
    ConfigFileBackup { error: String },
    /// Remedy: Compare your config file structure with the default one
    /// (`--config-defaults`).
    // NOTE(review): "Unkown" is a typo in this variant name; renaming it
    // would break callers elsewhere, so it is kept as is.
    #[error(
        "Unknown top level key(s) in configuration file:\n\
        {error:?}"
    )]
    ConfigFileUnkownFieldName { error: Vec<String> },
    /// Remedy: check the path and permissions of the to be generated
    /// configuration file.
    #[error(
        "Can not write the default configuration:\n\
        ---\n\
        {error}"
    )]
    ConfigFileWrite { error: String },
    /// Remedy: restart, or check file permission of the configuration file.
    #[error(
        "Can not load or parse the (merged)\n\
        configuration file(s):\n\
        ---\n\
        {error}\n\n\
        Note: this error may occur after upgrading\n\
        Tp-Note due to some incompatible configuration\n\
        file changes.\n\
        \n\
        Tp-Note renames and thus disables the last sourced\n\
        configuration file."
    )]
    ConfigFileLoadParse { error: String },
    /// Remedy: restart.
    #[error(
        "Configuration file version mismatch:\n---\n\
        Configuration file version: \'{config_file_version}\'\n\
        Minimum required version: \'{min_version}\'\n\
        \n\
        Tp-Note renames and thus disables the last sourced\n\
        configuration file."
    )]
    ConfigFileVersionMismatch {
        config_file_version: String,
        min_version: String,
    },
    /// Should not happen. Please report this bug.
    #[error("Can not convert path to UTF-8:\n{path:?}")]
    PathNotUtf8 { path: PathBuf },
    /// Remedy: check the configuration file variable `app_args.editor`.
    #[error(
        "The external application did not terminate\n\
        gracefully: {code}\n\
        \n\
        Edit the variable `{var_name}` in Tp-Note's\n\
        configuration file and correct the following:\n\
        \t{args:?}"
    )]
    ApplicationReturn {
        code: ExitStatus,
        var_name: String,
        args: Vec<String>,
    },
    /// Remedy: check the configuration file variable `app_args.editor`
    /// or `app_args.browser` depending on the displayed variable name.
    /// For `TPNOTE_EDITOR` and `TPNOTE_BROWSER` check the environment
    /// variable of the same name.
    #[error(
        "Can not find any external application listed\n\
        in `{var_name}`: \
        {app_list}\n\
        Install one of the listed applications on your\n\
        system -or- register some already installed\n\
        application in Tp-Note's configuration file\n\
        or in the corresponding environment variable."
    )]
    NoApplicationFound { app_list: String, var_name: String },
    /// Should not happen. Please report this bug.
    #[error("No path to configuration file found.")]
    PathToConfigFileNotFound,
    /// Forwarded filesystem error from the core library.
    #[error(transparent)]
    File(#[from] FileError),
    /// Forwarded library configuration error.
    #[error(transparent)]
    LibConfig(#[from] LibCfgError),
    /// Forwarded I/O error.
    #[error(transparent)]
    Io(#[from] std::io::Error),
    /// Forwarded reference to a statically stored I/O error.
    #[error(transparent)]
    IoRef(#[from] &'static std::io::Error),
    /// Forwarded TOML serialization error.
    #[error(transparent)]
    Serialize(#[from] toml::ser::Error),
    /// Forwarded TOML deserialization error.
    #[error(transparent)]
    Deserialize(#[from] toml::de::Error),
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote/src/main.rs | tpnote/src/main.rs | #![windows_subsystem = "windows"]
#![allow(clippy::vec_init_then_push)]
//! _Tp-Note_ is a note taking tool and a template system, that consistently
//! synchronizes the note's metadata with its filename.
//! _Tp-Note's_ main design goal is to convert some input text -
//! usually provided by the system's clipboard - into a Markdown note file, with
//! a descriptive YAML header and meaningful filename.
//! _Tp-Note_ collects
//! various information about its environment and the clipboard and stores them
//! in variables. New notes are created by filling these variables in predefined
//! and customizable `Tera`-templates. In case `<path>` points to an existing
//! _Tp-Note_-file, the note's metadata is analyzed and, if necessary, its
//! filename is modified. For all other filetypes, _Tp-Note_ creates a new note
//! that annotates the file `<path>` points to. If `<path>` is a directory (or,
//! when omitted the current working directory), a new note is created in that
//! directory. After creation, _Tp-Note_ launches an external editor of your
//! choice. Although the note's structure follows _Pandoc's_ conventions, it is
//! not tied to any specific Markup language.
#[cfg(feature = "message-box")]
mod alert_service;
mod clipboard;
mod config;
mod error;
mod file_editor;
mod logger;
mod process_ext;
mod settings;
mod template;
#[cfg(feature = "viewer")]
mod viewer;
mod workflow;
#[cfg(feature = "message-box")]
use crate::alert_service::AlertService;
use crate::clipboard::SystemClipboard;
use crate::config::AUTHOR;
use crate::config::CFG;
use crate::config::CFG_FILE_LOADING;
use crate::config::CONFIG_PATHS;
use crate::config::COPYRIGHT_FROM;
use crate::config::Cfg;
use crate::config::PKG_VERSION;
use crate::error::WorkflowError;
use crate::logger::AppLogger;
use crate::settings::ARGS;
use crate::settings::LAUNCH_EDITOR;
#[cfg(feature = "message-box")]
use crate::settings::RUNS_ON_CONSOLE;
use crate::workflow::run;
use config::MIN_CONFIG_FILE_VERSION;
use error::ConfigFileError;
use semver::Version;
use serde::Serialize;
use settings::SYSTEM_CLIPBOARD;
use std::path::Path;
use std::process;
use std::sync::LazyLock;
use tpnote_lib::error::NoteError;
/// Answer to the `--version` command line option; serialized to YAML in
/// `main()` for further automatic processing.
#[derive(Debug, PartialEq, Serialize)]
struct About {
    /// Package version, or "unknown".
    version: String,
    /// Cargo features compiled into this binary.
    features: Vec<String>,
    /// All paths searched for configuration files.
    searched_config_file_paths: Vec<String>,
    /// The subset of the searched paths that actually exist.
    sourced_config_files: Vec<String>,
    /// Copyright line including the current year.
    copyright: String,
}
/// Print some error message if `run()` does not complete.
/// Exit prematurely if the configuration file version does
/// not match the program version.
///
/// Startup order matters: the clipboard snapshot is taken first, then the
/// logger is installed, then the configuration is loaded.
fn main() {
    // Read the clipboard before starting the logger.
    LazyLock::force(&SYSTEM_CLIPBOARD);
    // Setup logger.
    AppLogger::init();
    // Read configuration file, or write one if none exists.
    LazyLock::force(&CFG);
    // Set the debug level. Only use config file value if
    // no command-line-option `--debug` is present.
    let level = ARGS.debug.unwrap_or(CFG.arg_default.debug);
    // Map the Clap level enum onto the `log` crate's `LevelFilter`.
    let level = match level {
        settings::ClapLevelFilter::Off => log::LevelFilter::Off,
        settings::ClapLevelFilter::Error => log::LevelFilter::Error,
        settings::ClapLevelFilter::Warn => log::LevelFilter::Warn,
        settings::ClapLevelFilter::Info => log::LevelFilter::Info,
        settings::ClapLevelFilter::Debug => log::LevelFilter::Debug,
        settings::ClapLevelFilter::Trace => log::LevelFilter::Trace,
    };
    AppLogger::set_max_level(level);
    // This eventually will extend the error reporting with more
    // popup alert windows.
    AppLogger::set_popup_always_enabled(ARGS.popup || CFG.arg_default.popup);
    // Check if the config file loading was successful.
    let cfg_file_loading = &*CFG_FILE_LOADING.read();
    let cfg_file_loading_err = cfg_file_loading.as_ref().err().map(|e| e.to_string());
    // Check if we can parse the version number in there.
    let cfg_file_version = Version::parse(&CFG.version);
    let cfg_file_version_err = cfg_file_version.as_ref().err().map(|e| e.to_string());
    // This is `Some::String` if one of them is `Err`.
    let cfg_err = cfg_file_loading_err.or(cfg_file_version_err);
    let config_file_version = match cfg_err {
        // This is always `Some::Version` because none of them are `Err`.
        None => cfg_file_version.ok(),
        // One of them is `Err`, we do not care who.
        Some(e) => {
            log::error!("{}", ConfigFileError::ConfigFileLoadParse { error: e });
            // Move erroneous config file away.
            if let Err(e) = Cfg::backup_and_remove_last() {
                log::error!(
                    "{}",
                    ConfigFileError::ConfigFileBackup {
                        error: e.to_string()
                    }
                );
                AppLogger::flush();
                process::exit(5);
            };
            // As we have an error, we indicate that there is no version.
            None
        }
    };
    // Is version number in the configuration file high enough?
    if let Some(config_file_version) = config_file_version {
        if config_file_version < Version::parse(MIN_CONFIG_FILE_VERSION.unwrap_or("0.0.0")).unwrap()
        {
            log::error!(
                "{}",
                ConfigFileError::ConfigFileVersionMismatch {
                    config_file_version: config_file_version.to_string(),
                    min_version: MIN_CONFIG_FILE_VERSION.unwrap_or("0.0.0").to_string(),
                }
            );
            if let Err(e) = Cfg::backup_and_remove_last() {
                log::error!(
                    "{}",
                    ConfigFileError::ConfigFileBackup {
                        error: e.to_string()
                    }
                );
                AppLogger::flush();
                process::exit(6);
            };
        };
    };
    // Process arg `--config-defaults`: dump the default configuration
    // to the given path (or stdout), then exit.
    if let Some(path) = &ARGS.config_defaults {
        let path = Path::new(&path);
        if let Err(e) = Cfg::write_default_to_file_or_stdout(path) {
            log::error!(
                "{}",
                ConfigFileError::ConfigFileWrite {
                    error: e.to_string()
                }
            );
            AppLogger::flush();
            process::exit(5);
        };
        // Exit.
        AppLogger::flush();
        process::exit(0);
    }
    // Process arg `--version`.
    // The output is YAML formatted for further automatic processing.
    if ARGS.version {
        #[allow(unused_mut)]
        let mut features = Vec::new();
        #[cfg(feature = "lang-detection")]
        features.push("lang-detection".to_string());
        #[cfg(feature = "message-box")]
        features.push("message-box".to_string());
        #[cfg(feature = "read-clipboard")]
        features.push("read-clipboard".to_string());
        #[cfg(feature = "renderer")]
        features.push("renderer".to_string());
        #[cfg(feature = "viewer")]
        features.push("viewer".to_string());
        let about = About {
            version: PKG_VERSION.unwrap_or("unknown").to_string(),
            features,
            searched_config_file_paths: CONFIG_PATHS
                .iter()
                .map(|p| p.to_str().unwrap_or_default().to_owned())
                .collect(),
            sourced_config_files: CONFIG_PATHS
                .iter()
                .filter(|p| p.exists())
                .map(|p| p.to_str().unwrap_or_default().to_owned())
                .collect(),
            copyright: format!(
                "© {}-{} {}",
                COPYRIGHT_FROM,
                time::OffsetDateTime::now_utc().year(),
                AUTHOR.unwrap()
            ),
        };
        let msg = serde_yaml::to_string(&about).unwrap_or_else(|_| "unknown".to_string());
        // Print on console.
        println!("{}", msg);
        // Print in alert box.
        #[cfg(feature = "message-box")]
        if !*RUNS_ON_CONSOLE && !ARGS.batch {
            let _ = AlertService::push_str(msg);
        };
        AppLogger::flush();
        process::exit(0);
    };
    //
    // Run Tp-Note.
    let res = run();
    match res {
        Err(ref e) => {
            // Something went wrong. Inform user.
            log::error!("{}", e);
        }
        // Print `path` unless `--export=-`.
        Ok(ref path) => {
            if let Some(p) = &ARGS.export {
                if p.display().to_string() != "-" {
                    println!("{}", path.display());
                }
            } else {
                println!("{}", path.display());
            }
        }
    };
    // Wait if there are still error messages windows open.
    AppLogger::flush();
    // Delete clipboard content.
    if (*LAUNCH_EDITOR && !ARGS.batch && CFG.clipboard.read_enabled && CFG.clipboard.empty_enabled)
        || matches!(
            &res,
            Err(WorkflowError::Note(NoteError::InvalidInputYaml { .. }))
        )
    {
        SystemClipboard::empty();
    }
    // Nonzero exit code signals the failure to the calling shell.
    if res.is_err() {
        process::exit(1);
    }
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote/src/viewer/watcher.rs | tpnote/src/viewer/watcher.rs | //! Implements the file watcher for the note viewer feature.
use crate::config::CFG;
use crate::viewer::error::ViewerError;
use crate::viewer::sse_server::SseToken;
use notify::RecursiveMode;
use notify_debouncer_mini::Config;
use notify_debouncer_mini::{new_debouncer_opt, DebouncedEvent, Debouncer};
use std::panic::panic_any;
use std::path::Path;
use std::sync::mpsc::TrySendError;
use std::sync::mpsc::{channel, Receiver, RecvTimeoutError, SyncSender};
use std::sync::{Arc, Mutex};
use std::time::Duration;
use std::time::Instant;
/// Even if there is no file modification, after `WATCHER_TIMEOUT` seconds,
/// the watcher sends an `update` request to the connected web browsers in
/// order to check if there are still subscribers connected. The value's unit
/// is seconds.
const WATCHER_TIMEOUT: u64 = 10;
/// Delay while `update()` with no subscribers silently ignored. This avoids
/// a race condition, when a file has already changed on disk, but the browser
/// has not connected yet. The value's unit is seconds.
const WATCHER_MIN_UPTIME: u64 = 5;
/// The `watcher` notifies about changes through `rx`.
pub struct FileWatcher {
    /// Receiver for file changed messages.
    rx: Receiver<Result<Vec<DebouncedEvent>, notify::Error>>,
    /// We must store the `Debouncer` because it holds
    /// the sender of the channel.
    #[allow(dead_code)]
    debouncer: Debouncer<notify::PollWatcher>,
    /// List of subscribers to inform when the file is changed.
    event_tx_list: Arc<Mutex<Vec<SyncSender<SseToken>>>>,
    /// When set, the watcher's event loop terminates once all
    /// browsers have disconnected (periodic pings detect this).
    terminate_on_browser_disconnect: Arc<Mutex<bool>>,
    /// Start time of this file-watcher; used to suppress early
    /// "all disconnected" detection (cf. `WATCHER_MIN_UPTIME`).
    start_time: Instant,
}
/// Watch file changes and notify subscribers.
impl FileWatcher {
    /// Constructor. `watched_file` is the file to watch.
    ///
    /// `event_tx_list` holds one sender per connected subscriber (web
    /// browser) that is notified on file changes. When
    /// `terminate_on_browser_disconnect` is set to `true` later, the
    /// watcher's event loop ends once all subscribers have disconnected.
    ///
    /// # Errors
    ///
    /// Fails when the underlying `notify` watcher can not be created or
    /// can not watch `watched_file`.
    pub fn new(
        // The file path of the file being watched.
        watched_file: &Path,
        // A list of subscribers, that shall be informed when the watched
        // file has been changed.
        event_tx_list: Arc<Mutex<Vec<SyncSender<SseToken>>>>,
        terminate_on_browser_disconnect: Arc<Mutex<bool>>,
    ) -> Result<Self, ViewerError> {
        let (tx, rx) = channel();
        // Max value for `notify_period` is 2 seconds.
        // We use the same value for `timeout` and `Some(tick_rate)`.
        let notify_period = Duration::from_millis(CFG.viewer.notify_period);
        let backend_config = notify::Config::default().with_poll_interval(notify_period);
        // Debouncer configuration.
        let debouncer_config = Config::default()
            .with_timeout(notify_period)
            .with_notify_config(backend_config);
        // Select the backend via turbofish, here the `PollWatcher` backend.
        let mut debouncer = new_debouncer_opt::<_, notify::PollWatcher>(debouncer_config, tx)?;
        // In theory watching only `file` is enough. Unfortunately some file
        // editors do not modify files directly. They first rename the existing
        // file on disk and then create a new file with the same filename.
        // Older versions of Notify did not detect this case reliably.
        debouncer
            .watcher()
            .watch(watched_file, RecursiveMode::NonRecursive)?;
        log::debug!("File watcher started.");
        Ok(Self {
            rx,
            debouncer,
            event_tx_list,
            start_time: Instant::now(),
            terminate_on_browser_disconnect,
        })
    }
/// Wrapper to start the server.
pub fn run(&mut self) {
match Self::run2(self) {
Ok(_) => (),
Err(e) => {
log::debug!("File watcher terminated: {}", e);
}
}
}
    /// Start the file watcher's event loop. Blocks forever, unless a
    /// `ViewerError::AllSubscriberDiconnected` occurs, i.e. the last web
    /// browser has disconnected while `terminate_on_browser_disconnect`
    /// is set.
    fn run2(&mut self) -> Result<(), ViewerError> {
        loop {
            // Detect when the browser quits, then terminate the watcher.
            let evnt = match self.rx.recv_timeout(Duration::from_secs(WATCHER_TIMEOUT)) {
                Ok(ev) => ev,
                Err(RecvTimeoutError::Timeout) => {
                    // Push something to detect disconnected TCP channels.
                    self.update(SseToken::Ping)?;
                    // When empty all TCP connections have disconnected.
                    let tx_list = &mut *self.event_tx_list.lock().unwrap();
                    // log::trace!(
                    //     "File watcher timeout: {} open TCP connections.",
                    //     tx_list.len()
                    // );
                    {
                        // Only give up after `WATCHER_MIN_UPTIME`; earlier the
                        // browser may simply not have connected yet.
                        if tx_list.is_empty()
                            && self.start_time.elapsed().as_secs() > WATCHER_MIN_UPTIME
                            // Release lock immediately.
                            && *self.terminate_on_browser_disconnect.lock().unwrap()
                        {
                            return Err(ViewerError::AllSubscriberDiconnected);
                        }
                    }
                    continue;
                }
                // The sending half of a channel (or sync_channel) is
                // `Disconnected`, implies that no further messages will ever be
                // received. As this should never happen, we panic this thread
                // then.
                Err(RecvTimeoutError::Disconnected) => panic_any("RecvTimeoutError::Disconnected"),
            };
            log::trace!("File watcher event: {:?}", evnt);
            match evnt {
                Ok(_events) => {
                    // There can be more than one event in `event`, we
                    // don't care about the details as we watch only one
                    // file.
                    self.update(SseToken::Update)?;
                }
                Err(e) => return Err(e.into()),
            }
        }
    }
/// Run sub-command and notify subscribers.
pub fn update(&self, msg: SseToken) -> Result<(), ViewerError> {
// Notify subscribers and forget disconnected subscribers.
let tx_list = &mut *self.event_tx_list.lock().unwrap();
let tx_list_len_before_update = tx_list.len();
*tx_list = tx_list
.drain(..)
.filter(|tx| match tx.try_send(msg.to_owned()) {
Ok(()) => true,
Err(TrySendError::Disconnected(_)) => false,
Err(_) => true,
})
.collect();
let tx_list_len = tx_list.len();
log::trace!(
"File watcher `update({:?})`: {} dropped TCP connections, {} still open.",
msg,
tx_list_len_before_update - tx_list_len,
tx_list_len,
);
Ok(())
}
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote/src/viewer/http_response.rs | tpnote/src/viewer/http_response.rs | //! HTTP response renderer and sender for all documents with one exception:
//! The content type `text/event-stream` is generated in the module
//! `sse_server`.
use super::sse_server::ServerThread;
use crate::config::CFG;
use crate::viewer::error::ViewerError;
use std::borrow::Cow;
use std::fs;
use std::io::{Read, Write};
use std::path::Path;
use std::str;
use std::time::SystemTime;
use tpnote_lib::config::LIB_CFG;
use tpnote_lib::config::LocalLinkKind;
use tpnote_lib::config::TMPL_HTML_VAR_VIEWER_DOC_CSS_PATH_VALUE;
use tpnote_lib::config::TMPL_HTML_VAR_VIEWER_HIGHLIGHTING_CSS_PATH_VALUE;
use tpnote_lib::content::Content;
use tpnote_lib::content::ContentString;
use tpnote_lib::context::Context;
use tpnote_lib::html::rewrite_links;
use tpnote_lib::html_renderer::HtmlRenderer;
use tpnote_lib::markup_language::MarkupLanguage;
/// Content from files is served in chunks; this is the chunk (write
/// buffer) size in bytes.
const TCP_WRITE_BUFFER_SIZE: usize = 0x1000;
/// Time in seconds the browsers should keep static pages in cache
/// (one week).
const MAX_AGE: usize = 604800;
/// Modern browsers request a small icon image.
pub const FAVICON: &[u8] = include_bytes!("favicon.ico");
/// The URL path where the favicon is requested.
pub const FAVICON_PATH: &str = "/favicon.ico";
pub(crate) trait HttpResponse {
    /// Renders the HTTP response for `request` (a URL path) and sends it
    /// into `self.stream`.
    fn respond(&mut self, request: &str) -> Result<(), ViewerError>;
    /// Read file from `abspath` and insert its content into an HTTP OK
    /// response. `max_age == 0` disables client side caching.
    fn respond_file_ok(
        &mut self,
        abspath: &Path,
        max_age: usize,
        mime_type: &str,
    ) -> Result<(), ViewerError>;
    /// Send an HTTP OK response with `content` as body; `reqpath` only
    /// appears in the log output.
    fn respond_content_ok(
        &mut self,
        reqpath: &Path,
        max_age: usize,
        mime_type: &str,
        content: &[u8],
    ) -> Result<(), ViewerError>;
    // Not implemented:
    //
    // ```
    // fn respond_forbidden(&mut self, reqpath: &Path) -> Result<(), ViewerError>;
    // fn respond_no_content_ok(&mut self) -> Result<(), ViewerError>;
    // ```
    /// Write HTTP "not found" response.
    fn respond_not_found(&mut self, reqpath: &Path) -> Result<(), ViewerError>;
    /// Write HTTP method "not allowed" response.
    fn respond_method_not_allowed(&mut self, method: &str) -> Result<(), ViewerError>;
    /// Write HTTP "too many requests" response.
    fn respond_too_many_requests(&mut self) -> Result<(), ViewerError>;
    /// Write HTTP service unavailable response.
    fn respond_service_unavailable(&mut self) -> Result<(), ViewerError>;
    /// Helper function to send HTTP error responses.
    fn respond_http_error(
        &mut self,
        http_error_code: u16,
        html_msg: &str,
        log_msg: &str,
    ) -> Result<(), ViewerError>;
    /// Renders the note behind `abspath_doc` to HTML, falling back to the
    /// error page template (`HTML_VIEWER_ERROR_TMPL`) on rendition errors.
    /// The function injects `self.context` before rendering the template.
    fn render_content_and_error(&self, abspath_doc: &Path) -> Result<String, ViewerError>;
}
impl HttpResponse for ServerThread {
    /// Dispatches the request `path` and writes the matching HTTP response
    /// into `self.stream`.
    ///
    /// Built-in resources (favicon, CSS) are served from memory, `/`
    /// serves the rendered base note, and any other path is only served
    /// from disk after passing the `allowed_urls` and MIME type checks
    /// below.
    fn respond(&mut self, path: &str) -> Result<(), ViewerError> {
        match path {
            // Serve icon.
            FAVICON_PATH => {
                self.respond_content_ok(
                    Path::new(&FAVICON_PATH),
                    MAX_AGE,
                    "image/x-icon",
                    FAVICON,
                )?;
            }
            // Serve document CSS file.
            TMPL_HTML_VAR_VIEWER_DOC_CSS_PATH_VALUE => {
                self.respond_content_ok(
                    Path::new(&TMPL_HTML_VAR_VIEWER_DOC_CSS_PATH_VALUE),
                    MAX_AGE,
                    "text/css",
                    LIB_CFG.read_recursive().tmpl_html.viewer_doc_css.as_bytes(),
                )?;
            }
            // Serve highlighting CSS file.
            TMPL_HTML_VAR_VIEWER_HIGHLIGHTING_CSS_PATH_VALUE => {
                self.respond_content_ok(
                    Path::new(&TMPL_HTML_VAR_VIEWER_HIGHLIGHTING_CSS_PATH_VALUE),
                    MAX_AGE,
                    "text/css",
                    LIB_CFG
                        .read_recursive()
                        .tmpl_html
                        .viewer_highlighting_css
                        .as_bytes(),
                )?;
            }
            // The client wants the rendered note.
            "/" => {
                // Renders a content page, or an error page on (Tera)
                // template errors, for the current note. The rendition
                // contains JavaScript code that subscribes to `EVENT_PATH`,
                // which reloads this document on request of `self.rx`.
                let html = self.render_content_and_error(self.context.get_path())?;
                self.respond_content_ok(Path::new("/"), 0, "text/html", html.as_bytes())?;
                // `self.rx` was not used and is dropped here.
            }
            // Serve all other documents.
            _ => {
                // Assert starting with `/`.
                let relpath = Path::new(path);
                if !relpath.starts_with("/") {
                    return Err(ViewerError::UrlMustStartWithSlash);
                }
                //
                // Condition 1: Only serve files that explicitly appear in
                // `self.allowed_urls`.
                let allowed_urls = self.allowed_urls.read_recursive();
                // Is the request in our `allowed_urls` list?
                if !allowed_urls.contains(relpath) {
                    log::warn!(
                        "TCP port local {} to peer {}: target not referenced in note file, rejecting: '{}'",
                        self.stream.local_addr()?.port(),
                        self.stream.peer_addr()?.port(),
                        relpath.to_str().unwrap_or(""),
                    );
                    // Release the `RwLockReadGuard`.
                    drop(allowed_urls);
                    self.respond_not_found(relpath)?;
                    return Ok(());
                }
                // Release the `RwLockReadGuard`.
                drop(allowed_urls);
                // We prepend `root_path` to `abspath` before accessing the file system.
                let abspath = self
                    .context
                    .get_root_path()
                    .to_owned()
                    .join(relpath.strip_prefix("/").unwrap_or(relpath));
                let abspath = Cow::Borrowed(abspath.as_path());
                // From here on, we only work with `abspath`.
                #[allow(dropping_references)]
                drop(relpath);
                // This is an alias for `/`, we send the main document and quit.
                if abspath == self.context.get_dir_path() {
                    let html = self.render_content_and_error(self.context.get_path())?;
                    return self.respond_content_ok(
                        Path::new("/"),
                        0,
                        "text/html",
                        html.as_bytes(),
                    );
                }
                //
                // Condition 2: Check if we serve this kind of extension.
                let extension = &*abspath
                    .extension()
                    .unwrap_or_default()
                    .to_str()
                    .unwrap_or_default()
                    .to_lowercase();
                // Find the corresponding mime type of this file extension.
                // Is this `extension` one the Tp-Note viewer renders itself?
                let mime_type = MarkupLanguage::from(extension).mine_type().or_else(|| {
                    // Is this extension registered in `served_mime_types`?
                    CFG.viewer
                        .served_mime_types
                        .iter()
                        .find_map(|(ext, mime)| (extension == ext).then_some(mime.as_str()))
                });
                if mime_type.is_none() {
                    // Reject all files with extensions not listed.
                    log::warn!(
                        "TCP port local {} to peer {}: \
                        files with extension '{}' are not served. Rejecting: '{}'",
                        self.stream.local_addr()?.port(),
                        self.stream.peer_addr()?.port(),
                        abspath
                            .extension()
                            .unwrap_or_default()
                            .to_str()
                            .unwrap_or_default(),
                        abspath.display(),
                    );
                    self.respond_not_found(&abspath)?;
                    return Ok(());
                };
                //
                // Condition 3: If this is a Tp-Note file, check the maximum
                // of delivered documents, then deliver.
                if MarkupLanguage::from(extension).is_some() {
                    if abspath.is_file() {
                        let delivered_docs_count =
                            self.delivered_tpnote_docs.read_recursive().len();
                        if delivered_docs_count < CFG.viewer.displayed_tpnote_count_max {
                            let html = self.render_content_and_error(&abspath)?;
                            self.respond_content_ok(&abspath, 0, "text/html", html.as_bytes())?;
                        } else {
                            self.respond_too_many_requests()?;
                        }
                        return Ok(());
                    } else {
                        log::info!("Referenced Tp-Note file not found: {}", abspath.display());
                        self.respond_not_found(&abspath)?;
                        return Ok(());
                    }
                }
                //
                // Condition 4: Is the file readable? We know:
                // `mime_type.is_some()` at this point.
                if abspath.is_file() {
                    self.respond_file_ok(&abspath, 0, mime_type.unwrap())?;
                } else {
                    self.respond_not_found(&abspath)?;
                }
            }
        }; // End of match path
        Ok(())
    }
fn respond_file_ok(
&mut self,
abspath: &Path,
max_age: usize,
mime_type: &str,
) -> Result<(), ViewerError> {
let cache_control = if max_age == 0 {
"no-cache".to_string()
} else {
format!("private, max-age={}", max_age)
};
let response = format!(
"HTTP/1.1 200 OK\r\n\
Date: {}\r\n\
Cache-Control: {}\r\n\
Content-Type: {}\r\n\
Content-Length: {}\r\n\r\n",
httpdate::fmt_http_date(SystemTime::now()),
cache_control,
mime_type,
fs::metadata(abspath)?.len(),
);
self.stream.write_all(response.as_bytes())?;
// Serve file in chunks.
let mut buffer = [0; TCP_WRITE_BUFFER_SIZE];
let mut file = fs::File::open(abspath)?;
while let Ok(n) = file.read(&mut buffer[..]) {
if n == 0 {
break;
};
self.stream.write_all(&buffer[..n])?;
}
log::trace!(
"TCP port local {} to peer {}: 200 OK, served file: '{}'",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
abspath.display()
);
Ok(())
}
fn respond_content_ok(
&mut self,
reqpath: &Path,
max_age: usize,
mime_type: &str,
content: &[u8],
) -> Result<(), ViewerError> {
let cache_control = if max_age == 0 {
"no-cache".to_string()
} else {
format!("private, max-age={}", max_age)
};
let response = format!(
"HTTP/1.1 200 OK\r\n\
Date: {}\r\n\
Cache-Control: {}\r\n\
Content-Type: {}\r\n\
Content-Length: {}\r\n\r\n",
httpdate::fmt_http_date(SystemTime::now()),
cache_control,
mime_type,
content.len(),
);
self.stream.write_all(response.as_bytes())?;
self.stream.write_all(content)?;
log::debug!(
"TCP port local {} to peer {}: 200 OK, served file: '{}'",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
reqpath.display()
);
Ok(())
}
// /// Write HTTP not found response.
// fn respond_forbidden(&mut self, reqpath: &Path) -> Result<(), ViewerError> {
// self.respond_http_error(403, "Forbidden", &reqpath.display().to_string())
// }
// fn respond_no_content_ok(&mut self) -> Result<(), ViewerError> {
// self.respond_http_error(204, "", "Ok, served header")
// }
    /// Writes an HTTP 404 "Not found" response; `reqpath` only appears in
    /// the log output.
    fn respond_not_found(&mut self, reqpath: &Path) -> Result<(), ViewerError> {
        self.respond_http_error(404, "Not found", &reqpath.display().to_string())
    }
    /// Writes an HTTP 405 "Method Not Allowed" response; the rejected
    /// `method` only appears in the log output.
    fn respond_method_not_allowed(&mut self, method: &str) -> Result<(), ViewerError> {
        self.respond_http_error(405, "Method Not Allowed", method)
    }
fn respond_too_many_requests(&mut self) -> Result<(), ViewerError> {
let mut log_msg;
{
let delivered_tpnote_docs = self.delivered_tpnote_docs.read_recursive();
// Prepare the log entry.
log_msg = format!(
"Error: too many requests. You have exceeded \n\
`viewer.displayed_tpnote_count_max = {}` by browsing:\n",
CFG.viewer.displayed_tpnote_count_max
);
for p in delivered_tpnote_docs.iter() {
log_msg.push_str("- ");
log_msg.push_str(&p.display().to_string());
log_msg.push('\n');
}
}
// Prepare the HTML output.
let content = format!(
"<!DOCTYPE html><html><head><meta charset=\"UTF-8\"></head>
<body><h2>Too many requests</h2>
<p>For security reasons, Tp-Note's internal viewer only displays
a limited number ({}) of Tp-Note files. This limit can be raised
by setting the configuration file variable:</p>
<p> <pre>viewer.displayed_tpnote_count_max</pre></p>
</body></html>
",
CFG.viewer.displayed_tpnote_count_max
);
self.respond_http_error(439, &content, &log_msg)
}
    /// Writes an HTTP 503 "Service unavailable" response with an empty
    /// log message.
    fn respond_service_unavailable(&mut self) -> Result<(), ViewerError> {
        self.respond_http_error(503, "Service unavailable", "")
    }
fn respond_http_error(
&mut self,
http_error_code: u16,
html_msg: &str,
log_msg: &str,
) -> Result<(), ViewerError> {
let response = format!(
"HTTP/1.1 {}\r\n\
Date: {}\r\n\
Cache-Control: private, no-cache\r\n\
Content-Type: text/html\r\n\
Content-Length: {}\r\n\r\n",
http_error_code,
httpdate::fmt_http_date(SystemTime::now()),
html_msg.len(),
);
self.stream.write_all(response.as_bytes())?;
self.stream.write_all(html_msg.as_bytes())?;
log::debug!(
"TCP port local {} to peer {}: {} {}: {}",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
http_error_code,
html_msg,
log_msg
);
Ok(())
}
    /// Renders the note `maybe_other_doc` to HTML; on rendition errors an
    /// error page is rendered instead (template `tmpl_html.viewer_error`).
    ///
    /// Only the base document (`self.context.get_path()`) gets the
    /// live-update JavaScript injected; other Tp-Note documents are
    /// rendered without it. Every successfully rendered document is
    /// recorded in `self.delivered_tpnote_docs`, and all links found
    /// during rendering are collected into `self.allowed_urls`.
    fn render_content_and_error(&self, maybe_other_doc: &Path) -> Result<String, ViewerError> {
        // First decompose header and body, then deserialize header.
        let content = ContentString::open(maybe_other_doc)?;
        // Do we render `self.path` or some other document?
        let (html_context, viewer_doc_js) = if self.context.get_path() == maybe_other_doc {
            let html_context = Context::from_context_path(&self.context);
            (html_context, self.live_update_js.as_str())
        } else {
            // This is not the base document, but some other Tp-Note document
            // we want to render. Store its path.
            // `front_matter::assert_precondition()` needs this later.
            // Also, the HTML template expects this to be set to the rendered
            // document.
            let html_context = Context::from(maybe_other_doc)?;
            // Only the first base document is live updated.
            // Overwrite the dynamic JavaScript.
            (html_context, "")
        };
        match HtmlRenderer::viewer_page::<ContentString>(
            html_context.clone(),
            content,
            viewer_doc_js,
        )
        // Now scan the HTML result for links and store them in a Map
        // accessible to all threads.
        // Secondly, convert all relative links to absolute links.
        .map(|html| {
            rewrite_links(
                html,
                html_context.get_root_path(),
                html_context.get_dir_path(),
                // Do convert relative links to absolute links.
                // Do not convert absolute links.
                LocalLinkKind::Short,
                // Do not append `.html` to `.md` links.
                false,
                // We clone only the `RwLock`, not the data.
                self.allowed_urls.clone(),
            )
        }) {
            // If the rendition went well, return the HTML.
            Ok(html) => {
                let mut delivered_tpnote_docs = self.delivered_tpnote_docs.write();
                delivered_tpnote_docs.insert(maybe_other_doc.to_owned());
                log::trace!(
                    "Viewer: so far served Tp-Note documents: {}",
                    delivered_tpnote_docs
                        .iter()
                        .map(|p| {
                            let mut s = "\n '".to_string();
                            s.push_str(&p.as_path().display().to_string());
                            s
                        })
                        .collect::<String>()
                );
                Ok(html)
            }
            // We could not render the note properly. Instead we will render a
            // special error page and return this instead.
            Err(e) => {
                // Render error page providing all information we have.
                let note_erroneous_content =
                    <ContentString as Content>::open(html_context.get_path())?;
                HtmlRenderer::error_page(
                    html_context,
                    note_erroneous_content,
                    &e.to_string(),
                    self.live_update_js.as_str(),
                )
                .map_err(|e| ViewerError::RenderErrorPage {
                    tmpl: "tmpl_html.viewer_error".to_string(),
                    source: e,
                })
            }
        }
    }
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote/src/viewer/error.rs | tpnote/src/viewer/error.rs | //! The viewer feature's error type.
use crate::error::ConfigFileError;
use core::str::Utf8Error;
use std::sync::mpsc::RecvError;
use thiserror::Error;
use tpnote_lib::error::{FileError, NoteError};
/// Represents an error in the viewer feature.
/// Hint: to see this error restart _Tp-Note_ with `--debug debug`.
#[derive(Debug, Error)]
pub enum ViewerError {
    /// In `update()` every HTTP client in `event_tx_list`
    /// receives a TCP message. If the client does not
    /// acknowledge this message, it is removed from the
    /// list. An empty list means that all clients have
    /// disconnected.
    // NOTE(review): the variant name misspells "Disconnected"; renaming it
    // would break every `match` site, so it is kept as is.
    #[error("All subscribers have disconnected.")]
    AllSubscriberDiconnected,
    /// Should not happen. Please report a bug.
    #[error("Can not view non-text files.")]
    MarkupLanguageNone,
    /// Should not happen. Please report a bug.
    #[error("URL path must start with `/`")]
    UrlMustStartWithSlash,
    /// Remedy: restart with `--debug trace` and make sure that
    /// no local process is attacking our HTTP server.
    /// If there are good reasons to allow more connections,
    /// raise the value `tcp_connections_max` in the
    /// configuration file.
    #[error(
        "Maximum open TCP connections ({max_conn}) exceeded. \
        Can not handle request. Consider raising the configuration variable \
        `tcp_connections_max` in the configuration file."
    )]
    TcpConnectionsExceeded { max_conn: usize },
    /// Network error.
    #[error("Can not read TCP stream: {error}")]
    StreamRead { error: std::io::Error },
    /// Network error.
    #[error("Can not parse HTTP header in TCP stream: {source_str}")]
    StreamParse { source_str: String },
    /// Remedy: Check the template syntax.
    #[error(
        "Failed to render the HTML error page (cf. `{tmpl}` in configuration file).\n{source}"
    )]
    RenderErrorPage { tmpl: String, source: NoteError },
    /// File access error.
    #[error(transparent)]
    File(#[from] FileError),
    /// Watcher error.
    #[error(transparent)]
    Notify(#[from] notify::Error),
    /// Network error.
    #[error(transparent)]
    Httparse(#[from] httparse::Error),
    /// Error in `sse_server::serve_event2()` when the watcher thread
    /// disconnects the `event` channel.
    #[error(transparent)]
    Recv(#[from] RecvError),
    /// Forward `FileError::ApplicationReturn` and
    /// `FileError::NoApplicationFound` needed by `viewer::web_browser`.
    #[error(transparent)]
    ConfigFile(#[from] ConfigFileError),
    /// Forward errors from `error::NoteError` when rendering the page.
    #[error(transparent)]
    Note(#[from] NoteError),
    /// Error while decoding a URL path.
    #[error(transparent)]
    Utf8(#[from] Utf8Error),
    /// Errors mostly related to the HTTP stream.
    #[error(transparent)]
    Io(#[from] std::io::Error),
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote/src/viewer/mod.rs | tpnote/src/viewer/mod.rs | //! Modules implementing the note content renderer and viewer feature.
mod error;
mod http_response;
pub mod init;
mod sse_server;
mod watcher;
mod web_browser;
use crate::viewer::init::Viewer;
use std::path::Path;
use std::thread;
use std::thread::JoinHandle;
/// Spawns a background thread that watches `path`, renders its markup to
/// HTML and displays the result in the system's default browser. Returns
/// the handle of the spawned thread.
#[inline]
pub fn launch_viewer_thread(path: &Path) -> JoinHandle<()> {
    let doc = path.to_owned();
    thread::spawn(move || Viewer::run(doc))
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote/src/viewer/init.rs | tpnote/src/viewer/init.rs | //! Main module for the markup renderer and note viewer feature.
use crate::config::CFG;
use crate::settings::ARGS;
use crate::settings::LAUNCH_EDITOR;
use crate::viewer::error::ViewerError;
use crate::viewer::sse_server::manage_connections;
use crate::viewer::sse_server::SseToken;
use crate::viewer::watcher::FileWatcher;
use crate::viewer::web_browser::launch_web_browser;
use std::net::TcpListener;
use std::path::PathBuf;
use std::sync::mpsc::SyncSender;
use std::sync::{Arc, Mutex};
use std::thread;
use std::thread::JoinHandle;
use std::time::Duration;
use std::time::Instant;
use tpnote_lib::markup_language::MarkupLanguage;
/// Minimum uptime in milliseconds we expect a real browser instance to run.
/// When starting a second browser instance, only a signal is sent to the
/// first instance and the process returns immediately. We detect this
/// case if it runs fewer milliseconds than this constant.
const BROWSER_INSTANCE_MIN_UPTIME: u128 = 3000;
/// This is where our loop back device is.
/// The following is also possible, but binds us to IPv4:
/// `pub const LOCALHOST: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST);`
/// So better just this string. It will be resolved while binding to the TCP
/// port or in the browser when connecting the event source.
pub const LOCALHOST: &str = "localhost";
/// Zero-sized type bundling the viewer's associated set-up and run
/// functions.
#[derive(Clone, Default, Debug)]
pub struct Viewer {}
impl Viewer {
/// Set up the file watcher, start the `event/html` server and launch web
/// browser. Returns when the user closes the web browser and/or file
/// editor. This is a small wrapper printing error messages.
pub fn run(doc: PathBuf) {
match Self::run2(doc) {
Ok(_) => (),
Err(e) => {
log::warn!("Viewer::run(): {}", e);
}
}
}
    /// Set up the file watcher, start the `event/html` server and launch
    /// the web browser. Returns when the user closes the web browser
    /// and/or file editor.
    ///
    /// # Errors
    ///
    /// Fails when the note's markup language can not be viewed, when no
    /// TCP port can be bound, or when the web browser can not be launched.
    #[inline]
    fn run2(doc: PathBuf) -> Result<(), ViewerError> {
        // Check if the master document (note file) has a known file extension.
        match MarkupLanguage::from(&*doc) {
            // A master document with this file extension is exempted from being
            // viewed. We quit here and do not start the viewer.
            MarkupLanguage::RendererDisabled => return Ok(()),
            // This should never happen, since non-Tp-Note files are viewed as
            // text files.
            MarkupLanguage::None => return Err(ViewerError::MarkupLanguageNone),
            // All other cases: start viewer.
            _ => (),
        };
        // Launch "server sent event" server.
        let listener = if let Some(p) = ARGS.port {
            TcpListener::bind((LOCALHOST, p))?
        } else {
            // Use random port.
            TcpListener::bind((LOCALHOST, 0))?
        };
        let localport = listener.local_addr()?.port();
        // Launch a background HTTP server thread to manage Server-Sent-Event
        // subscribers and to serve the rendered HTML.
        let event_tx_list: Arc<Mutex<Vec<SyncSender<SseToken>>>> = Arc::new(Mutex::new(Vec::new()));
        thread::spawn({
            // Use a separate scope to `clone()`.
            let doc = doc.clone();
            let event_tx_list = event_tx_list.clone();
            move || manage_connections(event_tx_list, listener, doc)
        });
        // Launch the file watcher thread.
        // Send a signal whenever the file is modified. Without error, this thread runs as long as
        // the parent thread (where we are) is running.
        let terminate_on_browser_disconnect = Arc::new(Mutex::new(false));
        let watcher_handle: JoinHandle<_> = thread::spawn({
            let terminate_on_browser_disconnect = terminate_on_browser_disconnect.clone();
            move || match FileWatcher::new(&doc, event_tx_list, terminate_on_browser_disconnect) {
                Ok(mut w) => w.run(),
                Err(e) => {
                    log::warn!("Can not start file watcher, giving up: {}", e);
                }
            }
        });
        // Launch web browser.
        let url = format!("http://{}:{}", LOCALHOST, localport);
        // Shall the browser be started a little later?
        if CFG.viewer.startup_delay > 0 {
            thread::sleep(Duration::from_millis(CFG.viewer.startup_delay as u64));
        };
        // Start timer.
        let browser_start = Instant::now();
        // This may block until the user closes the browser.
        launch_web_browser(&url)?;
        // Did it block? A very short runtime means only a signal was sent to
        // an already running browser instance (cf. `BROWSER_INSTANCE_MIN_UPTIME`).
        if browser_start.elapsed().as_millis() < BROWSER_INSTANCE_MIN_UPTIME {
            // We are here because the browser process did not block.
            // We instruct the watcher to terminate when it detects browser disconnection.
            if !*LAUNCH_EDITOR {
                // Release lock immediately.
                *terminate_on_browser_disconnect.lock().unwrap() = true;
            };
            watcher_handle.join().unwrap();
        }
        Ok(())
    }
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote/src/viewer/web_browser.rs | tpnote/src/viewer/web_browser.rs | //! Launch the user's favorite web browser.
use crate::config::CFG;
use crate::error::ConfigFileError;
use crate::process_ext::ChildExt;
use crate::settings::ENV_VAR_TPNOTE_BROWSER;
use crate::viewer::error::ViewerError;
use percent_encoding::percent_decode_str;
use std::env;
use std::process::Command;
use std::process::Stdio;
use webbrowser::{open_browser, Browser};
/// Launches a web browser and displays the note's HTML rendition.
/// The browsers listed in the configuration are tried first; if none of
/// them can be launched, the system's default web browser serves as fall
/// back. When not in fall back mode, this function blocks until the user
/// closes the browser window.
#[inline]
pub fn launch_web_browser(url: &str) -> Result<(), ViewerError> {
    match launch_listed_browser(url) {
        Ok(()) => Ok(()),
        Err(e) => {
            log::warn!(
                "{}\n\
                As fall back workaround, trying to launch the\n\
                system's default web browser.",
                e
            );
            // This might not block in all circumstances.
            open_browser(Browser::Default, url)?;
            Ok(())
        }
    }
}
/// Launches, one by one, all browsers from the list `CFG.app_args.browser`
/// (or from the environment variable `TPNOTE_BROWSER`, if set) until it
/// finds an installed one. This blocks until the browser is closed by the
/// user.
///
/// # Errors
///
/// * `ConfigFileError::ApplicationReturn` when a browser was launched but
///   exited with a non-zero exit code.
/// * `ConfigFileError::NoApplicationFound` when none of the configured
///   browsers could be started.
pub fn launch_listed_browser(url: &str) -> Result<(), ViewerError> {
    let mut args_list = Vec::new();
    let mut executable_list = Vec::new();
    // Choose the right parameter list for the current operating system.
    let vv: Vec<Vec<String>>;
    #[cfg(all(target_family = "unix", not(target_os = "macos")))]
    let app_args = &CFG.app_args.unix;
    #[cfg(target_family = "windows")]
    let app_args = &CFG.app_args.windows;
    #[cfg(all(target_family = "unix", target_os = "macos"))]
    let app_args = &CFG.app_args.macos;
    // `var_name` records where `browser_args` came from; it is reported in
    // error messages. Computing both in one `match` removes the former
    // `#[allow(unused_assignments)]` placeholder assignment.
    let (var_name, browser_args) = match env::var(ENV_VAR_TPNOTE_BROWSER) {
        // A non-empty environment variable overrides the configuration
        // file. Its value is one whitespace separated, percent encoded
        // command line.
        Ok(s) if !s.is_empty() => {
            vv = vec![s
                .split_ascii_whitespace()
                .map(|s| percent_decode_str(s).decode_utf8_lossy().to_string())
                .collect::<Vec<String>>()];
            (ENV_VAR_TPNOTE_BROWSER.to_string(), &vv)
        }
        _ => ("app_args.browser".to_string(), &app_args.browser),
    };
    // Prepare launch of browser/viewer.
    for app in browser_args {
        executable_list.push(&app[0]);
        let mut args: Vec<&str> = Vec::new();
        for s in app[1..].iter() {
            args.push(s);
        }
        args.push(url);
        args_list.push(args);
    }
    // Move and make immutable.
    let args_list = args_list;
    let executable_list = executable_list;
    // Launch web browser.
    let mut executable_found = false;
    for i in 0..executable_list.len() {
        log::debug!(
            "Trying to launch the web browser:\n'{}' {}",
            executable_list[i],
            args_list[i]
                .iter()
                .map(|p| {
                    let mut s = "'".to_string();
                    s.push_str(p);
                    s.push_str("' ");
                    s
                })
                .collect::<String>()
        );
        // Check if this is a `flatpak run <app>` command.
        #[cfg(target_family = "unix")]
        if executable_list[i].starts_with("flatpak")
            && args_list[i].len() >= 3
            && args_list[i][0] == "run"
        {
            // Check if the Flatpak is installed on this system with `flatpak info <app>`.
            if let Ok(ecode) = Command::new(executable_list[i])
                .args(["info", args_list[i][1]])
                .stderr(Stdio::null())
                .stdout(Stdio::null())
                .status()
            {
                if !ecode.success() {
                    // This is a Flatpak command, but the application is not
                    // installed on this system. Silently skip this entry.
                    log::info!("Flatpak executable \"{}\" not found.", args_list[i][1]);
                    continue;
                };
            };
        };
        let (config_stdin, config_stdout) = (Stdio::null(), Stdio::null());
        let mut command = Command::new(executable_list[i]);
        command
            .args(&args_list[i])
            .stdin(config_stdin)
            .stdout(config_stdout)
            .stderr(Stdio::null());
        match command.spawn() {
            Ok(mut child) => {
                let ecode = child.wait_subprocess()?;
                if ecode.success() {
                    executable_found = true;
                    break;
                } else {
                    // Report the actual configuration source (`var_name`)
                    // instead of the former hard coded — and misspelled —
                    // literal "browser_agrs".
                    return Err(ConfigFileError::ApplicationReturn {
                        code: ecode,
                        var_name,
                        args: (*browser_args[i]).to_vec(),
                    }
                    .into());
                }
            }
            Err(e) => {
                log::info!("Web browser \"{}\" not found: {}", executable_list[i], e);
            }
        }
    }
    if !executable_found {
        let mut app_list = String::new();
        for l in browser_args.iter() {
            app_list.push_str("\n\t");
            for a in l {
                app_list.push_str(a);
                app_list.push(' ');
            }
            // Drop the trailing space; `saturating_sub` guards against an
            // (unlikely) empty command line.
            app_list.truncate(app_list.len().saturating_sub(" ".len()));
        }
        return Err(ConfigFileError::NoApplicationFound { app_list, var_name }.into());
    };
    Ok(())
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote/src/viewer/sse_server.rs | tpnote/src/viewer/sse_server.rs | //! Server-sent-event server for the note viewer feature.
//! This module contains also the web browser JavaScript client code.
use crate::config::CFG;
use crate::viewer::error::ViewerError;
use crate::viewer::http_response::HttpResponse;
use crate::viewer::init::LOCALHOST;
use parking_lot::RwLock;
use percent_encoding::percent_decode_str;
use std::collections::HashSet;
use std::io::{ErrorKind, Read, Write};
use std::net::Ipv4Addr;
use std::net::SocketAddr;
use std::net::SocketAddrV4;
use std::net::{TcpListener, TcpStream};
use std::path::PathBuf;
use std::str;
use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::SystemTime;
use tpnote_lib::context::{Context, HasSettings};
/// The TCP stream is read in chunks of this size: 0x400 = 1 KiB.
const TCP_READ_BUFFER_SIZE: usize = 0x400;
/// JavaScript client code, part 1.
/// Opens an `EventSource` connection to this server's `/events` endpoint.
/// The host and port are inserted between part 1 and part 2 at runtime.
pub const SSE_CLIENT_CODE1: &str = r#"
var evtSource = new EventSource("http://"#;
/// JavaScript client code, part 2.
/// On an `update` event: saves the current scroll position into local
/// storage and reloads the page.
/// On page load: jumps to the last saved scroll position, if any.
pub const SSE_CLIENT_CODE2: &str = r#"/events");
evtSource.addEventListener("update", function(e) {
localStorage.setItem('scrollPosition', window.scrollY);
window.location.reload(true);
});
window.addEventListener('load', function() {
if(localStorage.getItem('scrollPosition') !== null)
window.scrollTo(0, localStorage.getItem('scrollPosition'));
});
"#;
/// URL path for Server-Sent-Events.
const SSE_EVENT_PATH: &str = "/events";
/// Server-Sent-Event tokens our HTTP client has registered to receive.
#[derive(Debug, Clone, Copy)]
pub enum SseToken {
    /// Requests nothing from the client; merely checks whether the
    /// connection is still alive.
    Ping,
    /// Requests the client to reload the displayed page.
    Update,
}
/// Accepts incoming TCP connections in an endless loop and spawns one
/// `ServerThread` per connection.
///
/// For every accepted connection, a zero-capacity `SyncSender<SseToken>` is
/// registered in `event_tx_list`; other threads use it to push `Ping` or
/// `Update` events to the connected client. `doc_path` is the path of the
/// note document to serve; it is stored in the rendering `Context` that is
/// cloned into every connection thread.
///
/// This function only returns when `listener.incoming()` is exhausted;
/// failed TCP accepts are logged and skipped.
///
/// # Panics
///
/// Panics if no `Context` can be built from `doc_path`.
pub fn manage_connections(
    event_tx_list: Arc<Mutex<Vec<SyncSender<SseToken>>>>,
    listener: TcpListener,
    doc_path: PathBuf,
) {
    // A list of referenced local links to images or other documents as
    // they appeared in the displayed documents.
    // Every thread gets an (ARC) reference to it.
    let allowed_urls = Arc::new(RwLock::new(HashSet::new()));
    // Subset of the above list containing only displayed Tp-Note documents.
    let delivered_tpnote_docs = Arc::new(RwLock::new(HashSet::new()));
    // We use an ARC to count the number of running threads.
    let conn_counter = Arc::new(());
    // Store `doc_path` in the `context.path` and
    // in the Tera variable `TMPL_VAR_PATH`.
    let context = Context::from(&doc_path).expect("cannot access document path");
    //
    log::info!(
        "Viewer listens to incoming requests.\n\
         Besides all Tp-Note document extensions, \
         the following file extensions are served:\n\
         {}",
        {
            use std::fmt::Write;
            // Build a comma separated list of the served extensions ...
            let mut list =
                CFG.viewer
                    .served_mime_types
                    .iter()
                    .fold(String::new(), |mut output, (k, _v)| {
                        let _ = write!(output, "{k}, ");
                        output
                    });
            // ... and strip the trailing ", ".
            list.truncate(list.len().saturating_sub(2));
            list
        }
    );
    for stream in listener.incoming() {
        match stream {
            Ok(stream) => {
                // Zero-capacity (rendezvous) channel: a sender blocks until
                // this connection's thread is ready to receive the event.
                let (event_tx, event_rx) = sync_channel(0);
                event_tx_list.lock().unwrap().push(event_tx);
                thread::spawn({
                    let allowed_urls = allowed_urls.clone();
                    let delivered_tpnote_docs = delivered_tpnote_docs.clone();
                    let conn_counter = conn_counter.clone();
                    let context = context.clone();
                    move || {
                        let mut st = ServerThread::new(
                            event_rx,
                            stream,
                            allowed_urls,
                            delivered_tpnote_docs,
                            conn_counter,
                            context,
                        );
                        st.serve_connection()
                    }
                });
            }
            Err(e) => log::warn!("TCP connection failed: {}", e),
        }
    }
}
/// Server thread state.
pub(crate) struct ServerThread {
    /// Receiver side of the channel where `update` events are sent.
    rx: Receiver<SseToken>,
    /// Byte stream coming from a TCP connection.
    pub(crate) stream: TcpStream,
    /// A list of referenced relative URLs to images or other
    /// documents as they appear in the delivered Tp-Note documents.
    /// This list contains local links that may or may not have been displayed.
    /// The local links in this list are relative to `self.context.root_path`.
    pub(crate) allowed_urls: Arc<RwLock<HashSet<PathBuf>>>,
    /// Subset of `allowed_urls` containing only URLs that
    /// have been actually delivered. The list only contains URLs to Tp-Note
    /// documents.
    /// The local links in this list are absolute.
    pub(crate) delivered_tpnote_docs: Arc<RwLock<HashSet<PathBuf>>>,
    /// We do not store anything here, instead we use the ARC pointing to
    /// `conn_counter` to count the number of instances of `ServerThread`.
    pub(crate) conn_counter: Arc<()>,
    /// The constructor stores the path of the note document in `context.path`
    /// and in the Tera variable `TMPL_VAR_PATH`.
    /// Both are needed for rendering to HTML.
    pub(crate) context: Context<HasSettings>,
    /// JavaScript injection code used by the root page for live updates.
    /// Root pages insert this in their context with the key
    /// `TMPL_HTML_VAR_VIEWER_DOC_JS`.
    pub(crate) live_update_js: String,
}
impl ServerThread {
/// Constructor.
fn new(
rx: Receiver<SseToken>,
stream: TcpStream,
allowed_urls: Arc<RwLock<HashSet<PathBuf>>>,
delivered_tpnote_docs: Arc<RwLock<HashSet<PathBuf>>>,
conn_counter: Arc<()>,
context: Context<HasSettings>,
) -> Self {
let local_addr = stream.local_addr();
// Compose JavaScript code.
let live_update_js = match local_addr {
Ok(addr) => format!(
"{}{}:{}{}",
SSE_CLIENT_CODE1,
LOCALHOST,
addr.port(),
SSE_CLIENT_CODE2
),
Err(_) => {
panic!("No TCP connection: socket address of local half is missing.")
}
};
Self {
rx,
stream,
allowed_urls,
delivered_tpnote_docs,
conn_counter,
context,
live_update_js,
}
}
/// Wrapper for `serve_connection2()` that logs
/// errors as log message warnings.
fn serve_connection(&mut self) {
match Self::serve_connection2(self) {
Ok(_) => (),
Err(e) => {
log::debug!(
"TCP port local {} to peer {}: Closed connection because of error: {}",
self.stream
.local_addr()
.unwrap_or_else(|_| SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(0, 0, 0, 0),
0
)))
.port(),
self.stream
.peer_addr()
.unwrap_or_else(|_| SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(0, 0, 0, 0),
0
)))
.port(),
e
);
}
}
}
/// HTTP server: serves content and events via the specified subscriber stream.
#[inline]
#[allow(clippy::needless_return)]
fn serve_connection2(&mut self) -> Result<(), ViewerError> {
// One reference is hold by the `manage_connections` thread and does not count.
// This is why we subtract 1.
let open_connections = Arc::<()>::strong_count(&self.conn_counter) - 1;
log::trace!(
"TCP port local {} to peer {}: New incoming TCP connection ({} open).",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
open_connections
);
// Check if we exceed our connection limit.
if open_connections > CFG.viewer.tcp_connections_max {
self.respond_service_unavailable()?;
// This ends this thread and closes the connection.
return Err(ViewerError::TcpConnectionsExceeded {
max_conn: CFG.viewer.tcp_connections_max,
});
}
'tcp_connection: loop {
// This is inspired by the Spook crate.
// Read the request.
let mut read_buffer = [0u8; TCP_READ_BUFFER_SIZE];
let mut buffer = Vec::new();
let (method, path) = 'assemble_tcp_chunks: loop {
// Read the request, or part thereof.
match self.stream.read(&mut read_buffer) {
Ok(0) => {
log::trace!(
"TCP port local {} to peer {}: Connection closed by peer.",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port()
);
// Connection by peer.
break 'tcp_connection;
}
Err(e) => {
// Connection closed or error.
return Err(ViewerError::StreamRead { error: e });
}
Ok(n) => {
// Successful read.
buffer.extend_from_slice(&read_buffer[..n]);
log::trace!(
"TCP port local {} to peer {}: chunk: {:?} ...",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
std::str::from_utf8(&read_buffer)
.unwrap_or_default()
.chars()
.take(60)
.collect::<String>()
);
}
}
// Try to parse the request.
let mut headers = [httparse::EMPTY_HEADER; 16];
let mut req = httparse::Request::new(&mut headers);
let res = req.parse(&buffer)?;
if res.is_partial() {
continue 'assemble_tcp_chunks;
}
// Check if the HTTP header is complete and valid.
if res.is_complete() {
if let (Some(method), Some(path)) = (req.method, req.path) {
// This is the only regular exit.
break 'assemble_tcp_chunks (method, path);
}
};
// We quit with error. There is nothing more we can do here.
return Err(ViewerError::StreamParse {
source_str: std::str::from_utf8(&buffer)
.unwrap_or_default()
.chars()
.take(60)
.collect::<String>(),
});
};
// End of input chunk loop.
// The only supported request method for SSE is GET.
if method != "GET" {
self.respond_method_not_allowed(method)?;
continue 'tcp_connection;
}
// Decode the percent encoding in the URL path.
let path = percent_decode_str(path).decode_utf8()?;
// Check the path.
// Serve note rendition.
match &*path {
// This is a connection for Server-Sent-Events.
SSE_EVENT_PATH => {
// Serve event response, but keep the connection.
self.respond_event_ok()?;
// Make the stream non-blocking to be able to detect whether the
// connection was closed by the client.
self.stream.set_nonblocking(true)?;
// Serve events until the connection is closed.
// Keep in mind that the client will often close
// the request after the first event if the event
// is used to trigger a page refresh, so try to eagerly
// detect closed connections.
'_event: loop {
// Wait for the next update.
let msg = self.rx.recv()?;
// Detect whether the connection was closed.
match self.stream.read(&mut read_buffer) {
// Connection closed.
Ok(0) => {
log::trace!(
"TCP port local {} to peer {}: Event connection closed by peer.",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port()
);
// Our peer closed this connection, we finish also then.
break 'tcp_connection;
}
// Connection alive.
Ok(_) => {}
// `WouldBlock` is OK, all others not.
Err(e) => {
if e.kind() != ErrorKind::WouldBlock {
// Something bad happened.
return Err(ViewerError::StreamRead { error: e });
}
}
}
// Send event.
let event = match msg {
SseToken::Update => "event: update\r\ndata:\r\n\r\n".to_string(),
SseToken::Ping => ": ping\r\n\r\n".to_string(),
};
self.stream.write_all(event.as_bytes())?;
log::trace!(
"TCP port local {} to peer {} ({} open TCP conn.): pushed '{:?}' in event connection to web browser.",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
Arc::<()>::strong_count(&self.conn_counter) - 1,
msg,
);
}
}
// Serve all other documents.
_ => self.respond(&path)?,
}; // End of match path
} // Go to 'tcp_connection loop start
log::trace!(
"TCP port local {} to peer {}: ({} open). Closing this TCP connection.",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
// We subtract 1 for the `manage connection()` thread, and
// 1 for the thread we will close in a moment.
Arc::<()>::strong_count(&self.conn_counter) - 2,
);
// We came here because the client closed this connection.
Ok(())
}
/// Write HTTP event response.
fn respond_event_ok(&mut self) -> Result<(), ViewerError> {
// Declare SSE capability and allow cross-origin access.
let response = format!(
"\
HTTP/1.1 200 OK\r\n\
Date: {}\r\n\
Access-Control-Allow-Origin: *\r\n\
Cache-Control: no-cache\r\n\
Content-Type: text/event-stream\r\n\
\r\n",
httpdate::fmt_http_date(SystemTime::now()),
);
self.stream.write_all(response.as_bytes())?;
log::debug!(
"TCP port local {} to peer {}: 200 OK, served event header, \
keeping event connection open ...",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
);
Ok(())
}
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-lib/src/config.rs | tpnote-lib/src/config.rs | //! Set configuration defaults by reading the internal default
//! configuration file `LIB_CONFIG_DEFAULT_TOML`. After processing, the
//! configuration data is exposed via the variable `LIB_CFG` behind a
//! mutex. This makes it possible to modify all configuration defaults
//! (including templates) at runtime.
//!
//! ```rust
//! use tpnote_lib::config::LIB_CFG;
//!
//! let mut lib_cfg = LIB_CFG.write();
//! let i = lib_cfg.scheme_idx("default").unwrap();
//! (*lib_cfg).scheme[i].filename.copy_counter.extra_separator = '@'.to_string();
//! ```
//!
//! Contract to be uphold by the user of this API:
//! seeing that `LIB_CFG` is mutable at runtime, it must be sourced before the
//! start of Tp-Note. All modification of `LIB_CFG` is terminated before
//! accessing the high-level API in the `workflow` module of this crate.
use crate::config_value::CfgVal;
use crate::error::LibCfgError;
#[cfg(feature = "renderer")]
use crate::highlight::get_highlighting_css;
#[cfg(feature = "lang-detection")]
use crate::lingua::IsoCode639_1;
use crate::markup_language::InputConverter;
use crate::markup_language::MarkupLanguage;
use parking_lot::RwLock;
use sanitize_filename_reader_friendly::TRIM_LINE_CHARS;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fmt::Write;
use std::str::FromStr;
use std::sync::LazyLock;
#[cfg(feature = "renderer")]
use syntect::highlighting::ThemeSet;
use toml::Value;
/// Default library configuration as TOML.
pub const LIB_CONFIG_DEFAULT_TOML: &str = include_str!("config_default.toml");
/// Maximum length of a note's filename in bytes. If a filename template produces
/// a longer string, it will be truncated.
pub const FILENAME_LEN_MAX: usize =
// Most filesystem's limit.
255
// Additional separator.
- 2
// Additional copy counter.
- 5
// Extra spare bytes, in case the user's copy counter is longer.
- 6;
/// The appearance of a file with this filename marks the position of
/// `TMPL_VAR_ROOT_PATH`.
pub const FILENAME_ROOT_PATH_MARKER: &str = "tpnote.toml";
/// When a filename is taken already, Tp-Note adds a copy
/// counter number in the range of `0..COPY_COUNTER_MAX`
/// at the end.
pub const FILENAME_COPY_COUNTER_MAX: usize = 400;
/// A filename extension, if present, is separated by a dot.
pub(crate) const FILENAME_EXTENSION_SEPARATOR_DOT: char = '.';
/// A dotfile starts with a dot.
pub(crate) const FILENAME_DOTFILE_MARKER: char = '.';
/// The template variable contains the fully qualified path of the `<path>`
/// command line argument. If `<path>` points to a file, the variable contains
/// the file path. If it points to a directory, it contains the directory path,
/// or - if no `path` is given - the current working directory.
pub const TMPL_VAR_PATH: &str = "path";
/// Contains the fully qualified directory path of the `<path>` command line
/// argument.
/// If `<path>` points to a file, the last component (the filename) is omitted.
/// If it points to a directory, the content of this variable is identical to
/// `TMPL_VAR_PATH`,
pub const TMPL_VAR_DIR_PATH: &str = "dir_path";
/// The root directory of the current note. This is the first directory,
/// that upwards from `TMPL_VAR_DIR_PATH` contains a file named
/// `FILENAME_ROOT_PATH_MARKER`. The root directory is used by Tp-Note's viewer
/// as base directory
pub const TMPL_VAR_ROOT_PATH: &str = "root_path";
/// Names the header of some `Content`.
pub const TMPL_VAR_HEADER: &str = "header";
/// Names the body of some `Content`.
pub const TMPL_VAR_BODY: &str = "body";
/// The name of the HTML clipboard to refer to in templates.
/// Note: as current HTML clipboard provider never send YAML headers (yet),
/// `html_clipboard.header` is always empty.
pub const TMPL_VAR_HTML_CLIPBOARD: &str = "html_clipboard";
/// The name of the plaintext clipboard to refer to in templates.
pub const TMPL_VAR_TXT_CLIPBOARD: &str = "txt_clipboard";
/// The name of the standard input stream to refer to in templates.
pub const TMPL_VAR_STDIN: &str = "stdin";
/// Contains the name of the selected scheme.
pub const TMPL_VAR_CURRENT_SCHEME: &str = "current_scheme";
/// Contains the default file extension for new note files as defined in the
/// configuration file.
pub const TMPL_VAR_EXTENSION_DEFAULT: &str = "extension_default";
/// Contains the name of the default scheme when no `scheme:` field is
/// present in the note's YAML header.
/// This value defined in the configuration file under the same name and
/// copied from there.
pub const TMPL_VAR_SCHEME_SYNC_DEFAULT: &str = "scheme_sync_default";
/// Contains the content of the first non empty environment variable
/// `LOGNAME`, `USERNAME` or `USER`.
pub const TMPL_VAR_USERNAME: &str = "username";
/// Contains the user's language tag as defined in
/// [RFC 5646](http://www.rfc-editor.org/rfc/rfc5646.txt).
/// Not to be confused with the Unix `LANG` environment variable from which
/// this value is derived under Linux/MacOS.
/// Under Windows, the user's language tag is queried through the Win-API.
/// If defined, the environment variable `TPNOTE_LANG` overwrites this value
/// (all operating systems).
pub const TMPL_VAR_LANG: &str = "lang";
/// A copy of the command line option `--force_lang`. The empty value
/// means "disable language forcing".
pub const TMPL_VAR_FORCE_LANG: &str = "force_lang";
/// Contains the body of the file the command line option `<path>`
/// points to. Only available in the `tmpl.from_text_file_content`,
/// `tmpl.sync_filename` and HTML templates.
pub const TMPL_VAR_DOC: &str = "doc";
/// Contains the date of the file the command line option `<path>` points to.
/// The date is represented as an integer the way `std::time::SystemTime`
/// resolves to on the platform. Only available in the
/// `tmpl.from_text_file_content`, `tmpl.sync_filename` and HTML templates.
/// Note: this variable might not be defined with some filesystems or on some
/// platforms.
pub const TMPL_VAR_DOC_FILE_DATE: &str = "doc_file_date";
/// Prefix prepended to front matter field names when a template variable
/// is generated with the same name.
pub const TMPL_VAR_FM_: &str = "fm_";
/// Contains a Hash Map with all front matter fields. Lists are flattened
/// into strings. These variables are only available in the
/// `tmpl.from_text_file_content`, `tmpl.sync_filename` and HTML templates.
pub const TMPL_VAR_FM_ALL: &str = "fm";
/// If present, this header variable can switch the `settings.current_theme`
/// before the filename template is processed.
pub const TMPL_VAR_FM_SCHEME: &str = "fm_scheme";
/// By default, the template `tmpl.sync_filename` defines the function of this
/// variable as follows:
/// Contains the value of the front matter field `file_ext` and determines the
/// markup language used to render the document. When the field is missing the
/// markup language is derived from the note's filename extension.
///
/// This is a dynamically generated variable originating from the front matter
/// of the current note. As all front matter variables, its value is copied as
/// it is without modification. Here, the only special treatment is, when
/// analyzing the front matter, it is verified, that the value of this variable
/// is registered in one of the `filename.extensions_*` variables.
pub const TMPL_VAR_FM_FILE_EXT: &str = "fm_file_ext";
/// By default, the template `tmpl.sync_filename` defines the function of this
/// variable as follows:
/// If this variable is defined, the _sort tag_ of the filename is replaced with
/// the value of this variable next time the filename is synchronized. If not
/// defined, the sort tag of the filename is never changed.
///
/// This is a dynamically generated variable originating from the front matter
/// of the current note. As all front matter variables, its value is copied as
/// it is without modification. Here, the only special treatment is, when
/// analyzing the front matter, it is verified, that all the characters of the
/// value of this variable are listed in `filename.sort_tag.extra_chars`.
pub const TMPL_VAR_FM_SORT_TAG: &str = "fm_sort_tag";
/// Contains the value of the front matter field `no_filename_sync`. When set
/// to `no_filename_sync:` or `no_filename_sync: true`, the filename
/// synchronization mechanism is disabled for this note file. Deprecated
/// in favor of `TMPL_VAR_FM_FILENAME_SYNC`.
pub const TMPL_VAR_FM_NO_FILENAME_SYNC: &str = "fm_no_filename_sync";
/// Contains the value of the front matter field `filename_sync`. When set to
/// `filename_sync: false`, the filename synchronization mechanism is
/// disabled for this note file. Default value is `true`.
pub const TMPL_VAR_FM_FILENAME_SYNC: &str = "fm_filename_sync";
/// HTML template variable containing the automatically generated JavaScript
/// code to be included in the HTML rendition.
pub const TMPL_HTML_VAR_VIEWER_DOC_JS: &str = "viewer_doc_js";
/// HTML template variable name. The value contains Tp-Note's CSS code
/// to be included in the HTML rendition produced by the exporter.
pub const TMPL_HTML_VAR_EXPORTER_DOC_CSS: &str = "exporter_doc_css";
/// HTML template variable name. The value contains the highlighting CSS code
/// to be included in the HTML rendition produced by the exporter.
pub const TMPL_HTML_VAR_EXPORTER_HIGHLIGHTING_CSS: &str = "exporter_highlighting_css";
/// HTML template variable name. The value contains the path, for which the
/// viewer delivers Tp-Note's CSS code. Note, the viewer delivers the same CSS
/// code which is stored as value for `TMPL_HTML_VAR_VIEWER_DOC_CSS`.
pub const TMPL_HTML_VAR_VIEWER_DOC_CSS_PATH: &str = "viewer_doc_css_path";
/// The constant URL for which Tp-Note's internal web server delivers the CSS
/// style sheet. In HTML templates, this constant can be accessed as value of
/// the `TMPL_HTML_VAR_VIEWER_DOC_CSS_PATH` variable.
pub const TMPL_HTML_VAR_VIEWER_DOC_CSS_PATH_VALUE: &str = "/viewer_doc.css";
/// HTML template variable name. The value contains the path, for which the
/// viewer delivers Tp-Note's highlighting CSS code.
pub const TMPL_HTML_VAR_VIEWER_HIGHLIGHTING_CSS_PATH: &str = "viewer_highlighting_css_path";
/// The constant URL for which Tp-Note's internal web server delivers the CSS
/// style sheet. In HTML templates, this constant can be accessed as value of
/// the `TMPL_HTML_VAR_VIEWER_HIGHLIGHTING_CSS_PATH` variable.
pub const TMPL_HTML_VAR_VIEWER_HIGHLIGHTING_CSS_PATH_VALUE: &str = "/viewer_highlighting.css";
/// HTML template variable used in the error page containing the error message
/// explaining why this page could not be rendered.
#[cfg(feature = "viewer")]
pub const TMPL_HTML_VAR_DOC_ERROR: &str = "doc_error";
/// HTML template variable used in the error page containing a verbatim
/// HTML rendition with hyperlinks of the erroneous note file.
#[cfg(feature = "viewer")]
pub const TMPL_HTML_VAR_DOC_TEXT: &str = "doc_text";
/// Global variable containing the filename and template related configuration
/// data. This can be changed by the consumer of this library. Once the
/// initialization is done, this should remain static.
/// For session configuration see: `settings::SETTINGS`.
pub static LIB_CFG: LazyLock<RwLock<LibCfg>> = LazyLock::new(|| RwLock::new(LibCfg::default()));
/// The top-level field names of the raw configuration after deserialization.
/// They correspond to the fields of `LibCfgIntermediate`.
pub const LIB_CFG_RAW_FIELD_NAMES: [&str; 4] =
    ["scheme_sync_default", "base_scheme", "scheme", "tmpl_html"];
/// Processed configuration data.
///
/// Its structure is different from the input form defined in `LibCfgRaw` (see
/// example in `LIB_CONFIG_DEFAULT_TOML`).
/// For conversion use:
///
/// ```rust
/// use tpnote_lib::config::LIB_CONFIG_DEFAULT_TOML;
/// use tpnote_lib::config::LibCfg;
/// use tpnote_lib::config_value::CfgVal;
/// use std::str::FromStr;
///
/// let cfg_val = CfgVal::from_str(LIB_CONFIG_DEFAULT_TOML).unwrap();
///
/// // Run test.
/// let lib_cfg = LibCfg::try_from(cfg_val).unwrap();
///
/// // Check.
/// assert_eq!(lib_cfg.scheme_sync_default, "default")
/// ```
#[derive(Debug, Serialize, Deserialize)]
#[serde(try_from = "LibCfgIntermediate")]
pub struct LibCfg {
    /// The fallback scheme for the `sync_filename` template choice, if the
    /// `scheme` header variable is empty or is not defined.
    pub scheme_sync_default: String,
    /// Configuration of `Scheme`.
    pub scheme: Vec<Scheme>,
    /// Configuration of HTML templates.
    pub tmpl_html: TmplHtml,
}
/// Unprocessed configuration data, deserialized from the configuration file.
/// This is an intermediate representation of `LibCfg`.
/// This defines the structure of the configuration file.
/// Its default values are stored in serialized form in
/// `LIB_CONFIG_DEFAULT_TOML`.
#[derive(Debug, Serialize, Deserialize)]
struct LibCfgIntermediate {
    /// The fallback scheme for the `sync_filename` template choice, if the
    /// `scheme` header variable is empty or is not defined.
    pub scheme_sync_default: String,
    /// This is the base scheme, from which all instantiated schemes inherit.
    pub base_scheme: Value,
    /// This flattens into a `scheme=Vec<Scheme>` in which the `Scheme`
    /// definitions are not complete. Only after merging them into a copy of
    /// `base_scheme` can they be parsed into `Scheme` structs. The result is
    /// not kept here; it is stored in the `LibCfg` struct instead.
    #[serde(flatten)]
    pub scheme: HashMap<String, Value>,
    /// Configuration of HTML templates.
    pub tmpl_html: TmplHtml,
}
impl LibCfg {
    /// Returns the index of the scheme named `name` in `self.scheme`.
    /// If no scheme with that name can be found, returns
    /// `LibCfgError::SchemeNotFound` listing all available scheme names.
    pub fn scheme_idx(&self, name: &str) -> Result<usize, LibCfgError> {
        self.scheme
            .iter()
            .enumerate()
            .find(|&(_, scheme)| scheme.name == name)
            .map_or_else(
                || {
                    Err(LibCfgError::SchemeNotFound {
                        scheme_name: name.to_string(),
                        schemes: {
                            // Already imported: `use std::fmt::Write;`
                            // Build a "name1, name2, " list ...
                            let mut errstr =
                                self.scheme.iter().fold(String::new(), |mut output, s| {
                                    let _ = write!(output, "{}, ", s.name);
                                    output
                                });
                            // ... and strip the trailing ", ".
                            errstr.truncate(errstr.len().saturating_sub(2));
                            errstr
                        },
                    })
                },
                |(i, _)| Ok(i),
            )
    }
    /// Perform some semantic consistency checks.
    ///
    /// * `sort_tag.extra_separator` must NOT be in `sort_tag.extra_chars`.
    /// * `sort_tag.extra_separator` must NOT be in `0..9`.
    /// * `sort_tag.extra_separator` must NOT be in `a..z`.
    /// * `sort_tag.extra_separator` must NOT be `FILENAME_DOTFILE_MARKER`.
    /// * `copy_counter.extra_separator` must be one of
    ///   `sanitize_filename_reader_friendly::TRIM_LINE_CHARS`.
    /// * Every character of `sort_tag.separator` must be an ASCII digit, an
    ///   ASCII lowercase letter, or listed in `sort_tag.extra_chars`.
    /// * `sort_tag.separator` must NOT start with `FILENAME_DOTFILE_MARKER`.
    /// * `filename.extension_default` must be listed in `filename.extensions`.
    /// * `filter.get_lang.relative_distance_min` must be in `0.0..=0.99`.
    ///
    /// # Errors
    ///
    /// Returns the corresponding `LibCfgError` variant for the first failing
    /// check.
    pub fn assert_validity(&self) -> Result<(), LibCfgError> {
        for scheme in &self.scheme {
            // Check for obvious configuration errors.
            // * `sort_tag.extra_separator` must NOT be in `sort_tag.extra_chars`.
            // * `sort_tag.extra_separator` must NOT be `FILENAME_DOTFILE_MARKER`,
            //   an ASCII digit or an ASCII lowercase letter.
            if scheme
                .filename
                .sort_tag
                .extra_chars
                .contains(scheme.filename.sort_tag.extra_separator)
                || (scheme.filename.sort_tag.extra_separator == FILENAME_DOTFILE_MARKER)
                || scheme.filename.sort_tag.extra_separator.is_ascii_digit()
                || scheme
                    .filename
                    .sort_tag
                    .extra_separator
                    .is_ascii_lowercase()
            {
                return Err(LibCfgError::SortTagExtraSeparator {
                    scheme_name: scheme.name.to_string(),
                    dot_file_marker: FILENAME_DOTFILE_MARKER,
                    sort_tag_extra_chars: scheme
                        .filename
                        .sort_tag
                        .extra_chars
                        .escape_default()
                        .to_string(),
                    extra_separator: scheme
                        .filename
                        .sort_tag
                        .extra_separator
                        .escape_default()
                        .to_string(),
                });
            }
            // Check for obvious configuration errors.
            // * Every character of `sort_tag.separator` must be an ASCII digit,
            //   an ASCII lowercase letter, or listed in `sort_tag.extra_chars`.
            // * `sort_tag.separator` must NOT start with `FILENAME_DOTFILE_MARKER`.
            if !scheme.filename.sort_tag.separator.chars().all(|c| {
                c.is_ascii_digit()
                    || c.is_ascii_lowercase()
                    || scheme.filename.sort_tag.extra_chars.contains(c)
            }) || scheme
                .filename
                .sort_tag
                .separator
                .starts_with(FILENAME_DOTFILE_MARKER)
            {
                return Err(LibCfgError::SortTagSeparator {
                    scheme_name: scheme.name.to_string(),
                    dot_file_marker: FILENAME_DOTFILE_MARKER,
                    chars: scheme
                        .filename
                        .sort_tag
                        .extra_chars
                        .escape_default()
                        .to_string(),
                    separator: scheme
                        .filename
                        .sort_tag
                        .separator
                        .escape_default()
                        .to_string(),
                });
            }
            // Check for obvious configuration errors.
            // * `copy_counter.extra_separator` must be one of
            //   `sanitize_filename_reader_friendly::TRIM_LINE_CHARS`.
            if !TRIM_LINE_CHARS.contains(&scheme.filename.copy_counter.extra_separator) {
                return Err(LibCfgError::CopyCounterExtraSeparator {
                    scheme_name: scheme.name.to_string(),
                    chars: TRIM_LINE_CHARS.escape_default().to_string(),
                    extra_separator: scheme
                        .filename
                        .copy_counter
                        .extra_separator
                        .escape_default()
                        .to_string(),
                });
            }
            // Assert that `filename.extension_default` is listed in
            // `filename.extensions[..].0`.
            if !scheme
                .filename
                .extensions
                .iter()
                .any(|ext| ext.0 == scheme.filename.extension_default)
            {
                return Err(LibCfgError::ExtensionDefault {
                    scheme_name: scheme.name.to_string(),
                    extension_default: scheme.filename.extension_default.to_owned(),
                    extensions: {
                        // Build a comma separated list of all registered
                        // extensions for the error message.
                        let mut list = scheme.filename.extensions.iter().fold(
                            String::new(),
                            |mut output, (k, _v1, _v2)| {
                                let _ = write!(output, "{k}, ");
                                output
                            },
                        );
                        list.truncate(list.len().saturating_sub(2));
                        list
                    },
                });
            }
            // Propagate a deferred `get_lang` configuration error, if any.
            if let Mode::Error(e) = &scheme.tmpl.filter.get_lang.mode {
                return Err(e.clone());
            }
            // Assert that `filter.get_lang.relative_distance_min` is
            // between `0.0` and `0.99`.
            let dist = scheme.tmpl.filter.get_lang.relative_distance_min;
            if !(0.0..=0.99).contains(&dist) {
                return Err(LibCfgError::MinimumRelativeDistanceInvalid {
                    scheme_name: scheme.name.to_string(),
                    dist,
                });
            }
        }
        // Highlighting config is valid?
        // Validate `tmpl_html.viewer_highlighting_theme` and
        // `tmpl_html.exporter_highlighting_theme`.
        #[cfg(feature = "renderer")]
        {
            let hl_theme_set = ThemeSet::load_defaults();
            let hl_theme_name = &self.tmpl_html.viewer_highlighting_theme;
            // An empty theme name is allowed and skips the check.
            if !hl_theme_name.is_empty() && !hl_theme_set.themes.contains_key(hl_theme_name) {
                return Err(LibCfgError::HighlightingThemeName {
                    var: "viewer_highlighting_theme".to_string(),
                    value: hl_theme_name.to_owned(),
                    available: hl_theme_set.themes.into_keys().fold(
                        String::new(),
                        |mut output, k| {
                            let _ = write!(output, "{k}, ");
                            output
                        },
                    ),
                });
            };
            let hl_theme_name = &self.tmpl_html.exporter_highlighting_theme;
            if !hl_theme_name.is_empty() && !hl_theme_set.themes.contains_key(hl_theme_name) {
                return Err(LibCfgError::HighlightingThemeName {
                    var: "exporter_highlighting_theme".to_string(),
                    value: hl_theme_name.to_owned(),
                    available: hl_theme_set.themes.into_keys().fold(
                        String::new(),
                        |mut output, k| {
                            let _ = write!(output, "{k}, ");
                            output
                        },
                    ),
                });
            };
        }
        Ok(())
    }
}
/// Parses the embedded default configuration `LIB_CONFIG_DEFAULT_TOML`
/// (compiled in from `./config_default.toml`) into a `LibCfg`.
/// Panics if the embedded TOML cannot be parsed.
impl Default for LibCfg {
    fn default() -> Self {
        toml::from_str(LIB_CONFIG_DEFAULT_TOML)
            .expect("Error parsing LIB_CONFIG_DEFAULT_TOML into LibCfg")
    }
}
impl TryFrom<LibCfgIntermediate> for LibCfg {
    type Error = LibCfgError;
    /// Constructor expecting a `LibCfgIntermediate` struct as input.
    /// The variables `scheme`,
    /// `tmpl_html.viewer_highlighting_css` and
    /// `tmpl_html.exporter_highlighting_css` are processed before
    /// storing in `Self`:
    /// 1. The entries in `scheme` are merged into copies of
    ///    `base_scheme` and the results are stored in `LibCfg.scheme`
    /// 2. If `tmpl_html.viewer_highlighting_css` is empty,
    ///    a css is calculated from `tmpl_html.viewer_highlighting_theme`
    ///    and stored in `LibCfg.tmpl_html.viewer_highlighting_css`.
    /// 3. Do the same for `tmpl_html.exporter_highlighting_css`.
    ///
    /// Finally `assert_validity()` performs additional semantic checks.
    fn try_from(lib_cfg_raw: LibCfgIntermediate) -> Result<Self, Self::Error> {
        let mut raw = lib_cfg_raw;
        // Now we merge all `scheme` into a copy of `base_scheme` and
        // parse the result into a `Vec<Scheme>`.
        //
        // Here we keep the result after merging and parsing.
        let mut schemes: Vec<Scheme> = vec![];
        // Get `scheme`s in `config` as toml array. Clears the map as it is not
        // needed any more.
        if let Some(toml::Value::Array(lib_cfg_scheme)) = raw
            .scheme
            .drain()
            // Silently ignore all potential toml variables other than `scheme`.
            .filter(|(k, _)| k == "scheme")
            .map(|(_, v)| v)
            .next()
        {
            // Merge all `s` into a `base_scheme`, parse the result into a `Scheme`
            // and collect a `Vector`. `merge_depth=0` means we never append
            // to left-hand arrays, we always overwrite them.
            schemes = lib_cfg_scheme
                .into_iter()
                .map(|v| CfgVal::merge_toml_values(raw.base_scheme.clone(), v, 0))
                .map(|v| v.try_into().map_err(|e| e.into()))
                .collect::<Result<Vec<Scheme>, LibCfgError>>()?;
        }
        let raw = raw; // Freeze.
        let mut tmpl_html = raw.tmpl_html;
        // Now calculate `tmpl_html.viewer_highlighting_css`:
        #[cfg(feature = "renderer")]
        let css = if !tmpl_html.viewer_highlighting_css.is_empty() {
            // Keep the CSS the user configured.
            tmpl_html.viewer_highlighting_css
        } else {
            // Otherwise derive it from the configured theme.
            get_highlighting_css(&tmpl_html.viewer_highlighting_theme)
        };
        // NOTE(review): without the `renderer` feature any user supplied
        // CSS is discarded and replaced by an empty string — confirm intended.
        #[cfg(not(feature = "renderer"))]
        let css = String::new();
        tmpl_html.viewer_highlighting_css = css;
        // Calculate `tmpl_html.exporter_highlighting_css` the same way:
        #[cfg(feature = "renderer")]
        let css = if !tmpl_html.exporter_highlighting_css.is_empty() {
            tmpl_html.exporter_highlighting_css
        } else {
            get_highlighting_css(&tmpl_html.exporter_highlighting_theme)
        };
        #[cfg(not(feature = "renderer"))]
        let css = String::new();
        tmpl_html.exporter_highlighting_css = css;
        // Store the result:
        let res = LibCfg {
            // Copy the parts of `config` into `LIB_CFG`.
            scheme_sync_default: raw.scheme_sync_default,
            scheme: schemes,
            tmpl_html,
        };
        // Perform some additional semantic checks.
        res.assert_validity()?;
        Ok(res)
    }
}
/// This constructor accepts as input the newtype `CfgVal` containing
/// a `toml::map::Map<String, Value>`. Each `String` is the name of a top
/// level configuration variable.
/// The map is first converted into a generic `toml::Value`, which is then
/// deserialized into `LibCfg`. The deserialization runs through the
/// internal intermediate representation (cf.
/// `impl TryFrom<LibCfgIntermediate> for LibCfg`), which processes the
/// input as follows:
///
/// 1. Merge each incomplete `CfgVal(key="scheme")` into
///    `CfgVal(key="base_scheme")` and
///    store the resulting `scheme` struct in `LibCfg.scheme`.
/// 2. If `CfgVal(key="html_tmpl.viewer_highlighting_css")` is empty, generate
///    the value from `CfgVal(key="tmpl.viewer_highlighting_theme")`.
/// 3. Do the same for `CfgVal(key="html_tmpl.exporter_highlighting_css")`.
impl TryFrom<CfgVal> for LibCfg {
    type Error = LibCfgError;
    fn try_from(cfg_val: CfgVal) -> Result<Self, Self::Error> {
        let toml_val: toml::Value = cfg_val.into();
        let lib_cfg: LibCfg = toml_val.try_into()?;
        Ok(lib_cfg)
    }
}
/// Configuration data, deserialized from the configuration file.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Scheme {
    /// The name under which this scheme is selected, e.g. by
    /// `scheme_sync_default` or the `TPNOTE_SCHEME` environment variable.
    pub name: String,
    /// Configuration of filename parsing.
    pub filename: Filename,
    /// Configuration of content and filename templates.
    pub tmpl: Tmpl,
}
/// Configuration of filename parsing, deserialized from the
/// configuration file.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Filename {
    /// How sort-tags at the beginning of a filename are recognized.
    pub sort_tag: SortTag,
    /// How copy counters in filenames are recognized.
    pub copy_counter: CopyCounter,
    /// File extension (without dot, e.g. `md`) given to new note files;
    /// can be overruled by the `TPNOTE_EXTENSION_DEFAULT` env. var.
    pub extension_default: String,
    /// Known file extensions, each mapped to its input converter and
    /// markup language.
    pub extensions: Vec<(String, InputConverter, MarkupLanguage)>,
}
/// Configuration for sort-tag parsing.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct SortTag {
    /// Additional characters allowed in sort-tags (exact semantics are
    /// defined by the filename parser — verify there).
    pub extra_chars: String,
    /// String separating the sort-tag from the rest of the filename.
    pub separator: String,
    /// Extra separator character (semantics defined by the filename parser).
    pub extra_separator: char,
    /// Maximum number of letters that may appear in succession.
    pub letters_in_succession_max: u8,
    /// Requirements for sequential (chronological) sort-tags.
    pub sequential: Sequential,
}
/// Requirements for chronological sort tags.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Sequential {
    /// Maximum number of digits that may appear in succession in a
    /// sequential sort-tag.
    pub digits_in_succession_max: u8,
}
/// Configuration for copy-counter parsing.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct CopyCounter {
    /// Extra separator string around the copy counter (semantics defined
    /// by the filename parser).
    pub extra_separator: String,
    /// Bracket string preceding the copy counter.
    pub opening_brackets: String,
    /// Bracket string following the copy counter.
    pub closing_brackets: String,
}
/// Filename templates and content templates, deserialized from the
/// configuration file.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Tmpl {
    /// Front matter variable localization and assertions.
    pub fm_var: FmVar,
    /// Configuration of the Tera template filters.
    pub filter: Filter,
    /// Content template: new note created from a directory.
    pub from_dir_content: String,
    /// Filename template: new note created from a directory.
    pub from_dir_filename: String,
    /// Content template: new note created from a text file.
    pub from_text_file_content: String,
    /// Filename template: new note created from a text file.
    pub from_text_file_filename: String,
    /// Content template: note annotating some other file.
    pub annotate_file_content: String,
    /// Filename template: note annotating some other file.
    pub annotate_file_filename: String,
    /// Filename template used when synchronizing the filename with the
    /// note's front matter.
    pub sync_filename: String,
}
/// Configuration describing how to localize and check front matter variables.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct FmVar {
    /// Pairs mapping a front matter variable name to its localized name.
    pub localization: Vec<(String, String)>,
    /// For each front matter variable name, the list of assertions it must
    /// satisfy.
    pub assertions: Vec<(String, Vec<Assertion>)>,
}
/// Configuration related to various Tera template filters.
#[derive(Default, Debug, Clone, PartialEq, Deserialize, Serialize)]
pub struct Filter {
    /// Configuration of the `get_lang` filter (language detection).
    pub get_lang: GetLang,
    /// Pairs `[language subtag, language tag]`, e.g. `["de", "de-DE"]`,
    /// consumed by the `map_lang` filter; entries shorter than two items
    /// are ignored.
    pub map_lang: Vec<Vec<String>>,
    /// Tab width for the `to_yaml` filter (presumably its indentation
    /// width — confirm in the filter implementation).
    pub to_yaml_tab: u64,
}
/// Configuration of the `get_lang` Tera template filter (language
/// detection). Deserialization goes through `GetLangIntermediate`, which
/// validates the language codes (cf. the `serde(try_from)` attribute).
#[derive(Default, Debug, Clone, PartialEq, Deserialize, Serialize)]
#[serde(try_from = "GetLangIntermediate")]
pub struct GetLang {
    /// Whether and how language detection operates (cf. `Mode`).
    pub mode: Mode,
    /// Languages the detection algorithm chooses between; with the
    /// `lang-detection` feature these are validated ISO 639-1 codes.
    #[cfg(feature = "lang-detection")]
    pub language_candidates: Vec<IsoCode639_1>,
    #[cfg(not(feature = "lang-detection"))]
    pub language_candidates: Vec<String>,
    /// Minimum relative distance; `assert_validity()` requires a value in
    /// `0.0..=0.99`.
    pub relative_distance_min: f64,
    /// Minimum number of consecutive words (semantics defined by the
    /// detection algorithm).
    pub consecutive_words_min: usize,
    /// Minimum percentage of total words (semantics defined by the
    /// detection algorithm).
    pub words_total_percentage_min: usize,
}
/// String-typed deserialization proxy for `GetLang`: `serde` first reads
/// this struct, then `TryFrom<GetLangIntermediate> for GetLang` validates
/// and converts the language codes.
#[derive(Default, Debug, Clone, PartialEq, Deserialize, Serialize)]
struct GetLangIntermediate {
    /// See `GetLang::mode`.
    pub mode: Mode,
    /// Not yet validated language codes, e.g. `"de"`.
    pub language_candidates: Vec<String>,
    /// See `GetLang::relative_distance_min`.
    pub relative_distance_min: f64,
    /// See `GetLang::consecutive_words_min`.
    pub consecutive_words_min: usize,
    /// See `GetLang::words_total_percentage_min`.
    pub words_total_percentage_min: usize,
}
impl TryFrom<GetLangIntermediate> for GetLang {
type Error = LibCfgError; // Use String as error type just for simplicity
fn try_from(value: GetLangIntermediate) -> Result<Self, Self::Error> {
let GetLangIntermediate {
mode,
language_candidates,
relative_distance_min,
consecutive_words_min,
words_total_percentage_min,
} = value;
#[cfg(feature = "lang-detection")]
let language_candidates: Vec<IsoCode639_1> = language_candidates
.iter()
// No `to_uppercase()` required, this is done automatically by
// `IsoCode639_1::from_str`.
.map(|l| {
IsoCode639_1::from_str(l.trim())
// Emit proper error message.
.map_err(|_| {
// The error path.
// Produce list of all available languages.
let mut all_langs = lingua::Language::all()
.iter()
.map(|l| {
let mut s = l.iso_code_639_1().to_string();
s.push_str(", ");
s
})
.collect::<Vec<String>>();
all_langs.sort();
let mut all_langs = all_langs.into_iter().collect::<String>();
all_langs.truncate(all_langs.len() - ", ".len());
// Insert data into error object.
LibCfgError::ParseLanguageCode {
language_code: l.into(),
all_langs,
}
})
})
.collect::<Result<Vec<IsoCode639_1>, LibCfgError>>()?;
Ok(GetLang {
mode,
language_candidates,
relative_distance_min,
consecutive_words_min,
words_total_percentage_min,
})
}
}
/// Operating mode of the `get_lang` Tera template filter.
#[derive(Default, Debug, Clone, PartialEq, Deserialize, Serialize)]
pub enum Mode {
    /// The `get_lang` filter is disabled. No language guessing occurs.
    Disabled,
    /// The algorithm of the `get_lang` filter assumes that the input is
    /// monolingual. Only one language is searched and reported.
    Monolingual,
    /// The algorithm of the `get_lang` filter assumes that the input may be
    /// multilingual. If present in the input, more than one language can be
    /// reported.
    #[default]
    Multilingual,
    /// Variant to represent the error state of an invalid `GetLang` object.
    /// Never read from a configuration file (cf. `serde(skip)`).
    #[serde(skip)]
    Error(LibCfgError),
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | true |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-lib/src/settings.rs | tpnote-lib/src/settings.rs | //! Configuration data that origins from environment variables.
//! Unlike the configuration data in `LIB_CFG` which is sourced only once when
//! Tp-Note is launched, the `SETTINGS` object may be sourced more often in
//! order to follow changes in the related environment variables.
use crate::config::{GetLang, LIB_CFG, Mode};
use crate::error::LibCfgError;
#[cfg(feature = "lang-detection")]
use lingua;
#[cfg(feature = "lang-detection")]
use lingua::IsoCode639_1;
use parking_lot::RwLock;
use std::borrow::Cow;
use std::collections::BTreeMap;
use std::env;
#[cfg(feature = "lang-detection")]
use std::str::FromStr;
#[cfg(target_family = "windows")]
use windows_sys::Win32::Globalization::GetUserDefaultLocaleName;
#[cfg(target_family = "windows")]
use windows_sys::Win32::System::SystemServices::LOCALE_NAME_MAX_LENGTH;
/// The name of the environment variable which can be optionally set to
/// overwrite the `scheme_default` configuration file setting.
pub const ENV_VAR_TPNOTE_SCHEME: &str = "TPNOTE_SCHEME";
/// The name of the environment variable which can be optionally set to
/// overwrite the `filename.extension_default` configuration file setting.
pub const ENV_VAR_TPNOTE_EXTENSION_DEFAULT: &str = "TPNOTE_EXTENSION_DEFAULT";
/// Name of the environment variable, that can be optionally
/// used to overwrite the user's default language setting, which is
/// accessible as `{{ lang }}` template variable and used in various
/// templates.
pub const ENV_VAR_TPNOTE_LANG: &str = "TPNOTE_LANG";
/// A pseudo language tag (`+all`) for the `get_lang` filter. When placed
/// in the `TPNOTE_LANG_DETECTION` list, all available languages are
/// selected.
pub const ENV_VAR_TPNOTE_LANG_PLUS_ALL: &str = "+all";
/// Name of the environment variable, that can be optionally
/// used to overwrite the user's `tmpl.filter.get_lang.language_candidates`
/// and `tmpl.filter.map_lang` configuration file setting.
pub const ENV_VAR_TPNOTE_LANG_DETECTION: &str = "TPNOTE_LANG_DETECTION";
/// Name of the environment variable, that can be optionally
/// used to overwrite the user's login name. The result is accessible as
/// `{{ username }}` template variable and used in various templates.
pub const ENV_VAR_TPNOTE_USER: &str = "TPNOTE_USER";
/// Name of the `LOGNAME` environment variable (first fallback for
/// `TPNOTE_USER`).
const ENV_VAR_LOGNAME: &str = "LOGNAME";
/// Name of the `USERNAME` environment variable (second fallback).
const ENV_VAR_USERNAME: &str = "USERNAME";
/// Name of the `USER` environment variable (last fallback).
const ENV_VAR_USER: &str = "USER";
/// Name of the `LANG` environment variable (not read on Windows).
#[cfg(not(target_family = "windows"))]
const ENV_VAR_LANG: &str = "LANG";
/// Struct containing additional user configuration read from or depending
/// on environment variables.
#[derive(Debug)]
#[allow(dead_code)]
pub(crate) struct Settings {
    /// Index of the currently selected scheme, as the schemes are listed
    /// in the config file.
    pub current_scheme: usize,
    /// This has the format of a login name.
    pub author: String,
    /// Contains the content of the environment variable `TPNOTE_LANG` or
    /// -if not defined- `LANG`, normalized as
    /// [RFC 5646, Tags for the Identification of Languages](http://www.rfc-editor.org/rfc/rfc5646.txt).
    /// This will be injected as `lang` variable into content templates.
    pub lang: String,
    /// The `force_lang` option of this API. When empty no language forcing
    /// happens (implemented in templates).
    /// This will be injected as `force_lang` variable into content templates.
    pub force_lang: String,
    /// Extension without dot, e.g. `md`
    pub extension_default: String,
    /// Effective configuration of the `get_lang` filter. See definition of
    /// type.
    pub get_lang_filter: GetLang,
    /// The keys and values from
    /// `LIB_CFG.scheme[self.current_scheme].tmpl.filter.map_lang` in the
    /// `BTreeMap`, with the user's default language and region added.
    pub map_lang_filter_btmap: Option<BTreeMap<String, String>>,
}
/// Compile-time constructed default: empty strings, scheme index 0 and a
/// disabled `get_lang` filter. Used to initialize the global `SETTINGS`.
const DEFAULT_SETTINGS: Settings = Settings {
    current_scheme: 0,
    author: String::new(),
    lang: String::new(),
    force_lang: String::new(),
    extension_default: String::new(),
    get_lang_filter: GetLang {
        mode: Mode::Disabled,
        language_candidates: vec![],
        relative_distance_min: 0.0,
        consecutive_words_min: 0,
        words_total_percentage_min: 0,
    },
    map_lang_filter_btmap: None,
};
impl Default for Settings {
    /// Production builds: the all-empty `DEFAULT_SETTINGS`.
    #[cfg(not(any(test, doc)))]
    fn default() -> Self {
        DEFAULT_SETTINGS
    }
    /// Test and doc builds: `DEFAULT_SETTINGS` with a fixed test author,
    /// language tag and note extension filled in.
    /// Do not use outside of tests.
    #[cfg(any(test, doc))]
    fn default() -> Self {
        Settings {
            author: String::from("testuser"),
            lang: String::from("ab-AB"),
            extension_default: String::from("md"),
            ..DEFAULT_SETTINGS
        }
    }
}
/// Global mutable variable of type `Settings`, guarded by an `RwLock`.
#[cfg(not(test))]
pub(crate) static SETTINGS: RwLock<Settings> = RwLock::new(DEFAULT_SETTINGS);
#[cfg(test)]
/// Global default for `SETTINGS` in test environments.
pub(crate) static SETTINGS: RwLock<Settings> = RwLock::new(DEFAULT_SETTINGS);
/// Like `Settings::update`, with `scheme_source = SchemeSource::Force("default")`
/// and `force_lang = None`.
/// This is used in doctests only.
pub fn set_test_default_settings() -> Result<(), LibCfgError> {
    SETTINGS
        .write()
        .update(SchemeSource::Force("default"), None)
}
/// How should `Settings::update` determine the current scheme?
#[derive(Debug, Clone)]
pub(crate) enum SchemeSource<'a> {
    /// Ignore environment and configuration, take this scheme name.
    Force(&'a str),
    /// Take the value `lib_cfg.scheme_sync_default`.
    SchemeSyncDefault,
    /// Take env. var. `TPNOTE_SCHEME` or -if not defined or empty- take
    /// this scheme name.
    SchemeNewDefault(&'a str),
}
impl Settings {
/// (Re)read environment variables and store them in the global `SETTINGS`
/// object. Some data originates from `LIB_CFG`.
/// First set `self.current_scheme`:
/// 1. If `SchemeSource::Force(Some(scheme))`, take `scheme`,
/// or,
/// 2. if `SchemeSource::SchemeSyncDefault`, take `lib_cfg.scheme_sync_default`,
/// or,
/// 3. if `SchemeSource::SchemeNewDefault(s)` take the environment variable
/// `TPNOTE_SCHEME`,
/// or, -if the above environment variable is not defined or empty-
/// 4. take `s`.
///
/// Then, it set all other fields.
/// Reads the environment variable `TPNOTE_LANG` or if not defined- `LANG`
/// -into `SETTINGS.lang`.
/// If `force_lang` is `Some` then copy it into `SETTINGS.force_lang`.
/// If `force_lang` is `Some("")` then copy `Settings.lang` into
/// `SETTINGS.force_lang`.
pub(crate) fn update(
&mut self,
scheme_source: SchemeSource,
// TODO: `force_lang=Some("")` is a sentinal value in this API and
// should be replaced by a proper enum variant.
force_lang: Option<&str>,
) -> Result<(), LibCfgError> {
self.update_current_scheme(scheme_source)?;
self.update_author();
self.update_extension_default();
self.update_lang(force_lang);
self.update_get_lang_filter();
self.update_map_lang_filter_btmap();
self.update_env_lang_detection();
log::trace!(
"`SETTINGS` updated (reading config + env. vars.):\n{:#?}",
self
);
if let Mode::Error(e) = &self.get_lang_filter.mode {
Err(e.clone())
} else {
Ok(())
}
}
/// Set `SETTINGS.current_scheme`:
/// 1. If `SchemeSource::Force(Some(scheme))`, take `scheme`,
/// or,
/// 2. if `SchemeSource::SchemeSyncDefault`, take `lib_cfg.scheme_sync_default`,
/// or,
/// 3. if `SchemeSource::SchemeNewDefault(s)` take the environment variable
/// `TPNOTE_SCHEME`,
/// or, -if the above environment variable is not defined or empty-
/// 4. take `s`.
pub(crate) fn update_current_scheme(
&mut self,
scheme_source: SchemeSource,
) -> Result<(), LibCfgError> {
let lib_cfg = LIB_CFG.read_recursive();
let scheme = match scheme_source {
SchemeSource::Force(s) => Cow::Borrowed(s),
SchemeSource::SchemeSyncDefault => Cow::Borrowed(&*lib_cfg.scheme_sync_default),
SchemeSource::SchemeNewDefault(s) => match env::var(ENV_VAR_TPNOTE_SCHEME) {
Ok(ed_env) if !ed_env.is_empty() => Cow::Owned(ed_env),
Err(_) | Ok(_) => Cow::Borrowed(s),
},
};
self.current_scheme = lib_cfg.scheme_idx(scheme.as_ref())?;
Ok(())
}
/// Set `SETTINGS.author` to content of the first not empty environment
/// variable: `TPNOTE_USER`, `LOGNAME` or `USER`.
fn update_author(&mut self) {
let author = env::var(ENV_VAR_TPNOTE_USER).unwrap_or_else(|_| {
env::var(ENV_VAR_LOGNAME).unwrap_or_else(|_| {
env::var(ENV_VAR_USERNAME)
.unwrap_or_else(|_| env::var(ENV_VAR_USER).unwrap_or_default())
})
});
// Store result.
self.author = author;
}
/// Read the environment variable `TPNOTE_EXTENSION_DEFAULT` or -if empty-
/// the configuration file variable `filename.extension_default` into
/// `SETTINGS.extension_default`.
fn update_extension_default(&mut self) {
// Get the environment variable if it exists.
let ext = match env::var(ENV_VAR_TPNOTE_EXTENSION_DEFAULT) {
Ok(ed_env) if !ed_env.is_empty() => ed_env,
Err(_) | Ok(_) => {
let lib_cfg = LIB_CFG.read_recursive();
lib_cfg.scheme[self.current_scheme]
.filename
.extension_default
.to_string()
}
};
self.extension_default = ext;
}
    /// Reads the environment variable `TPNOTE_LANG` or if not defined- `LANG`
    /// -into `SETTINGS.lang`.
    /// If `force_lang` is `Some` then copy it into `SETTINGS.force_lang`.
    /// If `force_lang` is `Some("")` then copy `Settings.lang` into
    /// `SETTINGS.force_lang`.
    ///
    fn update_lang(&mut self, force_lang: Option<&str>) {
        // Get the user's language tag.
        // [RFC 5646, Tags for the Identification of Languages](http://www.rfc-editor.org/rfc/rfc5646.txt)
        let mut lang = String::new();
        // Get the environment variable if it exists.
        let tpnotelang = env::var(ENV_VAR_TPNOTE_LANG).ok();
        // Unix/MacOS version.
        #[cfg(not(target_family = "windows"))]
        if let Some(tpnotelang) = tpnotelang {
            lang = tpnotelang;
        } else {
            // `LANG` usually has the form `language_TERRITORY.codeset`,
            // e.g. `en_GB.UTF-8`.
            // [Linux: Define Locale and Language Settings -
            // ShellHacks](https://www.shellhacks.com/linux-define-locale-language-settings/)
            if let Ok(lang_env) = env::var(ENV_VAR_LANG) {
                if !lang_env.is_empty() {
                    // [ISO 639](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) language code.
                    let mut language = "";
                    // [ISO 3166](https://en.wikipedia.org/wiki/ISO_3166-1#Current_codes) country code.
                    let mut territory = "";
                    if let Some((l, lang_env)) = lang_env.split_once('_') {
                        language = l;
                        if let Some((t, _codeset)) = lang_env.split_once('.') {
                            territory = t;
                        }
                    }
                    // NOTE(review): if `LANG` contains no `_` (e.g. `C`) or
                    // no `.`, this yields `-` or e.g. `en-` respectively —
                    // confirm this is tolerated downstream (the filter
                    // updates guard against empty subtags).
                    lang = language.to_string();
                    lang.push('-');
                    lang.push_str(territory);
                }
            }
        }
        // Get the user's language tag.
        // Windows version.
        #[cfg(target_family = "windows")]
        if let Some(tpnotelang) = tpnotelang {
            lang = tpnotelang;
        } else {
            let mut buf = [0u16; LOCALE_NAME_MAX_LENGTH as usize];
            // Returned `len` counts UTF-16 units including the terminating
            // NUL (per the Windows API docs); `0` signals failure.
            let len = unsafe { GetUserDefaultLocaleName(buf.as_mut_ptr(), buf.len() as i32) };
            if len > 0 {
                // Drop the trailing NUL before converting.
                lang = String::from_utf16_lossy(&buf[..((len - 1) as usize)]);
            }
        };
        // Store result.
        self.lang = lang;
        // Store `force_lang` argument.
        self.force_lang = match force_lang {
            Some("") => self.lang.clone(),
            Some(lang) => lang.to_owned(),
            None => String::new(),
        };
    }
/// Copies the settings form
/// `LIB_CFG.schemes[settings.scheme].tmpl.filter.get_lang` into
/// `SETTINGS.get_lang_filter`. Then append the user's
/// default language subtag to
/// `SETTINGS.get_lang_filter.language_candidates`.
/// Errors are stored in the `SETTINGS.filter.get_lang.mode` `Mode::Error(e)` variant.
#[cfg(feature = "lang-detection")]
fn update_get_lang_filter(&mut self) {
use crate::config::Mode;
{
let lib_cfg = LIB_CFG.read_recursive();
let current_scheme = &lib_cfg.scheme[self.current_scheme];
// Start form config.
self.get_lang_filter = current_scheme.tmpl.filter.get_lang.clone();
} // Release lock.
// Check if disabled in config file. Early return.
if matches!(self.get_lang_filter.mode, Mode::Disabled) {
return;
}
// Read ISO codes from config object.
let iso_codes = &mut self.get_lang_filter.language_candidates;
// Check if all languages are selected, then we can return early.
if iso_codes.is_empty() {
return;
}
// Add the user's language subtag as reported from the OS.
// Silently ignore if anything goes wrong here.
if !self.lang.is_empty() {
if let Some((lang_subtag, _)) = self.lang.split_once('-') {
if let Ok(iso_code) = IsoCode639_1::from_str(lang_subtag) {
if !iso_codes.contains(&iso_code) {
iso_codes.push(iso_code);
}
}
}
}
// Check if there are at least 2 languages in the list.
if iso_codes.len() <= 1 {
self.get_lang_filter.mode = Mode::Error(LibCfgError::NotEnoughLanguageCodes {
language_code: iso_codes[0].to_string(),
})
}
}
    #[cfg(not(feature = "lang-detection"))]
    /// Without the `lang-detection` feature there is nothing to detect:
    /// disable the `get_lang` filter.
    fn update_get_lang_filter(&mut self) {
        self.get_lang_filter.mode = Mode::Disabled;
    }
/// Read keys and values from
/// `LIB_CFG.schemes[self.current_scheme].tmpl.filter_btmap_lang` in the
/// `BTreeMap`. Add the user's default language and region.
fn update_map_lang_filter_btmap(&mut self) {
let mut btm = BTreeMap::new();
let lib_cfg = LIB_CFG.read_recursive();
for l in &lib_cfg.scheme[self.current_scheme].tmpl.filter.map_lang {
if l.len() >= 2 {
btm.insert(l[0].to_string(), l[1].to_string());
};
}
// Insert the user's default language and region in the Map.
if !self.lang.is_empty() {
if let Some((lang_subtag, _)) = self.lang.split_once('-') {
// Do not overwrite existing languages.
if !lang_subtag.is_empty() && !btm.contains_key(lang_subtag) {
btm.insert(lang_subtag.to_string(), self.lang.to_string());
}
};
}
// Store result.
self.map_lang_filter_btmap = Some(btm);
}
/// Reads the environment variable `LANG_DETECTION`. If not empty,
/// parse the content and overwrite the `self.get_lang_filter` and the
/// `self.map_lang_filter` variables.
/// Finally, if `force_lang` is true, then it sets
/// `self.get_lang_filter.mode` Mode::Disabled.
#[cfg(feature = "lang-detection")]
fn update_env_lang_detection(&mut self) {
use crate::config::Mode;
if let Ok(env_var) = env::var(ENV_VAR_TPNOTE_LANG_DETECTION) {
if env_var.is_empty() {
// Early return.
self.get_lang_filter.mode = Mode::Disabled;
self.map_lang_filter_btmap = None;
log::debug!(
"Empty env. var. `{}` disables the `lang-detection` feature.",
ENV_VAR_TPNOTE_LANG_DETECTION
);
return;
}
// Read and convert ISO codes from config object.
let mut hm: BTreeMap<String, String> = BTreeMap::new();
let mut all_languages_selected = false;
let iso_codes = env_var
.split(',')
.map(|t| {
let t = t.trim();
if let Some((lang_subtag, _)) = t.split_once('-') {
// Do not overwrite existing languages.
if !lang_subtag.is_empty() && !hm.contains_key(lang_subtag) {
hm.insert(lang_subtag.to_string(), t.to_string());
};
lang_subtag
} else {
t
}
})
// Check if this is the pseudo tag `TMPL_GET_LANG_filter_ALL `.
.filter(|&l| {
if l == ENV_VAR_TPNOTE_LANG_PLUS_ALL {
all_languages_selected = true;
// Skip this string.
false
} else {
// Continue.
true
}
})
.map(|l| {
IsoCode639_1::from_str(l.trim()).map_err(|_| {
// The error path.
// Produce list of all available languages.
let mut all_langs = lingua::Language::all()
.iter()
.map(|l| {
let mut s = l.iso_code_639_1().to_string();
s.push_str(", ");
s
})
.collect::<Vec<String>>();
all_langs.sort();
let mut all_langs = all_langs.into_iter().collect::<String>();
all_langs.truncate(all_langs.len() - ", ".len());
// Insert data into error object.
LibCfgError::ParseLanguageCode {
language_code: l.into(),
all_langs,
}
})
})
.collect::<Result<Vec<IsoCode639_1>, LibCfgError>>();
match iso_codes {
// The happy path.
Ok(mut iso_codes) => {
// Add the user's language subtag as reported from the OS.
// Continue the happy path.
if !self.lang.is_empty() {
if let Some(lang_subtag) = self.lang.split('-').next() {
if let Ok(iso_code) = IsoCode639_1::from_str(lang_subtag) {
if !iso_codes.contains(&iso_code) {
iso_codes.push(iso_code);
}
// Check if there is a remainder (region code).
if lang_subtag != self.lang && !hm.contains_key(lang_subtag) {
hm.insert(lang_subtag.to_string(), self.lang.to_string());
}
}
}
}
// Store result.
if all_languages_selected {
self.get_lang_filter.language_candidates = vec![];
if matches!(self.get_lang_filter.mode, Mode::Disabled) {
self.get_lang_filter.mode = Mode::Multilingual;
}
} else {
match iso_codes.len() {
0 => self.get_lang_filter.mode = Mode::Disabled,
1 => {
self.get_lang_filter.mode =
Mode::Error(LibCfgError::NotEnoughLanguageCodes {
language_code: iso_codes[0].to_string(),
})
}
_ => {
self.get_lang_filter.language_candidates = iso_codes;
if matches!(self.get_lang_filter.mode, Mode::Disabled) {
self.get_lang_filter.mode = Mode::Multilingual;
}
}
}
}
self.map_lang_filter_btmap = Some(hm);
}
// The error path.
Err(e) =>
// Store error.
{
self.get_lang_filter.mode = Mode::Error(e);
}
}
}
}
    /// Fallback without the `lang-detection` feature: the env. var.
    /// `TPNOTE_LANG_DETECTION` cannot be honored. If it is defined and not
    /// empty, disable the `get_lang` filter, clear the `map_lang` table
    /// and log why.
    #[cfg(not(feature = "lang-detection"))]
    fn update_env_lang_detection(&mut self) {
        if let Ok(env_var) = env::var(ENV_VAR_TPNOTE_LANG_DETECTION) {
            if !env_var.is_empty() {
                self.get_lang_filter.mode = Mode::Disabled;
                self.map_lang_filter_btmap = None;
                log::debug!(
                    "Ignoring the env. var. `{}`. The `lang-detection` feature \
                    is not included in this build.",
                    ENV_VAR_TPNOTE_LANG_DETECTION
                );
            }
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
/// Attention: as these test-functions run in parallel, make sure that
/// each environment variable appears in one function only!
#[test]
fn test_update_author_setting() {
let mut settings = Settings::default();
unsafe {
env::set_var(ENV_VAR_LOGNAME, "testauthor");
}
settings.update_author();
assert_eq!(settings.author, "testauthor");
}
#[test]
fn test_update_extension_default_setting() {
let mut settings = Settings::default();
unsafe {
env::set_var(ENV_VAR_TPNOTE_EXTENSION_DEFAULT, "markdown");
}
settings.update_extension_default();
assert_eq!(settings.extension_default, "markdown");
let mut settings = Settings::default();
unsafe {
std::env::remove_var(ENV_VAR_TPNOTE_EXTENSION_DEFAULT);
}
settings.update_extension_default();
assert_eq!(settings.extension_default, "md");
}
#[test]
#[cfg(not(target_family = "windows"))]
fn test_update_lang_setting() {
// Test 1
let mut settings = Settings::default();
unsafe {
env::remove_var(ENV_VAR_TPNOTE_LANG);
env::set_var(ENV_VAR_LANG, "en_GB.UTF-8");
}
settings.update_lang(None);
assert_eq!(settings.lang, "en-GB");
// Test empty input.
let mut settings = Settings::default();
unsafe {
env::remove_var(ENV_VAR_TPNOTE_LANG);
env::set_var(ENV_VAR_LANG, "");
}
settings.update_lang(None);
assert_eq!(settings.lang, "");
// Test precedence of `TPNOTE_LANG`.
let mut settings = Settings::default();
unsafe {
env::set_var(ENV_VAR_TPNOTE_LANG, "it-IT");
env::set_var(ENV_VAR_LANG, "en_GB.UTF-8");
}
settings.update_lang(None);
assert_eq!(settings.lang, "it-IT");
}
#[test]
#[cfg(feature = "lang-detection")]
fn test_update_get_lang_filter_setting() {
// Test 1.
let mut settings = Settings {
lang: "en-GB".to_string(),
..Default::default()
};
settings.update_get_lang_filter();
let output_get_lang_filter = settings
.get_lang_filter
.language_candidates
.iter()
.map(|l| {
let mut l = l.to_string();
l.push(' ');
l
})
.collect::<String>();
assert_eq!(output_get_lang_filter, "en fr de ");
//
// Test 2.
let mut settings = Settings {
lang: "it-IT".to_string(),
..Default::default()
};
settings.update_get_lang_filter();
let output_get_lang_filter = settings
.get_lang_filter
.language_candidates
.iter()
.map(|l| {
let mut l = l.to_string();
l.push(' ');
l
})
.collect::<String>();
assert_eq!(output_get_lang_filter, "en fr de it ");
}
#[test]
fn test_update_map_lang_filter_hmap_setting() {
// Test 1.
let mut settings = Settings {
lang: "it-IT".to_string(),
..Default::default()
};
settings.update_map_lang_filter_btmap();
let output_map_lang_filter = settings.map_lang_filter_btmap.unwrap();
assert_eq!(output_map_lang_filter.get("de").unwrap(), "de-DE");
assert_eq!(output_map_lang_filter.get("et").unwrap(), "et-ET");
assert_eq!(output_map_lang_filter.get("it").unwrap(), "it-IT");
//
// Test short `settings.lang`.
let mut settings = Settings {
lang: "it".to_string(),
..Default::default()
};
settings.update_map_lang_filter_btmap();
let output_map_lang_filter = settings.map_lang_filter_btmap.unwrap();
assert_eq!(output_map_lang_filter.get("de").unwrap(), "de-DE");
assert_eq!(output_map_lang_filter.get("et").unwrap(), "et-ET");
assert_eq!(output_map_lang_filter.get("it"), None);
}
#[test]
#[cfg(feature = "lang-detection")]
fn test_update_env_lang_detection() {
// Test 1.
// Test short `settings.lang`.
let mut settings = Settings {
lang: "en-GB".to_string(),
..Default::default()
};
unsafe { env::set_var(ENV_VAR_TPNOTE_LANG_DETECTION, "fr-FR, de-DE, hu") };
settings.update_env_lang_detection();
let output_get_lang_filter = settings
.get_lang_filter
.language_candidates
.iter()
.map(|l| {
let mut l = l.to_string();
l.push(' ');
l
})
.collect::<String>();
assert_eq!(output_get_lang_filter, "fr de hu en ");
let output_map_lang_filter = settings.map_lang_filter_btmap.unwrap();
assert_eq!(output_map_lang_filter.get("de").unwrap(), "de-DE");
assert_eq!(output_map_lang_filter.get("fr").unwrap(), "fr-FR");
assert_eq!(output_map_lang_filter.get("en").unwrap(), "en-GB");
//
// Test 2.
let mut settings = Settings {
lang: "en-GB".to_string(),
..Default::default()
};
unsafe { env::set_var(ENV_VAR_TPNOTE_LANG_DETECTION, "de-DE, de-AT, en-US") };
settings.update_env_lang_detection();
let output_get_lang_filter = settings
.get_lang_filter
.language_candidates
.iter()
.map(|l| {
let mut l = l.to_string();
l.push(' ');
l
})
.collect::<String>();
assert_eq!(output_get_lang_filter, "de de en ");
let output_map_lang_filter = settings.map_lang_filter_btmap.unwrap();
assert_eq!(output_map_lang_filter.get("de").unwrap(), "de-DE");
assert_eq!(output_map_lang_filter.get("en").unwrap(), "en-US");
//
// Test 3.
let mut settings = Settings {
lang: "en-GB".to_string(),
..Default::default()
};
unsafe { env::set_var(ENV_VAR_TPNOTE_LANG_DETECTION, "de-DE, +all, en-US") };
settings.update_env_lang_detection();
assert!(settings.get_lang_filter.language_candidates.is_empty());
let output_map_lang_filter = settings.map_lang_filter_btmap.unwrap();
assert_eq!(output_map_lang_filter.get("de").unwrap(), "de-DE");
assert_eq!(output_map_lang_filter.get("en").unwrap(), "en-US");
//
// Test 4.
let mut settings = Settings {
lang: "en-GB".to_string(),
..Default::default()
};
unsafe { env::set_var(ENV_VAR_TPNOTE_LANG_DETECTION, "de-DE, de-AT, en") };
settings.update_env_lang_detection();
let output_get_lang_filter = settings
.get_lang_filter
.language_candidates
.iter()
.map(|l| {
let mut l = l.to_string();
l.push(' ');
l
})
.collect::<String>();
assert_eq!(output_get_lang_filter, "de de en ");
let output_map_lang_filter = settings.map_lang_filter_btmap.unwrap();
assert_eq!(output_map_lang_filter.get("de").unwrap(), "de-DE");
assert_eq!(output_map_lang_filter.get("en").unwrap(), "en-GB");
//
// Test 5.
let mut settings = Settings {
lang: "en-GB".to_string(),
..Default::default()
};
unsafe { env::set_var(ENV_VAR_TPNOTE_LANG_DETECTION, "de-DE, +all, de-AT, en") };
settings.update_env_lang_detection();
assert!(settings.get_lang_filter.language_candidates.is_empty());
let output_map_lang_filter = settings.map_lang_filter_btmap.unwrap();
assert_eq!(output_map_lang_filter.get("de").unwrap(), "de-DE");
assert_eq!(output_map_lang_filter.get("en").unwrap(), "en-GB");
// Test `force_lang`.
let mut settings = Settings {
lang: "en-GB".to_string(),
..Default::default()
};
unsafe { env::set_var(ENV_VAR_TPNOTE_LANG_DETECTION, "fr-FR, de-DE, hu") };
settings.update_env_lang_detection();
let output_map_lang_filter = settings.map_lang_filter_btmap.unwrap();
assert_eq!(output_map_lang_filter.get("de").unwrap(), "de-DE");
assert_eq!(output_map_lang_filter.get("fr").unwrap(), "fr-FR");
assert_eq!(output_map_lang_filter.get("en").unwrap(), "en-GB");
//
// Test empty env. var.
let mut settings = Settings {
lang: "".to_string(),
..Default::default()
};
unsafe { env::set_var(ENV_VAR_TPNOTE_LANG_DETECTION, "") };
settings.update_env_lang_detection();
assert_eq!(settings.get_lang_filter.mode, Mode::Disabled);
assert!(settings.map_lang_filter_btmap.is_none());
//
// Test faulty `settings.lang`.
let mut settings = Settings {
lang: "xy-XY".to_string(),
..Default::default()
};
unsafe { env::set_var(ENV_VAR_TPNOTE_LANG_DETECTION, "en-GB, fr") };
settings.update_env_lang_detection();
let output_get_lang_filter = settings
.get_lang_filter
.language_candidates
.iter()
.map(|l| {
let mut l = l.to_string();
l.push(' ');
l
})
.collect::<String>();
assert_eq!(output_get_lang_filter, "en fr ");
let output_map_lang_filter = settings.map_lang_filter_btmap.unwrap();
assert_eq!(output_map_lang_filter.get("en").unwrap(), "en-GB");
//
// Test faulty entry in list.
let mut settings = Settings {
lang: "en-GB".to_string(),
..Default::default()
};
unsafe {
env::set_var(ENV_VAR_TPNOTE_LANG_DETECTION, "de-DE, xy-XY");
}
settings.update_env_lang_detection();
assert!(matches!(settings.get_lang_filter.mode, Mode::Error(..)));
assert!(settings.map_lang_filter_btmap.is_none());
//
// Test empty list.
let mut settings = Settings {
lang: "en-GB".to_string(),
..Default::default()
};
unsafe {
env::set_var(ENV_VAR_TPNOTE_LANG_DETECTION, "");
}
settings.update_env_lang_detection();
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | true |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-lib/src/html.rs | tpnote-lib/src/html.rs | //! Helper functions dealing with HTML conversion.
use crate::clone_ext::CloneExt;
use crate::error::InputStreamError;
use crate::filename::{NotePath, NotePathStr};
use crate::{config::LocalLinkKind, error::NoteError};
use html_escape;
use parking_lot::RwLock;
use parse_hyperlinks::parser::Link;
use parse_hyperlinks_extras::iterator_html::HtmlLinkInlineImage;
use percent_encoding::percent_decode_str;
use std::path::MAIN_SEPARATOR_STR;
use std::{
borrow::Cow,
collections::HashSet,
path::{Component, Path, PathBuf},
sync::Arc,
};
pub(crate) const HTML_EXT: &str = ".html";
/// A local path can carry a format string at the end. This is the separator
/// character. (See `apply_format_attribute()` and `append_html_ext()` for
/// how the part before the separator is used.)
const FORMAT_SEPARATOR: char = '?';
/// If it follows directly after `FORMAT_SEPARATOR`, it selects the sort-tag
/// for further matching.
const FORMAT_ONLY_SORT_TAG: char = '#';
/// If it follows directly after `FORMAT_SEPARATOR`, it selects the whole
/// filename for further matching.
const FORMAT_COMPLETE_FILENAME: &str = "?";
/// A format string can be separated in a _from_ and a _to_ part. This
/// optional separator is placed after `FORMAT_SEPARATOR` and separates
/// the _from_ and the _to_ pattern.
const FORMAT_FROM_TO_SEPARATOR: char = ':';
/// If `rewrite_rel_paths` and `dest` is relative, concatenate `docdir` and
/// `dest`, then strip `root_path` from the left before returning.
/// If not `rewrite_rel_paths` and `dest` is relative, return `dest`.
/// If `rewrite_abs_paths` and `dest` is absolute, concatenate and return
/// `root_path` and `dest`.
/// If not `rewrite_abs_paths` and `dest` is absolute, return `dest`.
/// The `dest` portion of the output is always canonicalized.
/// Return the assembled path, when in `root_path`, or `None` otherwise.
/// Asserts in debug mode, that `docdir` is in `root_path`.
fn assemble_link(
    root_path: &Path,
    docdir: &Path,
    dest: &Path,
    rewrite_rel_paths: bool,
    rewrite_abs_paths: bool,
) -> Option<PathBuf> {
    /// Concatenate `path` and `append`, resolving `..` components on the
    /// fly (canonicalization without touching the filesystem).
    /// In case of underflow of an absolute link (more `..` than parent
    /// directories), the returned path is empty.
    fn append(path: &mut PathBuf, append: &Path) {
        // Append `dest` to `link` and canonicalize.
        for dir in append.components() {
            match dir {
                Component::ParentDir => {
                    if !path.pop() {
                        let path_is_relative = {
                            let mut c = path.components();
                            // The two `next()` calls inspect the first AND
                            // the second component: on Windows an absolute
                            // path starts with a `Prefix` component followed
                            // by `RootDir`.
                            !(c.next() == Some(Component::RootDir)
                                || c.next() == Some(Component::RootDir))
                        };
                        if path_is_relative {
                            // Keep the surplus `..` for relative paths.
                            path.push(Component::ParentDir.as_os_str());
                        } else {
                            // Underflow of an absolute path: signal failure
                            // by clearing the output.
                            path.clear();
                            break;
                        }
                    }
                }
                Component::Normal(c) => path.push(c),
                // `CurDir`, `RootDir` and `Prefix` are dropped.
                _ => {}
            }
        }
    }
    // Under Windows `.is_relative()` does not detect `Component::RootDir`,
    // so test the first two components directly (see `append()` above).
    let dest_is_relative = {
        let mut c = dest.components();
        !(c.next() == Some(Component::RootDir) || c.next() == Some(Component::RootDir))
    };
    // Check if the link points into `root_path`, reject otherwise
    // (strip_prefix will not work).
    debug_assert!(docdir.starts_with(root_path));
    // Calculate the output base; `dest` is appended below.
    let mut link = match (rewrite_rel_paths, rewrite_abs_paths, dest_is_relative) {
        // *** Relative links.
        // Result: "/" + docdir.strip(root_path) + dest
        (true, false, true) => {
            let link = PathBuf::from(Component::RootDir.as_os_str());
            link.join(docdir.strip_prefix(root_path).ok()?)
        }
        // Result: docdir + dest
        (true, true, true) => docdir.to_path_buf(),
        // Result: dest
        (false, _, true) => PathBuf::new(),
        // *** Absolute links.
        // Result: "/" + dest
        (_, false, false) => PathBuf::from(Component::RootDir.as_os_str()),
        // Result: "/" + root_path
        (_, true, false) => root_path.to_path_buf(),
    };
    append(&mut link, dest);
    // An empty path means `append()` underflowed: reject the link.
    if link.as_os_str().is_empty() {
        None
    } else {
        Some(link)
    }
}
/// Internal processing steps applied to every hyperlink or inline image
/// found in the HTML input (see `rewrite_links()` for the call order).
trait Hyperlink {
    /// A helper function, that first HTML escape decodes all strings of the
    /// link. Then it percent decodes the link destination (and the
    /// link text in case of an autolink).
    fn decode_ampersand_and_percent(&mut self);
    /// True if the value is a local link.
    #[allow(clippy::ptr_arg)]
    fn is_local_fn(value: &Cow<str>) -> bool;
    /// * `Link::Text2Dest`: strips a possible scheme in local `dest`.
    /// * `Link::Image2Dest`: strip local scheme in `dest`.
    /// * `Link::Image`: strip local scheme in `src`.
    ///
    /// No action if not local.
    fn strip_local_scheme(&mut self);
    /// Helper function that strips a possible scheme in `input`.
    fn strip_scheme_fn(input: &mut Cow<str>);
    /// True if the link is:
    /// * `Link::Text2Dest` and the link text equals the link destination, or
    /// * `Link::Image` and the link's `alt` equals the link source.
    ///
    /// WARNING: place this test after `decode_ampersand_and_percent()`
    /// and before: `rebase_local_link`, `expand_shorthand_link`,
    /// `rewrite_autolink` and `apply_format_attribute`.
    fn is_autolink(&self) -> bool;
    /// A method that converts the relative URLs (local links) in `self`
    /// in place. If successful, it returns `Ok(())`, otherwise
    /// `Err(NoteError::InvalidLocalPath)`.
    /// If `self` contains an absolute URL, no conversion is performed and the
    /// return value is `Ok(())`.
    ///
    /// Conversion details:
    /// The base path for this conversion (usually where the HTML file resides),
    /// is `docdir`. If not `rewrite_rel_paths`, relative local links are not
    /// converted. Furthermore, all local links starting with `/` are prepended
    /// with `root_path`. All absolute URLs always remain untouched.
    ///
    /// Algorithm:
    /// 1. If `rewrite_abs_paths==true` and `link` starts with `/`, concatenate
    ///    and return `root_path` and `dest`.
    /// 2. If `rewrite_abs_paths==false` and `dest` does not start with `/`,
    ///    return `dest`.
    /// 3. If `rewrite_ext==true` and the link points to a known Tp-Note file
    ///    extension, then `.html` is appended to the converted link.
    ///
    /// Remark: The _anchor's text property_ is never changed. However, there
    /// is one exception: when the text contains a URL starting with `http:` or
    /// `https:`, only the file stem is kept. Example, the anchor text property:
    /// `<a ...>http:dir/my file.md</a>` is rewritten into `<a ...>my file</a>`.
    ///
    /// Contracts:
    /// 1. `link` may have a scheme.
    /// 2. `link` is `Link::Text2Dest` or `Link::Image`
    /// 3. `root_path` and `docdir` are absolute paths to directories.
    /// 4. `root_path` is never empty `""`. It can be `"/"`.
    fn rebase_local_link(
        &mut self,
        root_path: &Path,
        docdir: &Path,
        rewrite_rel_paths: bool,
        rewrite_abs_paths: bool,
    ) -> Result<(), NoteError>;
    /// If `dest` in `Link::Text2Dest` contains only a sort
    /// tag as filename, expand the latter to a full filename.
    /// Otherwise, no action.
    /// This method accesses the filesystem. Therefore `prepend_path` is
    /// sometimes needed as a parameter; when given, it is prepended before
    /// the lookup.
    fn expand_shorthand_link(&mut self, prepend_path: Option<&Path>) -> Result<(), NoteError>;
    /// This removes a possible scheme in `text`.
    /// Call this method only when you are sure that this
    /// is an autolink by testing with `is_autolink()`.
    fn rewrite_autolink(&mut self);
    /// A formatting attribute is a format string starting with `?` followed
    /// by one or two patterns. It is appended to `dest` or `src`.
    /// Processing details:
    /// 1. Extract a possible formatting attribute string in `dest`
    ///    (`Link::Text2Dest`) or `src` (`Link::Image`) after `?`.
    /// 2. Extract the _path_ before `?` in `dest` or `src`.
    /// 3. Apply the formatting to _path_.
    /// 4. Store the result by overwriting `text` or `alt`.
    fn apply_format_attribute(&mut self);
    /// If the link destination `dest` is a local path, return it.
    /// Otherwise return `None`.
    /// Acts on `Link:Text2Dest` and `Link::Image2Dest` only.
    fn get_local_link_dest_path(&self) -> Option<&Path>;
    /// If `dest` or `src` is a local path, return it.
    /// Otherwise return `None`.
    /// Acts on `Link:Image` and `Link::Image2Dest` only.
    fn get_local_link_src_path(&self) -> Option<&Path>;
    /// If the extension of a local path in `dest` is some Tp-Note
    /// extension, append `.html` to the path. Otherwise silently return.
    /// Acts on `Link:Text2Dest` only.
    fn append_html_ext(&mut self);
    /// Renders `Link::Text2Dest`, `Link::Image2Dest` and `Link::Image`
    /// to HTML. Some characters in `dest` or `src` might be HTML
    /// escape encoded. This does not percent encode at all, because
    /// we know, that the result will be inserted later in a UTF-8 template.
    fn to_html(&self) -> String;
}
impl Hyperlink for Link<'_> {
    #[inline]
    fn decode_ampersand_and_percent(&mut self) {
        // HTML escape decode value.
        fn dec_amp(val: &mut Cow<str>) {
            let decoded_text = html_escape::decode_html_entities(val);
            if matches!(&decoded_text, Cow::Owned(..)) {
                // Does nothing, but satisfying the borrow checker. Does not `clone()`.
                let decoded_text = Cow::Owned(decoded_text.into_owned());
                // Store result.
                let _ = std::mem::replace(val, decoded_text);
            }
        }
        // HTML escape decode and percent decode value.
        fn dec_amp_percent(val: &mut Cow<str>) {
            dec_amp(val);
            // NOTE(review): `unwrap()` panics if the percent-decoded bytes
            // are not valid UTF-8 — confirm inputs are pre-validated upstream.
            let decoded_dest = percent_decode_str(val.as_ref()).decode_utf8().unwrap();
            if matches!(&decoded_dest, Cow::Owned(..)) {
                // Does nothing, but satisfying the borrow checker. Does not `clone()`.
                let decoded_dest = Cow::Owned(decoded_dest.into_owned());
                // Store result.
                let _ = std::mem::replace(val, decoded_dest);
            }
        }
        match self {
            Link::Text2Dest(text1, dest, title) => {
                dec_amp(text1);
                dec_amp_percent(dest);
                dec_amp(title);
            }
            Link::Image(alt, src) => {
                dec_amp(alt);
                dec_amp_percent(src);
            }
            Link::Image2Dest(text1, alt, src, text2, dest, title) => {
                dec_amp(text1);
                dec_amp(alt);
                dec_amp_percent(src);
                dec_amp(text2);
                dec_amp_percent(dest);
                dec_amp(title);
            }
            _ => unimplemented!(),
        };
    }
    // A destination is "local" unless it has a scheme with a non-empty
    // authority (`://` but not `:///`), or uses `mailto:`/`tel:`.
    fn is_local_fn(dest: &Cow<str>) -> bool {
        !((dest.contains("://") && !dest.contains(":///"))
            || dest.starts_with("mailto:")
            || dest.starts_with("tel:"))
    }
    // Strip the scheme of `dest`/`src`, but only when they are local.
    fn strip_local_scheme(&mut self) {
        fn strip(dest: &mut Cow<str>) {
            if <Link<'_> as Hyperlink>::is_local_fn(dest) {
                <Link<'_> as Hyperlink>::strip_scheme_fn(dest);
            }
        }
        match self {
            Link::Text2Dest(_, dest, _title) => strip(dest),
            Link::Image2Dest(_, _, src, _, dest, _) => {
                strip(src);
                strip(dest);
            }
            Link::Image(_, src) => strip(src),
            _ => {}
        };
    }
    // Remove a leading URL scheme; allocates only when something matched.
    fn strip_scheme_fn(inout: &mut Cow<str>) {
        let output = inout
            .trim_start_matches("https://")
            .trim_start_matches("https:")
            .trim_start_matches("http://")
            .trim_start_matches("http:")
            .trim_start_matches("tpnote:")
            .trim_start_matches("mailto:")
            .trim_start_matches("tel:");
        if output != inout.as_ref() {
            let _ = std::mem::replace(inout, Cow::Owned(output.to_string()));
        }
    }
    // An autolink is a link whose visible text equals its destination.
    fn is_autolink(&self) -> bool {
        let (text, dest) = match self {
            Link::Text2Dest(text, dest, _title) => (text, dest),
            Link::Image(alt, source) => (alt, source),
            // `Link::Image2Dest` is never an autolink.
            _ => return false,
        };
        text == dest
    }
    // Rewrite local `dest`/`src` in place via `assemble_link()`.
    fn rebase_local_link(
        &mut self,
        root_path: &Path,
        docdir: &Path,
        rewrite_rel_paths: bool,
        rewrite_abs_paths: bool,
    ) -> Result<(), NoteError> {
        let do_rebase = |path: &mut Cow<str>| -> Result<(), NoteError> {
            if <Link as Hyperlink>::is_local_fn(path) {
                let dest_out = assemble_link(
                    root_path,
                    docdir,
                    Path::new(path.as_ref()),
                    rewrite_rel_paths,
                    rewrite_abs_paths,
                )
                .ok_or(NoteError::InvalidLocalPath {
                    path: path.as_ref().to_string(),
                })?;
                // Store result.
                let new_dest = Cow::Owned(dest_out.to_str().unwrap_or_default().to_string());
                let _ = std::mem::replace(path, new_dest);
            }
            Ok(())
        };
        match self {
            Link::Text2Dest(_, dest, _) => do_rebase(dest),
            Link::Image2Dest(_, _, src, _, dest, _) => do_rebase(src).and_then(|_| do_rebase(dest)),
            Link::Image(_, src) => do_rebase(src),
            _ => unimplemented!(),
        }
    }
    // Replace a sort-tag-only `dest` with the full filename found on disk.
    fn expand_shorthand_link(&mut self, prepend_path: Option<&Path>) -> Result<(), NoteError> {
        let shorthand_link = match self {
            Link::Text2Dest(_, dest, _) => dest,
            Link::Image2Dest(_, _, _, _, dest, _) => dest,
            _ => return Ok(()),
        };
        if !<Link as Hyperlink>::is_local_fn(shorthand_link) {
            return Ok(());
        }
        // Split off a trailing `?format` attribute; it is re-appended below.
        let (shorthand_str, shorthand_format) = match shorthand_link.split_once(FORMAT_SEPARATOR) {
            Some((path, fmt)) => (path, Some(fmt)),
            None => (shorthand_link.as_ref(), None),
        };
        let shorthand_path = Path::new(shorthand_str);
        if let Some(sort_tag) = shorthand_str.is_valid_sort_tag() {
            let full_shorthand_path = if let Some(root_path) = prepend_path {
                // Concatenate `root_path` and `shorthand_path`.
                let shorthand_path = shorthand_path
                    .strip_prefix(MAIN_SEPARATOR_STR)
                    .unwrap_or(shorthand_path);
                Cow::Owned(root_path.join(shorthand_path))
            } else {
                Cow::Borrowed(shorthand_path)
            };
            // Search for the file.
            let found = full_shorthand_path
                .parent()
                .and_then(|dir| dir.find_file_with_sort_tag(sort_tag));
            if let Some(path) = found {
                // We prepended `root_path` before, we can safely strip it
                // and unwrap.
                let found_link = path
                    .strip_prefix(prepend_path.unwrap_or(Path::new("")))
                    .unwrap();
                // Prepend `/`.
                let mut found_link = Path::new(MAIN_SEPARATOR_STR)
                    .join(found_link)
                    .to_str()
                    .unwrap_or_default()
                    .to_string();
                if let Some(fmt) = shorthand_format {
                    found_link.push(FORMAT_SEPARATOR);
                    found_link.push_str(fmt);
                }
                // Store result.
                let _ = std::mem::replace(shorthand_link, Cow::Owned(found_link));
            } else {
                return Err(NoteError::CanNotExpandShorthandLink {
                    path: full_shorthand_path.to_string_lossy().into_owned(),
                });
            }
        }
        Ok(())
    }
    // For autolinks only: drop the scheme from the visible text.
    fn rewrite_autolink(&mut self) {
        let text = match self {
            Link::Text2Dest(text, _, _) => text,
            Link::Image(alt, _) => alt,
            _ => return,
        };
        <Link as Hyperlink>::strip_scheme_fn(text);
    }
    // Apply a trailing `?format` attribute of `dest`/`src` to the link text.
    fn apply_format_attribute(&mut self) {
        // Is this an absolute URL?
        let (text, dest) = match self {
            Link::Text2Dest(text, dest, _) => (text, dest),
            Link::Image(alt, source) => (alt, source),
            _ => return,
        };
        if !<Link as Hyperlink>::is_local_fn(dest) {
            return;
        }
        // We assume, that `dest` had been expanded already, so we can extract
        // the full filename here.
        // If ever it ends with a format string we apply it. Otherwise we quit
        // the method and do nothing.
        let (path, format) = match dest.split_once(FORMAT_SEPARATOR) {
            Some(s) => s,
            None => return,
        };
        let mut short_text = Path::new(path)
            .file_name()
            .unwrap_or_default()
            .to_str()
            .unwrap_or_default();
        // Select what to match:
        let format = if format.starts_with(FORMAT_COMPLETE_FILENAME) {
            // Keep complete filename.
            format
                .strip_prefix(FORMAT_COMPLETE_FILENAME)
                .unwrap_or(format)
        } else if format.starts_with(FORMAT_ONLY_SORT_TAG) {
            // Keep only format-tag.
            short_text = Path::new(path).disassemble().0;
            format.strip_prefix(FORMAT_ONLY_SORT_TAG).unwrap_or(format)
        } else {
            // Keep only stem.
            short_text = Path::new(path).disassemble().2;
            format
        };
        match format.split_once(FORMAT_FROM_TO_SEPARATOR) {
            // No `:` — the pattern marks where the kept text ends.
            None => {
                if !format.is_empty() {
                    if let Some(idx) = short_text.find(format) {
                        short_text = &short_text[..idx];
                    };
                }
            }
            // Some `:` — keep the text between the `from` and `to` patterns.
            Some((from, to)) => {
                if !from.is_empty() {
                    if let Some(idx) = short_text.find(from) {
                        short_text = &short_text[(idx + from.len())..];
                    };
                }
                if !to.is_empty() {
                    if let Some(idx) = short_text.find(to) {
                        short_text = &short_text[..idx];
                    };
                }
            }
        }
        // Store the result.
        let _ = std::mem::replace(text, Cow::Owned(short_text.to_string()));
        let _ = std::mem::replace(dest, Cow::Owned(path.to_string()));
    }
    // Local `dest` path with any trailing URL fragment (`#...`) stripped.
    fn get_local_link_dest_path(&self) -> Option<&Path> {
        let dest = match self {
            Link::Text2Dest(_, dest, _) => dest,
            Link::Image2Dest(_, _, _, _, dest, _) => dest,
            _ => return None,
        };
        if <Link as Hyperlink>::is_local_fn(dest) {
            // Strip URL fragment, but only if the `#` occurs after the last
            // path separator (otherwise it is part of a directory name).
            match (dest.rfind('#'), dest.rfind(['/', '\\'])) {
                (Some(n), sep) if sep.is_some_and(|sep| n > sep) || sep.is_none() => {
                    Some(Path::new(&dest.as_ref()[..n]))
                }
                _ => Some(Path::new(dest.as_ref())),
            }
        } else {
            None
        }
    }
    // Local `src` path of an (inline) image, unchanged.
    fn get_local_link_src_path(&self) -> Option<&Path> {
        let src = match self {
            Link::Image2Dest(_, _, src, _, _, _) => src,
            Link::Image(_, src) => src,
            _ => return None,
        };
        if <Link as Hyperlink>::is_local_fn(src) {
            Some(Path::new(src.as_ref()))
        } else {
            None
        }
    }
    // Append `.html` to local destinations with a Tp-Note extension.
    fn append_html_ext(&mut self) {
        let dest = match self {
            Link::Text2Dest(_, dest, _) => dest,
            Link::Image2Dest(_, _, _, _, dest, _) => dest,
            _ => return,
        };
        if <Link as Hyperlink>::is_local_fn(dest) {
            let path = dest.as_ref();
            if path.has_tpnote_ext() {
                let mut newpath = path.to_string();
                newpath.push_str(HTML_EXT);
                let _ = std::mem::replace(dest, Cow::Owned(newpath));
            }
        }
    }
    // Render the (rewritten) link back into an HTML tag.
    fn to_html(&self) -> String {
        // HTML escape encode double quoted attributes
        fn enc_amp(val: Cow<str>) -> Cow<str> {
            let s = html_escape::encode_double_quoted_attribute(val.as_ref());
            if s == val {
                val
            } else {
                // No cloning happens here, because we own `s` already.
                Cow::Owned(s.into_owned())
            }
        }
        // Replace Windows backslash, then HTML escape encode.
        fn repl_backspace_enc_amp(val: Cow<str>) -> Cow<str> {
            let val = if val.as_ref().contains('\\') {
                Cow::Owned(val.to_string().replace('\\', "/"))
            } else {
                val
            };
            let s = html_escape::encode_double_quoted_attribute(val.as_ref());
            if s == val {
                val
            } else {
                // No cloning happens here, because we own `s` already.
                Cow::Owned(s.into_owned())
            }
        }
        match self {
            Link::Text2Dest(text, dest, title) => {
                // Format title.
                let title_html = if !title.is_empty() {
                    format!(" title=\"{}\"", enc_amp(title.shallow_clone()))
                } else {
                    "".to_string()
                };
                format!(
                    "<a href=\"{}\"{}>{}</a>",
                    repl_backspace_enc_amp(dest.shallow_clone()),
                    title_html,
                    text
                )
            }
            Link::Image2Dest(text1, alt, src, text2, dest, title) => {
                // Format title.
                let title_html = if !title.is_empty() {
                    format!(" title=\"{}\"", enc_amp(title.shallow_clone()))
                } else {
                    "".to_string()
                };
                format!(
                    "<a href=\"{}\"{}>{}<img src=\"{}\" alt=\"{}\">{}</a>",
                    repl_backspace_enc_amp(dest.shallow_clone()),
                    title_html,
                    text1,
                    repl_backspace_enc_amp(src.shallow_clone()),
                    enc_amp(alt.shallow_clone()),
                    text2
                )
            }
            Link::Image(alt, src) => {
                format!(
                    "<img src=\"{}\" alt=\"{}\">",
                    repl_backspace_enc_amp(src.shallow_clone()),
                    enc_amp(alt.shallow_clone())
                )
            }
            _ => unimplemented!(),
        }
    }
}
#[inline]
/// A helper function that scans the input HTML document in `html_input` for
/// HTML hyperlinks. When it finds a relative URL (local link), it analyzes its
/// path. Depending on the `local_link_kind` configuration, relative local
/// links are converted into absolute local links and eventually rebased.
///
/// In order to achieve this, the user must respect the following convention
/// concerning absolute local links in Tp-Note documents:
/// 1. When a document contains a local link with an absolute path (absolute
///    local link), the base of this path is considered to be the directory
///    where the marker file ‘.tpnote.toml’ resides (or ‘/’ if none exists).
///    The marker file directory is `root_path`.
/// 2. Furthermore, the parameter `docdir` contains the absolute path of the
///    directory of the currently processed HTML document. The user guarantees
///    that `docdir` is the base for all relative local links in the document.
///    Note: `docdir` must always start with `root_path`.
///
/// If `LocalLinkKind::Off`, relative local links are not converted.
/// If `LocalLinkKind::Short`, relative local links are converted into an
/// absolute local links with `root_path` as base directory.
/// If `LocalLinkKind::Long`, in addition to the above, the resulting absolute
/// local link is prepended with `root_path`.
///
/// If `rewrite_ext` is true and a local link points to a known
/// Tp-Note file extension, then `.html` is appended to the converted link.
///
/// Remark: The link's text property is never changed. However, there is
/// one exception: when the link's text contains a string similar to URLs,
/// starting with `http:` or `tpnote:`. In this case, the string is interpreted
/// as URL and only the stem of the filename is displayed, e.g.
/// `<a ...>http:dir/my file.md</a>` is replaced with `<a ...>my file</a>`.
///
/// Finally, before a converted local link is reinserted in the output HTML, a
/// copy of that link is kept in `allowed_local_links` for further bookkeeping.
///
/// NB: All absolute URLs (starting with a domain) always remain untouched.
///
/// NB2: It is guaranteed, that the resulting HTML document contains only local
/// links to other documents within `root_path`. Deviant links are replaced by
/// an error message and their URL is discarded.
pub fn rewrite_links(
    html_input: String,
    root_path: &Path,
    docdir: &Path,
    local_link_kind: LocalLinkKind,
    rewrite_ext: bool,
    allowed_local_links: Arc<RwLock<HashSet<PathBuf>>>,
) -> String {
    let (rewrite_rel_paths, rewrite_abs_paths) = match local_link_kind {
        LocalLinkKind::Off => (false, false),
        LocalLinkKind::Short => (true, false),
        LocalLinkKind::Long => (true, true),
    };
    // Search for hyperlinks and inline images in the HTML rendition
    // of this note.
    // `rest` always points to the tail after the last processed link.
    let mut rest = &*html_input;
    let mut html_out = String::new();
    for ((skipped, _consumed, remaining), mut link) in HtmlLinkInlineImage::new(&html_input) {
        html_out.push_str(skipped);
        rest = remaining;
        // Check if `text` = `dest`.
        let mut link_is_autolink = link.is_autolink();
        // Percent decode link destination.
        link.decode_ampersand_and_percent();
        // Check again if `text` = `dest`.
        link_is_autolink = link_is_autolink || link.is_autolink();
        link.strip_local_scheme();
        // Rewrite the local link.
        match link
            .rebase_local_link(root_path, docdir, rewrite_rel_paths, rewrite_abs_paths)
            .and_then(|_| {
                link.expand_shorthand_link(
                    (matches!(local_link_kind, LocalLinkKind::Short)).then_some(root_path),
                )
            }) {
            Ok(()) => {}
            Err(e) => {
                // Replace the faulty link by its error message (escaped);
                // the URL itself is discarded.
                let e = e.to_string();
                let e = html_escape::encode_text(&e);
                html_out.push_str(&format!("<i>{}</i>", e));
                continue;
            }
        };
        if link_is_autolink {
            link.rewrite_autolink();
        }
        link.apply_format_attribute();
        // Bookkeeping: remember every local path the output refers to.
        if let Some(dest_path) = link.get_local_link_dest_path() {
            allowed_local_links.write().insert(dest_path.to_path_buf());
        };
        if let Some(src_path) = link.get_local_link_src_path() {
            allowed_local_links.write().insert(src_path.to_path_buf());
        };
        if rewrite_ext {
            link.append_html_ext();
        }
        html_out.push_str(&link.to_html());
    }
    // Add the last `remaining`.
    html_out.push_str(rest);
    log::trace!(
        "Viewer: referenced allowed local files: {}",
        allowed_local_links
            .read_recursive()
            .iter()
            .map(|p| {
                let mut s = "\n '".to_string();
                s.push_str(&p.display().to_string());
                s
            })
            .collect::<String>()
    );
    html_out
    // All lock guards were already dropped at the end of their statements.
}
/// This trait deals with tagged HTML `&str` data.
pub trait HtmlStr {
    /// Lowercase pattern to check if this is a Doctype tag.
    const TAG_DOCTYPE_PAT: &'static str = "<!doctype";
    /// Lowercase pattern to check if this Doctype is HTML.
    const TAG_DOCTYPE_HTML_PAT: &'static str = "<!doctype html";
    /// Doctype HTML tag. This is inserted by
    /// `<HtmlString>.prepend_html_start_tag()`
    const TAG_DOCTYPE_HTML: &'static str = "<!DOCTYPE html>";
    /// Pattern to check if this is an HTML start tag.
    const START_TAG_HTML_PAT: &'static str = "<html";
    /// HTML end tag.
    const END_TAG_HTML: &'static str = "</html>";
    /// We consider `self` empty, when it equals `<!DOCTYPE html...>` or
    /// when it is empty.
    fn is_empty_html(&self) -> bool;
    /// We consider `html` empty, when it equals `<!DOCTYPE html...>` or
    /// when it is empty.
    /// This is identical to `is_empty_html()`, but does not pull in
    /// additional trait bounds.
    fn is_empty_html2(html: &str) -> bool {
        html.is_empty_html()
    }
    /// True if stream starts with `<!DOCTYPE html...>`.
    /// NOTE: despite the method name, this tests for the HTML *Doctype*
    /// pattern (`TAG_DOCTYPE_HTML_PAT`), not for `<html`.
    fn has_html_start_tag(&self) -> bool;
    /// True if `html` starts with `<!DOCTYPE html...>`.
    /// This is identical to `has_html_start_tag()`, but does not pull in
    /// additional trait bounds.
    fn has_html_start_tag2(html: &str) -> bool {
        html.has_html_start_tag()
    }
    /// Some heuristics to guess if the input stream contains HTML.
    /// Current implementation:
    /// True if:
    ///
    /// * The stream starts with `<!DOCTYPE html ...>`, or
    /// * the stream starts with `<html ...>`
    ///
    /// This function does not check if the recognized HTML is valid.
    fn is_html_unchecked(&self) -> bool;
}
impl HtmlStr for str {
    /// Empty means: zero length, or nothing but a `<!DOCTYPE html ...>` tag
    /// on the first non-blank line.
    fn is_empty_html(&self) -> bool {
        if self.is_empty() {
            return true;
        }
        let first_line = self
            .trim_start()
            .lines()
            .next()
            .map(|l| l.to_ascii_lowercase())
            .unwrap_or_default();
        // Starts with the HTML Doctype pattern, and the first closing
        // bracket is the very last character of the line.
        first_line.starts_with(Self::TAG_DOCTYPE_HTML_PAT)
            && first_line
                .find('>')
                .is_some_and(|pos| pos + 1 == first_line.len())
    }

    /// True when the first non-blank line starts with the (lowercased)
    /// HTML Doctype pattern.
    fn has_html_start_tag(&self) -> bool {
        self.trim_start()
            .lines()
            .next()
            .map(|l| l.to_ascii_lowercase())
            .is_some_and(|l| l.starts_with(Self::TAG_DOCTYPE_HTML_PAT))
    }

    /// Heuristic HTML detection: the first non-blank line starts with a
    /// complete (closed) `<!doctype html ...>` or `<html ...>` tag.
    fn is_html_unchecked(&self) -> bool {
        let first_line = self
            .trim_start()
            .lines()
            .next()
            .map(|l| l.to_ascii_lowercase());
        first_line.as_ref().is_some_and(|l| {
            [Self::TAG_DOCTYPE_HTML_PAT, Self::START_TAG_HTML_PAT]
                .iter()
                .any(|pat| l.starts_with(pat) && l[pat.len()..].contains('>'))
        })
    }
}
/// This trait deals with tagged HTML `String` data.
pub trait HtmlString: Sized {
    /// If the input does not start with `<!DOCTYPE html`
    /// (or lowercase variants), then insert `<!DOCTYPE html>` at the very
    /// beginning of the stream.
    /// Returns `InputStreamError::NonHtmlDoctype` if there is another Doctype
    /// already.
    fn prepend_html_start_tag(self) -> Result<Self, InputStreamError>;
}
impl HtmlString for String {
    fn prepend_html_start_tag(self) -> Result<Self, InputStreamError> {
        // Bring `HtmlStr` constants and methods into scope.
        use crate::html::HtmlStr;
        // Lowercased first non-blank line, used for all pattern tests.
        let first_line_lc = self
            .trim_start()
            .lines()
            .next()
            .map(|l| l.to_ascii_lowercase())
            .unwrap_or_default();
        if first_line_lc.starts_with(<str as HtmlStr>::TAG_DOCTYPE_HTML_PAT) {
            // An HTML Doctype is already present; nothing to do.
            return Ok(self);
        }
        if first_line_lc.starts_with(<str as HtmlStr>::TAG_DOCTYPE_PAT) {
            // There is a Doctype other than HTML: refuse the stream.
            return Err(InputStreamError::NonHtmlDoctype {
                html: self.chars().take(25).collect::<String>(),
            });
        }
        // No Doctype at all: prepend the HTML one.
        let mut html = self;
        html.insert_str(0, <str as HtmlStr>::TAG_DOCTYPE_HTML);
        Ok(html)
    }
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | true |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-lib/src/lib.rs | tpnote-lib/src/lib.rs | //! The `tpnote-lib` library is designed to embed Tp-Note's core function in
//! common text editors and text editor plugins. It is dealing with templates
//! and input files and is also part of the command line application
//! [Tp-Note](https://blog.getreu.net/projects/tp-note/). This library also
//! provides a default configuration in the static variable `LIB_CFG` that can
//! be customized at runtime. The defaults for the variables grouped in
//! `LIB_CFG`, are defined as constants in the module `config` (see Rustdoc).
//! While `LIB_CFG` is sourced only once at the start of Tp-Note, the
//! `SETTINGS` may be sourced more often. The latter contains configuration
//! data originating form environment variables.
//!
//! Tp-Note's high-level API, cf. module `workflow`, abstracts most
//! implementation details. Roughly speaking, the input path correspond to
//! _Tp-Note's_ first positional command line parameter and the output path is
//! the same that is printed to standard output after usage. The main
//! consumer of `tpnote-lib`'s high-level API is the module `workflow` and
//! `html_renderer` in the `tpnote` crate.
//!
pub mod clone_ext;
pub mod config;
pub mod config_value;
pub mod content;
pub mod context;
pub mod error;
pub mod filename;
mod filter;
mod front_matter;
#[cfg(feature = "renderer")]
pub mod highlight;
pub mod html;
#[cfg(feature = "renderer")]
pub mod html2md;
pub mod html_renderer;
#[cfg(feature = "lang-detection")]
pub mod lingua;
pub mod markup_language;
mod note;
pub mod settings;
pub mod template;
pub mod text_reader;
pub mod workflow;
use std::iter::FusedIterator;
/// An iterator adapter that flattens an iterator of iterators,
/// while providing the index of the current outer (inner-producing) element.
pub struct FlattenWithIndex<I>
where
    I: Iterator,
    I::Item: IntoIterator,
{
    /// The outer iterator producing inner iterators.
    iter: I,
    /// The inner iterator currently being drained, if any.
    current_inner: Option<<I::Item as IntoIterator>::IntoIter>,
    /// Number of outer elements fetched so far. Items of the current inner
    /// iterator are reported with index `outer_index - 1`.
    outer_index: usize,
}
impl<I> FlattenWithIndex<I>
where
I: Iterator,
I::Item: IntoIterator,
{
/// Creates a new `FlattenWithIndex`.
pub fn new(iter: I) -> Self {
Self {
iter,
current_inner: None,
outer_index: 0,
}
}
}
impl<I> Iterator for FlattenWithIndex<I>
where
    I: Iterator,
    I::Item: IntoIterator,
{
    /// Each flattened item is paired with the 0-based index of the outer
    /// element it came from.
    type Item = (usize, <I::Item as IntoIterator>::Item);

    fn next(&mut self) -> Option<Self::Item> {
        loop {
            // If we have a current inner iterator, try to get the next element from it.
            if let Some(inner) = &mut self.current_inner {
                if let Some(item) = inner.next() {
                    // `outer_index` was already advanced when this inner
                    // iterator was installed, hence the `- 1`.
                    return Some((self.outer_index - 1, item));
                }
            }
            // Current inner is exhausted (or None): fetch the next outer
            // element, or stop when the outer iterator is done.
            let next_outer = self.iter.next()?;
            self.current_inner = Some(next_outer.into_iter());
            self.outer_index += 1;
            // Loop back to try the new inner iterator.
        }
    }

    /// Conservative size hint honoring the `Iterator` contract.
    ///
    /// Bug fix over the previous version: the outer iterator's hint counts
    /// *inner iterators*, not items. A pending inner iterator may yield zero
    /// items (so it must not raise the lower bound) or arbitrarily many (so
    /// no finite upper bound can be given while outer elements remain). Only
    /// the currently active inner iterator contributes exact information.
    fn size_hint(&self) -> (usize, Option<usize>) {
        let (inner_lower, inner_upper) = self
            .current_inner
            .as_ref()
            .map_or((0, Some(0)), |inner| inner.size_hint());
        match self.iter.size_hint() {
            // The outer iterator is provably exhausted: the current inner
            // iterator is all that is left.
            (0, Some(0)) => (inner_lower, inner_upper),
            // Outer elements remain: their item counts are unknown.
            _ => (inner_lower, None),
        }
    }
}
// `FlattenWithIndex` is fused when both the outer iterator and every inner
// iterator are fused: after the outer iterator returns `None`, `next()` only
// re-polls the (fused, exhausted) last inner iterator and the outer iterator,
// so it keeps returning `None`.
impl<I> FusedIterator for FlattenWithIndex<I>
where
    I: Iterator + FusedIterator,
    I::Item: IntoIterator,
    <I::Item as IntoIterator>::IntoIter: FusedIterator,
{
}
/// Extension trait providing [`FlattenWithIndex`] as a chainable iterator
/// adapter.
pub trait FlattenWithIndexExt: Iterator {
    /// Flattens an iterator of iterators, pairing every item with the
    /// 0-based index of the outer element it came from.
    fn flatten_with_index(self) -> FlattenWithIndex<Self>
    where
        Self::Item: IntoIterator,
        Self: Sized,
    {
        FlattenWithIndex::new(self)
    }
}
// Blanket implementation: every iterator gets `flatten_with_index()`.
impl<T: Iterator> FlattenWithIndexExt for T {}
#[cfg(test)]
mod tests {
    //! Unit tests for the `FlattenWithIndex` adapter.
    use super::*;
    #[test]
    fn test_flatten_with_index() {
        // Test with a non-empty outer iterator with multiple non-empty inner iterators
        let data = vec![vec!['a', 'b'], vec!['c', 'd'], vec!['e', 'f', 'g']];
        let result: Vec<(usize, char)> = data.into_iter().flatten_with_index().collect();
        let expected = vec![
            (0, 'a'),
            (0, 'b'),
            (1, 'c'),
            (1, 'd'),
            (2, 'e'),
            (2, 'f'),
            (2, 'g'),
        ];
        assert_eq!(result, expected);
        // Test with an empty outer iterator
        let data: Vec<Vec<char>> = Vec::new();
        let result: Vec<(usize, char)> = data.into_iter().flatten_with_index().collect();
        assert!(result.is_empty());
        // Test with an empty inner iterator (outer iterator is not empty)
        let data = vec![
            vec!['a', 'b'],
            vec![], // Empty inner iterator
            vec!['c', 'd'],
        ];
        let result: Vec<(usize, char)> = data.into_iter().flatten_with_index().collect();
        let expected = vec![(0, 'a'), (0, 'b'), (2, 'c'), (2, 'd')];
        assert_eq!(result, expected);
        // Test with a mix of non-empty and empty inner iterators
        let data = vec![
            vec!['a', 'b'],
            vec![], // Empty inner
            vec!['c'],
            vec![], // Empty inner
            vec!['d', 'e', 'f'],
        ];
        let result: Vec<(usize, char)> = data.into_iter().flatten_with_index().collect();
        let expected = vec![(0, 'a'), (0, 'b'), (2, 'c'), (4, 'd'), (4, 'e'), (4, 'f')];
        assert_eq!(result, expected);
        // Test with all empty inner iterators
        let data = vec![vec![], vec![], vec![]];
        let result: Vec<(usize, char)> = data.into_iter().flatten_with_index().collect();
        assert!(result.is_empty());
        // Test with just one element in the outer iterator
        let data = vec![vec!['a', 'b', 'c']];
        let result: Vec<(usize, char)> = data.into_iter().flatten_with_index().collect();
        let expected = vec![(0, 'a'), (0, 'b'), (0, 'c')];
        assert_eq!(result, expected);
        // Test with short inner iterators (outer iterator has multiple elements)
        let data = vec![
            vec!['a'],      // Inner iterator has one element
            vec!['b', 'c'], // Inner iterator has two elements
            vec!['d'],      // Inner iterator has one element
        ];
        let result: Vec<(usize, char)> = data.into_iter().flatten_with_index().collect();
        let expected = vec![(0, 'a'), (1, 'b'), (1, 'c'), (2, 'd')];
        assert_eq!(result, expected);
    }
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-lib/src/highlight.rs | tpnote-lib/src/highlight.rs | //! Syntax highlighting for (inline) source code blocks in Markdown input.
use pulldown_cmark::{CodeBlockKind, Event, Tag, TagEnd};
use syntect::highlighting::ThemeSet;
use syntect::html::css_for_theme_with_class_style;
use syntect::html::{ClassStyle, ClassedHTMLGenerator};
use syntect::parsing::SyntaxSet;
use syntect::util::LinesWithEndings;
/// Get the viewer syntax highlighting CSS configuration.
pub(crate) fn get_highlighting_css(theme_name: &str) -> String {
let ts = ThemeSet::load_defaults();
ts.themes
.get(theme_name)
.and_then(|theme| {
css_for_theme_with_class_style(theme, syntect::html::ClassStyle::Spaced).ok()
})
.unwrap_or_default()
}
/// A wrapper for a `pulldown_cmark` event iterator.
///
/// Intercepts fenced code-block and math events from the wrapped iterator
/// and replaces them with rendered HTML; all other events pass through
/// unchanged (see the `Iterator` impl).
#[derive(Debug, Default)]
pub struct SyntaxPreprocessor<'a, I: Iterator<Item = Event<'a>>> {
    // The wrapped `pulldown_cmark` event iterator.
    parent: I,
}
/// Constructor.
impl<'a, I: Iterator<Item = Event<'a>>> SyntaxPreprocessor<'a, I> {
    /// Wraps the given `pulldown_cmark` event iterator `parent`.
    pub fn new(parent: I) -> Self {
        Self { parent }
    }
}
/// Implement `Iterator` for wrapper `SyntaxPreprocessor`.
impl<'a, I: Iterator<Item = Event<'a>>> Iterator for SyntaxPreprocessor<'a, I> {
    type Item = Event<'a>;

    /// Returns the next event, rewriting math and fenced code blocks:
    /// math events (and the deprecated `` `$…$` `` code-span form) become
    /// MathML, ```` ```math ```` fences become display MathML, and other
    /// fenced code blocks with a language tag are syntax-highlighted into
    /// `<pre><code>…</code></pre>` HTML. Everything else passes through.
    fn next(&mut self) -> Option<Self::Item> {
        // Detect inline LaTeX.
        let lang = match self.parent.next()? {
            // Fenced code block with a non-empty language tag: handled by
            // the highlighting code below.
            Event::Start(Tag::CodeBlock(CodeBlockKind::Fenced(lang))) if !lang.is_empty() => lang,
            // This is the deprecated inline math syntax (`$…$` inside a
            // code span). It is kept here for backwards compatibility.
            Event::Code(c) if c.len() > 1 && c.starts_with('$') && c.ends_with('$') => {
                return Some(Event::Html(
                    latex2mathml::latex_to_mathml(
                        &c[1..c.len() - 1],
                        latex2mathml::DisplayStyle::Inline,
                    )
                    // On a LaTeX error, show the error text instead.
                    .unwrap_or_else(|e| e.to_string())
                    .into(),
                ));
            }
            // Native inline math event: render as inline MathML.
            Event::InlineMath(c) => {
                return Some(Event::Html(
                    latex2mathml::latex_to_mathml(c.as_ref(), latex2mathml::DisplayStyle::Inline)
                        .unwrap_or_else(|e| e.to_string())
                        .into(),
                ));
            }
            // Display math event: render as block MathML.
            Event::DisplayMath(c) => {
                return Some(Event::Html(
                    latex2mathml::latex_to_mathml(c.as_ref(), latex2mathml::DisplayStyle::Block)
                        .unwrap_or_else(|e| e.to_string())
                        .into(),
                ));
            }
            // Any other event is passed through unchanged.
            other => return Some(other),
        };

        // Collect the text content of the fenced code block.
        let mut code = String::new();
        let mut event = self.parent.next();
        while let Some(Event::Text(ref code_block)) = event {
            code.push_str(code_block);
            event = self.parent.next();
        }
        // A fenced block consists of `Text` events followed by its end tag.
        debug_assert!(matches!(event, Some(Event::End(TagEnd::CodeBlock))));

        // A ```math fence is rendered as display MathML, not highlighted.
        if lang.as_ref() == "math" {
            return Some(Event::Html(
                latex2mathml::latex_to_mathml(&code, latex2mathml::DisplayStyle::Block)
                    .unwrap_or_else(|e| e.to_string())
                    .into(),
            ));
        }

        // Rough pre-allocation: highlighted HTML is larger than the input.
        let mut html = String::with_capacity(code.len() + code.len() * 3 / 2 + 20);

        // Use default syntax styling.
        let ss = SyntaxSet::load_defaults_newlines();
        let sr = match ss.find_syntax_by_token(lang.as_ref()) {
            Some(sr) => {
                html.push_str("<pre><code class=\"language-");
                html.push_str(lang.as_ref());
                html.push_str("\">");
                sr
            }
            None => {
                // Unknown language: highlight as plain text, omit the class.
                log::debug!(
                    "renderer: no syntax definition found for: `{}`",
                    lang.as_ref()
                );
                html.push_str("<pre><code>");
                ss.find_syntax_plain_text()
            }
        };
        let mut html_generator =
            ClassedHTMLGenerator::new_with_class_style(sr, &ss, ClassStyle::Spaced);
        for line in LinesWithEndings::from(&code) {
            // Highlighting errors are ignored; the line is then skipped.
            html_generator
                .parse_html_for_line_which_includes_newline(line)
                .unwrap_or_default();
        }
        html.push_str(html_generator.finalize().as_str());
        html.push_str("</code></pre>");
        Some(Event::Html(html.into()))
    }
}
// Unit tests for `SyntaxPreprocessor`: math rendering, syntax
// highlighting, plain-text fallback and pass-through of ordinary Markdown.
#[cfg(test)]
mod test {
    use crate::highlight::SyntaxPreprocessor;
    use pulldown_cmark::{html, Options, Parser};

    #[test]
    fn test_latex_math() {
        // Inline math.
        let input: &str = "casual $\\sum_{n=0}^\\infty \\frac{1}{n!}$ text";
        let expected = "<p>casual <math xmlns=";
        let options = Options::all();
        let parser = Parser::new_ext(input, options);
        let processed = SyntaxPreprocessor::new(parser);
        let mut rendered = String::new();
        html::push_html(&mut rendered, processed);
        println!("Rendered: {}", rendered);
        assert!(rendered.starts_with(expected));
        //
        // Deprecated inline math syntax (`$…$` in a code span).
        // This code might be removed later.
        let input: &str = "casual `$\\sum_{n=0}^\\infty \\frac{1}{n!}$` text";
        let expected = "<p>casual <math xmlns=";
        let options = Options::all();
        let parser = Parser::new_ext(input, options);
        let processed = SyntaxPreprocessor::new(parser);
        let mut rendered = String::new();
        html::push_html(&mut rendered, processed);
        assert!(rendered.starts_with(expected));
        //
        // Block math 1: `$$ … $$` display math.
        let input = "text\n$$\nR(X, Y)Z = \\nabla_X\\nabla_Y Z - \
            \\nabla_Y \\nabla_X Z - \\nabla_{[X, Y]} Z\n$$";
        let expected = "<p>text\n\
            <math xmlns=\"http://www.w3.org/1998/Math/MathML\" display=\"block\">\
            <mi>R</mi><mo>(</mo><mi>X</mi><mo>,</mo><mi>Y</mi><mo>)</mo>\
            <mi>Z</mi><mo>=</mo><msub><mo>∇</mo><mi>X</mi></msub><msub><mo>∇</mo>\
            <mi>Y</mi></msub><mi>Z</mi><mo>-</mo><msub><mo>∇</mo><mi>Y</mi></msub>\
            <msub><mo>∇</mo><mi>X</mi></msub><mi>Z</mi><mo>-</mo><msub><mo>∇</mo>\
            <mrow><mo>[</mo><mi>X</mi><mo>,</mo><mi>Y</mi><mo>]</mo></mrow></msub>\
            <mi>Z</mi></math></p>\n";
        let options = Options::all();
        let parser = Parser::new_ext(input, options);
        let processed = SyntaxPreprocessor::new(parser);
        let mut rendered = String::new();
        html::push_html(&mut rendered, processed);
        assert_eq!(rendered, expected);

        // Block math 2: fenced ```math block.
        let input = "text\n```math\nR(X, Y)Z = \\nabla_X\\nabla_Y Z - \
            \\nabla_Y \\nabla_X Z - \\nabla_{[X, Y]} Z\n```";
        let expected = "<p>text</p>\n\
            <math xmlns=\"http://www.w3.org/1998/Math/MathML\" display=\"block\">\
            <mi>R</mi><mo>(</mo><mi>X</mi><mo>,</mo><mi>Y</mi><mo>)</mo>\
            <mi>Z</mi><mo>=</mo><msub><mo>∇</mo><mi>X</mi></msub><msub><mo>∇</mo>\
            <mi>Y</mi></msub><mi>Z</mi><mo>-</mo><msub><mo>∇</mo><mi>Y</mi></msub>\
            <msub><mo>∇</mo><mi>X</mi></msub><mi>Z</mi><mo>-</mo><msub><mo>∇</mo>\
            <mrow><mo>[</mo><mi>X</mi><mo>,</mo><mi>Y</mi><mo>]</mo></mrow></msub>\
            <mi>Z</mi></math>";
        let options = Options::all();
        let parser = Parser::new_ext(input, options);
        let processed = SyntaxPreprocessor::new(parser);
        let mut rendered = String::new();
        html::push_html(&mut rendered, processed);
        assert_eq!(rendered, expected);
    }

    #[test]
    fn test_rust_source() {
        // A known language tag gets a `language-…` class and highlighting.
        let input: &str = "```rust\n\
            fn main() {\n\
            println!(\"Hello, world!\");\n\
            }\n\
            ```";
        let expected = "<pre><code class=\"language-rust\">\
            <span class=\"source rust\">";
        let parser = Parser::new(input);
        let processed = SyntaxPreprocessor::new(parser);
        let mut rendered = String::new();
        html::push_html(&mut rendered, processed);
        assert!(rendered.starts_with(expected));
    }

    #[test]
    fn test_plain_text() {
        // A fence without a language tag is passed through untouched.
        let input: &str = "```\nSome\nText\n```";
        let expected = "<pre><code>\
            Some\nText\n</code></pre>\n";
        let parser = Parser::new(input);
        let processed = SyntaxPreprocessor::new(parser);
        let mut rendered = String::new();
        html::push_html(&mut rendered, processed);
        assert_eq!(rendered, expected);
    }

    #[test]
    fn test_unkown_source() {
        // An unknown language tag falls back to plain-text highlighting.
        let input: &str = "```abc\n\
            fn main() {\n\
            println!(\"Hello, world!\");\n\
            }\n\
            ```";
        let expected = "<pre><code>\
            <span class=\"text plain\">fn main()";
        let parser = Parser::new(input);
        let processed = SyntaxPreprocessor::new(parser);
        let mut rendered = String::new();
        html::push_html(&mut rendered, processed);
        assert!(rendered.starts_with(expected));
    }

    #[test]
    fn test_md() {
        // Ordinary Markdown without code/math is passed through unchanged.
        let markdown_input = "# Titel\n\nBody";
        let expected = "<h1>Titel</h1>\n<p>Body</p>\n";
        let options = Options::all();
        let parser = Parser::new_ext(markdown_input, options);
        let parser = SyntaxPreprocessor::new(parser);
        // Write to String buffer.
        let mut html_output: String = String::with_capacity(markdown_input.len() * 3 / 2);
        html::push_html(&mut html_output, parser);
        assert_eq!(html_output, expected);
    }

    #[test]
    fn test_indented() {
        // A fenced block nested inside a list item is still highlighted.
        let markdown_input = r#"
1. test
   ```bash
   wget getreu.net
   echo test
   ```
"#;
        let expected = "<ol>\n<li>\n<p>test</p>\n<pre>\
            <code class=\"language-bash\">\
            <span class=\"source shell bash\">\
            <span class=\"meta function-call shell\">\
            <span class=\"variable function shell\">wget</span></span>";
        let options = Options::all();
        let parser = Parser::new_ext(markdown_input, options);
        let parser = SyntaxPreprocessor::new(parser);
        // Write to String buffer.
        let mut html_output: String = String::with_capacity(markdown_input.len() * 3 / 2);
        html::push_html(&mut html_output, parser);
        assert!(html_output.starts_with(expected));
    }
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-lib/src/filename.rs | tpnote-lib/src/filename.rs | //! Helper functions dealing with filenames.
use crate::config::FILENAME_COPY_COUNTER_MAX;
use crate::config::FILENAME_DOTFILE_MARKER;
use crate::config::FILENAME_EXTENSION_SEPARATOR_DOT;
use crate::config::FILENAME_LEN_MAX;
use crate::config::LIB_CFG;
use crate::error::FileError;
use crate::markup_language::MarkupLanguage;
use crate::settings::SETTINGS;
use std::mem::swap;
use std::path::Path;
use std::path::PathBuf;
use std::time::SystemTime;
/// Extends `PathBuf` with methods dealing with paths to Tp-Note files.
pub trait NotePathBuf {
    /// Concatenates the `sort_tag`, `stem`, `copy_counter`, `.` and
    /// `extension`.
    /// This function inserts all potentially necessary separators and
    /// extra separators.
    fn from_disassembled(
        sort_tag: &str,
        stem: &str,
        copy_counter: Option<usize>,
        extension: &str,
    ) -> Self;
    /// Append/increment a copy counter.
    /// When the path `p` exists on disk already, append some extension
    /// with an incrementing counter to the sort-tag in `p` until
    /// we find a free unused filename.
    /// ```rust
    /// use std::env::temp_dir;
    /// use std::fs;
    /// use tpnote_lib::filename::NotePathBuf;
    ///
    /// // Prepare test: create existing note file.
    /// let raw = "some content";
    /// let mut notefile = temp_dir().join("20221101-My day--Note.md");
    /// fs::write(&notefile, raw.as_bytes()).unwrap();
    /// let expected = temp_dir().join("20221101-My day--Note(1).md");
    /// let _ = fs::remove_file(&expected);
    ///
    /// // Start test
    /// notefile.set_next_unused();
    /// assert_eq!(notefile, expected);
    /// ```
    ///
    /// When the filename is not used, keep it.
    /// ```rust
    /// use std::env::temp_dir;
    /// use std::fs;
    /// use tpnote_lib::filename::NotePathBuf;
    ///
    /// // Prepare test: make sure that there is no note file.
    /// let mut notefile = temp_dir().join("20221102-My day--Note.md");
    /// let _ = fs::remove_file(&notefile);
    /// // The name should not change.
    /// let expected = notefile.clone();
    ///
    /// // Start test
    /// notefile.set_next_unused();
    /// assert_eq!(notefile, expected);
    /// ```
    fn set_next_unused(&mut self) -> Result<(), FileError>;
    /// Shortens the stem of a filename so that
    /// `filename.len() <= FILENAME_LEN_MAX`.
    /// This method assumes, that the file stem does not contain a copy
    /// counter. If stem ends with a pattern similar to a copy counter,
    /// it appends `-` to stem (cf. unit test in the source code).
    ///
    /// ```rust
    /// use std::ffi::OsString;
    /// use std::path::PathBuf;
    /// use tpnote_lib::filename::NotePathBuf;
    /// use tpnote_lib::config::FILENAME_LEN_MAX;
    ///
    /// // Test short filename.
    /// let mut input = PathBuf::from("short filename.md");
    /// input.shorten_filename();
    /// let output = input;
    /// assert_eq!(OsString::from("short filename.md"),
    ///            output.into_os_string());
    ///
    /// // Test too long filename.
    /// let mut input = String::from("some/path/");
    /// for _ in 0..(FILENAME_LEN_MAX - "long fi.ext".len()-1) {
    ///     input.push('x');
    /// }
    /// let mut expected = input.clone();
    /// input.push_str("long filename to be cut.ext");
    /// let mut input = PathBuf::from(input);
    /// expected.push_str("long fi.ext");
    ///
    /// input.shorten_filename();
    /// let output = PathBuf::from(input);
    /// assert_eq!(PathBuf::from(expected), output);
    /// ```
    fn shorten_filename(&mut self);
}
impl NotePathBuf for PathBuf {
    /// Assembles a note filename from its parts, inserting all separators
    /// and — where the parts could be misread — extra separators.
    #[inline]
    fn from_disassembled(
        sort_tag: &str,
        stem: &str,
        copy_counter: Option<usize>,
        extension: &str,
    ) -> Self {
        // Assemble path.
        let mut filename = String::new();

        // Add potential sort-tag and separators.
        let scheme = &LIB_CFG.read_recursive().scheme[SETTINGS.read_recursive().current_scheme];
        if !sort_tag.is_empty() {
            filename.push_str(sort_tag);
            filename.push_str(&scheme.filename.sort_tag.separator);
        }

        // Does the beginning of `stem` look like a sort-tag?
        // Make sure that the path cannot be misinterpreted, even if a
        // `sort_tag.separator` would follow.
        let mut test_path = String::from(stem);
        test_path.push_str(&scheme.filename.sort_tag.separator);
        // Do we need an `extra_separator`?
        if stem.is_empty() || !&test_path.split_sort_tag(false).0.is_empty() {
            filename.push(scheme.filename.sort_tag.extra_separator);
        }

        filename.push_str(stem);

        if let Some(cc) = copy_counter {
            // Is `copy_counter.extra_separator` necessary?
            // Does this stem ending look similar to a copy counter?
            if stem.split_copy_counter().1.is_some() {
                // Add an additional separator.
                filename.push_str(&scheme.filename.copy_counter.extra_separator);
            };
            filename.push_str(&scheme.filename.copy_counter.opening_brackets);
            filename.push_str(&cc.to_string());
            filename.push_str(&scheme.filename.copy_counter.closing_brackets);
        }

        if !extension.is_empty() {
            filename.push(FILENAME_EXTENSION_SEPARATOR_DOT);
            filename.push_str(extension);
        };
        PathBuf::from(filename)
    }

    /// If `self` exists on disk, replace the filename with the first
    /// unused variant carrying a copy counter `(1)`, `(2)`, …
    fn set_next_unused(&mut self) -> Result<(), FileError> {
        if !self.exists() {
            return Ok(());
        }

        let (sort_tag, _, stem, _copy_counter, ext) = self.disassemble();

        let mut new_path = self.clone();

        // Try up to `FILENAME_COPY_COUNTER_MAX` copy counters, then give up.
        for copy_counter in 1..FILENAME_COPY_COUNTER_MAX {
            let filename = Self::from_disassembled(sort_tag, stem, Some(copy_counter), ext);
            new_path.set_file_name(filename);

            if !new_path.exists() {
                break;
            }
        }

        // This only happens when all copy counters are taken already.
        // Should never happen.
        if new_path.exists() {
            return Err(FileError::NoFreeFileName {
                directory: self.parent().unwrap_or_else(|| Path::new("")).to_path_buf(),
            });
        }

        swap(self, &mut new_path);
        Ok(())
    }

    /// Truncates the stem (on a UTF-8 character boundary) so that the whole
    /// filename fits into `FILENAME_LEN_MAX` bytes.
    fn shorten_filename(&mut self) {
        // Determine length of file-extension.
        let stem = self
            .file_stem()
            .unwrap_or_default()
            .to_str()
            .unwrap_or_default();
        let ext = self
            .extension()
            .unwrap_or_default()
            .to_str()
            .unwrap_or_default();
        let ext_len = ext.len();

        // Limit the size of the filename.
        let mut stem_short = String::new();
        // `+1` reserves one byte for `.` before the extension.
        // `+1` reserves one byte for `-` a potential copy counter extra
        // separator.
        // BUGFIX: `saturating_sub` guards against an over-long extension,
        // which would otherwise underflow (panic in debug builds, a huge
        // wrapped range in release builds).
        for i in (0..FILENAME_LEN_MAX.saturating_sub(ext_len + 2)).rev() {
            // `get` fails on non-character boundaries; step back until it
            // succeeds.
            if let Some(s) = stem.get(..=i) {
                stem_short = s.to_string();
                break;
            }
        }

        // Does this ending look like a copy counter?
        if stem_short.split_copy_counter().1.is_some() {
            let scheme = &LIB_CFG.read_recursive().scheme[SETTINGS.read_recursive().current_scheme];
            stem_short.push_str(&scheme.filename.copy_counter.extra_separator);
        }

        // Assemble.
        let mut note_filename = stem_short;
        if !ext.is_empty() {
            // CONSISTENCY FIX: this dot separates the extension, so use
            // `FILENAME_EXTENSION_SEPARATOR_DOT` (same character, correct
            // constant) instead of `FILENAME_DOTFILE_MARKER`.
            note_filename.push(FILENAME_EXTENSION_SEPARATOR_DOT);
            note_filename.push_str(ext);
        }
        // Replace the filename.
        self.set_file_name(note_filename);
    }
}
/// Extends `Path` with methods dealing with paths to Tp-Note files.
pub trait NotePath {
    /// Helper function that decomposes a fully qualified path name
    /// into (`sort_tag`, `stem_copy_counter_ext`, `stem`, `copy_counter`, `ext`).
    /// All sort-tag separators and copy-counter separators/brackets are removed.
    fn disassemble(&self) -> (&str, &str, &str, Option<usize>, &str);
    /// Compares with another `Path` to a Tp-Note file. They are considered equal
    /// even when the copy counter is different.
    fn exclude_copy_counter_eq(&self, p2: &Path) -> bool;
    /// Compare to all file extensions Tp-Note can open.
    fn has_tpnote_ext(&self) -> bool;
    /// Check if a `Path` points to a file with a "well-formed" filename.
    fn has_wellformed_filename(&self) -> bool;
    /// Get the filename of the last created Tp-Note file in the directory
    /// `self`. If more files have the same creation date, choose the
    /// lexicographical last sort-tag in the current directory. Files without
    /// sort tag are ignored.
    /// <https://doc.rust-lang.org/std/cmp/trait.Ord.html#lexicographical-comparison>
    fn find_last_created_file(&self) -> Option<String>;
    /// Checks if the directory in `self` has a Tp-Note file starting with the
    /// `sort_tag`. If found, return the filename, otherwise `None`
    fn has_file_with_sort_tag(&self, sort_tag: &str) -> Option<String>;
    /// A method that searches the directory in `self` for a Tp-Note
    /// file with the sort-tag `sort_tag`. It returns the filename.
    fn find_file_with_sort_tag(&self, sort_tag: &str) -> Option<PathBuf>;
}
impl NotePath for Path {
    /// Decomposes the filename of `self` into
    /// (`sort_tag`, `stem_copy_counter_ext`, `stem`, `copy_counter`, `ext`).
    fn disassemble(&self) -> (&str, &str, &str, Option<usize>, &str) {
        let sort_tag_stem_copy_counter_ext = self
            .file_name()
            .unwrap_or_default()
            .to_str()
            .unwrap_or_default();

        // Trim `sort_tag`.
        let (sort_tag, stem_copy_counter_ext, _) =
            sort_tag_stem_copy_counter_ext.split_sort_tag(false);

        let ext = Path::new(stem_copy_counter_ext)
            .extension()
            .unwrap_or_default()
            .to_str()
            .unwrap_or_default();

        // Only accept a purely alphanumeric extension; otherwise treat the
        // whole rest as stem and report an empty extension.
        let (stem_copy_counter, ext) = if !ext.is_empty()
            && ext.chars().all(|c| c.is_alphanumeric())
        {
            (
                // This is a little faster than `stem_copy_counter_ext.file_stem()`.
                &stem_copy_counter_ext[..stem_copy_counter_ext.len().saturating_sub(ext.len() + 1)],
                // `ext` is Ok, we keep it.
                ext,
            )
        } else {
            (stem_copy_counter_ext, "")
        };

        let (stem, copy_counter) = stem_copy_counter.split_copy_counter();

        (sort_tag, stem_copy_counter_ext, stem, copy_counter, ext)
    }

    /// Check if 2 filenames are equal. Compare all parts, except the copy counter.
    /// Consider 2 files identical even when they have a different copy counter.
    fn exclude_copy_counter_eq(&self, p2: &Path) -> bool {
        let (sort_tag1, _, stem1, _, ext1) = self.disassemble();
        let (sort_tag2, _, stem2, _, ext2) = p2.disassemble();
        sort_tag1 == sort_tag2 && stem1 == stem2 && ext1 == ext2
    }

    /// Returns `true` if the path in `self` ends with an extension that Tp-
    /// Note considers as its own file. To do so the extension is compared
    /// to all items in the registered `filename.extensions` table in the
    /// configuration file.
    fn has_tpnote_ext(&self) -> bool {
        MarkupLanguage::from(self).is_some()
    }

    /// Check if a `path` points to a file with a
    /// "well formed" filename.
    /// We consider it well formed,
    /// * if the filename is not empty, and
    /// * if the filename is a dot file (len >1 and without whitespace), or
    /// * if the filename has an extension.
    ///
    /// A valid extension must not contain whitespace.
    ///
    /// ```rust
    /// use std::path::Path;
    /// use tpnote_lib::filename::NotePath;
    ///
    /// let f = Path::new("tpnote.toml");
    /// assert!(f.has_wellformed_filename());
    ///
    /// let f = Path::new("dir/tpnote.toml");
    /// assert!(f.has_wellformed_filename());
    ///
    /// let f = Path::new("tpnote.to ml");
    /// assert!(!f.has_wellformed_filename());
    /// ```
    fn has_wellformed_filename(&self) -> bool {
        let filename = &self.file_name().unwrap_or_default();
        let ext = self
            .extension()
            .unwrap_or_default()
            .to_str()
            .unwrap_or_default();

        let is_filename = !filename.is_empty();

        let filename = filename.to_str().unwrap_or_default();
        let is_dot_file = filename.starts_with(FILENAME_DOTFILE_MARKER)
            // We consider only dot files without whitespace.
            && (filename == filename.trim())
            && filename.split_whitespace().count() == 1;

        let has_extension = !ext.is_empty()
            // Only accept extensions with alphanumeric characters.
            && ext.chars().all(|c| c.is_ascii_alphanumeric());

        is_filename && (is_dot_file || has_extension)
    }

    fn find_last_created_file(&self) -> Option<String> {
        if let Ok(files) = self.read_dir() {
            // If more than one file starts with `sort_tag`, retain the
            // alphabetic first.
            let mut filename_max = String::new();
            let mut ctime_max = SystemTime::UNIX_EPOCH;
            for file in files.flatten() {
                match file.file_type() {
                    Ok(ft) if ft.is_file() => {}
                    _ => continue,
                }
                let ctime = file
                    .metadata()
                    .ok()
                    .and_then(|md| md.created().ok())
                    .unwrap_or(SystemTime::UNIX_EPOCH);

                let filename = file.file_name();
                // BUGFIX: `to_str().unwrap()` panicked on non-UTF-8
                // directory entries; skip such entries instead.
                let Some(filename) = filename.to_str() else {
                    continue;
                };
                if filename.is_empty() || !filename.has_tpnote_ext() {
                    continue;
                }

                if ctime > ctime_max
                    || (ctime == ctime_max
                        && filename.split_sort_tag(false).0 > filename_max.split_sort_tag(false).0)
                {
                    filename_max = filename.to_string();
                    ctime_max = ctime;
                }
            } // End of loop.

            // Found, return result.
            if !filename_max.is_empty() {
                // `filename_max` is owned already; no extra clone needed.
                Some(filename_max)
            } else {
                None
            }
        } else {
            None
        }
    }

    fn has_file_with_sort_tag(&self, sort_tag: &str) -> Option<String> {
        if let Ok(files) = self.read_dir() {
            for file in files.flatten() {
                match file.file_type() {
                    Ok(ft) if ft.is_file() => {}
                    _ => continue,
                }
                let filename = file.file_name();
                // BUGFIX: skip non-UTF-8 entries instead of panicking.
                let Some(filename) = filename.to_str() else {
                    continue;
                };
                // Tests in the order of the cost.
                if filename.starts_with(sort_tag)
                    && filename.has_tpnote_ext()
                    && filename.split_sort_tag(false).0 == sort_tag
                {
                    let filename = filename.to_string();
                    return Some(filename);
                }
            }
        }
        None
    }

    fn find_file_with_sort_tag(&self, sort_tag: &str) -> Option<PathBuf> {
        let mut found = None;
        if let Ok(files) = self.read_dir() {
            // If more than one file starts with `sort_tag`, retain the
            // alphabetic first.
            let mut minimum = PathBuf::new();

            'file_loop: for file in files.flatten() {
                match file.file_type() {
                    Ok(ft) if ft.is_file() => {}
                    _ => continue,
                }
                let file = file.path();

                if !(*file).has_tpnote_ext() {
                    continue 'file_loop;
                }

                // Does this sort-tag short link correspond to
                // any sort-tag of a file in the same directory?
                if file.disassemble().0 == sort_tag {
                    // Before the first assignment `minimum` is empty.
                    // Finds the minimum.
                    if minimum == Path::new("") || minimum > file {
                        minimum = file;
                    }
                }
            } // End of loop.

            if minimum != Path::new("") {
                log::debug!(
                    "File `{}` referenced by sort-tag match `{}`.",
                    minimum.to_str().unwrap_or_default(),
                    sort_tag,
                );
                // Found, return result.
                found = Some(minimum)
            }
        }
        found
    }
}
/// Some private helper functions related to note filenames.
pub(crate) trait NotePathStr {
    /// Returns `true` if the path in `self` ends with an extension that is
    /// registered as Tp-Note extension in `filename.extensions`.
    /// The input may contain a path as long as it ends with a filename.
    fn has_tpnote_ext(&self) -> bool;
    /// Helper function that expects a filename in `self`, matches the copy
    /// counter at the end of string and returns the result and the copy
    /// counter.
    /// This function removes all brackets and a potential extra separator.
    /// The input must not contain a path, only a filename is allowed here.
    fn split_copy_counter(&self) -> (&str, Option<usize>);
    /// Helper function that expects a filename in `self`:
    /// Greedily match sort tag chars and return it as a subslice as first tuple
    /// and the rest as second tuple: `(sort-tag, rest, is_sequential)`.
    /// The input must not contain a path, only a filename is allowed here.
    /// If `filename.sort_tag.separator` is defined, it must appear after the
    /// sort-tag (without being part of it). Otherwise the sort-tag is discarded.
    /// A sort-tag cannot contain more than
    /// `FILENAME_SORT_TAG_LETTERS_IN_SUCCESSION_MAX` lowercase letters in a row.
    /// If `ignore_sort_tag_separator=true` this split runs with the setting
    /// `filename_sort_tag_separator=""`.
    /// If the boolean return value is true, the sort-tag satisfies the
    /// criteria for a sequential sort-tag.
    fn split_sort_tag(&self, ignore_sort_tag_separator: bool) -> (&str, &str, bool);
    /// Check and return the filename in `self`, if it contains only
    /// `lib_cfg.filename.sort_tag.extra_chars` (no sort-tag separator, no file
    /// stem, no extension). The number of lowercase letters in a row must not
    /// exceed `filename.sort_tag.letters_in_succession_max`.
    /// The input may contain a path as long as it ends with `/`, `\\` or a
    /// filename. The path, if present, is ignored.
    fn is_valid_sort_tag(&self) -> Option<&str>;
}
impl NotePathStr for str {
    // Delegates to `MarkupLanguage`: an extension registered there marks a
    // Tp-Note file.
    fn has_tpnote_ext(&self) -> bool {
        MarkupLanguage::from(Path::new(self)).is_some()
    }

    // Strips, from right to left: closing brackets, the number, the opening
    // brackets and an optional extra separator. Any step failing means
    // "no copy counter": the input is returned unchanged.
    #[inline]
    fn split_copy_counter(&self) -> (&str, Option<usize>) {
        let scheme = &LIB_CFG.read_recursive().scheme[SETTINGS.read_recursive().current_scheme];
        // Strip closing brackets at the end.
        let tag1 =
            if let Some(t) = self.strip_suffix(&scheme.filename.copy_counter.closing_brackets) {
                t
            } else {
                return (self, None);
            };
        // Now strip numbers.
        let tag2 = tag1.trim_end_matches(|c: char| c.is_numeric());
        let copy_counter: Option<usize> = if tag2.len() < tag1.len() {
            tag1[tag2.len()..].parse().ok()
        } else {
            return (self, None);
        };
        // And finally strip starting bracket.
        let tag3 =
            if let Some(t) = tag2.strip_suffix(&scheme.filename.copy_counter.opening_brackets) {
                t
            } else {
                return (self, None);
            };
        // This is optional: a preceding extra separator is removed as well.
        if let Some(t) = tag3.strip_suffix(&scheme.filename.copy_counter.extra_separator) {
            (t, copy_counter)
        } else {
            (tag3, copy_counter)
        }
    }

    fn split_sort_tag(&self, ignore_sort_tag_separator: bool) -> (&str, &str, bool) {
        let scheme = &LIB_CFG.read_recursive().scheme[SETTINGS.read_recursive().current_scheme];

        let mut is_sequential_sort_tag = true;
        // Counters for runs of consecutive digits/lowercase letters.
        let mut digits: u8 = 0;
        let mut letters: u8 = 0;
        // NOTE(review): `count()` counts *chars* but the result is used as a
        // *byte* index. This is only correct while all accepted sort-tag
        // characters (including `sort_tag.extra_chars`) are single-byte
        // ASCII — verify that the configuration enforces this.
        let mut sort_tag = &self[..self
            .chars()
            .take_while(|&c| {
                if c.is_ascii_digit() {
                    digits += 1;
                    if digits > scheme.filename.sort_tag.sequential.digits_in_succession_max {
                        is_sequential_sort_tag = false;
                    }
                } else {
                    digits = 0;
                }
                if c.is_ascii_lowercase() {
                    letters += 1;
                } else {
                    letters = 0;
                }
                letters <= scheme.filename.sort_tag.letters_in_succession_max
                    && (c.is_ascii_digit()
                        || c.is_ascii_lowercase()
                        || scheme.filename.sort_tag.extra_chars.contains([c]))
            })
            .count()];

        let mut stem_copy_counter_ext;
        if scheme.filename.sort_tag.separator.is_empty() || ignore_sort_tag_separator {
            // `sort_tag` is correct.
            stem_copy_counter_ext = &self[sort_tag.len()..];
        } else {
            // Take `sort_tag.separator` into account: the sort-tag ends at
            // the last separator occurrence; without one it is discarded.
            if let Some(i) = sort_tag.rfind(&scheme.filename.sort_tag.separator) {
                sort_tag = &sort_tag[..i];
                stem_copy_counter_ext = &self[i + scheme.filename.sort_tag.separator.len()..];
            } else {
                sort_tag = "";
                stem_copy_counter_ext = self;
            }
        }

        // Remove `sort_tag.extra_separator` if it is at the first position
        // followed by a `sort_tag_char` at the second position.
        let mut chars = stem_copy_counter_ext.chars();
        if chars
            .next()
            .is_some_and(|c| c == scheme.filename.sort_tag.extra_separator)
            && chars.next().is_some_and(|c| {
                c.is_ascii_digit()
                    || c.is_ascii_lowercase()
                    || scheme.filename.sort_tag.extra_chars.contains(c)
            })
        {
            stem_copy_counter_ext = stem_copy_counter_ext
                .strip_prefix(scheme.filename.sort_tag.extra_separator)
                .unwrap();
        }

        (sort_tag, stem_copy_counter_ext, is_sequential_sort_tag)
    }

    fn is_valid_sort_tag(&self) -> Option<&str> {
        // Ignore a leading path; only the last component is inspected.
        let filename = if let Some((_, filename)) = self.rsplit_once(['\\', '/']) {
            filename
        } else {
            self
        };
        if filename.is_empty() {
            return None;
        }

        // If the rest is empty, all characters are in `sort_tag`.
        if filename.split_sort_tag(true).1.is_empty() {
            Some(filename)
        } else {
            None
        }
    }
}
/// A trait that interprets the implementing type as a filename extension.
pub(crate) trait Extension {
    /// Returns `true` if `self` is equal to one of the Tp-Note extensions
    /// registered in the configuration file `filename.extensions` table.
    fn is_tpnote_ext(&self) -> bool;
}
impl Extension for str {
    // Delegates to `MarkupLanguage`: a known markup language means the
    // extension is registered as a Tp-Note extension.
    fn is_tpnote_ext(&self) -> bool {
        MarkupLanguage::from(self).is_some()
    }
}
#[cfg(test)]
mod tests {
use std::ffi::OsString;
use std::path::Path;
use std::path::PathBuf;
#[test]
fn test_from_disassembled() {
    use crate::filename::NotePathBuf;

    // Plain stem and extension, no sort-tag, no copy counter.
    let expected = PathBuf::from("My_file.md");
    let result = PathBuf::from_disassembled("", "My_file", None, "md");
    assert_eq!(expected, result);

    // Sort-tag, stem and copy counter.
    let expected = PathBuf::from("1_2_3-My_file(1).md");
    let result = PathBuf::from_disassembled("1_2_3", "My_file", Some(1), "md");
    assert_eq!(expected, result);

    let expected = PathBuf::from("1_2_3-123 my_file(1).md");
    let result = PathBuf::from_disassembled("1_2_3", "123 my_file", Some(1), "md");
    assert_eq!(expected, result);

    // A stem starting like a sort-tag gets the `'` extra separator.
    let expected = PathBuf::from("1_2_3-'123-My_file(1).md");
    let result = PathBuf::from_disassembled("1_2_3", "123-My_file", Some(1), "md");
    assert_eq!(expected, result);

    let expected = PathBuf::from("'123-My_file(1).md");
    let result = PathBuf::from_disassembled("", "123-My_file", Some(1), "md");
    assert_eq!(expected, result);

    let res = PathBuf::from_disassembled("1234", "title--subtitle", Some(9), "md");
    assert_eq!(res, Path::new("1234-title--subtitle(9).md"));

    let res = PathBuf::from_disassembled("1234ab", "title--subtitle", Some(9), "md");
    assert_eq!(res, Path::new("1234ab-title--subtitle(9).md"));

    let res = PathBuf::from_disassembled("1234", "5678", Some(9), "md");
    assert_eq!(res, Path::new("1234-'5678(9).md"));

    let res = PathBuf::from_disassembled("1234", "5678--subtitle", Some(9), "md");
    assert_eq!(res, Path::new("1234-'5678--subtitle(9).md"));

    let res = PathBuf::from_disassembled("1234", "", None, "md");
    assert_eq!(res, Path::new("1234-'.md"));

    // This is a special case, that cannot be disassembled properly.
    let res = PathBuf::from_disassembled("1234", "'5678--subtitle", Some(9), "md");
    assert_eq!(res, Path::new("1234-'5678--subtitle(9).md"));

    // A stem ending like a copy counter gets the `-` extra separator.
    let res = PathBuf::from_disassembled("", "-", Some(9), "md");
    assert_eq!(res, Path::new("'-(9).md"));

    let res = PathBuf::from_disassembled("", "(1)", Some(9), "md");
    assert_eq!(res, Path::new("(1)-(9).md"));

    // This is a special case, that cannot be disassembled properly.
    let res = PathBuf::from_disassembled("", "(1)-", Some(9), "md");
    assert_eq!(res, Path::new("(1)-(9).md"));
}
#[test]
fn test_set_next_unused() {
    use crate::filename::NotePathBuf;
    use std::env::temp_dir;
    use std::fs;

    // Create a file occupying the target name, so `set_next_unused()`
    // must append a copy counter.
    let raw = "This simulates a non tp-note file";
    let mut notefile = temp_dir().join("20221030-some.pdf--Note.md");
    // BUGFIX: `&notefile` had been garbled to `¬efile` (a mangled
    // `&not` HTML entity), which does not compile.
    fs::write(&notefile, raw.as_bytes()).unwrap();

    notefile.set_next_unused().unwrap();
    let expected = temp_dir().join("20221030-some.pdf--Note(1).md");
    assert_eq!(notefile, expected);

    // Clean up.
    let _ = fs::remove_file(notefile);
}
#[test]
fn test_shorten_filename() {
    use crate::config::FILENAME_LEN_MAX;
    use crate::filename::NotePathBuf;

    // Test a short filename with a problematic file stem ending looking
    // like a copy counter pattern. Therefore the method appends `-`.
    let mut input = PathBuf::from("fn(1).md");
    let expected = PathBuf::from("fn(1)-.md");
    // The filename is short enough that no truncation happens; only the
    // copy-counter extra separator `-` is appended.
    input.shorten_filename();
    let output = input;
    assert_eq!(OsString::from(expected), output);
    //
    // Test if assembled correctly: a short, unproblematic name is unchanged.
    let mut input = PathBuf::from("20221030-some.pdf--Note.md");
    let expected = input.clone();
    input.shorten_filename();
    let output = input;
    assert_eq!(OsString::from(expected), output);
    //
    // Test long filename: the stem is truncated so the whole filename
    // (stem + `.` + extension + 1 reserve byte) fits `FILENAME_LEN_MAX`.
    let mut input = "X".repeat(FILENAME_LEN_MAX + 10);
    input.push_str(".ext");
    let mut expected = "X".repeat(FILENAME_LEN_MAX - ".ext".len() - 1);
    expected.push_str(".ext");
    let mut input = PathBuf::from(input);
    input.shorten_filename();
    let output = input;
    assert_eq!(OsString::from(expected), output);
}
#[test]
fn test_disassemble_filename() {
use crate::filename::NotePath;
let expected = (
"1_2_3",
"my_title--my_subtitle(1).md",
"my_title--my_subtitle",
Some(1),
"md",
);
let p = Path::new("/my/dir/1_2_3-my_title--my_subtitle(1).md");
let result = p.disassemble();
assert_eq!(result, expected);
let expected = (
"1_2_3",
"my_title--my_subtitle(1)-(9).md",
"my_title--my_subtitle(1)",
Some(9),
"md",
);
let p = Path::new("/my/dir/1_2_3-my_title--my_subtitle(1)-(9).md");
let result = p.disassemble();
assert_eq!(result, expected);
let expected = (
"2021.04.12",
"my_title--my_subtitle(1).md",
"my_title--my_subtitle",
Some(1),
"md",
);
let p = Path::new("/my/dir/2021.04.12-my_title--my_subtitle(1).md");
let result = p.disassemble();
assert_eq!(result, expected);
let expected = (
"",
"2021 04 12 my_title--my_subtitle(1).md",
"2021 04 12 my_title--my_subtitle",
Some(1),
"md",
);
let p = Path::new("/my/dir/2021 04 12 my_title--my_subtitle(1).md");
let result = p.disassemble();
assert_eq!(result, expected);
let expected = ("2021-04-12", "", "", None, "");
let p = Path::new("/my/dir/2021-04-12-");
let result = p.disassemble();
assert_eq!(result, expected);
// This triggers the bug fixed with v1.14.3.
let expected = ("2021-04-12", ".dotfile", ".dotfile", None, "");
let p = Path::new("/my/dir/2021-04-12-'.dotfile");
let result = p.disassemble();
assert_eq!(result, expected);
let expected = ("2021-04-12", "(9).md", "", Some(9), "md");
let p = Path::new("/my/dir/2021-04-12-(9).md");
let result = p.disassemble();
assert_eq!(result, expected);
let expected = (
"20221030",
"Some.pdf--Note.md",
"Some.pdf--Note",
None,
"md",
);
let p = Path::new("/my/dir/20221030-Some.pdf--Note.md");
let result = p.disassemble();
assert_eq!(result, expected);
let expected = (
"1_2_3",
"my_title--my_subtitle(1).md",
"my_title--my_subtitle",
Some(1),
"md",
);
let p = Path::new("/my/dir/1_2_3-my_title--my_subtitle(1).md");
let result = p.disassemble();
assert_eq!(result, expected);
let expected = (
"1_2_3",
"123 my_title--my_subtitle(1).md",
"123 my_title--my_subtitle",
Some(1),
"md",
);
let p = Path::new("/my/dir/1_2_3-123 my_title--my_subtitle(1).md");
let result = p.disassemble();
assert_eq!(result, expected);
let expected = (
"1_2_3-123",
"My_title--my_subtitle(1).md",
"My_title--my_subtitle",
Some(1),
"md",
);
let p = Path::new("/my/dir/1_2_3-123-My_title--my_subtitle(1).md");
let result = p.disassemble();
assert_eq!(result, expected);
let expected = (
"1_2_3",
"123-my_title--my_subtitle(1).md",
"123-my_title--my_subtitle",
Some(1),
"md",
);
let p = Path::new("/my/dir/1_2_3-'123-my_title--my_subtitle(1).md");
let result = p.disassemble();
assert_eq!(result, expected);
let expected = (
"1_2_3",
"123 my_title--my_subtitle(1).md",
"123 my_title--my_subtitle",
Some(1),
"md",
);
let p = Path::new("/my/dir/1_2_3-123 my_title--my_subtitle(1).md");
let result = p.disassemble();
assert_eq!(result, expected);
let expected = (
"1_2_3",
"my_title--my_subtitle(1).md",
"my_title--my_subtitle",
Some(1),
"md",
);
let p = Path::new("/my/dir/1_2_3-my_title--my_subtitle(1).md");
let result = p.disassemble();
assert_eq!(expected, result);
let expected = (
"1a2b3ab",
"my_title--my_subtitle(1).md",
"my_title--my_subtitle",
Some(1),
"md",
);
let p = Path::new("/my/dir/1a2b3ab-my_title--my_subtitle(1).md");
let result = p.disassemble();
assert_eq!(expected, result);
let expected = (
"",
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | true |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-lib/src/front_matter.rs | tpnote-lib/src/front_matter.rs | //! Creates a memory representation of the note's YAML header.
//! In this documentation, the terms “YAML header”, ”header” and ”front matter"
//! are used as synonyms for the note's meta data block at the beginning
//! of the text file. Technically this is a wrapper around a `tera::Map`.
use crate::error::NoteError;
use crate::error::FRONT_MATTER_ERROR_MAX_LINES;
use std::ops::Deref;
use std::ops::DerefMut;
use std::str;
use tera::Value;
/// Represents the front matter of the note: the YAML metadata block
/// between the `---` marker lines at the beginning of the note file.
/// This is a newtype for `tera::Map<String, tera::Value>`.
#[derive(Debug, Eq, PartialEq)]
pub struct FrontMatter(pub tera::Map<String, tera::Value>);
/// Helper function asserting that all the leaves of `val` satisfy the
/// predicate `f`.
///
/// Arrays and objects are traversed recursively; `f` is evaluated for
/// every non-container leaf. Returns `true` when `f` holds for every
/// leaf — in particular for empty arrays and objects, which have no
/// leaves. Evaluation short-circuits on the first failing leaf.
pub(crate) fn all_leaves(val: &Value, f: &dyn Fn(&Value) -> bool) -> bool {
    match val {
        // Recurse into containers. `f` is already a `&dyn Fn`, so it is
        // passed on directly (the original re-borrowed it as `&f`).
        Value::Array(a) => a.iter().all(|v| all_leaves(v, f)),
        Value::Object(map) => map.values().all(|v| all_leaves(v, f)),
        // A leaf: apply the predicate.
        _ => f(val),
    }
}
impl TryFrom<&str> for FrontMatter {
    type Error = NoteError;
    /// Deserializes the YAML front matter of a note file into a
    /// `FrontMatter` map. An empty header yields an empty `tera::Map`;
    /// this is not an error. On malformed YAML the returned error
    /// carries a numbered listing of the offending header lines.
    fn try_from(header: &str) -> Result<FrontMatter, NoteError> {
        match serde_yaml::from_str::<tera::Map<String, tera::Value>>(header) {
            Ok(map) => Ok(FrontMatter(map)),
            Err(e) => {
                // Build a numbered excerpt of the header for the error
                // message, truncated to `FRONT_MATTER_ERROR_MAX_LINES`.
                let mut numbered = String::new();
                for (idx, line) in header.lines().take(FRONT_MATTER_ERROR_MAX_LINES).enumerate() {
                    numbered.push_str(&format!("{:03}: {}\n", idx + 1, line));
                }
                Err(NoteError::InvalidFrontMatterYaml {
                    front_matter: numbered,
                    source_error: e,
                })
            }
        }
    }
}
/// Auto dereferences for convenient read access to the inner
/// `tera::Map`, e.g. `front_matter.get("title")`.
impl Deref for FrontMatter {
    type Target = tera::Map<String, tera::Value>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
/// Auto dereferences for convenient write access to the inner
/// `tera::Map`, e.g. `front_matter.insert(...)`.
impl DerefMut for FrontMatter {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
#[cfg(test)]
mod tests {
    use crate::config::TMPL_VAR_FM_ALL;
    use crate::front_matter::FrontMatter;
    use serde_json::json;
    use tera::Value;

    /// Deserializing a YAML header must preserve the scalar types:
    /// strings stay strings, numbers stay numbers, booleans and arrays
    /// keep their shape.
    #[test]
    fn test_deserialize() {
        use super::FrontMatter;
        use serde_json::json;
        // The literal's lines start at column 0 so they parse as
        // top-level YAML keys; the first line is a YAML comment.
        let input = "# document start
title: The book
subtitle: you always wanted
author: It's me
date: 2020-04-21
lang: en
revision: '1.0'
sort_tag: 20200420-21_22
file_ext: md
height: 1.23
count: 2
neg: -1
flag: true
numbers:
- 1
- 3
- 5
";
        let mut expected = tera::Map::new();
        expected.insert("title".to_string(), Value::String("The book".to_string()));
        expected.insert(
            "subtitle".to_string(),
            Value::String("you always wanted".to_string()),
        );
        expected.insert("author".to_string(), Value::String("It\'s me".to_string()));
        expected.insert("date".to_string(), Value::String("2020-04-21".to_string()));
        expected.insert("lang".to_string(), Value::String("en".to_string()));
        // The single-quoted YAML scalar '1.0' must stay a string.
        expected.insert("revision".to_string(), Value::String("1.0".to_string()));
        expected.insert(
            "sort_tag".to_string(),
            Value::String("20200420-21_22".to_string()),
        );
        expected.insert("file_ext".to_string(), Value::String("md".to_string()));
        expected.insert("height".to_string(), json!(1.23)); // Number()
        expected.insert("count".to_string(), json!(2)); // Number()
        expected.insert("neg".to_string(), json!(-1)); // Number()
        expected.insert("flag".to_string(), json!(true)); // Bool()
        expected.insert("numbers".to_string(), json!([1, 3, 5])); // Array()
        let expected_front_matter = FrontMatter(expected);
        assert_eq!(expected_front_matter, FrontMatter::try_from(input).unwrap());
    }

    /// Inserting front matter into a `Context` must produce the same
    /// result as a manually built context whose keys carry the `fm_`
    /// prefix and are collected under `TMPL_VAR_FM_ALL`.
    #[test]
    fn test_register_front_matter() {
        use super::FrontMatter;
        use crate::context::Context;
        use serde_json::json;
        use std::path::Path;
        use tera::Value;
        // The front matter as read from the note file.
        let mut tmp = tera::Map::new();
        tmp.insert("file_ext".to_string(), Value::String("md".to_string())); // String
        tmp.insert("height".to_string(), json!(1.23)); // Number()
        tmp.insert("count".to_string(), json!(2)); // Number()
        tmp.insert("neg".to_string(), json!(-1)); // Number()
        tmp.insert("flag".to_string(), json!(true)); // Bool()
        tmp.insert("numbers".to_string(), json!([1, 3, 5])); // Array([Numbers()..])!
        // The same map with the `fm_` prefix, as expected in the context.
        let mut tmp2 = tera::Map::new();
        tmp2.insert("fm_file_ext".to_string(), Value::String("md".to_string())); // String
        tmp2.insert("fm_height".to_string(), json!(1.23)); // Number()
        tmp2.insert("fm_count".to_string(), json!(2)); // Number()
        tmp2.insert("fm_neg".to_string(), json!(-1)); // Number()
        tmp2.insert("fm_flag".to_string(), json!(true)); // Bool()
        tmp2.insert("fm_numbers".to_string(), json!([1, 3, 5])); // Array([Numbers()..])!
        let input1 = Context::from(Path::new("a/b/test.md")).unwrap();
        let input2 = FrontMatter(tmp);
        let mut expected = Context::from(Path::new("a/b/test.md")).unwrap();
        // NOTE(review): removing and re-inserting the same value is a
        // no-op; presumably a leftover from an earlier conversion to
        // `String` — confirm.
        tmp2.remove("fm_numbers");
        tmp2.insert("fm_numbers".to_string(), json!([1, 3, 5])); // String()!
        let tmp2 = tera::Value::from(tmp2);
        expected.insert(TMPL_VAR_FM_ALL, &tmp2); // Map()
        let expected = expected.insert_front_matter(&FrontMatter::try_from("").unwrap());
        let result = input1.insert_front_matter(&input2);
        assert_eq!(result, expected);
    }

    /// The header extracted from a `Content` (BOM skipped, text between
    /// the `---` markers) must parse into a `FrontMatter` map.
    #[test]
    fn test_try_from_content() {
        use crate::content::Content;
        use crate::content::ContentString;
        use serde_json::json;
        // Create existing note.
        let raw = "\u{feff}---\ntitle: \"My day\"\nsubtitle: \"Note\"\n---\nBody";
        let content = ContentString::from_string(raw.to_string(), "doc".to_string());
        assert!(!content.is_empty());
        assert!(!content.borrow_dependent().header.is_empty());
        let front_matter = FrontMatter::try_from(content.header()).unwrap();
        assert_eq!(front_matter.get("title"), Some(&json!("My day")));
        assert_eq!(front_matter.get("subtitle"), Some(&json!("Note")));
    }

    /// `all_leaves` must recurse into arrays and objects and report
    /// whether every leaf satisfies the predicate.
    #[test]
    fn test_all_leaves() {
        use super::all_leaves;
        // All leaves are strings: predicate holds.
        let input = json!({
            "first": "tmp: test",
            "second": [
                "string(a)",
                "string(b)"
            ],});
        assert!(all_leaves(&input, &|v| matches!(v, Value::String(..))));
        // One leaf is a number: predicate fails.
        let input = json!({
            "first": "tmp: test",
            "second": [
                1234,
                "string(b)"
            ],});
        assert!(!all_leaves(&input, &|v| matches!(v, Value::String(..))));
        // One leaf is a boolean: predicate fails.
        let input = json!({
            "first": "tmp: test",
            "second": [
                "string(a)",
                false
            ],});
        assert!(!all_leaves(&input, &|v| matches!(v, Value::String(..))));
        // A compound predicate: non-empty strings only.
        let input = json!({
            "first": "tmp: test",
            "second": [
                "string(a)",
                "string(b)"
            ],});
        assert!(all_leaves(&input, &|v| matches!(v, Value::String(..))
            && v.as_str() != Some("")));
        // An empty string leaf makes the compound predicate fail.
        let input = json!({
            "first": "tmp: test",
            "second": [
                "string(a)",
                ""
            ],});
        assert!(!all_leaves(&input, &|v| matches!(v, Value::String(..))
            && v.as_str() != Some("")));
    }
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-lib/src/markup_language.rs | tpnote-lib/src/markup_language.rs | //! Helper functions dealing with markup languages.
use crate::config::LIB_CFG;
use crate::error::NoteError;
#[cfg(feature = "renderer")]
use crate::highlight::SyntaxPreprocessor;
#[cfg(feature = "renderer")]
use crate::html2md::convert_html_to_md;
use crate::settings::SETTINGS;
use parse_hyperlinks::renderer::text_links2html;
use parse_hyperlinks::renderer::text_rawlinks2html;
#[cfg(feature = "renderer")]
use pulldown_cmark::{Options, Parser, html};
#[cfg(feature = "renderer")]
use rst_parser;
#[cfg(feature = "renderer")]
use rst_renderer;
use serde::{Deserialize, Serialize};
use std::path::Path;
#[cfg(feature = "renderer")]
use std::str::from_utf8;
/// HTML tags removed by the `filter_tags()` filter after converting a
/// document to Markdown. Opening tags are matched by prefix, so
/// `<span ...>` and `<div ...>` with attributes are covered as well.
#[cfg(test)] // Currently the `filter_tags()` filter is not used in the code.
#[cfg(feature = "renderer")]
const FILTERED_TAGS: &[&str; 4] = &["<span", "</span>", "<div", "</div>"];
/// Available converters for converting the input from standard input or
/// the clipboard to the target markup language.
#[non_exhaustive]
#[derive(Default, Debug, Hash, Clone, Eq, PartialEq, Deserialize, Serialize, Copy)]
pub enum InputConverter {
    /// Convert from HTML to Markdown.
    ToMarkdown,
    /// Do not convert, return an error instead.
    #[default]
    Disabled,
    /// Do not convert, just pass through wrapped in `Ok()`.
    PassThrough,
}
impl InputConverter {
    /// Returns a function that implements the `InputConverter` looked up
    /// in the `extensions` table of the current scheme for the given file
    /// `extension`.
    /// When `extension` is not listed, the default converter
    /// (`Disabled`) applies and the returned function yields
    /// `NoteError::HtmlToMarkupDisabled`.
    #[inline]
    pub(crate) fn build(extension: &str) -> fn(String) -> Result<String, NoteError> {
        let settings = SETTINGS.read_recursive();
        let scheme = &LIB_CFG.read_recursive().scheme[settings.current_scheme];
        // Look up the converter registered for this extension; fall back
        // to the default (`Disabled`) when the extension is unknown.
        // (`InputConverter` is `Copy`, so the found value is returned
        // directly.)
        let input_converter = scheme
            .filename
            .extensions
            .iter()
            .find(|e| e.0 == *extension)
            .map(|e| e.1)
            .unwrap_or_default();
        match input_converter {
            #[cfg(feature = "renderer")]
            InputConverter::ToMarkdown => |s| convert_html_to_md(&s),
            InputConverter::Disabled => {
                |_: String| -> Result<String, NoteError> { Err(NoteError::HtmlToMarkupDisabled) }
            }
            // `PassThrough` (and, without the `renderer` feature, also
            // `ToMarkdown`): forward the input wrapped in `Ok`.
            _ => Ok,
        }
    }
    /// Filters the `FILTERED_TAGS`, e.g. `<span...>`, `</span>`, `<div...>`
    /// and `</div>` in `text`. Text content and all other tags are kept.
    /// Contract: the input substring `...` does not contain the characters
    /// `>` or `\n`.
    #[cfg(test)] // Currently the `filter_tags()` filter is not used in the code.
    #[cfg(feature = "renderer")]
    fn filter_tags(text: String) -> String {
        let mut res = String::new();
        let mut i = 0;
        // `start` and `end` are offsets relative to `i`, the position up
        // to which the input has been consumed.
        while let Some(mut start) = text[i..].find('<') {
            if let Some(mut end) = text[i + start..].find('>') {
                end += 1;
                // Move on if there is another opening bracket: only the
                // innermost `<...>` span is considered a tag.
                if let Some(new_start) = text[i + start + 1..i + start + end].rfind('<') {
                    start += new_start + 1;
                    end -= new_start + 1;
                }
                // Is this a tag listed in `FILTERED_TAGS`?
                let filter_tag = FILTERED_TAGS
                    .iter()
                    .any(|&pat| text[i + start..i + start + end].starts_with(pat));
                if filter_tag {
                    // Copy the text before the tag, drop the tag itself.
                    res.push_str(&text[i..i + start]);
                } else {
                    // Keep the tag verbatim.
                    res.push_str(&text[i..i + start + end]);
                };
                i = i + start + end;
            } else {
                // A `<` without closing `>`: keep it and continue after it.
                res.push_str(&text[i..i + start + 1]);
                i = i + start + 1;
            }
        }
        if i > 0 {
            // Append the remainder after the last processed tag.
            res.push_str(&text[i..]);
            if res != text {
                log::trace!("`html_to_markup` filter: removed tags in \"{}\"", text);
            }
            res
        } else {
            // No `<` found at all: return the input unchanged.
            text
        }
    }
}
/// The Markup language of the note content.
#[non_exhaustive]
#[derive(Default, Debug, Hash, Clone, Eq, PartialEq, Deserialize, Serialize, Copy)]
pub enum MarkupLanguage {
    Markdown,
    ReStructuredText,
    Html,
    PlainText,
    /// The markup language is known, but the renderer is disabled.
    RendererDisabled,
    /// This is a Tp-Note file, but we are not able to determine the
    /// MarkupLanguage at this point.
    // NOTE(review): the variant name is misspelled ("Unkown"); renaming
    // it would break the public API, so it is kept as is.
    Unkown,
    /// This is not a Tp-Note file.
    #[default]
    None,
}
impl MarkupLanguage {
    /// If `Self` is `None` return `rhs`, otherwise return `Self`.
    /// Mirrors `Option::or` for this enum.
    pub fn or(self, rhs: Self) -> Self {
        match self {
            MarkupLanguage::None => rhs,
            _ => self,
        }
    }
    /// Returns the MIME type for all markup languages of Tp-Note files.
    /// For `MarkupLanguage::None` this returns `None`.
    // NOTE(review): the method name is misspelled ("mine_type");
    // renaming it to `mime_type` would break the public API, so the
    // name is kept as is.
    pub fn mine_type(&self) -> Option<&'static str> {
        match self {
            // Bug fix: was the typo "text/markodwn". The registered MIME
            // type for Markdown is "text/markdown" (RFC 7763).
            Self::Markdown => Some("text/markdown"),
            // NOTE(review): unlike the other entries this lacks a
            // "text/" prefix; confirm whether "text/x-rst" is intended.
            Self::ReStructuredText => Some("x-rst"),
            Self::Html => Some("text/html"),
            Self::PlainText => Some("text/plain"),
            Self::RendererDisabled => Some("text/plain"),
            Self::Unkown => Some("text/plain"),
            _ => None,
        }
    }
    /// As we identify a markup language by the file's extension, we
    /// can also tell, in case `Markuplanguage::from(ext).is_some()`,
    /// that a file with the extension `ext` is a Tp-Note file.
    pub fn is_some(&self) -> bool {
        !matches!(self, Self::None)
    }
    /// As we identify a markup language by the file's extension, we
    /// can also tell, in case `Markuplanguage::from(ext).is_none()`,
    /// that a file with the extension `ext` is NOT a Tp-Note file.
    pub fn is_none(&self) -> bool {
        matches!(self, Self::None)
    }
    /// Every `MarkupLanguage` variant has an own internal HTML renderer:
    /// * `Markdown` is rendered according the "CommonMark" standard.
    /// * Currently only as small subset of ReStructuredText is rendered for
    ///   `ReStructuredText`. This feature is experimental.
    /// * The `Html` renderer simply forwards the input without modification.
    /// * `PlainText` is rendered as raw text. Hyperlinks in Markdown,
    ///   ReStructuredText, AsciiDoc and WikiText syntax are detected and
    ///   are displayed in the rendition with their link text. All hyperlinks
    ///   are clickable.
    /// * `Unkown` is rendered like `PlainText`, hyperlinks are also
    ///   clickable, but they are displayed as they appear in the input.
    /// * For the variant `None` the result is always the empty string
    ///   whatever the input may be.
    pub fn render(&self, input: &str) -> String {
        match self {
            #[cfg(feature = "renderer")]
            Self::Markdown => {
                // Set up options and parser. Besides the CommonMark standard
                // we enable some useful extras.
                let options = Options::all();
                let parser = Parser::new_ext(input, options);
                let parser = SyntaxPreprocessor::new(parser);
                // Write to String buffer.
                let mut html_output: String = String::with_capacity(input.len() * 3 / 2);
                html::push_html(&mut html_output, parser);
                html_output
            }
            #[cfg(feature = "renderer")]
            Self::ReStructuredText => {
                // Note, that the current ReStructuredText renderer requires
                // files to end with no new line.
                let rest_input = input.trim();
                // Write to String buffer.
                let mut html_output: Vec<u8> = Vec::with_capacity(rest_input.len() * 3 / 2);
                const STANDALONE: bool = false; // Don't wrap in `<!doctype html><html></html>`.
                rst_parser::parse(rest_input.trim_start())
                    .map(|doc| rst_renderer::render_html(&doc, &mut html_output, STANDALONE))
                    .map_or_else(
                        |e| NoteError::RstParse { msg: e.to_string() }.to_string(),
                        |_| from_utf8(&html_output).unwrap_or_default().to_string(),
                    )
            }
            Self::Html => input.to_string(),
            Self::PlainText | Self::RendererDisabled => text_links2html(input),
            Self::Unkown => text_rawlinks2html(input),
            // When the `renderer` feature is disabled, `Markdown` and
            // `ReStructuredText` also end up here: empty rendition.
            _ => String::new(),
        }
    }
}
impl From<&Path> for MarkupLanguage {
    /// Determines the `MarkupLanguage` from the file extension at the
    /// end of `path` by looking it up in `file.extensions`. Only the
    /// extension of `Path` is considered here; a missing or non-UTF-8
    /// extension is treated as the empty string.
    #[inline]
    fn from(path: &Path) -> Self {
        let file_extension = path
            .extension()
            .and_then(|ext| ext.to_str())
            .unwrap_or_default();
        Self::from(file_extension)
    }
}
impl From<&str> for MarkupLanguage {
    /// Looks up `file_extension` in the `filename.extensions` table of
    /// the current scheme and returns the registered `MarkupLanguage`.
    /// Unknown extensions map to `MarkupLanguage::None`.
    #[inline]
    fn from(file_extension: &str) -> Self {
        let scheme = &LIB_CFG.read_recursive().scheme[SETTINGS.read_recursive().current_scheme];
        // Idiomatic lookup instead of a manual loop; `MarkupLanguage` is
        // `Copy`, so the found value is returned directly.
        scheme
            .filename
            .extensions
            .iter()
            .find(|e| e.0 == file_extension)
            .map(|e| e.2)
            .unwrap_or(MarkupLanguage::None)
    }
}
#[cfg(test)]
mod tests {
    use super::InputConverter;
    use super::MarkupLanguage;
    use std::path::Path;

    /// The markup language is determined by the file extension only.
    #[test]
    fn test_markuplanguage_from() {
        // A path with the extension `md` maps to Markdown.
        let path = Path::new("/dir/file.md");
        assert_eq!(MarkupLanguage::from(path), MarkupLanguage::Markdown);
        // A path whose file name is `md` has no extension at all.
        let path = Path::new("md");
        assert_eq!(MarkupLanguage::from(path), MarkupLanguage::None);
        // A full path given as `&str` is not a valid extension.
        let ext = "/dir/file.md";
        assert_eq!(MarkupLanguage::from(ext), MarkupLanguage::None);
        // Plain extension strings are looked up directly.
        let ext = "md";
        assert_eq!(MarkupLanguage::from(ext), MarkupLanguage::Markdown);
        //
        let ext = "rst";
        assert_eq!(MarkupLanguage::from(ext), MarkupLanguage::ReStructuredText);
    }

    /// The internal renderers produce the expected HTML hyperlinks.
    #[test]
    fn test_markuplanguage_render() {
        // Markdown
        let input = "[Link text](https://domain.invalid/)";
        let expected: &str = "<p><a href=\"https://domain.invalid/\">Link text</a></p>\n";
        let result = MarkupLanguage::Markdown.render(input);
        assert_eq!(result, expected);
        // ReStructuredText
        let input = "`Link text <https://domain.invalid/>`_";
        let expected: &str = "<p><a href=\"https://domain.invalid/\">Link text</a></p>";
        let result = MarkupLanguage::ReStructuredText.render(input);
        assert_eq!(result, expected);
    }

    /// The converter built for the `md` extension turns HTML into
    /// Markdown.
    #[test]
    fn test_input_converter_md() {
        let ic = InputConverter::build("md");
        // `<div>` and `<span>` wrappers are dropped, text is kept.
        let input: &str =
            "<div id=\"videopodcast\">outside <span id=\"pills\">inside</span>\n</div>";
        let expected: &str = "outside inside";
        let result = ic(input.to_string());
        assert_eq!(result.unwrap(), expected);
        // A plain hyperlink.
        let input: &str = r#"<p><a href="/my_uri">link</a></p>"#;
        let expected: &str = "[link](/my_uri)";
        let result = ic(input.to_string());
        assert_eq!(result.unwrap(), expected);
        //
        // A destination with a space must be wrapped in angle brackets.
        // [CommonMark: Example 489](https://spec.commonmark.org/0.31.2/#example-489)
        let input: &str = r#"<p><a href="/my uri">link</a></p>"#;
        let expected: &str = "[link](</my uri>)";
        let result = ic(input.to_string());
        assert_eq!(result.unwrap(), expected);
        //
        // A percent-encoded space is decoded and wrapped likewise.
        // [CommonMark: Example 489](https://spec.commonmark.org/0.31.2/#example-489)
        let input: &str = r#"<p><a href="/my%20uri">link</a></p>"#;
        let expected: &str = "[link](</my uri>)";
        let result = ic(input.to_string());
        assert_eq!(result.unwrap(), expected);
        //
        // We want ATX style headers.
        let input: &str = r#"<p><h1>Title</h1></p>"#;
        let expected: &str = "# Title";
        let result = ic(input.to_string());
        assert_eq!(result.unwrap(), expected);
    }

    /// `filter_tags()` strips the listed tags but keeps other tags and
    /// all text content.
    #[test]
    fn test_filter_tags() {
        let input: &str =
            "A<div id=\"videopodcast\">out<p>side <span id=\"pills\">inside</span>\n</div>B";
        let expected: &str = "Aout<p>side inside\nB";
        let result = InputConverter::filter_tags(input.to_string());
        assert_eq!(result, expected);
        // Unmatched `<` and unknown tags pass through unchanged.
        let input: &str = "A<B<C <div>D<E<p>F<>G";
        let expected: &str = "A<B<C D<E<p>F<>G";
        let result = InputConverter::filter_tags(input.to_string());
        assert_eq!(result, expected);
    }
}
// `rewrite_rel_links=true`
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-lib/src/content.rs | tpnote-lib/src/content.rs | //! Self referencing data structures to store the note's
//! content as a raw string.
use self_cell::self_cell;
use std::fmt;
use std::fmt::Debug;
use std::fs::File;
use std::fs::OpenOptions;
use std::fs::create_dir_all;
use std::io::Write;
use std::path::Path;
use substring::Substring;
use crate::config::TMPL_VAR_DOC;
use crate::error::InputStreamError;
use crate::text_reader::read_as_string_with_crlf_suppression;
/// As all text before the header marker `"---"` is ignored, this
/// constant limits the maximum number of characters that are skipped
/// before the header starts. In other words: the header must start
/// within the first `BEFORE_HEADER_MAX_IGNORED_CHARS` characters of
/// the document.
const BEFORE_HEADER_MAX_IGNORED_CHARS: usize = 1024;
/// This trait represents Tp-Note content.
/// The content is devided into header and body.
/// The header is the YAML meta data describing the body.
/// In some cases the header might be empty, e.g. when the data comes from
/// the clipboard (the `txt_clipboard` data might come with a header).
/// The body is flat UTF-8 markup formatted text, e.g. in
/// Markdown or in ReStructuredText.
/// A special case is HTML data in the body, originating from the HTML
/// clipboard. Here, the body always starts with an HTML start tag
/// (for details see the `html::HtmlStream` trait) and the header is always
/// empty.
///
/// The trait provides cheap access to the header with `header()`, the body
/// with `body()`, and the whole raw text with `as_str()`.
/// Implementers should cache the `header()` and `body()` function results in
/// order to keep these as cheap as possible.
///
/// ```rust
/// use tpnote_lib::content::Content;
/// use tpnote_lib::content::ContentString;
/// let input = "---\ntitle: My note\n---\nMy body";
/// let c = ContentString::from_string(
/// String::from(input), "doc".to_string());
///
/// assert_eq!(c.header(), "title: My note");
/// assert_eq!(c.body(), "My body");
/// assert_eq!(c.name(), "doc");
/// assert_eq!(c.as_str(), input);
///
/// // A test without front matter leads to an empty header:
/// let c = ContentString::from_string(
/// String::from("No header"), "doc".to_string());
///
/// assert_eq!(c.header(), "");
/// assert_eq!(c.body(), "No header");
/// assert_eq!(c.name(), "doc");
/// assert_eq!(c.as_str(), "No header");
/// ```
///
/// The `Content` trait allows to plug in your own storage back end if
/// `ContentString` does not suit you. In addition to the example shown below,
/// you can overwrite `Content::open()` and `Content::save_as()` as well.
///
/// ```rust
/// use tpnote_lib::content::Content;
/// use std::string::String;
///
/// #[derive(Debug, Eq, PartialEq, Default)]
/// struct MyString(String, String);
/// impl Content for MyString {
/// /// Constructor
/// fn from_string(input: String,
/// name: String) -> Self {
/// MyString(input, name)
/// }
///
/// /// This sample implementation may be too expensive.
/// /// Better precalculate this in `Self::from()`.
/// fn header(&self) -> &str {
/// Self::split(&self.as_str()).0
/// }
/// fn body(&self) -> &str {
/// Self::split(&self.as_str()).1
/// }
/// fn name(&self) -> &str {
/// &self.1
/// }
/// }
///
/// impl AsRef<str> for MyString {
/// fn as_ref(&self) -> &str {
/// &self.0
/// }
/// }
///
/// let input = "---\ntitle: My note\n---\nMy body";
/// let s = MyString::from_string(
/// input.to_string(), "doc".to_string());
///
/// assert_eq!(s.header(), "title: My note");
/// assert_eq!(s.body(), "My body");
/// assert_eq!(s.name(), "doc");
/// assert_eq!(s.as_str(), input);
/// ```
pub trait Content: AsRef<str> + Debug + Eq + PartialEq + Default {
/// Reads the file at `path` and stores the content
/// `Content`. Possible `\r\n` are replaced by `\n`.
/// This trait has a default implementation, the empty content.
///
/// ```rust
/// use tpnote_lib::content::Content;
/// use tpnote_lib::content::ContentString;
/// use std::env::temp_dir;
///
/// // Prepare test.
/// let raw = "---\ntitle: My note\n---\nMy body";
/// let notefile = temp_dir().join("20221030-hello -- world.md");
/// let _ = std::fs::write(¬efile, raw.as_bytes());
///
/// // Start test.
/// let c = ContentString::open(¬efile).unwrap();
///
/// assert_eq!(c.header(), "title: My note");
/// assert_eq!(c.body(), "My body");
/// assert_eq!(c.name(), "doc");
/// ```
fn open(path: &Path) -> Result<Self, std::io::Error>
where
Self: Sized,
{
Ok(Self::from_string(
read_as_string_with_crlf_suppression(File::open(path)?)?,
TMPL_VAR_DOC.to_string(),
))
}
/// Constructor that parses a Tp-Note document.
/// A valid document is UTF-8 encoded and starts with an optional
/// BOM (byte order mark) followed by `---`. When the start marker
/// `---` does not follow directly the BOM, it must be prepended
/// by an empty line. In this case all text before is ignored:
/// BOM + ignored text + empty line + `---`.
/// Contract: the input string does not contain `\r\n`. If
/// it may, use `Content::from_string_with_cr()` instead.
///
/// ```rust
/// use tpnote_lib::content::Content;
/// use tpnote_lib::content::ContentString;
/// let input = "---\ntitle: My note\n---\nMy body";
/// let c = ContentString::from_string(
/// input.to_string(), "doc".to_string());
///
/// assert_eq!(c.header(), "title: My note");
/// assert_eq!(c.body(), "My body");
/// assert_eq!(c.name(), "doc");
///
/// // A test without front matter leads to an empty header:
/// let c = ContentString::from_string("No header".to_string(),
/// "doc".to_string());
///
/// assert_eq!(c.header(), "");
/// assert_eq!(c.body(), "No header");
/// assert_eq!(c.name(), "doc");
/// ```
/// Self referential. The constructor splits the content
/// in header and body and associates names to both. These names are
/// referenced in various templates.
fn from_string(input: String, name: String) -> Self;
/// Returns a reference to the inner part in between `---`.
fn header(&self) -> &str;
/// Returns the body below the second `---`.
fn body(&self) -> &str;
/// Returns the associated name exactly as it was given to the constructor.
fn name(&self) -> &str;
/// Constructor that accepts and store HTML input in the body.
/// If the HTML input does not start with `<!DOCTYPE html...>` it is
/// automatically prepended.
/// If the input starts with another DOCTYPE than HTMl, return
/// `InputStreamError::NonHtmlDoctype`.
///
/// ```rust
/// use tpnote_lib::content::Content;
/// use tpnote_lib::content::ContentString;
///
/// let c = ContentString::from_html(
/// "Some HTML content".to_string(),
/// "html_clipboard".to_string()).unwrap();
/// assert_eq!(c.header(), "");
/// assert_eq!(c.body(), "<!DOCTYPE html>Some HTML content");
/// assert_eq!(c.name(), "html_clipboard");
///
/// let c = ContentString::from_html(String::from(
/// "<!DOCTYPE html>Some HTML content"),
/// "html_clipboard".to_string()).unwrap();
/// assert_eq!(c.header(), "");
/// assert_eq!(c.body(), "<!DOCTYPE html>Some HTML content");
/// assert_eq!(c.name(), "html_clipboard");
///
/// let c = ContentString::from_html(String::from(
/// "<!DOCTYPE xml>Some HTML content"), "".to_string());
/// assert!(c.is_err());
/// ```
fn from_html(input: String, name: String) -> Result<Self, InputStreamError> {
use crate::html::HtmlString;
let input = input.prepend_html_start_tag()?;
Ok(Self::from_string(input, name))
}
/// Writes the note to disk with `new_file_path` as filename.
/// If `new_file_path` contains missing directories, they will be
/// created on the fly.
///
/// ```rust
/// use std::path::Path;
/// use std::env::temp_dir;
/// use std::fs;
/// use tpnote_lib::content::Content;
/// use tpnote_lib::content::ContentString;
/// let c = ContentString::from_string(
/// String::from("prelude\n\n---\ntitle: My note\n---\nMy body"),
/// "doc".to_string()
/// );
/// let outfile = temp_dir().join("mynote.md");
/// #[cfg(not(target_family = "windows"))]
/// let expected = "\u{feff}prelude\n\n---\ntitle: My note\n---\nMy body\n";
/// #[cfg(target_family = "windows")]
/// let expected = "\u{feff}prelude\r\n\r\n---\r\ntitle: My note\r\n---\r\nMy body\r\n";
///
/// c.save_as(&outfile).unwrap();
/// let result = fs::read_to_string(&outfile).unwrap();
///
/// assert_eq!(result, expected);
/// fs::remove_file(&outfile);
/// ```
fn save_as(&self, new_file_path: &Path) -> Result<(), std::io::Error> {
// Create missing directories, if there are any.
create_dir_all(new_file_path.parent().unwrap_or_else(|| Path::new("")))?;
let mut outfile = OpenOptions::new()
.write(true)
.create(true)
.truncate(true)
.open(new_file_path)?;
log::trace!("Creating file: {:?}", new_file_path);
write!(outfile, "\u{feff}")?;
for l in self.as_str().lines() {
write!(outfile, "{}", l)?;
#[cfg(target_family = "windows")]
write!(outfile, "\r")?;
writeln!(outfile)?;
}
Ok(())
}
/// Accesses the whole content with all `---`.
/// Contract: The content does not contain any `\r\n`.
/// If your content contains `\r\n` use the
/// `from_string_with_cr()` constructor.
/// Possible BOM at the first position is not returned.
fn as_str(&self) -> &str {
self.as_ref().trim_start_matches('\u{feff}')
}
/// True if the header and body is empty.
///
/// ```rust
/// use tpnote_lib::content::Content;
/// use tpnote_lib::content::ContentString;
///
/// let c = ContentString::default();
/// assert_eq!(c.header(), "");
/// assert_eq!(c.body(), "");
/// assert!(c.is_empty());
///
/// let c = ContentString::from_string(
/// "".to_string(),
/// "doc".to_string(),
/// );
/// assert_eq!(c.header(), "");
/// assert_eq!(c.body(), "");
/// assert!(c.is_empty());
///
/// let c = ContentString::from_string(
/// "Some content".to_string(),
/// "doc".to_string(),
/// );
/// assert_eq!(c.header(), "");
/// assert_eq!(c.body(), "Some content");
/// assert!(!c.is_empty());
///
/// let c = ContentString::from_html(
/// "".to_string(),
/// "doc".to_string(),
/// ).unwrap();
/// assert_eq!(c.header(), "");
/// assert_eq!(c.body(), "<!DOCTYPE html>");
/// assert!(c.is_empty());
///
/// let c = ContentString::from_html(
/// "Some HTML content".to_string(),
/// "doc".to_string(),
/// ).unwrap();
/// assert_eq!(c.header(), "");
/// assert_eq!(c.body(), "<!DOCTYPE html>Some HTML content");
/// assert!(!c.is_empty());
///
///
/// let c = ContentString::from_string(
/// String::from("---\ntitle: My note\n---\n"),
/// "doc".to_string(),
/// );
/// assert_eq!(c.header(), "title: My note");
/// assert_eq!(c.body(), "");
/// assert!(!c.is_empty());
/// ```
fn is_empty(&self) -> bool {
// Bring methods into scope. Overwrites `is_empty()`.
use crate::html::HtmlStr;
// `.is_empty_html()` is true for `""` or only `<!DOCTYPE html...>`.
self.header().is_empty() && (self.body().is_empty_html())
}
/// Helper function that splits the content into header and body.
/// The header, if present, is trimmed (`trim()`), the body
/// is kept as it is.
/// Any BOM (byte order mark) at the beginning is ignored.
///
/// 1. Ignore `\u{feff}` if present
/// 2. Ignore `---\n` or ignore all bytes until`\n\n---\n`,
/// 3. followed by header bytes,
/// 4. optionally followed by `\n`,
/// 5. followed by `\n---\n` or `\n...\n`,
/// 6. optionally followed by some `\t` and/or some ` `,
/// 7. optionally followed by `\n`.
///
/// The remaining bytes are the "body".
///
/// Alternatively, a YAML metadata block may occur anywhere in the document, but if it is not
/// at the beginning, it must be preceded by a blank line:
/// 1. skip all text (BEFORE_HEADER_MAX_IGNORED_CHARS) until you find `"\n\n---"`
/// 2. followed by header bytes,
/// 3. same as above ...
fn split(content: &str) -> (&str, &str) {
    // Bring in scope `HtmlString`.
    use crate::html::HtmlStr;
    // Remove BOM
    let content = content.trim_start_matches('\u{feff}');
    if content.is_empty() {
        return ("", "");
    };
    // If this is HTML content, leave the header empty.
    // TODO: In the future the header might be constructed from
    // the "meta" HTML fields. Though I am not sure if something meaningful
    // can be found in HTML clipboard meta data.
    if content.has_html_start_tag() {
        return ("", content);
    }
    const HEADER_START_TAG: &str = "---";
    // `fm_start` is the byte index where the header text (after the start
    // marker) begins.
    let fm_start = if content.starts_with(HEADER_START_TAG) {
        // Found at first byte.
        HEADER_START_TAG.len()
    } else {
        // NOTE: this inner constant shadows the outer `HEADER_START_TAG`;
        // away from the document start the marker must be preceded by a
        // blank line.
        const HEADER_START_TAG: &str = "\n\n---";
        if let Some(start) = content
            .substring(0, BEFORE_HEADER_MAX_IGNORED_CHARS)
            .find(HEADER_START_TAG)
            .map(|x| x + HEADER_START_TAG.len())
        {
            // Found just before `start`!
            start
        } else {
            // Not found.
            return ("", content);
        }
    };
    // The first character after the document start marker
    // must be a whitespace.
    if !content[fm_start..]
        .chars()
        .next()
        // If none, make test fail.
        .unwrap_or('x')
        .is_whitespace()
    {
        return ("", content);
    };
    // No need to search for an additional `\n` here, as we trim the
    // header anyway.
    const HEADER_END_TAG1: &str = "\n---";
    // Contract: next pattern must have the same length!
    const HEADER_END_TAG2: &str = "\n...";
    debug_assert_eq!(HEADER_END_TAG1.len(), HEADER_END_TAG2.len());
    const TAG_LEN: usize = HEADER_END_TAG1.len();
    // `fm_end` is the byte index (relative to `content`) where the end
    // marker begins, i.e. one past the last header byte.
    let fm_end = content[fm_start..]
        .find(HEADER_END_TAG1)
        .or_else(|| content[fm_start..].find(HEADER_END_TAG2))
        .map(|x| x + fm_start);
    let fm_end = if let Some(n) = fm_end {
        n
    } else {
        return ("", content);
    };
    // We advance 4 because `"\n---"` has 4 bytes.
    let mut body_start = fm_end + TAG_LEN;
    // Skip spaces and tabs followed by one optional newline.
    // Note: ' ' and '\t' are single-byte in UTF-8, so `+= 1` is safe here.
    while let Some(c) = content[body_start..].chars().next() {
        if c == ' ' || c == '\t' {
            body_start += 1;
        } else {
            // Skip exactly one newline, if there is at least one.
            if c == '\n' {
                body_start += 1;
            }
            // Exit loop.
            break;
        };
    }
    (content[fm_start..fm_end].trim(), &content[body_start..])
}
}
#[derive(Debug, Eq, PartialEq)]
/// Pointers belonging to the self referential struct `Content`.
/// Both string slices borrow from the owning `String` kept alongside them.
pub struct ContentRef<'a> {
    /// Skip optional BOM and `"---" `in `s` until next `"---"`.
    /// When no `---` is found, this is empty.
    /// `header` is always trimmed.
    pub header: &'a str,
    /// The note's body: everything after the header block.
    /// Unlike `header`, the body is kept verbatim (not trimmed).
    pub body: &'a str,
    /// A name associated with this content. Used in templates.
    pub name: String,
}
// The `self_cell!` macro generates the boilerplate for a struct whose
// `dependent` field borrows from its `owner` field, i.e. a safe
// self-referential struct. It also generates `new()`, `borrow_owner()`
// and `borrow_dependent()` used below.
self_cell!(
    /// Holds the notes content in a string and two string slices
    /// `header` and `body`.
    /// This struct is self referential.
    /// It deals with operating system specific handling of newlines.
    /// The note's content is stored as a UTF-8 string with
    /// one `\n` character as newline. If present, a Byte Order Mark
    /// BOM is removed while reading with `new()`.
    pub struct ContentString {
        owner: String,

        #[covariant]
        dependent: ContentRef,
    }

    impl {Debug, Eq, PartialEq}
);
/// Add `header()` and `body()` implementation.
impl Content for ContentString {
    /// Constructor: splits `input` once into header and body and stores
    /// the resulting slices together with `name`.
    fn from_string(input: String, name: String) -> Self {
        ContentString::new(input, |raw: &String| {
            let (header, body) = ContentString::split(raw);
            ContentRef { header, body, name }
        })
    }

    /// Cheap access to the note's header.
    fn header(&self) -> &str {
        self.borrow_dependent().header
    }

    /// Cheap access to the note's body.
    fn body(&self) -> &str {
        self.borrow_dependent().body
    }

    /// Returns the name as given at construction.
    fn name(&self) -> &str {
        self.borrow_dependent().name.as_str()
    }
}
/// Default is the empty string.
impl Default for ContentString {
fn default() -> Self {
Self::from_string(String::new(), String::new())
}
}
/// Returns the whole raw content with header and body.
/// Possible `\r\n` in the input are replaced by `\n`.
impl AsRef<str> for ContentString {
    fn as_ref(&self) -> &str {
        // The owner holds the full raw text the slices borrow from.
        self.borrow_owner().as_str()
    }
}
/// Concatenates the header and the body and prints the content.
/// When a header is present, the output is prefixed with a BOM and the
/// header is wrapped in `---` marker lines.
impl fmt::Display for ContentRef<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.header.is_empty() {
            // No front matter: print the body as is.
            write!(f, "{}", self.body)
        } else {
            // Re-assemble the front matter block.
            write!(f, "\u{feff}---\n{}\n---\n{}", self.header, self.body)
        }
    }
}
/// Delegates the printing to `Display for ContentRef`.
impl fmt::Display for ContentString {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.borrow_dependent())
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Covers BOM handling, header extraction and the
    // `BEFORE_HEADER_MAX_IGNORED_CHARS` window of `from_string()`.
    #[test]
    fn test_from_string() {
        // Test Unix string.
        let content =
            ContentString::from_string("first\nsecond\nthird".to_string(), "doc".to_string());
        assert_eq!(content.borrow_dependent().body, "first\nsecond\nthird");
        assert_eq!(content.borrow_dependent().name, "doc");

        // Test BOM removal.
        let content = ContentString::from_string(
            "\u{feff}first\nsecond\nthird".to_string(),
            "doc".to_string(),
        );
        assert_eq!(content.borrow_dependent().body, "first\nsecond\nthird");
        assert_eq!(content.borrow_dependent().name, "doc");

        // Test header extraction.
        let content = ContentString::from_string(
            "\u{feff}---\nfirst\n---\nsecond\nthird".to_string(),
            "doc".to_string(),
        );
        assert_eq!(content.borrow_dependent().header, "first");
        assert_eq!(content.borrow_dependent().body, "second\nthird");
        assert_eq!(content.borrow_dependent().name, "doc");

        // Test header extraction without `\n` at the end.
        let content =
            ContentString::from_string("\u{feff}---\nfirst\n---".to_string(), "doc".to_string());
        assert_eq!(content.borrow_dependent().header, "first");
        assert_eq!(content.borrow_dependent().body, "");

        // Some skipped bytes.
        let content = ContentString::from_string(
            "\u{feff}ignored\n\n---\nfirst\n---".to_string(),
            "doc".to_string(),
        );
        assert_eq!(content.borrow_dependent().header, "first");
        assert_eq!(content.borrow_dependent().body, "");

        // This fails to find the header because the `---` comes to late.
        let mut s = "\u{feff}".to_string();
        s.push_str(&String::from_utf8(vec![b'X'; BEFORE_HEADER_MAX_IGNORED_CHARS]).unwrap());
        s.push_str("\n\n---\nfirst\n---\nsecond");
        let s_ = s.clone();
        let content = ContentString::from_string(s, "doc".to_string());
        assert_eq!(content.borrow_dependent().header, "");
        // `&s_[3..]` skips the 3-byte UTF-8 encoding of the BOM.
        assert_eq!(content.borrow_dependent().body, &s_[3..]);

        // This finds the header.
        let mut s = "\u{feff}".to_string();
        s.push_str(
            &String::from_utf8(vec![
                b'X';
                BEFORE_HEADER_MAX_IGNORED_CHARS - "\n\n---".len()
            ])
            .unwrap(),
        );
        s.push_str("\n\n---\nfirst\n---\nsecond");
        let content = ContentString::from_string(s, "doc".to_string());
        assert_eq!(content.borrow_dependent().header, "first");
        assert_eq!(content.borrow_dependent().body, "second");
    }

    // Exercises `split()` directly: start/end marker variants, trimming
    // rules and degenerate inputs.
    #[test]
    fn test_split() {
        // Document start marker is not followed by whitespace.
        let input_stream = String::from("---first\n---\nsecond\nthird");
        let expected = ("", "---first\n---\nsecond\nthird");
        let result = ContentString::split(&input_stream);
        assert_eq!(result, expected);

        // Document start marker is followed by whitespace.
        let input_stream = String::from("---\nfirst\n---\nsecond\nthird");
        let expected = ("first", "second\nthird");
        let result = ContentString::split(&input_stream);
        assert_eq!(result, expected);

        // Document start marker is followed by whitespace.
        let input_stream = String::from("---\tfirst\n---\nsecond\nthird");
        let expected = ("first", "second\nthird");
        let result = ContentString::split(&input_stream);
        assert_eq!(result, expected);

        // Document start marker is followed by whitespace.
        let input_stream = String::from("--- first\n---\nsecond\nthird");
        let expected = ("first", "second\nthird");
        let result = ContentString::split(&input_stream);
        assert_eq!(result, expected);

        // Header is trimmed.
        let input_stream = String::from("---\n\nfirst\n\n---\nsecond\nthird");
        let expected = ("first", "second\nthird");
        let result = ContentString::split(&input_stream);
        assert_eq!(result, expected);

        // Body is kept as it is (not trimmed).
        let input_stream = String::from("---\nfirst\n---\n\nsecond\nthird\n");
        let expected = ("first", "\nsecond\nthird\n");
        let result = ContentString::split(&input_stream);
        assert_eq!(result, expected);

        // Header end marker line is trimmed right.
        let input_stream = String::from("---\nfirst\n--- \t \n\nsecond\nthird\n");
        let expected = ("first", "\nsecond\nthird\n");
        let result = ContentString::split(&input_stream);
        assert_eq!(result, expected);

        let input_stream = String::from("\nsecond\nthird");
        let expected = ("", "\nsecond\nthird");
        let result = ContentString::split(&input_stream);
        assert_eq!(result, expected);

        let input_stream = String::from("");
        let expected = ("", "");
        let result = ContentString::split(&input_stream);
        assert_eq!(result, expected);

        let input_stream = String::from("\u{feff}\nsecond\nthird");
        let expected = ("", "\nsecond\nthird");
        let result = ContentString::split(&input_stream);
        assert_eq!(result, expected);

        let input_stream = String::from("\u{feff}");
        let expected = ("", "");
        let result = ContentString::split(&input_stream);
        assert_eq!(result, expected);

        // Multi-byte UTF-8 content must survive splitting untouched.
        let input_stream = String::from("[📽 2 videos]");
        let expected = ("", "[📽 2 videos]");
        let result = ContentString::split(&input_stream);
        assert_eq!(result, expected);

        let input_stream = "my prelude\n\n---\nmy header\n--- \nmy body\n";
        let expected = ("my header", "my body\n");
        let result = ContentString::split(input_stream);
        assert_eq!(result, expected);
    }

    // Round-trip check: `Display` must re-assemble the original text,
    // dropping any prelude bytes that preceded the header.
    #[test]
    fn test_display_for_content() {
        let expected = "\u{feff}---\nfirst\n---\n\nsecond\nthird\n".to_string();
        let input = ContentString::from_string(expected.clone(), "does not matter".to_string());
        assert_eq!(input.to_string(), expected);

        let expected = "\nsecond\nthird\n".to_string();
        let input = ContentString::from_string(expected.clone(), "does not matter".to_string());
        assert_eq!(input.to_string(), expected);

        let expected = "".to_string();
        let input = ContentString::from_string(expected.clone(), "does not matter".to_string());
        assert_eq!(input.to_string(), expected);

        let expected = "\u{feff}---\nfirst\n---\n\nsecond\nthird\n".to_string();
        let input = ContentString::from_string(
            "\u{feff}ignored\n\n---\nfirst\n---\n\nsecond\nthird\n".to_string(),
            "does not matter".to_string(),
        );
        assert_eq!(input.to_string(), expected);
    }
}
//! Extends the built-in Tera filters.
//! All custom filters check the type of their input variables at runtime and
//! throw an error if the type is other than specified.
use crate::config::FILENAME_DOTFILE_MARKER;
use crate::config::LIB_CFG;
use crate::config::Scheme;
use crate::config::TMPL_VAR_FM_;
use crate::filename::NotePath;
use crate::filename::NotePathBuf;
use crate::filename::NotePathStr;
#[cfg(feature = "lang-detection")]
use crate::lingua::get_lang;
use crate::markup_language::InputConverter;
use crate::markup_language::MarkupLanguage;
use crate::settings::SETTINGS;
use parse_hyperlinks::iterator::MarkupLink;
use parse_hyperlinks::parser::Link;
use sanitize_filename_reader_friendly::sanitize;
use std::borrow::Cow;
use std::collections::HashMap;
use std::hash::BuildHasher;
use std::path::Path;
use std::path::PathBuf;
use std::sync::LazyLock;
use tera::Map;
use tera::{Result as TeraResult, Tera, Value, try_get_value};
/// Filter parameter of the `trunc_filter()` limiting the maximum length of
/// template variables. The filter is usually used to in the note's front matter
/// as title. For example: the title should not be too long, because it will end
/// up as part of the filename when the note is saved to disk. Filenames of some
/// operating systems are limited to 255 bytes.
#[cfg(not(test))]
const TRUNC_LEN_MAX: usize = 200;
#[cfg(test)]
pub const TRUNC_LEN_MAX: usize = 10;
/// Tera object with custom functions registered.
///
/// Each filter name maps to the `*_filter` function implementing it;
/// registrations are kept in alphabetical order.
pub static TERA: LazyLock<Tera> = LazyLock::new(|| {
    let mut tera = Tera::default();
    tera.register_filter("append", append_filter);
    tera.register_filter("file_copy_counter", file_copy_counter_filter);
    tera.register_filter("file_ext", file_ext_filter);
    tera.register_filter("file_name", file_name_filter);
    tera.register_filter("file_sort_tag", file_sort_tag_filter);
    tera.register_filter("file_stem", file_stem_filter);
    tera.register_filter("find_last_created_file", find_last_created_file);
    tera.register_filter("flatten_array", flatten_array_filter);
    tera.register_filter("get_lang", get_lang_filter);
    tera.register_filter("heading", heading_filter);
    tera.register_filter("html_heading", html_heading_filter);
    tera.register_filter("html_to_markup", html_to_markup_filter);
    tera.register_filter("incr_sort_tag", incr_sort_tag_filter);
    // Fixed: `insert` was registered twice; the duplicate call silently
    // overwrote the first identical registration and has been removed.
    tera.register_filter("insert", insert_filter);
    tera.register_filter("link_dest", link_dest_filter);
    tera.register_filter("link_text", link_text_filter);
    tera.register_filter("link_text_picky", link_text_picky_filter);
    tera.register_filter("link_title", link_title_filter);
    tera.register_filter("map_lang", map_lang_filter);
    tera.register_filter("markup_to_html", markup_to_html_filter);
    tera.register_filter("name", name_filter);
    tera.register_filter("prepend", prepend_filter);
    tera.register_filter("remove", remove_filter);
    tera.register_filter("replace_empty", replace_empty_filter);
    tera.register_filter("sanit", sanit_filter);
    tera.register_filter("to_html", to_html_filter);
    tera.register_filter("to_yaml", to_yaml_filter);
    tera.register_filter("trim_file_sort_tag", trim_file_sort_tag_filter);
    tera.register_filter("trunc", trunc_filter);
    tera
});
/// A filter converting any input `tera::Value` into a `tera::Value::String(s)`
/// with `s` being the YAML representation of the object. The input can be of
/// any type, the output type is always a `Value::String()`.
/// If the input type is `tera::Value::Object`, all top level keys starting with
/// `fm_` are localized (see `fm_var.localization`).
/// When the optional parameter `key='k'` is given, the input is regarded as
/// the corresponding value to this key.
/// The optional parameter `tab=n` indents the YAML values `n` characters to
/// the right of the first character of the key by inserting additional spaces
/// between the key and the value. When `tab=n` is given, it has precedence
/// over the default value, read from the configuration file variable
/// `tmpl.filter.to_yaml_tab`.
fn to_yaml_filter<S: BuildHasher>(
    val: &Value,
    args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    let scheme = &LIB_CFG.read_recursive().scheme[SETTINGS.read_recursive().current_scheme];
    // First pass: serialize `val` to a YAML string.
    let val_yaml = if let Some(Value::String(k)) = args.get("key") {
        // Wrap the value in a one-entry map under the (localized) key.
        let mut m = tera::Map::new();
        let k = name(scheme, k);
        m.insert(k.to_owned(), val.to_owned());
        serde_yaml::to_string(&m).unwrap()
    } else {
        match &val {
            Value::Object(map) => {
                // Localize all top level keys (`fm_*`) before serializing.
                let mut m = Map::new();
                for (k, v) in map.into_iter() {
                    //
                    let new_k = name(scheme, k);
                    m.insert(new_k.to_owned(), v.to_owned());
                }
                let o = serde_json::Value::Object(m);
                serde_yaml::to_string(&o).unwrap()
            }
            // All other value types are serialized as they are.
            &oo => serde_yaml::to_string(oo).unwrap(),
        }
    };
    // Translate the empty set, into an empty string and return it.
    if val_yaml.trim_end() == "{}" {
        return Ok(tera::Value::String("".to_string()));
    }
    // Formatting: adjust indent.
    // The `tab` argument wins over the configured default
    // `tmpl.filter.to_yaml_tab`; a configured value of 0 disables indenting.
    let val_yaml: String = if let Some(tab) =
        args.get("tab").and_then(|v| v.as_u64()).or_else(|| {
            let n = scheme.tmpl.filter.to_yaml_tab;
            if n == 0 { None } else { Some(n) }
        }) {
        val_yaml
            .lines()
            .map(|l| {
                let mut insert_pos = 0;
                let mut inserts_n = 0;
                // Pad after `key: ` so the value starts at column `tab`,
                // but only when the colon belongs to an unquoted key.
                if let Some(colpos) = l.find(": ") {
                    if let Some(key_pos) = l.find(char::is_alphabetic) {
                        if key_pos < colpos
                            && l.find('\'').is_none_or(|p| p >= colpos)
                            && l.find("\"'").is_none_or(|p| p >= colpos)
                        {
                            insert_pos = colpos + ": ".len();
                            inserts_n = (tab as usize).saturating_sub(insert_pos);
                        }
                    }
                } else if l.starts_with("- ") {
                    // List items are shifted right by the full tab width.
                    inserts_n = tab as usize;
                };
                // Enlarge indent.
                let mut l = l.to_owned();
                let strut = " ".repeat(inserts_n);
                // If `insert>0`, we know that `colon_pos>0`.
                // `colon_pos+1` inserts between `: `.
                l.insert_str(insert_pos, &strut);
                l.push('\n');
                l
            })
            .collect::<String>()
    } else {
        val_yaml
    };
    let val_yaml = val_yaml.trim_end().to_owned();
    Ok(Value::String(val_yaml))
}
/// A filter that coverts a `tera::Value` tree into an HTML representation,
/// with following HTML tags:
/// * `Value::Object`: `<blockquote class="fm">` and `<div class="fm">`,
/// * `Value::Array`: `<ul class="fm">` and `<li class="fm">`,
/// * `Value::String`: no tag,
/// * Other non-string basic types: `<code class="fm">`.
///
/// The input can be of any type, the output type is `Value::String()`.
/// If the input type is `Value::Object`, all top level keys starting with
/// `fm_` are localized (see `fm_var.localization`).
/// Note: HTML templates escape HTML critical characters by default.
/// To use the `to_hmtl` filter in HTML templates, add a `safe` filter in last
/// position. This is no risk, as the `to_html` filter always escapes string
/// values automatically, regardless of the template type.
fn to_html_filter<S: BuildHasher>(
    value: &Value,
    _args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    // Recursively renders `val` into `output`. `is_root` is true only for
    // the outermost call: solely top level object keys are localized.
    // Note that string *values* are HTML-escaped, object *keys* are not.
    fn tag_to_html(val: Value, is_root: bool, output: &mut String) {
        match val {
            Value::Array(a) => {
                output.push_str("<ul class=\"fm\">");
                for i in a {
                    output.push_str("<li class=\"fm\">");
                    tag_to_html(i, false, output);
                    output.push_str("</li>");
                }
                output.push_str("</ul>");
            }

            // Leaf: escape HTML-critical characters.
            Value::String(s) => output.push_str(&html_escape::encode_text(&s)),

            Value::Object(map) => {
                output.push_str("<blockquote class=\"fm\">");
                if is_root {
                    // The read lock guards are held only for this branch.
                    let scheme =
                        &LIB_CFG.read_recursive().scheme[SETTINGS.read_recursive().current_scheme];
                    for (k, v) in map {
                        output.push_str("<div class=\"fm\">");
                        // Localized key, e.g. `fm_title` -> `title`.
                        output.push_str(name(scheme, &k));
                        output.push_str(": ");
                        tag_to_html(v, false, output);
                        output.push_str("</div>");
                    }
                } else {
                    for (k, v) in map {
                        output.push_str("<div class=\"fm\">");
                        output.push_str(&k);
                        output.push_str(": ");
                        tag_to_html(v, false, output);
                        output.push_str("</div>");
                    }
                }
                output.push_str("</blockquote>");
            }

            // Numbers, booleans and `null` are rendered via `to_string()`.
            _ => {
                output.push_str("<code class=\"fm\">");
                output.push_str(&val.to_string());
                output.push_str("</code>");
            }
        };
    }

    let mut html = String::new();
    tag_to_html(value.to_owned(), true, &mut html);

    Ok(Value::String(html))
}
/// This filter translates `fm_*` header variable names into some human
/// language. Suppose we have:
/// ```rust, ignore
/// scheme.tmpl.variables.names_assertions = []
///    `[ "fm_lang", "Sprache", [], ],
/// ]
/// ```
/// Then, the expression `'fm_lang'|name` resolves into `Sprache`.
/// For variables not listed below, only the prefix `fm_` is stripped and
/// no translation occurs, e.g. `'fm_unknown'|name` becomes `unknown`.
/// The input type must be `Value::String` and the output type is
/// `Value::String`.
fn name_filter<S: BuildHasher>(
    value: &Value,
    _args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    // Fixed: the error label previously said "translate", but this filter
    // is registered as "name"; errors now report the right filter name.
    let input = try_get_value!("name", "value", String, value);

    // This replaces the `fm`-name in the key by the localized name.
    let scheme = &LIB_CFG.read_recursive().scheme[SETTINGS.read_recursive().current_scheme];
    let output = name(scheme, &input);

    Ok(Value::String(output.to_string()))
}
/// Returns the localized header field name. For example: `fm_subtitle`
/// resolves into `Untertitel`. The configuration file variable
/// '`scheme.tmpl.variables.names_assertions`' contains the translation table.
pub(crate) fn name<'a>(scheme: &'a Scheme, input: &'a str) -> &'a str {
    let table = &scheme.tmpl.fm_var.localization;
    // Look `input` up in the translation table; when absent, fall back to
    // the bare variable name with the `fm_` prefix stripped.
    match table.iter().find(|entry| entry.0 == input) {
        Some(entry) => &entry.1,
        None => input.strip_prefix(TMPL_VAR_FM_).unwrap_or(input),
    }
}
/// A filter that converts incoming HTML into some target markup language.
/// The parameter file `extension` indicates in what Markup
/// language the input is written. When no `extension` is given, the filler
/// does not convert, it just passes through.
/// This filter only converts, if the first line of the input stream starts with
/// the pattern `<html` or `<!DOCTYPE html`.
/// In any case, the output of the converter is trimmed at the end
/// (`trim_end()`).
fn html_to_markup_filter<S: BuildHasher>(
    value: &Value,
    #[allow(unused_variables)] args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    // Bring new methods into scope.
    use crate::html::HtmlStr;

    #[allow(unused_mut)]
    let mut buffer = try_get_value!("html_to_markup", "value", String, value);

    // Fixed: the two error labels below previously said "markup_to_html";
    // they now report this filter's real name, "html_to_markup".
    let default = if let Some(default_val) = args.get("default") {
        try_get_value!("html_to_markup", "default", String, default_val)
    } else {
        String::new()
    };

    // Only convert when the first (trimmed, lowercased) line carries an
    // HTML start tag.
    let firstline = buffer
        .lines()
        .next()
        .map(|l| l.trim_start().to_ascii_lowercase());

    if firstline.is_some_and(|l| l.as_str().has_html_start_tag()) {
        let extension = if let Some(ext) = args.get("extension") {
            try_get_value!("html_to_markup", "extension", String, ext)
        } else {
            String::new()
        };
        let converter = InputConverter::build(&extension);
        buffer = match converter(buffer) {
            // An empty conversion result falls back to `default`.
            Ok(converted) if converted.is_empty() => default,
            Ok(converted) => converted,
            Err(e) => {
                log::info!("{}", e);
                default
            }
        };
    } else {
        buffer = default;
    }

    // Trim end without reallocation.
    buffer.truncate(buffer.trim_end().len());
    Ok(Value::String(buffer))
}
/// Takes the markup formatted input and renders it to HTML.
/// The parameter file `extension` indicates in what Markup
/// language the input is written.
/// When `extension` is not given or known, the renderer defaults to
/// `MarkupLanguage::Unknown`.
/// The input types must be `Value::String` and the output type is
/// `Value::String()`
fn markup_to_html_filter<S: BuildHasher>(
    value: &Value,
    args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    let input = try_get_value!("markup_to_html", "value", String, value);

    // Determine the markup language from the optional `extension` argument.
    let markup_language = match args.get("extension") {
        Some(ext) => {
            let ext = try_get_value!("markup_to_html", "extension", String, ext);
            let ml = MarkupLanguage::from(ext.as_str());
            // Unknown extensions fall back to `Unkown` (sic).
            if ml.is_some() { ml } else { MarkupLanguage::Unkown }
        }
        None => MarkupLanguage::Unkown,
    };

    // Render the markup language.
    Ok(Value::String(markup_language.render(&input)))
}
/// Adds a new filter to Tera templates:
/// `sanit` or `sanit()` sanitizes a string so that it can be used to
/// assemble filenames or paths. In addition, `sanit(alpha=true)` prepends
/// the `sort_tag.extra_separator` when the result starts with one of
/// `sort_tag.extra_chars`, usually a number. This way we guaranty that the filename
/// never starts with a number. We do not allow this, to be able to distinguish
/// reliably the sort tag from the filename. In addition to the above, the
/// filter checks if the string represents a "well-formed" filename. If it
/// is the case, and the filename starts with a dot, the file is prepended by
/// `sort_tag.extra_separator`.
/// The input type must be `Value::String` and the output type is
/// `Value::String()`
fn sanit_filter<S: BuildHasher>(
    value: &Value,
    _args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    let input = try_get_value!("sanit", "value", String, value);

    // Remember whether the input looks like a conventional dotfile name.
    let keep_leading_dot = input.starts_with(FILENAME_DOTFILE_MARKER)
        && PathBuf::from(&*input).has_wellformed_filename();

    // Sanitize string.
    let mut sanitized = sanitize(&input);

    // If the sanitizer stripped the `FILENAME_DOTFILE_MARKER`, restore it.
    if keep_leading_dot && !sanitized.starts_with(FILENAME_DOTFILE_MARKER) {
        sanitized.insert(0, FILENAME_DOTFILE_MARKER);
    }

    Ok(Value::String(sanitized))
}
/// A Tera filter that searches for the first Markdown or ReStructuredText link
/// in the input stream and returns the link's name (link text).
/// If not found, it returns the empty string.
/// The input type must be `Value::String` and the output type is
/// `Value::String()`
fn link_text_filter<S: BuildHasher>(
    value: &Value,
    _args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    let input = try_get_value!("link_text", "value", String, value);
    // `unwrap_or_default()` yields an empty link when none is found.
    let link = FirstHyperlink::from(&input).unwrap_or_default();
    Ok(Value::String(link.text.to_string()))
}
/// A Tera filter that searches for the first Markdown or ReStructuredText link
/// in the input stream and returns the link's URL.
/// If not found, it returns the empty string.
/// The input type must be `Value::String` and the output type is
/// `Value::String()`
fn link_dest_filter<S: BuildHasher>(
    value: &Value,
    _args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    let input = try_get_value!("link_dest", "value", String, value);
    // `unwrap_or_default()` yields an empty link when none is found.
    let link = FirstHyperlink::from(&input).unwrap_or_default();
    Ok(Value::String(link.dest.to_string()))
}
/// A Tera filter that searches for the first Markdown or ReStructuredText link
/// in the input stream and returns the link's text's name (link text).
/// Unlike the filter `link_dest`, it does not necessarily return the first
/// finding. For example, it skips autolinks, local links and links
/// with some URL in the link text.
/// If not found, it returns the empty string.
/// The input type must be `Value::String` and the output type is
/// `Value::String()`
fn link_text_picky_filter<S: BuildHasher>(
    value: &Value,
    _args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    let input = try_get_value!("link_text_picky", "value", String, value);
    // The picky variant skips autolinks, local links and URL-like texts.
    let link = FirstHyperlink::from_picky(&input).unwrap_or_default();
    Ok(Value::String(link.text.to_string()))
}
/// A Tera filter that searches for the first Markdown or ReStructuredText link
/// in the input stream and returns the link's title.
/// If not found, it returns the empty string.
/// The input type must be `Value::String` and the output type is
/// `Value::String()`
fn link_title_filter<S: BuildHasher>(
    value: &Value,
    _args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    let input = try_get_value!("link_title", "value", String, value);
    // `unwrap_or_default()` yields an empty link when none is found.
    let link = FirstHyperlink::from(&input).unwrap_or_default();
    Ok(Value::String(link.title.to_string()))
}
/// A Tera filter that searches for the first HTML heading
/// in the HTML input stream and returns the heading text.
/// If not found, it returns the empty string.
/// The input type must be `Value::String` and the output type is
/// `Value::String()`
fn html_heading_filter<S: BuildHasher>(
    value: &Value,
    _args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    let html = try_get_value!("html_heading", "value", String, value);
    // `FirstHtmlHeading` defaults to an empty heading when none is found.
    let heading = FirstHtmlHeading::from(&html).unwrap_or_default();
    Ok(Value::String(heading.0.to_string()))
}
/// A Tera filter that truncates the input stream and returns at most
/// `TRUNC_LEN_MAX` bytes of valid UTF-8.
/// The input type must be `Value::String` and the output type is
/// `Value::String()`
fn trunc_filter<S: BuildHasher>(
    value: &Value,
    _args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    let input = try_get_value!("trunc", "value", String, value);
    // Fixed an off-by-one: the previous loop iterated `(0..TRUNC_LEN_MAX)`,
    // so it could never return more than `TRUNC_LEN_MAX - 1` bytes,
    // contradicting the documented limit.
    // Start at the byte limit (or the input length, whichever is smaller)
    // and back up until we sit on a UTF-8 character boundary; at most 3
    // steps, and `is_char_boundary(0)` is always true, so no underflow.
    let mut end = TRUNC_LEN_MAX.min(input.len());
    while !input.is_char_boundary(end) {
        end -= 1;
    }
    Ok(Value::String(input[..end].to_owned()))
}
/// A Tera filter that returns the first line or the first sentence of the input
/// stream.
/// The input type must be `Value::String` and the output type is
/// `Value::String()`
fn heading_filter<S: BuildHasher>(
    value: &Value,
    _args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    let text = try_get_value!("heading", "value", String, value);
    let text = text.trim_start();

    // The heading ends at the earliest occurrence of any sentence or
    // paragraph terminator. A sentence can finish with `. `, `.\n` or
    // `.\r\n` (on Windows); `!` and `?` end it too, as does a blank line.
    const TERMINATORS: [&str; 7] = [". ", ".\n", ".\r\n", "!", "?", "\n\n", "\r\n\r\n"];
    let end = TERMINATORS
        .iter()
        .filter_map(|pat| text.find(pat))
        .min()
        .unwrap_or(text.len());

    Ok(Value::String(text[..end].to_string()))
}
/// A Tera filter that takes a path and extracts the tag of the filename.
/// The input type must be `Value::String` and the output type is
/// `Value::String()`
fn file_sort_tag_filter<S: BuildHasher>(
    value: &Value,
    _args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    let path = PathBuf::from(try_get_value!("file_sort_tag", "value", String, value));
    // `disassemble()` yields (sort_tag, filename, stem, copy_counter, ext).
    let (sort_tag, _, _, _, _) = path.disassemble();
    Ok(Value::String(sort_tag.to_owned()))
}
/// A Tera filter that takes a path and extracts its last element.
/// This function trims the `sort_tag` if present.
/// The input type must be `Value::String` and the output type is
/// `Value::String()`
fn trim_file_sort_tag_filter<S: BuildHasher>(
    value: &Value,
    _args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    let path = PathBuf::from(try_get_value!("trim_file_sort_tag", "value", String, value));
    // The second component of `disassemble()` is the filename without the
    // sort-tag.
    let (_, filename, _, _, _) = path.disassemble();
    Ok(Value::String(filename.to_owned()))
}
/// A Tera filter that takes a path and extracts its file stem,
/// in other words: the filename without `sort_tag`, `file_copy_counter`
/// and `extension`.
/// The input type must be `Value::String` and the output type is
/// `Value::String()`
fn file_stem_filter<S: BuildHasher>(
    value: &Value,
    _args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    let path = PathBuf::from(try_get_value!("file_stem", "value", String, value));
    // The third component of `disassemble()` is the bare stem.
    let (_, _, stem, _, _) = path.disassemble();
    Ok(Value::String(stem.to_owned()))
}
/// A Tera filter that takes a path and extracts its copy counter,
/// or, to put it another way: the filename without `sort_tag`, `file_stem`
/// and `file_ext` (and their separators). If the filename contains a
/// `copy_counter=n`, the returned JSON value variant is `Value::Number(n)`.
/// If there is no copy counter in the input, the output is `Value::Number(0)`.
/// The input type must be `Value::String` and the output type is
/// `Value::Number()`
fn file_copy_counter_filter<S: BuildHasher>(
    value: &Value,
    _args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    let path = PathBuf::from(try_get_value!("file_copy_counter", "value", String, value));
    // A missing copy counter maps to 0.
    let (_, _, _, copy_counter, _) = path.disassemble();
    Ok(Value::from(copy_counter.unwrap_or(0)))
}
/// A Tera filter that takes a path and extracts its filename without
/// file extension. The filename may contain a sort-tag, a copy-counter and
/// separators. The input type must be `Value::String` and the output type is
/// `Value::String()`
fn file_name_filter<S: BuildHasher>(
    value: &Value,
    _args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    let p = try_get_value!("file_name", "value", String, value);
    // A path without a final component or with non-UTF-8 bytes yields "".
    let filename = Path::new(&p)
        .file_name()
        .and_then(|n| n.to_str())
        .unwrap_or_default()
        .to_owned();
    Ok(Value::String(filename))
}
/// A Tera filter that replace the input string with the parameter `with`, but
/// only if the input stream is empty, e.g.:
///
/// * `Value::Null` or
/// * `Value::String("")`, or
/// * `Value::Array([])`, or
/// * the array contains only empty strings.
///
/// The parameter `with` can be any `Value` type.
fn replace_empty_filter<S: BuildHasher>(
    value: &Value,
    args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    // Decide whether the input counts as "empty".
    let input_is_empty = match value {
        Value::Null => true,
        Value::String(s) => s.is_empty(),
        // An array is empty when it has no elements, or when every element
        // is a string and all of those strings are empty. Any non-string
        // element makes the array non-empty.
        Value::Array(items) => items.iter().all(|v| v.as_str().is_some_and(|s| s.is_empty())),
        _ => false,
    };

    if input_is_empty {
        // Substitute the `with` argument when it was supplied.
        if let Some(with) = args.get("with") {
            return Ok(with.to_owned());
        }
    }
    Ok(value.to_owned())
}
/// A Tera filter that prepends the string parameter `with`, but only if the
/// input stream is not empty.
/// Alternatively, when called with the string parameter `with_sort_tag`, the
/// filter prepends the sort-tag and all necessary sort-tag separator
/// characters, regardless of whether the input stream is empty or not.
/// In addition, the flag `newline` inserts a newline character in front of a
/// non-empty result; for an empty input nothing is inserted.
/// The input type, and the type of the parameters `with` and `with_sort_tag`
/// must be `Value::String`. The parameter `newline` must be a `Value::Bool`
/// and the output type is `Value::String()`.
fn prepend_filter<S: BuildHasher>(
    value: &Value,
    args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    let mut res = try_get_value!("prepend", "value", String, value);
    if let Some(with) = args.get("with") {
        let with = try_get_value!("prepend", "with", String, with);
        // `with` is only glued on when there is something to prepend to.
        if !res.is_empty() {
            res = format!("{}{}", with, res);
        }
    } else if let Some(sort_tag) = args.get("with_sort_tag") {
        let sort_tag = try_get_value!("prepend", "with_sort_tag", String, sort_tag);
        // Reassemble a filename from sort-tag + stem; this inserts the
        // configured sort-tag separators even for an empty input.
        res = PathBuf::from_disassembled(&sort_tag, &res, None, "")
            .to_str()
            .unwrap_or_default()
            .to_string();
    }
    // `newline=true` puts a '\n' before a non-empty result.
    if let Some(Value::Bool(true)) = args.get("newline") {
        if !res.is_empty() {
            res.insert(0, '\n');
        }
    }
    Ok(Value::String(res))
}
/// A Tera filter that appends the string parameter `with`. In addition, the
/// flag `newline` inserts a newline character at end of the result. In
/// case the input stream is empty, nothing is appended.
/// The input type, and the type of the parameter `with` must be
/// `Value::String`. The parameter `newline` must be a `Value::Bool` and the
/// output type is `Value::String()`.
fn append_filter<S: BuildHasher>(
    value: &Value,
    args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    let input = try_get_value!("append", "value", String, value);
    // Empty input short-circuits: nothing is ever appended to it.
    if input.is_empty() {
        return Ok(Value::String(String::new()));
    }
    // `input` is owned and no longer needed afterwards: move it instead of
    // cloning (the original `input.clone()` was a redundant allocation).
    let mut res = input;
    if let Some(with) = args.get("with") {
        let with = try_get_value!("append", "with", String, with);
        res.push_str(&with);
    };
    if let Some(newline) = args.get("newline") {
        // Pass the correct filter name "append" (not "newline") so that the
        // macro's error message names the right filter, consistent with all
        // sibling filters above.
        let newline = try_get_value!("append", "newline", bool, newline);
        // `res` is non-empty here (checked above and only grown since).
        if newline {
            res.push('\n');
        }
    };
    Ok(Value::String(res))
}
/// A Tera filter that takes a path and extracts its file extension.
/// Paths without an extension (or with a non-UTF-8 one) yield the empty
/// string.
/// The input type must be `Value::String()`, the output type is
/// `Value::String()`.
fn file_ext_filter<S: BuildHasher>(
    value: &Value,
    _args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    let p = try_get_value!("file_ext", "value", String, value);
    let ext = Path::new(&p)
        .extension()
        .and_then(|e| e.to_str())
        .unwrap_or_default()
        .to_string();
    Ok(Value::String(ext))
}
/// A Tera filter that takes a directory path and returns the full path of the
/// Tp-Note document with the alphabetically last sort-tag in that directory
/// (directory joined with the found filename).
/// The filter returns the empty string if none was found.
/// The input type must be `Value::String()`, the output type is
/// `Value::String()`.
fn find_last_created_file<S: BuildHasher>(
    value: &Value,
    _args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
    let p_str = try_get_value!("dir_last_created", "value", String, value);
    let p = Path::new(&p_str);
    // Join the directory with the found filename. Use `unwrap_or_default()`
    // instead of `unwrap()` so a non-UTF-8 path degrades to the empty
    // string rather than panicking inside the template engine.
    let last = p
        .find_last_created_file()
        .map(|filename| {
            p.join(Path::new(&filename))
                .to_str()
                .unwrap_or_default()
                .to_string()
        })
        .unwrap_or_default();
    Ok(Value::String(last))
}
/// Expects a path a filename in its input and returns an incremented sequential
/// sort-tag.
/// First, from the input's filename the sort-tag is extracted. Then, it
/// matches all digits from the end of the sort- tag, increments them
/// and replaces the matched digits with the result. If no numeric digits can be
/// matched, consider alphabetic letters as base 26 number system and try again.
/// Returns the default value if no match succeeds.
/// Note, that only sequential sort-tags are incremented, for others or, if the
/// input is empty, `default` is returned.
/// The path in the input allows to check if the resulting sort-tag exists
/// on disk already. If this is the case, a subcounter is appended to the
/// resulting sort-tag.
/// All input types are `Value::String`. The output type is `Value::String()`.
fn incr_sort_tag_filter<S: BuildHasher>(
value: &Value,
args: &HashMap<String, Value, S>,
) -> TeraResult<Value> {
let input = try_get_value!("incr_sort_tag", "value", String, value);
let mut default = String::new();
if let Some(d) = args.get("default") {
default = try_get_value!("incr_sort_tag", "default", String, d);
};
let (input_dir, filename) = input.rsplit_once(['/', '\\']).unwrap_or(("", &input));
let (input_sort_tag, _, is_sequential) = filename.split_sort_tag(false);
if input_sort_tag.is_empty() || !is_sequential {
return Ok(Value::String(default));
}
// Start analyzing the input.
let (prefix, digits) = match input_sort_tag.rfind(|c: char| !c.is_ascii_digit()) {
Some(idx) => (&input_sort_tag[..idx + 1], &input_sort_tag[idx + 1..]),
None => ("", input_sort_tag),
};
// Search for digits
let mut output_sort_tag = if !digits.is_empty() {
// Return early if this number is too big.
const DIGITS_MAX: usize = u32::MAX.ilog10() as usize; // 9
if digits.len() > DIGITS_MAX {
return Ok(Value::String(default));
}
// Convert string to n base 10.
let mut n = match digits.parse::<u32>() {
Ok(n) => n,
_ => return Ok(Value::String(default)),
};
n += 1;
let mut res = n.to_string();
if res.len() < digits.len() {
let padding = "0".repeat(digits.len() - res.len());
res = format!("{}{}", padding, res);
}
// Assemble sort-tag.
prefix.to_string() + &res
} else {
//
// Search for letters as digits
let (prefix, letters) = match input_sort_tag.rfind(|c: char| !c.is_ascii_lowercase()) {
Some(idx) => (&input_sort_tag[..idx + 1], &input_sort_tag[idx + 1..]),
None => ("", input_sort_tag),
};
if !letters.is_empty() {
const LETTERS_BASE: u32 = 26;
const LETTERS_MAX: usize = (u32::MAX.ilog2() / (LETTERS_BASE.ilog2() + 1)) as usize; // 6=31/(4+1)
// Return early if this number is too big.
if letters.len() > LETTERS_MAX {
return Ok(Value::String(default));
}
// Interpret letters as base LETTERS_BASE and convert to int.
let mut n = letters.chars().fold(0, |acc, c| {
LETTERS_BASE * acc + (c as u8).saturating_sub(b'a') as u32
});
n += 1;
// Convert back to letters base LETTERS_BASE.
let mut res = String::new();
while n > 0 {
let c = char::from_u32('a' as u32 + n.rem_euclid(LETTERS_BASE)).unwrap_or_default();
n = n.div_euclid(LETTERS_BASE);
res = format!("{}{}", c, res);
}
if res.len() < letters.len() {
let padding = "a".repeat(letters.len() - res.len());
res = format!("{}{}", padding, res);
}
// Assemble sort-tag.
prefix.to_string() + &res
} else {
default
}
};
// Check for a free slot, branch if not free.
let input_dir = Path::new(input_dir);
if input_dir.has_file_with_sort_tag(&output_sort_tag).is_some() {
output_sort_tag = input_sort_tag.to_string();
}
while input_dir.has_file_with_sort_tag(&output_sort_tag).is_some() {
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | true |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-lib/src/html_renderer.rs | tpnote-lib/src/html_renderer.rs | //! Tp-Note's high level HTML rendering API.
//!
//! A set of functions that take a `Context` type and a `Content` type (or raw
//! text) and return the HTML rendition of the content. The API is completely
//! stateless. All functions read the `LIB_CFG` global variable to read the
//! configuration stored in `LibCfg.tmpl_html`.
use crate::config::LIB_CFG;
use crate::config::LocalLinkKind;
use crate::content::Content;
use crate::context::Context;
use crate::context::HasSettings;
use crate::error::NoteError;
#[cfg(feature = "viewer")]
use crate::filter::TERA;
use crate::html::HTML_EXT;
use crate::html::rewrite_links;
use crate::note::Note;
#[cfg(feature = "viewer")]
use crate::note::ONE_OFF_TEMPLATE_NAME;
#[cfg(feature = "viewer")]
use crate::note_error_tera_template;
use crate::template::TemplateKind;
use parking_lot::RwLock;
use std::collections::HashSet;
use std::fs::OpenOptions;
use std::io;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
#[cfg(feature = "viewer")]
use tera::Tera;
/// High level API to render a note providing its `content` and some `context`.
/// Stateless unit struct: all methods are associated functions that read the
/// global `LIB_CFG` configuration at call time.
pub struct HtmlRenderer;
impl HtmlRenderer {
    /// Returns the HTML rendition of a `ContentString`.
    ///
    /// The markup to HTML rendition engine is determined by the file extension
    /// of the variable `context.path`. The resulting HTML and other HTML
    /// template variables originating from `context` are inserted into the
    /// `TMPL_HTML_VIEWER` template before being returned.
    /// The string `viewer_doc_js` contains JavaScript live update code that
    /// will be injected into the HTML page via the
    /// `TMPL_HTML_VAR_DOC_VIEWER_JS` template variable.
    /// This function is stateless.
    ///
    /// ```rust
    /// use tpnote_lib::content::Content;
    /// use tpnote_lib::content::ContentString;
    /// use tpnote_lib::context::Context;
    /// use tpnote_lib::html_renderer::HtmlRenderer;
    /// use std::env::temp_dir;
    /// use std::fs;
    /// use std::path::Path;
    ///
    /// // Prepare test: create existing note file.
    /// let content = ContentString::from_string(String::from(r#"---
    /// title: My day
    /// subtitle: Note
    /// ---
    /// Body text
    /// "#), "doc".to_string());
    ///
    /// // Start test
    /// let mut context = Context::from(Path::new("/path/to/note.md")).unwrap();
    /// // We do not inject any JavaScript.
    /// // Render.
    /// let html = HtmlRenderer::viewer_page::<ContentString>(context, content, "")
    ///     .unwrap();
    /// // Check the HTML rendition.
    /// assert!(html.starts_with("<!DOCTYPE html>\n<html"))
    /// ```
    ///
    /// A more elaborated example that reads from disk:
    ///
    /// ```rust
    /// use tpnote_lib::config::LIB_CFG;
    /// use tpnote_lib::content::Content;
    /// use tpnote_lib::content::ContentString;
    /// use tpnote_lib::context::Context;
    /// use tpnote_lib::html_renderer::HtmlRenderer;
    /// use std::env::temp_dir;
    /// use std::fs;
    ///
    /// // Prepare test: create existing note file.
    /// let raw = r#"---
    /// title: My day2
    /// subtitle: Note
    /// ---
    /// Body text
    /// "#;
    /// let notefile = temp_dir().join("20221030-My day2--Note.md");
    /// fs::write(&notefile, raw.as_bytes()).unwrap();
    ///
    /// // Start test
    /// let mut context = Context::from(&notefile).unwrap();
    /// // We do not inject any JavaScript.
    /// // Render.
    /// let content = ContentString::open(context.get_path()).unwrap();
    /// // You can plug in your own type (must impl. `Content`).
    /// let html = HtmlRenderer::viewer_page(context, content, "").unwrap();
    /// // Check the HTML rendition.
    /// assert!(html.starts_with("<!DOCTYPE html>\n<html"))
    /// ```
    pub fn viewer_page<T: Content>(
        context: Context<HasSettings>,
        content: T,
        // Java Script live updater inject code. Will be inserted into
        // `tmpl_html.viewer`.
        viewer_doc_js: &str,
    ) -> Result<String, NoteError> {
        let tmpl_html = &LIB_CFG.read_recursive().tmpl_html.viewer;
        HtmlRenderer::render(context, content, viewer_doc_js, tmpl_html)
    }

    /// Returns the HTML rendition of a `ContentString`.
    /// The markup to HTML rendition engine is determined by the file extension
    /// of the variable `context.path`. The resulting HTML and other HTML
    /// template variables originating from `context` are inserted into the
    /// `TMPL_HTML_EXPORTER` template before being returned.
    /// `context` is expected to have at least all `HasSettings` keys
    /// and the additional key `TMPL_HTML_VAR_VIEWER_DOC_JS` set and valid.
    /// All other keys are ignored.
    /// This function is stateless.
    ///
    /// ```rust
    /// use tpnote_lib::config::TMPL_HTML_VAR_VIEWER_DOC_JS;
    /// use tpnote_lib::content::Content;
    /// use tpnote_lib::content::ContentString;
    /// use tpnote_lib::context::Context;
    /// use tpnote_lib::html_renderer::HtmlRenderer;
    /// use std::env::temp_dir;
    /// use std::fs;
    /// use std::path::Path;
    ///
    /// // Prepare test: create existing note file.
    /// let content= ContentString::from_string(String::from(r#"---
    /// title: "My day"
    /// subtitle: "Note"
    /// ---
    /// Body text
    /// "#), "doc".to_string());
    ///
    /// // Start test
    /// let mut context = Context::from(Path::new("/path/to/note.md")).unwrap();
    /// // Render.
    /// let html = HtmlRenderer::exporter_page::<ContentString>(context, content)
    ///     .unwrap();
    /// // Check the HTML rendition.
    /// assert!(html.starts_with("<!DOCTYPE html>\n<html"))
    /// ```
    pub fn exporter_page<T: Content>(
        context: Context<HasSettings>,
        content: T,
    ) -> Result<String, NoteError> {
        let tmpl_html = &LIB_CFG.read_recursive().tmpl_html.exporter;
        // No JavaScript is injected when exporting.
        HtmlRenderer::render(context, content, "", tmpl_html)
    }

    /// Helper function: parses `content` into a `Note` and renders it with
    /// `tmpl_html`, injecting `viewer_doc_js`.
    fn render<T: Content>(
        context: Context<HasSettings>,
        content: T,
        viewer_doc_js: &str,
        tmpl_html: &str,
    ) -> Result<String, NoteError> {
        let note = Note::from_existing_content(context, content, TemplateKind::None)?;
        note.render_content_to_html(tmpl_html, viewer_doc_js)
    }

    /// When the header cannot be deserialized, the file located in
    /// `context.path` is rendered as "Error HTML page".
    ///
    /// The erroneous content is rendered to html with
    /// `parse_hyperlinks::renderer::text_rawlinks2html` and inserted in
    /// the `TMPL_HTML_VIEWER_ERROR` template (which can be configured at
    /// runtime).
    /// The string `viewer_doc_js` contains JavaScript live update code that
    /// will be injected into the HTML page via the
    /// `TMPL_HTML_VAR_DOC_VIEWER_JS` template variable.
    /// This function is stateless.
    ///
    /// ```rust
    /// use tpnote_lib::config::LIB_CFG;
    /// use tpnote_lib::config::TMPL_HTML_VAR_DOC_ERROR;
    /// use tpnote_lib::config::TMPL_HTML_VAR_VIEWER_DOC_JS;
    /// use tpnote_lib::content::Content;
    /// use tpnote_lib::content::ContentString;
    /// use tpnote_lib::context::Context;
    /// use tpnote_lib::error::NoteError;
    /// use tpnote_lib::html_renderer::HtmlRenderer;
    /// use std::env::temp_dir;
    /// use std::fs;
    ///
    /// // Prepare test: create existing erroneous note file.
    /// let raw_error = r#"---
    /// title: "My day3"
    /// subtitle: "Note"
    /// --
    /// Body text
    /// "#;
    /// let notefile = temp_dir().join("20221030-My day3--Note.md");
    /// fs::write(&notefile, raw_error.as_bytes()).unwrap();
    /// let e = NoteError::FrontMatterFieldMissing { field_name: "title".to_string() };
    ///
    /// // Start test
    /// let mut context = Context::from(&notefile).unwrap();
    /// // We do not inject any JavaScript.
    /// // Render.
    /// // Read from file.
    /// // You can plug in your own type (must impl. `Content`).
    /// let content = ContentString::open(context.get_path()).unwrap();
    /// let html = HtmlRenderer::error_page(
    ///     context, content, &e.to_string(), "").unwrap();
    /// // Check the HTML rendition.
    /// assert!(html.starts_with("<!DOCTYPE html>\n<html"))
    /// ```
    #[cfg(feature = "viewer")]
    pub fn error_page<T: Content>(
        context: Context<HasSettings>,
        note_erroneous_content: T,
        error_message: &str,
        // Java Script live updater inject code. Will be inserted into
        // `tmpl_html.viewer`.
        viewer_doc_js: &str,
    ) -> Result<String, NoteError> {
        // Store the raw content and the error message in the context, so
        // the error template can display them.
        // NOTE: the original line was corrupted by HTML-entity mojibake
        // (`¬e_…` for `&note_…`); restored here.
        let context =
            context.insert_error_content(&note_erroneous_content, error_message, viewer_doc_js);
        let tmpl_html = &LIB_CFG.read_recursive().tmpl_html.viewer_error;
        // Apply template.
        let mut tera = Tera::default();
        // Switch `autoescape_on()` only for HTML templates.
        tera.autoescape_on(vec![ONE_OFF_TEMPLATE_NAME]);
        tera.extend(&TERA)?;
        let html = tera
            .render_str(tmpl_html, &context)
            .map_err(|e| note_error_tera_template!(e, "[html_tmpl] viewer_error".to_string()))?;
        Ok(html)
    }

    /// Renders `doc_path` with `content` into HTML and saves the result in
    /// `export_dir` in case `export_dir` is an absolute directory. Otherwise
    /// the parent directory of `doc_path` is concatenated with `export_dir`
    /// and the result is stored there.
    /// `-` dumps the rendition to the standard output. The filename of the HTML
    /// rendition is the same as in `doc_path` but with `.html` appended.
    ///
    /// ```rust
    /// use tpnote_lib::config::LIB_CFG;
    /// use tpnote_lib::config::TMPL_HTML_VAR_VIEWER_DOC_JS;
    /// use tpnote_lib::config::LocalLinkKind;
    /// use tpnote_lib::content::Content;
    /// use tpnote_lib::content::ContentString;
    /// use tpnote_lib::context::Context;
    /// use tpnote_lib::html_renderer::HtmlRenderer;
    /// use std::env::temp_dir;
    /// use std::fs;
    /// use std::path::Path;
    ///
    /// // Prepare test: create existing note file.
    /// let raw = r#"---
    /// title: "My day3"
    /// subtitle: "Note"
    /// ---
    /// Body text
    /// "#;
    /// let notefile = temp_dir().join("20221030-My day3--Note.md");
    /// fs::write(&notefile, raw.as_bytes()).unwrap();
    ///
    /// // Start test
    /// let content = ContentString::open(&notefile).unwrap();
    /// // You can plug in your own type (must impl. `Content`).
    /// HtmlRenderer::save_exporter_page(
    ///     &notefile, content, Path::new("."), LocalLinkKind::Long).unwrap();
    /// // Check the HTML rendition.
    /// let expected_file = temp_dir().join("20221030-My day3--Note.md.html");
    /// let html = fs::read_to_string(expected_file).unwrap();
    /// assert!(html.starts_with("<!DOCTYPE html>\n<html"))
    /// ```
    pub fn save_exporter_page<T: Content>(
        doc_path: &Path,
        content: T,
        export_dir: &Path,
        local_link_kind: LocalLinkKind,
    ) -> Result<(), NoteError> {
        let context = Context::from(doc_path)?;
        let doc_path = context.get_path();
        let doc_dir = context.get_dir_path().to_owned();
        // Determine filename of html-file. An empty `PathBuf` is the marker
        // for "write to STDOUT" (requested with `export_dir == "-"`).
        let html_path = match export_dir {
            p if p == Path::new("-") => PathBuf::new(),
            p => {
                let mut html_filename = doc_path
                    .file_name()
                    .unwrap_or_default()
                    .to_str()
                    .unwrap_or_default()
                    .to_string();
                html_filename.push_str(HTML_EXT);
                let mut q = doc_path.parent().unwrap_or(Path::new("")).to_path_buf();
                q.push(p);
                q.push(PathBuf::from(html_filename));
                q
            }
        };
        if html_path == Path::new("") {
            log::debug!("Rendering HTML to STDOUT (`{:?}`)", export_dir);
        } else {
            log::debug!("Rendering HTML into: {:?}", html_path);
        };
        // These must live longer than `writeable`, and thus are declared first:
        let (mut stdout_write, mut file_write);
        // We need to ascribe the type to get dynamic dispatch.
        let writeable: &mut dyn Write = if html_path == Path::new("") {
            stdout_write = io::stdout();
            &mut stdout_write
        } else {
            file_write = OpenOptions::new()
                .write(true)
                .create(true)
                .truncate(true)
                .open(&html_path)?;
            &mut file_write
        };
        // Render HTML.
        let root_path = context.get_root_path().to_owned();
        let html = Self::exporter_page(context, content)?;
        let html = rewrite_links(
            html,
            &root_path,
            &doc_dir,
            local_link_kind,
            // Do append `.html` to `.md` in links.
            true,
            Arc::new(RwLock::new(HashSet::new())),
        );
        // Write HTML rendition.
        writeable.write_all(html.as_bytes())?;
        Ok(())
    }
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-lib/src/template.rs | tpnote-lib/src/template.rs | //!Abstractions for content templates and filename templates.
use crate::filename::NotePath;
use crate::settings::SETTINGS;
use crate::{config::LIB_CFG, content::Content};
use std::path::Path;
/// Each workflow is related to one `TemplateKind`, which relates to one
/// content template and one filename template.
#[non_exhaustive]
#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)]
pub enum TemplateKind {
    /// Templates used when Tp-Note is invoked with a directory path.
    /// Clipboard data may be available.
    FromDir,
    /// Templates used when Tp-Note is invoked with a path pointing to a text
    /// file that does not contain a YAML header.
    FromTextFile,
    /// Templates used when Tp-Note is invoked with a path pointing to a non
    /// text file.
    AnnotateFile,
    /// Templates used when Tp-Note is invoked with a path pointing to a Tp-Note
    /// text file with a valid YAML header (with a `title:` field).
    SyncFilename,
    /// No templates are used, but the file is still parsed in order to
    /// render it later to HTML (cf. `<Note>.render_content_to_html()`).
    #[default]
    None,
}
impl TemplateKind {
    /// A constructor returning the tuple `(template_kind, Some(content))`.
    /// `template_kind` is the result of the logic calculating under what
    /// circumstances what template should be used.
    /// If `path` has a Tp-Note extension (e.g. `.md`) and the file indicated by
    /// `path` could be opened and loaded from disk, `Some(content)` contains
    /// its content. Otherwise `None` is returned.
    pub fn from<T: Content>(path: &Path) -> (Self, Option<T>) {
        //
        let path_is_dir = path.is_dir();
        let path_is_file = path.is_file();
        let path_has_tpnote_extension = path.has_tpnote_ext();
        let path_is_tpnote_file = path_is_file && path_has_tpnote_extension;
        // Only load the content when the path looks like a Tp-Note file; a
        // failed read silently falls back to the default (empty) content.
        let (path_is_tpnote_file_and_has_header, content) = if path_is_tpnote_file {
            let content: T = Content::open(path).unwrap_or_default();
            (!content.header().is_empty(), Some(content))
        } else {
            (false, None)
        };
        // This determines the workflow and what template will be applied.
        // Tuple order: (is_dir, is_file, is_tpnote_file, has_header).
        let template_kind = match (
            path_is_dir,
            path_is_file,
            path_is_tpnote_file,
            path_is_tpnote_file_and_has_header,
        ) {
            (true, false, _, _) => TemplateKind::FromDir,
            (false, true, true, true) => TemplateKind::SyncFilename,
            (false, true, true, false) => TemplateKind::FromTextFile,
            (false, true, false, _) => TemplateKind::AnnotateFile,
            (_, _, _, _) => TemplateKind::None,
        };
        log::debug!("Choosing the \"{:?}\" template.", template_kind);
        log::trace!(
            "Template choice is based on:
             path=\"{}\",
             path_is_dir={},
             path_is_file={},
             path_is_tpnote_file={},
             path_is_tpnote_file_and_has_header={}",
            path.to_str().unwrap(),
            path_is_dir,
            path_is_file,
            path_is_tpnote_file,
            path_is_tpnote_file_and_has_header,
        );
        (template_kind, content)
    }

    /// Returns the content template string as it is defined in the configuration file.
    /// Panics for `TemplateKind::SyncFilename` and `TemplateKind::None`.
    pub fn get_content_template(&self) -> String {
        // Both globals are read at call time; the scheme index selects one
        // template set out of the configured schemes.
        let lib_cfg = LIB_CFG.read_recursive();
        let scheme_idx = SETTINGS.read_recursive().current_scheme;
        log::trace!(
            "Scheme index: {}, applying the content template: `{}`",
            scheme_idx,
            self.get_content_template_name()
        );
        let tmpl = &lib_cfg.scheme[scheme_idx].tmpl;
        match self {
            Self::FromDir => tmpl.from_dir_content.clone(),
            Self::FromTextFile => tmpl.from_text_file_content.clone(),
            Self::AnnotateFile => tmpl.annotate_file_content.clone(),
            Self::SyncFilename => {
                panic!("`TemplateKind::SyncFilename` has no content template")
            }
            Self::None => panic!("`TemplateKind::None` has no content template"),
        }
    }

    /// Returns the content template variable name as it is used in the configuration file.
    pub fn get_content_template_name(&self) -> &str {
        match self {
            Self::FromDir => "tmpl.from_dir_content",
            Self::FromTextFile => "tmpl.from_text_file_content",
            Self::AnnotateFile => "tmpl.annotate_file_content",
            Self::SyncFilename => "`TemplateKind::SyncFilename` has no content template",
            Self::None => "`TemplateKind::None` has no content template",
        }
    }

    /// Returns the file template string as it is defined in the configuration file.
    /// Panics for `TemplateKind::None`.
    pub fn get_filename_template(&self) -> String {
        let lib_cfg = LIB_CFG.read_recursive();
        let scheme_idx = SETTINGS.read_recursive().current_scheme;
        log::trace!(
            "Scheme index: {}, applying the filename template: `{}`",
            scheme_idx,
            self.get_filename_template_name()
        );
        let tmpl = &lib_cfg.scheme[scheme_idx].tmpl;
        match self {
            Self::FromDir => tmpl.from_dir_filename.clone(),
            Self::FromTextFile => tmpl.from_text_file_filename.clone(),
            Self::AnnotateFile => tmpl.annotate_file_filename.clone(),
            Self::SyncFilename => tmpl.sync_filename.clone(),
            Self::None => panic!("`TemplateKind::None` has no filename template"),
        }
    }

    /// Returns the content template variable name as it is used in the configuration file.
    pub fn get_filename_template_name(&self) -> &str {
        match self {
            Self::FromDir => "tmpl.from_dir_filename",
            Self::FromTextFile => "tmpl.from_text_file_filename",
            Self::AnnotateFile => "tmpl.annotate_file_filename",
            Self::SyncFilename => "tmpl.sync_filename",
            Self::None => "`TemplateKind::None` has no filename template",
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::content::Content;
    use crate::content::ContentString;
    use super::*;

    // NOTE: several `&notefile` expressions below were corrupted by
    // HTML-entity mojibake (`¬efile`); restored in this revision.
    #[test]
    fn test_template_kind_from() {
        use std::env::temp_dir;
        use std::fs;
        //
        let tk: (TemplateKind, Option<ContentString>) = TemplateKind::from(Path::new("."));
        assert_eq!(tk, (TemplateKind::FromDir, None));
        //
        // Tp-Note file.
        // Prepare test: open existing text file without header.
        let raw = "Body text without header";
        let notefile = temp_dir().join("no header.md");
        let _ = fs::write(&notefile, raw.as_bytes());
        // Execute test.
        let (tk, content) = TemplateKind::from(&notefile);
        // Inspect result.
        let expected_template_kind = TemplateKind::FromTextFile;
        let expected_body = "Body text without header";
        let expected_header = "";
        //println!("{:?}", tk);
        assert_eq!(tk, expected_template_kind);
        let content: ContentString = content.unwrap();
        assert_eq!(content.header(), expected_header);
        assert_eq!(content.body(), expected_body);
        let _ = fs::remove_file(&notefile);
        //
        // Tp-Note file.
        // Prepare test: open existing note file with header.
        let raw = "---\ntitle: my doc\n---\nBody";
        let notefile = temp_dir().join("some.md");
        let _ = fs::write(&notefile, raw.as_bytes());
        // Execute test.
        let (tk, content) = TemplateKind::from(&notefile);
        // Inspect result.
        let expected_template_kind = TemplateKind::SyncFilename;
        let expected_body = "Body";
        let expected_header = "title: my doc";
        //println!("{:?}", tk);
        assert_eq!(tk, expected_template_kind);
        let content: ContentString = content.unwrap();
        assert_eq!(content.header(), expected_header);
        assert_eq!(content.body(), expected_body);
        let _ = fs::remove_file(&notefile);
        //
        // Non-Tp-Note file.
        // Prepare test: annotate existing PDF file.
        let raw = "some data";
        let notefile = temp_dir().join("some.pdf");
        let _ = fs::write(&notefile, raw.as_bytes());
        let (tk, content): (TemplateKind, Option<ContentString>) = TemplateKind::from(&notefile);
        // Inspect result.
        let expected_template_kind = TemplateKind::AnnotateFile;
        assert_eq!(tk, expected_template_kind);
        assert_eq!(content, None);
        let _ = fs::remove_file(&notefile);
    }
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-lib/src/workflow.rs | tpnote-lib/src/workflow.rs | //! Tp-Note's high level API.<!-- The low level API is documented
//! in the module `tpnote_lib::note`. -->
//!
//! How to integrate this in your text editor code?
//! First, call `create_new_note_or_synchronize_filename()`
//! with the first positional command line parameter `<path>`.
//! Then open the new text file with the returned path in your
//! text editor. After modifying the text, saving it and closing your
//! text editor, call `synchronize_filename()`.
//! The returned path points to the possibly renamed note file.
//!
//! Tp-Note is customizable at runtime by modifying its configuration stored in
//! `crate::config::LIB_CFG` before executing the functions in this
//! module (see type definition and documentation in `crate::config::LibCfg`).
//! All functions in this API are stateless.
//!
//!
//! ## Example with `TemplateKind::New`
//!
//! ```rust
//! use tpnote_lib::content::Content;
//! use tpnote_lib::content::ContentString;
//! use tpnote_lib::workflow::WorkflowBuilder;
//! use std::env::temp_dir;
//! use std::fs;
//! use std::path::Path;
//!
//! // Prepare test.
//! let notedir = temp_dir();
//!
//! let html_clipboard = ContentString::default();
//! let txt_clipboard = ContentString::default();
//! let stdin = ContentString::default();
//! let v = vec![&html_clipboard, &txt_clipboard, &stdin];
//! // This is the condition to choose: `TemplateKind::New`:
//! assert!(html_clipboard.is_empty() && txt_clipboard.is_empty() &&stdin.is_empty());
//! // There are no inhibitor rules to change the `TemplateKind`.
//! let template_kind_filter = |tk|tk;
//!
//! // Build and run workflow.
//! let n = WorkflowBuilder::new(&notedir)
//! // You can plug in your own type (must impl. `Content`).
//! .upgrade::<ContentString, _>(
//! "default", v, template_kind_filter)
//! .build()
//! .run()
//! .unwrap();
//!
//! // Check result.
//! assert!(n.as_os_str().to_str().unwrap()
//! .contains("--Note"));
//! assert!(n.is_file());
//! let raw_note = fs::read_to_string(n).unwrap();
//! #[cfg(not(target_family = "windows"))]
//! assert!(raw_note.starts_with("\u{feff}---\ntitle:"));
//! #[cfg(target_family = "windows")]
//! assert!(raw_note.starts_with("\u{feff}---\r\ntitle:"));
//! ```
//!
//! The internal data storage for the note's content is `ContentString`
//! which implements the `Content` trait. Now we modify slightly
//! the above example to showcase, how to overwrite
//! one of the trait's methods.
//!
//! ```rust
//! use std::path::Path;
//! use tpnote_lib::content::Content;
//! use tpnote_lib::content::ContentString;
//! use tpnote_lib::workflow::WorkflowBuilder;
//! use std::env::temp_dir;
//! use std::path::PathBuf;
//! use std::fs;
//! use std::fs::OpenOptions;
//! use std::io::Write;
//! use std::ops::Deref;
//!
//! #[derive(Default, Debug, Eq, PartialEq)]
//! // We need a newtype because of the orphan rule.
//! pub struct MyContentString(ContentString);
//!
//! impl AsRef<str> for MyContentString {
//! fn as_ref(&self) -> &str {
//! self.0.as_ref()
//! }
//! }
//!
//! impl Content for MyContentString {
//! // Now we overwrite one method to show how to plugin custom code.
//! fn save_as(&self, new_file_path: &Path) -> Result<(), std::io::Error> {
//! let mut outfile = OpenOptions::new()
//! .write(true)
//! .create(true)
//! .open(&new_file_path)?;
//! // We do not save the content to disk, we write intstead:
//! write!(outfile, "Simulation")?;
//! Ok(())
//! }
//! // The rest we delegate.
//! fn from_string(input: String, name: String) -> Self {
//! MyContentString(
//! ContentString::from_string(input, name))
//! }
//! fn header(&self) -> &str {
//! self.0.header()
//! }
//! fn body(&self) -> &str {
//! self.0.header()
//! }
//! fn name(&self) -> &str {
//! self.0.name()
//! }
//! }
//!
//! // Prepare test.
//! let notedir = temp_dir();
//!
//! let html_clipboard = MyContentString::default();
//! let txt_clipboard = MyContentString::default();
//! let stdin = MyContentString::default();
//! let v = vec![&html_clipboard, &txt_clipboard, &stdin];
//! // There are no inhibitor rules to change the `TemplateKind`.
//! let template_kind_filter = |tk|tk;
//!
//! // Build and run workflow.
//! let n = WorkflowBuilder::new(&notedir)
//! // You can plug in your own type (must impl. `Content`).
//! .upgrade::<MyContentString, _>(
//! "default", v, template_kind_filter)
//! .build()
//! .run()
//! .unwrap();
//!
//! // Check result.
//! assert!(n.as_os_str().to_str().unwrap()
//! .contains("--Note"));
//! assert!(n.is_file());
//! let raw_note = fs::read_to_string(n).unwrap();
//! assert_eq!(raw_note, "Simulation");
//! ```
use crate::config::LocalLinkKind;
use crate::config::TMPL_VAR_FM_;
use crate::config::TMPL_VAR_FM_ALL;
use crate::config::TMPL_VAR_FM_FILENAME_SYNC;
use crate::config::TMPL_VAR_FM_NO_FILENAME_SYNC;
use crate::config::TMPL_VAR_FM_SCHEME;
use crate::content::Content;
use crate::context::Context;
use crate::error::NoteError;
use crate::html_renderer::HtmlRenderer;
use crate::note::Note;
use crate::settings::SETTINGS;
use crate::settings::SchemeSource;
use crate::settings::Settings;
use crate::template::TemplateKind;
use parking_lot::RwLockUpgradableReadGuard;
use std::path::Path;
use std::path::PathBuf;
use tera::Value;
/// Typestate of the `WorkflowBuilder`.
///
/// The type parameter `W` encodes the builder's current state, e.g.
/// `SyncFilename` (filename synchronization only) or
/// `SyncFilenameOrCreateNew` (synchronization or note creation).
#[derive(Debug, Clone)]
pub struct WorkflowBuilder<W> {
    // State-specific input data; its type decides which `Workflow`
    // can be built from this builder.
    input: W,
}
/// In this state the workflow will only synchronize the filename.
#[derive(Debug, Clone)]
pub struct SyncFilename<'a> {
    // Path to the existing note file whose filename is synchronized
    // with its front matter.
    path: &'a Path,
}
/// In this state the workflow will either synchronize the filename of an
/// existing note or, -if none exists- create a new note.
#[derive(Debug, Clone)]
pub struct SyncFilenameOrCreateNew<'a, T, F> {
    // Which configuration scheme is used when filling templates.
    scheme_source: SchemeSource<'a>,
    // Note file, directory for the new note, or file to annotate
    // (cf. `WorkflowBuilder::new()`).
    path: &'a Path,
    // Clipboard and stdin input (entries may be empty) consumed by the
    // content templates.
    clipboards: Vec<&'a T>,
    // Caller-supplied hook that may overwrite the automatically chosen
    // `TemplateKind`.
    tk_filter: F,
    // When set, an HTML rendition is written below this directory with
    // the given local link rewriting strategy.
    html_export: Option<(&'a Path, LocalLinkKind)>,
    // When set, disables language auto-detection and forces this language.
    force_lang: Option<&'a str>,
}
impl<'a> WorkflowBuilder<SyncFilename<'a>> {
    /// Constructor of all workflows. The `path` points
    /// 1. to an existing note file, or
    /// 2. to a directory where the new note should be created, or
    /// 3. to a non-Tp-Note file that will be annotated.
    ///
    /// For cases 2. and 3. call `upgrade()` on the returned builder to
    /// supply the additional input data required for note creation.
    pub fn new(path: &'a Path) -> Self {
        let input = SyncFilename { path };
        WorkflowBuilder { input }
    }

    /// Upgrade the `WorkflowBuilder` so that it can also create new note
    /// files. This requires providing additional input data:
    ///
    /// New notes are created by inserting `Tp-Note`'s environment into a
    /// template. The template set in use is selected by
    /// `scheme_new_default`. If the note to be created exists already, a
    /// so called `copy_counter` is appended to the filename and saving is
    /// retried; on further collisions the `copy_counter` is incremented
    /// until a free filename is found. The returned path points to the
    /// (new) note file on disk. Depending on the context, Tp-Note chooses
    /// one `TemplateKind` to operate
    /// (cf. `tpnote_lib::template::TemplateKind::from()`).
    /// The `tk_filter` allows overwriting this choice, e.g. you may return
    /// `TemplateKind::None` under certain circumstances. This way the
    /// caller can disable the filename synchronization and inject behavior
    /// like `--no-filename-sync`.
    ///
    /// Some templates insert the content of the clipboard or the standard
    /// input pipe. This input data (which may be empty) is provided as a
    /// vector of `Content` named `clipboards`. The templates expect text
    /// with markup or HTML. In case of HTML, the `Content.body` must start
    /// with `<!DOCTYPE html` or `<html`.
    pub fn upgrade<T: Content, F: Fn(TemplateKind) -> TemplateKind>(
        self,
        scheme_new_default: &'a str,
        clipboards: Vec<&'a T>,
        tk_filter: F,
    ) -> WorkflowBuilder<SyncFilenameOrCreateNew<'a, T, F>> {
        let scheme_source = SchemeSource::SchemeNewDefault(scheme_new_default);
        let input = SyncFilenameOrCreateNew {
            scheme_source,
            path: self.input.path,
            clipboards,
            tk_filter,
            // Optional features; enabled later via setter methods.
            html_export: None,
            force_lang: None,
        };
        WorkflowBuilder { input }
    }

    /// Finalize the build.
    pub fn build(self) -> Workflow<SyncFilename<'a>> {
        let Self { input } = self;
        Workflow { input }
    }
}
impl<'a, T: Content, F: Fn(TemplateKind) -> TemplateKind>
    WorkflowBuilder<SyncFilenameOrCreateNew<'a, T, F>>
{
    /// Request that the workflow additionally stores an HTML rendition of
    /// the note file next to it. This optional export is performed just
    /// before the workflow returns and does not influence any other
    /// operation described above.
    pub fn html_export(&mut self, path: &'a Path, local_link_kind: LocalLinkKind) {
        let export = (path, local_link_kind);
        self.input.html_export = Some(export);
    }

    /// Overwrite the default scheme.
    pub fn force_scheme(&mut self, scheme: &'a str) {
        let source = SchemeSource::Force(scheme);
        self.input.scheme_source = source;
    }

    /// By default the natural language the note is written in is guessed
    /// from the title and subtitle. Calling this disables the automatic
    /// guessing and forces the given language instead.
    pub fn force_lang(&mut self, force_lang: &'a str) {
        self.input.force_lang = Some(force_lang);
    }

    /// Finalize the build.
    pub fn build(self) -> Workflow<SyncFilenameOrCreateNew<'a, T, F>> {
        let Self { input } = self;
        Workflow { input }
    }
}
/// Holds the input data for the `run()` method.
#[derive(Debug, Clone)]
pub struct Workflow<W> {
    // Input collected by the `WorkflowBuilder`; its type selects which
    // `run()` implementation is available.
    input: W,
}
impl Workflow<SyncFilename<'_>> {
    /// Starts the "synchronize filename" workflow. Errors can occur in
    /// various ways, see `NoteError`.
    ///
    /// First, the workflow opens the note file `path` on disk and read its
    /// YAML front matter. Then, it calculates from the front matter how the
    /// filename should be to be in sync. If it is different, rename the note on
    /// disk. Finally, it returns the note's new or existing filename. Repeated
    /// calls, will reload the environment variables, but not the configuration
    /// file. This function is stateless.
    ///
    /// Note: this method holds an (upgradeable read) lock on the `SETTINGS`
    /// object to ensure that the `SETTINGS` content does not change. The lock
    /// also prevents from concurrent execution.
    ///
    ///
    /// ## Example with `TemplateKind::SyncFilename`
    ///
    /// ```rust
    /// use tpnote_lib::content::ContentString;
    /// use tpnote_lib::workflow::WorkflowBuilder;
    /// use std::env::temp_dir;
    /// use std::fs;
    /// use std::path::Path;
    ///
    /// // Prepare test: create existing note.
    /// let raw = r#"
    ///
    /// ---
    /// title: "My day"
    /// subtitle: "Note"
    /// ---
    /// Body text
    /// "#;
    /// let notefile = temp_dir().join("20221030-hello.md");
    /// fs::write(&notefile, raw.as_bytes()).unwrap();
    ///
    /// let expected = temp_dir().join("20221030-My day--Note.md");
    /// let _ = fs::remove_file(&expected);
    ///
    /// // Build and run workflow.
    /// let n = WorkflowBuilder::new(&notefile)
    ///     .build()
    ///     // You can plug in your own type (must impl. `Content`).
    ///     .run::<ContentString>()
    ///     .unwrap();
    ///
    /// // Check result
    /// assert_eq!(n, expected);
    /// assert!(n.is_file());
    /// ```
    pub fn run<T: Content>(self) -> Result<PathBuf, NoteError> {
        // Prevent the rest from running in parallel; other threads will
        // block when they try to write.
        let mut settings = SETTINGS.upgradable_read();
        // Collect input data for templates.
        let context = Context::from(self.input.path)?;
        // Read the note file; an unreadable file yields empty content.
        let content = <T>::open(self.input.path).unwrap_or_default();
        // This does not fill any templates; it only parses the existing
        // note (front matter included).
        let mut n = Note::from_existing_content(context, content, TemplateKind::SyncFilename)?;
        synchronize_filename(&mut settings, &mut n)?;
        Ok(n.rendered_filename)
    }
}
impl<T: Content, F: Fn(TemplateKind) -> TemplateKind> Workflow<SyncFilenameOrCreateNew<'_, T, F>> {
    /// Starts the "synchronize filename or create a new note" workflow.
    /// Returns the note's new or existing filename. Repeated calls, will
    /// reload the environment variables, but not the configuration file. This
    /// function is stateless.
    /// Errors can occur in various ways, see `NoteError`.
    ///
    /// Note: this method holds an (upgradeable read) lock on the `SETTINGS`
    /// object to ensure that the `SETTINGS` content does not change. The lock
    /// also prevents from concurrent execution.
    ///
    ///
    /// ## Example with `TemplateKind::FromClipboard`
    ///
    /// ```rust
    /// use tpnote_lib::content::Content;
    /// use tpnote_lib::content::ContentString;
    /// use tpnote_lib::workflow::WorkflowBuilder;
    /// use std::env::temp_dir;
    /// use std::path::PathBuf;
    /// use std::fs;
    ///
    /// // Prepare test.
    /// let notedir = temp_dir();
    ///
    /// let html_clipboard = ContentString::from_string(
    ///     "my HTML clipboard\n".to_string(),
    ///     "html_clipboard".to_string()
    /// );
    /// let txt_clipboard = ContentString::from_string(
    ///     "my TXT clipboard\n".to_string(),
    ///     "txt_clipboard".to_string()
    /// );
    /// let stdin = ContentString::from_string(
    ///     "my stdin\n".to_string(),
    ///     "stdin".to_string()
    /// );
    /// let v = vec![&html_clipboard, &txt_clipboard, &stdin];
    /// // This is the condition to choose: `TemplateKind::FromClipboard`:
    /// assert!(html_clipboard.header().is_empty()
    ///     && txt_clipboard.header().is_empty()
    ///     && stdin.header().is_empty());
    /// assert!(!html_clipboard.body().is_empty() || !txt_clipboard.body().is_empty() || !stdin.body().is_empty());
    /// let template_kind_filter = |tk|tk;
    ///
    /// // Build and run workflow.
    /// let n = WorkflowBuilder::new(&notedir)
    ///     // You can plug in your own type (must impl. `Content`).
    ///     .upgrade::<ContentString, _>(
    ///         "default", v, template_kind_filter)
    ///     .build()
    ///     .run()
    ///     .unwrap();
    ///
    /// // Check result.
    /// assert!(n.as_os_str().to_str().unwrap()
    ///     .contains("my stdin--Note"));
    /// assert!(n.is_file());
    /// let raw_note = fs::read_to_string(n).unwrap();
    ///
    /// #[cfg(not(target_family = "windows"))]
    /// assert!(raw_note.starts_with(
    ///     "\u{feff}---\ntitle: my stdin"));
    /// #[cfg(target_family = "windows")]
    /// assert!(raw_note.starts_with(
    ///     "\u{feff}---\r\ntitle:"));
    /// ```
    pub fn run(self) -> Result<PathBuf, NoteError> {
        // Prevent the rest from running in parallel; other threads will
        // block when they try to write.
        let mut settings = SETTINGS.upgradable_read();
        // Initialize settings.
        settings.with_upgraded(|settings| {
            settings.update(self.input.scheme_source, self.input.force_lang)
        })?;
        // First, generate a new note (if it does not exist), then parse its front_matter
        // and finally rename the file, if it is not in sync with its front matter.
        // Collect input data for templates.
        let context = Context::from(self.input.path)?;
        // `template_kind` will tell us what to do.
        let (template_kind, content) = TemplateKind::from(self.input.path);
        let template_kind = (self.input.tk_filter)(template_kind);
        let mut n = match template_kind {
            TemplateKind::FromDir | TemplateKind::AnnotateFile => {
                // CREATE A NEW NOTE WITH THE `TMPL_NEW_CONTENT` TEMPLATE
                // All these template do not refer to existing front matter,
                // as there is none yet.
                let context = context
                    .insert_front_matter_and_raw_text_from_existing_content(&self.input.clipboards)?
                    .set_state_ready_for_content_template();
                let mut n = Note::from_content_template(context, template_kind)?;
                n.render_filename(template_kind)?;
                // Check if the filename is not taken already
                n.set_next_unused_rendered_filename()?;
                n.save()?;
                n
            }
            TemplateKind::FromTextFile => {
                // This is part of the contract for this template:
                let content: T = content.unwrap();
                debug_assert!(&content.header().is_empty());
                debug_assert!(!&content.body().is_empty());
                let context = context
                    .insert_front_matter_and_raw_text_from_existing_content(&self.input.clipboards)?
                    .insert_front_matter_and_raw_text_from_existing_content(&vec![&content])?;
                let context = context.set_state_ready_for_content_template();
                // In this arm `template_kind` is `TemplateKind::FromTextFile`.
                let mut n = Note::from_content_template(context, template_kind)?;
                // Render filename.
                n.render_filename(template_kind)?;
                // Save new note.
                let context_path = n.context.get_path().to_owned();
                n.set_next_unused_rendered_filename_or(&context_path)?;
                n.save_and_delete_from(&context_path)?;
                n
            }
            TemplateKind::SyncFilename => {
                let mut n = Note::from_existing_content(
                    context,
                    content.unwrap(),
                    TemplateKind::SyncFilename,
                )?;
                synchronize_filename(&mut settings, &mut n)?;
                n
            }
            TemplateKind::None => {
                Note::from_existing_content(context, content.unwrap(), template_kind)?
            }
        };
        // If no new filename was rendered, fall back to the old one.
        // (Comparing via `as_os_str().is_empty()` avoids allocating an
        // empty `PathBuf` just for the comparison.)
        if n.rendered_filename.as_os_str().is_empty() {
            n.rendered_filename = n.context.get_path().to_owned();
        }
        // Export HTML rendition, if wanted.
        if let Some((export_dir, local_link_kind)) = self.input.html_export {
            HtmlRenderer::save_exporter_page(
                &n.rendered_filename,
                n.content,
                export_dir,
                local_link_kind,
            )?;
        }
        Ok(n.rendered_filename)
    }
}
/// Helper function. We take `RwLockUpgradableReadGuard<Settings>` as parameter
/// with a unique `mut` pointer because:
/// 1. It serves as a lock to prevent several instances of
///    `synchronize_filename` from running in parallel.
/// 2. We need write access to `SETTINGS` in this function.
fn synchronize_filename<T: Content>(
    settings: &mut RwLockUpgradableReadGuard<Settings>,
    note: &mut Note<T>,
) -> Result<(), NoteError> {
    // Evaluate the front matter fields `filename_sync` and
    // `no_filename_sync` to decide whether renaming is allowed.
    let no_filename_sync = match (
        note.context
            .get(TMPL_VAR_FM_ALL)
            .and_then(|v| v.get(TMPL_VAR_FM_FILENAME_SYNC)),
        note.context
            .get(TMPL_VAR_FM_ALL)
            .and_then(|v| v.get(TMPL_VAR_FM_NO_FILENAME_SYNC)),
    ) {
        // By default we sync.
        (None, None) => false,
        (None, Some(Value::Bool(nsync))) => *nsync,
        // A non-boolean `no_filename_sync` value counts as "disable sync".
        (None, Some(_)) => true,
        (Some(Value::Bool(sync)), None) => !*sync,
        // Any other combination defaults to "sync".
        _ => false,
    };
    if no_filename_sync {
        log::info!(
            "Filename synchronisation disabled with the front matter field: `{}: {}`",
            TMPL_VAR_FM_FILENAME_SYNC.trim_start_matches(TMPL_VAR_FM_),
            !no_filename_sync
        );
        return Ok(());
    }
    // Shall we switch the `settings.current_theme`?
    // If `fm_scheme` is defined, prefer this value.
    match note
        .context
        .get(TMPL_VAR_FM_ALL)
        .and_then(|v| v.get(TMPL_VAR_FM_SCHEME))
    {
        Some(Value::String(s)) if !s.is_empty() => {
            // Initialize `SETTINGS`.
            settings
                .with_upgraded(|settings| settings.update_current_scheme(SchemeSource::Force(s)))?;
            log::info!("Switch to scheme `{}` as indicated in front matter", s);
        }
        Some(Value::String(_)) | None => {
            // Initialize `SETTINGS`.
            settings.with_upgraded(|settings| {
                settings.update_current_scheme(SchemeSource::SchemeSyncDefault)
            })?;
        }
        Some(_) => {
            // `scheme` is present but not a string: report it.
            return Err(NoteError::FrontMatterFieldIsNotString {
                field_name: TMPL_VAR_FM_SCHEME.to_string(),
            });
        }
    };
    note.render_filename(TemplateKind::SyncFilename)?;
    let path = note.context.get_path().to_owned();
    note.set_next_unused_rendered_filename_or(&path)?;
    // Silently fails if source and target are identical.
    note.rename_file_from(note.context.get_path())?;
    Ok(())
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-lib/src/error.rs | tpnote-lib/src/error.rs | //! Custom error types.
use std::io;
use std::path::PathBuf;
use thiserror::Error;
/// The error `NoteError::InvalidFrontMatterYaml` prints the front matter
/// section of the note file. This constant limits the number of text lines
/// that are printed.
pub const FRONT_MATTER_ERROR_MAX_LINES: usize = 20;
/// Error related to the clipboard or `stdin` input stream.
#[derive(Debug, Error, PartialEq)]
pub enum InputStreamError {
    /// The HTML input stream starts with a doctype other than
    /// `<!DOCTYPE html>`.
    /// Remedy: prepend the HTML input data with `<!DOCTYPE html>` or
    /// `<html>`.
    #[error(
        "The HTML input stream starts with a doctype other than\n\
        \"<!DOCTYPE html>\":\n\
        {html}"
    )]
    NonHtmlDoctype { html: String },
}
/// Configuration file related filesystem and syntax errors.
#[derive(Debug, Error)]
pub enum FileError {
    /// Remedy: delete all files in configuration file directory.
    #[error(
        "Can not find unused filename in directory:\n\
        \t{directory:?}\n\
        (only `COPY_COUNTER_MAX` copies are allowed)."
    )]
    NoFreeFileName { directory: PathBuf },
    /// Forwarded filesystem I/O error.
    #[error(transparent)]
    Io(#[from] std::io::Error),
    /// Forwarded TOML serialization error.
    #[error(transparent)]
    Serialize(#[from] toml::ser::Error),
    /// Forwarded TOML deserialization error.
    #[error(transparent)]
    Deserialize(#[from] toml::de::Error),
}
/// Configuration file related semantic errors.
#[derive(Debug, Error, Clone, PartialEq)]
pub enum LibCfgError {
    /// `CfgVal` can only be deserialized with data whose root element
    /// is a `Value::Table`.
    /// This should not happen. Please file a bug report.
    #[error("Input data root must be a `Value::Table`")]
    CfgValInputIsNotTable,
    /// Remedy: Choose another scheme.
    #[error(
        "Configuration file error in section:\n\
        \t[[scheme]]\n\
        \tscheme_default = \"{scheme_name}\"\n\
        No scheme found. Available configured schemes:\n\
        {schemes}"
    )]
    SchemeNotFound {
        scheme_name: String,
        schemes: String,
    },
    /// Remedy: Choose a value in the given interval.
    #[error(
        "Configuration file error in [base_scheme] or in section:\n\
        \t[[scheme]]\n\
        \tname = \"{scheme_name}\"\n\
        \t[scheme.tmpl]\n\
        \tfilter.get_lang.relative_distance_min={dist}\n\
        must be between 0.0 and 0.99."
    )]
    MinimumRelativeDistanceInvalid { scheme_name: String, dist: f64 },
    /// Remedy: Choose another `sort_tag.extra_separator` character.
    // NOTE: the `\n\` after the `name = ...` line was missing; without it
    // the message leaked a raw newline plus source indentation.
    #[error(
        "Configuration file error in [base_scheme] or in section:\n\
        \t[[scheme]]\n\
        \tname = \"{scheme_name}\"\n\
        \t[scheme.filename]\n\
        \tsort_tag.extra_separator=\"{extra_separator}\"\n\
        must not be one of `sort_tag_extra_chars=\"{sort_tag_extra_chars}\"`,\n\
        `0..9`, `a..z` or `{dot_file_marker}`."
    )]
    SortTagExtraSeparator {
        scheme_name: String,
        dot_file_marker: char,
        sort_tag_extra_chars: String,
        extra_separator: String,
    },
    /// Remedy: Choose another `extension_default` out of
    /// `extensions[..].0`.
    #[error(
        "Configuration file error in [base_scheme] or in section:\n\
        \t[[scheme]]\n\
        \tname = \"{scheme_name}\"\n\
        \t[scheme.filename]\n\
        \t`extension_default=\"{extension_default}\"`\n\
        must not be one of:\n\
        \t{extensions}."
    )]
    ExtensionDefault {
        scheme_name: String,
        extension_default: String,
        extensions: String,
    },
    /// Remedy: Insert `sort_tag.separator` in `sort_tag.extra_chars`.
    #[error(
        "Configuration file error in [base_scheme] or in section:\n\
        \t[[scheme]]\n\
        \tname = \"{scheme_name}\"\n\
        \t[scheme.filename]\n\
        All characters in `sort_tag.separator=\"{separator}\"`\n\
        must be in the set `sort_tag.extra_chars=\"{chars}\"`,\n\
        or in `0..9`, `a..z` and\n\
        must NOT start with `{dot_file_marker}`."
    )]
    SortTagSeparator {
        scheme_name: String,
        dot_file_marker: char,
        chars: String,
        separator: String,
    },
    /// Remedy: Choose a `copy_counter.extra_separator` in the set.
    #[error(
        "Configuration file error in [base_scheme] or in section:\n\
        \t[[scheme]]\n\
        \tname = \"{scheme_name}\"\n\
        \t[scheme.filename]\n\
        `copy_counter.extra_separator=\"{extra_separator}\"`\n\
        must be one of: \"{chars}\""
    )]
    CopyCounterExtraSeparator {
        scheme_name: String,
        chars: String,
        extra_separator: String,
    },
    /// Remedy: check the configuration file variable `tmpl.filter.assert_preconditions`.
    #[error(
        "choose one of: `IsDefined`, `IsString`, `IsNumber`, `IsStringOrNumber`, `IsBool`, `IsValidSortTag`"
    )]
    ParseAssertPrecondition,
    /// Remedy: check the configuration file variable `arg_default.export_link_rewriting`.
    #[error("choose one of: `off`, `short` or `long`")]
    ParseLocalLinkKind,
    /// Remedy: check the ISO 639-1 codes in the configuration variable
    /// `tmpl.filter.get_lang.language_candidates` and make sure that they are
    /// supported, by checking `tpnote -V`.
    #[error(
        "The ISO 639-1 language subtag `{language_code}`\n\
        in the configuration file variable\n\
        `tmpl.filter.get_lang.language_candidates` or in the\n\
        environment variable `TPNOTE_LANG_DETECTION` is not\n\
        supported. All listed codes must be part of the set:\n\
        {all_langs}."
    )]
    ParseLanguageCode {
        language_code: String,
        all_langs: String,
    },
    /// Remedy: add one more ISO 639-1 code in the configuration variable
    /// `tmpl.filter.get_lang.language_candidates` (or in
    /// `TPNOTE_LANG_DETECTION`) and make sure that the code is supported, by
    /// checking `tpnote -V`.
    #[error(
        "Not enough languages to choose from.\n\
        The list of ISO 639-1 language subtags\n\
        currently contains only one item: `{language_code}`.\n\
        Add one more language to the configuration \n\
        file variable `tmpl.filter.get_lang` or to the\n\
        environment variable `TPNOTE_LANG_DETECTION`\n\
        to prevent this error from occurring."
    )]
    NotEnoughLanguageCodes { language_code: String },
    /// Remedy: correct the variable by choosing one the available themes.
    #[error(
        "Configuration file error in section `[tmp_html]` in line:\n\
        \t{var} = \"{value}\"\n\
        The theme must be one of the following set:\n\
        {available}"
    )]
    HighlightingThemeName {
        var: String,
        value: String,
        available: String,
    },
    /// Forwarded TOML deserialization error.
    #[error(transparent)]
    Deserialize(#[from] toml::de::Error),
}
#[derive(Debug, Error)]
/// Error type returned from methods in or related to the `note` module.
pub enum NoteError {
    /// Remedy: make sure, that a file starting with `path` exists.
    #[error("<NONE FOUND: {path}...>")]
    CanNotExpandShorthandLink { path: String },
    /// Remedy: Choose another scheme.
    #[error(
        "Invalid header variable value: no scheme `{scheme_val}` found.\n\
        \t---\n\
        \t{scheme_key}: {scheme_val}\n\
        \t---\n\n\
        Available schemes in configuration file:\n\
        {schemes}
        "
    )]
    SchemeNotFound {
        scheme_val: String,
        scheme_key: String,
        schemes: String,
    },
    /// Remedy: remove invalid characters.
    #[error(
        "The `sort_tag` header variable contains invalid\n\
        character(s):\n\n\
        \t---\n\
        \tsort_tag: {sort_tag}\n\
        \t---\n\n\
        Only the characters: \"{sort_tag_extra_chars}\", `0..9`\n\
        and `a..z` (maximum {filename_sort_tag_letters_in_succession_max} in \
        succession) are allowed."
    )]
    FrontMatterFieldIsInvalidSortTag {
        sort_tag: String,
        sort_tag_extra_chars: String,
        filename_sort_tag_letters_in_succession_max: u8,
    },
    /// Remedy: choose another sort-tag.
    #[error(
        "This `sort_tag` header variable is a sequential sort-tag:\n\
        \t---\n\
        \tsort_tag: {sort_tag}\n\
        \t---\n\n\
        A file with this sort-tag exists already on disk:\n\n\
        \t`{existing_file}`\n\n\
        For sequential sort-tags no duplicates are allowed.\n\
        Please choose another sort-tag.
        "
    )]
    FrontMatterFieldIsDuplicateSortTag {
        sort_tag: String,
        existing_file: String,
    },
    /// Remedy: index the compound type?
    #[error(
        "The type of the front matter field `{field_name}:`\n\
        must not be a compound type. Use a simple type, \n\
        i.e. `String`, `Number` or `Bool` instead. Example:\n\
        \n\
        \t~~~~~~~~~~~~~~\n\
        \t---\n\
        \t{field_name}: My simple type\n\
        \t---\n\
        \tsome text\n\
        \t~~~~~~~~~~~~~~"
    )]
    FrontMatterFieldIsCompound { field_name: String },
    /// Remedy: try to enclose with quotes.
    #[error(
        "The (sub)type of the front matter field `{field_name}:`\n\
        must be a non empty `String`. Example:\n\
        \n\
        \t~~~~~~~~~~~~~~\n\
        \t---\n\
        \t{field_name}: My string\n\
        \t---\n\
        \tsome text\n\
        \t~~~~~~~~~~~~~~"
    )]
    FrontMatterFieldIsEmptyString { field_name: String },
    /// Remedy: try to remove possible quotes.
    #[error(
        "The (sub)type of the front matter field `{field_name}:`\n\
        must be `Bool`. Example:\n\
        \n\
        \t~~~~~~~~~~~~~~\n\
        \t---\n\
        \t{field_name}: false\n\
        \t---\n\
        \tsome text\n\
        \t~~~~~~~~~~~~~~\n\
        \n\
        Hint: try to remove possible quotes."
    )]
    FrontMatterFieldIsNotBool { field_name: String },
    /// Remedy: try to remove possible quotes.
    #[error(
        "The (sub)type of the front matter field `{field_name}:`\n\
        must be `Number`. Example:\n\
        \n\
        \t~~~~~~~~~~~~~~\n\
        \t---\n\
        \t{field_name}: 142\n\
        \t---\n\
        \tsome text\n\
        \t~~~~~~~~~~~~~~\n\
        \n\
        Hint: try to remove possible quotes."
    )]
    FrontMatterFieldIsNotNumber { field_name: String },
    /// Remedy: try to enclose with quotes.
    #[error(
        "The (sub)type of the front matter field `{field_name}:`\n\
        must be `String`. Example:\n\
        \n\
        \t~~~~~~~~~~~~~~\n\
        \t---\n\
        \t{field_name}: My string\n\
        \t---\n\
        \tsome text\n\
        \t~~~~~~~~~~~~~~\n\
        \n\
        Hint: try to enclose with quotes."
    )]
    FrontMatterFieldIsNotString { field_name: String },
    /// Remedy: correct the front matter variable `file_ext`.
    #[error(
        "The file extension:\n\
        \t---\n\
        \tfile_ext: {extension}\n\
        \t---\n\
        is not registered as Tp-Note file in\n\
        your configuration file:\n\
        \t{extensions}\n\
        \n\
        Choose one of the listed above or add more extensions to the\n\
        `filename.extensions` variable in your configuration file."
    )]
    FrontMatterFieldIsNotTpnoteExtension {
        extension: String,
        extensions: String,
    },
    /// Remedy: add the missing field in the note's front matter.
    #[error(
        "The document is missing a `{field_name}:`\n\
        field in its front matter:\n\
        \n\
        \t~~~~~~~~~~~~~~\n\
        \t---\n\
        \t{field_name}: \"My note\"\n\
        \t---\n\
        \tsome text\n\
        \t~~~~~~~~~~~~~~\n\
        \n\
        Please correct the front matter if this is\n\
        supposed to be a Tp-Note file. Ignore otherwise."
    )]
    FrontMatterFieldMissing { field_name: String },
    /// Remedy: check front matter delimiters `----`.
    #[error(
        "The document (or template) has no front matter\n\
        section. Is one `---` missing?\n\n\
        \t~~~~~~~~~~~~~~\n\
        \t---\n\
        \t{compulsory_field}: My note\n\
        \t---\n\
        \tsome text\n\
        \t~~~~~~~~~~~~~~\n\
        \n\
        Please correct the front matter if this is\n\
        supposed to be a Tp-Note file. Ignore otherwise."
    )]
    FrontMatterMissing { compulsory_field: String },
    /// Remedy: check YAML syntax in the note's front matter.
    #[error(
        "Can not parse front matter:\n\
        \n\
        {front_matter}\
        \n\
        {source_error}"
    )]
    InvalidFrontMatterYaml {
        front_matter: String,
        source_error: serde_yaml::Error,
    },
    /// Remedy: check YAML syntax in the input stream's front matter.
    #[error(
        "Invalid YAML field(s) in the {tmpl_var} input\n\
        stream data found:\n\
        {source_str}"
    )]
    InvalidInputYaml {
        tmpl_var: String,
        source_str: String,
    },
    /// Remedy: check HTML syntax in the input stream data.
    #[error(
        "Invalid HTML in the input stream data found:\n\
        {source_str}"
    )]
    InvalidHtml { source_str: String },
    /// Remedy: reconfigure `scheme.filename.extensions.1`.
    #[error(
        "Filter `html_to_markup` is disabled for this \n\
        `extension_default` in table `scheme.filename.extensions.1`."
    )]
    HtmlToMarkupDisabled,
    /// Remedy: correct link path.
    #[error("<INVALID: {path}>")]
    InvalidLocalPath { path: String },
    /// Remedy: check the file permission of the note file.
    #[error("Can not read file:\n\t {path:?}\n{source}")]
    Read { path: PathBuf, source: io::Error },
    /// Remedy: check ReStructuredText syntax.
    #[error("Can not parse reStructuredText input:\n{msg}")]
    #[cfg(feature = "renderer")]
    RstParse { msg: String },
    /// Remedy: restart with `--debug trace`.
    #[error(
        "Tera error:\n\
        {source}"
    )]
    Tera {
        #[from]
        source: tera::Error,
    },
    /// Remedy: check the syntax of the Tera template in the configuration file.
    #[error(
        "Tera template error in configuration file\n\
        variable \"{template_str}\":\n {source_str}"
    )]
    TeraTemplate {
        source_str: String,
        template_str: String,
    },
    /// Forwarded configuration file error.
    #[error(transparent)]
    File(#[from] FileError),
    /// Forwarded I/O error.
    #[error(transparent)]
    Io(#[from] std::io::Error),
    /// Forwarded configuration semantic error (e.g. an unsupported
    /// language code).
    #[error(transparent)]
    ParseLanguageCode(#[from] LibCfgError),
    /// Forwarded UTF-8 decoding error.
    #[error(transparent)]
    Utf8Conversion {
        #[from]
        source: core::str::Utf8Error,
    },
}
/// Macro to construct a `NoteError::TeraTemplate` from a `tera::Error`.
/// `$e` is the identifier of the `tera::Error`; `$t` is the (owned `String`)
/// name of the template whose rendering failed.
#[macro_export]
macro_rules! note_error_tera_template {
    ($e:ident, $t:expr) => {
        NoteError::TeraTemplate {
            // Use the error's source (cause) if present; fall back to an
            // empty message otherwise.
            source_str: std::error::Error::source(&$e)
                .unwrap_or(&tera::Error::msg(""))
                .to_string()
                // Remove useless information.
                .trim_end_matches("in context while rendering '__tera_one_off'")
                .to_string(),
            template_str: $t,
        }
    };
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-lib/src/clone_ext.rs | tpnote-lib/src/clone_ext.rs | //! Extension trait adding a `shallow_clone()` method to `Cow`.
use std::borrow::Cow;
pub trait CloneExt<'b> {
    /// Clones a `Cow` without any memory allocation: the result always
    /// borrows from `self`, so the original must outlive the clone.
    ///
    /// Use case: sometimes only a `&Cow` is at hand, but an owned `Cow`
    /// value is required:
    /// ```no_run
    /// use crate::tpnote_lib::clone_ext::CloneExt;
    /// use std::borrow::Cow;
    /// fn do_something_or_nothing(v: Cow<str>) -> Cow<str> {
    ///     if v.len() > 3 {
    ///         let s = "Hello ".to_string() + &*v;
    ///         Cow::Owned(s)
    ///     } else {
    ///         v
    ///     }
    /// }
    ///
    /// let a: &Cow<str> = &Cow::Owned("world!".to_string());
    /// let b: Cow<str> = a.shallow_clone();
    /// assert_eq!(do_something_or_nothing(b), "Hello world!");
    ///
    /// let a: &Cow<str> = &Cow::Owned("ld!".to_string());
    /// let b: Cow<str> = a.shallow_clone();
    /// assert_eq!(do_something_or_nothing(b), "ld!");
    /// ```
    fn shallow_clone(&'b self) -> Cow<'b, str>;
}
impl<'b> CloneExt<'b> for Cow<'b, str> {
    fn shallow_clone(&'b self) -> Cow<'b, str> {
        // Borrow the underlying `str` no matter whether `self` owns its
        // data or borrows it; no allocation takes place.
        match self {
            Cow::Borrowed(b) => Cow::Borrowed(b),
            Cow::Owned(o) => Cow::Borrowed(o.as_str()),
        }
    }
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-lib/src/text_reader.rs | tpnote-lib/src/text_reader.rs | //! An iterator adapter to suppress CRLF (`\r\n`) sequences in a stream of
//! bytes.
//!
//! # Overview
//!
//! This module provides [`CrlfSuppressor`], an iterator adapter to filter out
//! CR (`\r`, 0x0D) when it is immediately followed by LF (`\n`, 0x0A), as
//! commonly found in Windows line endings.
//!
//! It also provides an extension trait [`CrlfSuppressorExt`] so you can easily
//! call `.crlf_suppressor()` on any iterator over bytes (e.g., from
//! `BufReader::bytes()`).
//!
//! # Usage
//!
//! ## Basic example
//!
//! ```rust
//! use std::io::{Cursor, Error, Read};
//! use tpnote_lib::text_reader::CrlfSuppressorExt;
//!
//! let data = b"hello\r\nworld";
//! let normalized: Result<Vec<u8>, Error> = Cursor::new(data)
//! .bytes()
//! .crlf_suppressor()
//! .collect();
//! let s = String::from_utf8(normalized.unwrap()).unwrap();
//! assert_eq!(s, "hello\nworld");
//! ```
//!
//! ## Reading from a file
//!
//! ```rust,no_run
//! use std::fs::File;
//! use tpnote_lib::text_reader::read_as_string_with_crlf_suppression;
//!
//! let normalized = read_as_string_with_crlf_suppression(File::open("file.txt")?)?;
//! println!("{}", normalized);
//! # Ok::<(), std::io::Error>(())
//! ```
//!
//! # Implementation details
//!
//! In UTF-8, continuation bytes for multi-byte code points are always in the
//! range `0x80..0xBF`. Since `0x0D` and `0x0A` are not in this range, searching
//! for CRLF as byte values is safe.
//!
//! # See also
//!
//! - [`BufReader::bytes`](https://doc.rust-lang.org/std/io/struct.BufReader.html#method.bytes)
//! - [`String::from_utf8`](https://doc.rust-lang.org/std/string/struct.String.html#method.from_utf8)
use std::io::{self, BufReader, Read};
use std::iter::Peekable;
const CR: u8 = 0x0D; // Carriage Return.
const LF: u8 = 0x0A; // Line Feed.

/// Iterator adapter that drops every CR (`\r`, 0x0D) byte which is
/// immediately followed by an LF (`\n`, 0x0A), i.e. it rewrites Windows
/// CRLF line endings as bare LF.
///
/// Scanning raw bytes is UTF-8 safe here:
///
/// * Continuation bytes of multi-byte code points always start with
///   `10xxxxxx` and therefore lie in the range 0x80..0xBF.
/// * Leading bytes of 3- or 4-byte sequences start with `1110xxxx` or
///   `11110xxx` respectively.
/// * 0x0D (`00001101`) and 0x0A (`00001010`) match neither pattern, so a
///   byte equal to CR or LF can only be a genuine one-byte CR or LF
///   character.
pub struct CrlfSuppressor<I: Iterator<Item = io::Result<u8>>> {
    // Peekable so that a CR can look one byte ahead for a following LF.
    iter: Peekable<I>,
}
impl<I: Iterator<Item = io::Result<u8>>> CrlfSuppressor<I> {
    /// Creates a new suppressor from an iterator over bytes.
    /// (Preferred usage: see extension trait `CrlfSuppressorExt`).
    ///
    /// # Example
    /// ```
    /// use std::io::Cursor;
    /// use std::io::Read;
    /// use tpnote_lib::text_reader::CrlfSuppressor;
    ///
    /// let bytes = b"foo\r\nbar";
    /// let suppressor = CrlfSuppressor::new(Cursor::new(bytes).bytes());
    /// ```
    pub fn new(iter: I) -> Self {
        let iter = iter.peekable();
        CrlfSuppressor { iter }
    }
}
impl<I: Iterator<Item = io::Result<u8>>> Iterator for CrlfSuppressor<I> {
    type Item = io::Result<u8>;
    fn next(&mut self) -> Option<Self::Item> {
        let item = self.iter.next()?;
        match item {
            Ok(CR) if matches!(self.iter.peek(), Some(Ok(LF))) => {
                // Swallow the CR and emit the following LF in its place.
                self.iter.next();
                Some(Ok(LF))
            }
            // Lone CR, any other byte, or an I/O error: pass through.
            other => Some(other),
        }
    }
}
/// Extension trait to add `.crlf_suppressor()` to any iterator over bytes.
///
/// # Example
/// ```rust
/// use std::io::{Cursor, Error, Read};
/// use tpnote_lib::text_reader::CrlfSuppressorExt;
///
/// let data = b"hello\r\nworld";
/// let normalized: Result<Vec<u8>, Error> = Cursor::new(data)
///     .bytes()
///     .crlf_suppressor()
///     .collect();
/// let s = String::from_utf8(normalized.unwrap()).unwrap();
/// assert_eq!(s, "hello\nworld");
/// ```
pub trait CrlfSuppressorExt: Iterator<Item = io::Result<u8>> + Sized {
    /// Returns an iterator that suppresses CRLF sequences.
    // Provided method: simply wraps `self` in the adapter; implementors
    // never need to override it.
    fn crlf_suppressor(self) -> CrlfSuppressor<Self> {
        CrlfSuppressor::new(self)
    }
}
// Blanket implementation: every iterator over `io::Result<u8>` gets
// `.crlf_suppressor()` for free.
impl<T: Iterator<Item = io::Result<u8>>> CrlfSuppressorExt for T {}
/// Reads the complete contents of `reader`, dropping every CR (`\r`) byte
/// that is immediately followed by an LF (`\n`), i.e. normalizing CRLF
/// line endings to plain LF.
///
/// # Arguments
///
/// * `reader` - Any type that implements [`std::io::Read`], such as a file,
///   buffer, or stream.
///
/// # Returns
///
/// A [`std::io::Result`] containing a `Vec<u8>` with the filtered bytes.
///
/// # Errors
///
/// Returns the first I/O error encountered while reading from `reader`.
///
/// # Example
///
/// ```rust
/// use std::io::Cursor;
/// use tpnote_lib::text_reader::read_with_crlf_suppression;
///
/// let data = b"foo\r\nbar\nbaz\r\n";
/// let cursor = Cursor::new(data);
/// let result = read_with_crlf_suppression(cursor).unwrap();
/// assert_eq!(result, b"foo\nbar\nbaz\n");
/// ```
///
/// # See Also
///
/// [`std::io::Read`], [`std::fs::File`]
pub fn read_with_crlf_suppression<R: Read>(reader: R) -> io::Result<Vec<u8>> {
    // Buffer the reader first: the suppressor pulls one byte at a time,
    // which unbuffered would mean one syscall per byte.
    BufReader::new(reader).bytes().crlf_suppressor().collect()
}
/// Reads all bytes from `reader`, suppressing CR (`\r`) bytes immediately
/// followed by LF (`\n`), and returns the result as a UTF-8 string.
///
/// Useful for normalizing line endings (CRLF to LF) when reading textual
/// data from any source implementing [`std::io::Read`].
///
/// # Arguments
///
/// * `reader` - Any type implementing [`std::io::Read`], such as a file,
///   buffer, or stream.
///
/// # Returns
///
/// The resulting `String` if all bytes are valid UTF-8.
///
/// # Errors
///
/// Returns an error if an I/O error occurs while reading, or — mapped to
/// [`std::io::ErrorKind::InvalidData`] — if the data is not valid UTF-8.
///
/// # Example
///
/// ```rust
/// use std::io::Cursor;
/// use tpnote_lib::text_reader::read_as_string_with_crlf_suppression;
///
/// let input = b"hello\r\nworld";
/// let cursor = Cursor::new(input);
/// let output = read_as_string_with_crlf_suppression(cursor).unwrap();
/// assert_eq!(output, "hello\nworld");
/// ```
///
/// # See Also
///
/// [`read_with_crlf_suppression`]
pub fn read_as_string_with_crlf_suppression<R: Read>(reader: R) -> io::Result<String> {
    read_with_crlf_suppression(reader).and_then(|bytes| {
        String::from_utf8(bytes).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
    })
}
/// Additional method for `String` suppressing `\r` in `\r\n` sequences:
/// When no `\r\n` is found, no memory allocation occurs.
///
/// ```rust
/// use tpnote_lib::text_reader::StringExt;
///
/// let s = "hello\r\nworld".to_string();
/// let res = s.crlf_suppressor_string();
/// assert_eq!("hello\nworld", res);
///
/// let s = "hello\nworld".to_string();
/// let res = s.crlf_suppressor_string();
/// assert_eq!("hello\nworld", res);
/// ```
pub trait StringExt {
    /// Consumes `self` and returns a `String` in which every CRLF (`\r\n`)
    /// sequence is replaced by a single LF (`\n`). Lone `\r` bytes are
    /// preserved.
    fn crlf_suppressor_string(self) -> String;
}
impl StringExt for String {
    fn crlf_suppressor_string(self) -> String {
        // `str::find`/`str::replace` search byte-wise. In UTF-8,
        // continuation bytes of multi-byte code points always lie in
        // `0x80..0xBF`; since CR (0x0D) and LF (0x0A) never appear there,
        // a byte-wise search for "\r\n" cannot produce false positives.
        match self.find("\r\n") {
            // No CRLF present: hand the string back without allocating.
            None => self,
            // At least one CRLF: allocate once and rewrite.
            Some(_) => self.replace("\r\n", "\n"),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Cursor;
    /// Helper: pipes `input` through `CrlfSuppressor` and decodes the
    /// result as UTF-8. Panics on read errors or invalid UTF-8.
    fn run(input: &[u8]) -> String {
        let cursor = Cursor::new(input);
        let bytes = cursor.bytes().crlf_suppressor();
        let vec: Vec<u8> = bytes.map(|b| b.unwrap()).collect();
        String::from_utf8(vec).unwrap()
    }
    #[test]
    fn test_crlf_sequence() {
        let input = b"foo\r\nbar\r\nbaz";
        let expected = "foo\nbar\nbaz";
        assert_eq!(run(input), expected);
    }
    // A CR without a following LF must pass through unchanged.
    #[test]
    fn test_lone_cr() {
        let input = b"foo\rbar";
        let expected = "foo\rbar";
        assert_eq!(run(input), expected);
    }
    #[test]
    fn test_lone_lf() {
        let input = b"foo\nbar";
        let expected = "foo\nbar";
        assert_eq!(run(input), expected);
    }
    #[test]
    fn test_mixed_endings() {
        let input = b"foo\r\nbar\rbaz\nqux";
        let expected = "foo\nbar\rbaz\nqux";
        assert_eq!(run(input), expected);
    }
    #[test]
    fn test_empty_input() {
        let input = b"";
        let expected = "";
        assert_eq!(run(input), expected);
    }
    #[test]
    fn test_only_crlf() {
        let input = b"\r\n";
        let expected = "\n";
        assert_eq!(run(input), expected);
    }
    #[test]
    fn test_only_cr() {
        let input = b"\r";
        let expected = "\r";
        assert_eq!(run(input), expected);
    }
    #[test]
    fn test_only_lf() {
        let input = b"\n";
        let expected = "\n";
        assert_eq!(run(input), expected);
    }
    // A CR at end of input has no LF to pair with: keep it.
    #[test]
    fn test_trailing_cr() {
        let input = b"foo\r";
        let expected = "foo\r";
        assert_eq!(run(input), expected);
    }
    #[test]
    fn test_trailing_crlf() {
        let input = b"foo\r\n";
        let expected = "foo\n";
        assert_eq!(run(input), expected);
    }
    // Verifies the documented allocation behavior of
    // `crlf_suppressor_string()` by comparing heap buffer addresses.
    #[test]
    fn test_crlf_suppressor_string() {
        use std::ptr::addr_of;
        let s = "hello\r\nworld".to_string();
        let s_addr = addr_of!(*s);
        let res = s.crlf_suppressor_string();
        assert_eq!("hello\nworld", res);
        // Memory allocation occurred.
        assert_ne!(s_addr, addr_of!(*res));
        //
        let s = "hello\nworld".to_string();
        let s_addr = addr_of!(*s);
        let res = s.crlf_suppressor_string();
        assert_eq!("hello\nworld", res);
        // No memory allocation here:
        assert_eq!(s_addr, addr_of!(*res));
    }
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-lib/src/config_value.rs | tpnote-lib/src/config_value.rs | //! Provides a newtype for `toml::map::Map<String, Value>)` with methods
//! to merge (incomplete) configuration data from different sources (files).
use std::str::FromStr;
use serde::{Deserialize, Serialize};
use toml::Value;
use crate::error::LibCfgError;
/// This decides until what depth arrays are merged into the default
/// configuration. Tables are always merged. Deeper arrays replace the default
/// configuration. For our configuration this means, that `scheme` is merged and
/// all other arrays are replaced.
/// (Consumed by `CfgVal::merge` via `CfgVal::merge_toml_values`.)
pub(crate) const CONFIG_FILE_MERGE_DEPTH: isize = 2;
/// A newtype holding configuration data.
/// Wraps the top-level TOML table (a key to `toml::Value` map).
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize, Default)]
pub struct CfgVal(toml::map::Map<String, Value>);
/// This API deals with configuration values.
///
impl CfgVal {
    /// Constructor returning an empty map.
    pub fn new() -> Self {
        Self::default()
    }
    /// Append key, value pairs from other to `self`.
    /// Keys already present in `self` are overwritten by `other`'s values;
    /// no deep merging takes place here (see `merge` for that).
    ///
    /// ```rust
    /// use tpnote_lib::config_value::CfgVal;
    /// use std::str::FromStr;
    ///
    /// let toml1 = "\
    /// [arg_default]
    /// scheme = 'zettel'
    /// ";
    ///
    /// let toml2 = "\
    /// [base_scheme]
    /// name = 'some name'
    /// ";
    ///
    /// let mut cfg1 = CfgVal::from_str(toml1).unwrap();
    /// let cfg2 = CfgVal::from_str(toml2).unwrap();
    ///
    /// let expected = CfgVal::from_str("\
    /// [arg_default]
    /// scheme = 'zettel'
    /// [base_scheme]
    /// name = 'some name'
    /// ").unwrap();
    ///
    /// // Run test
    /// cfg1.extend(cfg2);
    ///
    /// assert_eq!(cfg1, expected);
    /// ```
    #[inline]
    pub fn extend(&mut self, other: Self) {
        self.0.extend(other.0);
    }
    /// Insert a single key/value pair, overwriting an existing entry with
    /// the same key.
    #[inline]
    pub fn insert(&mut self, key: String, val: Value) {
        self.0.insert(key, val);
    }
    #[inline]
    /// Merges configuration values from `other` into `self`
    /// and returns the result. The top level element is a set of key and value
    /// pairs (map). If one of its values is a `Value::Array`, then the
    /// corresponding array from `other` is appended.
    /// Otherwise the corresponding `other` value replaces the `self` value.
    /// Deeper nested `Value::Array`s are never appended but always replaced
    /// (`CONFIG_FILE_MERGE_DEPTH=2`).
    ///
    /// ```rust
    /// use tpnote_lib::config_value::CfgVal;
    /// use std::str::FromStr;
    ///
    /// let toml1 = "\
    /// version = '1.0.0'
    /// [[scheme]]
    /// name = 'default'
    /// ";
    /// let toml2 = "\
    /// version = '2.0.0'
    /// [[scheme]]
    /// name = 'zettel'
    /// ";
    ///
    /// let mut cfg1 = CfgVal::from_str(toml1).unwrap();
    /// let cfg2 = CfgVal::from_str(toml2).unwrap();
    ///
    /// let expected = CfgVal::from_str("\
    /// version = '2.0.0'
    /// [[scheme]]
    /// name = 'default'
    /// [[scheme]]
    /// name = 'zettel'
    /// ").unwrap();
    ///
    /// // Run test
    /// let res = cfg1.merge(cfg2);
    ///
    /// assert_eq!(res, expected);
    /// ```
    pub fn merge(self, other: Self) -> Self {
        let left = Value::Table(self.0);
        let right = Value::Table(other.0);
        let res = Self::merge_toml_values(left, right, CONFIG_FILE_MERGE_DEPTH);
        // Invariant: when left and right are `Value::Table`, then `res`
        // must be a `Value::Table` also.
        if let Value::Table(map) = res {
            Self(map)
        } else {
            unreachable!()
        }
    }
    /// Merges configuration values from the right-hand side into the
    /// left-hand side and returns the result. The top level element is usually
    /// a `toml::Value::Table`. The table is a set of key and value pairs.
    /// The values here can be compound data types, i.e. `Value::Table` or
    /// `Value::Array`.
    /// `merge_depth` controls whether a top-level array in the TOML document
    /// is appended to instead of overridden. This is useful for TOML documents
    /// that have a top-level arrays (`merge_depth=2`) like `[[scheme]]` in
    /// `tpnote.toml`. For top level arrays, one usually wants to append the
    /// right-hand array to the left-hand array instead of just replacing the
    /// left-hand array with the right-hand array. If you set `merge_depth=0`,
    /// all arrays whatever level they have, are always overridden by the
    /// right-hand side.
    pub(crate) fn merge_toml_values(
        left: toml::Value,
        right: toml::Value,
        merge_depth: isize,
    ) -> toml::Value {
        use toml::Value;
        // Extracts the `name` key of a table value, if present. Array
        // elements are paired for merging by this key.
        fn get_name(v: &Value) -> Option<&str> {
            v.get("name").and_then(Value::as_str)
        }
        match (left, right) {
            (Value::Array(mut left_items), Value::Array(right_items)) => {
                // The top-level arrays should be merged but nested arrays
                // should act as overrides. For the `tpnote.toml` config,
                // this means that you can specify a sub-set of schemes in
                // an overriding `tpnote.toml` but that nested arrays like
                // `scheme.tmpl.fm_var_localization` are replaced instead
                // of merged.
                if merge_depth > 0 {
                    left_items.reserve(right_items.len());
                    for rvalue in right_items {
                        // If the right-hand element carries a `name` that
                        // also occurs on the left, remove the left element
                        // and merge into it; otherwise append as new.
                        let lvalue = get_name(&rvalue)
                            .and_then(|rname| {
                                left_items.iter().position(|v| get_name(v) == Some(rname))
                            })
                            .map(|lpos| left_items.remove(lpos));
                        let mvalue = match lvalue {
                            Some(lvalue) => {
                                Self::merge_toml_values(lvalue, rvalue, merge_depth - 1)
                            }
                            None => rvalue,
                        };
                        left_items.push(mvalue);
                    }
                    Value::Array(left_items)
                } else {
                    Value::Array(right_items)
                }
            }
            (Value::Table(mut left_map), Value::Table(right_map)) => {
                // NOTE(review): tables merge for all practical depths; the
                // `-10` bound only stops merging below depth -10 — confirm
                // this bound is intentional ("Tables are always merged" per
                // the `CONFIG_FILE_MERGE_DEPTH` doc).
                if merge_depth > -10 {
                    for (rname, rvalue) in right_map {
                        match left_map.remove(&rname) {
                            Some(lvalue) => {
                                let merged_value =
                                    Self::merge_toml_values(lvalue, rvalue, merge_depth - 1);
                                left_map.insert(rname, merged_value);
                            }
                            None => {
                                left_map.insert(rname, rvalue);
                            }
                        }
                    }
                    Value::Table(left_map)
                } else {
                    Value::Table(right_map)
                }
            }
            // Scalars and mismatched kinds: the right-hand side wins.
            (_, value) => value,
        }
    }
    /// Convert to `toml::Value`.
    ///
    /// ```rust
    /// use tpnote_lib::config_value::CfgVal;
    /// use std::str::FromStr;
    ///
    /// let toml1 = "\
    /// version = 1
    /// [[scheme]]
    /// name = 'default'
    /// ";
    ///
    /// let cfg1 = CfgVal::from_str(toml1).unwrap();
    ///
    /// let expected: toml::Value = toml::from_str(toml1).unwrap();
    ///
    /// // Run test
    /// let res = cfg1.to_value();
    ///
    /// assert_eq!(res, expected);
    /// ```
    pub fn to_value(self) -> toml::Value {
        Value::Table(self.0)
    }
}
impl FromStr for CfgVal {
    type Err = LibCfgError;

    /// Deserializes a TOML string into a `CfgVal`.
    ///
    /// Fails with a TOML parse error when `s` is not valid TOML, and with
    /// `LibCfgError::CfgValInputIsNotTable` when the deserialized root
    /// element is not a `Value::Table`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match toml::from_str(s)? {
            Value::Table(map) => Ok(Self(map)),
            _ => Err(LibCfgError::CfgValInputIsNotTable),
        }
    }
}
impl From<CfgVal> for toml::Value {
fn from(cfg_val: CfgVal) -> Self {
cfg_val.to_value()
}
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-lib/src/lingua.rs | tpnote-lib/src/lingua.rs | //! This module abstracts the Lingua library API.
use crate::settings::SETTINGS;
use crate::{config::Mode, error::LibCfgError};
pub(crate) use lingua::IsoCode639_1;
use lingua::{LanguageDetector, LanguageDetectorBuilder};
use parse_hyperlinks::iterator::MarkupLink;
use parse_hyperlinks::parser::Link;
use std::collections::HashMap; // Reexport this type.
/// A filter telling in which natural language(s) the input text is written.
/// It returns an array of ISO 639-1 code representations listing the detected
/// languages. If no language can be reliably identified, the output is the
/// empty array.
///
/// Behavior is driven by `SETTINGS.get_lang_filter`:
/// * `Mode::Disabled` short-circuits to an empty list,
/// * `Mode::Error` forwards the stored configuration error,
/// * `Mode::Monolingual` collects one language per text snippet,
/// * `Mode::Multilingual` collects every language whose word count passes
///   the configured thresholds, sorted by word count (descending).
#[cfg(feature = "lang-detection")]
pub(crate) fn get_lang(input: &str) -> Result<Vec<String>, LibCfgError> {
    use std::borrow::Cow;
    use itertools::Itertools;
    let input = input.trim();
    // Return early if there is no input text.
    if input.is_empty() {
        return Ok(vec![]);
    }
    let settings = SETTINGS.read_recursive();
    // Check if we can return early.
    match &settings.get_lang_filter.mode {
        Mode::Disabled => return Ok(vec![]),
        Mode::Error(e) => return Err(e.clone()),
        _ => {}
    }
    // Build `LanguageDetector`: restrict to the configured candidate
    // languages when given, otherwise consider all available languages.
    let detector: LanguageDetector = if !&settings.get_lang_filter.language_candidates.is_empty() {
        log::trace!(
            "Execute template filter `get_lang` \
            with languages candidates: {:?}",
            &settings.get_lang_filter.language_candidates,
        );
        LanguageDetectorBuilder::from_iso_codes_639_1(&settings.get_lang_filter.language_candidates)
            .with_minimum_relative_distance(settings.get_lang_filter.relative_distance_min)
            .build()
    } else {
        log::trace!(
            "Execute template filter `get_lang` \
            with all available languages",
        );
        LanguageDetectorBuilder::from_all_languages()
            .with_minimum_relative_distance(settings.get_lang_filter.relative_distance_min)
            .build()
    };
    // Remove URLs: keep only the human readable parts of hyperlinks (link
    // text, alt text, titles) and the text between links, so URLs do not
    // distort the language statistics.
    let mut sniplets: Vec<Cow<str>> = Vec::new();
    let mut remnant = "";
    for ((skipped, _, r), link) in MarkupLink::new(input, false) {
        sniplets.push(Cow::from(skipped));
        remnant = r;
        match link {
            Link::Text2Dest(text, _, title) => {
                if !text.is_empty() {
                    sniplets.push(text)
                };
                if !title.is_empty() {
                    sniplets.push(title)
                };
            }
            Link::Text2Label(text, _) => {
                if !text.is_empty() {
                    sniplets.push(text)
                };
            }
            Link::TextLabel2Dest(text, _, _) => {
                if !text.is_empty() {
                    sniplets.push(text)
                };
            }
            Link::Image(alt_text, _) => {
                if !alt_text.is_empty() {
                    sniplets.push(alt_text)
                };
            }
            Link::Image2Dest(text1, img_alt, _, text2, _, title) => {
                if !text1.is_empty() {
                    sniplets.push(text1)
                };
                if !img_alt.is_empty() {
                    sniplets.push(img_alt)
                };
                if !text2.is_empty() {
                    sniplets.push(text2)
                };
                if !title.is_empty() {
                    sniplets.push(title)
                };
            }
            _ => {}
        }
    }
    if !remnant.is_empty() {
        sniplets.push(Cow::from(remnant));
    }
    if sniplets.is_empty() {
        sniplets.push(Cow::from(input));
    }
    // End of remove URLs.
    let texts = sniplets.as_slice();
    // Detect languages.
    use crate::FlattenWithIndexExt;
    let detected_languages: Vec<String> = match &settings.get_lang_filter.mode {
        Mode::Multilingual => {
            // Thresholds from the configuration.
            let consecutive_words_min = settings.get_lang_filter.consecutive_words_min;
            let words_total_percentage_min = settings.get_lang_filter.words_total_percentage_min;
            let words_total: usize = texts
                .iter()
                .map(|slice| slice.split_whitespace().count())
                .sum();
            // `words_total / 3` relaxes the criteria for very short input texts.
            let words_min = [consecutive_words_min, words_total / 3];
            let words_min = words_min.iter().min().unwrap();
            log::trace!(
                "Language snippets with less than {} words will be ignored.",
                words_min
            );
            // Aggregate word counts per detected language over all snippets.
            let words_distribution: HashMap<String, usize> = detector
                .detect_multiple_languages_in_parallel_of(texts)
                .into_iter()
                .flatten_with_index()
                // Filter too short word sequences.
                .filter(|(i, l)| {
                    let allow_through = l.word_count() >= *words_min;
                    log::trace!(
                        "Language(s) detected in [{}]: {}, {}, {}: {:?}",
                        i,
                        l.language().iso_code_639_1(),
                        l.word_count(),
                        allow_through,
                        texts[*i][l.start_index()..l.end_index()]
                            .chars()
                            .take(60)
                            .collect::<String>()
                    );
                    allow_through
                })
                .map(|(_, l)| (l.language().iso_code_639_1().to_string(), l.word_count()))
                .into_grouping_map_by(|n| n.0.clone())
                .aggregate(|acc, _key, val| Some(acc.unwrap_or(0) + val.1));
            // Descending order sort.
            let words_distribution: Vec<(String, usize)> = words_distribution
                .into_iter()
                .sorted_by_key(|l| usize::MAX - l.1)
                .collect();
            log::debug!(
                "Languages distribution per word count:\n {:?}",
                words_distribution
            );
            // Filter languages, whose words do not occur sufficiently in total.
            let words_distribution_total: usize = words_distribution.iter().map(|l| l.1).sum();
            let words_total_min: usize =
                words_distribution_total * words_total_percentage_min / 100;
            // Filter languages with too few words and return language list.
            words_distribution
                .into_iter()
                .filter(|(l, wc)| {
                    if *wc >= words_total_min {
                        true
                    } else {
                        let words_percentage = wc * 100 / words_distribution_total;
                        log::info!(
                            "Language `{}` rejected: not enough words in total ({}%<{}%)",
                            l,
                            words_percentage,
                            words_total_percentage_min
                        );
                        false
                    }
                })
                .map(|(l, _)| l)
                .collect::<Vec<String>>()
        }
        Mode::Monolingual => detector
            .detect_languages_in_parallel_of(texts)
            .into_iter()
            .flatten()
            .map(|l| l.iso_code_639_1().to_string())
            .inspect(|l| log::debug!("Language: '{}' in input detected.", l))
            .collect(),
        Mode::Disabled => unreachable!(), // See early return above.
        Mode::Error(_) => unreachable!(), // See early return above.
    };
    Ok(detected_languages)
}
#[cfg(test)]
mod tests {
    use super::*;
    use parking_lot::RwLockWriteGuard;
    /// Exercises `get_lang` in `Mode::Multilingual`.
    #[test]
    fn test_get_lang() {
        use crate::{
            config::{GetLang, Mode},
            settings::Settings,
        };
        use lingua::IsoCode639_1;
        // The `get_lang` filter requires an initialized `SETTINGS` object.
        // Lock the config object for this test.
        let get_lang_filter = GetLang {
            mode: Mode::Multilingual,
            language_candidates: vec![IsoCode639_1::DE, IsoCode639_1::EN, IsoCode639_1::FR],
            relative_distance_min: 0.2,
            consecutive_words_min: 5,
            words_total_percentage_min: 10,
        };
        let mut settings = SETTINGS.write();
        *settings = Settings::default();
        settings.get_lang_filter = get_lang_filter;
        // This locks `SETTINGS` for further write access in this scope.
        let _settings = RwLockWriteGuard::<'_, _>::downgrade(settings);
        // Single-language inputs.
        let input = "Das große Haus";
        let output = get_lang(input).unwrap();
        assert_eq!("de", output[0]);
        let input = "Il est venu trop tard";
        let output = get_lang(input).unwrap();
        assert_eq!("fr", output[0]);
        let input = "How to set up a roof rack";
        let output = get_lang(input).unwrap();
        assert_eq!("en", output[0]);
        // Inputs without natural-language words yield no detection.
        let input = "1917039480 50198%-328470";
        let output = get_lang(input).unwrap();
        assert!(output.is_empty());
        let input = "  \t\n  ";
        let output = get_lang(input).unwrap();
        assert!(output.is_empty());
        // Mixed-language input: result sorted by word count (descending).
        let input = "Parlez-vous français? \
            Ich spreche Französisch nur ein bisschen. \
            A little bit is better than nothing. \
            Noch mehr Deutsch. \
            Bien-sûr, je parle un peu. Qu'est-ce que tu veux?";
        let output = get_lang(input).unwrap();
        // Execute template filter `get_lang` with languages candidates: [EN, FR, DE, ET]
        // Language(s) detected: fr, 2, false: "Parlez-vous français?"
        // Language(s) detected: de, 7, true: "Ich spreche Französisch nur ein bisschen."
        // Language(s) detected: en, 6, true: "little bit is better than nothing."
        // Language(s) detected: de, 3, false: "Noch mehr Deutsch."
        // Language(s) detected: fr, 9, true: "Bien-sûr, je parle un peu. Qu'est-ce que tu veux?"
        // Languages distribution per word count: [("fr", 9), ("de", 7), ("en", 6)]
        assert_eq!(output, ["fr", "de", "en"]);
        let input = "Parlez-vous français? \
            Ich spreche Französisch nur ein bisschen. \
            A little bit is better than nothing.";
        let output = get_lang(input).unwrap();
        // Scheme index: 0, applying the content template: `tmpl.from_clipboard_content`
        // Execute template filter `get_lang` with languages candidates: [EN, FR, DE, ET]
        // Language(s) detected: fr, 2, false: "Parlez-vous français?"
        // Language(s) detected: de, 7, true: "Ich spreche Französisch nur ein bisschen."
        // Language(s) detected: en, 6, true: "little bit is better than nothing."
        // Languages distribution per word count: [("de", 7), ("en", 6)]
        assert_eq!(output, ["de", "en"]);
        // Release the lock.
        drop(_settings);
    }
    /// Exercises `get_lang` in `Mode::Monolingual`.
    #[test]
    fn test_get_lang2() {
        use crate::{
            config::{GetLang, Mode},
            settings::Settings,
        };
        use lingua::IsoCode639_1;
        // The `get_lang` filter requires an initialized `SETTINGS` object.
        // Lock the config object for this test.
        let get_lang_filter = GetLang {
            mode: Mode::Monolingual,
            language_candidates: vec![IsoCode639_1::DE, IsoCode639_1::EN, IsoCode639_1::FR],
            relative_distance_min: 0.2,
            consecutive_words_min: 5,
            words_total_percentage_min: 10,
        };
        let mut settings = SETTINGS.write();
        *settings = Settings::default();
        settings.get_lang_filter = get_lang_filter;
        // This locks `SETTINGS` for further write access in this scope.
        let _settings = RwLockWriteGuard::<'_, _>::downgrade(settings);
        let input = "Das große Haus";
        let output = get_lang(input).unwrap();
        assert_eq!("de", output[0]);
        let input = "Il est venu trop tard";
        let output = get_lang(input).unwrap();
        assert_eq!("fr", output[0]);
        let input = "How to set up a roof rack";
        let output = get_lang(input).unwrap();
        assert_eq!("en", output[0]);
        let input = "1917039480 50198%-328470";
        let output = get_lang(input).unwrap();
        assert!(output.is_empty());
        let input = "  \t\n  ";
        let output = get_lang(input).unwrap();
        assert!(output.is_empty());
        // Monolingual mode reports only the dominant language.
        let input = "Parlez-vous français? \
            Ich spreche Französisch nur ein bisschen. \
            A little bit is better than nothing.";
        let output = get_lang(input).unwrap();
        assert_eq!(output.len(), 1);
        assert_eq!("de", output[0]);
        // Release the lock.
        drop(_settings);
    }
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-lib/src/note.rs | tpnote-lib/src/note.rs | //! Tp-Note's low level API, creating a memory representation of a
//! note file by inserting Tp-Note's
//! environment data in some templates. If the note exists on disk already,
//! the memory representation is established by reading the note file and
//! parsing its front matter.
//! NB: The high level API is in the module `tpnote_lib::workflow`.
use crate::config::TMPL_VAR_DOC;
use crate::content::Content;
use crate::context::Context;
use crate::context::HasSettings;
use crate::context::ReadyForContentTemplate;
use crate::context::ReadyForFilenameTemplate;
use crate::error::NoteError;
use crate::filename::NotePath;
use crate::filename::NotePathBuf;
use crate::filter::TERA;
use crate::front_matter::FrontMatter;
use crate::note_error_tera_template;
use crate::template::TemplateKind;
use std::default::Default;
use std::fs;
use std::path::{Path, PathBuf};
use std::str;
use tera::Tera;
/// This constant is used by Tera as template name for `tera::render_str()`.
/// Unfortunately it is private there, this is why we must redefine it here.
/// (Used below to enable HTML auto-escaping for one-off renditions.)
pub(crate) const ONE_OFF_TEMPLATE_NAME: &str = "__tera_one_off";
/// Represents a note.
/// 1. The `ContentString`'s header is deserialized into `FrontMatter`.
/// 2. `FrontMatter` is stored in `Context` with some environment data.
/// 3. `Context` data is filled in some filename template.
/// 4. The result is stored in `rendered_filename`.
pub struct Note<T: Content> {
    /// Captured environment of _Tp-Note_ that
    /// is used to fill in templates.
    pub context: Context<ReadyForFilenameTemplate>,
    /// The full text content of the note, including
    /// its front matter.
    pub content: T,
    /// This field equals to `PathBuf::new()` until `self.render_filename()`
    /// is called.
    pub rendered_filename: PathBuf,
}
impl<T: Content> Note<T> {
    /// Constructor creating a `Note` memory representation of some content
    /// that exists outside of Tp-Note provided by the `content` object. No file
    /// content is read from disk.
    ///
    /// In case of `TemplateKind::SyncFilename` the `content` is required to
    /// have a header with front matter.
    ///
    /// Contract: `template_kind` must be one of:
    /// * `TemplateKind::SyncFilename`,
    /// * `TemplateKind::None` or
    ///
    /// Panics otherwise. Use `Note::from_content_template()` in those cases.
    ///
    pub fn from_existing_content(
        context: Context<HasSettings>,
        content: T,
        template_kind: TemplateKind,
    ) -> Result<Note<T>, NoteError> {
        // Check contract.
        debug_assert!(match template_kind {
            TemplateKind::SyncFilename => true,
            TemplateKind::None => true,
            _ => panic!(
                "Contract violation: `template_kind=={:?}` is not acceptable here.",
                template_kind
            ),
        });
        // Deserialize the note's header read from disk.
        // Store the front matter in the context for later use in templates.
        let fm = FrontMatter::try_from(content.header())?;
        let context = context.insert_front_matter(&fm);
        // This data comes from outside. We need additional checks here.
        context.assert_precoditions()?;
        Ok(Note {
            context,
            content,
            rendered_filename: PathBuf::new(),
        })
    }
    /// Constructor that creates a new note by filling in the content
    /// template `template` with the data read from `context`.
    /// The result is an initialized `self.content`.
    ///
    /// Contract: `template_kind` should be NOT one of:
    /// * `TemplateKind::SyncFilename`,
    /// * `TemplateKind::None`
    ///
    /// Panics if this is the case.
    ///
    pub fn from_content_template(
        context: Context<ReadyForContentTemplate>,
        template_kind: TemplateKind,
    ) -> Result<Note<T>, NoteError> {
        // Add content to context.
        log::trace!(
            "Available substitution variables for the content template:\n{:#?}",
            *context
        );
        debug_assert!(match template_kind {
            TemplateKind::SyncFilename => panic!("`TemplateKind::SyncFilename` not allowed here"),
            TemplateKind::None => panic!("`TemplateKind::None` not allowed here"),
            _ => true,
        });
        // Render template
        let new_content: T = T::from_string(
            {
                let mut tera = Tera::default();
                tera.extend(&TERA)?;
                // Panics, if the content template does not exist (see contract).
                // Returns an error, when the rendition goes wrong.
                tera.render_str(&template_kind.get_content_template(), &context)
                    .map_err(|e| {
                        note_error_tera_template!(
                            e,
                            template_kind.get_content_template_name().to_string()
                        )
                    })?
            },
            TMPL_VAR_DOC.to_string(),
        );
        log::debug!(
            "Rendered content template:\n---\n{}\n---\n\n{}",
            new_content.header(),
            new_content.body()
        );
        // Deserialize the rendered template
        let fm = FrontMatter::try_from(new_content.header())?;
        let new_context = Context::from_context_path(&context).insert_front_matter(&fm);
        // Return new note.
        Ok(Note {
            context: new_context,
            content: new_content,
            rendered_filename: PathBuf::new(),
        })
    }
    /// Applies a Tera template to the notes context in order to generate a
    /// sanitized filename that is in sync with the note's meta data stored in
    /// its front matter. The result is stored in `self.rendered_filename`.
    pub fn render_filename(&mut self, template_kind: TemplateKind) -> Result<(), NoteError> {
        log::trace!(
            "Available substitution variables for the filename template:\n{:#?}",
            *self.context
        );
        // Render template
        let mut file_path = self.context.get_dir_path().to_owned();
        let mut tera = Tera::default();
        tera.extend(&TERA)?;
        match tera.render_str(&template_kind.get_filename_template(), &self.context) {
            Ok(filename) => {
                file_path.push(filename.trim());
            }
            Err(e) => {
                return Err(note_error_tera_template!(
                    e,
                    template_kind.get_filename_template_name().to_string()
                ));
            }
        }
        file_path.shorten_filename();
        self.rendered_filename = file_path;
        Ok(())
    }
    /// Checks if `self.rendered_filename` is taken already.
    /// If yes, some copy counter is appended/incremented.
    /// Contract: `render_filename` must have been executed before.
    pub fn set_next_unused_rendered_filename(&mut self) -> Result<(), NoteError> {
        debug_assert_ne!(self.rendered_filename, PathBuf::new());
        self.rendered_filename.set_next_unused()?;
        Ok(())
    }
    /// Checks if `alt_path` is equal to `self.rendered_filename`
    /// without considering their copy counter.
    /// If they are similar, `self.rendered_filename` becomes `alt_path`.
    /// If they are different, then we continue incrementing the copy
    /// counter in `self.rendered_filename` until we find a free spot.
    /// (Same as in `set_next_unused_rendered_filename()`).
    /// Contract: `render_filename` must have been executed before.
    pub fn set_next_unused_rendered_filename_or(
        &mut self,
        alt_path: &Path,
    ) -> Result<(), NoteError> {
        debug_assert_ne!(self.rendered_filename, PathBuf::new());
        if self.rendered_filename.exclude_copy_counter_eq(alt_path) {
            self.rendered_filename = alt_path.to_path_buf();
        } else {
            self.rendered_filename.set_next_unused()?;
        }
        Ok(())
    }
    /// Writes the note to disk using the note's `content` and the note's
    /// `rendered_filename`.
    pub fn save(&self) -> Result<(), NoteError> {
        debug_assert_ne!(self.rendered_filename, PathBuf::new());
        log::trace!(
            "Writing the note's content to file: {:?}",
            self.rendered_filename
        );
        self.content.save_as(&self.rendered_filename)?;
        Ok(())
    }
    /// Rename the file `from_path` to `self.rendered_filename`.
    /// Silently fails if source and target are identical.
    /// Contract: `render_filename` must have been executed before.
    pub fn rename_file_from(&self, from_path: &Path) -> Result<(), NoteError> {
        debug_assert_ne!(self.rendered_filename, PathBuf::new());
        if !from_path.exclude_copy_counter_eq(&self.rendered_filename) {
            // Rename file
            fs::rename(from_path, &self.rendered_filename)?;
            log::trace!(
                "File renamed to {}",
                self.rendered_filename.to_str().unwrap_or_default()
            );
        }
        Ok(())
    }
    /// Write the note to disk and remove the file at the previous location.
    /// Similar to `rename_from()`, but the target is replaced by `self.content`.
    /// Silently fails if source and target are identical.
    /// Contract: `render_filename` must have been executed before.
    pub fn save_and_delete_from(&mut self, from_path: &Path) -> Result<(), NoteError> {
        debug_assert_ne!(self.rendered_filename, PathBuf::new());
        self.save()?;
        if from_path != self.rendered_filename {
            log::trace!("Deleting file: {:?}", from_path);
            fs::remove_file(from_path)?;
        }
        Ok(())
    }
    #[inline]
    /// Renders `self.content` to HTML by calling the appropriate markup
    /// renderer. The `html_tmpl` injects JavaScript code with the
    /// key `TMPL_HTML_VAR_VIEWER_DOC_JS`. This code is provided with
    /// `viewer_doc_js`.
    ///
    /// Contract:
    /// * `self.context` is in a valid `ReadyForFilenameTemplate` state.
    /// * `self.content.body_name == TMPL_VAR_DOC`. The HTML template expects
    ///   this name.
    /// * The `html_tmpl` template expects `content` to have a header with:
    ///   a `title:` field.
    ///
    pub fn render_content_to_html(
        &self,
        // HTML template for this rendition.
        tmpl: &str,
        // JavaScript for live update code injection.
        viewer_doc_js: &str,
    ) -> Result<String, NoteError> {
        // Extend the context with the raw content and the JS injection.
        let html_context = self.context.clone();
        let html_context = html_context.insert_raw_content_and_css(&self.content, viewer_doc_js);
        log::trace!(
            "Available substitution variables for the HTML template:\
            \n{:#?}",
            html_context
        );
        let mut tera = Tera::default();
        tera.extend(&TERA)?;
        // Switch `autoescape_on()` only for HTML templates.
        tera.autoescape_on(vec![ONE_OFF_TEMPLATE_NAME]);
        let html = tera.render_str(tmpl, &html_context).map_err(|e| {
            note_error_tera_template!(e, "[html_tmpl] viewer/exporter_tmpl ".to_string())
        })?;
        Ok(html)
    }
}
#[cfg(test)]
mod tests {
use super::Context;
use super::FrontMatter;
use crate::config::TMPL_VAR_FM_ALL;
use serde_json::json;
use std::path::Path;
use tera::Value;
#[test]
fn test_deserialize() {
    let input = "# document start
title: The book
subtitle: you always wanted
author: It's me
date: 2020-04-21
lang: en
revision: '1.0'
sort_tag: 20200420-21_22
file_ext: md
height: 1.23
count: 2
neg: -1
flag: true
numbers:
- 1
- 3
- 5
";
    // Build the expected front matter map from (key, value) pairs.
    let mut expected = tera::Map::new();
    for (key, value) in [
        ("title", Value::String("The book".to_string())),
        ("subtitle", Value::String("you always wanted".to_string())),
        ("author", Value::String("It's me".to_string())),
        ("date", Value::String("2020-04-21".to_string())),
        ("lang", Value::String("en".to_string())),
        ("revision", Value::String("1.0".to_string())),
        ("sort_tag", Value::String("20200420-21_22".to_string())),
        ("file_ext", Value::String("md".to_string())),
        ("height", json!(1.23)),       // Number()
        ("count", json!(2)),           // Number()
        ("neg", json!(-1)),            // Number()
        ("flag", json!(true)),         // Bool()
        ("numbers", json!([1, 3, 5])), // Array()
    ] {
        expected.insert(key.to_string(), value);
    }
    let expected_front_matter = FrontMatter(expected);
    assert_eq!(expected_front_matter, FrontMatter::try_from(input).unwrap());
}
#[test]
fn test_register_front_matter() {
    // Raw front matter fields, as they would appear in a note header.
    let mut tmp = tera::Map::new();
    tmp.insert("file_ext".to_string(), Value::String("md".to_string())); // String
    tmp.insert("height".to_string(), json!(1.23)); // Number()
    tmp.insert("count".to_string(), json!(2)); // Number()
    tmp.insert("neg".to_string(), json!(-1)); // Number()
    tmp.insert("flag".to_string(), json!(true)); // Bool()
    tmp.insert("numbers".to_string(), json!([1, 3, 5])); // Array([Numbers()..])!
    // The same fields, prefixed with `fm_`, as expected in the context.
    let mut tmp2 = tera::Map::new();
    tmp2.insert("fm_file_ext".to_string(), Value::String("md".to_string())); // String
    tmp2.insert("fm_height".to_string(), json!(1.23)); // Number()
    tmp2.insert("fm_count".to_string(), json!(2)); // Number()
    tmp2.insert("fm_neg".to_string(), json!(-1)); // Number()
    tmp2.insert("fm_flag".to_string(), json!(true)); // Bool()
    tmp2.insert("fm_numbers".to_string(), json!([1, 3, 5])); // Array([Numbers()..])!
    let input1 = Context::from(Path::new("a/b/test.md")).unwrap();
    let input2 = FrontMatter(tmp);
    let mut expected = Context::from(Path::new("a/b/test.md")).unwrap();
    // (A former `remove`/re-`insert` of the identical `fm_numbers` value
    // was a no-op and has been removed.)
    let tmp2 = tera::Value::from(tmp2);
    expected.insert(TMPL_VAR_FM_ALL, &tmp2); // Map()
    let expected = expected.insert_front_matter(&FrontMatter::try_from("").unwrap());
    let result = input1.insert_front_matter(&input2);
    assert_eq!(result, expected);
}
#[test]
fn test_from_existing_content1() {
    //
    // Example with `TemplateKind::SyncFilename`
    //
    // Opens an existing note whose filename is out of sync with its
    // front matter and checks that the sync-filename template renders
    // `20221031-My day--Note.md`.
    use crate::content::Content;
    use crate::content::ContentString;
    use crate::context::Context;
    use crate::note::Note;
    use crate::template::TemplateKind;
    use std::env::temp_dir;
    use std::fs;
    // Prepare test: create existing note.
    let raw = r#"---
title: "My day"
subtitle: "Note"
---
Body text
"#;
    let notefile = temp_dir().join("20221031-hello.md");
    fs::write(&notefile, raw.as_bytes()).unwrap();
    // The filename the sync template is expected to render.
    let expected = temp_dir().join("20221031-My day--Note.md");
    // Remove a possible leftover from a previous test run.
    let _ = fs::remove_file(&expected);
    // Start test.
    let context = Context::from(&notefile).unwrap();
    // Create note object.
    let content = <ContentString as Content>::open(&notefile).unwrap();
    // You can plug in your own type (must impl. `Content`).
    let mut n = Note::<ContentString>::from_existing_content(
        context,
        content,
        TemplateKind::SyncFilename,
    )
    .unwrap();
    let path = n.context.get_path().to_owned();
    n.render_filename(TemplateKind::SyncFilename).unwrap();
    n.set_next_unused_rendered_filename_or(&path).unwrap();
    assert_eq!(n.rendered_filename, expected);
    // Rename file on the disk.
    n.rename_file_from(&path).unwrap();
    assert!(n.rendered_filename.is_file());
}
#[test]
fn test_from_existing_content2() {
    // Example with `TemplateKind::None`
    //
    // This constructor is called, when `Note` is solely created for
    // HTML rendering and no templates will be applied.
    //
    use crate::config::LIB_CFG;
    use crate::config::TMPL_HTML_VAR_VIEWER_DOC_JS;
    use crate::content::Content;
    use crate::content::ContentString;
    use crate::context::Context;
    use crate::note::Note;
    use crate::template::TemplateKind;
    use std::env::temp_dir;
    use std::fs;
    // Prepare test: create existing note file.
    let raw = r#"---
title: "My day"
subtitle: "Note"
---
Body text
"#;
    let notefile = temp_dir().join("20221030-My day--Note.md");
    fs::write(&notefile, raw.as_bytes()).unwrap();
    // Start test
    // Only minimal context is needed, because no templates are applied later.
    let mut context = Context::from(&notefile).unwrap();
    // We do not inject any JavaScript.
    context.insert(TMPL_HTML_VAR_VIEWER_DOC_JS, &"".into());
    // Create note object.
    let content = <ContentString as Content>::open(&notefile).unwrap();
    // You can plug in your own type (must impl. `Content`).
    let n: Note<ContentString> =
        Note::<ContentString>::from_existing_content(context, content, TemplateKind::None)
            .unwrap();
    // Check the HTML rendition.
    // Render with the configured viewer template and no extra JavaScript.
    let html = n
        .render_content_to_html(&LIB_CFG.read_recursive().tmpl_html.viewer, "")
        .unwrap();
    assert!(html.starts_with("<!DOCTYPE html>\n<html"))
}
#[test]
fn test_from_content_template1() {
    // Example with `TemplateKind::FromDir`: all input streams are empty,
    // so the new note is created from the name of the directory
    // `123-my dir/`.
    //
    use crate::content::Content;
    use crate::content::ContentString;
    use crate::context::Context;
    use crate::note::Note;
    use crate::settings::SETTINGS;
    use crate::settings::Settings;
    use crate::template::TemplateKind;
    use parking_lot::RwLockWriteGuard;
    use std::env::temp_dir;
    use std::fs;
    // Prepare test.
    let mut settings = SETTINGS.write();
    *settings = Settings::default();
    // This locks `SETTINGS` for further write access in this scope.
    let _settings = RwLockWriteGuard::<'_, _>::downgrade(settings);
    // Create a directory for the new note.
    let notedir = temp_dir().join("123-my dir/");
    fs::create_dir_all(&notedir).unwrap();
    // Store the path in `context`.
    let context = Context::from(&notedir).unwrap();
    // Empty clipboard and stdin streams: the title comes from the
    // directory name.
    let html_clipboard =
        ContentString::from_string("".to_string(), "html_clipboard".to_string());
    let txt_clipboard = ContentString::from_string("".to_string(), "txt_clipboard".to_string());
    let stdin = ContentString::from_string("".to_string(), "stdin".to_string());
    let v = vec![&html_clipboard, &txt_clipboard, &stdin];
    let context = context
        .insert_front_matter_and_raw_text_from_existing_content(&v)
        .unwrap()
        .set_state_ready_for_content_template();
    // Create the `Note` object.
    // You can plug in your own type (must impl. `Content`).
    let mut n =
        Note::<ContentString>::from_content_template(context, TemplateKind::FromDir).unwrap();
    assert!(n.content.header().starts_with("title: my dir"));
    assert_eq!(n.content.borrow_dependent().body, "\n\n");
    // Check the title and subtitle in the note's header.
    assert_eq!(
        n.context
            .get(TMPL_VAR_FM_ALL)
            .unwrap()
            .get("fm_title")
            .unwrap()
            .as_str(),
        Some("my dir")
    );
    assert_eq!(
        n.context
            .get(TMPL_VAR_FM_ALL)
            .unwrap()
            .get("fm_subtitle")
            .unwrap()
            .as_str(),
        Some("Note")
    );
    n.render_filename(TemplateKind::FromDir).unwrap();
    n.set_next_unused_rendered_filename().unwrap();
    n.save().unwrap();
    // Check the created new note file.
    assert!(n.rendered_filename.is_file());
    let raw_note = fs::read_to_string(n.rendered_filename).unwrap();
    // The note starts with a BOM; line endings differ per platform.
    #[cfg(not(target_family = "windows"))]
    assert!(raw_note.starts_with("\u{feff}---\ntitle: my dir"));
    #[cfg(target_family = "windows")]
    assert!(raw_note.starts_with("\u{feff}---\r\ntitle: my dir"));
}
#[test]
fn test_from_content_template2() {
    // Example with `TemplateKind::FromClipboard`
    // NOTE(review): the asserts below establish the *conditions* for
    // `TemplateKind::FromClipboard`, but the template actually applied
    // further down is `TemplateKind::FromDir` — confirm intent.
    use crate::content::Content;
    use crate::content::ContentString;
    use crate::context::Context;
    use crate::note::Note;
    use crate::settings::SETTINGS;
    use crate::settings::Settings;
    use crate::template::TemplateKind;
    use parking_lot::RwLockWriteGuard;
    use std::env::temp_dir;
    use std::fs;
    // Prepare test.
    let mut settings = SETTINGS.write();
    *settings = Settings::default();
    // This locks `SETTINGS` for further write access in this scope.
    let _settings = RwLockWriteGuard::<'_, _>::downgrade(settings);
    // Directory for the new note.
    let notedir = temp_dir();
    // Store the path in `context`.
    let context = Context::from(&notedir).unwrap();
    let html_clipboard =
        ContentString::from_string("html_clp\n".to_string(), "html_clipboard".to_string());
    let txt_clipboard =
        ContentString::from_string("txt_clp\n".to_string(), "txt_clipboard".to_string());
    let stdin = ContentString::from_string("std\n".to_string(), "stdin".to_string());
    let v = vec![&html_clipboard, &txt_clipboard, &stdin];
    let context = context
        .insert_front_matter_and_raw_text_from_existing_content(&v)
        .unwrap();
    // This is the condition to choose: `TemplateKind::FromClipboard`:
    // no input stream carries a YAML header ...
    assert!(
        html_clipboard.header().is_empty()
            && txt_clipboard.header().is_empty()
            && stdin.header().is_empty()
    );
    // ... but all of them carry body text.
    assert!(
        !html_clipboard.body().is_empty()
            && !txt_clipboard.body().is_empty()
            && !stdin.body().is_empty()
    );
    let context = context.set_state_ready_for_content_template();
    // Create the `Note` object.
    // You can plug in your own type (must impl. `Content`).
    let mut n =
        Note::<ContentString>::from_content_template(context, TemplateKind::FromDir).unwrap();
    // The body is assembled from stdin and the text clipboard.
    let expected_body = "\nstd\ntxt_clp\n\n";
    assert_eq!(n.content.body(), expected_body);
    // Check the title and subtitle in the note's header.
    assert_eq!(
        n.context
            .get(TMPL_VAR_FM_ALL)
            .unwrap()
            .get("fm_title")
            .unwrap()
            .as_str(),
        Some("std")
    );
    assert_eq!(
        n.context
            .get(TMPL_VAR_FM_ALL)
            .unwrap()
            .get("fm_subtitle")
            .unwrap()
            .as_str(),
        Some("Note")
    );
    n.render_filename(TemplateKind::FromDir).unwrap();
    n.set_next_unused_rendered_filename().unwrap();
    n.save().unwrap();
    // Check the new note file.
    // println!("{:?}", n.rendered_filename);
    assert!(
        n.rendered_filename
            .as_os_str()
            .to_str()
            .unwrap()
            .contains("std--Note")
    );
    assert!(n.rendered_filename.is_file());
    let raw_note = fs::read_to_string(&n.rendered_filename).unwrap();
    println!("{}", raw_note);
    #[cfg(not(target_family = "windows"))]
    assert!(raw_note.starts_with("\u{feff}---\ntitle: std"));
    #[cfg(target_family = "windows")]
    assert!(raw_note.starts_with("\u{feff}---\r\ntitle:"));
}
#[test]
fn test_from_content_template3() {
    // Example with `TemplateKind::FromClipboardYaml`
    // NOTE(review): the assert below establishes the *condition* for
    // `TemplateKind::FromClipboardYaml`, but the template actually applied
    // further down is `TemplateKind::FromDir` — confirm intent.
    use crate::content::Content;
    use crate::content::ContentString;
    use crate::context::Context;
    use crate::note::Note;
    use crate::settings::SETTINGS;
    use crate::settings::Settings;
    use crate::template::TemplateKind;
    use parking_lot::RwLockWriteGuard;
    use std::env::temp_dir;
    use std::fs;
    // Prepare test.
    let mut settings = SETTINGS.write();
    *settings = Settings::default();
    // This locks `SETTINGS` for further write access in this scope.
    let _settings = RwLockWriteGuard::<'_, _>::downgrade(settings);
    // Directory for the new note.
    let notedir = temp_dir().join("123-my dir/");
    // Run test.
    // Store the path in `context`.
    let context = Context::from(&notedir).unwrap();
    let html_clipboard = ContentString::from_string(
        "my HTML clipboard\n".to_string(),
        "html_clipboard".to_string(),
    );
    let txt_clipboard = ContentString::from_string(
        "my TXT clipboard\n".to_string(),
        "txt_clipboard".to_string(),
    );
    // Stdin carries a YAML header whose `subtitle:` overwrites the default.
    let stdin = ContentString::from_string(
        "---\nsubtitle: \"this overwrites\"\n---\nstdin body".to_string(),
        "stdin".to_string(),
    );
    let v = vec![&html_clipboard, &txt_clipboard, &stdin];
    let context = context
        .insert_front_matter_and_raw_text_from_existing_content(&v)
        .unwrap();
    // This is the condition to choose: `TemplateKind::FromClipboardYaml`:
    // at least one input stream carries a YAML header.
    assert!(
        !html_clipboard.header().is_empty()
            || !txt_clipboard.header().is_empty()
            || !stdin.header().is_empty()
    );
    let context = context.set_state_ready_for_content_template();
    // Create the `Note` object.
    // You can plug in your own type (must impl. `Content`).
    let mut n =
        Note::<ContentString>::from_content_template(context, TemplateKind::FromDir).unwrap();
    let expected_body = "\nstdin body\nmy TXT clipboard\n\n";
    assert_eq!(n.content.body(), expected_body);
    // Check the title and subtitle in the note's header.
    assert_eq!(
        n.context
            .get(TMPL_VAR_FM_ALL)
            .unwrap()
            .get("fm_title")
            .unwrap()
            .as_str(),
        // Remember: in debug titles are very short. The code only works,
        // because the string is pure ASCII (not UTF-8).
        Some("stdin bod")
    );
    assert_eq!(
        n.context
            .get(TMPL_VAR_FM_ALL)
            .unwrap()
            .get("fm_subtitle")
            .unwrap()
            .as_str(),
        // Remember: in debug titles are very short. The code only works,
        // because the string is pure ASCII (not UTF-8).
        Some("this over")
    );
    n.render_filename(TemplateKind::FromDir).unwrap();
    n.set_next_unused_rendered_filename().unwrap();
    n.save().unwrap();
    // Check the new note file.
    //println!("rendered_filename == {:?}", n.rendered_filename);
    assert!(
        n.rendered_filename
            .as_os_str()
            .to_str()
            .unwrap()
            .contains("stdin bod--this over")
    );
    assert!(n.rendered_filename.is_file());
    let raw_note = fs::read_to_string(n.rendered_filename).unwrap();
    #[cfg(not(target_family = "windows"))]
    assert!(raw_note.starts_with("\u{feff}---\ntitle: stdin bod"));
    #[cfg(target_family = "windows")]
    assert!(raw_note.starts_with("\u{feff}---\r\ntitle: stdin bod"));
}
#[test]
fn test_from_content_template4() {
    // Example with `TemplateKind::AnnotateFile`
    // Annotates a non-Tp-Note file (a fake PDF) with a new companion note.
    use crate::content::Content;
    use crate::content::ContentString;
    use crate::context::Context;
    use crate::note::Note;
    use crate::settings::SETTINGS;
    use crate::settings::Settings;
    use crate::template::TemplateKind;
    use parking_lot::RwLockWriteGuard;
    use std::env::temp_dir;
    use std::fs;
    // Prepare the test.
    let mut settings = SETTINGS.write();
    *settings = Settings::default();
    // This locks `SETTINGS` for further write access in this scope.
    let _settings = RwLockWriteGuard::<'_, _>::downgrade(settings);
    // Create some non-Tp-Note-file.
    let raw = "This simulates a non tp-note file";
    let non_notefile = temp_dir().join("20221030-some.pdf");
    fs::write(&non_notefile, raw.as_bytes()).unwrap();
    // The annotation note expected next to the annotated file.
    let expected = temp_dir().join("20221030-some.pdf--Note.md");
    let _ = fs::remove_file(&expected);
    // Run the test.
    // Store the path in `context`.
    let context = Context::from(&non_notefile).unwrap();
    let html_clipboard = ContentString::from_string(
        "my HTML clipboard\n".to_string(),
        "html_clipboard".to_string(),
    );
    let txt_clipboard = ContentString::from_string(
        "my TXT clipboard\n".to_string(),
        "txt_clipboard".to_string(),
    );
    let stdin = ContentString::from_string("my stdin\n".to_string(), "stdin".to_string());
    let v = vec![&html_clipboard, &txt_clipboard, &stdin];
    let context = context
        .insert_front_matter_and_raw_text_from_existing_content(&v)
        .unwrap()
        .set_state_ready_for_content_template();
    // Create the `Note` object.
    // You can plug in your own type (must impl. `Content`).
    let mut n =
        Note::<ContentString>::from_content_template(context, TemplateKind::AnnotateFile)
            .unwrap();
    // The body links the annotated file, followed by stdin and clipboard.
    let expected_body =
        "\n[20221030-some.pdf](<20221030-some.pdf>)\n____\n\nmy stdin\nmy TXT clipboard\n\n";
    assert_eq!(n.content.body(), expected_body);
    // Check the title and subtitle in the note's header.
    assert_eq!(
        n.context
            .get(TMPL_VAR_FM_ALL)
            .unwrap()
            .get("fm_title")
            .unwrap()
            .as_str(),
        Some("some.pdf")
    );
    assert_eq!(
        n.context
            .get(TMPL_VAR_FM_ALL)
            .unwrap()
            .get("fm_subtitle")
            .unwrap()
            .as_str(),
        Some("Note")
    );
    n.render_filename(TemplateKind::AnnotateFile).unwrap();
    n.set_next_unused_rendered_filename().unwrap();
    n.save().unwrap();
    // Check the new note file.
    assert_eq!(n.rendered_filename, expected);
    // Clean up.
    fs::remove_file(n.rendered_filename).unwrap();
}
#[test]
fn test_from_existing_content5() {
//
// Example with `TemplateKind::FromTextFile`
//
use crate::content::Content;
use crate::content::ContentString;
use crate::context::Context;
use crate::note::Note;
use crate::template::TemplateKind;
use std::env::temp_dir;
use std::fs;
// Prepare test: create existing note file without header.
let raw = "Body text without header";
let notefile = temp_dir().join("20221030-hello -- world.md");
let _ = fs::write(¬efile, raw.as_bytes());
let expected = temp_dir().join("20221030-hello--world.md");
let _ = fs::remove_file(&expected);
// Start test.
let context = Context::from(¬efile).unwrap();
// Create note object.
let content = <ContentString as Content>::open(¬efile).unwrap();
let context = context
.insert_front_matter_and_raw_text_from_existing_content(&vec![&content])
.unwrap()
.set_state_ready_for_content_template();
// You can plug in your own type (must impl. `Content`).
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | true |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-lib/src/html2md.rs | tpnote-lib/src/html2md.rs | //! This module abstracts the HTML to Markdown filter.
use crate::error::NoteError;
use html2md::parse_html;
/*
// Alternative implementation:
/// Abstracts the HTML to Markdown conversion.
/// This implementation uses the `htmd` crate.
#[inline]
pub(crate) fn convert_html_to_md(html: &str) -> Result<String, NoteError> {
use htmd;
let converter = htmd::HtmlToMarkdown::builder()
.skip_tags(vec!["script", "style"])
.build();
converter.convert(html).map_err(|e| NoteError::InvalidHtml {
source_str: e.to_string(),
})
}
*/
/// Abstracts the HTML to Markdown conversion.
/// This implementation delegates to the `html2md` crate's `parse_html`.
/// The conversion itself never fails; the `Result` keeps the signature
/// compatible with alternative converter implementations.
#[inline]
pub(crate) fn convert_html_to_md(html: &str) -> Result<String, NoteError> {
    let md = parse_html(html);
    Ok(md)
}
#[cfg(test)]
mod tests {
    use crate::html2md::convert_html_to_md;
    #[test]
    fn test_convert_html_to_md() {
        // Each pair is (HTML input, expected Markdown output).
        let cases = [
            (
                "<div id=\"videopodcast\">outside <span id=\"pills\">inside</span>\n</div>",
                "outside inside",
            ),
            (r#"<p><a href="/my_uri">link</a></p>"#, "[link](/my_uri)"),
            // [CommonMark: Example 489](https://spec.commonmark.org/0.31.2/#example-489)
            (r#"<p><a href="/my uri">link</a></p>"#, "[link](</my uri>)"),
            // [CommonMark: Example 489](https://spec.commonmark.org/0.31.2/#example-489)
            (r#"<p><a href="/my%20uri">link</a></p>"#, "[link](</my uri>)"),
            // We want ATX style headers.
            (r#"<p><h1>Title</h1></p>"#, "# Title"),
        ];
        for (input, expected) in cases {
            let result = convert_html_to_md(input);
            assert_eq!(result.unwrap(), expected);
        }
    }
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | false |
getreu/tp-note | https://github.com/getreu/tp-note/blob/4a373fcf860c8d7a8c3da02f4ab23441f91738ae/tpnote-lib/src/context.rs | tpnote-lib/src/context.rs | //! Extends the built-in Tera filters.
use tera::Value;
use crate::config::Assertion;
use crate::config::FILENAME_ROOT_PATH_MARKER;
use crate::config::LIB_CFG;
#[cfg(feature = "viewer")]
use crate::config::TMPL_HTML_VAR_DOC_ERROR;
#[cfg(feature = "viewer")]
use crate::config::TMPL_HTML_VAR_DOC_TEXT;
use crate::config::TMPL_HTML_VAR_EXPORTER_DOC_CSS;
use crate::config::TMPL_HTML_VAR_EXPORTER_HIGHLIGHTING_CSS;
use crate::config::TMPL_HTML_VAR_VIEWER_DOC_CSS_PATH;
use crate::config::TMPL_HTML_VAR_VIEWER_DOC_CSS_PATH_VALUE;
use crate::config::TMPL_HTML_VAR_VIEWER_DOC_JS;
use crate::config::TMPL_HTML_VAR_VIEWER_HIGHLIGHTING_CSS_PATH;
use crate::config::TMPL_HTML_VAR_VIEWER_HIGHLIGHTING_CSS_PATH_VALUE;
use crate::config::TMPL_VAR_BODY;
use crate::config::TMPL_VAR_CURRENT_SCHEME;
use crate::config::TMPL_VAR_DIR_PATH;
use crate::config::TMPL_VAR_DOC_FILE_DATE;
use crate::config::TMPL_VAR_EXTENSION_DEFAULT;
use crate::config::TMPL_VAR_FM_;
use crate::config::TMPL_VAR_FM_ALL;
use crate::config::TMPL_VAR_FM_SCHEME;
use crate::config::TMPL_VAR_FORCE_LANG;
use crate::config::TMPL_VAR_HEADER;
use crate::config::TMPL_VAR_LANG;
use crate::config::TMPL_VAR_PATH;
use crate::config::TMPL_VAR_ROOT_PATH;
use crate::config::TMPL_VAR_SCHEME_SYNC_DEFAULT;
use crate::config::TMPL_VAR_USERNAME;
use crate::content::Content;
use crate::error::FileError;
use crate::error::LibCfgError;
use crate::error::NoteError;
use crate::filename::Extension;
use crate::filename::NotePath;
use crate::filename::NotePathStr;
use crate::filter::name;
use crate::front_matter::FrontMatter;
use crate::front_matter::all_leaves;
use crate::settings::SETTINGS;
use std::borrow::Cow;
use std::fs::File;
use std::marker::PhantomData;
use std::matches;
use std::ops::Deref;
use std::path::Path;
use std::path::PathBuf;
use std::time::SystemTime;
/// A trait setting up a state machine as described below.
/// Its implementors represent one specific state defining the amount and the
/// type of data the `Context` type holds at that moment.
pub trait ContextState {}
#[derive(Debug, PartialEq, Clone)]
/// Marker state: the context is uninitialized or stale.
/// See description in the `ContextState` implementor list.
pub struct Invalid;
#[derive(Debug, PartialEq, Clone)]
/// Marker state: configuration and settings variables are inserted.
/// See description in the `ContextState` implementor list.
pub struct HasSettings;
#[derive(Debug, PartialEq, Clone)]
/// Marker state: front matter variables are inserted; a filename template
/// may be rendered. See description in the `ContextState` implementor list.
pub(crate) struct ReadyForFilenameTemplate;
#[derive(Debug, PartialEq, Clone)]
/// Marker state: front matter and raw text of existing content are inserted.
/// See description in the `ContextState` implementor list.
pub(crate) struct HasExistingContent;
#[derive(Debug, PartialEq, Clone)]
/// Marker state: a content template may be rendered.
/// See description in the `ContextState` implementor list.
pub(crate) struct ReadyForContentTemplate;
#[derive(Debug, PartialEq, Clone)]
/// Marker state: an HTML template may be rendered.
/// See description in the `ContextState` implementor list.
pub(crate) struct ReadyForHtmlTemplate;
#[cfg(feature = "viewer")]
#[derive(Debug, PartialEq, Clone)]
/// Marker state: an HTML error template may be rendered.
/// See description in the `ContextState` implementor list.
pub(crate) struct ReadyForHtmlErrorTemplate;
/// The `Context` object is in an invalid state. Either it was not initialized
/// or its data does not correspond any more to the `Content` it represents.
///
/// | State order    |                                       |
/// |----------------|---------------------------------------|
/// | Previous state | none                                  |
/// | Current state  | `Invalid`                             |
/// | Next state     | `HasSettings`                         |
///
impl ContextState for Invalid {}
/// The `Context` has the following initialized and valid fields: `path`,
/// `dir_path`, `root_path` and `ct`. The context `ct` contains data from
/// `insert_config_vars()` and `insert_settings()`.
/// `Context<HasSettings>` has the following variables set:
///
/// * `TMPL_VAR_CURRENT_SCHEME`
/// * `TMPL_VAR_DIR_PATH` in sync with `self.dir_path` and
/// * `TMPL_VAR_DOC_FILE_DATE` in sync with `self.doc_file_date` (only if
///   available).
/// * `TMPL_VAR_EXTENSION_DEFAULT`
/// * `TMPL_VAR_LANG`
/// * `TMPL_VAR_PATH` in sync with `self.path`,
/// * `TMPL_VAR_ROOT_PATH` in sync with `self.root_path`.
/// * `TMPL_VAR_SCHEME_SYNC_DEFAULT`.
/// * `TMPL_VAR_USERNAME`
///
/// The variables are inserted by the following methods: `self.from()`,
/// `self.insert_config_vars()` and `self.insert_settings()`.
/// Once this state is achieved, `Context` is constant and write protected
/// until the next state transition.
///
/// | State order    |                                                    |
/// |----------------|----------------------------------------------------|
/// | Previous state | `Invalid`                                          |
/// | Current state  | `HasSettings`                                      |
/// | Next state     | `ReadyForFilenameTemplate` or `HasExistingContent` |
///
impl ContextState for HasSettings {}
/// In addition to `HasSettings`, the `context.ct` contains template variables
/// deserialized from some note's front matter. E.g. a field named `title:`
/// appears in the context as `fm.fm_title` template variable.
/// In `Note` objects the `Content` is always associated with a
/// `Context<ReadyForFilenameTemplate>`.
/// Once this state is achieved, `Context` is constant and write protected
/// until the next state transition.
///
/// | State order    |                                       |
/// |----------------|---------------------------------------|
/// | Previous state | `HasSettings`                         |
/// | Current state  | `ReadyForFilenameTemplate`            |
/// | Next state     | none or `ReadyForHtmlTemplate`        |
///
impl ContextState for ReadyForFilenameTemplate {}
/// In addition to the `HasSettings` state, the YAML headers of all clipboard
/// `Content` objects are registered as front matter variables `fm.fm*` in the
/// `Context`.
/// This stage is also used for the `TemplateKind::FromTextFile` template.
/// In this case the last inserted `Content` comes from the text file
/// the command line parameter `<path>` points to. This adds the following key:
///
/// * `TMPL_VAR_DOC`
///
/// This state can evolve as the
/// `insert_front_matter_and_raw_text_from_existing_content()` function can be
/// called several times.
///
/// | State order    |                                       |
/// |----------------|---------------------------------------|
/// | Previous state | `HasSettings` or `HasExistingContent` |
/// | Current state  | `HasExistingContent`                  |
/// | Next state     | `ReadyForContentTemplate`             |
///
impl ContextState for HasExistingContent {}
/// This marker state means that enough information has been collected
/// in the `HasExistingContent` state to be passed to a
/// content template renderer.
/// Once this state is achieved, `Context` is constant and write protected
/// until the next state transition.
///
/// | State order    |                                       |
/// |----------------|---------------------------------------|
/// | Previous state | `HasExistingContent`                  |
/// | Current state  | `ReadyForContentTemplate`             |
/// | Next state     | none                                  |
///
impl ContextState for ReadyForContentTemplate {}
/// In addition to the `ReadyForFilenameTemplate` state this state has the
/// following variables set:
///
/// * `TMPL_HTML_VAR_EXPORTER_DOC_CSS`
/// * `TMPL_HTML_VAR_EXPORTER_HIGHLIGHTING_CSS`
/// * `TMPL_HTML_VAR_VIEWER_DOC_CSS_PATH`
/// * `TMPL_HTML_VAR_VIEWER_DOC_CSS_PATH_VALUE`
/// * `TMPL_HTML_VAR_VIEWER_DOC_JS` from `viewer_doc_js`
/// * `TMPL_HTML_VAR_VIEWER_HIGHLIGHTING_CSS_PATH`
/// * `TMPL_HTML_VAR_VIEWER_HIGHLIGHTING_CSS_PATH_VALUE`
/// * `TMPL_VAR_DOC`
///
/// Once this state is achieved, `Context` is constant and write protected
/// until the next state transition.
///
/// | State order    |                                       |
/// |----------------|---------------------------------------|
/// | Previous state | `ReadyForFilenameTemplate`            |
/// | Current state  | `ReadyForHtmlTemplate`                |
/// | Next state     | none                                  |
///
impl ContextState for ReadyForHtmlTemplate {}
/// The `Context` has all data for the intended template.
///
/// * `TMPL_HTML_VAR_DOC_ERROR` from `error_message`
/// * `TMPL_HTML_VAR_DOC_TEXT` from `note_erroneous_content`
/// * `TMPL_HTML_VAR_VIEWER_DOC_JS` from `viewer_doc_js`
///
/// Once this state is achieved, `Context` is constant and write protected
/// until the next state transition.
///
/// | State order    |                                       |
/// |----------------|---------------------------------------|
/// | Previous state | `HasSettings`                         |
/// | Current state  | `ReadyForHtmlErrorTemplate`           |
/// | Next state     | none                                  |
///
#[cfg(feature = "viewer")]
impl ContextState for ReadyForHtmlErrorTemplate {}
/// Tiny wrapper around "Tera context" with some additional information.
#[derive(Clone, Debug, PartialEq)]
pub struct Context<S: ContextState + ?Sized> {
    /// Collection of substitution variables.
    ct: tera::Context,
    /// First positional command line argument.
    path: PathBuf,
    /// The directory (only) path corresponding to the first positional
    /// command line argument. This is our working directory and
    /// the directory where the note file is (will be) located.
    dir_path: PathBuf,
    /// `dir_path` is a subdirectory of `root_path`. `root_path` is the
    /// first directory, that upwards from `dir_path`, contains a file named
    /// `FILENAME_ROOT_PATH_MARKER` (or `/` if no marker file can be found).
    /// The root directory is interpreted by Tp-Note's viewer as its base
    /// directory: only files within this directory are served.
    root_path: PathBuf,
    /// If `path` points to a file, we store its creation date here.
    doc_file_date: Option<SystemTime>,
    /// Rust requires usage of generic parameters, here `S`.
    _marker: PhantomData<S>,
}
/// The methods below are available in all `ContentState` states.
impl<S: ContextState> Context<S> {
/// Returns the first positional command line argument (`self.path`).
/// See `from()` method for details.
pub fn get_path(&self) -> &Path {
    &self.path
}
/// Returns the working directory of the note (`self.dir_path`).
/// See `from()` method for details.
pub fn get_dir_path(&self) -> &Path {
    &self.dir_path
}
/// Returns the root directory of the note (`self.root_path`).
/// See `from()` method for details.
pub fn get_root_path(&self) -> &Path {
    &self.root_path
}
/// Getter for `self.doc_file_date`: the creation date of the file
/// `self.path` points to, if `path` is a file.
/// See `from()` method for details.
pub fn get_doc_file_date(&self) -> Option<SystemTime> {
    self.doc_file_date
}
/// Constructor. Unlike `from()` this constructor does not access
/// the filesystem in order to detect `dir_path`, `root_path` and
/// `doc_file_date`. It copies these values from the passed `context`.
/// Use this constructor when you are sure that the above data has
/// not changed since you instantiated `context`. In this case you
/// can avoid repeated file access.
pub fn from_context_path(context: &Context<S>) -> Context<HasSettings> {
    // Start with an empty variable map; only the path fields are copied.
    let mut new_context = Context {
        ct: tera::Context::new(),
        path: context.path.clone(),
        dir_path: context.dir_path.clone(),
        root_path: context.root_path.clone(),
        doc_file_date: context.doc_file_date,
        _marker: PhantomData,
    };
    // Repopulate the variable map: paths first, then config, then settings.
    new_context.sync_paths_to_map();
    new_context.insert_config_vars();
    new_context.insert_settings();
    new_context
}
/// Helper function that keeps the values with the `self.ct` key
/// in sync with the corresponding fields:
///
/// * `TMPL_VAR_PATH` in sync with `self.path`,
/// * `TMPL_VAR_DIR_PATH` in sync with `self.dir_path`,
/// * `TMPL_VAR_ROOT_PATH` in sync with `self.root_path` and
/// * `TMPL_VAR_DOC_FILE_DATE` in sync with `self.doc_file_date`
///   (only if available).
///
/// Synchronization is performed by copying the latter to the former.
fn sync_paths_to_map(&mut self) {
    self.ct.insert(TMPL_VAR_PATH, &self.path);
    self.ct.insert(TMPL_VAR_DIR_PATH, &self.dir_path);
    self.ct.insert(TMPL_VAR_ROOT_PATH, &self.root_path);
    if let Some(time) = self.doc_file_date {
        // The file date is stored as seconds since the Unix epoch.
        self.ct.insert(
            TMPL_VAR_DOC_FILE_DATE,
            &time
                .duration_since(SystemTime::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs(),
        )
    } else {
        // No file date available: make sure no stale value remains.
        self.ct.remove(TMPL_VAR_DOC_FILE_DATE);
    };
}
/// Insert some configuration variables into the context so that they
/// can be used in the templates.
///
/// This function adds the key:
///
/// * `TMPL_VAR_SCHEME_SYNC_DEFAULT`.
///
/// ```
/// use std::path::Path;
/// use tpnote_lib::config::TMPL_VAR_SCHEME_SYNC_DEFAULT;
/// use tpnote_lib::settings::set_test_default_settings;
/// use tpnote_lib::context::Context;
/// set_test_default_settings().unwrap();
///
/// // The constructor calls `context.insert_settings()` before returning.
/// let mut context = Context::from(&Path::new("/path/to/mynote.md")).unwrap();
///
/// // When the note's YAML header does not contain a `scheme:` field,
/// // the `default` scheme is used.
/// assert_eq!(&context.get(TMPL_VAR_SCHEME_SYNC_DEFAULT).unwrap().to_string(),
///     &format!("\"default\""));
/// ```
fn insert_config_vars(&mut self) {
    let lib_cfg = LIB_CFG.read_recursive();
    // Name of the scheme used when synchronizing filenames of notes
    // whose header has no `scheme:` field (from the configuration file).
    self.ct.insert(
        TMPL_VAR_SCHEME_SYNC_DEFAULT,
        lib_cfg.scheme_sync_default.as_str(),
    );
}
/// Captures Tp-Note's environment and stores it as variables in a
/// `context` collection. The variables are needed later to populate
/// a context template and a filename template.
///
/// This function adds the keys:
///
/// * `TMPL_VAR_EXTENSION_DEFAULT`
/// * `TMPL_VAR_USERNAME`
/// * `TMPL_VAR_LANG`
/// * `TMPL_VAR_CURRENT_SCHEME`
/// * `TMPL_VAR_FORCE_LANG`
///
/// ```
/// use std::path::Path;
/// use tpnote_lib::config::TMPL_VAR_EXTENSION_DEFAULT;
/// use tpnote_lib::config::TMPL_VAR_CURRENT_SCHEME;
/// use tpnote_lib::settings::set_test_default_settings;
/// use tpnote_lib::context::Context;
/// set_test_default_settings().unwrap();
///
/// // The constructor calls `context.insert_settings()` before returning.
/// let mut context = Context::from(&Path::new("/path/to/mynote.md")).unwrap();
///
/// // For most platforms `context.get("extension_default")` is `md`
/// assert_eq!(&context.get(TMPL_VAR_EXTENSION_DEFAULT).unwrap().to_string(),
///     &format!("\"md\""));
/// // `Settings.current_scheme` is by default the `default` scheme.
/// assert_eq!(&context.get(TMPL_VAR_CURRENT_SCHEME).unwrap().to_string(),
///     &format!("\"default\""));
/// ```
fn insert_settings(&mut self) {
    let settings = SETTINGS.read_recursive();
    // Default extension for new notes as defined in the configuration file.
    self.ct.insert(
        TMPL_VAR_EXTENSION_DEFAULT,
        settings.extension_default.as_str(),
    );
    {
        let lib_cfg = LIB_CFG.read_recursive();
        // Name of the currently selected scheme.
        self.ct.insert(
            TMPL_VAR_CURRENT_SCHEME,
            &lib_cfg.scheme[settings.current_scheme].name,
        );
    } // Release `lib_cfg` here.
    // Search for UNIX, Windows, and MacOS user-names.
    self.ct.insert(TMPL_VAR_USERNAME, &settings.author);
    // Get the user's language tag.
    self.ct.insert(TMPL_VAR_LANG, &settings.lang);
    // Store `force_lang`.
    self.ct.insert(TMPL_VAR_FORCE_LANG, &settings.force_lang);
}
/// Inserts the YAML front header variables into the context for later use
/// with templates.
///
/// All front matter fields are merged into the `TMPL_VAR_FM_ALL` map
/// (creating it if absent); each field name is delocalized according to
/// the scheme selected by the front matter's own `scheme:` field (or the
/// current scheme from `SETTINGS` when absent).
fn insert_front_matter2(&mut self, fm: &FrontMatter) {
    // Take the existing `fm_all` map out of the context (if any) so the
    // new front matter can be merged into it.
    let mut fm_all_map = self
        .ct
        .remove(TMPL_VAR_FM_ALL)
        .and_then(|v| {
            if let tera::Value::Object(map) = v {
                Some(map)
            } else {
                None
            }
        })
        .unwrap_or_default();

    // Collect all localized scheme field names.
    // Example: `["scheme", "scheme", "Schema"]`
    // NOTE: `collect::<Option<Vec<_>>>` yields `None` — and thus an empty
    // list — if any scheme lacks a localization for `TMPL_VAR_FM_SCHEME`.
    let localized_scheme_names: Vec<String> = LIB_CFG
        .read_recursive()
        .scheme
        .iter()
        .map(|s| {
            s.tmpl
                .fm_var
                .localization
                .iter()
                .find_map(|(k, v)| (k == TMPL_VAR_FM_SCHEME).then_some(v.to_owned()))
        })
        .collect::<Option<Vec<String>>>()
        .unwrap_or_default();

    // Search for localized scheme names in front matter.
    // `(scheme_idx, field_value)`. Example: `(2, "Deutsch")`
    let localized_scheme: Option<(usize, &str)> = localized_scheme_names
        .iter()
        .enumerate()
        .find_map(|(i, k)| fm.0.get(k).and_then(|s| s.as_str()).map(|s| (i, s)));

    let scheme = if let Some((scheme_idx, scheme_name)) = localized_scheme {
        {
            log::trace!(
                "Found `scheme: {}` with index=={} in front matter",
                scheme_name,
                scheme_idx,
            );
            scheme_idx
        }
    } else {
        // No `scheme:` field found: fall back to the current scheme.
        SETTINGS.read_recursive().current_scheme
    };

    let scheme = &LIB_CFG.read_recursive().scheme[scheme];
    let vars = &scheme.tmpl.fm_var.localization;

    for (key, value) in fm.iter() {
        // This delocalizes the variable name and prepends `fm_` to its name.
        // NB: We also insert `Value::Array` and `Value::Object`
        // variants, No flattening occurs here.
        let fm_key = vars.iter().find(|&l| &l.1 == key).map_or_else(
            || {
                let mut s = TMPL_VAR_FM_.to_string();
                s.push_str(key);
                Cow::Owned(s)
            },
            |l| Cow::Borrowed(&l.0),
        );

        // Store a copy in `fm`.
        fm_all_map.insert(fm_key.to_string(), value.clone());
    }
    // Register the collection as `Object(Map<String, Value>)`.
    self.ct.insert(TMPL_VAR_FM_ALL, &fm_all_map);
}
/// Insert a key/val pair directly. Only available in tests.
#[cfg(test)]
pub(crate) fn insert(&mut self, key: &str, val: &tera::Value) {
    self.ct.insert(key, val);
}

/// Inserts a `Content` in `Context`. The content appears as key in
/// `context.ct` with its name taken from `content.name()`.
/// Its value is a `tera::Map` with two keys `TMPL_VAR_HEADER` and
/// `TMPL_VAR_BODY`. The corresponding values are copied from
/// `content.header()` and `content.body()`.
fn insert_raw_text_from_existing_content(&mut self, content: &impl Content) {
    //
    // Register input.
    let mut map = tera::Map::new();
    map.insert(TMPL_VAR_HEADER.to_string(), content.header().into());
    map.insert(TMPL_VAR_BODY.to_string(), content.body().into());
    self.ct.insert(content.name(), &tera::Value::from(map));
}
/// See function of the same name in `impl Context<HasSettings>`.
///
/// Registers every `Content` in `clipboards` under its own name (raw
/// header and body, cf. `insert_raw_text_from_existing_content`). If an
/// input carries a non-empty YAML header, it is parsed and its front
/// matter variables are merged into the context. Later inputs overwrite
/// variables registered by earlier ones.
///
/// # Errors
///
/// Returns `NoteError::InvalidInputYaml` when a non-empty header fails
/// to parse as YAML.
fn insert_front_matter_and_raw_text_from_existing_content2(
    &mut self,
    clipboards: &Vec<&impl Content>,
) -> Result<(), NoteError> {
    //
    for &clip in clipboards {
        // Register input.
        self.insert_raw_text_from_existing_content(clip);

        // Can we find a front matter in the input stream? If yes, the
        // unmodified input stream is our new note content.
        if !clip.header().is_empty() {
            match FrontMatter::try_from(clip.header()) {
                Ok(fm) => {
                    log::trace!(
                        "Input stream \"{}\" generates the front matter variables:\n{:#?}",
                        clip.name(),
                        &fm
                    );
                    // Register front matter. The variables registered here
                    // can overwrite the ones from earlier inputs.
                    self.insert_front_matter2(&fm);
                }
                // The header is known to be non-empty at this point, so a
                // parse failure is always an error. (The original code
                // redundantly re-checked `clip.header().is_empty()` here.)
                Err(e) => {
                    return Err(NoteError::InvalidInputYaml {
                        tmpl_var: clip.name().to_string(),
                        source_str: e.to_string(),
                    });
                }
            }
        }
    }
    Ok(())
}
}
/// The start state of all `Context` objects.
///
impl Context<Invalid> {
    /// Constructor: `path` is Tp-Notes first positional command line parameter
    /// `<path>` (see man page). `path` must point to a directory or
    /// a file.
    ///
    /// A copy of `path` is stored in `self.ct` as key `TMPL_VAR_PATH`. Its
    /// directory path is stored as key `TMPL_VAR_DIR_PATH`. The root
    /// directory, where the marker file `tpnote.toml` was found, is stored
    /// with the key `TMPL_VAR_ROOT_PATH`. If `path` points to a file, its
    /// file creation date is stored with the key `TMPL_VAR_DOC_FILE_DATE`.
    ///
    /// ```rust
    /// use std::path::Path;
    /// use tpnote_lib::settings::set_test_default_settings;
    /// use tpnote_lib::config::TMPL_VAR_DIR_PATH;
    /// use tpnote_lib::config::TMPL_VAR_PATH;
    /// use tpnote_lib::context::Context;
    /// set_test_default_settings().unwrap();
    ///
    /// let mut context = Context::from(&Path::new("/path/to/mynote.md")).unwrap();
    ///
    /// assert_eq!(context.get_path(), Path::new("/path/to/mynote.md"));
    /// assert_eq!(context.get_dir_path(), Path::new("/path/to/"));
    /// assert_eq!(&context.get(TMPL_VAR_PATH).unwrap().to_string(),
    ///     r#""/path/to/mynote.md""#);
    /// assert_eq!(&context.get(TMPL_VAR_DIR_PATH).unwrap().to_string(),
    ///     r#""/path/to""#);
    /// ```
    pub fn from(path: &Path) -> Result<Context<HasSettings>, FileError> {
        let path = path.to_path_buf();

        // `dir_path` is a directory as fully qualified path, ending
        // by a separator.
        let dir_path = if path.is_dir() {
            path.clone()
        } else {
            path.parent()
                .unwrap_or_else(|| Path::new("./"))
                .to_path_buf()
        };

        // Get the root directory: walk up the ancestors until a directory
        // containing the marker file is found. If none is found, the
        // topmost ancestor visited becomes the root.
        let mut root_path = Path::new("");
        for anc in dir_path.ancestors() {
            root_path = anc;
            let mut p = anc.to_owned();
            p.push(Path::new(FILENAME_ROOT_PATH_MARKER));
            if p.is_file() {
                break;
            }
        }
        let root_path = root_path.to_owned();
        debug_assert!(dir_path.starts_with(&root_path));

        // Get the file's creation date. Fail silently.
        let file_creation_date = if let Ok(file) = File::open(&path) {
            let metadata = file.metadata()?;
            // Some filesystems do not support `created()`; fall back to
            // the modification date.
            metadata.created().or_else(|_| metadata.modified()).ok()
        } else {
            None
        };

        // Insert environment.
        let mut context = Context {
            ct: tera::Context::new(),
            path,
            dir_path,
            root_path,
            doc_file_date: file_creation_date,
            _marker: PhantomData,
        };
        context.sync_paths_to_map();
        context.insert_config_vars();
        context.insert_settings();
        Ok(context)
    }
}
impl Context<HasSettings> {
    /// Merges `fm` into `self.ct`.
    ///
    /// Typestate transition: consumes `self` and returns the same data as
    /// a `Context<ReadyForFilenameTemplate>`.
    pub(crate) fn insert_front_matter(
        mut self,
        fm: &FrontMatter,
    ) -> Context<ReadyForFilenameTemplate> {
        Context::insert_front_matter2(&mut self, fm);
        Context {
            ct: self.ct,
            path: self.path,
            dir_path: self.dir_path,
            root_path: self.root_path,
            doc_file_date: self.doc_file_date,
            _marker: PhantomData,
        }
    }

    /// Inserts clipboard data, standard input data and/or existing note file
    /// content into the context. The data may contain some copied text with
    /// or without a YAML header. The latter usually carries front matter
    /// variables. The `input` data below is registered with the key name given
    /// by `tmpl_var_body_name`. Typical names are `"clipboard"` or `"stdin"`.
    /// If the below `input` contains a valid YAML header, it will be registered
    /// in the context with the key name given by `tmpl_var_header_name`. The
    /// templates expect the key names `clipboard_header` or `std_header`. The
    /// raw header text will be inserted with this key name.
    ///
    pub(crate) fn insert_front_matter_and_raw_text_from_existing_content(
        mut self,
        clipboards: &Vec<&impl Content>,
    ) -> Result<Context<HasExistingContent>, NoteError> {
        //
        self.insert_front_matter_and_raw_text_from_existing_content2(clipboards)?;
        // Typestate transition to `HasExistingContent`.
        Ok(Context {
            ct: self.ct,
            path: self.path,
            dir_path: self.dir_path,
            root_path: self.root_path,
            doc_file_date: self.doc_file_date,
            _marker: PhantomData,
        })
    }

    /// This adds the following variables to `self`:
    ///
    /// * `TMPL_HTML_VAR_VIEWER_DOC_JS` from `viewer_doc_js`
    /// * `TMPL_HTML_VAR_DOC_ERROR` from `error_message`
    /// * `TMPL_HTML_VAR_DOC_TEXT` from `note_erroneous_content`
    ///
    #[cfg(feature = "viewer")]
    pub(crate) fn insert_error_content(
        mut self,
        note_erroneous_content: &impl Content,
        error_message: &str,
        // Java Script live updater inject code. Will be inserted into
        // `tmpl_html.viewer`.
        viewer_doc_js: &str,
    ) -> Context<ReadyForHtmlErrorTemplate> {
        //
        self.ct.insert(TMPL_HTML_VAR_VIEWER_DOC_JS, viewer_doc_js);
        self.ct.insert(TMPL_HTML_VAR_DOC_ERROR, error_message);
        self.ct
            .insert(TMPL_HTML_VAR_DOC_TEXT, &note_erroneous_content.as_str());
        // Typestate transition to `ReadyForHtmlErrorTemplate`.
        Context {
            ct: self.ct,
            path: self.path,
            dir_path: self.dir_path,
            root_path: self.root_path,
            doc_file_date: self.doc_file_date,
            _marker: PhantomData,
        }
    }
}
impl Context<HasExistingContent> {
    /// See same method in `Context<HasSettings>`.
    ///
    /// Allows feeding additional inputs while already in the
    /// `HasExistingContent` state; the state is retained.
    pub(crate) fn insert_front_matter_and_raw_text_from_existing_content(
        mut self,
        clipboards: &Vec<&impl Content>,
    ) -> Result<Context<HasExistingContent>, NoteError> {
        //
        self.insert_front_matter_and_raw_text_from_existing_content2(clipboards)?;
        Ok(Context {
            ct: self.ct,
            path: self.path,
            dir_path: self.dir_path,
            root_path: self.root_path,
            doc_file_date: self.doc_file_date,
            _marker: PhantomData,
        })
    }

    /// Mark this as ready for a content template.
    ///
    /// Pure typestate transition: no data is changed.
    pub(crate) fn set_state_ready_for_content_template(self) -> Context<ReadyForContentTemplate> {
        Context {
            ct: self.ct,
            path: self.path,
            dir_path: self.dir_path,
            root_path: self.root_path,
            doc_file_date: self.doc_file_date,
            _marker: PhantomData,
        }
    }
}
impl Context<ReadyForFilenameTemplate> {
/// Checks if the front matter variables satisfy preconditions.
/// `self.path` is the path to the current document.
#[inline]
pub(crate) fn assert_precoditions(&self) -> Result<(), NoteError> {
let path = &self.path;
let lib_cfg = &LIB_CFG.read_recursive();
// Get front matter scheme if there is any.
let fm_all = self.get(TMPL_VAR_FM_ALL);
if fm_all.is_none() {
return Ok(());
}
let fm_all = fm_all.unwrap();
let fm_scheme = fm_all.get(TMPL_VAR_FM_SCHEME).and_then(|v| v.as_str());
let scheme_idx = fm_scheme.and_then(|scheme_name| {
lib_cfg
.scheme
.iter()
.enumerate()
.find_map(|(i, s)| (s.name == scheme_name).then_some(i))
});
// If not use `current_scheme` from `SETTINGS`
let scheme_idx = scheme_idx.unwrap_or_else(|| SETTINGS.read_recursive().current_scheme);
let scheme = &lib_cfg.scheme[scheme_idx];
for (key, conditions) in scheme.tmpl.fm_var.assertions.iter() {
if let Some(value) = fm_all.get(key) {
for cond in conditions {
match cond {
Assertion::IsDefined => {}
Assertion::IsString => {
if !all_leaves(value, &|v| matches!(v, Value::String(..))) {
return Err(NoteError::FrontMatterFieldIsNotString {
field_name: name(scheme, key).to_string(),
});
}
}
Assertion::IsNotEmptyString => {
if !all_leaves(value, &|v| {
matches!(v, Value::String(..)) && v.as_str() != Some("")
}) {
return Err(NoteError::FrontMatterFieldIsEmptyString {
field_name: name(scheme, key).to_string(),
});
}
}
Assertion::IsNumber => {
if !all_leaves(value, &|v| matches!(v, Value::Number(..))) {
return Err(NoteError::FrontMatterFieldIsNotNumber {
field_name: name(scheme, key).to_string(),
});
}
}
Assertion::IsBool => {
if !all_leaves(value, &|v| matches!(v, Value::Bool(..))) {
return Err(NoteError::FrontMatterFieldIsNotBool {
field_name: name(scheme, key).to_string(),
});
}
}
Assertion::IsNotCompound => {
if matches!(value, Value::Array(..))
|| matches!(value, Value::Object(..))
{
return Err(NoteError::FrontMatterFieldIsCompound {
field_name: name(scheme, key).to_string(),
});
}
}
Assertion::IsValidSortTag => {
let fm_sort_tag = value.as_str().unwrap_or_default();
if !fm_sort_tag.is_empty() {
// Check for forbidden characters.
let (_, rest, is_sequential) = fm_sort_tag.split_sort_tag(true);
if !rest.is_empty() {
return Err(NoteError::FrontMatterFieldIsInvalidSortTag {
sort_tag: fm_sort_tag.to_owned(),
sort_tag_extra_chars: scheme
.filename
.sort_tag
.extra_chars
.escape_default()
.to_string(),
filename_sort_tag_letters_in_succession_max: scheme
.filename
.sort_tag
.letters_in_succession_max,
});
}
| rust | Apache-2.0 | 4a373fcf860c8d7a8c3da02f4ab23441f91738ae | 2026-01-04T20:18:01.333543Z | true |
RazrFalcon/xmlparser | https://github.com/RazrFalcon/xmlparser/blob/9c8e34305723118c67d7feba95772705b2247776/src/stream.rs | src/stream.rs | use core::char;
use core::cmp;
use core::ops::Range;
use core::str;
use crate::{StrSpan, StreamError, TextPos, XmlByteExt, XmlCharExt};
// Crate-local shorthand: all stream operations fail with `StreamError`.
type Result<T> = ::core::result::Result<T, StreamError>;

/// Representation of the [Reference](https://www.w3.org/TR/xml/#NT-Reference) value.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum Reference<'a> {
    /// An entity reference.
    ///
    /// <https://www.w3.org/TR/xml/#NT-EntityRef>
    Entity(&'a str),

    /// A character reference.
    ///
    /// <https://www.w3.org/TR/xml/#NT-CharRef>
    Char(char),
}

/// A streaming XML parsing interface.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct Stream<'a> {
    // Current byte offset into `span`.
    pos: usize,
    // Exclusive end offset; parsing never advances past it.
    end: usize,
    // The underlying input text.
    span: StrSpan<'a>,
}

impl<'a> From<&'a str> for Stream<'a> {
    #[inline]
    fn from(text: &'a str) -> Self {
        Stream {
            pos: 0,
            end: text.len(),
            span: text.into(),
        }
    }
}

impl<'a> From<StrSpan<'a>> for Stream<'a> {
    #[inline]
    fn from(span: StrSpan<'a>) -> Self {
        Stream {
            pos: 0,
            end: span.as_str().len(),
            span,
        }
    }
}
impl<'a> Stream<'a> {
    /// Creates a new stream from a specified `text` substring.
    #[inline]
    pub fn from_substr(text: &'a str, fragment: Range<usize>) -> Self {
        Stream {
            pos: fragment.start,
            end: fragment.end,
            span: text.into(),
        }
    }

    /// Returns the underlying string span.
    #[inline]
    pub fn span(&self) -> StrSpan<'a> {
        self.span
    }

    /// Returns current position.
    #[inline]
    pub fn pos(&self) -> usize {
        self.pos
    }

    /// Sets current position equal to the end.
    ///
    /// Used to indicate end of parsing on error.
    #[inline]
    pub fn jump_to_end(&mut self) {
        self.pos = self.end;
    }

    /// Checks if the stream is reached the end.
    ///
    /// Any [`pos()`] value larger than original text length indicates stream end.
    ///
    /// Accessing stream after reaching end via safe methods will produce
    /// an `UnexpectedEndOfStream` error.
    ///
    /// Accessing stream after reaching end via *_unchecked methods will produce
    /// a Rust's bound checking error.
    ///
    /// [`pos()`]: #method.pos
    #[inline]
    pub fn at_end(&self) -> bool {
        self.pos >= self.end
    }
    /// Returns a byte from a current stream position.
    ///
    /// # Errors
    ///
    /// - `UnexpectedEndOfStream`
    #[inline]
    pub fn curr_byte(&self) -> Result<u8> {
        if self.at_end() {
            return Err(StreamError::UnexpectedEndOfStream);
        }

        Ok(self.curr_byte_unchecked())
    }

    /// Returns a byte from a current stream position.
    ///
    /// # Panics
    ///
    /// - if the current position is after the end of the data
    #[inline]
    pub fn curr_byte_unchecked(&self) -> u8 {
        // Plain indexing: out of bounds panics (see Panics above).
        self.span.as_bytes()[self.pos]
    }

    /// Returns the next byte from a current stream position.
    ///
    /// # Errors
    ///
    /// - `UnexpectedEndOfStream`
    #[inline]
    pub fn next_byte(&self) -> Result<u8> {
        if self.pos + 1 >= self.end {
            return Err(StreamError::UnexpectedEndOfStream);
        }

        Ok(self.span.as_bytes()[self.pos + 1])
    }
    /// Advances by `n` bytes.
    ///
    /// Does not validate UTF-8 boundaries; callers advance by whole
    /// bytes/chars they have already inspected.
    ///
    /// # Examples
    ///
    /// ```rust,should_panic
    /// use xmlparser::Stream;
    ///
    /// let mut s = Stream::from("text");
    /// s.advance(2); // ok
    /// s.advance(20); // will cause a panic via debug_assert!().
    /// ```
    #[inline]
    pub fn advance(&mut self, n: usize) {
        debug_assert!(self.pos + n <= self.end);
        self.pos += n;
    }

    /// Checks that the stream starts with a selected text.
    ///
    /// We are using `&[u8]` instead of `&str` for performance reasons.
    ///
    /// # Examples
    ///
    /// ```
    /// use xmlparser::Stream;
    ///
    /// let mut s = Stream::from("Some text.");
    /// s.advance(5);
    /// assert_eq!(s.starts_with(b"text"), true);
    /// assert_eq!(s.starts_with(b"long"), false);
    /// ```
    #[inline]
    pub fn starts_with(&self, text: &[u8]) -> bool {
        self.span.as_bytes()[self.pos..self.end].starts_with(text)
    }
    /// Consumes the current byte if it's equal to the provided byte.
    ///
    /// # Errors
    ///
    /// - `InvalidChar`
    /// - `UnexpectedEndOfStream`
    ///
    /// # Examples
    ///
    /// ```
    /// use xmlparser::Stream;
    ///
    /// let mut s = Stream::from("Some text.");
    /// assert!(s.consume_byte(b'S').is_ok());
    /// assert!(s.consume_byte(b'o').is_ok());
    /// assert!(s.consume_byte(b'm').is_ok());
    /// assert!(s.consume_byte(b'q').is_err());
    /// ```
    pub fn consume_byte(&mut self, c: u8) -> Result<()> {
        let curr = self.curr_byte()?;
        if curr != c {
            return Err(StreamError::InvalidChar(curr, c, self.gen_text_pos()));
        }

        self.advance(1);
        Ok(())
    }

    /// Tries to consume the current byte if it's equal to the provided byte.
    ///
    /// Unlike `consume_byte()` will not return any errors.
    pub fn try_consume_byte(&mut self, c: u8) -> bool {
        match self.curr_byte() {
            Ok(b) if b == c => {
                self.advance(1);
                true
            }
            _ => false,
        }
    }
    /// Skips selected string.
    ///
    /// # Errors
    ///
    /// - `InvalidString`
    pub fn skip_string(&mut self, text: &'static [u8]) -> Result<()> {
        if !self.starts_with(text) {
            let pos = self.gen_text_pos();

            // Assume that all input `text` are valid UTF-8 strings, so unwrap is safe.
            let expected = str::from_utf8(text).unwrap();

            return Err(StreamError::InvalidString(expected, pos));
        }

        self.advance(text.len());
        Ok(())
    }

    /// Consumes bytes by the predicate and returns them.
    ///
    /// The result can be empty.
    #[inline]
    pub fn consume_bytes<F>(&mut self, f: F) -> StrSpan<'a>
    where
        F: Fn(&Stream, u8) -> bool,
    {
        let start = self.pos;
        self.skip_bytes(f);
        self.slice_back(start)
    }

    /// Skips bytes by the predicate.
    pub fn skip_bytes<F>(&mut self, f: F)
    where
        F: Fn(&Stream, u8) -> bool,
    {
        while !self.at_end() && f(self, self.curr_byte_unchecked()) {
            self.advance(1);
        }
    }

    /// Consumes chars by the predicate and returns them.
    ///
    /// The result can be empty.
    #[inline]
    pub fn consume_chars<F>(&mut self, f: F) -> Result<StrSpan<'a>>
    where
        F: Fn(&Stream, char) -> bool,
    {
        let start = self.pos;
        self.skip_chars(f)?;
        Ok(self.slice_back(start))
    }

    /// Skips chars by the predicate.
    ///
    /// Every visited char is additionally validated against the XML `Char`
    /// production; a non-XML char aborts with `NonXmlChar`.
    #[inline]
    pub fn skip_chars<F>(&mut self, f: F) -> Result<()>
    where
        F: Fn(&Stream, char) -> bool,
    {
        for c in self.chars() {
            if !c.is_xml_char() {
                return Err(StreamError::NonXmlChar(c, self.gen_text_pos()));
            } else if f(self, c) {
                self.advance(c.len_utf8());
            } else {
                break;
            }
        }

        Ok(())
    }

    // Char iterator over the remaining `pos..end` region. Tied to the input
    // lifetime `'a`, not to `&self`, so the stream may advance while iterating.
    #[inline]
    pub(crate) fn chars(&self) -> str::Chars<'a> {
        self.span.as_str()[self.pos..self.end].chars()
    }
    /// Slices data from `pos` to the current position.
    #[inline]
    pub fn slice_back(&self, pos: usize) -> StrSpan<'a> {
        self.span.slice_region(pos, self.pos)
    }

    /// Slices data from the current position to the end.
    #[inline]
    pub fn slice_tail(&self) -> StrSpan<'a> {
        self.span.slice_region(self.pos, self.end)
    }

    /// Skips whitespaces.
    ///
    /// Accepted values: `' ' \n \r \t`.
    #[inline]
    pub fn skip_spaces(&mut self) {
        while !self.at_end() && self.curr_byte_unchecked().is_xml_space() {
            self.advance(1);
        }
    }

    /// Checks if the stream starts with a space.
    #[inline]
    pub fn starts_with_space(&self) -> bool {
        !self.at_end() && self.curr_byte_unchecked().is_xml_space()
    }

    /// Consumes whitespaces.
    ///
    /// Like [`skip_spaces()`], but checks that first char is actually a space.
    ///
    /// [`skip_spaces()`]: #method.skip_spaces
    ///
    /// # Errors
    ///
    /// - `InvalidSpace`
    /// - `UnexpectedEndOfStream`
    pub fn consume_spaces(&mut self) -> Result<()> {
        if self.at_end() {
            return Err(StreamError::UnexpectedEndOfStream);
        }

        if !self.starts_with_space() {
            return Err(StreamError::InvalidSpace(
                self.curr_byte_unchecked(),
                self.gen_text_pos(),
            ));
        }

        self.skip_spaces();
        Ok(())
    }
    /// Consumes an XML character reference if there is one.
    ///
    /// On error will reset the position to the original.
    pub fn try_consume_reference(&mut self) -> Option<Reference<'a>> {
        let start = self.pos();

        // Consume reference on a substream (a cheap copy — `Stream` is `Copy`).
        let mut s = *self;
        match s.consume_reference() {
            Ok(r) => {
                // If the current data is a reference then advance the current
                // stream by the number of bytes read by the substream.
                self.advance(s.pos() - start);
                Some(r)
            }
            Err(_) => None,
        }
    }

    /// Consumes an XML reference.
    ///
    /// Consumes according to: <https://www.w3.org/TR/xml/#NT-Reference>
    ///
    /// # Errors
    ///
    /// - `InvalidReference`
    pub fn consume_reference(&mut self) -> Result<Reference<'a>> {
        self._consume_reference()
            .map_err(|_| StreamError::InvalidReference)
    }

    #[inline(never)]
    fn _consume_reference(&mut self) -> Result<Reference<'a>> {
        if !self.try_consume_byte(b'&') {
            return Err(StreamError::InvalidReference);
        }

        // `&#...;` — character reference (hex with `x` prefix, decimal otherwise);
        // anything else — named entity reference.
        let reference = if self.try_consume_byte(b'#') {
            let (value, radix) = if self.try_consume_byte(b'x') {
                let value = self.consume_bytes(|_, c| c.is_xml_hex_digit()).as_str();
                (value, 16)
            } else {
                let value = self.consume_bytes(|_, c| c.is_xml_digit()).as_str();
                (value, 10)
            };

            let n = u32::from_str_radix(value, radix).map_err(|_| StreamError::InvalidReference)?;

            let c = char::from_u32(n).unwrap_or('\u{FFFD}');
            if !c.is_xml_char() {
                return Err(StreamError::InvalidReference);
            }

            Reference::Char(c)
        } else {
            let name = self.consume_name()?;
            // The five predefined XML entities resolve to chars directly.
            match name.as_str() {
                "quot" => Reference::Char('"'),
                "amp" => Reference::Char('&'),
                "apos" => Reference::Char('\''),
                "lt" => Reference::Char('<'),
                "gt" => Reference::Char('>'),
                _ => Reference::Entity(name.as_str()),
            }
        };

        self.consume_byte(b';')?;

        Ok(reference)
    }
    /// Consumes an XML name and returns it.
    ///
    /// Consumes according to: <https://www.w3.org/TR/xml/#NT-Name>
    ///
    /// # Errors
    ///
    /// - `InvalidName` - if name is empty or starts with an invalid char
    /// - `UnexpectedEndOfStream`
    pub fn consume_name(&mut self) -> Result<StrSpan<'a>> {
        let start = self.pos();
        self.skip_name()?;

        let name = self.slice_back(start);
        if name.is_empty() {
            return Err(StreamError::InvalidName);
        }

        Ok(name)
    }

    /// Skips an XML name.
    ///
    /// The same as `consume_name()`, but does not return a consumed name.
    ///
    /// # Errors
    ///
    /// - `InvalidName` - if name is empty or starts with an invalid char
    pub fn skip_name(&mut self) -> Result<()> {
        let mut iter = self.chars();
        // Only the first char must match `NameStartChar`; an empty stream
        // consumes nothing here (the emptiness check is in `consume_name`).
        if let Some(c) = iter.next() {
            if c.is_xml_name_start() {
                self.advance(c.len_utf8());
            } else {
                return Err(StreamError::InvalidName);
            }
        }

        for c in iter {
            if c.is_xml_name() {
                self.advance(c.len_utf8());
            } else {
                break;
            }
        }

        Ok(())
    }
/// Consumes a qualified XML name and returns it.
///
/// Consumes according to: <https://www.w3.org/TR/xml-names/#ns-qualnames>
///
/// # Errors
///
/// - `InvalidName` - if name is empty or starts with an invalid char
#[inline(never)]
pub fn consume_qname(&mut self) -> Result<(StrSpan<'a>, StrSpan<'a>)> {
let start = self.pos();
let mut splitter = None;
while !self.at_end() {
// Check for ASCII first for performance reasons.
let b = self.curr_byte_unchecked();
if b < 128 {
if b == b':' {
if splitter.is_none() {
splitter = Some(self.pos());
self.advance(1);
} else {
// Multiple `:` is an error.
return Err(StreamError::InvalidName);
}
} else if b.is_xml_name() {
self.advance(1);
} else {
break;
}
} else {
// Fallback to Unicode code point.
match self.chars().nth(0) {
Some(c) if c.is_xml_name() => {
self.advance(c.len_utf8());
}
_ => break,
}
}
}
let (prefix, local) = if let Some(splitter) = splitter {
let prefix = self.span().slice_region(start, splitter);
let local = self.slice_back(splitter + 1);
(prefix, local)
} else {
let local = self.slice_back(start);
("".into(), local)
};
// Prefix must start with a `NameStartChar`.
if let Some(c) = prefix.as_str().chars().nth(0) {
if !c.is_xml_name_start() {
return Err(StreamError::InvalidName);
}
}
// Local name must start with a `NameStartChar`.
if let Some(c) = local.as_str().chars().nth(0) {
if !c.is_xml_name_start() {
return Err(StreamError::InvalidName);
}
} else {
// If empty - error.
return Err(StreamError::InvalidName);
}
Ok((prefix, local))
}
    /// Consumes `=`.
    ///
    /// Consumes according to: <https://www.w3.org/TR/xml/#NT-Eq>
    /// (optional whitespace on both sides of the `=` is skipped).
    ///
    /// # Errors
    ///
    /// - `InvalidChar`
    /// - `UnexpectedEndOfStream`
    pub fn consume_eq(&mut self) -> Result<()> {
        self.skip_spaces();
        self.consume_byte(b'=')?;
        self.skip_spaces();

        Ok(())
    }

    /// Consumes quote.
    ///
    /// Consumes `'` or `"` and returns it.
    ///
    /// # Errors
    ///
    /// - `InvalidQuote`
    /// - `UnexpectedEndOfStream`
    pub fn consume_quote(&mut self) -> Result<u8> {
        let c = self.curr_byte()?;
        if c == b'\'' || c == b'"' {
            self.advance(1);
            Ok(c)
        } else {
            Err(StreamError::InvalidQuote(c, self.gen_text_pos()))
        }
    }
    /// Calculates a current absolute position.
    ///
    /// This operation is very expensive. Use only for errors.
    #[inline(never)]
    pub fn gen_text_pos(&self) -> TextPos {
        let text = self.span.as_str();
        let end = self.pos;

        let row = Self::calc_curr_row(text, end);
        let col = Self::calc_curr_col(text, end);
        TextPos::new(row, col)
    }

    /// Calculates an absolute position at `pos`.
    ///
    /// This operation is very expensive. Use only for errors.
    ///
    /// An out-of-range `pos` is clamped to the text length.
    ///
    /// # Examples
    ///
    /// ```
    /// let s = xmlparser::Stream::from("text");
    ///
    /// assert_eq!(s.gen_text_pos_from(2), xmlparser::TextPos::new(1, 3));
    /// assert_eq!(s.gen_text_pos_from(9999), xmlparser::TextPos::new(1, 5));
    /// ```
    #[inline(never)]
    pub fn gen_text_pos_from(&self, pos: usize) -> TextPos {
        let mut s = *self;
        s.pos = cmp::min(pos, s.span.as_str().len());
        s.gen_text_pos()
    }
fn calc_curr_row(text: &str, end: usize) -> u32 {
let mut row = 1;
for c in &text.as_bytes()[..end] {
if *c == b'\n' {
row += 1;
}
}
row
}
fn calc_curr_col(text: &str, end: usize) -> u32 {
let mut col = 1;
for c in text[..end].chars().rev() {
if c == '\n' {
break;
} else {
col += 1;
}
}
col
}
}
| rust | Apache-2.0 | 9c8e34305723118c67d7feba95772705b2247776 | 2026-01-04T20:18:02.902370Z | false |
RazrFalcon/xmlparser | https://github.com/RazrFalcon/xmlparser/blob/9c8e34305723118c67d7feba95772705b2247776/src/lib.rs | src/lib.rs | //! [<img alt="github" src="https://img.shields.io/badge/github-RazrFalcon/xmlparser-8da0cb?style=for-the-badge&logo=github" height="20">](https://github.com/RazrFalcon/xmlparser)
//! [<img alt="crates.io" src="https://img.shields.io/crates/v/xmlparser.svg?style=for-the-badge&color=fc8d62&logo=rust" height="20">](https://crates.io/crates/xmlparser)
//! [<img alt="docs.rs" src="https://img.shields.io/badge/docs.rs-xmlparser-66c2a5?style=for-the-badge&logoColor=white&logo=data:image/svg+xml;base64,PHN2ZyByb2xlPSJpbWciIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgdmlld0JveD0iMCAwIDUxMiA1MTIiPjxwYXRoIGZpbGw9IiNmNWY1ZjUiIGQ9Ik00ODguNiAyNTAuMkwzOTIgMjE0VjEwNS41YzAtMTUtOS4zLTI4LjQtMjMuNC0zMy43bC0xMDAtMzcuNWMtOC4xLTMuMS0xNy4xLTMuMS0yNS4zIDBsLTEwMCAzNy41Yy0xNC4xIDUuMy0yMy40IDE4LjctMjMuNCAzMy43VjIxNGwtOTYuNiAzNi4yQzkuMyAyNTUuNSAwIDI2OC45IDAgMjgzLjlWMzk0YzAgMTMuNiA3LjcgMjYuMSAxOS45IDMyLjJsMTAwIDUwYzEwLjEgNS4xIDIyLjEgNS4xIDMyLjIgMGwxMDMuOS01MiAxMDMuOSA1MmMxMC4xIDUuMSAyMi4xIDUuMSAzMi4yIDBsMTAwLTUwYzEyLjItNi4xIDE5LjktMTguNiAxOS45LTMyLjJWMjgzLjljMC0xNS05LjMtMjguNC0yMy40LTMzLjd6TTM1OCAyMTQuOGwtODUgMzEuOXYtNjguMmw4NS0zN3Y3My4zek0xNTQgMTA0LjFsMTAyLTM4LjIgMTAyIDM4LjJ2LjZsLTEwMiA0MS40LTEwMi00MS40di0uNnptODQgMjkxLjFsLTg1IDQyLjV2LTc5LjFsODUtMzguOHY3NS40em0wLTExMmwtMTAyIDQxLjQtMTAyLTQxLjR2LS42bDEwMi0zOC4yIDEwMiAzOC4ydi42em0yNDAgMTEybC04NSA0Mi41di03OS4xbDg1LTM4Ljh2NzUuNHptMC0xMTJsLTEwMiA0MS40LTEwMi00MS40di0uNmwxMDItMzguMiAxMDIgMzguMnYuNnoiPjwvcGF0aD48L3N2Zz4K" height="20">](https://docs.rs/xmlparser)
//!
//! *xmlparser* is a low-level, pull-based, zero-allocation
//! [XML 1.0](https://www.w3.org/TR/xml/) parser.
//!
//! <br>
//!
//! ## Example
//!
//! ```rust
//! for token in xmlparser::Tokenizer::from("<tagname name='value'/>") {
//! println!("{:?}", token);
//! }
//! ```
//!
//! <br>
//!
//! ## Why a new library?
//!
//! This library is basically a low-level XML tokenizer that preserves the
//! positions of the tokens and is not intended to be used directly.
//!
//! If you are looking for a higher level solution, check out
//! [roxmltree](https://github.com/RazrFalcon/roxmltree).
//!
//! <br>
//!
//! ## Benefits
//!
//! - All tokens contain `StrSpan` structs which represent the position of the
//! substring in the original document.
//! - Good error processing. All error types contain the position (line:column)
//! where it occurred.
//! - No heap allocations.
//! - No dependencies.
//! - Tiny. ~1400 LOC and ~30KiB in the release build according to
//! `cargo-bloat`.
//! - Supports `no_std` builds. To use without the standard library, disable the
//! default features.
//!
//! <br>
//!
//! ## Limitations
//!
//! - Currently, only ENTITY objects are parsed from the DOCTYPE. All others are
//! ignored.
//! - No tree structure validation. So an XML like
//! `<root><child></root></child>` or a string without root element will be
//! parsed without errors. You should check for this manually. On the other
//! hand `<a/><a/>` will lead to an error.
//! - Duplicated attributes are not an error. So XML like `<item a="v1" a="v2"/>`
//! will be parsed without errors. You should check for this manually.
//! - UTF-8 only.
//!
//! <br>
//!
//! ## Safety
//!
//! - The library must not panic. Any panic is considered a critical bug and
//! should be reported.
//! - The library forbids unsafe code.
//!
//! <br>
//!
//! ## License
//!
//! Licensed under either of
//!
//! - Apache License, Version 2.0 ([LICENSE-APACHE] or
//! http://www.apache.org/licenses/LICENSE-2.0)
//! - MIT license ([LICENSE-MIT] or http://opensource.org/licenses/MIT)
//!
//! at your option.
//!
//! <br>
//!
//! ### Contribution
//!
//! Unless you explicitly state otherwise, any contribution intentionally submitted
//! for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
//! dual licensed as above, without any additional terms or conditions.
//!
//! [LICENSE-APACHE]: https://github.com/RazrFalcon/xmlparser/blob/master/LICENSE-APACHE
//! [LICENSE-MIT]: https://github.com/RazrFalcon/xmlparser/blob/master/LICENSE-MIT
#![no_std]
#![forbid(unsafe_code)]
#![warn(missing_docs)]
#![allow(ellipsis_inclusive_range_patterns)]
#[cfg(feature = "std")]
#[macro_use]
extern crate std;
// Local polyfill of a boolean pattern-test macro.
// NOTE(review): `core::matches!` has been stable since Rust 1.42 and would
// shadow/replace this — confirm the crate's MSRV before removing it.
macro_rules! matches {
    ($expression:expr, $($pattern:tt)+) => {
        match $expression {
            $($pattern)+ => true,
            _ => false
        }
    }
}
mod error;
mod stream;
mod strspan;
mod xmlchar;
pub use crate::error::*;
pub use crate::stream::*;
pub use crate::strspan::*;
pub use crate::xmlchar::*;
/// An XML token.
///
/// Every `StrSpan` borrows from the original input, so each token carries
/// the exact byte range it was parsed from (see the `span` diagrams below).
#[allow(missing_docs)]
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum Token<'a> {
    /// Declaration token.
    ///
    /// ```text
    /// <?xml version='1.0' encoding='UTF-8' standalone='yes'?>
    ///                ---                                      - version
    ///                              -----                      - encoding?
    ///                                                  ---    - standalone?
    /// ------------------------------------------------------- - span
    /// ```
    Declaration {
        version: StrSpan<'a>,
        encoding: Option<StrSpan<'a>>,
        standalone: Option<bool>,
        span: StrSpan<'a>,
    },

    /// Processing instruction token.
    ///
    /// ```text
    /// <?target content?>
    ///   ------           - target
    ///          -------   - content?
    /// ------------------ - span
    /// ```
    ProcessingInstruction {
        target: StrSpan<'a>,
        content: Option<StrSpan<'a>>,
        span: StrSpan<'a>,
    },

    /// Comment token.
    ///
    /// ```text
    /// <!-- text -->
    ///     ------    - text
    /// ------------- - span
    /// ```
    Comment {
        text: StrSpan<'a>,
        span: StrSpan<'a>,
    },

    /// DOCTYPE start token.
    ///
    /// ```text
    /// <!DOCTYPE greeting SYSTEM "hello.dtd" [
    ///           --------                      - name
    ///                    ------------------   - external_id?
    /// --------------------------------------- - span
    /// ```
    DtdStart {
        name: StrSpan<'a>,
        external_id: Option<ExternalId<'a>>,
        span: StrSpan<'a>,
    },

    /// Empty DOCTYPE token.
    ///
    /// ```text
    /// <!DOCTYPE greeting SYSTEM "hello.dtd">
    ///           --------                     - name
    ///                    ------------------  - external_id?
    /// -------------------------------------- - span
    /// ```
    EmptyDtd {
        name: StrSpan<'a>,
        external_id: Option<ExternalId<'a>>,
        span: StrSpan<'a>,
    },

    /// ENTITY token.
    ///
    /// Can appear only inside the DTD.
    ///
    /// ```text
    /// <!ENTITY ns_extend "http://test.com">
    ///          ---------                    - name
    ///                     ---------------   - definition
    /// ------------------------------------- - span
    /// ```
    EntityDeclaration {
        name: StrSpan<'a>,
        definition: EntityDefinition<'a>,
        span: StrSpan<'a>,
    },

    /// DOCTYPE end token.
    ///
    /// ```text
    /// <!DOCTYPE svg [
    ///    ...
    /// ]>
    /// -- - span
    /// ```
    DtdEnd { span: StrSpan<'a> },

    /// Element start token.
    ///
    /// ```text
    /// <ns:elem attr="value"/>
    ///  --                     - prefix
    ///     ----                - local
    /// --------                - span
    /// ```
    ElementStart {
        prefix: StrSpan<'a>,
        local: StrSpan<'a>,
        span: StrSpan<'a>,
    },

    /// Attribute token.
    ///
    /// ```text
    /// <elem ns:attr="value"/>
    ///       --                - prefix
    ///          ----           - local
    ///                -----    - value
    ///       ---------------   - span
    /// ```
    Attribute {
        prefix: StrSpan<'a>,
        local: StrSpan<'a>,
        value: StrSpan<'a>,
        span: StrSpan<'a>,
    },

    /// Element end token.
    ///
    /// ```text
    /// <ns:elem>text</ns:elem>
    ///                         - ElementEnd::Open
    ///         -               - span
    /// ```
    ///
    /// ```text
    /// <ns:elem>text</ns:elem>
    ///                -- ----  - ElementEnd::Close(prefix, local)
    ///              ---------- - span
    /// ```
    ///
    /// ```text
    /// <ns:elem/>
    ///                         - ElementEnd::Empty
    ///         --              - span
    /// ```
    ElementEnd {
        end: ElementEnd<'a>,
        span: StrSpan<'a>,
    },

    /// Text token.
    ///
    /// Contains text between elements including whitespaces.
    /// Basically everything between `>` and `<`.
    /// Except `]]>`, which is not allowed and will lead to an error.
    ///
    /// ```text
    /// <p> text </p>
    ///    ------     - text
    /// ```
    ///
    /// The token span is equal to the `text`.
    Text { text: StrSpan<'a> },

    /// CDATA token.
    ///
    /// ```text
    /// <p><![CDATA[text]]></p>
    ///             ----        - text
    ///    ----------------     - span
    /// ```
    Cdata {
        text: StrSpan<'a>,
        span: StrSpan<'a>,
    },
}
impl<'a> Token<'a> {
    /// Returns the [`StrSpan`] encompassing all of the token.
    ///
    /// For [`Token::Text`] there is no surrounding markup, so the span
    /// is the `text` field itself.
    pub fn span(&self) -> StrSpan<'a> {
        match *self {
            Token::Declaration { span, .. }
            | Token::ProcessingInstruction { span, .. }
            | Token::Comment { span, .. }
            | Token::DtdStart { span, .. }
            | Token::EmptyDtd { span, .. }
            | Token::EntityDeclaration { span, .. }
            | Token::DtdEnd { span, .. }
            | Token::ElementStart { span, .. }
            | Token::Attribute { span, .. }
            | Token::ElementEnd { span, .. }
            | Token::Cdata { span, .. } => span,
            Token::Text { text } => text,
        }
    }
}
/// `ElementEnd` token.
///
/// Distinguishes how a tag terminated; see [`Token::ElementEnd`].
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum ElementEnd<'a> {
    /// Indicates `>`
    Open,
    /// Indicates `</name>` (prefix, local name)
    Close(StrSpan<'a>, StrSpan<'a>),
    /// Indicates `/>`
    Empty,
}
/// Representation of the [ExternalID](https://www.w3.org/TR/xml/#NT-ExternalID) value.
#[allow(missing_docs)]
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum ExternalId<'a> {
    /// `SYSTEM "system-literal"`
    System(StrSpan<'a>),
    /// `PUBLIC "pubid-literal" "system-literal"`
    Public(StrSpan<'a>, StrSpan<'a>),
}
/// Representation of the [EntityDef](https://www.w3.org/TR/xml/#NT-EntityDef) value.
#[allow(missing_docs)]
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum EntityDefinition<'a> {
    /// An inline, quoted entity value.
    EntityValue(StrSpan<'a>),
    /// A reference to an external resource.
    ExternalId(ExternalId<'a>),
}
// Crate-local result aliases: `Result` for position-annotated tokenizer
// errors, `StreamResult` for low-level stream errors that are later wrapped
// with a position via `map_err_at!`.
type Result<T> = core::result::Result<T, Error>;
type StreamResult<T> = core::result::Result<T, StreamError>;
// Tokenizer state machine: which part of the document grammar
// (prolog, DTD, element tree, epilog) is currently being parsed.
#[derive(Clone, Copy, PartialEq, Debug)]
enum State {
    Declaration,      // very start; an XML declaration is still allowed
    AfterDeclaration, // prolog: misc items and an optional DOCTYPE
    Dtd,              // inside the internal DTD subset (`[` ... `]>`)
    AfterDtd,         // prolog after the DTD; expecting the root element
    Elements,         // inside element content
    Attributes,       // inside a start tag, parsing attributes
    AfterElements,    // epilog: only comments/PIs/whitespace allowed
    End,              // parsing finished
}
/// Tokenizer for the XML structure.
#[derive(Clone)]
pub struct Tokenizer<'a> {
    stream: Stream<'a>,       // input text plus the current read position
    state: State,             // current grammar section (see `State`)
    depth: usize,             // open-element nesting depth
    fragment_parsing: bool,   // `true` when parsing a rootless fragment
}
impl core::fmt::Debug for Tokenizer<'_> {
    /// The stream contents are deliberately elided: the input can be huge
    /// and is rarely useful in debug output.
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        f.write_str("Tokenizer { ... }")
    }
}
impl<'a> From<&'a str> for Tokenizer<'a> {
    #[inline]
    fn from(text: &'a str) -> Self {
        // A UTF-8 byte-order mark at the very start is not part of the
        // document and must be skipped before tokenizing.
        const BOM: &[u8] = &[0xEF, 0xBB, 0xBF];
        let mut stream = Stream::from(text);
        if stream.starts_with(BOM) {
            stream.advance(BOM.len());
        }
        Tokenizer {
            stream,
            state: State::Declaration,
            depth: 0,
            fragment_parsing: false,
        }
    }
}
// Runs `$fun` and, on failure, wraps the resulting `StreamError` into
// `Error::$err` together with the text position where this token started.
macro_rules! map_err_at {
    ($fun:expr, $stream:expr, $err:ident) => {{
        let start = $stream.pos();
        $fun.map_err(|e| Error::$err(e, $stream.gen_text_pos_from(start)))
    }};
}
impl<'a> Tokenizer<'a> {
    /// Enables document fragment parsing.
    ///
    /// By default, `xmlparser` will check for DTD, root element, etc.
    /// But if we have to parse an XML fragment, it will lead to an error.
    /// This method switches the parser to the root element content parsing mode,
    /// so it will treat any data as a content of the root element.
    pub fn from_fragment(full_text: &'a str, fragment: core::ops::Range<usize>) -> Self {
        Tokenizer {
            stream: Stream::from_substr(full_text, fragment),
            // Jump straight to element-content parsing: no prolog handling.
            state: State::Elements,
            depth: 0,
            fragment_parsing: true,
        }
    }
    /// Advances the tokenizer one step.
    ///
    /// Returns `None` both at the end of the stream and when the consumed
    /// input produced no token (skipped whitespace, ignored declarations);
    /// the caller is expected to call this in a loop.
    fn parse_next_impl(&mut self) -> Option<Result<Token<'a>>> {
        let s = &mut self.stream;
        if s.at_end() {
            return None;
        }
        let start = s.pos();
        match self.state {
            // Very start of the document: an XML declaration, if present,
            // must be the first thing in the input.
            State::Declaration => {
                self.state = State::AfterDeclaration;
                if s.starts_with(b"<?xml ") {
                    Some(Self::parse_declaration(s))
                } else {
                    None
                }
            }
            // Prolog: misc items (comments/PIs/whitespace) and an optional
            // DOCTYPE. A second `<?xml` here is an error.
            State::AfterDeclaration => {
                if s.starts_with(b"<!DOCTYPE") {
                    let t = Self::parse_doctype(s);
                    match t {
                        Ok(Token::DtdStart { .. }) => self.state = State::Dtd,
                        Ok(Token::EmptyDtd { .. }) => self.state = State::AfterDtd,
                        _ => {}
                    }
                    Some(t)
                } else if s.starts_with(b"<!--") {
                    Some(Self::parse_comment(s))
                } else if s.starts_with(b"<?") {
                    if s.starts_with(b"<?xml ") {
                        Some(Err(Error::UnknownToken(s.gen_text_pos())))
                    } else {
                        Some(Self::parse_pi(s))
                    }
                } else if s.starts_with_space() {
                    s.skip_spaces();
                    None
                } else {
                    // Anything else must be the root element; re-dispatch.
                    self.state = State::AfterDtd;
                    None
                }
            }
            // Inside the internal DTD subset, until the closing `]>`.
            State::Dtd => {
                if s.starts_with(b"<!ENTITY") {
                    Some(Self::parse_entity_decl(s))
                } else if s.starts_with(b"<!--") {
                    Some(Self::parse_comment(s))
                } else if s.starts_with(b"<?") {
                    if s.starts_with(b"<?xml ") {
                        Some(Err(Error::UnknownToken(s.gen_text_pos())))
                    } else {
                        Some(Self::parse_pi(s))
                    }
                } else if s.starts_with(b"]") {
                    // DTD ends with ']' S? '>', therefore we have to skip possible spaces.
                    s.advance(1);
                    s.skip_spaces();
                    match s.curr_byte() {
                        Ok(b'>') => {
                            self.state = State::AfterDtd;
                            s.advance(1);
                            Some(Ok(Token::DtdEnd {
                                span: s.slice_back(start),
                            }))
                        }
                        Ok(c) => {
                            let e = StreamError::InvalidChar(c, b'>', s.gen_text_pos());
                            Some(Err(Error::InvalidDoctype(e, s.gen_text_pos_from(start))))
                        }
                        Err(_) => {
                            let e = StreamError::UnexpectedEndOfStream;
                            Some(Err(Error::InvalidDoctype(e, s.gen_text_pos_from(start))))
                        }
                    }
                } else if s.starts_with_space() {
                    s.skip_spaces();
                    None
                } else if s.starts_with(b"<!ELEMENT")
                    || s.starts_with(b"<!ATTLIST")
                    || s.starts_with(b"<!NOTATION")
                {
                    // These declarations are recognized but not tokenized:
                    // they are skipped without producing a token.
                    if Self::consume_decl(s).is_err() {
                        let pos = s.gen_text_pos_from(start);
                        Some(Err(Error::UnknownToken(pos)))
                    } else {
                        None
                    }
                } else {
                    Some(Err(Error::UnknownToken(s.gen_text_pos())))
                }
            }
            // Prolog after the DTD: only misc items until the root element.
            State::AfterDtd => {
                if s.starts_with(b"<!--") {
                    Some(Self::parse_comment(s))
                } else if s.starts_with(b"<?") {
                    if s.starts_with(b"<?xml ") {
                        Some(Err(Error::UnknownToken(s.gen_text_pos())))
                    } else {
                        Some(Self::parse_pi(s))
                    }
                } else if s.starts_with(b"<!") {
                    Some(Err(Error::UnknownToken(s.gen_text_pos())))
                } else if s.starts_with(b"<") {
                    self.state = State::Attributes;
                    Some(Self::parse_element_start(s))
                } else if s.starts_with_space() {
                    s.skip_spaces();
                    None
                } else {
                    Some(Err(Error::UnknownToken(s.gen_text_pos())))
                }
            }
            State::Elements => {
                // Use `match` only here, because only this section is performance-critical.
                match s.curr_byte() {
                    Ok(b'<') => match s.next_byte() {
                        Ok(b'!') => {
                            if s.starts_with(b"<!--") {
                                Some(Self::parse_comment(s))
                            } else if s.starts_with(b"<![CDATA[") {
                                Some(Self::parse_cdata(s))
                            } else {
                                Some(Err(Error::UnknownToken(s.gen_text_pos())))
                            }
                        }
                        Ok(b'?') => {
                            if !s.starts_with(b"<?xml ") {
                                Some(Self::parse_pi(s))
                            } else {
                                Some(Err(Error::UnknownToken(s.gen_text_pos())))
                            }
                        }
                        Ok(b'/') => {
                            // Closing tag: drop one nesting level; once the
                            // root closes (non-fragment mode) move to epilog.
                            if self.depth > 0 {
                                self.depth -= 1;
                            }
                            if self.depth == 0 && !self.fragment_parsing {
                                self.state = State::AfterElements;
                            } else {
                                self.state = State::Elements;
                            }
                            Some(Self::parse_close_element(s))
                        }
                        Ok(_) => {
                            self.state = State::Attributes;
                            Some(Self::parse_element_start(s))
                        }
                        Err(_) => Some(Err(Error::UnknownToken(s.gen_text_pos()))),
                    },
                    Ok(_) => Some(Self::parse_text(s)),
                    Err(_) => Some(Err(Error::UnknownToken(s.gen_text_pos()))),
                }
            }
            // Inside a start tag: attributes until `>` or `/>`.
            State::Attributes => {
                let t = Self::parse_attribute(s);
                if let Ok(Token::ElementEnd { end, .. }) = t {
                    if end == ElementEnd::Open {
                        self.depth += 1;
                    }
                    if self.depth == 0 && !self.fragment_parsing {
                        self.state = State::AfterElements;
                    } else {
                        self.state = State::Elements;
                    }
                }
                Some(t.map_err(|e| Error::InvalidAttribute(e, s.gen_text_pos_from(start))))
            }
            // Epilog: only comments, PIs and whitespace may follow the root.
            State::AfterElements => {
                if s.starts_with(b"<!--") {
                    Some(Self::parse_comment(s))
                } else if s.starts_with(b"<?") {
                    if s.starts_with(b"<?xml ") {
                        Some(Err(Error::UnknownToken(s.gen_text_pos())))
                    } else {
                        Some(Self::parse_pi(s))
                    }
                } else if s.starts_with_space() {
                    s.skip_spaces();
                    None
                } else {
                    Some(Err(Error::UnknownToken(s.gen_text_pos())))
                }
            }
            State::End => None,
        }
    }
    // Wraps stream-level failures into `Error::InvalidDeclaration` with a position.
    fn parse_declaration(s: &mut Stream<'a>) -> Result<Token<'a>> {
        map_err_at!(Self::parse_declaration_impl(s), s, InvalidDeclaration)
    }
    // XMLDecl ::= '<?xml' VersionInfo EncodingDecl? SDDecl? S? '?>'
    fn parse_declaration_impl(s: &mut Stream<'a>) -> StreamResult<Token<'a>> {
        // Requires whitespace between declaration parts, unless the
        // declaration (or the stream) ends right here.
        fn consume_spaces(s: &mut Stream) -> StreamResult<()> {
            if s.starts_with_space() {
                s.skip_spaces();
            } else if !s.starts_with(b"?>") && !s.at_end() {
                return Err(StreamError::InvalidSpace(
                    s.curr_byte_unchecked(),
                    s.gen_text_pos(),
                ));
            }
            Ok(())
        }
        let start = s.pos();
        // Skip "<?xml " (the caller guaranteed it is present).
        s.advance(6);
        let version = Self::parse_version_info(s)?;
        consume_spaces(s)?;
        // `encoding` and `standalone` are both optional.
        let encoding = Self::parse_encoding_decl(s)?;
        if encoding.is_some() {
            consume_spaces(s)?;
        }
        let standalone = Self::parse_standalone(s)?;
        s.skip_spaces();
        s.skip_string(b"?>")?;
        let span = s.slice_back(start);
        Ok(Token::Declaration {
            version,
            encoding,
            standalone,
            span,
        })
    }
// VersionInfo ::= S 'version' Eq ("'" VersionNum "'" | '"' VersionNum '"')
// VersionNum ::= '1.' [0-9]+
fn parse_version_info(s: &mut Stream<'a>) -> StreamResult<StrSpan<'a>> {
s.skip_spaces();
s.skip_string(b"version")?;
s.consume_eq()?;
let quote = s.consume_quote()?;
let start = s.pos();
s.skip_string(b"1.")?;
s.skip_bytes(|_, c| c.is_xml_digit());
let ver = s.slice_back(start);
s.consume_byte(quote)?;
Ok(ver)
}
    // EncodingDecl ::= S 'encoding' Eq ('"' EncName '"' | "'" EncName "'" )
    // EncName ::= [A-Za-z] ([A-Za-z0-9._] | '-')*
    fn parse_encoding_decl(s: &mut Stream<'a>) -> StreamResult<Option<StrSpan<'a>>> {
        // The whole clause is optional.
        if !s.starts_with(b"encoding") {
            return Ok(None);
        }
        // Skip "encoding".
        s.advance(8);
        s.consume_eq()?;
        let quote = s.consume_quote()?;
        // [A-Za-z] ([A-Za-z0-9._] | '-')*
        // TODO: check that first byte is [A-Za-z]
        // NOTE(review): as written, a name starting with a digit/'.'/'-'
        // is accepted, which is laxer than the grammar above — confirm
        // whether tightening this would break existing callers.
        let name = s.consume_bytes(|_, c| {
            c.is_xml_letter() || c.is_xml_digit() || c == b'.' || c == b'-' || c == b'_'
        });
        s.consume_byte(quote)?;
        Ok(Some(name))
    }
// SDDecl ::= S 'standalone' Eq (("'" ('yes' | 'no') "'") | ('"' ('yes' | 'no') '"'))
fn parse_standalone(s: &mut Stream<'a>) -> StreamResult<Option<bool>> {
if !s.starts_with(b"standalone") {
return Ok(None);
}
s.advance(10);
s.consume_eq()?;
let quote = s.consume_quote()?;
let start = s.pos();
let value = s.consume_name()?.as_str();
let flag = match value {
"yes" => true,
"no" => false,
_ => {
let pos = s.gen_text_pos_from(start);
return Err(StreamError::InvalidString("yes', 'no", pos));
}
};
s.consume_byte(quote)?;
Ok(Some(flag))
}
    // Wraps stream-level failures into `Error::InvalidComment` with a position.
    fn parse_comment(s: &mut Stream<'a>) -> Result<Token<'a>> {
        let start = s.pos();
        Self::parse_comment_impl(s)
            .map_err(|e| Error::InvalidComment(e, s.gen_text_pos_from(start)))
    }
    // '<!--' ((Char - '-') | ('-' (Char - '-')))* '-->'
    fn parse_comment_impl(s: &mut Stream<'a>) -> StreamResult<Token<'a>> {
        let start = s.pos();
        // Skip "<!--".
        s.advance(4);
        let text = s.consume_chars(|s, c| !(c == '-' && s.starts_with(b"-->")))?;
        s.skip_string(b"-->")?;
        // The grammar forbids "--" inside a comment and a trailing '-';
        // these are validated after the fact on the collected text.
        if text.as_str().contains("--") {
            return Err(StreamError::InvalidCommentData);
        }
        if text.as_str().ends_with('-') {
            return Err(StreamError::InvalidCommentEnd);
        }
        let span = s.slice_back(start);
        Ok(Token::Comment { text, span })
    }
    // Wraps stream-level failures into `Error::InvalidPI` with a position.
    fn parse_pi(s: &mut Stream<'a>) -> Result<Token<'a>> {
        map_err_at!(Self::parse_pi_impl(s), s, InvalidPI)
    }
// PI ::= '<?' PITarget (S (Char* - (Char* '?>' Char*)))? '?>'
// PITarget ::= Name - (('X' | 'x') ('M' | 'm') ('L' | 'l'))
fn parse_pi_impl(s: &mut Stream<'a>) -> StreamResult<Token<'a>> {
let start = s.pos();
s.advance(2);
let target = s.consume_name()?;
s.skip_spaces();
let content = s.consume_chars(|s, c| !(c == '?' && s.starts_with(b"?>")))?;
let content = if !content.is_empty() {
Some(content)
} else {
None
};
s.skip_string(b"?>")?;
let span = s.slice_back(start);
Ok(Token::ProcessingInstruction {
target,
content,
span,
})
}
    // Wraps stream-level failures into `Error::InvalidDoctype` with a position.
    fn parse_doctype(s: &mut Stream<'a>) -> Result<Token<'a>> {
        map_err_at!(Self::parse_doctype_impl(s), s, InvalidDoctype)
    }
    // doctypedecl ::= '<!DOCTYPE' S Name (S ExternalID)? S? ('[' intSubset ']' S?)? '>'
    fn parse_doctype_impl(s: &mut Stream<'a>) -> StreamResult<Token<'a>> {
        let start = s.pos();
        // Skip "<!DOCTYPE".
        s.advance(9);
        s.consume_spaces()?;
        let name = s.consume_name()?;
        s.skip_spaces();
        let external_id = Self::parse_external_id(s)?;
        s.skip_spaces();
        // '[' opens the internal subset (DtdStart); '>' closes the whole
        // declaration immediately (EmptyDtd). Anything else is an error.
        let c = s.curr_byte()?;
        if c != b'[' && c != b'>' {
            static EXPECTED: &[u8] = b"[>";
            return Err(StreamError::InvalidCharMultiple(
                c,
                EXPECTED,
                s.gen_text_pos(),
            ));
        }
        s.advance(1);
        let span = s.slice_back(start);
        if c == b'[' {
            Ok(Token::DtdStart {
                name,
                external_id,
                span,
            })
        } else {
            Ok(Token::EmptyDtd {
                name,
                external_id,
                span,
            })
        }
    }
// ExternalID ::= 'SYSTEM' S SystemLiteral | 'PUBLIC' S PubidLiteral S SystemLiteral
fn parse_external_id(s: &mut Stream<'a>) -> StreamResult<Option<ExternalId<'a>>> {
let v = if s.starts_with(b"SYSTEM") || s.starts_with(b"PUBLIC") {
let start = s.pos();
s.advance(6);
let id = s.slice_back(start);
s.consume_spaces()?;
let quote = s.consume_quote()?;
let literal1 = s.consume_bytes(|_, c| c != quote);
s.consume_byte(quote)?;
let v = if id.as_str() == "SYSTEM" {
ExternalId::System(literal1)
} else {
s.consume_spaces()?;
let quote = s.consume_quote()?;
let literal2 = s.consume_bytes(|_, c| c != quote);
s.consume_byte(quote)?;
ExternalId::Public(literal1, literal2)
};
Some(v)
} else {
None
};
Ok(v)
}
    // Wraps stream-level failures into `Error::InvalidEntity` with a position.
    fn parse_entity_decl(s: &mut Stream<'a>) -> Result<Token<'a>> {
        map_err_at!(Self::parse_entity_decl_impl(s), s, InvalidEntity)
    }
// EntityDecl ::= GEDecl | PEDecl
// GEDecl ::= '<!ENTITY' S Name S EntityDef S? '>'
// PEDecl ::= '<!ENTITY' S '%' S Name S PEDef S? '>'
fn parse_entity_decl_impl(s: &mut Stream<'a>) -> StreamResult<Token<'a>> {
let start = s.pos();
s.advance(8);
s.consume_spaces()?;
let is_ge = if s.try_consume_byte(b'%') {
s.consume_spaces()?;
false
} else {
true
};
let name = s.consume_name()?;
s.consume_spaces()?;
let definition = Self::parse_entity_def(s, is_ge)?;
s.skip_spaces();
s.consume_byte(b'>')?;
let span = s.slice_back(start);
Ok(Token::EntityDeclaration {
name,
definition,
span,
})
}
    // EntityDef ::= EntityValue | (ExternalID NDataDecl?)
    // PEDef ::= EntityValue | ExternalID
    // EntityValue ::= '"' ([^%&"] | PEReference | Reference)* '"' | "'" ([^%&']
    //                     | PEReference | Reference)* "'"
    // ExternalID ::= 'SYSTEM' S SystemLiteral | 'PUBLIC' S PubidLiteral S SystemLiteral
    // NDataDecl ::= S 'NDATA' S Name
    fn parse_entity_def(s: &mut Stream<'a>, is_ge: bool) -> StreamResult<EntityDefinition<'a>> {
        // Dispatch on the first byte: a quote starts an inline EntityValue,
        // 'S'/'P' start the SYSTEM/PUBLIC keyword of an ExternalID.
        let c = s.curr_byte()?;
        match c {
            b'"' | b'\'' => {
                let quote = s.consume_quote()?;
                let value = s.consume_bytes(|_, c| c != quote);
                s.consume_byte(quote)?;
                Ok(EntityDefinition::EntityValue(value))
            }
            b'S' | b'P' => {
                if let Some(id) = Self::parse_external_id(s)? {
                    if is_ge {
                        s.skip_spaces();
                        // NDataDecl is only valid for general entities;
                        // it is consumed but not represented in the token.
                        if s.starts_with(b"NDATA") {
                            s.advance(5);
                            s.consume_spaces()?;
                            s.skip_name()?;
                            // TODO: NDataDecl is not supported
                        }
                    }
                    Ok(EntityDefinition::ExternalId(id))
                } else {
                    Err(StreamError::InvalidExternalID)
                }
            }
            _ => {
                static EXPECTED: &[u8] = b"\"'SP";
                let pos = s.gen_text_pos();
                Err(StreamError::InvalidCharMultiple(c, EXPECTED, pos))
            }
        }
    }
    // Skips an unsupported DTD declaration (ELEMENT/ATTLIST/NOTATION)
    // up to and including its closing '>'.
    fn consume_decl(s: &mut Stream) -> StreamResult<()> {
        s.skip_bytes(|_, c| c != b'>');
        s.consume_byte(b'>')?;
        Ok(())
    }
    // Wraps stream-level failures into `Error::InvalidCdata` with a position.
    fn parse_cdata(s: &mut Stream<'a>) -> Result<Token<'a>> {
        map_err_at!(Self::parse_cdata_impl(s), s, InvalidCdata)
    }
// CDSect ::= CDStart CData CDEnd
// CDStart ::= '<![CDATA['
// CData ::= (Char* - (Char* ']]>' Char*))
// CDEnd ::= ']]>'
fn parse_cdata_impl(s: &mut Stream<'a>) -> StreamResult<Token<'a>> {
let start = s.pos();
s.advance(9);
let text = s.consume_chars(|s, c| !(c == ']' && s.starts_with(b"]]>")))?;
s.skip_string(b"]]>")?;
let span = s.slice_back(start);
Ok(Token::Cdata { text, span })
}
    // Wraps stream-level failures into `Error::InvalidElement` with a position.
    fn parse_element_start(s: &mut Stream<'a>) -> Result<Token<'a>> {
        map_err_at!(Self::parse_element_start_impl(s), s, InvalidElement)
    }
// '<' Name (S Attribute)* S? '>'
fn parse_element_start_impl(s: &mut Stream<'a>) -> StreamResult<Token<'a>> {
let start = s.pos();
s.advance(1);
let (prefix, local) = s.consume_qname()?;
let span = s.slice_back(start);
Ok(Token::ElementStart {
prefix,
local,
span,
})
}
    // Wraps stream-level failures into `Error::InvalidElement` with a position.
    fn parse_close_element(s: &mut Stream<'a>) -> Result<Token<'a>> {
        map_err_at!(Self::parse_close_element_impl(s), s, InvalidElement)
    }
// '</' Name S? '>'
fn parse_close_element_impl(s: &mut Stream<'a>) -> StreamResult<Token<'a>> {
let start = s.pos();
s.advance(2);
let (prefix, tag_name) = s.consume_qname()?;
s.skip_spaces();
s.consume_byte(b'>')?;
let span = s.slice_back(start);
Ok(Token::ElementEnd {
end: ElementEnd::Close(prefix, tag_name),
span,
})
}
// Name Eq AttValue
fn parse_attribute(s: &mut Stream<'a>) -> StreamResult<Token<'a>> {
let attr_start = s.pos();
let has_space = s.starts_with_space();
s.skip_spaces();
if let Ok(c) = s.curr_byte() {
let start = s.pos();
match c {
b'/' => {
s.advance(1);
s.consume_byte(b'>')?;
let span = s.slice_back(start);
return Ok(Token::ElementEnd {
end: ElementEnd::Empty,
span,
});
}
b'>' => {
s.advance(1);
let span = s.slice_back(start);
| rust | Apache-2.0 | 9c8e34305723118c67d7feba95772705b2247776 | 2026-01-04T20:18:02.902370Z | true |
RazrFalcon/xmlparser | https://github.com/RazrFalcon/xmlparser/blob/9c8e34305723118c67d7feba95772705b2247776/src/error.rs | src/error.rs | use core::fmt;
use core::str;
#[cfg(feature = "std")]
use std::error;
/// An XML parser errors.
///
/// Each variant (except `UnknownToken`) pairs the low-level [`StreamError`]
/// cause with the [`TextPos`] at which the offending token started.
#[allow(missing_docs)]
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum Error {
    InvalidDeclaration(StreamError, TextPos),
    InvalidComment(StreamError, TextPos),
    InvalidPI(StreamError, TextPos),
    InvalidDoctype(StreamError, TextPos),
    InvalidEntity(StreamError, TextPos),
    InvalidElement(StreamError, TextPos),
    InvalidAttribute(StreamError, TextPos),
    InvalidCdata(StreamError, TextPos),
    InvalidCharData(StreamError, TextPos),
    UnknownToken(TextPos),
}
impl Error {
/// Returns the error position.
pub fn pos(&self) -> TextPos {
match *self {
Error::InvalidDeclaration(_, pos) => pos,
Error::InvalidComment(_, pos) => pos,
Error::InvalidPI(_, pos) => pos,
Error::InvalidDoctype(_, pos) => pos,
Error::InvalidEntity(_, pos) => pos,
Error::InvalidElement(_, pos) => pos,
Error::InvalidAttribute(_, pos) => pos,
Error::InvalidCdata(_, pos) => pos,
Error::InvalidCharData(_, pos) => pos,
Error::UnknownToken(pos) => pos,
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::InvalidDeclaration(ref cause, pos) => {
write!(f, "invalid XML declaration at {} cause {}", pos, cause)
}
Error::InvalidComment(ref cause, pos) => {
write!(f, "invalid comment at {} cause {}", pos, cause)
}
Error::InvalidPI(ref cause, pos) => {
write!(
f,
"invalid processing instruction at {} cause {}",
pos, cause
)
}
Error::InvalidDoctype(ref cause, pos) => {
write!(f, "invalid DTD at {} cause {}", pos, cause)
}
Error::InvalidEntity(ref cause, pos) => {
write!(f, "invalid DTD entity at {} cause {}", pos, cause)
}
Error::InvalidElement(ref cause, pos) => {
write!(f, "invalid element at {} cause {}", pos, cause)
}
Error::InvalidAttribute(ref cause, pos) => {
write!(f, "invalid attribute at {} cause {}", pos, cause)
}
Error::InvalidCdata(ref cause, pos) => {
write!(f, "invalid CDATA at {} cause {}", pos, cause)
}
Error::InvalidCharData(ref cause, pos) => {
write!(f, "invalid character data at {} cause {}", pos, cause)
}
Error::UnknownToken(pos) => {
write!(f, "unknown token at {}", pos)
}
}
}
}
#[cfg(feature = "std")]
impl error::Error for Error {
    // `description` is deprecated in favor of `Display`; it is kept here
    // for compatibility with older `std::error::Error` consumers.
    fn description(&self) -> &str {
        "an XML parsing error"
    }
}
/// A stream parser errors.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum StreamError {
    /// The steam ended earlier than we expected.
    ///
    /// Should only appear on invalid input data.
    /// Errors in a valid XML should be handled by errors below.
    UnexpectedEndOfStream,
    /// An invalid name.
    InvalidName,
    /// A non-XML character has occurred.
    ///
    /// Valid characters are: <https://www.w3.org/TR/xml/#char32>
    NonXmlChar(char, TextPos),
    /// An invalid/unexpected character.
    ///
    /// The first byte is an actual one, the second one is expected.
    ///
    /// We are using a single value to reduce the struct size.
    InvalidChar(u8, u8, TextPos),
    /// An invalid/unexpected character.
    ///
    /// Just like `InvalidChar`, but specifies multiple expected characters.
    InvalidCharMultiple(u8, &'static [u8], TextPos),
    /// An unexpected character instead of `"` or `'`.
    InvalidQuote(u8, TextPos),
    /// An unexpected character instead of an XML space.
    ///
    /// Includes `' '`, `\n`, `\r`, `\t` and their numeric character
    /// references (`&#x20;`, `&#x9;`, `&#xD;`, `&#xA;`).
    InvalidSpace(u8, TextPos),
    /// An unexpected string.
    ///
    /// Contains what string was expected.
    InvalidString(&'static str, TextPos),
    /// An invalid reference.
    InvalidReference,
    /// An invalid ExternalID in the DTD.
    InvalidExternalID,
    /// Comment cannot contain `--`.
    InvalidCommentData,
    /// Comment cannot end with `-`.
    InvalidCommentEnd,
    /// A Character Data node contains an invalid data.
    ///
    /// Currently, only `]]>` is not allowed.
    InvalidCharacterData,
}
impl fmt::Display for StreamError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            StreamError::UnexpectedEndOfStream => {
                write!(f, "unexpected end of stream")
            }
            StreamError::InvalidName => {
                write!(f, "invalid name token")
            }
            StreamError::NonXmlChar(c, pos) => {
                write!(f, "a non-XML character {:?} found at {}", c, pos)
            }
            StreamError::InvalidChar(actual, expected, pos) => {
                write!(
                    f,
                    "expected '{}' not '{}' at {}",
                    expected as char, actual as char, pos
                )
            }
            StreamError::InvalidCharMultiple(actual, expected, pos) => {
                // Render the expected set as a comma-separated list of
                // quoted characters, e.g. `expected '[', '>' not 'x'`.
                let mut expected_iter = expected.iter().peekable();
                write!(f, "expected ")?;
                while let Some(&c) = expected_iter.next() {
                    write!(f, "'{}'", c as char)?;
                    if expected_iter.peek().is_some() {
                        write!(f, ", ")?;
                    }
                }
                write!(f, " not '{}' at {}", actual as char, pos)
            }
            StreamError::InvalidQuote(c, pos) => {
                write!(f, "expected quote mark not '{}' at {}", c as char, pos)
            }
            StreamError::InvalidSpace(c, pos) => {
                write!(f, "expected space not '{}' at {}", c as char, pos)
            }
            StreamError::InvalidString(expected, pos) => {
                write!(f, "expected '{}' at {}", expected, pos)
            }
            StreamError::InvalidReference => {
                write!(f, "invalid reference")
            }
            StreamError::InvalidExternalID => {
                write!(f, "invalid ExternalID")
            }
            StreamError::InvalidCommentData => {
                write!(f, "'--' is not allowed in comments")
            }
            StreamError::InvalidCommentEnd => {
                write!(f, "comment cannot end with '-'")
            }
            StreamError::InvalidCharacterData => {
                write!(f, "']]>' is not allowed inside a character data")
            }
        }
    }
}
#[cfg(feature = "std")]
impl error::Error for StreamError {
    // `description` is deprecated in favor of `Display`; kept for
    // compatibility with older `std::error::Error` consumers.
    fn description(&self) -> &str {
        "an XML stream parsing error"
    }
}
/// Position in text.
///
/// Position indicates a row/line and a column in the original text. Starting from 1:1.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[allow(missing_docs)]
pub struct TextPos {
    pub row: u32, // 1-based line number
    pub col: u32, // 1-based column number
}
impl TextPos {
/// Constructs a new `TextPos`.
///
/// Should not be invoked manually, but rather via `Stream::gen_text_pos`.
pub fn new(row: u32, col: u32) -> TextPos {
TextPos { row, col }
}
}
// Formats as `row:col`, matching the positions shown in error messages.
impl fmt::Display for TextPos {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}:{}", self.row, self.col)
    }
}
| rust | Apache-2.0 | 9c8e34305723118c67d7feba95772705b2247776 | 2026-01-04T20:18:02.902370Z | false |
RazrFalcon/xmlparser | https://github.com/RazrFalcon/xmlparser/blob/9c8e34305723118c67d7feba95772705b2247776/src/xmlchar.rs | src/xmlchar.rs | /// Extension methods for XML-subset only operations.
// Extension methods on `char` for XML character-class checks.
pub trait XmlCharExt {
    /// Checks if the value is within the
    /// [NameStartChar](https://www.w3.org/TR/xml/#NT-NameStartChar) range.
    fn is_xml_name_start(&self) -> bool;
    /// Checks if the value is within the
    /// [NameChar](https://www.w3.org/TR/xml/#NT-NameChar) range.
    fn is_xml_name(&self) -> bool;
    /// Checks if the value is within the
    /// [Char](https://www.w3.org/TR/xml/#NT-Char) range.
    fn is_xml_char(&self) -> bool;
}
impl XmlCharExt for char {
    // NOTE: the `...` range patterns below are deprecated syntax kept
    // deliberately (see the crate-level `allow`) for old-compiler support.
    #[inline]
    #[allow(clippy::match_like_matches_macro)]
    fn is_xml_name_start(&self) -> bool {
        // Check for ASCII first.
        // For ASCII, NameStartChar reduces to [A-Za-z:_].
        if *self as u32 <= 128 {
            return matches!(*self as u8, b'A'...b'Z' | b'a'...b'z' | b':' | b'_');
        }
        match *self as u32 {
            0x0000C0...0x0000D6
            | 0x0000D8...0x0000F6
            | 0x0000F8...0x0002FF
            | 0x000370...0x00037D
            | 0x00037F...0x001FFF
            | 0x00200C...0x00200D
            | 0x002070...0x00218F
            | 0x002C00...0x002FEF
            | 0x003001...0x00D7FF
            | 0x00F900...0x00FDCF
            | 0x00FDF0...0x00FFFD
            | 0x010000...0x0EFFFF => true,
            _ => false,
        }
    }
    #[inline]
    #[allow(clippy::match_like_matches_macro)]
    fn is_xml_name(&self) -> bool {
        // Check for ASCII first.
        if *self as u32 <= 128 {
            return (*self as u8).is_xml_name();
        }
        match *self as u32 {
            0x0000B7
            | 0x0000C0...0x0000D6
            | 0x0000D8...0x0000F6
            | 0x0000F8...0x0002FF
            | 0x000300...0x00036F
            | 0x000370...0x00037D
            | 0x00037F...0x001FFF
            | 0x00200C...0x00200D
            | 0x00203F...0x002040
            | 0x002070...0x00218F
            | 0x002C00...0x002FEF
            | 0x003001...0x00D7FF
            | 0x00F900...0x00FDCF
            | 0x00FDF0...0x00FFFD
            | 0x010000...0x0EFFFF => true,
            _ => false,
        }
    }
    #[inline]
    fn is_xml_char(&self) -> bool {
        // Does not check for surrogate code points U+D800-U+DFFF,
        // since that check was performed by Rust when the `&str` was constructed.
        // Below 0x20 only the XML whitespace characters are valid.
        if (*self as u32) < 0x20 {
            return (*self as u8).is_xml_space();
        }
        !matches!(*self as u32, 0xFFFF | 0xFFFE)
    }
}
/// Extension methods for XML-subset only operations.
pub trait XmlByteExt {
    /// Checks if byte is a digit.
    ///
    /// `[0-9]`
    fn is_xml_digit(&self) -> bool;
    /// Checks if byte is a hex digit.
    ///
    /// `[0-9A-Fa-f]`
    fn is_xml_hex_digit(&self) -> bool;
    /// Checks if byte is a space.
    ///
    /// `[ \r\n\t]`
    fn is_xml_space(&self) -> bool;
    /// Checks if byte is an ASCII char.
    ///
    /// `[A-Za-z]`
    fn is_xml_letter(&self) -> bool;
    /// Checks if byte is within the ASCII
    /// [Char](https://www.w3.org/TR/xml/#NT-Char) range.
    fn is_xml_name(&self) -> bool;
}
impl XmlByteExt for u8 {
#[inline]
fn is_xml_digit(&self) -> bool {
matches!(*self, b'0'...b'9')
}
#[inline]
fn is_xml_hex_digit(&self) -> bool {
matches!(*self, b'0'...b'9' | b'A'...b'F' | b'a'...b'f')
}
#[inline]
fn is_xml_space(&self) -> bool {
matches!(*self, b' ' | b'\t' | b'\n' | b'\r')
}
#[inline]
fn is_xml_letter(&self) -> bool {
matches!(*self, b'A'...b'Z' | b'a'...b'z')
}
#[inline]
fn is_xml_name(&self) -> bool {
matches!(*self, b'A'...b'Z' | b'a'...b'z'| b'0'...b'9'| b':' | b'_' | b'-' | b'.')
}
}
| rust | Apache-2.0 | 9c8e34305723118c67d7feba95772705b2247776 | 2026-01-04T20:18:02.902370Z | false |
RazrFalcon/xmlparser | https://github.com/RazrFalcon/xmlparser/blob/9c8e34305723118c67d7feba95772705b2247776/src/strspan.rs | src/strspan.rs | use core::fmt;
use core::ops::{Deref, Range};
/// A string slice.
///
/// Like `&str`, but also contains the position in the input XML
/// from which it was parsed.
#[must_use]
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct StrSpan<'a> {
    text: &'a str,  // the spanned text itself
    start: usize,   // byte offset of `text` in the original input
}
impl<'a> From<&'a str> for StrSpan<'a> {
    #[inline]
    fn from(text: &'a str) -> Self {
        // A span built from a bare string starts at offset 0.
        StrSpan { text, start: 0 }
    }
}
// Symmetric string comparisons, so spans can be compared with plain
// string slices in both directions: `span == "x"` and `"x" == span`.
// Only the text is compared; the start offset is ignored.
impl PartialEq<str> for StrSpan<'_> {
    fn eq(&self, other: &str) -> bool {
        self.text == other
    }
}
impl PartialEq<&str> for StrSpan<'_> {
    fn eq(&self, other: &&str) -> bool {
        self.text == *other
    }
}
impl PartialEq<StrSpan<'_>> for str {
    fn eq(&self, other: &StrSpan<'_>) -> bool {
        self == other.text
    }
}
impl PartialEq<StrSpan<'_>> for &str {
    fn eq(&self, other: &StrSpan<'_>) -> bool {
        *self == other.text
    }
}
impl<'a> StrSpan<'a> {
    /// Constructs a new `StrSpan` from a `start..end` region of `text`.
    #[inline]
    pub(crate) fn from_substr(text: &str, start: usize, end: usize) -> StrSpan<'_> {
        debug_assert!(start <= end);
        StrSpan {
            text: &text[start..end],
            start,
        }
    }
    /// Returns `true` when the span contains no text.
    pub fn is_empty(&self) -> bool {
        self.text.is_empty()
    }
    /// Returns the byte offset of the span's start in the original input.
    #[inline]
    pub fn start(&self) -> usize {
        self.start
    }
    /// Returns the byte offset just past the span's end.
    #[inline]
    pub fn end(&self) -> usize {
        self.start + self.text.len()
    }
    /// Returns the span as a `start..end` byte range.
    #[inline]
    pub fn range(&self) -> Range<usize> {
        self.start()..self.end()
    }
    /// Returns the spanned text as a plain string slice.
    #[inline]
    pub fn as_str(&self) -> &'a str {
        self.text
    }
    /// Returns a sub-region of the underlying string as a new `StrSpan`.
    #[inline]
    pub(crate) fn slice_region(&self, start: usize, end: usize) -> StrSpan<'a> {
        StrSpan::from_substr(self.text, start, end)
    }
}
// Debug output includes both the text and its byte range, e.g.
// `StrSpan("abc" 4..7)`.
impl fmt::Debug for StrSpan<'_> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "StrSpan({:?} {}..{})",
            self.as_str(),
            self.start(),
            self.end()
        )
    }
}
// Display prints only the spanned text, without the offsets.
impl fmt::Display for StrSpan<'_> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(self.as_str())
    }
}
// Lets `str` methods be called directly on a `StrSpan`.
impl Deref for StrSpan<'_> {
    type Target = str;
    fn deref(&self) -> &Self::Target {
        self.text
    }
}
| rust | Apache-2.0 | 9c8e34305723118c67d7feba95772705b2247776 | 2026-01-04T20:18:02.902370Z | false |
RazrFalcon/xmlparser | https://github.com/RazrFalcon/xmlparser/blob/9c8e34305723118c67d7feba95772705b2247776/tests/integration/comments.rs | tests/integration/comments.rs | use crate::token::*;
// Happy-path comment parsing: each case checks the extracted comment text
// and the byte range of the whole `<!--...-->` span.
test!(
    comment_01,
    "<!--comment-->",
    Token::Comment("comment", 0..14)
);
test!(comment_02, "<!--<head>-->", Token::Comment("<head>", 0..13));
test!(comment_03, "<!--<!-x-->", Token::Comment("<!-x", 0..11));
test!(comment_04, "<!--<!x-->", Token::Comment("<!x", 0..10));
test!(comment_05, "<!--<<!x-->", Token::Comment("<<!x", 0..11));
test!(comment_06, "<!--<<!-x-->", Token::Comment("<<!-x", 0..12));
test!(comment_07, "<!--<x-->", Token::Comment("<x", 0..9));
test!(comment_08, "<!--<>-->", Token::Comment("<>", 0..9));
test!(comment_09, "<!--<-->", Token::Comment("<", 0..8));
test!(comment_10, "<!--<!-->", Token::Comment("<!", 0..9));
test!(comment_11, "<!---->", Token::Comment("", 0..7));
// Asserts that tokenizing `$text` yields an error as the very first token.
macro_rules! test_err {
    ($name:ident, $text:expr) => {
        #[test]
        fn $name() {
            let first = xml::Tokenizer::from($text).next().unwrap();
            assert!(first.is_err());
        }
    };
}
// Malformed comments: truncated input, bad terminators, embedded "--",
// and comments ending in '-' must all be rejected.
test_err!(comment_err_01, "<!----!>");
test_err!(comment_err_02, "<!----!");
test_err!(comment_err_03, "<!----");
test_err!(comment_err_04, "<!--->");
test_err!(comment_err_05, "<!-----");
test_err!(comment_err_06, "<!-->");
test_err!(comment_err_07, "<!--");
test_err!(comment_err_08, "<!--x");
test_err!(comment_err_09, "<!--<");
test_err!(comment_err_10, "<!--<!");
test_err!(comment_err_11, "<!--<!-");
test_err!(comment_err_12, "<!--<!--");
test_err!(comment_err_13, "<!--<!--!");
test_err!(comment_err_14, "<!--<!--!>");
test_err!(comment_err_15, "<!--<!---");
test_err!(comment_err_16, "<!--<!--x");
test_err!(comment_err_17, "<!--<!--x-");
test_err!(comment_err_18, "<!--<!--x--");
test_err!(comment_err_19, "<!--<!--x-->");
test_err!(comment_err_20, "<!--<!-x");
test_err!(comment_err_21, "<!--<!-x-");
test_err!(comment_err_22, "<!--<!-x--");
test_err!(comment_err_23, "<!--<!x");
test_err!(comment_err_24, "<!--<!x-");
test_err!(comment_err_25, "<!--<!x--");
test_err!(comment_err_26, "<!--<<!--x-->");
test_err!(comment_err_27, "<!--<!<!--x-->");
test_err!(comment_err_28, "<!--<!-<!--x-->");
test_err!(comment_err_29, "<!----!->");
test_err!(comment_err_30, "<!----!x>");
test_err!(comment_err_31, "<!-----x>");
test_err!(comment_err_32, "<!----->");
test_err!(comment_err_33, "<!------>");
test_err!(comment_err_34, "<!-- --->");
test_err!(comment_err_35, "<!--a--->");
| rust | Apache-2.0 | 9c8e34305723118c67d7feba95772705b2247776 | 2026-01-04T20:18:02.902370Z | false |
RazrFalcon/xmlparser | https://github.com/RazrFalcon/xmlparser/blob/9c8e34305723118c67d7feba95772705b2247776/tests/integration/document.rs | tests/integration/document.rs | use std::str;
use crate::token::*;
test!(document_01, "",);
test!(document_02, " ",);
test!(document_03, " \n\t\r ",);
// BOM
test!(
document_05,
str::from_utf8(b"\xEF\xBB\xBF<a/>").unwrap(),
Token::ElementStart("", "a", 3..5),
Token::ElementEnd(ElementEnd::Empty, 5..7)
);
test!(
document_06,
str::from_utf8(b"\xEF\xBB\xBF<?xml version='1.0'?>").unwrap(),
Token::Declaration("1.0", None, None, 3..24)
);
test!(
document_07,
"<?xml version='1.0' encoding='utf-8'?>\n<!-- comment -->\n\
<!DOCTYPE svg PUBLIC '-//W3C//DTD SVG 1.1//EN' 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'>",
Token::Declaration("1.0", Some("utf-8"), None, 0..38),
Token::Comment(" comment ", 39..55),
Token::EmptyDtd(
"svg",
Some(ExternalId::Public(
"-//W3C//DTD SVG 1.1//EN",
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"
)),
56..154
)
);
test!(
document_08,
"<?xml-stylesheet?>\n\
<!DOCTYPE svg PUBLIC '-//W3C//DTD SVG 1.1//EN' 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'>",
Token::PI("xml-stylesheet", None, 0..18),
Token::EmptyDtd(
"svg",
Some(ExternalId::Public(
"-//W3C//DTD SVG 1.1//EN",
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"
)),
19..117
)
);
test!(
document_09,
"<?xml version='1.0' encoding='utf-8'?>\n<?xml-stylesheet?>\n\
<!DOCTYPE svg PUBLIC '-//W3C//DTD SVG 1.1//EN' 'http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd'>",
Token::Declaration("1.0", Some("utf-8"), None, 0..38),
Token::PI("xml-stylesheet", None, 39..57),
Token::EmptyDtd(
"svg",
Some(ExternalId::Public(
"-//W3C//DTD SVG 1.1//EN",
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd"
)),
58..156
)
);
test!(
document_err_01,
"<![CDATA[text]]>",
Token::Error("unknown token at 1:1".to_string())
);
test!(
document_err_02,
" &www---------Ӥ+----------w-----www_",
Token::Error("unknown token at 1:2".to_string())
);
test!(
document_err_03,
"q",
Token::Error("unknown token at 1:1".to_string())
);
test!(
document_err_04,
"<!>",
Token::Error("unknown token at 1:1".to_string())
);
test!(
document_err_05,
"<!DOCTYPE greeting1><!DOCTYPE greeting2>",
Token::EmptyDtd("greeting1", None, 0..20),
Token::Error("unknown token at 1:21".to_string())
);
test!(
document_err_06,
" ",
Token::Error("unknown token at 1:1".to_string())
);
#[test]
fn parse_fragment_1() {
let s = "<p/><p/>";
let mut p = xml::Tokenizer::from_fragment(s, 0..s.len());
match p.next().unwrap().unwrap() {
xml::Token::ElementStart { local, .. } => assert_eq!(local.as_str(), "p"),
_ => panic!(),
}
match p.next().unwrap().unwrap() {
xml::Token::ElementEnd { .. } => {}
_ => panic!(),
}
match p.next().unwrap().unwrap() {
xml::Token::ElementStart { local, .. } => assert_eq!(local.as_str(), "p"),
_ => panic!(),
}
}
| rust | Apache-2.0 | 9c8e34305723118c67d7feba95772705b2247776 | 2026-01-04T20:18:02.902370Z | false |
RazrFalcon/xmlparser | https://github.com/RazrFalcon/xmlparser/blob/9c8e34305723118c67d7feba95772705b2247776/tests/integration/text.rs | tests/integration/text.rs | use crate::token::*;
test!(
text_01,
"<p>text</p>",
Token::ElementStart("", "p", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::Text("text", 3..7),
Token::ElementEnd(ElementEnd::Close("", "p"), 7..11)
);
test!(
text_02,
"<p> text </p>",
Token::ElementStart("", "p", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::Text(" text ", 3..9),
Token::ElementEnd(ElementEnd::Close("", "p"), 9..13)
);
// 欄 is EF A4 9D. And EF can be mistreated for UTF-8 BOM.
test!(
text_03,
"<p>欄</p>",
Token::ElementStart("", "p", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::Text("欄", 3..6),
Token::ElementEnd(ElementEnd::Close("", "p"), 6..10)
);
test!(
text_04,
"<p> </p>",
Token::ElementStart("", "p", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::Text(" ", 3..4),
Token::ElementEnd(ElementEnd::Close("", "p"), 4..8)
);
test!(
text_05,
"<p> \r\n\t </p>",
Token::ElementStart("", "p", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::Text(" \r\n\t ", 3..8),
Token::ElementEnd(ElementEnd::Close("", "p"), 8..12)
);
test!(
text_06,
"<p> </p>",
Token::ElementStart("", "p", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::Text(" ", 3..9),
Token::ElementEnd(ElementEnd::Close("", "p"), 9..13)
);
test!(
text_07,
"<p>]></p>",
Token::ElementStart("", "p", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::Text("]>", 3..5),
Token::ElementEnd(ElementEnd::Close("", "p"), 5..9)
);
test!(
text_err_01,
"<p>]]></p>",
Token::ElementStart("", "p", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::Error(
"invalid character data at 1:4 cause ']]>' is not allowed inside a character data"
.to_string()
)
);
test!(
text_err_02,
"<p>\u{0c}</p>",
Token::ElementStart("", "p", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::Error(
"invalid character data at 1:4 cause a non-XML character '\\u{c}' found at 1:4".to_string()
)
);
| rust | Apache-2.0 | 9c8e34305723118c67d7feba95772705b2247776 | 2026-01-04T20:18:02.902370Z | false |
RazrFalcon/xmlparser | https://github.com/RazrFalcon/xmlparser/blob/9c8e34305723118c67d7feba95772705b2247776/tests/integration/api.rs | tests/integration/api.rs | extern crate xmlparser;
use xmlparser::*;
// `gen_text_pos` reports a 1-based line/column for the current stream
// position.
#[test]
fn text_pos_1() {
    let mut s = Stream::from("text");
    s.advance(2);
    assert_eq!(s.gen_text_pos(), TextPos::new(1, 3));
}
#[test]
fn text_pos_2() {
    let mut s = Stream::from("text\ntext");
    s.advance(6);
    assert_eq!(s.gen_text_pos(), TextPos::new(2, 2));
}
// Multi-byte input: `advance` takes bytes (Cyrillic chars are 2 bytes
// each), but the reported column counts characters — 15 bytes lands at
// line 2, column 3.
#[test]
fn text_pos_3() {
    let mut s = Stream::from("текст\nтекст");
    s.advance(15);
    assert_eq!(s.gen_text_pos(), TextPos::new(2, 3));
}
// Guard against accidental size regressions of the public types; the
// limits are upper bounds, not exact sizes.
#[test]
fn token_size() {
    assert!(::std::mem::size_of::<Token>() <= 196);
}
#[test]
fn span_size() {
    assert!(::std::mem::size_of::<StrSpan>() <= 48);
}
#[test]
fn err_size_1() {
    assert!(::std::mem::size_of::<Error>() <= 64);
}
#[test]
fn err_size_2() {
    assert!(::std::mem::size_of::<StreamError>() <= 64);
}
| rust | Apache-2.0 | 9c8e34305723118c67d7feba95772705b2247776 | 2026-01-04T20:18:02.902370Z | false |
RazrFalcon/xmlparser | https://github.com/RazrFalcon/xmlparser/blob/9c8e34305723118c67d7feba95772705b2247776/tests/integration/doctype.rs | tests/integration/doctype.rs | use crate::token::*;
test!(
dtd_01,
"<!DOCTYPE greeting SYSTEM \"hello.dtd\">",
Token::EmptyDtd("greeting", Some(ExternalId::System("hello.dtd")), 0..38)
);
test!(
dtd_02,
"<!DOCTYPE greeting PUBLIC \"hello.dtd\" \"goodbye.dtd\">",
Token::EmptyDtd(
"greeting",
Some(ExternalId::Public("hello.dtd", "goodbye.dtd")),
0..52
)
);
test!(
dtd_03,
"<!DOCTYPE greeting SYSTEM 'hello.dtd'>",
Token::EmptyDtd("greeting", Some(ExternalId::System("hello.dtd")), 0..38)
);
test!(
dtd_04,
"<!DOCTYPE greeting>",
Token::EmptyDtd("greeting", None, 0..19)
);
test!(
dtd_05,
"<!DOCTYPE greeting []>",
Token::DtdStart("greeting", None, 0..20),
Token::DtdEnd(20..22)
);
test!(
dtd_06,
"<!DOCTYPE greeting><a/>",
Token::EmptyDtd("greeting", None, 0..19),
Token::ElementStart("", "a", 19..21),
Token::ElementEnd(ElementEnd::Empty, 21..23)
);
test!(
dtd_07,
"<!DOCTYPE greeting [] >",
Token::DtdStart("greeting", None, 0..20),
Token::DtdEnd(20..23)
);
test!(
dtd_08,
"<!DOCTYPE greeting [ ] >",
Token::DtdStart("greeting", None, 0..20),
Token::DtdEnd(21..24)
);
test!(
dtd_entity_01,
"<!DOCTYPE svg [
<!ENTITY ns_extend \"http://ns.adobe.com/Extensibility/1.0/\">
]>",
Token::DtdStart("svg", None, 0..15),
Token::EntityDecl(
"ns_extend",
EntityDefinition::EntityValue("http://ns.adobe.com/Extensibility/1.0/"),
20..80,
),
Token::DtdEnd(81..83)
);
test!(
dtd_entity_02,
"<!DOCTYPE svg [
<!ENTITY Pub-Status \"This is a pre-release of the
specification.\">
]>",
Token::DtdStart("svg", None, 0..15),
Token::EntityDecl(
"Pub-Status",
EntityDefinition::EntityValue("This is a pre-release of the\nspecification."),
20..86,
),
Token::DtdEnd(87..89)
);
test!(
dtd_entity_03,
"<!DOCTYPE svg [
<!ENTITY open-hatch SYSTEM \"http://www.textuality.com/boilerplate/OpenHatch.xml\">
]>",
Token::DtdStart("svg", None, 0..15),
Token::EntityDecl(
"open-hatch",
EntityDefinition::ExternalId(ExternalId::System(
"http://www.textuality.com/boilerplate/OpenHatch.xml"
)),
20..101,
),
Token::DtdEnd(102..104)
);
test!(
dtd_entity_04,
"<!DOCTYPE svg [
<!ENTITY open-hatch
PUBLIC \"-//Textuality//TEXT Standard open-hatch boilerplate//EN\"
\"http://www.textuality.com/boilerplate/OpenHatch.xml\">
]>",
Token::DtdStart("svg", None, 0..15),
Token::EntityDecl(
"open-hatch",
EntityDefinition::ExternalId(ExternalId::Public(
"-//Textuality//TEXT Standard open-hatch boilerplate//EN",
"http://www.textuality.com/boilerplate/OpenHatch.xml"
)),
20..185,
),
Token::DtdEnd(186..188)
);
// TODO: NDATA will be ignored
test!(
dtd_entity_05,
"<!DOCTYPE svg [
<!ENTITY hatch-pic SYSTEM \"../grafix/OpenHatch.gif\" NDATA gif >
]>",
Token::DtdStart("svg", None, 0..15),
Token::EntityDecl(
"hatch-pic",
EntityDefinition::ExternalId(ExternalId::System("../grafix/OpenHatch.gif")),
20..83,
),
Token::DtdEnd(84..86)
);
// TODO: unsupported data will be ignored
test!(
dtd_entity_06,
"<!DOCTYPE svg [
<!ELEMENT sgml ANY>
<!ENTITY ns_extend \"http://ns.adobe.com/Extensibility/1.0/\">
<!NOTATION example1SVG-rdf SYSTEM \"example1.svg.rdf\">
<!ATTLIST img data ENTITY #IMPLIED>
]>",
Token::DtdStart("svg", None, 0..15),
Token::EntityDecl(
"ns_extend",
EntityDefinition::EntityValue("http://ns.adobe.com/Extensibility/1.0/"),
44..104
),
Token::DtdEnd(203..205)
);
// We do not support !ELEMENT DTD token and it will be skipped.
// Previously, we were calling `Tokenizer::next` after the skip,
// which is recursive and could cause a stack overflow when there are too many sequential
// unsupported tokens.
// This tests checks that the current code do not crash with stack overflow.
#[test]
fn dtd_entity_07() {
    // 500 unsupported `<!ELEMENT>` declarations in a row: they must be
    // skipped iteratively, not via a recursive `next` call (regression
    // test for a stack overflow — see the comment above this test).
    let mut text = "<!DOCTYPE svg [\n".to_string();
    for _ in 0..500 {
        text.push_str("<!ELEMENT sgml ANY>\n");
    }
    text.push_str("]>\n");
    let mut p = xml::Tokenizer::from(text.as_str());
    assert_eq!(
        to_test_token(p.next().unwrap()),
        Token::DtdStart("svg", None, 0..15)
    );
    // 16 bytes of prologue + 500 * 20-byte declaration lines = 10016.
    assert_eq!(
        to_test_token(p.next().unwrap()),
        Token::DtdEnd(10016..10018)
    );
}
test!(
dtd_err_01,
"<!DOCTYPEEG[<!ENTITY%ETT\u{000a}SSSSSSSS<D_IDYT;->\u{000a}<",
Token::Error("invalid DTD at 1:1 cause expected space not 'E' at 1:10".to_string())
);
test!(
dtd_err_02,
"<!DOCTYPE s [<!ENTITY % name S YSTEM",
Token::DtdStart("s", None, 0..13),
Token::Error("invalid DTD entity at 1:14 cause invalid ExternalID".to_string())
);
test!(
dtd_err_03,
"<!DOCTYPE s [<!ENTITY % name B",
Token::DtdStart("s", None, 0..13),
Token::Error(
"invalid DTD entity at 1:14 cause \
expected '\"', ''', 'S', 'P' not 'B' at 1:30"
.to_string()
)
);
test!(
dtd_err_04,
"<!DOCTYPE s []",
Token::DtdStart("s", None, 0..13),
Token::Error("invalid DTD at 1:14 cause unexpected end of stream".to_string())
);
test!(
dtd_err_05,
"<!DOCTYPE s [] !",
Token::DtdStart("s", None, 0..13),
Token::Error("invalid DTD at 1:14 cause expected '>' not '!' at 1:16".to_string())
);
| rust | Apache-2.0 | 9c8e34305723118c67d7feba95772705b2247776 | 2026-01-04T20:18:02.902370Z | false |
RazrFalcon/xmlparser | https://github.com/RazrFalcon/xmlparser/blob/9c8e34305723118c67d7feba95772705b2247776/tests/integration/elements.rs | tests/integration/elements.rs | use crate::token::*;
test!(
element_01,
"<a/>",
Token::ElementStart("", "a", 0..2),
Token::ElementEnd(ElementEnd::Empty, 2..4)
);
test!(
element_02,
"<a></a>",
Token::ElementStart("", "a", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::ElementEnd(ElementEnd::Close("", "a"), 3..7)
);
test!(
element_03,
" \t <a/> \n ",
Token::ElementStart("", "a", 5..7),
Token::ElementEnd(ElementEnd::Empty, 7..9)
);
test!(
element_04,
" \t <b><a/></b> \n ",
Token::ElementStart("", "b", 5..7),
Token::ElementEnd(ElementEnd::Open, 7..8),
Token::ElementStart("", "a", 8..10),
Token::ElementEnd(ElementEnd::Empty, 10..12),
Token::ElementEnd(ElementEnd::Close("", "b"), 12..16)
);
test!(
element_06,
"<俄语 լեզու=\"ռուսերեն\">данные</俄语>",
Token::ElementStart("", "俄语", 0..7),
Token::Attribute("", "լեզու", "ռուսերեն", 8..37),
Token::ElementEnd(ElementEnd::Open, 37..38),
Token::Text("данные", 38..50),
Token::ElementEnd(ElementEnd::Close("", "俄语"), 50..59)
);
test!(
element_07,
"<svg:circle></svg:circle>",
Token::ElementStart("svg", "circle", 0..11),
Token::ElementEnd(ElementEnd::Open, 11..12),
Token::ElementEnd(ElementEnd::Close("svg", "circle"), 12..25)
);
test!(
element_08,
"<:circle/>",
Token::ElementStart("", "circle", 0..8),
Token::ElementEnd(ElementEnd::Empty, 8..10)
);
test!(
element_err_01,
"<>",
Token::Error("invalid element at 1:1 cause invalid name token".to_string())
);
test!(
element_err_02,
"</",
Token::Error("invalid element at 1:1 cause invalid name token".to_string())
);
test!(
element_err_03,
"</a",
Token::Error("invalid element at 1:1 cause invalid name token".to_string())
);
test!(
element_err_04,
"<a x='test' /",
Token::ElementStart("", "a", 0..2),
Token::Attribute("", "x", "test", 3..11),
Token::Error("invalid attribute at 1:12 cause unexpected end of stream".to_string())
);
test!(
element_err_05,
"<<",
Token::Error("invalid element at 1:1 cause invalid name token".to_string())
);
test!(
element_err_06,
"< a",
Token::Error("invalid element at 1:1 cause invalid name token".to_string())
);
test!(
element_err_07,
"< ",
Token::Error("invalid element at 1:1 cause invalid name token".to_string())
);
test!(
element_err_08,
"<	",
Token::Error("invalid element at 1:1 cause invalid name token".to_string())
);
test!(
element_err_09,
"<a></a></a>",
Token::ElementStart("", "a", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::ElementEnd(ElementEnd::Close("", "a"), 3..7),
Token::Error("unknown token at 1:8".to_string())
);
test!(
element_err_10,
"<a/><a/>",
Token::ElementStart("", "a", 0..2),
Token::ElementEnd(ElementEnd::Empty, 2..4),
Token::Error("unknown token at 1:5".to_string())
);
test!(
element_err_11,
"<a></br/></a>",
Token::ElementStart("", "a", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::Error("invalid element at 1:4 cause expected '>' not '/' at 1:8".to_string())
);
test!(
element_err_12,
"<svg:/>",
Token::Error("invalid element at 1:1 cause invalid name token".to_string())
);
test!(
element_err_13,
"\
<root>
</root>
</root>",
Token::ElementStart("", "root", 0..5),
Token::ElementEnd(ElementEnd::Open, 5..6),
Token::Text("\n", 6..7),
Token::ElementEnd(ElementEnd::Close("", "root"), 7..14),
Token::Error("unknown token at 3:1".to_string())
);
test!(
element_err_14,
"<-svg/>",
Token::Error("invalid element at 1:1 cause invalid name token".to_string())
);
test!(
element_err_15,
"<svg:-svg/>",
Token::Error("invalid element at 1:1 cause invalid name token".to_string())
);
test!(
element_err_16,
"<svg::svg/>",
Token::Error("invalid element at 1:1 cause invalid name token".to_string())
);
test!(
element_err_17,
"<svg:s:vg/>",
Token::Error("invalid element at 1:1 cause invalid name token".to_string())
);
test!(
element_err_18,
"<::svg/>",
Token::Error("invalid element at 1:1 cause invalid name token".to_string())
);
test!(
element_err_19,
"<a><",
Token::ElementStart("", "a", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::Error("unknown token at 1:4".to_string())
);
test!(
attribute_01,
"<a ax=\"test\"/>",
Token::ElementStart("", "a", 0..2),
Token::Attribute("", "ax", "test", 3..12),
Token::ElementEnd(ElementEnd::Empty, 12..14)
);
test!(
attribute_02,
"<a ax='test'/>",
Token::ElementStart("", "a", 0..2),
Token::Attribute("", "ax", "test", 3..12),
Token::ElementEnd(ElementEnd::Empty, 12..14)
);
test!(
attribute_03,
"<a b='test1' c=\"test2\"/>",
Token::ElementStart("", "a", 0..2),
Token::Attribute("", "b", "test1", 3..12),
Token::Attribute("", "c", "test2", 13..22),
Token::ElementEnd(ElementEnd::Empty, 22..24)
);
test!(
attribute_04,
"<a b='\"test1\"' c=\"'test2'\"/>",
Token::ElementStart("", "a", 0..2),
Token::Attribute("", "b", "\"test1\"", 3..14),
Token::Attribute("", "c", "'test2'", 15..26),
Token::ElementEnd(ElementEnd::Empty, 26..28)
);
test!(
attribute_05,
"<c a=\"test1' c='test2\" b='test1\" c=\"test2'/>",
Token::ElementStart("", "c", 0..2),
Token::Attribute("", "a", "test1' c='test2", 3..22),
Token::Attribute("", "b", "test1\" c=\"test2", 23..42),
Token::ElementEnd(ElementEnd::Empty, 42..44)
);
test!(
attribute_06,
"<c a = 'test1' />",
Token::ElementStart("", "c", 0..2),
Token::Attribute("", "a", "test1", 5..21),
Token::ElementEnd(ElementEnd::Empty, 26..28)
);
test!(
attribute_07,
"<c q:a='b'/>",
Token::ElementStart("", "c", 0..2),
Token::Attribute("q", "a", "b", 3..10),
Token::ElementEnd(ElementEnd::Empty, 10..12)
);
test!(
attribute_err_01,
"<c az=test>",
Token::ElementStart("", "c", 0..2),
Token::Error("invalid attribute at 1:3 cause expected quote mark not 't' at 1:7".to_string())
);
test!(
attribute_err_02,
"<c a>",
Token::ElementStart("", "c", 0..2),
Token::Error("invalid attribute at 1:3 cause expected \'=\' not \'>\' at 1:5".to_string())
);
test!(
attribute_err_03,
"<c a/>",
Token::ElementStart("", "c", 0..2),
Token::Error("invalid attribute at 1:3 cause expected '=' not '/' at 1:5".to_string())
);
test!(
attribute_err_04,
"<c a='b' q/>",
Token::ElementStart("", "c", 0..2),
Token::Attribute("", "a", "b", 3..8),
Token::Error("invalid attribute at 1:9 cause expected '=' not '/' at 1:11".to_string())
);
test!(
attribute_err_05,
"<c a='<'/>",
Token::ElementStart("", "c", 0..2),
Token::Error("invalid attribute at 1:3 cause expected ''' not '<' at 1:7".to_string())
);
test!(
attribute_err_06,
"<c a='\u{1}'/>",
Token::ElementStart("", "c", 0..2),
Token::Error(
"invalid attribute at 1:3 cause a non-XML character '\\u{1}' found at 1:7".to_string()
)
);
test!(
attribute_err_07,
"<c a='v'b='v'/>",
Token::ElementStart("", "c", 0..2),
Token::Attribute("", "a", "v", 3..8),
Token::Error("invalid attribute at 1:9 cause expected space not 'b' at 1:9".to_string())
);
| rust | Apache-2.0 | 9c8e34305723118c67d7feba95772705b2247776 | 2026-01-04T20:18:02.902370Z | false |
RazrFalcon/xmlparser | https://github.com/RazrFalcon/xmlparser/blob/9c8e34305723118c67d7feba95772705b2247776/tests/integration/pi.rs | tests/integration/pi.rs | use crate::token::*;
test!(pi_01, "<?xslt ma?>", Token::PI("xslt", Some("ma"), 0..11));
test!(
pi_02,
"<?xslt \t\n m?>",
Token::PI("xslt", Some("m"), 0..13)
);
test!(pi_03, "<?xslt?>", Token::PI("xslt", None, 0..8));
test!(pi_04, "<?xslt ?>", Token::PI("xslt", None, 0..9));
test!(
pi_05,
"<?xml-stylesheet?>",
Token::PI("xml-stylesheet", None, 0..18)
);
test!(
pi_err_01,
"<??xml \t\n m?>",
Token::Error("invalid processing instruction at 1:1 cause invalid name token".to_string())
);
test!(
declaration_01,
"<?xml version=\"1.0\"?>",
Token::Declaration("1.0", None, None, 0..21)
);
test!(
declaration_02,
"<?xml version='1.0'?>",
Token::Declaration("1.0", None, None, 0..21)
);
test!(
declaration_03,
"<?xml version='1.0' encoding=\"UTF-8\"?>",
Token::Declaration("1.0", Some("UTF-8"), None, 0..38)
);
test!(
declaration_04,
"<?xml version='1.0' encoding='UTF-8'?>",
Token::Declaration("1.0", Some("UTF-8"), None, 0..38)
);
test!(
declaration_05,
"<?xml version='1.0' encoding='utf-8'?>",
Token::Declaration("1.0", Some("utf-8"), None, 0..38)
);
test!(
declaration_06,
"<?xml version='1.0' encoding='EUC-JP'?>",
Token::Declaration("1.0", Some("EUC-JP"), None, 0..39)
);
test!(
declaration_07,
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>",
Token::Declaration("1.0", Some("UTF-8"), Some(true), 0..55)
);
test!(
declaration_08,
"<?xml version='1.0' encoding='UTF-8' standalone='no'?>",
Token::Declaration("1.0", Some("UTF-8"), Some(false), 0..54)
);
test!(
declaration_09,
"<?xml version='1.0' standalone='no'?>",
Token::Declaration("1.0", None, Some(false), 0..37)
);
test!(
declaration_10,
"<?xml version='1.0' standalone='no' ?>",
Token::Declaration("1.0", None, Some(false), 0..38)
);
// Declaration with an invalid order
test!(
declaration_err_01,
"<?xml encoding='UTF-8' version='1.0'?>",
Token::Error("invalid XML declaration at 1:1 cause expected 'version' at 1:7".to_string())
);
test!(
declaration_err_02,
"<?xml version='1.0' encoding='*invalid*'?>",
Token::Error("invalid XML declaration at 1:1 cause expected '\'' not '*' at 1:31".to_string())
);
test!(
declaration_err_03,
"<?xml version='2.0'?>",
Token::Error("invalid XML declaration at 1:1 cause expected '1.' at 1:16".to_string())
);
test!(
declaration_err_04,
"<?xml version='1.0' standalone='true'?>",
Token::Error("invalid XML declaration at 1:1 cause expected 'yes', 'no' at 1:33".to_string())
);
test!(
declaration_err_05,
"<?xml version='1.0' yes='true'?>",
Token::Error("invalid XML declaration at 1:1 cause expected '?>' at 1:21".to_string())
);
test!(
declaration_err_06,
"<?xml version='1.0' encoding='UTF-8' standalone='yes' yes='true'?>",
Token::Error("invalid XML declaration at 1:1 cause expected '?>' at 1:55".to_string())
);
test!(
declaration_err_07,
"\u{000a}<?xml\u{000a}&jg'];",
Token::Error("invalid processing instruction at 2:1 cause expected '?>' at 3:7".to_string())
);
test!(
declaration_err_08,
"<?xml \t\n ?m?>",
Token::Error("invalid XML declaration at 1:1 cause expected 'version' at 2:2".to_string())
);
test!(
declaration_err_09,
"<?xml \t\n m?>",
Token::Error("invalid XML declaration at 1:1 cause expected 'version' at 2:2".to_string())
);
// XML declaration allowed only at the start of the document.
test!(
declaration_err_10,
" <?xml version='1.0'?>",
Token::Error("unknown token at 1:2".to_string())
);
// XML declaration allowed only at the start of the document.
test!(
declaration_err_11,
"<!-- comment --><?xml version='1.0'?>",
Token::Comment(" comment ", 0..16),
Token::Error("unknown token at 1:17".to_string())
);
// Duplicate.
test!(
declaration_err_12,
"<?xml version='1.0'?><?xml version='1.0'?>",
Token::Declaration("1.0", None, None, 0..21),
Token::Error("unknown token at 1:22".to_string())
);
test!(
declaration_err_13,
"<?target \u{1}content>",
Token::Error(
"invalid processing instruction at 1:1 cause a non-XML character '\\u{1}' found at 1:10"
.to_string()
)
);
test!(
declaration_err_14,
"<?xml version='1.0'encoding='UTF-8'?>",
Token::Error("invalid XML declaration at 1:1 cause expected space not 'e' at 1:20".to_string())
);
test!(
declaration_err_15,
"<?xml version='1.0' encoding='UTF-8'standalone='yes'?>",
Token::Error("invalid XML declaration at 1:1 cause expected space not 's' at 1:37".to_string())
);
test!(
declaration_err_16,
"<?xml version='1.0'",
Token::Error("invalid XML declaration at 1:1 cause expected '?>' at 1:20".to_string())
);
| rust | Apache-2.0 | 9c8e34305723118c67d7feba95772705b2247776 | 2026-01-04T20:18:02.902370Z | false |
RazrFalcon/xmlparser | https://github.com/RazrFalcon/xmlparser/blob/9c8e34305723118c67d7feba95772705b2247776/tests/integration/main.rs | tests/integration/main.rs | extern crate xmlparser as xml;
#[macro_use]
mod token;
mod api;
mod cdata;
mod comments;
mod doctype;
mod document;
mod elements;
mod pi;
mod text;
| rust | Apache-2.0 | 9c8e34305723118c67d7feba95772705b2247776 | 2026-01-04T20:18:02.902370Z | false |
RazrFalcon/xmlparser | https://github.com/RazrFalcon/xmlparser/blob/9c8e34305723118c67d7feba95772705b2247776/tests/integration/cdata.rs | tests/integration/cdata.rs | extern crate xmlparser as xml;
use crate::token::*;
test!(
cdata_01,
"<p><![CDATA[content]]></p>",
Token::ElementStart("", "p", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::Cdata("content", 3..22),
Token::ElementEnd(ElementEnd::Close("", "p"), 22..26)
);
test!(
cdata_02,
"<p><![CDATA[&ing]]></p>",
Token::ElementStart("", "p", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::Cdata("&ing", 3..22),
Token::ElementEnd(ElementEnd::Close("", "p"), 22..26)
);
test!(
cdata_03,
"<p><![CDATA[&ing ]]]></p>",
Token::ElementStart("", "p", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::Cdata("&ing ]", 3..24),
Token::ElementEnd(ElementEnd::Close("", "p"), 24..28)
);
test!(
cdata_04,
"<p><![CDATA[&ing]] ]]></p>",
Token::ElementStart("", "p", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::Cdata("&ing]] ", 3..25),
Token::ElementEnd(ElementEnd::Close("", "p"), 25..29)
);
test!(
cdata_05,
"<p><![CDATA[<message>text</message>]]></p>",
Token::ElementStart("", "p", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::Cdata("<message>text</message>", 3..38),
Token::ElementEnd(ElementEnd::Close("", "p"), 38..42)
);
test!(
cdata_06,
"<p><![CDATA[</this is malformed!</malformed</malformed & worse>]]></p>",
Token::ElementStart("", "p", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::Cdata("</this is malformed!</malformed</malformed & worse>", 3..66),
Token::ElementEnd(ElementEnd::Close("", "p"), 66..70)
);
test!(
cdata_07,
"<p><![CDATA[1]]><![CDATA[2]]></p>",
Token::ElementStart("", "p", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::Cdata("1", 3..16),
Token::Cdata("2", 16..29),
Token::ElementEnd(ElementEnd::Close("", "p"), 29..33)
);
test!(
cdata_08,
"<p> \n <![CDATA[data]]> \t </p>",
Token::ElementStart("", "p", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::Text(" \n ", 3..6),
Token::Cdata("data", 6..22),
Token::Text(" \t ", 22..25),
Token::ElementEnd(ElementEnd::Close("", "p"), 25..29)
);
test!(
cdata_09,
"<p><![CDATA[bracket ]after]]></p>",
Token::ElementStart("", "p", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::Cdata("bracket ]after", 3..29),
Token::ElementEnd(ElementEnd::Close("", "p"), 29..33)
);
test!(
cdata_err_01,
"<p><![CDATA[\u{1}]]></p>",
Token::ElementStart("", "p", 0..2),
Token::ElementEnd(ElementEnd::Open, 2..3),
Token::Error(
"invalid CDATA at 1:4 cause a non-XML character '\\u{1}' found at 1:13".to_string()
)
);
| rust | Apache-2.0 | 9c8e34305723118c67d7feba95772705b2247776 | 2026-01-04T20:18:02.902370Z | false |
RazrFalcon/xmlparser | https://github.com/RazrFalcon/xmlparser/blob/9c8e34305723118c67d7feba95772705b2247776/tests/integration/token.rs | tests/integration/token.rs | type Range = ::std::ops::Range<usize>;
/// Simplified mirror of `xml::Token` that borrows plain `&str` slices
/// and byte ranges, so test expectations can be written as literals and
/// compared with `assert_eq!`.
#[derive(PartialEq, Debug)]
pub enum Token<'a> {
    Declaration(&'a str, Option<&'a str>, Option<bool>, Range),
    PI(&'a str, Option<&'a str>, Range),
    Comment(&'a str, Range),
    DtdStart(&'a str, Option<ExternalId<'a>>, Range),
    EmptyDtd(&'a str, Option<ExternalId<'a>>, Range),
    EntityDecl(&'a str, EntityDefinition<'a>, Range),
    DtdEnd(Range),
    ElementStart(&'a str, &'a str, Range),
    Attribute(&'a str, &'a str, &'a str, Range),
    ElementEnd(ElementEnd<'a>, Range),
    Text(&'a str, Range),
    Cdata(&'a str, Range),
    // Errors are compared via their display strings.
    Error(String),
}
/// Mirror of `xml::ElementEnd` with `(prefix, local)` as plain slices.
#[derive(PartialEq, Debug)]
pub enum ElementEnd<'a> {
    Open,
    Close(&'a str, &'a str),
    Empty,
}
/// Mirror of `xml::ExternalId` (SYSTEM / PUBLIC identifiers).
#[derive(PartialEq, Debug)]
pub enum ExternalId<'a> {
    System(&'a str),
    Public(&'a str, &'a str),
}
/// Mirror of `xml::EntityDefinition`.
#[derive(PartialEq, Debug)]
pub enum EntityDefinition<'a> {
    EntityValue(&'a str),
    ExternalId(ExternalId<'a>),
}
/// Generates a test that tokenizes `$text` and asserts the exact
/// sequence of produced tokens (converted via `to_test_token`),
/// followed by end of input.
#[macro_export]
macro_rules! test {
    ($name:ident, $text:expr, $($token:expr),*) => (
        #[test]
        fn $name() {
            let mut p = xml::Tokenizer::from($text);
            $(
                let t = p.next().unwrap();
                assert_eq!(to_test_token(t), $token);
            )*
            assert!(p.next().is_none());
        }
    )
}
// `inline(never)` keeps this conversion out of every generated test fn,
// which keeps test binaries small and backtraces readable.
#[inline(never)]
/// Converts a real tokenizer result into the simplified test [`Token`],
/// flattening spans into byte ranges and errors into display strings.
pub fn to_test_token(token: Result<xml::Token, xml::Error>) -> Token {
    match token {
        Ok(xml::Token::Declaration {
            version,
            encoding,
            standalone,
            span,
        }) => Token::Declaration(
            version.as_str(),
            encoding.map(|v| v.as_str()),
            standalone,
            span.range(),
        ),
        Ok(xml::Token::ProcessingInstruction {
            target,
            content,
            span,
        }) => Token::PI(target.as_str(), content.map(|v| v.as_str()), span.range()),
        Ok(xml::Token::Comment { text, span }) => Token::Comment(text.as_str(), span.range()),
        Ok(xml::Token::DtdStart {
            name,
            external_id,
            span,
        }) => Token::DtdStart(
            name.as_str(),
            external_id.map(|v| to_test_external_id(v)),
            span.range(),
        ),
        Ok(xml::Token::EmptyDtd {
            name,
            external_id,
            span,
        }) => Token::EmptyDtd(
            name.as_str(),
            external_id.map(|v| to_test_external_id(v)),
            span.range(),
        ),
        Ok(xml::Token::EntityDeclaration {
            name,
            definition,
            span,
        }) => Token::EntityDecl(
            name.as_str(),
            match definition {
                xml::EntityDefinition::EntityValue(name) => {
                    EntityDefinition::EntityValue(name.as_str())
                }
                xml::EntityDefinition::ExternalId(id) => {
                    EntityDefinition::ExternalId(to_test_external_id(id))
                }
            },
            span.range(),
        ),
        Ok(xml::Token::DtdEnd { span }) => Token::DtdEnd(span.range()),
        Ok(xml::Token::ElementStart {
            prefix,
            local,
            span,
        }) => Token::ElementStart(prefix.as_str(), local.as_str(), span.range()),
        Ok(xml::Token::Attribute {
            prefix,
            local,
            value,
            span,
        }) => Token::Attribute(
            prefix.as_str(),
            local.as_str(),
            value.as_str(),
            span.range(),
        ),
        Ok(xml::Token::ElementEnd { end, span }) => Token::ElementEnd(
            match end {
                xml::ElementEnd::Open => ElementEnd::Open,
                xml::ElementEnd::Close(prefix, local) => {
                    ElementEnd::Close(prefix.as_str(), local.as_str())
                }
                xml::ElementEnd::Empty => ElementEnd::Empty,
            },
            span.range(),
        ),
        // `Text` has no separate span: the text slice itself carries it.
        Ok(xml::Token::Text { text }) => Token::Text(text.as_str(), text.range()),
        Ok(xml::Token::Cdata { text, span }) => Token::Cdata(text.as_str(), span.range()),
        Err(ref e) => Token::Error(e.to_string()),
    }
}
/// Maps the tokenizer's `ExternalId` into the simplified test variant.
fn to_test_external_id(id: xml::ExternalId) -> ExternalId {
    match id {
        xml::ExternalId::System(system) => ExternalId::System(system.as_str()),
        xml::ExternalId::Public(public, system) => {
            ExternalId::Public(public.as_str(), system.as_str())
        }
    }
}
| rust | Apache-2.0 | 9c8e34305723118c67d7feba95772705b2247776 | 2026-01-04T20:18:02.902370Z | false |
RazrFalcon/xmlparser | https://github.com/RazrFalcon/xmlparser/blob/9c8e34305723118c67d7feba95772705b2247776/fuzz/fuzz_targets/fuzz_xml.rs | fuzz/fuzz_targets/fuzz_xml.rs | #![no_main]
#[macro_use] extern crate libfuzzer_sys;
extern crate xmlparser;
use std::str;
// Fuzz entry point: only valid UTF-8 inputs are tokenized, and the
// token count is capped so a tokenizer that never terminates is
// reported as a crash instead of hanging the fuzzer.
fuzz_target!(|data: &[u8]| {
    if let Ok(text) = str::from_utf8(data) {
        let mut n = 0;
        for _ in xmlparser::Tokenizer::from(text) {
            n += 1;
            if n == 1000 {
                panic!("endless loop");
            }
        }
    }
});
| rust | Apache-2.0 | 9c8e34305723118c67d7feba95772705b2247776 | 2026-01-04T20:18:02.902370Z | false |
RazrFalcon/xmlparser | https://github.com/RazrFalcon/xmlparser/blob/9c8e34305723118c67d7feba95772705b2247776/examples/parse.rs | examples/parse.rs | extern crate xmlparser as xml;
use std::env;
use std::fs;
use std::io::Read;
fn main() {
    // Expect exactly one argument after the program name: the XML file.
    let mut args = env::args().skip(1);
    let path = match (args.next(), args.next()) {
        (Some(path), None) => path,
        _ => {
            println!("Usage: parse file.xml");
            return;
        }
    };
    let text = load_file(&path);
    if let Err(e) = parse(&text) {
        println!("Error: {}.", e);
    }
}
/// Prints every token in `text`, stopping at the first tokenizer error.
fn parse(text: &str) -> Result<(), xml::Error> {
    xml::Tokenizer::from(text).try_for_each(|token| {
        println!("{:?}", token?);
        Ok(())
    })
}
/// Reads the whole file at `path` into a `String`.
///
/// # Panics
/// Panics with the offending path and the underlying I/O error when the
/// file cannot be opened, read, or is not valid UTF-8 (the example is
/// intentionally fail-fast).
fn load_file(path: &str) -> String {
    // `fs::read_to_string` sizes the buffer from file metadata and
    // replaces the manual open + read_to_string dance; the panic message
    // names the file instead of a bare `unwrap` backtrace.
    fs::read_to_string(path).unwrap_or_else(|e| panic!("failed to read {}: {}", path, e))
}
| rust | Apache-2.0 | 9c8e34305723118c67d7feba95772705b2247776 | 2026-01-04T20:18:02.902370Z | false |
RazrFalcon/xmlparser | https://github.com/RazrFalcon/xmlparser/blob/9c8e34305723118c67d7feba95772705b2247776/afl-fuzz/src/main.rs | afl-fuzz/src/main.rs | extern crate afl;
extern crate xmlparser;
use std::str;
use afl::fuzz;
// AFL harness: feed arbitrary bytes to the tokenizer. Only valid UTF-8
// inputs are parsed; all tokens are drained so the whole input is
// exercised, and any panic is reported by AFL as a crash.
fn main() {
    fuzz!(|data: &[u8]| {
        if let Ok(text) = str::from_utf8(data) {
            for _ in xmlparser::Tokenizer::from(text) {}
        }
    });
}
| rust | Apache-2.0 | 9c8e34305723118c67d7feba95772705b2247776 | 2026-01-04T20:18:02.902370Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/discovery/src/location_service.rs | crates/discovery/src/location_service.rs | use anyhow::{Context, Result};
use reqwest::Client;
use serde::{Deserialize, Serialize};
use shared::models::node::NodeLocation;
use std::time::Duration;
/// Subset of the ipapi.co-style JSON response used to build a
/// `NodeLocation`.
#[derive(Debug, Deserialize, Serialize)]
struct IpApiResponse {
    ip: String,
    city: Option<String>,
    region: Option<String>,
    country: Option<String>,
    // Coordinates fall back to 0.0 when the provider omits them.
    #[serde(default)]
    latitude: f64,
    #[serde(default)]
    longitude: f64,
}
/// Resolves IP addresses to geographic locations via an ipapi.co-style
/// HTTP endpoint. When constructed without a base URL the service is
/// disabled and every lookup returns `None`.
pub struct LocationService {
    client: Client,
    base_url: String,
    enabled: bool,
    api_key: String,
}
impl LocationService {
    /// Creates a new location service.
    ///
    /// The service is enabled only when `base_url` is `Some`; with `None`
    /// the default URL is stored as a placeholder and `get_location`
    /// always short-circuits to `Ok(None)`.
    ///
    /// # Panics
    /// Panics if the HTTP client cannot be built (effectively never with
    /// these builder options).
    pub fn new(base_url: Option<String>, api_key: Option<String>) -> Self {
        let enabled = base_url.is_some();
        let base_url = base_url.unwrap_or_else(|| "https://ipapi.co".to_string());
        let api_key = api_key.unwrap_or_default();
        // 5 s timeout keeps a slow geo provider from stalling callers.
        let client = Client::builder()
            .timeout(Duration::from_secs(5))
            .build()
            .expect("Failed to build HTTP client");
        Self {
            client,
            base_url,
            enabled,
            api_key,
        }
    }

    /// Looks up the geographic location of `ip_address`.
    ///
    /// Returns `Ok(None)` when the service is disabled; otherwise queries
    /// `{base_url}/{ip}/json/` and maps the response into a `NodeLocation`.
    ///
    /// # Errors
    /// Fails when the request cannot be sent, the provider answers with a
    /// non-success HTTP status, or the body cannot be parsed.
    pub async fn get_location(&self, ip_address: &str) -> Result<Option<NodeLocation>> {
        if !self.enabled {
            return Ok(None);
        }
        let url = format!(
            "{}/{}/json/?key={}",
            self.base_url, ip_address, self.api_key
        );
        let response = self
            .client
            .get(&url)
            .send()
            .await
            .context("Failed to send request to location service")?
            // Surface HTTP-level failures (e.g. 429 rate limits, 5xx)
            // directly instead of letting them show up later as a
            // confusing JSON parse error.
            .error_for_status()
            .context("Location service returned an error status")?;
        let api_response: IpApiResponse = response
            .json()
            .await
            .context("Failed to parse location service response")?;
        Ok(Some(NodeLocation {
            latitude: api_response.latitude,
            longitude: api_response.longitude,
            city: api_response.city,
            region: api_response.region,
            country: api_response.country,
        }))
    }
}
impl Default for LocationService {
fn default() -> Self {
Self::new(None, None)
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/discovery/src/lib.rs | crates/discovery/src/lib.rs | mod api;
mod chainsync;
mod location_enrichment;
mod location_service;
mod store;
pub use api::server::start_server;
pub use chainsync::ChainSync;
pub use location_enrichment::LocationEnrichmentService;
pub use location_service::LocationService;
pub use store::node_store::NodeStore;
pub use store::redis::RedisStore;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/discovery/src/location_enrichment.rs | crates/discovery/src/location_enrichment.rs | use crate::location_service::LocationService;
use crate::store::node_store::NodeStore;
use anyhow::Result;
use log::{error, info, warn};
use redis::AsyncCommands;
use std::sync::Arc;
use std::time::Duration;
use tokio::time::interval;
const LOCATION_RETRY_KEY: &str = "location:retries:";
const MAX_RETRIES: u32 = 3;
const BATCH_SIZE: usize = 10;
pub struct LocationEnrichmentService {
node_store: Arc<NodeStore>,
location_service: Arc<LocationService>,
redis_client: redis::Client,
}
impl LocationEnrichmentService {
pub fn new(
node_store: Arc<NodeStore>,
location_service: Arc<LocationService>,
redis_url: &str,
) -> Result<Self> {
let redis_client = redis::Client::open(redis_url)?;
Ok(Self {
node_store,
location_service,
redis_client,
})
}
pub async fn run(&self, interval_seconds: u64) -> Result<()> {
let mut interval = interval(Duration::from_secs(interval_seconds));
loop {
interval.tick().await;
if let Err(e) = self.enrich_nodes_without_location().await {
error!("Location enrichment cycle failed: {e}");
}
}
}
async fn enrich_nodes_without_location(&self) -> Result<()> {
let nodes = self.node_store.get_nodes().await?;
let mut conn = self.redis_client.get_multiplexed_async_connection().await?;
let nodes_without_location: Vec<_> = nodes
.into_iter()
.filter(|node| node.location.is_none())
.collect();
if nodes_without_location.is_empty() {
return Ok(());
}
info!(
"Found {} nodes without location data",
nodes_without_location.len()
);
// Process in batches to respect rate limits
for chunk in nodes_without_location.chunks(BATCH_SIZE) {
for node in chunk {
let retry_key = format!("{}{}", LOCATION_RETRY_KEY, node.id);
let retries: u32 = conn.get(&retry_key).await.unwrap_or(0);
if retries >= MAX_RETRIES {
continue; // Skip nodes that have exceeded retry limit
}
match self.location_service.get_location(&node.ip_address).await {
Ok(Some(location)) => {
info!(
"Successfully fetched location for node {}: {:?}",
node.id, location
);
let mut updated_node = node.clone();
updated_node.location = Some(location);
if let Err(e) = self.node_store.update_node(updated_node).await {
error!("Failed to update node {} with location: {}", node.id, e);
} else {
let _: () = conn.del(&retry_key).await?;
}
}
Ok(None) => {
// Location service is disabled
break;
}
Err(e) => {
warn!(
"Failed to fetch location for node {} (attempt {}/{}): {}",
node.id,
retries + 1,
MAX_RETRIES,
e
);
// Increment retry counter
let _: () = conn.set_ex(&retry_key, retries + 1, 86400).await?;
// Expire after 24h
}
}
// Rate limiting - wait between requests
tokio::time::sleep(Duration::from_millis(100)).await;
}
// Longer wait between batches
tokio::time::sleep(Duration::from_secs(1)).await;
}
Ok(())
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/discovery/src/main.rs | crates/discovery/src/main.rs | use alloy::providers::RootProvider;
use anyhow::Result;
use clap::Parser;
use log::LevelFilter;
use log::{error, info};
use shared::web3::contracts::core::builder::ContractBuilder;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::Mutex;
use tokio_util::sync::CancellationToken;
use discovery::{
start_server, ChainSync, LocationEnrichmentService, LocationService, NodeStore, RedisStore,
};
#[derive(Debug, Clone, Copy, PartialEq)]
enum ServiceMode {
Api,
Processor,
Full,
}
impl std::str::FromStr for ServiceMode {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.to_lowercase().as_str() {
"api" => Ok(ServiceMode::Api),
"processor" => Ok(ServiceMode::Processor),
"full" => Ok(ServiceMode::Full),
_ => Err(format!(
"Invalid mode: {s}. Use 'api', 'processor', or 'full'"
)),
}
}
}
#[derive(Parser)]
struct Args {
/// RPC URL
#[arg(short = 'r', long, default_value = "http://localhost:8545")]
rpc_url: String,
/// Platform API key
#[arg(short = 'p', long, default_value = "prime")]
platform_api_key: String,
/// Redis URL
#[arg(long, default_value = "redis://localhost:6380")]
redis_url: String,
/// Port
#[arg(short = 'P', long, default_value = "8089")]
port: u16,
/// Maximum number of nodes allowed per IP address (active state)
#[arg(long, default_value = "1")]
max_nodes_per_ip: u32,
/// Service mode: api, processor, or full
#[arg(short = 'm', long, default_value = "full")]
mode: ServiceMode,
/// Location service URL (e.g., https://ipapi.co). If not provided, location services are disabled.
#[arg(long)]
location_service_url: Option<String>,
/// Location service API key
#[arg(long)]
location_service_api_key: Option<String>,
}
#[tokio::main]
async fn main() -> Result<()> {
env_logger::Builder::new()
.filter_level(LevelFilter::Info)
.format_timestamp(None)
.init();
let args = Args::parse();
let redis_store = Arc::new(RedisStore::new(&args.redis_url));
let node_store = Arc::new(NodeStore::new(redis_store.as_ref().clone()));
let Ok(endpoint) = args.rpc_url.parse() else {
return Err(anyhow::anyhow!("invalid RPC URL: {}", args.rpc_url));
};
let provider = RootProvider::new_http(endpoint);
let contracts = ContractBuilder::new(provider.clone())
.with_compute_registry()
.with_ai_token()
.with_prime_network()
.with_compute_pool()
.with_stake_manager()
.build()
.unwrap();
let cancellation_token = CancellationToken::new();
let last_chain_sync = Arc::new(Mutex::new(None::<std::time::SystemTime>));
info!("Starting discovery service in {:?} mode", args.mode);
match args.mode {
ServiceMode::Processor | ServiceMode::Full => {
let chain_sync = ChainSync::new(
node_store.clone(),
cancellation_token.clone(),
Duration::from_secs(10),
provider,
contracts.clone(),
last_chain_sync.clone(),
);
chain_sync.run()?;
// Start location enrichment service if enabled
if let Some(location_url) = args.location_service_url.clone() {
let location_service = Arc::new(LocationService::new(
Some(location_url),
args.location_service_api_key.clone(),
));
let location_enrichment = LocationEnrichmentService::new(
node_store.clone(),
location_service,
&args.redis_url,
)?;
info!("Starting location enrichment service");
tokio::spawn(async move {
if let Err(e) = location_enrichment.run(30).await {
error!("Location enrichment service failed: {e}");
}
});
}
if let Err(err) = start_server(
"0.0.0.0",
args.port,
node_store,
redis_store,
contracts,
args.platform_api_key,
last_chain_sync,
args.max_nodes_per_ip,
true,
)
.await
{
error!("❌ Failed to start server: {err}");
}
tokio::signal::ctrl_c().await?;
cancellation_token.cancel();
}
ServiceMode::Api => {
if let Err(err) = start_server(
"0.0.0.0",
args.port,
node_store,
redis_store,
contracts,
args.platform_api_key,
last_chain_sync,
args.max_nodes_per_ip,
false,
)
.await
{
error!("❌ Failed to start server: {err}");
}
}
}
Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/discovery/src/store/node_store.rs | crates/discovery/src/store/node_store.rs | use crate::store::redis::RedisStore;
use anyhow::Error;
use log::error;
use redis::AsyncCommands;
use shared::models::node::{DiscoveryNode, Node};
pub struct NodeStore {
redis_store: RedisStore,
}
impl NodeStore {
pub fn new(redis_store: RedisStore) -> Self {
Self { redis_store }
}
async fn get_connection(&self) -> Result<redis::aio::MultiplexedConnection, redis::RedisError> {
self.redis_store
.client
.get_multiplexed_async_connection()
.await
}
pub async fn get_node(&self, address: String) -> Result<Option<DiscoveryNode>, Error> {
let key = format!("node:{address}");
let mut con = self.get_connection().await?;
let node: Option<String> = con.get(&key).await?;
let node = match node {
Some(node) => serde_json::from_str(&node),
None => Ok(None),
}?;
Ok(node)
}
pub async fn get_active_node_by_ip(&self, ip: String) -> Result<Option<DiscoveryNode>, Error> {
let mut con = self.get_connection().await?;
let node_ids: Vec<String> = con.smembers("node:ids").await?;
if node_ids.is_empty() {
return Ok(None);
}
let node_keys: Vec<String> = node_ids.iter().map(|id| format!("node:{id}")).collect();
let serialized_nodes: Vec<String> =
redis::pipe().get(&node_keys).query_async(&mut con).await?;
for serialized_node in serialized_nodes {
let deserialized_node: DiscoveryNode = serde_json::from_str(&serialized_node)?;
if deserialized_node.ip_address == ip && deserialized_node.is_active {
return Ok(Some(deserialized_node));
}
}
Ok(None)
}
pub async fn count_active_nodes_by_ip(&self, ip: String) -> Result<u32, Error> {
let mut con = self.get_connection().await?;
let node_ids: Vec<String> = con.smembers("node:ids").await?;
if node_ids.is_empty() {
return Ok(0);
}
let node_keys: Vec<String> = node_ids.iter().map(|id| format!("node:{id}")).collect();
let mut count = 0;
for key in node_keys {
let serialized_node: Option<String> = con.get(&key).await?;
if let Some(serialized_node) = serialized_node {
let deserialized_node: DiscoveryNode = serde_json::from_str(&serialized_node)?;
if deserialized_node.ip_address == ip && deserialized_node.is_active {
count += 1;
}
}
}
Ok(count)
}
pub async fn register_node(&self, node: Node) -> Result<(), Error> {
let address = node.id.clone();
let key = format!("node:{address}");
let mut con = self.get_connection().await?;
if con.exists(&key).await? {
let existing_node = self.get_node(address.clone()).await?;
if let Some(existing_node) = existing_node {
let updated_node = existing_node.with_updated_node(node);
self.update_node(updated_node).await?;
}
} else {
let discovery_node = DiscoveryNode::from(node);
let serialized_node = serde_json::to_string(&discovery_node)?;
let _: () = redis::pipe()
.atomic()
.set(&key, serialized_node)
.sadd("node:ids", &address)
.query_async(&mut con)
.await?;
}
Ok(())
}
pub async fn update_node(&self, node: DiscoveryNode) -> Result<(), Error> {
let mut con = self.get_connection().await?;
let address = node.id.clone();
let key = format!("node:{address}");
let serialized_node = serde_json::to_string(&node)?;
let _: () = redis::pipe()
.atomic()
.set(&key, serialized_node)
.sadd("node:ids", &address)
.query_async(&mut con)
.await?;
Ok(())
}
pub async fn get_nodes(&self) -> Result<Vec<DiscoveryNode>, Error> {
let mut con = self.get_connection().await?;
let node_ids: Vec<String> = con.smembers("node:ids").await?;
if node_ids.is_empty() {
return Ok(Vec::new());
}
let node_keys: Vec<String> = node_ids.iter().map(|id| format!("node:{id}")).collect();
let mut pipe = redis::pipe();
for key in &node_keys {
pipe.get(key);
}
let serialized_nodes: Result<Vec<String>, redis::RedisError> =
pipe.query_async(&mut con).await;
let serialized_nodes = match serialized_nodes {
Ok(nodes) => nodes,
Err(e) => {
error!("Error querying nodes from Redis: {e}");
return Err(e.into());
}
};
let nodes_vec: Result<Vec<DiscoveryNode>, _> = serialized_nodes
.into_iter()
.map(|serialized_node| serde_json::from_str(&serialized_node))
.collect();
let mut nodes_vec = nodes_vec?;
nodes_vec.sort_by(|a, b| {
let a_time = a.last_updated.or(a.created_at);
let b_time = b.last_updated.or(b.created_at);
b_time.cmp(&a_time)
});
Ok(nodes_vec)
}
pub async fn get_node_by_id(&self, node_id: &str) -> Result<Option<DiscoveryNode>, Error> {
let mut con = self.get_connection().await?;
let key = format!("node:{node_id}");
let serialized_node: Option<String> = con.get(&key).await?;
let serialized_node = match serialized_node {
Some(node_str) => serde_json::from_str(&node_str),
None => Ok(None),
}?;
Ok(serialized_node)
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/discovery/src/store/mod.rs | crates/discovery/src/store/mod.rs | pub(crate) mod node_store;
pub(crate) mod redis;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/discovery/src/store/redis.rs | crates/discovery/src/store/redis.rs | #[cfg(test)]
use log::debug;
use log::info;
use redis::Client;
#[cfg(test)]
use redis_test::server::RedisServer;
#[cfg(test)]
use std::sync::Arc;
#[cfg(test)]
use std::thread;
#[cfg(test)]
use std::time::Duration;
#[derive(Clone)]
pub struct RedisStore {
pub client: Client,
#[allow(dead_code)]
#[cfg(test)]
server: Arc<RedisServer>,
}
impl RedisStore {
pub fn new(redis_url: &str) -> Self {
match Client::open(redis_url) {
Ok(client) => {
info!("Successfully connected to Redis at {redis_url}");
Self {
client,
#[cfg(test)]
server: Arc::new(RedisServer::new()),
}
}
Err(e) => {
panic!("Redis connection error: {e}");
}
}
}
#[cfg(test)]
pub fn new_test() -> Self {
let server = RedisServer::new();
// Get the server address
let (host, port) = match server.client_addr() {
redis::ConnectionAddr::Tcp(host, port) => (host.clone(), *port),
_ => panic!("Expected TCP connection"),
};
let redis_url = format!("redis://{}:{}", host, port);
debug!("Starting test Redis server at {}", redis_url);
// Add a small delay to ensure server is ready
thread::sleep(Duration::from_millis(100));
// Try to connect with retry logic
let client = loop {
if let Ok(client) = Client::open(redis_url.clone()) {
// Verify connection works
if let Ok(mut conn) = client.get_connection() {
if redis::cmd("PING").query::<String>(&mut conn).is_ok() {
break client;
}
}
}
thread::sleep(Duration::from_millis(100));
};
Self {
client,
server: Arc::new(server),
}
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/discovery/src/api/mod.rs | crates/discovery/src/api/mod.rs | pub(crate) mod routes;
pub(crate) mod server;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/discovery/src/api/server.rs | crates/discovery/src/api/server.rs | use crate::api::routes::get_nodes::{get_node_by_subkey, get_nodes, get_nodes_for_pool};
use crate::api::routes::node::node_routes;
use crate::store::node_store::NodeStore;
use crate::store::redis::RedisStore;
use actix_web::middleware::{Compress, NormalizePath, TrailingSlash};
use actix_web::HttpResponse;
use actix_web::{
middleware,
web::Data,
web::{self, get},
App, HttpServer,
};
use alloy::providers::RootProvider;
use log::{error, info, warn};
use serde_json::json;
use shared::security::api_key_middleware::ApiKeyMiddleware;
use shared::security::auth_signature_middleware::{ValidateSignature, ValidatorState};
use shared::web3::contracts::core::builder::Contracts;
use std::sync::Arc;
use std::time::{Duration, SystemTime};
use tokio::sync::Mutex;
#[derive(Clone)]
pub(crate) struct AppState {
pub node_store: Arc<NodeStore>,
pub contracts: Option<Contracts<RootProvider>>,
pub last_chain_sync: Arc<Mutex<Option<SystemTime>>>,
pub max_nodes_per_ip: u32,
pub chain_sync_enabled: bool,
}
async fn health_check(app_state: web::Data<AppState>) -> HttpResponse {
if app_state.chain_sync_enabled {
let sync_status = {
let last_sync_guard = app_state.last_chain_sync.lock().await;
match *last_sync_guard {
Some(last_sync) => {
if let Ok(elapsed) = last_sync.elapsed() {
if elapsed > Duration::from_secs(60) {
warn!(
"Health check: Chain sync is delayed. Last sync was {} seconds ago",
elapsed.as_secs()
);
Some(elapsed)
} else {
None
}
} else {
warn!("Health check: Unable to determine elapsed time since last sync");
Some(Duration::from_secs(u64::MAX))
}
}
None => {
warn!("Health check: Chain sync has not occurred yet");
Some(Duration::from_secs(u64::MAX))
}
}
};
if let Some(elapsed) = sync_status {
return HttpResponse::ServiceUnavailable().json(json!({
"status": "error",
"service": "discovery",
"message": format!("Chain sync is delayed. Last sync was {} seconds ago", elapsed.as_secs())
}));
}
}
HttpResponse::Ok().json(json!({
"status": "ok",
"service": "discovery",
"chain_sync_enabled": app_state.chain_sync_enabled
}))
}
#[allow(clippy::too_many_arguments)]
pub async fn start_server(
host: &str,
port: u16,
node_store: Arc<NodeStore>,
redis_store: Arc<RedisStore>,
contracts: Contracts<RootProvider>,
platform_api_key: String,
last_chain_sync: Arc<Mutex<Option<SystemTime>>>,
max_nodes_per_ip: u32,
chain_sync_enabled: bool,
) -> std::io::Result<()> {
info!("Starting server at http://{host}:{port}");
let validators = match contracts.prime_network.get_validator_role().await {
Ok(validators) => validators,
Err(e) => {
error!("❌ Failed to get validator role: {e}");
std::process::exit(1);
}
};
let app_state = AppState {
node_store,
contracts: Some(contracts),
last_chain_sync,
max_nodes_per_ip,
chain_sync_enabled,
};
let validator_validator = Arc::new(
ValidatorState::new(validators)
.with_redis(redis_store.client.clone())
.await
.map_err(|e| {
std::io::Error::other(format!("Failed to initialize Redis connection pool: {e}"))
})?,
);
let validate_signatures = Arc::new(
ValidatorState::new(vec![])
.with_redis(redis_store.client.clone())
.await
.map_err(|e| {
std::io::Error::other(format!("Failed to initialize Redis connection pool: {e}"))
})?
.with_validator(move |_| true),
);
let api_key_middleware = Arc::new(ApiKeyMiddleware::new(platform_api_key));
HttpServer::new(move || {
App::new()
.wrap(middleware::Logger::default())
.wrap(Compress::default())
.wrap(NormalizePath::new(TrailingSlash::Trim))
.app_data(Data::new(app_state.clone()))
.app_data(web::PayloadConfig::default().limit(2_097_152))
.route("/health", web::get().to(health_check))
.service(
web::scope("/api/platform")
.wrap(api_key_middleware.clone())
.route("", get().to(get_nodes)),
)
.service(
web::scope("/api/nodes/{node_id}")
.wrap(api_key_middleware.clone())
.route("", get().to(get_node_by_subkey)),
)
.service(
web::scope("/api/validator")
.wrap(ValidateSignature::new(validator_validator.clone()))
.route("", web::get().to(get_nodes)),
)
.service(
web::scope("/api/pool/{pool_id}")
.wrap(ValidateSignature::new(validate_signatures.clone()))
.route("", get().to(get_nodes_for_pool)),
)
.service(node_routes().wrap(ValidateSignature::new(validate_signatures.clone())))
.default_service(web::route().to(|| async {
HttpResponse::NotFound().json(json!({
"success": false,
"error": "Resource not found"
}))
}))
})
.bind((host, port))?
.run()
.await
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/discovery/src/api/routes/node.rs | crates/discovery/src/api/routes/node.rs | use crate::api::server::AppState;
use actix_web::{
web::{self, put, Data},
HttpResponse, Scope,
};
use alloy::primitives::U256;
use log::warn;
use shared::models::api::ApiResponse;
use shared::models::node::{ComputeRequirements, Node};
use std::str::FromStr;
pub(crate) async fn register_node(
node: web::Json<Node>,
data: Data<AppState>,
req: actix_web::HttpRequest,
) -> HttpResponse {
// Check for the x-address header
let address_str = match req.headers().get("x-address") {
Some(address) => match address.to_str() {
Ok(addr) => addr.to_string(),
Err(_) => {
return HttpResponse::BadRequest()
.json(ApiResponse::new(false, "Invalid x-address header"))
}
},
None => {
return HttpResponse::BadRequest()
.json(ApiResponse::new(false, "Missing x-address header"))
}
};
if address_str != node.id {
return HttpResponse::BadRequest()
.json(ApiResponse::new(false, "Invalid x-address header"));
}
let update_node = node.clone();
let existing_node = data.node_store.get_node(update_node.id.clone()).await;
if let Ok(Some(existing_node)) = existing_node {
// Node already exists - check if it's active in a pool
if existing_node.is_active {
if existing_node.node == update_node {
log::info!("Node {} is already active in a pool", update_node.id);
return HttpResponse::Ok()
.json(ApiResponse::new(true, "Node registered successfully"));
}
// Temp. adjustment: The gpu object has changed and includes a vec of indices now.
// This now causes the discovery svc to reject nodes that have just updated their software.
// This is a temporary fix to ensure the node is accepted even though the indices are different.
let mut existing_clone = existing_node.node.clone();
existing_clone.worker_p2p_id = update_node.worker_p2p_id.clone();
existing_clone.worker_p2p_addresses = update_node.worker_p2p_addresses.clone();
match &update_node.compute_specs {
Some(compute_specs) => {
if let Some(ref mut existing_compute_specs) = existing_clone.compute_specs {
match &compute_specs.gpu {
Some(gpu_specs) => {
existing_compute_specs.gpu = Some(gpu_specs.clone());
existing_compute_specs.storage_gb = compute_specs.storage_gb;
existing_compute_specs.storage_path =
compute_specs.storage_path.clone();
}
None => {
existing_compute_specs.gpu = None;
}
}
}
}
None => {
existing_clone.compute_specs = None;
}
}
if existing_clone == update_node {
log::info!("Node {} is already active in a pool", update_node.id);
return HttpResponse::Ok()
.json(ApiResponse::new(true, "Node registered successfully"));
}
warn!(
"Node {} tried to change discovery but is already active in a pool",
update_node.id
);
// Node is currently active in pool - cannot be updated
// Did the user actually change node information?
return HttpResponse::BadRequest().json(ApiResponse::new(
false,
"Node is currently active in pool - cannot be updated",
));
}
}
let active_nodes_count = data
.node_store
.count_active_nodes_by_ip(update_node.ip_address.clone())
.await;
if let Ok(count) = active_nodes_count {
let existing_node_by_ip = data
.node_store
.get_active_node_by_ip(update_node.ip_address.clone())
.await;
let is_existing_node = existing_node_by_ip
.map(|result| {
result
.map(|node| node.id == update_node.id)
.unwrap_or(false)
})
.unwrap_or(false);
let effective_count = if is_existing_node { count - 1 } else { count };
if effective_count >= data.max_nodes_per_ip {
warn!(
"Node {} registration would exceed IP limit. Current active nodes on IP {}: {}, max allowed: {}",
update_node.id, update_node.ip_address, count, data.max_nodes_per_ip
);
return HttpResponse::BadRequest().json(ApiResponse::new(
false,
&format!(
"IP address {} already has {} active nodes (max allowed: {})",
update_node.ip_address, count, data.max_nodes_per_ip
),
));
}
}
if let Some(contracts) = data.contracts.clone() {
let Ok(provider_address) = node.provider_address.parse() else {
return HttpResponse::BadRequest()
.json(ApiResponse::new(false, "Invalid provider address format"));
};
let Ok(node_id) = node.id.parse() else {
return HttpResponse::BadRequest()
.json(ApiResponse::new(false, "Invalid node ID format"));
};
if contracts
.compute_registry
.get_node(provider_address, node_id)
.await
.is_err()
{
return HttpResponse::BadRequest().json(ApiResponse::new(
false,
"Node not found in compute registry",
));
}
// Check if node meets the pool's compute requirements
match contracts
.compute_pool
.get_pool_info(U256::from(node.compute_pool_id))
.await
{
Ok(pool_info) => {
if let Ok(required_specs) = ComputeRequirements::from_str(&pool_info.pool_data_uri)
{
if let Some(ref compute_specs) = node.compute_specs {
if !compute_specs.meets(&required_specs) {
log::info!(
"Node {} does not meet compute requirements for pool {}",
node.id,
node.compute_pool_id
);
return HttpResponse::BadRequest().json(ApiResponse::new(
false,
"Node does not meet the compute requirements for this pool",
));
}
} else {
log::info!("Node specs not provided for node {}", node.id);
return HttpResponse::BadRequest().json(ApiResponse::new(
false,
"Cannot verify compute requirements: node specs not provided",
));
}
} else {
log::info!(
"Could not parse compute requirements from pool data URI: {}",
&pool_info.pool_data_uri
);
}
}
Err(e) => {
log::info!(
"Failed to get pool information for pool ID {}: {:?}",
node.compute_pool_id,
e
);
return HttpResponse::BadRequest()
.json(ApiResponse::new(false, "Failed to get pool information"));
}
}
}
let node_store = data.node_store.clone();
match node_store.register_node(node.into_inner()).await {
Ok(_) => HttpResponse::Ok().json(ApiResponse::new(true, "Node registered successfully")),
Err(_) => HttpResponse::InternalServerError()
.json(ApiResponse::new(false, "Internal server error")),
}
}
pub(crate) fn node_routes() -> Scope {
web::scope("/api/nodes").route("", put().to(register_node))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::store::node_store::NodeStore;
use crate::store::redis::RedisStore;
use actix_web::http::StatusCode;
use actix_web::test;
use actix_web::App;
use shared::models::node::{ComputeSpecs, CpuSpecs, DiscoveryNode, GpuSpecs};
use shared::security::auth_signature_middleware::{ValidateSignature, ValidatorState};
use shared::security::request_signer::sign_request_with_nonce;
use shared::web3::wallet::Wallet;
use std::sync::Arc;
use std::time::SystemTime;
use tokio::sync::Mutex;
use url::Url;
#[actix_web::test]
async fn test_register_node() {
let node = Node {
id: "0x32A8dFdA26948728e5351e61d62C190510CF1C88".to_string(),
provider_address: "0x32A8dFdA26948728e5351e61d62C190510CF1C88".to_string(),
ip_address: "127.0.0.1".to_string(),
port: 8089,
compute_pool_id: 0,
..Default::default()
};
let app_state = AppState {
node_store: Arc::new(NodeStore::new(RedisStore::new_test())),
contracts: None,
last_chain_sync: Arc::new(Mutex::new(None::<SystemTime>)),
max_nodes_per_ip: 1,
chain_sync_enabled: true,
};
let app = test::init_service(
App::new()
.app_data(Data::new(app_state.clone()))
.route("/nodes", put().to(register_node)),
)
.await;
let json = serde_json::to_value(node.clone()).unwrap();
let req = test::TestRequest::put()
.uri("/nodes")
.set_json(json)
.insert_header(("x-address", "wrong_address")) // Set header to an incorrect address
.to_request();
let resp = test::call_service(&app, req).await;
assert_eq!(resp.status(), StatusCode::BAD_REQUEST); // Expecting a Bad Request response
let body: ApiResponse<String> = test::read_body_json(resp).await;
assert!(!body.success);
assert_eq!(body.data, "Invalid x-address header"); // Expecting the appropriate error message
}
#[actix_web::test]
async fn test_register_node_already_validated() {
let private_key = "0000000000000000000000000000000000000000000000000000000000000001";
let node = Node {
id: "0x7E5F4552091A69125d5DfCb7b8C2659029395Bdf".to_string(),
provider_address: "0x32A8dFdA26948728e5351e61d62C190510CF1C88".to_string(),
ip_address: "127.0.0.1".to_string(),
port: 8089,
compute_pool_id: 0,
compute_specs: Some(ComputeSpecs {
gpu: Some(GpuSpecs {
count: Some(4),
model: Some("A100".to_string()),
memory_mb: Some(40000),
indices: Some(vec![0, 1, 2, 3]),
}),
cpu: Some(CpuSpecs {
cores: Some(16),
model: None,
}),
ram_mb: Some(64000),
storage_gb: Some(500),
..Default::default()
}),
..Default::default()
};
let node_clone_for_recall = node.clone();
let app_state = AppState {
node_store: Arc::new(NodeStore::new(RedisStore::new_test())),
contracts: None,
last_chain_sync: Arc::new(Mutex::new(None::<SystemTime>)),
max_nodes_per_ip: 1,
chain_sync_enabled: true,
};
let validate_signatures =
Arc::new(ValidatorState::new(vec![]).with_validator(move |_| true));
let app = test::init_service(
App::new()
.app_data(Data::new(app_state.clone()))
.route("/nodes", put().to(register_node))
.wrap(ValidateSignature::new(validate_signatures.clone())),
)
.await;
let json = serde_json::to_value(node.clone()).unwrap();
let signed_request = sign_request_with_nonce(
"/nodes",
&Wallet::new(private_key, Url::parse("http://localhost:8080").unwrap()).unwrap(),
Some(&json),
)
.await
.unwrap();
let req = test::TestRequest::put()
.uri("/nodes")
.set_json(signed_request.data.as_ref().unwrap())
.insert_header(("x-address", node.id.clone()))
.insert_header(("x-signature", signed_request.signature))
.to_request();
let resp = test::call_service(&app, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let body: ApiResponse<String> = test::read_body_json(resp).await;
assert!(body.success);
assert_eq!(body.data, "Node registered successfully");
let nodes = app_state.node_store.get_nodes().await;
match nodes {
Ok(nodes) => {
assert_eq!(nodes.len(), 1);
assert_eq!(nodes[0].id, node.id);
}
Err(_) => {
unreachable!("Error getting nodes");
}
}
let validated = DiscoveryNode {
node,
is_validated: true,
is_active: true,
is_provider_whitelisted: false,
is_blacklisted: false,
last_updated: None,
created_at: None,
location: None,
latest_balance: None,
};
match app_state.node_store.update_node(validated).await {
Ok(_) => (),
Err(_) => {
unreachable!("Error updating node");
}
}
let nodes = app_state.node_store.get_nodes().await;
match nodes {
Ok(nodes) => {
assert_eq!(nodes.len(), 1);
assert_eq!(nodes[0].id, node_clone_for_recall.id);
assert!(nodes[0].is_validated);
assert!(nodes[0].is_active);
}
Err(_) => {
unreachable!("Error getting nodes");
}
}
let json = serde_json::to_value(node_clone_for_recall.clone()).unwrap();
let signed_request = sign_request_with_nonce(
"/nodes",
&Wallet::new(private_key, Url::parse("http://localhost:8080").unwrap()).unwrap(),
Some(&json),
)
.await
.unwrap();
let req = test::TestRequest::put()
.uri("/nodes")
.set_json(signed_request.data.as_ref().unwrap())
.insert_header(("x-address", node_clone_for_recall.id.clone()))
.insert_header(("x-signature", signed_request.signature))
.to_request();
let resp = test::call_service(&app, req).await;
assert_eq!(resp.status(), StatusCode::OK);
let nodes = app_state.node_store.get_nodes().await;
match nodes {
Ok(nodes) => {
assert_eq!(nodes.len(), 1);
assert_eq!(nodes[0].id, node_clone_for_recall.id);
assert!(nodes[0].is_validated);
assert!(nodes[0].is_active);
}
Err(_) => {
unreachable!("Error getting nodes");
}
}
}
#[actix_web::test]
// Happy path: a PUT /nodes request signed by the key matching the node id is
// accepted, and the freshly registered node is persisted with `created_at`
// set and `last_updated` still unset.
async fn test_register_node_with_correct_signature() {
    // Private key 0x...01; its derived address matches `node.id` below —
    // TODO confirm against the wallet derivation used by `sign_request_with_nonce`.
    let private_key = "0000000000000000000000000000000000000000000000000000000000000001";
    let node = Node {
        id: "0x7E5F4552091A69125d5DfCb7b8C2659029395Bdf".to_string(),
        provider_address: "0x32A8dFdA26948728e5351e61d62C190510CF1C88".to_string(),
        ip_address: "127.0.0.1".to_string(),
        port: 8089,
        compute_pool_id: 0,
        ..Default::default()
    };
    // Fresh test Redis store; no chain contracts wired in.
    let app_state = AppState {
        node_store: Arc::new(NodeStore::new(RedisStore::new_test())),
        contracts: None,
        last_chain_sync: Arc::new(Mutex::new(None::<SystemTime>)),
        max_nodes_per_ip: 1,
        chain_sync_enabled: true,
    };
    // Validator that accepts every address, so only the signature itself is checked.
    let validate_signatures =
        Arc::new(ValidatorState::new(vec![]).with_validator(move |_| true));
    let app = test::init_service(
        App::new()
            .app_data(Data::new(app_state.clone()))
            .route("/nodes", put().to(register_node))
            .wrap(ValidateSignature::new(validate_signatures.clone())),
    )
    .await;
    // Sign the JSON payload (with nonce) using the key that owns `node.id`.
    let json = serde_json::to_value(node.clone()).unwrap();
    let signed_request = sign_request_with_nonce(
        "/nodes",
        &Wallet::new(private_key, Url::parse("http://localhost:8080").unwrap()).unwrap(),
        Some(&json),
    )
    .await
    .unwrap();
    let req = test::TestRequest::put()
        .uri("/nodes")
        .set_json(signed_request.data.as_ref().unwrap())
        .insert_header(("x-address", node.id.clone()))
        .insert_header(("x-signature", signed_request.signature))
        .to_request();
    let resp = test::call_service(&app, req).await;
    assert_eq!(resp.status(), StatusCode::OK);
    let body: ApiResponse<String> = test::read_body_json(resp).await;
    assert!(body.success);
    assert_eq!(body.data, "Node registered successfully");
    let nodes = app_state.node_store.get_nodes().await;
    let nodes = match nodes {
        Ok(nodes) => nodes,
        Err(_) => {
            panic!("Error getting nodes");
        }
    };
    // Exactly one node stored; registration sets created_at but not last_updated.
    assert_eq!(nodes.len(), 1);
    assert_eq!(nodes[0].id, node.id);
    assert_eq!(nodes[0].last_updated, None);
    assert_ne!(nodes[0].created_at, None);
}
#[actix_web::test]
// Negative path: the payload is signed by the key for ...Bdf, but the node id
// and x-address disagree (node.id ends in ...Bdd while the header claims
// ...Bdf), so the signature middleware must reject with 400.
async fn test_register_node_with_incorrect_signature() {
    let private_key = "0000000000000000000000000000000000000000000000000000000000000001";
    let node = Node {
        // Note the trailing "Bdd": intentionally NOT the signer's address.
        id: "0x7E5F4552091A69125d5DfCb7b8C2659029395Bdd".to_string(),
        provider_address: "0x32A8dFdA26948728e5351e61d62C190510CF1C88".to_string(),
        ip_address: "127.0.0.1".to_string(),
        port: 8089,
        compute_pool_id: 0,
        ..Default::default()
    };
    let app_state = AppState {
        node_store: Arc::new(NodeStore::new(RedisStore::new_test())),
        contracts: None,
        last_chain_sync: Arc::new(Mutex::new(None::<SystemTime>)),
        max_nodes_per_ip: 1,
        chain_sync_enabled: true,
    };
    let validate_signatures =
        Arc::new(ValidatorState::new(vec![]).with_validator(move |_| true));
    let app = test::init_service(
        App::new()
            .app_data(Data::new(app_state.clone()))
            .route("/nodes", put().to(register_node))
            .wrap(ValidateSignature::new(validate_signatures.clone())),
    )
    .await;
    let json = serde_json::to_value(node.clone()).unwrap();
    let signed_request = sign_request_with_nonce(
        "/nodes",
        &Wallet::new(private_key, Url::parse("http://localhost:8080").unwrap()).unwrap(),
        Some(&json),
    )
    .await
    .unwrap();
    let req = test::TestRequest::put()
        .uri("/nodes")
        .set_json(signed_request.data.as_ref().unwrap())
        // Header claims the signer's address, which does not match node.id.
        .insert_header(("x-address", "0x7E5F4552091A69125d5DfCb7b8C2659029395Bdf"))
        .insert_header(("x-signature", signed_request.signature))
        .to_request();
    let resp = test::call_service(&app, req).await;
    assert_eq!(resp.status(), StatusCode::BAD_REQUEST);
}
#[actix_web::test]
// Re-registration path: a node already present in the store re-registers with
// updated compute specs (smaller storage, explicit GPU indices) and the
// request must still succeed, leaving a single stored node.
async fn test_register_node_already_active_in_pool() {
    let private_key = "0000000000000000000000000000000000000000000000000000000000000001";
    let mut node = Node {
        id: "0x7E5F4552091A69125d5DfCb7b8C2659029395Bdf".to_string(),
        provider_address: "0x32A8dFdA26948728e5351e61d62C190510CF1C88".to_string(),
        ip_address: "127.0.0.1".to_string(),
        port: 8089,
        compute_pool_id: 0,
        compute_specs: Some(ComputeSpecs {
            gpu: Some(GpuSpecs {
                count: Some(4),
                model: Some("A100".to_string()),
                memory_mb: Some(40000),
                indices: None,
            }),
            cpu: Some(CpuSpecs {
                cores: Some(16),
                model: None,
            }),
            ram_mb: Some(64000),
            storage_gb: Some(500),
            ..Default::default()
        }),
        ..Default::default()
    };
    let app_state = AppState {
        node_store: Arc::new(NodeStore::new(RedisStore::new_test())),
        contracts: None,
        last_chain_sync: Arc::new(Mutex::new(None::<SystemTime>)),
        max_nodes_per_ip: 1,
        chain_sync_enabled: true,
    };
    // Seed the store directly so the HTTP request below is an update, not a
    // first registration.
    app_state
        .node_store
        .register_node(node.clone())
        .await
        .unwrap();
    // Mutate the specs the second registration will carry.
    node.compute_specs.as_mut().unwrap().storage_gb = Some(300);
    node.compute_specs
        .as_mut()
        .unwrap()
        .gpu
        .as_mut()
        .unwrap()
        .indices = Some(vec![0, 1, 2, 3]);
    let validate_signatures =
        Arc::new(ValidatorState::new(vec![]).with_validator(move |_| true));
    let app = test::init_service(
        App::new()
            .app_data(Data::new(app_state.clone()))
            .route("/nodes", put().to(register_node))
            .wrap(ValidateSignature::new(validate_signatures.clone())),
    )
    .await;
    let json = serde_json::to_value(node.clone()).unwrap();
    let signed_request = sign_request_with_nonce(
        "/nodes",
        &Wallet::new(private_key, Url::parse("http://localhost:8080").unwrap()).unwrap(),
        Some(&json),
    )
    .await
    .unwrap();
    let req = test::TestRequest::put()
        .uri("/nodes")
        .set_json(signed_request.data.as_ref().unwrap())
        .insert_header(("x-address", node.id.clone()))
        .insert_header(("x-signature", signed_request.signature))
        .to_request();
    let resp = test::call_service(&app, req).await;
    assert_eq!(resp.status(), StatusCode::OK);
    let body: ApiResponse<String> = test::read_body_json(resp).await;
    assert!(body.success);
    assert_eq!(body.data, "Node registered successfully");
    let nodes = app_state.node_store.get_nodes().await;
    let nodes = match nodes {
        Ok(nodes) => nodes,
        Err(_) => {
            panic!("Error getting nodes");
        }
    };
    // Re-registration must not create a duplicate entry.
    assert_eq!(nodes.len(), 1);
    assert_eq!(nodes[0].id, node.id);
}
#[actix_web::test]
// IP quota enforcement: with max_nodes_per_ip = 2, two distinct active nodes
// on 127.0.0.1 register fine (and re-registering an existing node does not
// count against the quota), but a third distinct node on the same IP is
// rejected with 400. Note: no signature middleware is mounted here, so the
// route's own logic is what is under test.
async fn test_register_node_with_max_nodes_per_ip() {
    let private_key = "0000000000000000000000000000000000000000000000000000000000000001";
    let private_key_2 = "0000000000000000000000000000000000000000000000000000000000000002";
    let private_key_3 = "0000000000000000000000000000000000000000000000000000000000000003";
    // Three nodes sharing one IP but with distinct ids/ports.
    let node1 = Node {
        id: "0x7E5F4552091A69125d5DfCb7b8C2659029395Bdf".to_string(),
        provider_address: "0x32A8dFdA26948728e5351e61d62C190510CF1C88".to_string(),
        ip_address: "127.0.0.1".to_string(),
        port: 8089,
        compute_pool_id: 0,
        ..Default::default()
    };
    let node2 = Node {
        id: "0x2546BcD3c84621e976D8185a91A922aE77ECEc30".to_string(),
        provider_address: "0x2546BcD3c84621e976D8185a91A922aE77ECEc30".to_string(),
        ip_address: "127.0.0.1".to_string(),
        port: 8090,
        compute_pool_id: 0,
        ..Default::default()
    };
    let node3 = Node {
        id: "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC".to_string(),
        provider_address: "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC".to_string(),
        ip_address: "127.0.0.1".to_string(),
        port: 8091,
        compute_pool_id: 0,
        ..Default::default()
    };
    let app_state = AppState {
        node_store: Arc::new(NodeStore::new(RedisStore::new_test())),
        contracts: None,
        last_chain_sync: Arc::new(Mutex::new(None::<SystemTime>)),
        max_nodes_per_ip: 2,
        chain_sync_enabled: true,
    };
    let app = test::init_service(
        App::new()
            .app_data(Data::new(app_state.clone()))
            .route("/nodes", put().to(register_node)),
    )
    .await;
    // Register first node - should succeed
    let json1 = serde_json::to_value(node1.clone()).unwrap();
    let signature1 = sign_request_with_nonce(
        "/nodes",
        &Wallet::new(private_key, Url::parse("http://localhost:8080").unwrap()).unwrap(),
        Some(&json1),
    )
    .await
    .unwrap();
    let req1 = test::TestRequest::put()
        .uri("/nodes")
        .set_json(signature1.data)
        .insert_header(("x-address", node1.id.clone()))
        .insert_header(("x-signature", signature1.signature))
        .to_request();
    let resp1 = test::call_service(&app, req1).await;
    assert_eq!(resp1.status(), StatusCode::OK);
    // Try to register same node again - should succeed (update)
    let json1_duplicate = serde_json::to_value(node1.clone()).unwrap();
    let signature1_duplicate = sign_request_with_nonce(
        "/nodes",
        &Wallet::new(private_key, Url::parse("http://localhost:8080").unwrap()).unwrap(),
        Some(&json1_duplicate),
    )
    .await
    .unwrap();
    let req1_duplicate = test::TestRequest::put()
        .uri("/nodes")
        .set_json(signature1_duplicate.data)
        .insert_header(("x-address", node1.id.clone()))
        .insert_header(("x-signature", signature1_duplicate.signature))
        .to_request();
    let resp1_duplicate = test::call_service(&app, req1_duplicate).await;
    assert_eq!(resp1_duplicate.status(), StatusCode::OK);
    // Register second node with different ID - should succeed
    let json2 = serde_json::to_value(node2.clone()).unwrap();
    let signature2 = sign_request_with_nonce(
        "/nodes",
        &Wallet::new(private_key_2, Url::parse("http://localhost:8080").unwrap()).unwrap(),
        Some(&json2),
    )
    .await
    .unwrap();
    let req2 = test::TestRequest::put()
        .uri("/nodes")
        .set_json(signature2.data)
        .insert_header(("x-address", node2.id.clone()))
        .insert_header(("x-signature", signature2.signature))
        .to_request();
    let resp2 = test::call_service(&app, req2).await;
    assert_eq!(resp2.status(), StatusCode::OK);
    // Make node 1 and two active
    // (the quota presumably only counts *active* nodes — confirm in register_node)
    let mut node1_active = DiscoveryNode::from(node1.clone());
    node1_active.is_active = true;
    app_state
        .node_store
        .update_node(node1_active)
        .await
        .unwrap();
    let mut node2_active = DiscoveryNode::from(node2.clone());
    node2_active.is_active = true;
    app_state
        .node_store
        .update_node(node2_active)
        .await
        .unwrap();
    // Register third node - should fail (exceeds max_nodes_per_ip)
    let json3 = serde_json::to_value(node3.clone()).unwrap();
    let signature3 = sign_request_with_nonce(
        "/nodes",
        &Wallet::new(private_key_3, Url::parse("http://localhost:8080").unwrap()).unwrap(),
        Some(&json3),
    )
    .await
    .unwrap();
    let req3 = test::TestRequest::put()
        .uri("/nodes")
        .set_json(signature3.data)
        .insert_header(("x-address", node3.id.clone()))
        .insert_header(("x-signature", signature3.signature))
        .to_request();
    let resp3 = test::call_service(&app, req3).await;
    assert_eq!(resp3.status(), StatusCode::BAD_REQUEST);
    // Verify only 2 nodes are registered
    let nodes = app_state.node_store.get_nodes().await.unwrap();
    assert_eq!(nodes.len(), 2);
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/discovery/src/api/routes/mod.rs | crates/discovery/src/api/routes/mod.rs | pub(crate) mod get_nodes;
pub(crate) mod node;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/discovery/src/api/routes/get_nodes.rs | crates/discovery/src/api/routes/get_nodes.rs | use crate::api::server::AppState;
use actix_web::{
web::Data,
web::{self},
HttpResponse,
};
use alloy::primitives::U256;
use shared::models::api::ApiResponse;
use shared::models::node::DiscoveryNode;
/// GET handler returning every node currently held by the store.
///
/// Responds `200` with `ApiResponse<Vec<DiscoveryNode>>` on success, or
/// `500` with a generic error payload if the store lookup fails.
pub(crate) async fn get_nodes(data: Data<AppState>) -> HttpResponse {
    match data.node_store.get_nodes().await {
        Ok(nodes) => HttpResponse::Ok().json(ApiResponse::new(true, nodes)),
        Err(_) => HttpResponse::InternalServerError()
            .json(ApiResponse::new(false, "Internal server error")),
    }
}
/// Select the nodes that belong to `pool_id`, excluding any whose IP address
/// is simultaneously used by an *active* node in a different pool.
fn filter_nodes_for_pool(nodes: Vec<DiscoveryNode>, pool_id: u32) -> Vec<DiscoveryNode> {
    // An IP is "claimed elsewhere" when some active node in another pool uses it.
    let ip_claimed_elsewhere = |candidate: &DiscoveryNode| {
        nodes.iter().any(|other| {
            other.ip_address == candidate.ip_address
                && other.compute_pool_id != candidate.compute_pool_id
                && other.is_active
        })
    };
    nodes
        .iter()
        .filter(|node| node.compute_pool_id == pool_id)
        .filter(|node| !ip_claimed_elsewhere(node))
        .cloned()
        .collect()
}
pub(crate) async fn get_nodes_for_pool(
data: Data<AppState>,
pool_id: web::Path<String>,
req: actix_web::HttpRequest,
) -> HttpResponse {
let nodes = data.node_store.get_nodes().await;
match nodes {
Ok(nodes) => {
let id_clone = pool_id.clone();
let pool_contract_id: U256 = match id_clone.parse::<U256>() {
Ok(id) => id,
Err(_) => {
return HttpResponse::BadRequest()
.json(ApiResponse::new(false, "Invalid pool ID format"));
}
};
let pool_id: u32 = match pool_id.parse() {
Ok(id) => id,
Err(_) => {
return HttpResponse::BadRequest()
.json(ApiResponse::new(false, "Invalid pool ID format"));
}
};
match data.contracts.clone() {
Some(contracts) => {
let Ok(pool_info) =
contracts.compute_pool.get_pool_info(pool_contract_id).await
else {
return HttpResponse::NotFound()
.json(ApiResponse::new(false, "Pool not found"));
};
let owner = pool_info.creator;
let manager = pool_info.compute_manager_key;
let address_str = match req.headers().get("x-address") {
Some(address) => match address.to_str() {
Ok(addr) => addr.to_string(),
Err(_) => {
return HttpResponse::BadRequest().json(ApiResponse::new(
false,
"Invalid x-address header - parsing issue",
))
}
},
None => {
return HttpResponse::BadRequest()
.json(ApiResponse::new(false, "Missing x-address header"))
}
};
// Normalize the address strings for comparison
let owner_str = owner.to_string().to_lowercase();
let manager_str = manager.to_string().to_lowercase();
let address_str_normalized = address_str.to_lowercase();
if address_str_normalized != owner_str && address_str_normalized != manager_str
{
return HttpResponse::BadRequest().json(ApiResponse::new(
false,
"Invalid x-address header - not owner or manager",
));
}
}
None => {
return HttpResponse::BadRequest()
.json(ApiResponse::new(false, "No contracts found"))
}
}
let nodes_for_pool: Vec<DiscoveryNode> = filter_nodes_for_pool(nodes, pool_id);
let response = ApiResponse::new(true, nodes_for_pool);
HttpResponse::Ok().json(response)
}
Err(_) => HttpResponse::InternalServerError()
.json(ApiResponse::new(false, "Internal server error")),
}
}
/// GET handler looking up a single node by its id (subkey).
///
/// `200` with the node when found, `404` when absent, `500` on a store error.
pub(crate) async fn get_node_by_subkey(
    node_id: web::Path<String>,
    data: Data<AppState>,
) -> HttpResponse {
    let lookup = data.node_store.get_node_by_id(&node_id.to_string()).await;
    match lookup {
        Err(_) => HttpResponse::InternalServerError()
            .json(ApiResponse::new(false, "Internal server error")),
        Ok(None) => HttpResponse::NotFound().json(ApiResponse::new(false, "Node not found")),
        Ok(Some(node)) => HttpResponse::Ok().json(ApiResponse::new(true, node)),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::store::node_store::NodeStore;
    use crate::store::redis::RedisStore;
    use actix_web::test;
    use actix_web::web::get;
    use actix_web::App;
    use shared::models::node::DiscoveryNode;
    use shared::models::node::Node;
    use std::sync::Arc;
    use std::thread;
    use std::time::Duration;
    use std::time::SystemTime;
    use tokio::sync::Mutex;
    #[actix_web::test]
    // GET /nodes returns the single node registered in a fresh test store.
    async fn test_get_nodes() {
        let app_state = AppState {
            node_store: Arc::new(NodeStore::new(RedisStore::new_test())),
            contracts: None,
            last_chain_sync: Arc::new(Mutex::new(None::<SystemTime>)),
            max_nodes_per_ip: 1,
            chain_sync_enabled: true,
        };
        let app = test::init_service(
            App::new()
                .app_data(Data::new(app_state.clone()))
                .route("/nodes", get().to(get_nodes)),
        )
        .await;
        let sample_node = Node {
            id: "0x32A8dFdA26948728e5351e61d62C190510CF1C88".to_string(),
            provider_address: "0x32A8dFdA26948728e5351e61d62C190510CF1C88".to_string(),
            ip_address: "127.0.0.1".to_string(),
            port: 8080,
            compute_pool_id: 0,
            ..Default::default()
        };
        match app_state.node_store.register_node(sample_node).await {
            Ok(_) => (),
            Err(_) => {
                panic!("Error registering node");
            }
        }
        let req = test::TestRequest::get().uri("/nodes").to_request();
        let resp = test::call_service(&app, req).await;
        assert!(resp.status().is_success());
        let body = test::read_body(resp).await;
        let api_response: ApiResponse<Vec<DiscoveryNode>> = match serde_json::from_slice(&body) {
            Ok(response) => response,
            Err(_) => panic!("Failed to deserialize response"),
        };
        assert!(api_response.success);
        assert_eq!(api_response.data.len(), 1);
    }
    #[actix_web::test]
    // The store returns nodes newest-first: register two nodes with distinct
    // timestamps and assert the later one comes back first.
    async fn test_nodes_sorted_by_newest_first() {
        let app_state = AppState {
            node_store: Arc::new(NodeStore::new(RedisStore::new_test())),
            contracts: None,
            last_chain_sync: Arc::new(Mutex::new(None::<SystemTime>)),
            max_nodes_per_ip: 1,
            chain_sync_enabled: true,
        };
        let app = test::init_service(
            App::new()
                .app_data(Data::new(app_state.clone()))
                .route("/nodes", get().to(get_nodes)),
        )
        .await;
        // Register older node first
        let older_node = Node {
            id: "0x32A8dFdA26948728e5351e61d62C190510CF1C88".to_string(),
            provider_address: "0x32A8dFdA26948728e5351e61d62C190510CF1C88".to_string(),
            ip_address: "127.0.0.1".to_string(),
            port: 8080,
            compute_pool_id: 0,
            ..Default::default()
        };
        match app_state.node_store.register_node(older_node).await {
            Ok(_) => (),
            Err(_) => {
                panic!("Error registering node");
            }
        }
        // Wait a moment to ensure timestamps are different
        thread::sleep(Duration::from_millis(100));
        // Register newer node
        let newer_node = Node {
            id: "0x45B8dFdA26948728e5351e61d62C190510CF1C99".to_string(),
            provider_address: "0x45B8dFdA26948728e5351e61d62C190510CF1C99".to_string(),
            ip_address: "127.0.0.2".to_string(),
            port: 8081,
            compute_pool_id: 0,
            ..Default::default()
        };
        match app_state.node_store.register_node(newer_node).await {
            Ok(_) => (),
            Err(_) => {
                panic!("Error registering node");
            }
        }
        let req = test::TestRequest::get().uri("/nodes").to_request();
        let resp = test::call_service(&app, req).await;
        assert!(resp.status().is_success());
        let body = test::read_body(resp).await;
        let api_response: ApiResponse<Vec<DiscoveryNode>> = match serde_json::from_slice(&body) {
            Ok(response) => response,
            Err(_) => panic!("Failed to deserialize response"),
        };
        assert!(api_response.success);
        assert_eq!(api_response.data.len(), 2);
        // Verify the newer node is first in the list
        assert_eq!(
            api_response.data[0].id,
            "0x45B8dFdA26948728e5351e61d62C190510CF1C99"
        );
        assert_eq!(
            api_response.data[1].id,
            "0x32A8dFdA26948728e5351e61d62C190510CF1C88"
        );
    }
    #[actix_web::test]
    // Pure-function test of filter_nodes_for_pool: pool membership plus the
    // cross-pool active-IP exclusion rule.
    async fn test_filter_nodes_for_pool() {
        // Create test nodes for different pools
        let mut nodes = vec![
            DiscoveryNode {
                node: Node {
                    id: "0x1111".to_string(),
                    provider_address: "0x1111".to_string(),
                    ip_address: "192.168.1.1".to_string(),
                    port: 8080,
                    compute_pool_id: 1,
                    ..Default::default()
                },
                is_validated: true,
                is_provider_whitelisted: true,
                is_active: true,
                is_blacklisted: false,
                ..Default::default()
            },
            DiscoveryNode {
                node: Node {
                    id: "0x2222".to_string(),
                    provider_address: "0x2222".to_string(),
                    ip_address: "192.168.1.2".to_string(),
                    port: 8080,
                    compute_pool_id: 1,
                    ..Default::default()
                },
                is_validated: true,
                is_provider_whitelisted: true,
                is_active: false,
                is_blacklisted: false,
                ..Default::default()
            },
        ];
        // Pool 2 nodes
        nodes.push(DiscoveryNode {
            node: Node {
                id: "0x3333".to_string(),
                provider_address: "0x3333".to_string(),
                ip_address: "192.168.1.3".to_string(),
                port: 8080,
                compute_pool_id: 2,
                ..Default::default()
            },
            is_validated: true,
            is_provider_whitelisted: true,
            is_active: true,
            is_blacklisted: false,
            ..Default::default()
        });
        // Node with same IP in different pools (active in pool 3)
        nodes.push(DiscoveryNode {
            node: Node {
                id: "0x4444".to_string(),
                provider_address: "0x4444".to_string(),
                ip_address: "192.168.1.4".to_string(),
                port: 8080,
                compute_pool_id: 3,
                ..Default::default()
            },
            is_validated: true,
            is_provider_whitelisted: true,
            is_active: true,
            is_blacklisted: false,
            ..Default::default()
        });
        // This node should be filtered out because it shares IP with an active node in pool 3
        nodes.push(DiscoveryNode {
            node: Node {
                id: "0x5555".to_string(),
                provider_address: "0x5555".to_string(),
                ip_address: "192.168.1.4".to_string(),
                port: 8081,
                compute_pool_id: 1,
                ..Default::default()
            },
            is_validated: true,
            is_provider_whitelisted: true,
            is_active: false,
            is_blacklisted: false,
            ..Default::default()
        });
        // Test filtering for pool 1
        let filtered_nodes = filter_nodes_for_pool(nodes.clone(), 1);
        // Should have 2 nodes from pool 1, but one is filtered out due to IP conflict
        assert_eq!(filtered_nodes.len(), 2);
        assert!(filtered_nodes.iter().any(|n| n.id == "0x1111"));
        assert!(filtered_nodes.iter().any(|n| n.id == "0x2222"));
        assert!(!filtered_nodes.iter().any(|n| n.id == "0x5555"));
        // Test filtering for pool 2
        let filtered_nodes = filter_nodes_for_pool(nodes.clone(), 2);
        assert_eq!(filtered_nodes.len(), 1);
        assert!(filtered_nodes.iter().any(|n| n.id == "0x3333"));
        // Test filtering for pool 3
        let filtered_nodes = filter_nodes_for_pool(nodes.clone(), 3);
        assert_eq!(filtered_nodes.len(), 1);
        assert!(filtered_nodes.iter().any(|n| n.id == "0x4444"));
        // Test filtering for non-existent pool
        let filtered_nodes = filter_nodes_for_pool(nodes.clone(), 99);
        assert_eq!(filtered_nodes.len(), 0);
    }
    #[actix_web::test]
    // Inactive nodes sharing an IP across pools must NOT exclude each other:
    // the IP-conflict rule only applies against *active* nodes.
    async fn test_filter_nodes_for_pool_with_inactive_nodes() {
        let nodes = vec![
            // Inactive node in pool 1
            DiscoveryNode {
                node: Node {
                    id: "0x1111".to_string(),
                    provider_address: "0x1111".to_string(),
                    ip_address: "192.168.1.1".to_string(),
                    port: 8080,
                    compute_pool_id: 1,
                    ..Default::default()
                },
                is_validated: true,
                is_provider_whitelisted: true,
                is_active: false,
                is_blacklisted: false,
                ..Default::default()
            },
            // Inactive node in pool 2 with same IP
            DiscoveryNode {
                node: Node {
                    id: "0x2222".to_string(),
                    provider_address: "0x2222".to_string(),
                    ip_address: "192.168.1.1".to_string(),
                    port: 8080,
                    compute_pool_id: 2,
                    ..Default::default()
                },
                is_validated: true,
                is_provider_whitelisted: true,
                is_active: false,
                is_blacklisted: false,
                ..Default::default()
            },
        ];
        // This should be included in pool 2 results since the conflicting node in pool 1
        // doesn't affect it (the filter only excludes nodes when there's an active node
        // with the same IP in a different pool)
        let filtered_nodes = filter_nodes_for_pool(nodes.clone(), 2);
        assert_eq!(filtered_nodes.len(), 1);
        assert!(filtered_nodes.iter().any(|n| n.id == "0x2222"));
        // The pool 1 node should be included in pool 1 results
        let filtered_nodes = filter_nodes_for_pool(nodes.clone(), 1);
        assert_eq!(filtered_nodes.len(), 1);
        assert!(filtered_nodes.iter().any(|n| n.id == "0x1111"));
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/discovery/src/chainsync/sync.rs | crates/discovery/src/chainsync/sync.rs | use crate::store::node_store::NodeStore;
use alloy::primitives::Address;
use alloy::providers::Provider as _;
use alloy::providers::RootProvider;
use anyhow::Error;
use futures::stream::{self, StreamExt};
use log::{debug, error, info, warn};
use shared::models::node::DiscoveryNode;
use shared::web3::contracts::core::builder::Contracts;
use std::str::FromStr;
use std::sync::Arc;
use std::time::{Duration, SystemTime};
use tokio::sync::Mutex;
use tokio_util::sync::CancellationToken;
const MAX_CONCURRENT_SYNCS: usize = 50;
/// Background job that periodically reconciles locally stored discovery nodes
/// with their on-chain state (balance, activation/validation flags, provider
/// whitelist, and pool blacklist status).
pub struct ChainSync {
    pub node_store: Arc<NodeStore>,
    // Cooperative shutdown signal for the spawned sync task.
    cancel_token: CancellationToken,
    // Period between full sync cycles.
    chain_sync_interval: Duration,
    // RPC provider used for balance queries.
    provider: RootProvider,
    contracts: Contracts<RootProvider>,
    // Timestamp of the last completed sync cycle, shared with other components.
    last_chain_sync: Arc<Mutex<Option<std::time::SystemTime>>>,
}
impl ChainSync {
    /// Bundle the dependencies for a sync loop; nothing runs until [`Self::run`].
    pub fn new(
        node_store: Arc<NodeStore>,
        cancellation_token: CancellationToken,
        chain_sync_interval: Duration,
        provider: RootProvider,
        contracts: Contracts<RootProvider>,
        last_chain_sync: Arc<Mutex<Option<std::time::SystemTime>>>,
    ) -> Self {
        Self {
            node_store,
            cancel_token: cancellation_token,
            chain_sync_interval,
            provider,
            contracts,
            last_chain_sync,
        }
    }
    /// Spawn the sync loop onto the Tokio runtime and return immediately.
    ///
    /// Each tick fetches all nodes from the store and syncs them against the
    /// chain with bounded concurrency (`MAX_CONCURRENT_SYNCS`). The loop exits
    /// when `cancel_token` fires. Consumes `self`; the returned `Result` is
    /// always `Ok` in the current implementation.
    pub fn run(self) -> Result<(), Error> {
        // Destructure so the spawned task owns each field directly.
        let ChainSync {
            node_store,
            cancel_token,
            chain_sync_interval,
            last_chain_sync,
            provider,
            contracts,
        } = self;
        tokio::spawn(async move {
            let mut interval = tokio::time::interval(chain_sync_interval);
            info!(
                "Chain sync started with {} second interval",
                chain_sync_interval.as_secs()
            );
            loop {
                tokio::select! {
                    _ = interval.tick() => {
                        let sync_start = SystemTime::now();
                        info!("Starting chain sync cycle");
                        let nodes = node_store.get_nodes().await;
                        match nodes {
                            Ok(nodes) => {
                                let total_nodes = nodes.len();
                                info!("Syncing {total_nodes} nodes");
                                // Process nodes in parallel with concurrency limit
                                let results: Vec<Result<(), Error>> = stream::iter(nodes)
                                    .map(|node| {
                                        // Clone per-task handles; all are cheap (Arc / provider clones).
                                        let node_store = node_store.clone();
                                        let provider = provider.clone();
                                        let contracts = contracts.clone();
                                        async move {
                                            sync_single_node(node_store, provider, contracts, node).await
                                        }
                                    })
                                    .buffer_unordered(MAX_CONCURRENT_SYNCS)
                                    .collect()
                                    .await;
                                // Count successes and failures
                                let mut success_count = 0;
                                let mut failure_count = 0;
                                for result in results {
                                    match result {
                                        Ok(_) => success_count += 1,
                                        Err(e) => {
                                            failure_count += 1;
                                            warn!("Node sync failed: {e}");
                                        }
                                    }
                                }
                                // Update the last chain sync time
                                // (only on a successful store read; a failed cycle leaves it unchanged)
                                let mut last_sync = last_chain_sync.lock().await;
                                *last_sync = Some(SystemTime::now());
                                let sync_duration = SystemTime::now()
                                    .duration_since(sync_start)
                                    .unwrap_or_default();
                                info!(
                                    "Chain sync completed in {:.2}s: {} successful, {} failed out of {} total nodes",
                                    sync_duration.as_secs_f64(),
                                    success_count,
                                    failure_count,
                                    total_nodes
                                );
                            }
                            Err(e) => {
                                error!("Error getting nodes from store: {e}");
                            }
                        }
                    }
                    _ = cancel_token.cancelled() => {
                        info!("Chain sync cancelled, shutting down");
                        break;
                    }
                }
            }
            info!("Chain sync task ended");
        });
        Ok(())
    }
}
/// Sync one node's on-chain state (balance, active/validated flags, provider
/// whitelist, pool blacklist) into the store.
///
/// Returns `Err` if an address fails to parse, any chain/RPC query fails, or
/// the store update fails. The store write is skipped entirely when none of
/// the boolean flags changed.
///
/// NOTE(review): `latest_balance` is refreshed on the working copy `n`, but
/// the comparison below only checks the four boolean flags — so the stored
/// balance only gets persisted when a flag also flips. Confirm this
/// write-reduction is intentional.
async fn sync_single_node(
    node_store: Arc<NodeStore>,
    provider: RootProvider,
    contracts: Contracts<RootProvider>,
    node: DiscoveryNode,
) -> Result<(), Error> {
    // Working copy that accumulates the fresh on-chain values.
    let mut n = node.clone();
    // Safely parse provider_address and node_address
    let provider_address = Address::from_str(&node.provider_address).map_err(|e| {
        error!(
            "Failed to parse provider address '{}': {}",
            node.provider_address, e
        );
        anyhow::anyhow!("Invalid provider address")
    })?;
    let node_address = Address::from_str(&node.id).map_err(|e| {
        error!("Failed to parse node address '{}': {}", node.id, e);
        anyhow::anyhow!("Invalid node address")
    })?;
    let balance = provider.get_balance(node_address).await.map_err(|e| {
        error!("Error retrieving balance for node {node_address}: {e}");
        anyhow::anyhow!("Failed to retrieve node balance")
    })?;
    n.latest_balance = Some(balance);
    let node_info = contracts
        .compute_registry
        .get_node(provider_address, node_address)
        .await
        .map_err(|e| {
            error!(
                "Error retrieving node info for provider {provider_address} and node {node_address}: {e}"
            );
            anyhow::anyhow!("Failed to retrieve node info")
        })?;
    let provider_info = contracts
        .compute_registry
        .get_provider(provider_address)
        .await
        .map_err(|e| {
            error!("Error retrieving provider info for {provider_address}: {e}");
            anyhow::anyhow!("Failed to retrieve provider info")
        })?;
    let (is_active, is_validated) = node_info;
    n.is_active = is_active;
    n.is_validated = is_validated;
    n.is_provider_whitelisted = provider_info.is_whitelisted;
    // Handle potential errors from async calls
    let is_blacklisted = contracts
        .compute_pool
        .is_node_blacklisted(node.node.compute_pool_id, node_address)
        .await
        .map_err(|e| {
            error!(
                "Error checking if node {} is blacklisted in pool {}: {}",
                node_address, node.node.compute_pool_id, e
            );
            anyhow::anyhow!("Failed to check blacklist status")
        })?;
    n.is_blacklisted = is_blacklisted;
    // Only update if the node has changed
    if n.is_active != node.is_active
        || n.is_validated != node.is_validated
        || n.is_provider_whitelisted != node.is_provider_whitelisted
        || n.is_blacklisted != node.is_blacklisted
    {
        match node_store.update_node(n).await {
            Ok(_) => {
                debug!("Successfully updated node {}", node.id);
                Ok(())
            }
            Err(e) => {
                error!("Error updating node {}: {}", node.id, e);
                Err(anyhow::anyhow!("Failed to update node: {}", e))
            }
        }
    } else {
        debug!("Node {} unchanged, skipping update", node.id);
        Ok(())
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/discovery/src/chainsync/mod.rs | crates/discovery/src/chainsync/mod.rs | mod sync;
pub use sync::ChainSync;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/dev-utils/examples/mint_ai_token.rs | crates/dev-utils/examples/mint_ai_token.rs | use alloy::primitives::utils::Unit;
use alloy::primitives::Address;
use alloy::primitives::U256;
use clap::Parser;
use eyre::Result;
use shared::web3::contracts::core::builder::ContractBuilder;
use shared::web3::wallet::Wallet;
use std::str::FromStr;
use url::Url;
/// CLI arguments for the token-minting dev utility.
#[derive(Parser)]
struct Args {
    /// Address to mint tokens to
    #[arg(short = 'a', long)]
    address: String,
    /// Private key for transaction signing
    #[arg(short = 'k', long)]
    key: String,
    /// RPC URL
    #[arg(short = 'r', long)]
    rpc_url: String,
    /// Amount to mint (whole tokens; scaled by 10^18 wei before minting)
    #[arg(short = 'm', long, default_value = "30000")]
    amount: u64,
}
/// Dev utility: mint `amount` whole AI tokens (scaled to wei) to `address`,
/// then print the transaction result and the recipient's token balance.
#[tokio::main]
async fn main() -> Result<()> {
    let args = Args::parse();
    let rpc_url = Url::parse(&args.rpc_url)?;
    let wallet = Wallet::new(&args.key, rpc_url).unwrap();
    // The builder currently requires constructing every contract even though
    // only the AI token contract is used here.
    let contracts = ContractBuilder::new(wallet.provider())
        .with_compute_registry()
        .with_ai_token()
        .with_prime_network()
        .with_compute_pool()
        .build()
        .unwrap();
    let recipient = Address::from_str(&args.address).unwrap();
    // Scale the whole-token amount into wei (10^18 per token).
    let wei_amount = U256::from(args.amount) * Unit::ETHER.wei();
    let tx = contracts.ai_token.mint(recipient, wei_amount).await;
    println!("Minting to address: {}", args.address);
    println!("Transaction: {:?}", tx);
    let balance = contracts.ai_token.balance_of(recipient).await;
    println!("Balance: {:?}", balance);
    Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/dev-utils/examples/test_concurrent_calls.rs | crates/dev-utils/examples/test_concurrent_calls.rs | use alloy::eips::BlockId;
use alloy::eips::BlockNumberOrTag;
use alloy::primitives::utils::Unit;
use alloy::primitives::Address;
use alloy::primitives::U256;
use alloy::providers::Provider;
use clap::Parser;
use eyre::Result;
use shared::web3::contracts::core::builder::ContractBuilder;
use shared::web3::contracts::helpers::utils::retry_call;
use shared::web3::wallet::Wallet;
use std::str::FromStr;
use std::sync::Arc;
use url::Url;
/// CLI arguments for the concurrent-calls dev utility.
#[derive(Parser)]
struct Args {
    /// Address to mint tokens to
    #[arg(short = 'a', long)]
    address: String,
    /// Private key for transaction signing
    #[arg(short = 'k', long)]
    key: String,
    /// RPC URL
    #[arg(short = 'r', long)]
    rpc_url: String,
    /// Amount to mint (whole tokens; scaled by 10^18 wei before minting)
    #[arg(short = 'm', long, default_value = "30000")]
    amount: u64,
}
/// Dev utility: fire two concurrent mint transactions through `retry_call` to
/// exercise nonce handling, printing gas price, nonces, tx hashes and balance.
#[tokio::main]
async fn main() -> Result<()> {
    let args = Args::parse();
    let wallet = Arc::new(Wallet::new(&args.key, Url::parse(&args.rpc_url)?).unwrap());
    let price = wallet.provider.get_gas_price().await?;
    println!("Gas price: {:?}", price);
    let current_nonce = wallet
        .provider
        .get_transaction_count(wallet.address())
        .await?;
    // Pending-block nonce can be ahead of the latest-block nonce when
    // transactions are queued.
    let pending_nonce = wallet
        .provider
        .get_transaction_count(wallet.address())
        .block_id(BlockId::Number(BlockNumberOrTag::Pending))
        .await?;
    println!("Pending nonce: {:?}", pending_nonce);
    println!("Current nonce: {:?}", current_nonce);
    // Unfortunately have to build all contracts atm
    let contracts = Arc::new(
        ContractBuilder::new(wallet.provider())
            .with_compute_registry()
            .with_ai_token()
            .with_prime_network()
            .with_compute_pool()
            .build()
            .unwrap(),
    );
    let address = Address::from_str(&args.address).unwrap();
    let amount = U256::from(args.amount) * Unit::ETHER.wei();
    let random = (rand::random::<u8>() % 10) + 1;
    println!("Random: {:?}", random);
    // Spawn two mint calls concurrently; keep the handles so we can wait for
    // completion instead of sleeping for a fixed interval.
    let contracts_one = contracts.clone();
    let wallet_one = wallet.clone();
    let first = tokio::spawn(async move {
        let mint_call = contracts_one
            .ai_token
            .build_mint_call(address, amount)
            .unwrap();
        let tx = retry_call(mint_call, 5, wallet_one.provider(), None)
            .await
            .unwrap();
        println!("Transaction hash I: {:?}", tx);
    });
    let contracts_two = contracts.clone();
    let wallet_two = wallet.clone();
    let second = tokio::spawn(async move {
        let mint_call_two = contracts_two
            .ai_token
            .build_mint_call(address, amount)
            .unwrap();
        let tx = retry_call(mint_call_two, 5, wallet_two.provider(), None)
            .await
            .unwrap();
        println!("Transaction hash II: {:?}", tx);
    });
    let balance = contracts.ai_token.balance_of(address).await.unwrap();
    println!("Balance: {:?}", balance);
    // Previously this slept for a fixed 40s with the tasks detached, which was
    // slow and silently swallowed task panics. Awaiting the join handles waits
    // exactly as long as needed and surfaces panics here.
    let (first_res, second_res) = tokio::join!(first, second);
    first_res.expect("first mint task panicked");
    second_res.expect("second mint task panicked");
    Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/dev-utils/examples/set_min_stake_amount.rs | crates/dev-utils/examples/set_min_stake_amount.rs | use alloy::primitives::utils::Unit;
use alloy::primitives::U256;
use clap::Parser;
use eyre::Result;
use shared::web3::contracts::core::builder::ContractBuilder;
use shared::web3::wallet::Wallet;
use url::Url;
// CLI arguments for the set-minimum-stake dev utility.
#[derive(Parser)]
struct Args {
    /// Minimum stake amount to set
    // Interpreted in whole tokens (may be fractional); main() scales to wei.
    #[arg(short = 'm', long)]
    min_stake_amount: f64,
    /// Private key for transaction signing
    #[arg(short = 'k', long)]
    key: String,
    /// RPC URL
    #[arg(short = 'r', long)]
    rpc_url: String,
}
/// Dev utility: sets the network-wide minimum stake via PrimeNetwork.
#[tokio::main]
async fn main() -> Result<()> {
    let args = Args::parse();
    let wallet = Wallet::new(&args.key, Url::parse(&args.rpc_url)?).unwrap();
    // Build all contracts
    let contracts = ContractBuilder::new(wallet.provider())
        .with_compute_registry()
        .with_ai_token()
        .with_prime_network()
        .with_compute_pool()
        .build()
        .unwrap();
    // Scale the (possibly fractional) token amount to wei while still in f64,
    // then convert. The previous `U256::from(f64) * Unit::ETHER.wei()` lost
    // the fractional part before scaling, so e.g. `--min-stake-amount 0.5`
    // did not set 0.5 tokens. (f64 has ~15-16 significant digits, which is
    // ample for a dev-tool stake amount.)
    let min_stake_amount = U256::from(args.min_stake_amount * 1e18);
    println!("Min stake amount: {}", min_stake_amount);
    let tx = contracts
        .prime_network
        .set_stake_minimum(min_stake_amount)
        .await;
    println!("Transaction: {:?}", tx);
    Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/dev-utils/examples/whitelist_provider.rs | crates/dev-utils/examples/whitelist_provider.rs | use alloy::primitives::Address;
use clap::Parser;
use eyre::Result;
use shared::web3::contracts::core::builder::ContractBuilder;
use shared::web3::wallet::Wallet;
use url::Url;
// CLI arguments for the provider-whitelisting dev utility.
// The signing key must belong to an account authorized to whitelist
// providers on PrimeNetwork — TODO confirm which role is required.
#[derive(Parser)]
struct Args {
    /// Provider address to whitelist
    #[arg(short = 'a', long)]
    provider_address: String,
    /// Private key for transaction signing
    #[arg(short = 'k', long)]
    key: String,
    /// RPC URL
    #[arg(short = 'r', long)]
    rpc_url: String,
}
/// Dev utility: whitelists a provider address on PrimeNetwork.
#[tokio::main]
async fn main() -> Result<()> {
    let args = Args::parse();
    let wallet = Wallet::new(&args.key, Url::parse(&args.rpc_url)?).unwrap();
    // Build all contracts
    let contracts = ContractBuilder::new(wallet.provider())
        .with_compute_registry()
        .with_ai_token()
        .with_prime_network()
        .with_compute_pool()
        .build()
        .unwrap();
    let provider_address: Address = args.provider_address.parse()?;
    println!("Whitelisting provider: {}", args.provider_address);
    // Keep the call result instead of discarding it with `let _ =`: a failed
    // whitelist call was previously invisible to the operator. Printing the
    // Debug result matches the sibling dev utilities.
    let tx = contracts
        .prime_network
        .whitelist_provider(provider_address)
        .await;
    println!("Transaction: {:?}", tx);
    Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/dev-utils/examples/submit_work.rs | crates/dev-utils/examples/submit_work.rs | use alloy::primitives::{Address, U256};
use clap::Parser;
use eyre::Result;
use shared::web3::contracts::core::builder::ContractBuilder;
use shared::web3::wallet::Wallet;
use std::str::FromStr;
use url::Url;
// CLI arguments for the work-submission dev utility.
#[derive(Parser)]
struct Args {
    /// Pool ID
    #[arg(short = 'p', long)]
    pool_id: u32,
    /// Node address
    #[arg(short = 'n', long)]
    node: String,
    /// Work key (32-byte hex string)
    // Decoded with hex::decode in main; must not carry a 0x prefix.
    #[arg(short = 'w', long)]
    work_key: String,
    /// Private key for transaction signing (provider's private key)
    #[arg(short = 'k', long)]
    key: String,
    /// RPC URL
    #[arg(short = 'r', long)]
    rpc_url: String,
}
/// Dev utility: submits a work record for a node in a compute pool.
#[tokio::main]
async fn main() -> Result<()> {
    // Fixed number of work units reported with each submission (dev fixture).
    // Written as an integer literal rather than the previous f64 literal
    // `179949060096000.0`: the value is integral, and `U256::from` on a float
    // goes through a lossy, panicking float-conversion path for no benefit.
    const WORK_UNITS: u64 = 179_949_060_096_000;
    let args = Args::parse();
    let wallet = Wallet::new(&args.key, Url::parse(&args.rpc_url)?).unwrap();
    // Build all contracts
    let contracts = ContractBuilder::new(wallet.provider())
        .with_compute_registry()
        .with_ai_token()
        .with_prime_network()
        .with_compute_pool()
        .build()
        .unwrap();
    let pool_id = U256::from(args.pool_id);
    let node = Address::from_str(&args.node).expect("Invalid node address");
    let work_key = hex::decode(&args.work_key).expect("Invalid work key hex");
    // The contract expects exactly a 32-byte key.
    if work_key.len() != 32 {
        panic!("Work key must be 32 bytes");
    }
    let call = contracts
        .compute_pool
        .build_work_submission_call(pool_id, node, work_key, U256::from(WORK_UNITS))
        .map_err(|e| eyre::eyre!("Failed to build work submission call: {}", e))?;
    let tx = call
        .send()
        .await
        .map_err(|e| eyre::eyre!("Failed to submit work: {}", e))?;
    println!(
        "Submitted work for node {} in pool {}",
        args.node, args.pool_id
    );
    println!("Transaction hash: {:?}", tx);
    Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/dev-utils/examples/eject_node.rs | crates/dev-utils/examples/eject_node.rs | use alloy::primitives::Address;
use clap::Parser;
use eyre::Result;
use shared::web3::contracts::core::builder::ContractBuilder;
use shared::web3::wallet::Wallet;
use std::str::FromStr;
use url::Url;
// CLI arguments for the node-ejection dev utility.
#[derive(Parser)]
struct Args {
    /// Private key for transaction signing
    /// The address of this key must be the pool creator or manager
    #[arg(short = 'k', long)]
    key: String,
    /// RPC URL
    #[arg(short = 'r', long)]
    rpc_url: String,
    /// Pool ID
    #[arg(short = 'p', long)]
    pool_id: u32,
    /// Provider address
    // Used only to look up the node's registry entry, not for the ejection.
    #[arg(short = 'a', long)]
    provider_address: String,
    /// Node address to eject
    #[arg(short = 'n', long)]
    node: String,
}
// Dev utility: ejects a node from a compute pool, printing the node's
// registry entry before and after so the effect can be eyeballed.
#[tokio::main]
async fn main() -> Result<()> {
    let args = Args::parse();
    let wallet = Wallet::new(&args.key, Url::parse(&args.rpc_url)?).unwrap();
    // Build all contracts
    let contracts = ContractBuilder::new(wallet.provider())
        .with_compute_registry()
        .with_ai_token()
        .with_prime_network()
        .with_compute_pool()
        .build()
        .unwrap();
    let node_address = Address::from_str(&args.node).expect("Invalid node address");
    let provider_address =
        Address::from_str(&args.provider_address).expect("Invalid provider address");
    // Snapshot the registry entry before ejection for comparison below.
    let node_info = contracts
        .compute_registry
        .get_node(provider_address, node_address)
        .await;
    println!("Node info: {:?}", node_info);
    // The ejection result is printed as Debug; errors surface in the output
    // rather than aborting, so the post-ejection snapshot still runs.
    let tx = contracts
        .compute_pool
        .eject_node(args.pool_id, node_address)
        .await;
    println!("Ejected node {} from pool {}", args.node, args.pool_id);
    println!("Transaction: {:?}", tx);
    // Re-read the registry entry to show the effect of the ejection.
    let node_info = contracts
        .compute_registry
        .get_node(provider_address, node_address)
        .await;
    println!("Post ejection node info: {:?}", node_info);
    Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/dev-utils/examples/get_node_info.rs | crates/dev-utils/examples/get_node_info.rs | use alloy::primitives::Address;
use alloy::providers::RootProvider;
use clap::Parser;
use eyre::Result;
use shared::web3::contracts::core::builder::ContractBuilder;
use std::str::FromStr;
use url::Url;
// CLI arguments for the read-only node-info dev utility.
#[derive(Parser)]
struct Args {
    /// Provider address
    #[arg(short = 'p', long)]
    provider_address: String,
    /// Node address
    #[arg(short = 'n', long)]
    node_address: String,
    /// Private key for transaction signing
    // NOTE(review): not used by main() — all queries go through a plain
    // RootProvider. Consider removing this flag.
    #[arg(short = 'k', long)]
    key: String,
    /// RPC URL
    #[arg(short = 'r', long)]
    rpc_url: String,
}
// Dev utility: prints a node's registry status (active/validated) and whether
// it is a member of pool 0. Read-only; no transactions are signed.
#[tokio::main]
async fn main() -> Result<()> {
    let args = Args::parse();
    // Read-only queries need no signer, so a plain HTTP provider suffices
    // (`args.key` is unused here).
    let provider = RootProvider::new_http(Url::parse(&args.rpc_url).unwrap());
    // Build the contract
    let contracts = ContractBuilder::new(provider)
        .with_compute_registry()
        .with_ai_token() // Initialize AI Token
        .with_prime_network() // Initialize Prime Network
        .with_compute_pool()
        .build()
        .unwrap();
    let provider_address = Address::from_str(&args.provider_address).unwrap();
    let node_address = Address::from_str(&args.node_address).unwrap();
    // Get node info
    let (active, validated) = contracts
        .compute_registry
        .get_node(provider_address, node_address)
        .await
        .unwrap();
    // NOTE(review): pool id 0 is hard-coded; add a `--pool-id` flag if other
    // pools need to be inspected.
    let is_node_in_pool = contracts
        .compute_pool
        .is_node_in_pool(0, node_address)
        .await
        .unwrap();
    println!(
        "Node Active: {}, Validated: {}, In Pool: {}",
        active, validated, is_node_in_pool
    );
    Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/dev-utils/examples/invalidate_work.rs | crates/dev-utils/examples/invalidate_work.rs | use alloy::primitives::U256;
use clap::Parser;
use eyre::Result;
use shared::web3::contracts::core::builder::ContractBuilder;
use shared::web3::wallet::Wallet;
use std::str::FromStr;
use url::Url;
// CLI arguments for the work-invalidation dev utility.
#[derive(Parser)]
struct Args {
    // Pool the work record belongs to.
    #[arg(long)]
    pool_id: u64,
    // Penalty amount, parsed as a decimal U256 string in main.
    #[arg(long)]
    penalty: String,
    // 32-byte work key as hex; an optional 0x prefix is stripped in main.
    #[arg(long)]
    work_key: String,
    // Private key for transaction signing.
    #[arg(long)]
    key: String,
    // RPC endpoint URL.
    #[arg(long)]
    rpc_url: String,
}
/// Dev utility: invalidates a previously submitted work record on
/// PrimeNetwork, applying the given penalty.
#[tokio::main]
async fn main() -> Result<()> {
    let args = Args::parse();

    // Validate every user-supplied value up front, before touching the chain.
    let endpoint = Url::parse(&args.rpc_url).map_err(|e| eyre::eyre!("Invalid RPC URL: {}", e))?;
    let pool_id = U256::from(args.pool_id);
    let penalty =
        U256::from_str(&args.penalty).map_err(|e| eyre::eyre!("Invalid penalty value: {}", e))?;
    // The decoded work key doubles as the call's data payload.
    let data = hex::decode(args.work_key.trim_start_matches("0x"))
        .map_err(|e| eyre::eyre!("Invalid work key hex: {}", e))?;
    eyre::ensure!(data.len() == 32, "Work key must be 32 bytes");

    // Signer for the invalidation transaction.
    let wallet = Wallet::new(&args.key, endpoint)
        .map_err(|e| eyre::eyre!("Failed to create wallet: {}", e))?;

    // The builder wires up every core contract; only PrimeNetwork is used here.
    let contracts = ContractBuilder::new(wallet.provider())
        .with_compute_registry()
        .with_ai_token()
        .with_prime_network()
        .with_compute_pool()
        .build()
        .map_err(|e| eyre::eyre!("Failed to build contracts: {}", e))?;

    let tx = contracts
        .prime_network
        .invalidate_work(pool_id, penalty, data)
        .await
        .map_err(|e| eyre::eyre!("Failed to invalidate work: {}", e))?;
    println!(
        "Invalidated work in pool {} with penalty {}",
        args.pool_id, args.penalty
    );
    println!("Transaction hash: {:?}", tx);
    Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/dev-utils/examples/create_domain.rs | crates/dev-utils/examples/create_domain.rs | use alloy::primitives::Address;
use clap::Parser;
use eyre::Result;
use shared::web3::contracts::core::builder::ContractBuilder;
use shared::web3::wallet::Wallet;
use std::str::FromStr;
use url::Url;
// CLI arguments for the domain-creation dev utility.
#[derive(Parser)]
struct Args {
    /// Domain name to create
    #[arg(short = 'd', long)]
    domain_name: String,
    /// Validation logic address
    // Defaults to the zero address, i.e. no custom validation contract.
    #[arg(
        short = 'v',
        long,
        default_value = "0x0000000000000000000000000000000000000000"
    )]
    validation_logic: String,
    /// Domain URI
    #[arg(short = 'u', long)]
    domain_uri: String,
    /// Private key for transaction signing
    #[arg(short = 'k', long)]
    key: String,
    /// RPC URL
    #[arg(short = 'r', long)]
    rpc_url: String,
}
/// Dev utility: creates a domain on PrimeNetwork with an optional validation
/// logic contract.
#[tokio::main]
async fn main() -> Result<()> {
    let args = Args::parse();
    let wallet = Wallet::new(&args.key, Url::parse(&args.rpc_url)?).unwrap();
    // Build all contracts
    let contracts = ContractBuilder::new(wallet.provider())
        .with_compute_registry()
        .with_ai_token()
        .with_prime_network()
        .with_compute_pool()
        .with_domain_registry()
        .build()
        .unwrap();
    let validation_logic = Address::from_str(&args.validation_logic).unwrap();
    // Log the parameters before the call (so they are visible even if the
    // call hangs) — this also lets the String fields be moved into
    // `create_domain` instead of cloned first.
    println!("Creating domain: {}", args.domain_name);
    println!("Validation logic: {}", args.validation_logic);
    let tx = contracts
        .prime_network
        .create_domain(args.domain_name, validation_logic, args.domain_uri)
        .await;
    println!("Transaction: {:?}", tx);
    Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/dev-utils/examples/compute_pool.rs | crates/dev-utils/examples/compute_pool.rs | use alloy::primitives::Address;
use alloy::primitives::U256;
use clap::Parser;
use eyre::Result;
use shared::web3::contracts::core::builder::ContractBuilder;
use shared::web3::contracts::implementations::rewards_distributor_contract::RewardsDistributor;
use shared::web3::wallet::Wallet;
use std::str::FromStr;
use url::Url;
// CLI arguments for the compute-pool-creation dev utility.
#[derive(Parser)]
struct Args {
    /// Domain ID to create the compute pool for
    #[arg(short = 'd', long)]
    domain_id: U256,
    /// Compute manager key address
    #[arg(short = 'm', long)]
    compute_manager_key: String,
    /// Pool name
    #[arg(short = 'n', long)]
    pool_name: String,
    /// Pool data URI
    #[arg(short = 'u', long)]
    pool_data_uri: String,
    /// Private key for transaction signing
    /// The address of this key will be the creator of the compute pool
    /// They are the only one who can actually set the pool to active
    #[arg(short = 'k', long)]
    key: String,
    /// RPC URL
    #[arg(short = 'r', long)]
    rpc_url: String,
}
// Dev utility: creates a compute pool in a domain, then configures and prints
// the reward rate of the pool's rewards distributor.
#[tokio::main]
async fn main() -> Result<()> {
    let args = Args::parse();
    let wallet = Wallet::new(&args.key, Url::parse(&args.rpc_url)?).unwrap();
    // Build all contracts
    let contracts = ContractBuilder::new(wallet.provider())
        .with_compute_registry()
        .with_ai_token()
        .with_prime_network()
        .with_compute_pool()
        .build()
        .unwrap();
    let domain_id = args.domain_id;
    let compute_manager_key = Address::from_str(&args.compute_manager_key).unwrap();
    let pool_name = args.pool_name.clone();
    let pool_data_uri = args.pool_data_uri.clone();
    // NOTE(review): compute limit 0 — presumably "unlimited"; confirm against
    // the ComputePool contract semantics.
    let compute_limit = U256::from(0);
    let tx = contracts
        .compute_pool
        .create_compute_pool(
            domain_id,
            compute_manager_key,
            pool_name,
            pool_data_uri,
            compute_limit,
        )
        .await;
    println!("Transaction: {:?}", tx);
    // NOTE(review): queries pool id 0 rather than the id of the pool just
    // created — this assumes the new pool is the first/only one. Confirm, or
    // derive the actual id from the creation event.
    let rewards_distributor_address = contracts
        .compute_pool
        .get_reward_distributor_address(U256::from(0))
        .await
        .unwrap();
    println!(
        "Rewards distributor address: {:?}",
        rewards_distributor_address
    );
    let rewards_distributor = RewardsDistributor::new(
        rewards_distributor_address,
        wallet.provider(),
        "rewards_distributor.json",
    );
    // 0.01 tokens (1e16 wei) per reward unit.
    let rate = U256::from(10000000000000000u64);
    let tx = rewards_distributor.set_reward_rate(rate).await;
    println!("Setting reward rate: {:?}", tx);
    let reward_rate = rewards_distributor.get_reward_rate().await.unwrap();
    // Display the wei-denominated rate as whole tokens (divide by 1e18).
    println!(
        "Reward rate: {}",
        reward_rate.to_string().parse::<f64>().unwrap_or(0.0) / 10f64.powf(18.0)
    );
    Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/dev-utils/examples/start_compute_pool.rs | crates/dev-utils/examples/start_compute_pool.rs | use alloy::primitives::U256;
use clap::Parser;
use eyre::Result;
use shared::web3::contracts::core::builder::ContractBuilder;
use shared::web3::wallet::Wallet;
use url::Url;
// CLI arguments for the start-compute-pool dev utility.
#[derive(Parser)]
struct Args {
    /// Private key for transaction signing
    /// The address of this key will be the creator of the compute pool
    /// They are the only one who can actually set the pool to active
    #[arg(short = 'k', long)]
    key: String,
    /// RPC URL
    #[arg(short = 'r', long)]
    rpc_url: String,
    /// Pool ID
    #[arg(short = 'p', long)]
    pool_id: U256,
}
/// Dev utility: marks an existing compute pool as started/active.
/// Only the pool creator's key can do this (see Args docs).
#[tokio::main]
async fn main() -> Result<()> {
    let args = Args::parse();
    let wallet = Wallet::new(&args.key, Url::parse(&args.rpc_url)?).unwrap();
    // Build all contracts
    let contracts = ContractBuilder::new(wallet.provider())
        .with_compute_registry()
        .with_ai_token()
        .with_prime_network()
        .with_compute_pool()
        .build()
        .unwrap();
    // `args.pool_id` is already a U256; the previous
    // `U256::from(args.pool_id)` round-trip was redundant.
    let tx = contracts
        .compute_pool
        .start_compute_pool(args.pool_id)
        .await;
    println!("Started compute pool with id: {}", args.pool_id);
    println!("Transaction: {:?}", tx);
    Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/dev-utils/examples/transfer_eth.rs | crates/dev-utils/examples/transfer_eth.rs | use alloy::{
network::TransactionBuilder, primitives::utils::format_ether, primitives::Address,
primitives::U256, providers::Provider, rpc::types::TransactionRequest,
};
use clap::Parser;
use eyre::Result;
use shared::web3::wallet::Wallet;
use std::str::FromStr;
use url::Url;
// CLI arguments for the ETH-transfer dev utility.
#[derive(Parser)]
struct Args {
    /// Address to send ETH to
    #[arg(short = 'a', long)]
    address: String,
    /// Private key for transaction signing
    #[arg(short = 'k', long)]
    key: String,
    /// RPC URL
    #[arg(short = 'r', long)]
    rpc_url: String,
    /// Amount to send
    // Interpreted as raw wei — main() applies no ether scaling, so the
    // default of 1000 is 1000 wei, not 1000 ETH.
    #[arg(short = 'm', long, default_value = "1000")]
    amount: u64,
}
// Dev utility: transfers `--amount` wei from the key's account to
// `--address` and prints sender/receiver balances around the transfer.
#[tokio::main]
async fn main() -> Result<()> {
    let args = Args::parse();
    let wallet = Wallet::new(&args.key, Url::parse(&args.rpc_url)?).unwrap();
    // Snapshot the sender's balance before sending (printed further below).
    let balance_before = wallet.provider.get_balance(wallet.signer.address()).await?;
    let from = wallet.signer.address();
    let to = Address::from_str(&args.address).unwrap();
    // Raw wei — no ether scaling applied.
    let amount = U256::from(args.amount);
    let tx = TransactionRequest::default()
        .with_from(from)
        .with_to(to)
        .with_value(amount);
    // Send the transaction and listen for the transaction to be included.
    let tx_hash = wallet.provider.send_transaction(tx).await?.watch().await?;
    println!("Sent transaction: {tx_hash}");
    println!(
        "Sender's ETH balance before transaction: {} ETH",
        format_ether(balance_before)
    );
    let balance_after = wallet.provider.get_balance(to).await?;
    println!(
        "Receiver's ETH balance after transaction: {} ETH",
        format_ether(balance_after)
    );
    Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/build.rs | crates/worker/build.rs | fn main() {
    // If WORKER_VERSION is set during the build (e.g., in CI),
    // pass it to the rustc compiler.
    if let Ok(version) = std::env::var("WORKER_VERSION") {
        println!("cargo:rustc-env=WORKER_VERSION={version}");
    }
    // Likewise forward an optional default RPC URL baked in at build time.
    if let Ok(rpc_url) = std::env::var("WORKER_RPC_URL") {
        println!("cargo:rustc-env=WORKER_RPC_URL={rpc_url}");
    }
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/lib.rs | crates/worker/src/lib.rs | mod checks;
mod cli;
mod console;
mod docker;
mod metrics;
mod operations;
mod p2p;
mod services;
mod state;
mod utils;
// Public surface of the worker crate: CLI entry points plus logging setup.
pub use cli::execute_command;
pub use cli::Cli;
pub use utils::logging::setup_logging;
/// Shared, mutable registry of spawned background tasks. The binary's `main`
/// pushes handles here and aborts/drains them all on shutdown.
pub type TaskHandles = std::sync::Arc<tokio::sync::Mutex<Vec<tokio::task::JoinHandle<()>>>>;
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/main.rs | crates/worker/src/main.rs | use clap::Parser;
use std::panic;
use std::sync::Arc;
use tokio::signal::unix::{signal, SignalKind};
use tokio::sync::Mutex;
use tokio::task::JoinHandle;
use tokio_util::sync::CancellationToken;
use worker::TaskHandles;
use worker::{execute_command, setup_logging, Cli};
// Worker entry point: wires up logging, a panic hook, Unix signal handling,
// and cooperative shutdown (via CancellationToken) around the CLI command.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Registry of background tasks; everything pushed here is aborted on exit.
    let task_handles: TaskHandles = Arc::new(Mutex::new(Vec::<JoinHandle<()>>::new()));
    let cli = Cli::parse();
    if let Err(e) = setup_logging(Some(&cli)) {
        eprintln!("Warning: Failed to initialize logging: {e}. Using default logging.");
    }
    // Set up panic hook to log panics
    panic::set_hook(Box::new(|panic_info| {
        let location = panic_info
            .location()
            .unwrap_or_else(|| panic::Location::caller());
        // Panic payloads are either &str or String; fall back for anything else.
        let message = match panic_info.payload().downcast_ref::<&str>() {
            Some(s) => *s,
            None => match panic_info.payload().downcast_ref::<String>() {
                Some(s) => s.as_str(),
                None => "Unknown panic payload",
            },
        };
        log::error!(
            "PANIC: '{}' at {}:{}",
            message,
            location.file(),
            location.line()
        );
    }));
    // Listen for the usual termination signals; any one of them cancels the
    // shared token below.
    let mut sigterm = signal(SignalKind::terminate())?;
    let mut sigint = signal(SignalKind::interrupt())?;
    let mut sighup = signal(SignalKind::hangup())?;
    let mut sigquit = signal(SignalKind::quit())?;
    let cancellation_token = CancellationToken::new();
    let signal_token = cancellation_token.clone();
    let command_token = cancellation_token.clone();
    let signal_handle = tokio::spawn(async move {
        tokio::select! {
            _ = sigterm.recv() => {
                log::info!("Received termination signal");
            }
            _ = sigint.recv() => {
                log::info!("Received interrupt signal");
            }
            _ = sighup.recv() => {
                log::info!("Received hangup signal");
            }
            _ = sigquit.recv() => {
                log::info!("Received quit signal");
            }
        }
        signal_token.cancel();
    });
    task_handles.lock().await.push(signal_handle);
    let task_handles_clone = task_handles.clone();
    // Run the command until it finishes or a signal cancels the token.
    tokio::select! {
        cmd_result = execute_command(&cli.command, command_token, task_handles_clone) => {
            if let Err(e) = cmd_result {
                log::error!("Command execution error: {e}");
            }
        }
        _ = cancellation_token.cancelled() => {
            log::info!("Received cancellation request");
        }
    }
    // Shutdown: abort every registered task, then give them up to 5 seconds
    // to wind down before exiting anyway.
    let mut handles = task_handles.lock().await;
    for handle in handles.iter() {
        handle.abort();
    }
    // Wait for all tasks to finish/abort with timeout
    let cleanup = tokio::time::timeout(
        tokio::time::Duration::from_secs(5),
        futures::future::join_all(handles.drain(..)),
    )
    .await;
    match cleanup {
        Ok(_) => (),
        Err(_) => log::warn!("Timeout waiting for tasks to cleanup"),
    }
    Ok(())
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
PrimeIntellect-ai/protocol | https://github.com/PrimeIntellect-ai/protocol/blob/d389f2035ec2c3c485c6b3e270625f86c7609d50/crates/worker/src/services/discovery_updater.rs | crates/worker/src/services/discovery_updater.rs | use crate::services::discovery::DiscoveryService;
use crate::state::system_state::SystemState;
use log::{debug, error, info};
use shared::models::node::Node;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use tokio::time::{sleep, Duration};
use tokio_util::sync::CancellationToken;
// Wait two minutes after startup before the first re-upload, then refresh
// every two minutes thereafter.
const INITIAL_UPDATE_DELAY: Duration = Duration::from_secs(120);
const UPDATE_INTERVAL: Duration = Duration::from_secs(120);
/// Periodically re-uploads this node's discovery information in a background
/// task. All fields are `Arc`s, so clones of this struct share the same
/// running-state flag and cancellation token.
pub(crate) struct DiscoveryUpdater {
    discovery_service: Arc<DiscoveryService>,
    // Set while the auto-update task is alive; guards against double-starts.
    is_running: Arc<AtomicBool>,
    system_state: Arc<SystemState>,
    cancellation_token: Arc<CancellationToken>,
}
impl DiscoveryUpdater {
    /// Wraps the service in `Arc`s with a fresh "not running" flag and
    /// cancellation token.
    pub(crate) fn new(discovery_service: DiscoveryService, system_state: Arc<SystemState>) -> Self {
        Self {
            discovery_service: Arc::new(discovery_service),
            is_running: Arc::new(AtomicBool::new(false)),
            system_state,
            cancellation_token: Arc::new(CancellationToken::new()),
        }
    }
    /// Spawns the periodic discovery re-upload task. Idempotent: a second call
    /// (including on a clone, since the flag is shared) is a no-op while the
    /// task is alive. The task ends when `cancellation_token` is cancelled.
    pub(crate) fn start_auto_update(&self, node_config: Node) {
        if self.is_running.load(Ordering::SeqCst) {
            debug!("Auto update already running, skipping start");
            return;
        }
        // NOTE(review): load-then-store is not atomic; two racing callers
        // could both pass the check. Harmless here beyond a duplicate task —
        // confirm, or use compare_exchange if that matters.
        self.is_running.store(true, Ordering::SeqCst);
        let is_running = self.is_running.clone();
        let discovery_service = self.discovery_service.clone();
        let system_state = self.system_state.clone();
        let cancellation_token = self.cancellation_token.clone();
        tokio::spawn(async move {
            debug!("Starting discovery info auto-update task");
            // Initial delay before first update
            tokio::select! {
                _ = sleep(INITIAL_UPDATE_DELAY) => {},
                _ = cancellation_token.cancelled() => {
                    // Clear the flag so a later start_auto_update can re-spawn.
                    is_running.store(false, Ordering::SeqCst);
                    return;
                }
            }
            while is_running.load(Ordering::SeqCst) {
                // Only re-upload while SystemState reports "not running" —
                // presumably meaning the node is not active in a pool; confirm
                // against SystemState's semantics.
                let should_update = !system_state.is_running().await;
                if should_update {
                    if let Err(e) = discovery_service.upload_discovery_info(&node_config).await {
                        error!("Failed to update discovery info: {e}");
                    } else {
                        info!("Successfully updated discovery info");
                    }
                }
                // Sleep before next check, but check for cancellation
                tokio::select! {
                    _ = sleep(UPDATE_INTERVAL) => {},
                    _ = cancellation_token.cancelled() => {
                        is_running.store(false, Ordering::SeqCst);
                        break;
                    }
                }
            }
            debug!("Discovery info auto-update task finished");
        });
    }
}
impl Clone for DiscoveryUpdater {
fn clone(&self) -> Self {
Self {
discovery_service: self.discovery_service.clone(),
is_running: self.is_running.clone(),
system_state: self.system_state.clone(),
cancellation_token: self.cancellation_token.clone(),
}
}
}
| rust | Apache-2.0 | d389f2035ec2c3c485c6b3e270625f86c7609d50 | 2026-01-04T20:18:07.676063Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.