repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/handlers/completion.rs | src/handlers/completion.rs | use std::collections::HashSet;
use streaming_iterator::StreamingIterator;
use tower_lsp::jsonrpc::Result;
use tower_lsp::lsp_types::{
CompletionItem, CompletionItemKind, CompletionParams, CompletionResponse, CompletionTextEdit,
Documentation, InsertTextFormat, MarkupContent, MarkupKind, Position, Range, TextEdit,
};
use tracing::warn;
use tree_sitter::QueryCursor;
use ts_query_ls::{PredicateParameterArity, PredicateParameterType};
use crate::util::{
CAPTURES_QUERY, NodeUtil, PosUtil, TextProviderRope, get_current_capture_node,
get_language_name_raw, get_scm_files, node_is_or_has_ancestor, uri_to_basename,
};
use crate::{Backend, LspClient, SymbolInfo};
pub async fn completion<C: LspClient>(
backend: &Backend<C>,
params: CompletionParams,
) -> Result<Option<CompletionResponse>> {
let uri = ¶ms.text_document_position.text_document.uri;
let options = backend.options.read().await;
let Some(doc) = backend.document_map.get(uri) else {
warn!("No document found for URI: {uri} when handling completion");
return Ok(None);
};
let rope = &doc.rope;
let tree = &doc.tree;
let language_data = doc
.language_name
.as_ref()
.and_then(|name| backend.language_map.get(name));
let mut position = params.text_document_position.position;
if position.character > 0 {
position.character -= 1;
}
let point = position.to_ts_point(rope);
let query = &CAPTURES_QUERY;
let mut cursor = QueryCursor::new();
let current_node = tree
.root_node()
.named_descendant_for_point_range(point, point)
.unwrap();
// Import completions
if current_node.kind() == "comment" {
if position.line != 0 {
return Ok(None);
}
let Some(inherits) = current_node.text(rope).find("inherits: ") else {
let line_len = rope.line(0).len_utf16_cu() as u32;
return Ok(Some(CompletionResponse::Array(vec![CompletionItem {
label: String::from("inherits: "),
documentation: Some(Documentation::MarkupContent(MarkupContent {
kind: MarkupKind::Markdown,
value: String::from(include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/docs/inherits.md"
))),
})),
kind: Some(CompletionItemKind::KEYWORD),
insert_text_format: Some(InsertTextFormat::SNIPPET),
text_edit: Some(CompletionTextEdit::Edit(TextEdit {
range: Range::new(Position::new(0, 0), Position::new(0, line_len)),
#[allow(clippy::literal_string_with_formatting_args)]
new_text: String::from("; inherits: ${1:foo}"),
})),
..Default::default()
}])));
};
if (position.character as usize) < current_node.start_position().column + inherits + 9 {
return Ok(None);
}
let Ok(path) = uri.to_file_path() else {
return Ok(None);
};
let Some(query_type) = path.file_stem() else {
return Ok(None);
};
return Ok(Some(CompletionResponse::Array(
get_scm_files(&backend.workspace_paths.read().unwrap())
.filter_map(|file| {
if file.file_stem().is_none_or(|stem| stem != query_type) {
return None;
}
get_language_name_raw(&file, &options)
})
.map(|file| CompletionItem {
label: file,
kind: Some(CompletionItemKind::MODULE),
..Default::default()
})
.collect(),
)));
}
// Subtype completions
if params
.context
.as_ref()
.is_some_and(|ctx| ctx.trigger_character == Some("/".to_string()))
|| current_node
.prev_sibling()
.is_some_and(|sib| sib.kind() == "/")
|| (current_node.is_error() && current_node.child(0).is_some_and(|c| c.kind() == "/"))
{
let response = || {
let supertype = current_node.prev_named_sibling()?;
let supertypes = &language_data?.supertype_map;
let subtypes = supertypes.get(&SymbolInfo {
label: supertype.text(rope),
named: true,
})?;
Some(CompletionResponse::Array(
subtypes
.iter()
.map(|sub| CompletionItem {
label: if sub.named {
sub.label.clone()
} else {
format!("\"{}\"", sub.label)
},
kind: Some(if sub.named {
CompletionItemKind::CLASS
} else {
CompletionItemKind::CONSTANT
}),
..Default::default()
})
.collect(),
))
};
return Ok(response());
}
// Predicates and directives completions
let char = position.char(rope);
let cursor_after_hashtag = char == '#';
if cursor_after_hashtag
|| current_node
.prev_sibling()
.is_some_and(|sib| sib.kind() == "#")
{
let predicates = &options.valid_predicates;
let directives = &options.valid_directives;
let range = if cursor_after_hashtag {
Range {
start: position,
end: params.text_document_position.position,
}
} else {
current_node.parent().unwrap().lsp_range(rope)
};
let completions = predicates
.iter()
.map(|(name, pred)| (true, name, pred))
.chain(directives.iter().map(|(name, pred)| (false, name, pred)))
.map(|(is_pred, name, predicate)| {
let label = if is_pred {
format!("#{name}?")
} else {
format!("#{name}!")
};
CompletionItem {
label: label.clone(),
documentation: Some(Documentation::MarkupContent(MarkupContent {
kind: MarkupKind::Markdown,
value: predicate.description.clone(),
})),
kind: Some(CompletionItemKind::FUNCTION),
insert_text_format: Some(InsertTextFormat::SNIPPET),
text_edit: Some(CompletionTextEdit::Edit(TextEdit {
range,
new_text: format!(
"{label} {}",
predicate
.parameters
.iter()
.enumerate()
.filter_map(|(i, param)| {
let i = i + 1;
if param.arity == PredicateParameterArity::Required {
Some(if param.type_ == PredicateParameterType::Capture {
format!("${{{i}:@capture}}")
} else {
format!("${{{i}:text}}")
})
} else {
None
}
})
.collect::<Vec<_>>()
.join(" ")
),
})),
..Default::default()
}
});
return Ok(Some(CompletionResponse::Array(completions.collect())));
}
let mut completion_items = vec![];
// Node and field name completions
let cursor_after_at_sign = char == '@';
let cursor_after_exclamation_point = char == '!';
let root = tree.root_node();
let in_capture = cursor_after_at_sign || node_is_or_has_ancestor(root, current_node, "capture");
let in_predicate = node_is_or_has_ancestor(root, current_node, "predicate");
let in_missing = node_is_or_has_ancestor(root, current_node, "missing_node");
if !in_capture
&& !in_predicate
&& let Some(language_data) = language_data
{
let symbols = &language_data.symbols_vec;
let supertypes = &language_data.supertype_map;
let fields = &language_data.fields_vec;
let in_anon = node_is_or_has_ancestor(root, current_node, "string") && !in_predicate;
let top_level = current_node.kind() == "program";
let in_negated_field = current_node.kind() == "negated_field"
|| cursor_after_exclamation_point
|| (current_node.kind() == "identifier"
&& current_node
.parent()
.is_some_and(|p| p.kind() == "negated_field"));
if in_negated_field {
for field in fields.clone() {
completion_items.push(CompletionItem {
label: field,
kind: Some(CompletionItemKind::FIELD),
..Default::default()
});
}
return Ok(Some(CompletionResponse::Array(completion_items)));
}
if !top_level {
for symbol in symbols {
if (in_anon && !symbol.named) || (!in_anon && symbol.named) {
completion_items.push(CompletionItem {
label: symbol.label.clone(),
kind: if symbol.named {
if supertypes.contains_key(symbol) {
Some(CompletionItemKind::INTERFACE)
} else {
Some(CompletionItemKind::CLASS)
}
} else {
Some(CompletionItemKind::CONSTANT)
},
..Default::default()
});
}
}
}
if !in_missing && !in_anon {
if !top_level {
completion_items.push(CompletionItem {
label: String::from("MISSING"),
kind: Some(CompletionItemKind::KEYWORD),
..Default::default()
});
}
for field in fields {
completion_items.push(CompletionItem {
label: format!("{field}: "),
kind: Some(CompletionItemKind::FIELD),
..Default::default()
});
}
}
}
// Capture completions
if node_is_or_has_ancestor(tree.root_node(), current_node, "string") {
return Ok(Some(CompletionResponse::Array(completion_items)));
}
let mut text_edit = get_current_capture_node(root, point).map_or_else(
|| {
Some(CompletionTextEdit::Edit(TextEdit {
new_text: String::default(),
range: Range {
start: position,
end: params.text_document_position.position,
},
}))
},
|cap_node| {
Some(CompletionTextEdit::Edit(TextEdit {
new_text: String::default(),
range: cap_node.lsp_range(rope),
}))
},
);
if in_predicate {
let Some(pattern_node) = root.child_with_descendant(current_node) else {
return Ok(Some(CompletionResponse::Array(completion_items)));
};
let provider = TextProviderRope(rope);
let mut iter = cursor.matches(query, pattern_node, &provider);
let mut seen = HashSet::new();
let mut text_edit = if in_capture { text_edit } else { None };
while let Some(match_) = iter.next() {
for capture in match_.captures {
let node_text = capture.node.text(rope);
if let Some(CompletionTextEdit::Edit(edit)) = text_edit.as_mut() {
edit.new_text.clone_from(&node_text);
}
let parent_params = capture
.node
.parent()
.is_none_or(|p| p.kind() != "parameters");
if parent_params && !seen.contains(&node_text) {
seen.insert(node_text.clone());
completion_items.push(CompletionItem {
label: node_text.clone(),
kind: Some(CompletionItemKind::VARIABLE),
text_edit: text_edit.clone(),
..Default::default()
});
}
}
}
} else if in_capture
&& let Some(valid_captures) =
uri_to_basename(uri).and_then(|base| options.valid_captures.get(&base))
{
completion_items.extend(valid_captures.iter().map(|cap| {
let label = "@".to_string() + cap.0;
if let Some(CompletionTextEdit::Edit(edit)) = text_edit.as_mut() {
edit.new_text.clone_from(&label);
}
CompletionItem {
label,
documentation: Some(Documentation::MarkupContent(MarkupContent {
kind: MarkupKind::Markdown,
value: cap.1.clone(),
})),
kind: Some(CompletionItemKind::VARIABLE),
text_edit: text_edit.clone(),
..Default::default()
}
}));
}
Ok(Some(CompletionResponse::Array(completion_items)))
}
#[cfg(test)]
mod test {
use std::{
collections::{BTreeMap, HashMap},
sync::LazyLock,
};
use pretty_assertions::assert_eq;
use rstest::rstest;
use tower_lsp::lsp_types::{
CompletionItem, CompletionItemKind, CompletionParams, CompletionResponse,
CompletionTextEdit, Documentation, InsertTextFormat, MarkupContent, MarkupKind,
PartialResultParams, Position, Range, TextDocumentIdentifier, TextDocumentPositionParams,
TextEdit, Url, WorkDoneProgressParams, request::Completion,
};
use ts_query_ls::{
Options, Predicate, PredicateParameter, PredicateParameterArity, PredicateParameterType,
};
use crate::test_helpers::helpers::{
QUERY_TEST_URI, RUST_TEST_URI, TestService, initialize_server,
};
static NODE_COMPLETIONS: LazyLock<Vec<CompletionItem>> = LazyLock::new(|| {
vec![
CompletionItem {
label: String::from("ERROR"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("escape_sequence"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("identifier"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("comment"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("predicate_type"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("program"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("definition"),
kind: Some(CompletionItemKind::INTERFACE),
..Default::default()
},
CompletionItem {
label: String::from("quantifier"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("capture"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("string"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("string_content"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("parameters"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("list"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("grouping"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("missing_node"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("anonymous_node"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("named_node"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("field_definition"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("negated_field"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("predicate"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("MISSING"),
kind: Some(CompletionItemKind::KEYWORD),
..Default::default()
},
]
});
static FIELD_COMPLETIONS: LazyLock<Vec<CompletionItem>> = LazyLock::new(|| {
vec![
CompletionItem {
label: String::from("name"),
kind: Some(CompletionItemKind::FIELD),
..Default::default()
},
CompletionItem {
label: String::from("parameters"),
kind: Some(CompletionItemKind::FIELD),
..Default::default()
},
CompletionItem {
label: String::from("quantifier"),
kind: Some(CompletionItemKind::FIELD),
..Default::default()
},
CompletionItem {
label: String::from("supertype"),
kind: Some(CompletionItemKind::FIELD),
..Default::default()
},
CompletionItem {
label: String::from("type"),
kind: Some(CompletionItemKind::FIELD),
..Default::default()
},
]
});
static SUBTYPE_COMPLETIONS: LazyLock<Vec<CompletionItem>> = LazyLock::new(|| {
vec![
CompletionItem {
label: String::from("anonymous_node"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("field_definition"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("grouping"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("list"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("missing_node"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("named_node"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("predicate"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
]
});
static RUST_SUBTYPE_COMPLETIONS: LazyLock<Vec<CompletionItem>> = LazyLock::new(|| {
vec![
CompletionItem {
label: String::from("\"_\""),
kind: Some(CompletionItemKind::CONSTANT),
..Default::default()
},
CompletionItem {
label: String::from("_literal_pattern"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("captured_pattern"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("const_block"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("generic_pattern"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("identifier"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("macro_invocation"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("mut_pattern"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("or_pattern"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("range_pattern"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("ref_pattern"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("reference_pattern"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("remaining_field_pattern"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("scoped_identifier"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("slice_pattern"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("struct_pattern"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("tuple_pattern"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
CompletionItem {
label: String::from("tuple_struct_pattern"),
kind: Some(CompletionItemKind::CLASS),
..Default::default()
},
]
});
#[rstest]
#[case(
&QUERY_TEST_URI,
r#"((identifier) @constant
(#match? @cons "^[A-Z][A-Z\\d_]*$"))"#,
Position { line: 1, character: 14 },
&Options::default(),
&[CompletionItem {
label: String::from("@constant"),
kind: Some(CompletionItemKind::VARIABLE),
text_edit: Some(CompletionTextEdit::Edit(TextEdit {
new_text: String::from("@constant"),
range: Range {
start: Position { line: 1, character: 9 },
end: Position { line: 1, character: 14 },
}
})),
..Default::default()
}],
)]
#[case(
&QUERY_TEST_URI,
r#"((ident) @constant
(#match? @constant "^[A-Z][A-Z\\d_]*$"))"#,
Position { line: 0, character: 6 },
&Options::default(),
&{
let mut compls = NODE_COMPLETIONS.clone();
compls.extend(FIELD_COMPLETIONS.clone().iter_mut().map(|fc| {
fc.label += ": ";
fc.clone()
}));
compls
},
)]
#[case(
&QUERY_TEST_URI,
r"((constant) @constant
; @co
)
",
Position { line: 1, character: 4 },
&Options::default(),
&[]
)]
#[case(
&QUERY_TEST_URI,
r"(definition/)",
Position { line: 0, character: 12 },
&Options::default(),
&SUBTYPE_COMPLETIONS
)]
#[case(
&QUERY_TEST_URI,
r"(definition/a)",
Position { line: 0, character: 13 },
&Options::default(),
&SUBTYPE_COMPLETIONS
)]
#[case(
&RUST_TEST_URI,
r"(_pattern/)",
Position { line: 0, character: 10 },
&Options::default(),
&RUST_SUBTYPE_COMPLETIONS
)]
#[case(
&QUERY_TEST_URI,
r"(constant) @cons ",
Position { line: 0, character: 13 },
&Options { valid_captures: HashMap::from([(String::from("test"),
BTreeMap::from([(String::from("constant"), String::from("a constant"))]))]),
..Default::default() },
&[
CompletionItem {
label: String::from("@constant"),
kind: Some(CompletionItemKind::VARIABLE),
documentation: Some(tower_lsp::lsp_types::Documentation::MarkupContent(MarkupContent {
kind: MarkupKind::Markdown,
value: String::from("a constant"),
})),
text_edit: Some(CompletionTextEdit::Edit(TextEdit { range: Range { start: Position
{ line: 0, character: 11 }, end: Position { line: 0, character: 16 } },
new_text: String::from("@constant") })),
..Default::default()
},
]
)]
#[case(
&QUERY_TEST_URI,
r"(constant) @ ",
Position { line: 0, character: 12 },
&Options { valid_captures: HashMap::from([(String::from("test"),
BTreeMap::from([(String::from("constant"), String::from("a constant"))]))]),
..Default::default() },
&[
CompletionItem {
label: String::from("@constant"),
kind: Some(CompletionItemKind::VARIABLE),
documentation: Some(tower_lsp::lsp_types::Documentation::MarkupContent(MarkupContent {
kind: MarkupKind::Markdown,
value: String::from("a constant"),
})),
text_edit: Some(CompletionTextEdit::Edit(TextEdit { range: Range { start: Position
{ line: 0, character: 11 }, end: Position { line: 0, character: 12 } },
new_text: String::from("@constant") })),
..Default::default()
},
]
)]
#[case(
&QUERY_TEST_URI,
r"( (constant) @constant (#eq? @) ) ",
Position { line: 0, character: 30 },
&Options { valid_captures: HashMap::from([(String::from("test"),
BTreeMap::from([(String::from("constant"), String::from("a constant"))]))]),
..Default::default() },
&[
CompletionItem {
label: String::from("@constant"),
kind: Some(CompletionItemKind::VARIABLE),
text_edit: Some(CompletionTextEdit::Edit(TextEdit {
range: Range { start: Position { line: 0, character: 29 }, end: Position { line: 0, character: 30 } },
new_text: String::from("@constant") })),
..Default::default()
},
]
)]
#[case(
&QUERY_TEST_URI,
r"( (constant) @constant (#eq? @cons) ) ",
Position { line: 0, character: 34 },
&Options { valid_captures: HashMap::from([(String::from("test"),
BTreeMap::from([(String::from("constant"), String::from("a constant"))]))]),
..Default::default() },
&[
CompletionItem {
label: String::from("@constant"),
kind: Some(CompletionItemKind::VARIABLE),
text_edit: Some(CompletionTextEdit::Edit(TextEdit {
range: Range { start: Position { line: 0, character: 29 }, end: Position { line: 0, character: 34 } },
new_text: String::from("@constant") })),
..Default::default()
},
]
)]
#[case(
&QUERY_TEST_URI,
r"( (constant) @constant (#) ) ",
Position { line: 0, character: 25 },
&Options {
valid_captures: HashMap::from([(String::from("test"),
BTreeMap::from([(String::from("constant"), String::from("a constant"))]))]),
valid_predicates: BTreeMap::from([
(String::from("eq"), Predicate {
description: String::from("Equality check"),
parameters: vec![PredicateParameter {
type_: PredicateParameterType::Capture,
..Default::default()
}, PredicateParameter {
type_: PredicateParameterType::Any,
..Default::default()
}]
})
]),
valid_directives: BTreeMap::from([
(String::from("set"), Predicate {
description: String::from("Set metadata"),
parameters: vec![PredicateParameter {
type_: PredicateParameterType::Any,
..Default::default()
}, PredicateParameter {
type_: PredicateParameterType::String,
..Default::default()
}, PredicateParameter {
type_: PredicateParameterType::String,
arity: PredicateParameterArity::Optional,
..Default::default()
}]
})
]),
..Default::default()
},
&[
CompletionItem {
label: String::from("#eq?"),
kind: Some(CompletionItemKind::FUNCTION),
documentation:
Some(tower_lsp::lsp_types::Documentation::MarkupContent(MarkupContent { kind:
MarkupKind::Markdown, value: String::from("Equality check") })),
insert_text_format: Some(InsertTextFormat::SNIPPET),
text_edit: Some(CompletionTextEdit::Edit(TextEdit {
range: Range { start: Position { line: 0, character: 24 }, end: Position { line: 0, character: 25 } },
new_text: String::from("#eq? ${1:@capture} ${2:text}") })),
..Default::default()
},
CompletionItem {
label: String::from("#not-eq?"),
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | true |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/handlers/rename.rs | src/handlers/rename.rs | use tower_lsp::{
jsonrpc::{self, Result},
lsp_types::{
DocumentChanges, OneOf, OptionalVersionedTextDocumentIdentifier, RenameParams,
TextDocumentEdit, TextEdit, WorkspaceEdit,
},
};
use tracing::warn;
use tree_sitter::QueryCursor;
use crate::{
Backend, LspClient,
util::{
CAPTURES_QUERY, NodeUtil, PosUtil, TextProviderRope, get_current_capture_node,
get_references,
},
};
use super::diagnostic::IDENTIFIER_REGEX;
pub fn rename<C: LspClient>(
backend: &Backend<C>,
params: &RenameParams,
) -> Result<Option<WorkspaceEdit>> {
let uri = ¶ms.text_document_position.text_document.uri;
let Some(doc) = backend.document_map.get(uri) else {
warn!("No document found for URI: {uri} when handling rename");
return Ok(None);
};
let rope = &doc.rope;
let tree = &doc.tree;
let Some(current_node) = get_current_capture_node(
tree.root_node(),
params.text_document_position.position.to_ts_point(rope),
) else {
return Ok(None);
};
let query = &CAPTURES_QUERY;
let mut cursor = QueryCursor::new();
// Allow the new name to begin with "@"
let new_name = params
.new_name
.strip_prefix('@')
.unwrap_or(params.new_name.as_str());
if !IDENTIFIER_REGEX.is_match(new_name) {
return Err(jsonrpc::Error::invalid_params(
"New name is not a valid identifier",
));
}
let provider = TextProviderRope(rope);
let edits = get_references(
&tree.root_node(),
¤t_node,
query,
&mut cursor,
&provider,
rope,
)
.map(|node| {
let mut range = node.lsp_range(rope);
// Don't include the preceding `@`
range.start.character += 1;
OneOf::Left(TextEdit {
range,
new_text: new_name.to_owned(),
})
})
.collect();
Ok(Some(WorkspaceEdit {
document_changes: Some(DocumentChanges::Edits(vec![TextDocumentEdit {
text_document: OptionalVersionedTextDocumentIdentifier {
uri: uri.clone(),
version: doc.version,
},
edits,
}])),
changes: None,
change_annotations: None,
}))
}
#[cfg(test)]
mod test {
use pretty_assertions::assert_eq;
use rstest::rstest;
use tower_lsp::lsp_types::{
DocumentChanges, OneOf, OptionalVersionedTextDocumentIdentifier, Position, RenameParams,
TextDocumentEdit, TextDocumentIdentifier, TextDocumentPositionParams,
WorkDoneProgressParams, WorkspaceEdit, request::Rename,
};
use crate::{
Options,
test_helpers::helpers::{
COMPLEX_FILE, SIMPLE_FILE, TEST_URI, TestEdit, TestService, initialize_server,
},
};
#[rstest]
#[case(
&SIMPLE_FILE,
Position { line: 1, character: 12, },
&[
TestEdit::new("superlongnamehere", (0, 15), (0, 23)),
TestEdit::new("superlongnamehere", (1, 11), (1, 19)),
TestEdit::new("superlongnamehere", (1, 21), (1, 29)),
],
"superlongnamehere",
)]
#[case(
&COMPLEX_FILE,
Position { line: 8, character: 24 },
&[
TestEdit::new("invariant", (8, 25), (8, 42)),
TestEdit::new("invariant", (9, 23), (9, 40)),
TestEdit::new("invariant", (12, 13), (12, 30)),
TestEdit::new("invariant", (18, 17), (18, 34)),
],
"invariant"
)]
#[case(
&COMPLEX_FILE,
Position { line: 8, character: 23 },
// Doesn't rename when cursor is not in capture
&[],
"invariant"
)]
#[tokio::test(flavor = "current_thread")]
async fn server_rename(
#[case] original: &str,
#[case] cursor_position: Position,
#[case] edits: &[TestEdit],
#[case] new_name: &str,
) {
// Arrange
let mut service =
initialize_server(&[(TEST_URI.clone(), original)], &Options::default()).await;
// Act
let rename_edits = service
.request::<Rename>(RenameParams {
text_document_position: TextDocumentPositionParams {
text_document: TextDocumentIdentifier {
uri: TEST_URI.clone(),
},
position: cursor_position,
},
new_name: new_name.to_string(),
work_done_progress_params: WorkDoneProgressParams {
work_done_token: None,
},
})
.await;
// Assert
let expected = if edits.is_empty() {
None
} else {
Some(WorkspaceEdit {
document_changes: Some(DocumentChanges::Edits(vec![TextDocumentEdit {
text_document: OptionalVersionedTextDocumentIdentifier {
uri: TEST_URI.clone(),
version: Some(0),
},
edits: edits.iter().map(|e| OneOf::Left(e.into())).collect(),
}])),
..Default::default()
})
};
assert_eq!(expected, rename_edits);
}
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/handlers/semantic_tokens.rs | src/handlers/semantic_tokens.rs | use std::sync::LazyLock;
use tower_lsp::lsp_types::{
Range, SemanticToken, SemanticTokens, SemanticTokensParams, SemanticTokensRangeParams,
SemanticTokensRangeResult, SemanticTokensResult, Url,
};
use tracing::warn;
use tree_sitter::{Query, QueryCursor, StreamingIterator};
use crate::{
Backend, LspClient, QUERY_LANGUAGE, SymbolInfo,
util::{FORMAT_IGNORE_REGEX, INHERITS_REGEX, NodeUtil, PosUtil, TextProviderRope},
};
static SEM_TOK_QUERY: LazyLock<Query> = LazyLock::new(|| {
Query::new(
&QUERY_LANGUAGE,
r"(named_node (identifier) @ident)
(missing_node (identifier) @ident)
(comment) @comment
",
)
.unwrap()
});
pub fn semantic_tokens_full<C: LspClient>(
backend: &Backend<C>,
params: &SemanticTokensParams,
) -> Option<SemanticTokensResult> {
get_semantic_tokens(backend, ¶ms.text_document.uri, None).map(Into::into)
}
pub fn semantic_tokens_range<C: LspClient>(
backend: &Backend<C>,
params: &SemanticTokensRangeParams,
) -> Option<SemanticTokensRangeResult> {
get_semantic_tokens(backend, ¶ms.text_document.uri, Some(params.range)).map(Into::into)
}
fn get_semantic_tokens<C: LspClient>(
backend: &Backend<C>,
uri: &Url,
range: Option<Range>,
) -> Option<SemanticTokens> {
let Some(doc) = backend.document_map.get(uri) else {
warn!("No document found for URI: {uri} when retrieving semantic tokens");
return None;
};
let mut tokens = Vec::new();
let tree = &doc.tree;
let rope = &doc.rope;
let language_data = doc
.language_name
.as_ref()
.and_then(|name| backend.language_map.get(name));
let supertypes = language_data.as_ref().map(|ld| &ld.supertype_map);
let query = &SEM_TOK_QUERY;
let mut cursor = QueryCursor::new();
if let Some(range) = range {
cursor.set_point_range(range.start.to_ts_point(rope)..range.end.to_ts_point(rope));
}
let provider = TextProviderRope(rope);
let mut matches = cursor.matches(query, tree.root_node(), &provider);
let mut prev_line = 0;
let mut prev_col = 0;
while let Some(match_) = matches.next() {
for cap in match_.captures {
let capture_name = SEM_TOK_QUERY.capture_names()[cap.index as usize];
let node = &cap.node;
let node_text = node.text(rope);
let start_row = node.start_position().row as u32;
let start_col = node.start_position().column as u32;
let delta_line = start_row - prev_line;
let delta_start = if start_row - prev_line == 0 {
start_col - prev_col
} else {
start_col
};
match capture_name {
// Highlight supertypes and ERROR nodes
"ident" => {
// Identifiers are always ASCII characters, meaning they count for the same
// number of code points in each encoding (and we can count them by bytes)
let length = node.byte_range().len() as u32;
if node_text == "ERROR" {
tokens.push(SemanticToken {
delta_line,
delta_start,
length,
token_type: 1,
token_modifiers_bitset: 1,
});
prev_line = start_row;
prev_col = start_col;
} else if supertypes.is_some_and(|supertypes| {
supertypes.contains_key(&SymbolInfo {
label: node_text,
named: true,
})
}) {
tokens.push(SemanticToken {
delta_line,
delta_start,
length,
token_type: 0,
token_modifiers_bitset: 0,
});
prev_line = start_row;
prev_col = start_col;
}
}
// Highlight special comments (inherits, format ignore)
"comment" => {
if let Some(fmt_ignore) = FORMAT_IGNORE_REGEX
.captures(&node_text)
.and_then(|c| c.get(1))
{
const FMT_IGNORE_LEN: u32 = 13;
let offset = fmt_ignore.start() as u32;
tokens.push(SemanticToken {
delta_line,
delta_start: delta_start + offset,
length: FMT_IGNORE_LEN,
token_type: 3,
token_modifiers_bitset: 0,
});
prev_line = start_row;
prev_col = start_col + offset;
continue;
}
if start_row != 0 {
continue;
}
let Some(mods) = INHERITS_REGEX.captures(&node_text).and_then(|c| c.get(1))
else {
continue;
};
let offset = mods.start() as u32;
let mods = mods.as_str().split(',');
// Add a token for `inherits:`
const INHERITS_LEN: u32 = 9;
let mut delta_start = delta_start + offset - INHERITS_LEN - 1;
tokens.push(SemanticToken {
delta_line: 0,
delta_start,
length: INHERITS_LEN,
token_type: 3,
token_modifiers_bitset: 0,
});
delta_start = INHERITS_LEN + 1;
let mut start_col = start_col + offset;
let mut delta_line = delta_line;
for module in mods {
// We assert that modules are valid ASCII characters, so we can index them
// by byte count.
let length = module.len() as u32;
tokens.push(SemanticToken {
delta_line,
delta_start,
length,
token_type: 2,
token_modifiers_bitset: 0,
});
start_col += length + 1;
delta_start = length + 1;
delta_line = 0;
}
prev_line = start_row;
prev_col = start_col;
}
_ => {}
}
}
}
Some(SemanticTokens {
result_id: None,
data: tokens,
})
}
#[cfg(test)]
mod test {
use pretty_assertions::assert_eq;
use tower_lsp::lsp_types::{
PartialResultParams, SemanticToken, SemanticTokens, SemanticTokensParams,
SemanticTokensResult, TextDocumentIdentifier, WorkDoneProgressParams,
request::SemanticTokensFullRequest,
};
use crate::{
Options,
test_helpers::helpers::{QUERY_TEST_URI, TestService, initialize_server},
};
#[tokio::test(flavor = "current_thread")]
async fn semantic_tokens_full() {
// Arrange
let source = r"; inherits: c,cuda
(ERROR) @error (definition) @node (definition) @node
(definition) @node
; Weird
(MISSING ERROR) @missingerror
(MISSING definition) @missingsupertype
;;;format-ignore
(foo)
";
let mut service =
initialize_server(&[(QUERY_TEST_URI.clone(), source)], &Options::default()).await;
// Act
let actual_tokens = service
.request::<SemanticTokensFullRequest>(SemanticTokensParams {
partial_result_params: PartialResultParams {
partial_result_token: None,
},
work_done_progress_params: WorkDoneProgressParams::default(),
text_document: TextDocumentIdentifier {
uri: QUERY_TEST_URI.clone(),
},
})
.await;
// Assert
let expected_tokens = Some(SemanticTokensResult::Tokens(SemanticTokens {
result_id: None,
data: vec![
// ; inherits:
SemanticToken {
delta_line: 0,
delta_start: 2,
length: 9,
token_type: 3,
token_modifiers_bitset: 0,
},
SemanticToken {
delta_line: 0,
delta_start: 10,
length: 1,
token_type: 2,
token_modifiers_bitset: 0,
},
SemanticToken {
delta_line: 0,
delta_start: 2,
length: 4,
token_type: 2,
token_modifiers_bitset: 0,
},
// ERROR, Supertypes
SemanticToken {
delta_line: 2,
delta_start: 1,
length: 5,
token_type: 1,
token_modifiers_bitset: 1,
},
SemanticToken {
delta_line: 0,
delta_start: 15,
length: 10,
token_type: 0,
token_modifiers_bitset: 0,
},
SemanticToken {
delta_line: 0,
delta_start: 19,
length: 10,
token_type: 0,
token_modifiers_bitset: 0,
},
SemanticToken {
delta_line: 2,
delta_start: 1,
length: 10,
token_type: 0,
token_modifiers_bitset: 0,
},
SemanticToken {
delta_line: 3,
delta_start: 9,
length: 5,
token_type: 1,
token_modifiers_bitset: 1,
},
SemanticToken {
delta_line: 2,
delta_start: 9,
length: 10,
token_type: 0,
token_modifiers_bitset: 0,
},
// format-ignore
SemanticToken {
delta_line: 2,
delta_start: 3,
length: 13,
token_type: 3,
token_modifiers_bitset: 0,
},
],
}));
assert_eq!(expected_tokens, actual_tokens);
}
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/handlers/references.rs | src/handlers/references.rs | use tower_lsp::lsp_types::{Location, ReferenceParams};
use tracing::warn;
use tree_sitter::QueryCursor;
use crate::LspClient;
use crate::util::{CAPTURES_QUERY, NodeUtil, PosUtil};
use crate::{
Backend,
util::{TextProviderRope, get_current_capture_node, get_references},
};
pub fn references<C: LspClient>(
backend: &Backend<C>,
params: &ReferenceParams,
) -> Option<Vec<Location>> {
let uri = ¶ms.text_document_position.text_document.uri;
let Some(doc) = backend.document_map.get(uri) else {
warn!("No document found for URI: {uri} when handling references");
return None;
};
let rope = &doc.rope;
let tree = &doc.tree;
let cur_pos = params.text_document_position.position.to_ts_point(rope);
let current_node = get_current_capture_node(tree.root_node(), cur_pos)?;
let include_def = params.context.include_declaration;
let query = &CAPTURES_QUERY;
let mut cursor = QueryCursor::new();
let provider = TextProviderRope(rope);
Some(
get_references(
&tree.root_node(),
¤t_node,
query,
&mut cursor,
&provider,
rope,
)
.filter_map(|node| {
if include_def || node.parent().is_some_and(|p| p.kind() == "parameters") {
Some(Location {
uri: uri.clone(),
range: node.lsp_range(rope),
})
} else {
None
}
})
.collect(),
)
}
#[cfg(test)]
mod test {
use pretty_assertions::assert_eq;
use rstest::rstest;
use tower_lsp::lsp_types::{
Location, PartialResultParams, Position, Range, ReferenceContext, ReferenceParams,
TextDocumentIdentifier, TextDocumentPositionParams, WorkDoneProgressParams,
request::References,
};
use crate::{
Options,
test_helpers::helpers::{COMPLEX_FILE, TEST_URI, TestService, initialize_server},
};
type Coordinate = ((u32, u32), (u32, u32));
#[rstest]
#[case(
"(identifier) @variable",
Position { line: 0, character: 17 },
true,
&[((0, 13), (0, 22))]
)]
#[case(
r#"((identifier) @constant
(#match? @constant "^[A-Z][A-Z\\d_]*$"))"#,
Position { line: 0, character: 17 },
true,
&[((0, 14), (0, 23)), ((1, 9), (1, 18))]
)]
#[case(
r"(type_definition declarator: (type_identifier) @name) @definition.type",
Position { line: 0, character: 61 },
true,
&[((0, 54), (0, 70))]
)]
#[case(
r"(call_expression
function: (identifier) @function)",
Position { line: 0, character: 1 },
true,
&[]
)]
#[case(
&COMPLEX_FILE,
Position { line: 5, character: 25 },
true,
&[((5, 25), (5, 44)), ((11, 15), (11, 34)), ((17, 16), (17, 35))]
)]
#[case(
&COMPLEX_FILE,
Position { line: 12, character: 13 },
false,
&[((12, 12), (12, 30)), ((18, 16), (18, 34))]
)]
#[tokio::test(flavor = "current_thread")]
async fn capture_references(
#[case] input: &str,
#[case] position: Position,
#[case] include_declaration: bool,
#[case] ranges: &[Coordinate],
) {
// Arrange
let mut service =
initialize_server(&[(TEST_URI.clone(), input)], &Options::default()).await;
// Act
let refs = service
.request::<References>(ReferenceParams {
context: ReferenceContext {
include_declaration,
},
partial_result_params: PartialResultParams {
partial_result_token: None,
},
work_done_progress_params: WorkDoneProgressParams::default(),
text_document_position: TextDocumentPositionParams {
text_document: TextDocumentIdentifier {
uri: TEST_URI.clone(),
},
position,
},
})
.await;
// Assert
let expected = if ranges.is_empty() {
None
} else {
Some(
ranges
.iter()
.map(|r| Location {
uri: TEST_URI.clone(),
range: Range {
start: Position {
line: r.0.0,
character: r.0.1,
},
end: Position {
line: r.1.0,
character: r.1.1,
},
},
})
.collect(),
)
};
assert_eq!(expected, refs);
}
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/handlers/selection_range.rs | src/handlers/selection_range.rs | use tower_lsp::lsp_types::{SelectionRange, SelectionRangeParams};
use tracing::warn;
use crate::{
Backend, LspClient,
util::{NodeUtil, PosUtil},
};
pub fn selection_range<C: LspClient>(
backend: &Backend<C>,
params: &SelectionRangeParams,
) -> Option<Vec<SelectionRange>> {
let uri = ¶ms.text_document.uri;
let Some(doc) = backend.document_map.get(uri) else {
warn!("No document found for URI: {uri}");
return None;
};
let tree = &doc.tree;
let rope = &doc.rope;
let mut results = Vec::with_capacity(params.positions.len());
for position in ¶ms.positions {
let ts_point = position.to_ts_point(rope);
let mut node = tree.root_node();
let descendant = node
.named_descendant_for_point_range(ts_point, ts_point)
.unwrap_or(node);
let mut selection_range = SelectionRange {
parent: None,
range: node.lsp_range(rope),
};
while let Some(child) = node.child_with_descendant(descendant) {
node = child;
let range = node.lsp_range(rope);
if range == selection_range.range {
continue;
}
let new_selection_range = SelectionRange {
range,
parent: Some(selection_range.into()),
};
selection_range = new_selection_range;
}
results.push(selection_range);
}
Some(results)
}
#[cfg(test)]
mod test {
use pretty_assertions::assert_eq;
use rstest::rstest;
use tower_lsp::lsp_types::{
PartialResultParams, Position, Range, SelectionRange, TextDocumentIdentifier,
WorkDoneProgressParams,
};
use tower_lsp::lsp_types::{SelectionRangeParams, request::SelectionRangeRequest};
use crate::{
Options,
test_helpers::helpers::{
COMPLEX_FILE, SIMPLE_FILE, TEST_URI, TestService, initialize_server,
},
};
#[rstest]
#[case(
&SIMPLE_FILE,
vec![Position { line: 1, character: 13 }],
Some(vec![vec![
Range::new(Position::new(0, 0), Position::new(3, 0)),
Range::new(Position::new(0, 0), Position::new(1, 31)),
Range::new(Position::new(1, 1), Position::new(1, 30)),
Range::new(Position::new(1, 10), Position::new(1, 29)),
Range::new(Position::new(1, 10), Position::new(1, 19)),
Range::new(Position::new(1, 11), Position::new(1, 19)),
]])
)]
#[case(
&COMPLEX_FILE,
vec![
Position { line: 1, character: 13 },
Position { line: 8, character: 20 },
],
Some(vec![vec![
Range::new(Position::new(0, 0), Position::new(32, 0)),
Range::new(Position::new(0, 0), Position::new(1, 39)),
Range::new(Position::new(1, 2), Position::new(1, 38)),
Range::new(Position::new(1, 9), Position::new(1, 37)),
Range::new(Position::new(1, 9), Position::new(1, 27)),
], vec![
Range::new(Position::new(0, 0), Position::new(32, 0)),
Range::new(Position::new(4, 0), Position::new(18, 43)),
Range::new(Position::new(6, 2), Position::new(10, 3)),
Range::new(Position::new(6, 13), Position::new(10, 3)),
Range::new(Position::new(7, 4), Position::new(8, 43)),
Range::new(Position::new(8, 6), Position::new(8, 42)),
Range::new(Position::new(8, 7), Position::new(8, 22)),
]])
)]
#[tokio::test(flavor = "current_thread")]
async fn server_selection_range(
#[case] document_text: &str,
#[case] positions: Vec<Position>,
#[case] expected_ranges: Option<Vec<Vec<Range>>>,
) {
// Arrange
let mut service =
initialize_server(&[(TEST_URI.clone(), document_text)], &Options::default()).await;
let expected_selection_ranges = if let Some(ranges_list) = expected_ranges {
let mut results = Vec::new();
for ranges in ranges_list {
let mut ranges = ranges.into_iter();
let first = ranges.next().expect("ranges must not be empty");
let result = ranges.fold(
SelectionRange {
range: first,
parent: None,
},
|parent, range| SelectionRange {
range,
parent: Some(parent.into()),
},
);
results.push(result);
}
Some(results)
} else {
None
};
// Act
let selection_ranges = service
.request::<SelectionRangeRequest>(SelectionRangeParams {
text_document: TextDocumentIdentifier {
uri: TEST_URI.clone(),
},
positions,
work_done_progress_params: WorkDoneProgressParams::default(),
partial_result_params: PartialResultParams::default(),
})
.await;
// Assert
assert_eq!(expected_selection_ranges, selection_ranges);
}
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/handlers/did_open.rs | src/handlers/did_open.rs | use std::{
collections::{BTreeSet, HashMap, HashSet},
fs,
path::PathBuf,
};
use dashmap::DashMap;
use ropey::Rope;
use tower_lsp::lsp_types::{DidOpenTextDocumentParams, Url};
use tracing::info;
use tree_sitter::Language;
use crate::{
Backend, DocumentData, ImportedUri, LanguageData, LspClient, Options, SymbolInfo,
util::{get_imported_uris, get_language, get_language_name, parse, push_diagnostics},
};
pub async fn did_open<C: LspClient>(backend: &Backend<C>, params: DidOpenTextDocumentParams) {
let uri = params.text_document.uri;
info!("ts_query_ls did_open: {uri}");
let rope = Rope::from_str(¶ms.text_document.text);
let tree = parse(&rope, None);
let options = backend.options.read().await;
let language_name = get_language_name(&uri, &options);
let workspace_uris = backend.workspace_paths.read().unwrap().clone();
let imported_uris = get_imported_uris(&workspace_uris, &options, &uri, &rope, &tree);
// Track the document
let version = Some(params.text_document.version);
backend.document_map.insert(
uri.clone(),
DocumentData {
rope,
tree,
language_name: language_name.clone(),
version,
imported_uris: imported_uris.clone(),
},
);
populate_import_documents(
&backend.document_map,
&workspace_uris,
&options,
&imported_uris,
);
for import_uri in imported_uris
.into_iter()
.filter_map(|import| import.uri.filter(|url| url != &uri))
{
backend
.dependents
.entry(import_uri)
.or_default()
.insert(uri.clone());
}
populate_language_info(backend, language_name, &options);
push_diagnostics(backend, uri).await;
}
fn populate_language_info<C: LspClient>(
backend: &Backend<C>,
language_name: Option<String>,
options: &Options,
) {
let Some(language_name) = language_name else {
return;
};
if backend.language_map.contains_key(&language_name) {
return;
}
let Some(lang) = get_language(&language_name, options) else {
return;
};
let language_data = init_language_data(lang, language_name.clone()).into();
backend.language_map.insert(language_name, language_data);
}
pub fn init_language_data(language: Language, name: String) -> LanguageData {
let mut symbols_vec: Vec<SymbolInfo> = vec![];
let mut symbols_set: HashSet<SymbolInfo> = HashSet::new();
let mut fields_vec: Vec<String> = vec![];
let mut fields_set: HashSet<String> = HashSet::new();
let mut supertype_map: HashMap<SymbolInfo, BTreeSet<SymbolInfo>> = HashMap::new();
let error_symbol = SymbolInfo {
label: "ERROR".to_owned(),
named: true,
};
symbols_set.insert(error_symbol.clone());
symbols_vec.push(error_symbol);
for i in 0..language.node_kind_count() as u16 {
let supertype = language.node_kind_is_supertype(i);
let named = language.node_kind_is_named(i) || supertype;
let label = if named {
language.node_kind_for_id(i).unwrap().to_owned()
} else {
language
.node_kind_for_id(i)
.unwrap()
.replace('\\', r"\\")
.replace('"', r#"\""#)
.replace('\n', r"\n")
.replace('\r', r"\r")
.replace('\t', r"\t")
.replace('\0', r"\0")
};
let symbol_info = SymbolInfo { label, named };
if supertype {
supertype_map.insert(
symbol_info.clone(),
language
.subtypes_for_supertype(i)
.iter()
.map(|s| SymbolInfo {
label: language.node_kind_for_id(*s).unwrap().to_string(),
named: language.node_kind_is_named(*s)
|| language.node_kind_is_supertype(*s),
})
.collect(),
);
}
if symbols_set.contains(&symbol_info) || !(language.node_kind_is_visible(i) || supertype) {
continue;
}
symbols_set.insert(symbol_info.clone());
symbols_vec.push(symbol_info);
}
// Field IDs go from 1 to nfields inclusive (extra index 0 maps to NULL)
for i in 1..=language.field_count() as u16 {
let field_name = language.field_name_for_id(i).unwrap().to_owned();
if !fields_set.contains(&field_name) {
fields_set.insert(field_name.clone());
fields_vec.push(field_name);
}
}
LanguageData {
name,
symbols_set,
symbols_vec,
fields_set,
fields_vec,
supertype_map,
language,
}
}
pub fn populate_import_documents(
document_map: &DashMap<Url, DocumentData>,
workspace_dirs: &[PathBuf],
options: &Options,
imported_uris: &Vec<ImportedUri>,
) {
for imported_uri in imported_uris {
if let Some(uri) = &imported_uri.uri
&& !document_map.contains_key(uri)
&& let Ok(contents) = uri
.to_file_path()
.and_then(|path| fs::read_to_string(path).map_err(|_| ()))
{
let rope = Rope::from_str(&contents);
let tree = parse(&rope, None);
let nested_imported_uris =
get_imported_uris(workspace_dirs, options, uri, &rope, &tree);
document_map.insert(
uri.clone(),
DocumentData {
rope,
tree,
language_name: None,
version: None,
imported_uris: nested_imported_uris.clone(),
},
);
populate_import_documents(document_map, workspace_dirs, options, &nested_imported_uris);
}
}
}
#[cfg(test)]
mod test {
use pretty_assertions::assert_eq;
use tower_lsp::lsp_types::{
Diagnostic, DiagnosticSeverity, DidOpenTextDocumentParams, NumberOrString,
PublishDiagnosticsParams, TextDocumentItem, Url,
notification::{DidOpenTextDocument, PublishDiagnostics},
};
use crate::{
Options,
test_helpers::helpers::{MockRequest, TEST_URI, TestService, initialize_server},
};
#[tokio::test(flavor = "current_thread")]
async fn server_did_open_document() {
// Arrange
let mut service = initialize_server(&[], &Options::default()).await;
let source = r#""[" @cap"#;
// Act
service
.notify::<DidOpenTextDocument>(DidOpenTextDocumentParams {
text_document: TextDocumentItem {
uri: TEST_URI.clone(),
language_id: String::from("query"),
version: 0,
text: String::from(source),
},
})
.await;
// Assert
let doc = service.inner().document_map.get(&TEST_URI).unwrap();
let doc_rope = &doc.rope;
assert_eq!(doc_rope.to_string(), source);
let tree = &doc.tree;
assert_eq!(
tree.root_node().utf8_text(source.as_bytes()).unwrap(),
doc_rope.to_string()
);
assert_eq!(
vec![MockRequest::from_notification::<PublishDiagnostics>(
PublishDiagnosticsParams {
version: Some(0),
uri: Url::parse("file:///tmp/queries/js/test.scm").unwrap(),
diagnostics: vec![Diagnostic {
message: String::from("Language object for \"js\" not found"),
severity: Some(DiagnosticSeverity::WARNING),
code: Some(NumberOrString::String("no-language-object".into())),
..Default::default()
}]
}
)],
service.inner().client.get_notifications()
);
}
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/handlers/hover.rs | src/handlers/hover.rs | use std::{collections::HashMap, sync::LazyLock};
use tower_lsp::{
jsonrpc::Result,
lsp_types::{Hover, HoverContents, HoverParams, MarkupContent, MarkupKind, Position, Range},
};
use tracing::warn;
use tree_sitter::Query;
use ts_query_ls::{ParameterConstraint, PredicateParameterType};
use crate::{
Backend, LspClient, QUERY_LANGUAGE, SymbolInfo,
util::{
FORMAT_IGNORE_REGEX, INHERITS_REGEX, NodeUtil, PosUtil, capture_at_pos,
get_imported_module_under_cursor, remove_unnecessary_escapes, uri_to_basename,
},
};
static HOVER_QUERY: LazyLock<Query> = LazyLock::new(|| {
Query::new(
&QUERY_LANGUAGE,
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/queries/query/hover.scm"
)),
)
.unwrap()
});
/// Create a static hashmap from doc name to doc file (found in "docs/<name>.md")
macro_rules! include_docs_map {
($($name:literal),* $(,)?) => {
LazyLock::new(|| {
HashMap::from([$(
($name, include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/docs/", $name, ".md"))),
)*])
})
};
}
static DOCS: LazyLock<HashMap<&'static str, &'static str>> = include_docs_map!(
"missing",
"wildcard",
"anchor",
"quantifier",
"alternation",
"error",
"negation",
"inherits",
"format-ignore",
);
pub async fn hover<C: LspClient>(
backend: &Backend<C>,
params: HoverParams,
) -> Result<Option<Hover>> {
let uri = ¶ms.text_document_position_params.text_document.uri;
let position = params.text_document_position_params.position;
let options = backend.options.read().await;
let Some(doc) = backend.document_map.get(uri) else {
warn!("No document found for URI: {uri} when handling hover");
return Ok(None);
};
let tree = &doc.tree;
let rope = &doc.rope;
let language_data = doc
.language_name
.as_ref()
.and_then(|name| backend.language_map.get(name));
let supertypes = language_data.as_ref().map(|ld| &ld.supertype_map);
let Some(capture) = capture_at_pos(tree, rope, &HOVER_QUERY, position.to_ts_point(rope)) else {
return Ok(None);
};
let capture_name = HOVER_QUERY.capture_names()[capture.index as usize];
let capture_text = capture.node.text(rope);
let range = Some(capture.node.lsp_range(rope));
Ok(match capture_name {
doc_name if DOCS.contains_key(capture_name) => {
let value = (*DOCS.get(doc_name).unwrap()).to_string();
Some(Hover {
range,
contents: HoverContents::Markup(MarkupContent {
kind: MarkupKind::Markdown,
value,
}),
})
}
"capture" => {
let options = backend.options.read().await;
if let Some(description) = uri_to_basename(uri).and_then(|base| {
options
.valid_captures
.get(&base)
.and_then(|c| c.get(&capture_text[1..].to_string()))
}) {
let value = format!("## `{capture_text}`\n\n{description}");
Some(Hover {
range,
contents: HoverContents::Markup(MarkupContent {
kind: MarkupKind::Markdown,
value,
}),
})
} else {
None
}
}
"identifier.node" => {
let sym = SymbolInfo {
label: capture_text,
named: true,
};
if let Some(subtypes) = supertypes.and_then(|supertypes| supertypes.get(&sym)) {
let value = if subtypes.is_empty() {
String::from("Subtypes could not be determined (parser ABI < 15)")
} else {
subtypes.iter().fold(
format!("Subtypes of `({})`:\n\n```query", sym.label),
|acc, subtype| format!("{acc}\n{subtype}"),
) + "\n```"
};
Some(Hover {
range,
contents: HoverContents::Markup(MarkupContent {
kind: MarkupKind::Markdown,
value,
}),
})
} else if let Some(language) = language_data.as_ref().map(|ld| &ld.language) {
let syms = (0..language.node_kind_count() as u16)
.filter(|&id| {
if !(language.node_kind_is_visible(id)
|| language.node_kind_is_supertype(id))
|| !language.node_kind_is_named(id)
{
return false;
}
language
.node_kind_for_id(id)
.is_some_and(|kind| kind == sym.label)
})
.collect::<Vec<_>>();
if syms.is_empty() {
None
} else {
Some(Hover {
contents: HoverContents::Markup(MarkupContent {
kind: MarkupKind::Markdown,
value: format!(
"Symbol IDs: {}",
syms.iter()
.map(ToString::to_string)
.collect::<Vec<_>>()
.join(", ")
),
}),
range,
})
}
} else {
None
}
}
"anonymous" => {
if let Some(language) = language_data.as_ref().map(|ld| &ld.language) {
let string_content =
remove_unnecessary_escapes(&capture_text[1..capture_text.len() - 1]);
let syms = (0..language.node_kind_count() as u16)
.filter(|&id| {
if !language.node_kind_is_visible(id)
|| language.node_kind_is_supertype(id)
|| language.node_kind_is_named(id)
{
return false;
}
language
.node_kind_for_id(id)
.is_some_and(|kind| kind == string_content)
})
.collect::<Vec<_>>();
if syms.is_empty() {
None
} else {
Some(Hover {
contents: HoverContents::Markup(MarkupContent {
kind: MarkupKind::Markdown,
value: format!(
"Symbol IDs: {}",
syms.iter()
.map(ToString::to_string)
.collect::<Vec<_>>()
.join(", ")
),
}),
range,
})
}
} else {
None
}
}
"field" => {
if let Some(language) = language_data.as_ref().map(|ld| &ld.language) {
let sym = (1..=language.field_count() as u16).find(|&id| {
language
.field_name_for_id(id)
.is_some_and(|name| name == capture_text)
});
sym.map(|sym| Hover {
contents: HoverContents::Markup(MarkupContent {
kind: MarkupKind::Markdown,
value: format!("Field ID: {sym}"),
}),
range,
})
} else {
None
}
}
"predicate" => {
let parent = capture
.node
.parent()
.expect("Should be children of the `(predicate)` node");
let (Some(predicate_name), Some(predicate_type)) =
(parent.named_child(0), parent.named_child(1))
else {
return Ok(None);
};
let validator = if predicate_type.text(rope) == "?" {
&options.valid_predicates
} else {
&options.valid_directives
};
let mut range = predicate_name.lsp_range(rope);
// Include # and ? in the range
range.start.character -= 1;
range.end.character += 1;
if let Some(predicate) = validator.get(&predicate_name.text(rope)) {
let mut value = format!("{}\n\n---\n\n## Parameters:\n\n", predicate.description);
for param in &predicate.parameters {
value += format!(
"- Type: `{}` ({}{})\n",
param.type_,
param.arity,
if param.type_ != PredicateParameterType::Capture
&& param.constraint != ParameterConstraint::None
{
format!("; constraint: {}", param.constraint)
} else {
String::new()
}
)
.as_str();
if let Some(desc) = ¶m.description {
value += format!(" - {desc}\n").as_str();
}
}
Some(Hover {
range: Some(range),
contents: HoverContents::Markup(MarkupContent {
kind: MarkupKind::Markdown,
value,
}),
})
} else {
None
}
}
"comment" => {
if position.line == 0 && INHERITS_REGEX.is_match(&capture_text) {
if let Some(module) = get_imported_module_under_cursor(&doc, position) {
let range = Some(Range::new(
Position::new(0, module.start_col),
Position::new(0, module.end_col),
));
let hover_content = if let Some(import_doc) = module
.uri
.as_ref()
.and_then(|uri| backend.document_map.get(uri))
{
let doc_text = import_doc.rope.to_string();
format!("```query\n{doc_text}\n```")
} else {
String::from("*Document not found*")
};
return Ok(Some(Hover {
range,
contents: HoverContents::Markup(MarkupContent {
kind: MarkupKind::Markdown,
value: hover_content,
}),
}));
}
return Ok(Some(Hover {
range,
contents: HoverContents::Markup(MarkupContent {
kind: MarkupKind::Markdown,
value: (*DOCS.get("inherits").unwrap()).to_string(),
}),
}));
}
if FORMAT_IGNORE_REGEX.is_match(&capture_text) {
Some(Hover {
range,
contents: HoverContents::Markup(MarkupContent {
kind: MarkupKind::Markdown,
value: (*DOCS.get("format-ignore").unwrap()).to_string(),
}),
})
} else {
None
}
}
_ => None,
})
}
#[cfg(test)]
mod test {
use std::collections::{BTreeMap, HashMap};
use ts_query_ls::{
Options, ParameterConstraint, Predicate, PredicateParameter, PredicateParameterArity,
PredicateParameterType,
};
use pretty_assertions::assert_eq;
use rstest::rstest;
use tower_lsp::lsp_types::{
Hover, HoverContents, HoverParams, MarkupContent, MarkupKind, Position, Range,
TextDocumentIdentifier, TextDocumentPositionParams, WorkDoneProgressParams,
request::HoverRequest,
};
use crate::test_helpers::helpers::{QUERY_TEST_URI, TestService, initialize_server};
const SOURCE: &str = r"(ERROR) @error (definition) @node
(definition/test) @node
(MISSING definition) @node
(_) @any
_ @any
(function . (identifier)?)
(function (identifier)+)* @cap
[ (number) (boolean) ] @const
((number) @const (.set! foo bar))
(identifier !fieldname)
((number) @const (#eq? @const self))
";
#[rstest]
#[case(SOURCE, Position { line: 0, character: 2 }, Range::new(
Position { line: 0, character: 1 },
Position { line: 0, character: 6 } ),
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/docs/error.md"
)), BTreeMap::default())]
#[case(SOURCE, Position { line: 4, character: 4 }, Range::new(
Position { line: 4, character: 1 },
Position { line: 4, character: 8 } ),
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/docs/missing.md"
)), BTreeMap::default())]
#[case(SOURCE, Position { line: 6, character: 1 }, Range::new(
Position { line: 6, character: 1 },
Position { line: 6, character: 2 } ),
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/docs/wildcard.md"
)), BTreeMap::default())]
#[case(SOURCE, Position { line: 7, character: 0 }, Range::new(
Position { line: 7, character: 0 },
Position { line: 7, character: 1 } ),
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/docs/wildcard.md"
)), BTreeMap::default())]
#[case(SOURCE, Position { line: 0, character: 17 }, Range::new(
Position { line: 0, character: 16 },
Position { line: 0, character: 26 } ),
r"Subtypes of `(definition)`:
```query
(anonymous_node)
(field_definition)
(grouping)
(list)
(missing_node)
(named_node)
(predicate)
```", BTreeMap::default())]
#[case(SOURCE, Position { line: 2, character: 4 }, Range::new(
Position { line: 2, character: 1 },
Position { line: 2, character: 11 } ),
r"Subtypes of `(definition)`:
```query
(anonymous_node)
(field_definition)
(grouping)
(list)
(missing_node)
(named_node)
(predicate)
```", BTreeMap::default())]
#[case(SOURCE, Position { line: 4, character: 10 }, Range::new(
Position { line: 4, character: 9 },
Position { line: 4, character: 19 } ),
r"Subtypes of `(definition)`:
```query
(anonymous_node)
(field_definition)
(grouping)
(list)
(missing_node)
(named_node)
(predicate)
```", BTreeMap::default())]
#[case(SOURCE, Position { line: 0, character: 10 }, Range::new(
Position { line: 0, character: 8 },
Position { line: 0, character: 14 } ),
r"## `@error`
An error node", BTreeMap::from([(String::from("error"), String::from("An error node"))]))]
#[case(SOURCE, Position { line: 9, character: 10 }, Range::new(
Position { line: 9, character: 10 },
Position { line: 9, character: 11 } ),
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/docs/anchor.md"
)), BTreeMap::from([(String::from("error"), String::from("An error node"))]))]
#[case(SOURCE, Position { line: 9, character: 24 }, Range::new(
Position { line: 9, character: 24 },
Position { line: 9, character: 25 } ),
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/docs/quantifier.md"
)), BTreeMap::from([(String::from("error"), String::from("An error node"))]))]
#[case(SOURCE, Position { line: 11, character: 24 }, Range::new(
Position { line: 11, character: 24 },
Position { line: 11, character: 25 } ),
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/docs/quantifier.md"
)), BTreeMap::from([(String::from("error"), String::from("An error node"))]))]
#[case(SOURCE, Position { line: 11, character: 22 }, Range::new(
Position { line: 11, character: 22 },
Position { line: 11, character: 23 } ),
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/docs/quantifier.md"
)), BTreeMap::from([(String::from("error"), String::from("An error node"))]))]
#[case(SOURCE, Position { line: 13, character: 0 }, Range::new(
Position { line: 13, character: 0 },
Position { line: 13, character: 1 } ),
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/docs/alternation.md"
)), BTreeMap::from([(String::from("error"), String::from("An error node"))]))]
#[case(SOURCE, Position { line: 13, character: 21 }, Range::new(
Position { line: 13, character: 21 },
Position { line: 13, character: 22 } ),
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/docs/alternation.md"
)), BTreeMap::from([(String::from("error"), String::from("An error node"))]))]
#[case(SOURCE, Position { line: 15, character: 18 }, Range {
start: Position::new(15, 18),
end: Position::new(15, 23)
},
"Set a property\n\n---\n\n## Parameters:\n\n- Type: `string` (required; constraint: `[\"here\", \"there\"]`)\n - A property\n", BTreeMap::default())]
#[case(SOURCE, Position { line: 15, character: 22 }, Range {
start: Position::new(15, 18),
end: Position::new(15, 23)
},
"Set a property\n\n---\n\n## Parameters:\n\n- Type: `string` (required; constraint: `[\"here\", \"there\"]`)\n - A property\n", BTreeMap::default())]
#[case(SOURCE, Position { line: 15, character: 23 }, Range::default(), "", BTreeMap::default())]
#[case(SOURCE, Position { line: 15, character: 21 }, Range {
start: Position::new(15, 18),
end: Position::new(15, 23)
},
"Set a property\n\n---\n\n## Parameters:\n\n- Type: `string` (required; constraint: `[\"here\", \"there\"]`)\n - A property\n", BTreeMap::default())]
#[case(SOURCE, Position { line: 17, character: 12 }, Range {
start: Position::new(17, 12),
end: Position::new(17, 13),
},
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/docs/negation.md"
)), BTreeMap::default())]
#[case(SOURCE, Position { line: 19, character: 18 }, Range {
start: Position::new(19, 18),
end: Position::new(19, 22)
},
"Check for equality\n\n---\n\n## Parameters:\n\n- Type: `capture` (required)\n - A capture\n- Type: `string` (required)\n - A string\n", BTreeMap::default())]
#[case(";;; inherits: foo", Position { line: 0, character: 2 }, Range {
start: Position::new(0, 0),
end: Position::new(0, 17)
},
"## Inheriting queries\n\n```query\n; inherits: foo,bar\n```\n\nQueries can inherit other queries if they have an `; inherits:` comment as the\nfirst line of the query file. The language server will then act as though the\ntext of the inherited query files was placed at the top of the document, and\nwill provide diagnostics for the text in those queries as well (calculated with\nthe language information of the parent query). Queries will always inherit\nothers of the same type (e.g. a `highlights.scm` will only import other\n`highlights.scm`, never an `injections.scm`).\n\nNote that the syntax is very sensitive; there must be _exactly one_ space after\nthe `inherits:` keyword, and there must be no spaces in-between module names.\n", BTreeMap::default())]
#[case("
;;; inherits: foo", Position { line: 1, character: 2 }, Range {
start: Position::new(0, 0),
end: Position::new(0, 17)
},
"", BTreeMap::default())]
#[case(";;; format-ignore", Position { line: 0, character: 2 }, Range {
start: Position::new(0, 0),
end: Position::new(0, 17)
},
"## `; format-ignore`\n\nThe formatter will ignore nodes that are preceeded by a comment starting with\n`format-ignore`.\n\n```query\n((call_expression\n function: (identifier) @function.builtin)\n ; format-ignore\n (#any-of? @function.builtin\n \"printf\" \"printf_s\"\n \"vprintf\" \"vprintf_s\"\n \"scanf\" \"scanf_s\"\n \"vscanf\" \"vscanf_s\"\n \"wprintf\" \"wprintf_s\"\n \"vwprintf\" \"vwprintf_s\"\n \"wscanf\" \"wscanf_s\"\n \"vwscanf\" \"vwscanf_s\"\n \"cscanf\" \"_cscanf\"\n \"printw\"\n \"scanw\"))\n```\n", BTreeMap::default())]
#[case("; inherits: cpp", Position { line: 0, character: 13 }, Range {
start: Position::new(0, 12),
end: Position::new(0, 15)
},
"```query\n; test query\n\n(squid)\n\n```", BTreeMap::default())]
#[case("; inherits: squidward", Position { line: 0, character: 13 }, Range {
start: Position::new(0, 12),
end: Position::new(0, 21)
},
"*Document not found*", BTreeMap::default())]
#[case("(named_node)", Position { line: 0, character: 4 }, Range {
start: Position::new(0, 1),
end: Position::new(0, 11)
}, "Symbol IDs: 40", BTreeMap::default())]
#[case("(MISSING identifier)", Position { line: 0, character: 12 }, Range {
start: Position::new(0, 9),
end: Position::new(0, 19)
}, "Symbol IDs: 6, 7", BTreeMap::default())]
#[case("(definition/named_node)", Position { line: 0, character: 12 }, Range {
start: Position::new(0, 12),
end: Position::new(0, 22)
}, "Symbol IDs: 40", BTreeMap::default())]
#[case("\"MISSING\"", Position { line: 0, character: 0 }, Range {
start: Position::new(0, 0),
end: Position::new(0, 9)
}, "Symbol IDs: 18", BTreeMap::default())]
#[case(r#""MISSING""#, Position { line: 0, character: 2 }, Range {
start: Position::new(0, 0),
end: Position::new(0, 9)
}, "Symbol IDs: 18", BTreeMap::default())]
#[case(r#"(MISSING "MISSING")"#, Position { line: 0, character: 12 }, Range {
start: Position::new(0, 9),
end: Position::new(0, 18)
}, "Symbol IDs: 18", BTreeMap::default())]
#[case(r#"(definition/"MISSING")"#, Position { line: 0, character: 14 }, Range {
start: Position::new(0, 12),
end: Position::new(0, 21)
}, "Symbol IDs: 18", BTreeMap::default())]
#[case(r#""MIS\SING""#, Position { line: 0, character: 4 }, Range {
start: Position::new(0, 0),
end: Position::new(0, 10)
}, "Symbol IDs: 18", BTreeMap::default())]
#[case("(missing_node name: (identifier) @variable !type)", Position { line: 0, character: 15 }, Range {
start: Position::new(0, 14),
end: Position::new(0, 18)
}, "Field ID: 1", BTreeMap::default())]
#[case("(missing_node name: (identifier) @variable !type)", Position { line: 0, character: 46 }, Range {
start: Position::new(0, 44),
end: Position::new(0, 48)
}, "Field ID: 5", BTreeMap::default())]
#[tokio::test(flavor = "current_thread")]
async fn hover(
#[case] source: &str,
#[case] position: Position,
#[case] range: Range,
#[case] hover_content: &str,
#[case] captures: BTreeMap<String, String>,
) {
// Arrange
let mut service = initialize_server(
&[(QUERY_TEST_URI.clone(), source)],
&Options {
valid_captures: HashMap::from([(String::from("test"), captures)]),
valid_predicates: BTreeMap::from([(
String::from("eq"),
Predicate {
description: String::from("Check for equality"),
parameters: vec![
PredicateParameter {
description: Some(String::from("A capture")),
type_: PredicateParameterType::Capture,
arity: PredicateParameterArity::Required,
constraint: ParameterConstraint::None,
},
PredicateParameter {
description: Some(String::from("A string")),
type_: PredicateParameterType::String,
arity: PredicateParameterArity::Required,
constraint: ParameterConstraint::None,
},
],
},
)]),
valid_directives: BTreeMap::from([(
String::from("set"),
Predicate {
description: String::from("Set a property"),
parameters: vec![PredicateParameter {
description: Some(String::from("A property")),
type_: PredicateParameterType::String,
arity: PredicateParameterArity::Required,
constraint: ParameterConstraint::Enum(vec![
String::from("here"),
String::from("there"),
]),
}],
},
)]),
..Default::default()
},
)
.await;
// Act
let tokens = service
.request::<HoverRequest>(HoverParams {
text_document_position_params: TextDocumentPositionParams {
text_document: TextDocumentIdentifier {
uri: QUERY_TEST_URI.clone(),
},
position,
},
work_done_progress_params: WorkDoneProgressParams::default(),
})
.await;
// Assert
let expected = if hover_content.is_empty() {
None
} else {
Some(Hover {
range: Some(range),
contents: HoverContents::Markup(MarkupContent {
kind: MarkupKind::Markdown,
value: String::from(hover_content),
}),
})
};
assert_eq!(expected, tokens,);
}
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/handlers/initialize.rs | src/handlers/initialize.rs | use std::str::FromStr;
use tower_lsp::jsonrpc::Result;
use tower_lsp::lsp_types::{InitializeParams, InitializeResult, ServerInfo, Url};
use tracing::info;
use crate::util::set_configuration_options;
use crate::{Backend, LspClient, SERVER_CAPABILITIES};
pub async fn initialize<C: LspClient>(
backend: &Backend<C>,
params: InitializeParams,
) -> Result<InitializeResult> {
info!("ts_query_ls initialized");
if let Ok(mut ws_uris) = backend.workspace_paths.write() {
#[allow(deprecated)]
if let Some(ws_folders) = params.workspace_folders {
ws_uris.extend(
ws_folders
.into_iter()
.filter_map(|folder| folder.uri.to_file_path().ok()),
);
} else if let Some(root_uri) = params
.root_uri
.or_else(|| {
params
.root_path
.and_then(|p| Url::from_str(p.as_str()).ok())
})
.and_then(|uri| uri.to_file_path().ok())
{
ws_uris.push(root_uri);
}
}
{
let mut client_capabilities = backend.client_capabilities.write().await;
*client_capabilities = params.capabilities;
}
set_configuration_options(
backend,
params.initialization_options,
backend
.workspace_paths
.read()
.map(|r| r.to_vec())
.unwrap_or_default(),
)
.await;
Ok(InitializeResult {
capabilities: SERVER_CAPABILITIES.clone(),
server_info: Some(ServerInfo {
name: String::from("ts_query_ls"),
version: Some(env!("CARGO_PKG_VERSION").to_string()),
}),
})
}
#[cfg(test)]
mod test {
use std::{env, sync::Arc};
use dashmap::DashMap;
use pretty_assertions::assert_eq;
use tower_lsp::{
LspService,
lsp_types::{
ClientCapabilities, InitializeParams, InitializeResult, ServerInfo, Url,
request::Initialize,
},
};
use crate::{
Backend, Options, SERVER_CAPABILITIES,
test_helpers::helpers::{MockClient, TestService},
};
#[tokio::test(flavor = "current_thread")]
async fn server_initialize() {
// Arrange
let (mut service, _socket) = LspService::build(|_client| Backend {
client: MockClient::default(),
client_capabilities: Arc::default(),
document_map: DashMap::default(),
language_map: DashMap::default(),
workspace_paths: Arc::default(),
dependents: DashMap::default(),
options: Arc::default(),
})
.finish();
unsafe { env::set_var("HOME", "/home/jdoe") };
let options = r#"
{
"parser_aliases": {
"ecma": "javascript",
"jsx": "javascript",
"foolang": "barlang"
},
"parser_install_directories": [
"${HOME}/my/directory/",
"/$tmp/tree-sitter/parsers/"
],
"language_retrieval_patterns": [
"\\.ts\\-([^/]+)\\-parser\\.wasm"
],
"valid_captures": {
"highlights": {
"variable": "Simple identifiers",
"variable.parameter": "Parameters of functions"
}
}
}
"#;
// Act
let init_result = service
.request::<Initialize>(InitializeParams {
capabilities: ClientCapabilities::default(),
root_uri: Some(Url::parse("file:///tmp/").unwrap()),
initialization_options: Some(serde_json::from_str(options).unwrap()),
..Default::default()
})
.await;
// Assert
assert_eq!(
init_result,
InitializeResult {
capabilities: SERVER_CAPABILITIES.clone(),
server_info: Some(ServerInfo {
name: String::from("ts_query_ls"),
version: Some(String::from("3.15.1")),
}),
}
);
let backend = service.inner();
let actual_options = backend.options.read().await;
let mut expected_options = serde_json::from_str::<Options>(options).unwrap();
// Test that env vars are correctly substituted
expected_options.parser_install_directories = vec![
String::from("/home/jdoe/my/directory/"),
String::from("/$tmp/tree-sitter/parsers/"),
];
assert_eq!(
actual_options.parser_aliases,
expected_options.parser_aliases
);
assert_eq!(
actual_options.parser_install_directories,
expected_options.parser_install_directories
);
assert_eq!(
actual_options.language_retrieval_patterns,
expected_options.language_retrieval_patterns
);
assert_eq!(
actual_options.valid_captures,
expected_options.valid_captures
);
}
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/handlers/did_save.rs | src/handlers/did_save.rs | use tower_lsp::lsp_types::DidSaveTextDocumentParams;
use tracing::info;
use crate::{Backend, LspClient};
pub fn did_save<C: LspClient>(_: &Backend<C>, params: DidSaveTextDocumentParams) {
let uri = params.text_document.uri;
info!("ts_query_ls saved document with URI: {uri}");
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/handlers/mod.rs | src/handlers/mod.rs | pub mod code_action;
pub mod completion;
pub mod diagnostic;
pub mod did_change;
pub mod did_change_configuration;
pub mod did_close;
pub mod did_open;
pub mod did_save;
pub mod document_highlight;
pub mod document_symbol;
pub mod formatting;
pub mod goto_definition;
pub mod hover;
pub mod initialize;
pub mod references;
pub mod rename;
pub mod selection_range;
pub mod semantic_tokens;
pub mod shutdown;
pub mod workspace_symbol;
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/handlers/document_highlight.rs | src/handlers/document_highlight.rs | use std::sync::LazyLock;
use streaming_iterator::StreamingIterator;
use tower_lsp::lsp_types::{DocumentHighlight, DocumentHighlightKind, DocumentHighlightParams};
use tracing::warn;
use tree_sitter::{Query, QueryCursor};
use crate::util::{CAPTURES_QUERY, NodeUtil, PosUtil, TextProviderRope, get_references};
use crate::{Backend, LspClient, QUERY_LANGUAGE};
static IDENT_QUERY: LazyLock<Query> =
LazyLock::new(|| Query::new(&QUERY_LANGUAGE, "(identifier) @name").unwrap());
pub fn document_highlight<C: LspClient>(
backend: &Backend<C>,
params: &DocumentHighlightParams,
) -> Option<Vec<DocumentHighlight>> {
let uri = ¶ms.text_document_position_params.text_document.uri;
let Some(doc) = backend.document_map.get(uri) else {
warn!("No document found for URI: {uri} when retrieving document highlights");
return None;
};
let rope = &doc.rope;
let tree = &doc.tree;
let cur_pos = params
.text_document_position_params
.position
.to_ts_point(rope);
// Get the current node: if we are in a capture's identifier, move the current node to the
// entire capture
let current_node = tree
.root_node()
.named_descendant_for_point_range(cur_pos, cur_pos)
.map(|node| {
node.parent()
.filter(|p| p.kind() == "capture")
.unwrap_or(node)
})
.unwrap();
let capture_query = &CAPTURES_QUERY;
let ident_query = &IDENT_QUERY;
let mut cursor = QueryCursor::new();
let provider = TextProviderRope(rope);
if current_node.kind() == "capture" {
Some(
get_references(
&tree.root_node(),
¤t_node,
capture_query,
&mut cursor,
&provider,
rope,
)
.map(|node| DocumentHighlight {
kind: if node.parent().is_none_or(|p| p.kind() != "parameters") {
Some(DocumentHighlightKind::WRITE)
} else {
Some(DocumentHighlightKind::READ)
},
range: node.lsp_range(rope),
})
.collect(),
)
} else if current_node.kind() == "identifier" {
Some(
cursor
.matches(ident_query, tree.root_node(), &provider)
.map_deref(|match_| {
match_.captures.iter().filter_map(|cap| {
if cap.node.parent()?.kind() == current_node.parent()?.kind()
&& cap.node.text(rope) == current_node.text(rope)
{
return Some(cap.node);
}
None
})
})
.flatten()
.map(|node| DocumentHighlight {
kind: Some(DocumentHighlightKind::TEXT),
range: node.lsp_range(rope),
})
.collect(),
)
} else {
None
}
}
#[cfg(test)]
mod test {
use pretty_assertions::assert_eq;
use rstest::rstest;
use tower_lsp::lsp_types::{
DocumentHighlight, DocumentHighlightKind, DocumentHighlightParams, PartialResultParams,
Position, Range, TextDocumentIdentifier, TextDocumentPositionParams,
WorkDoneProgressParams, request::DocumentHighlightRequest,
};
use crate::{
Options,
test_helpers::helpers::{COMPLEX_FILE, TEST_URI, TestService, initialize_server},
};
type Highlight = (DocumentHighlightKind, (u32, u32), (u32, u32));
#[rstest]
#[case(
"(identifier) @variable",
Position { line: 0, character: 0 },
&[]
)]
#[case(
"(identifier) @variable",
Position { line: 0, character: 17 },
&[(DocumentHighlightKind::WRITE, (0, 13), (0, 22))]
)]
#[case(
r#"((identifier) @constant
(#match? @constant "^[A-Z][A-Z\\d_]*$"))
(boolean) @constant"#,
Position { line: 0, character: 17 },
&[
(DocumentHighlightKind::WRITE, (0, 14), (0, 23)),
(DocumentHighlightKind::READ, (1, 9), (1, 18)),
]
)]
#[case(
r"(variable) @variable
(variable
(type_specifier)) @variable.typed",
Position { line: 1, character: 6 },
&[
(DocumentHighlightKind::TEXT, (0, 1), (0, 9)),
(DocumentHighlightKind::TEXT, (2, 1), (2, 9)),
]
)]
#[case(
r"expression: (number) @number
expression: (boolean) @boolean",
Position { line: 2, character: 2 },
&[
(DocumentHighlightKind::TEXT, (0, 0), (0, 10)),
(DocumentHighlightKind::TEXT, (2, 0), (2, 10)),
]
)]
#[case(
r"expression: (number) @number
expression: (boolean) @boolean",
Position { line: 0, character: 10 },
&[]
)]
#[case(
&COMPLEX_FILE,
Position { line: 1, character: 5 },
&[
(DocumentHighlightKind::TEXT, (1, 4), (1, 7)),
(DocumentHighlightKind::TEXT, (13, 4), (13, 7)),
(DocumentHighlightKind::TEXT, (30, 4), (30, 7)),
(DocumentHighlightKind::TEXT, (31, 4), (31, 7)),
]
)]
#[case(
&COMPLEX_FILE,
Position { line: 13, character: 5 },
&[
(DocumentHighlightKind::TEXT, (1, 4), (1, 7)),
(DocumentHighlightKind::TEXT, (13, 4), (13, 7)),
(DocumentHighlightKind::TEXT, (30, 4), (30, 7)),
(DocumentHighlightKind::TEXT, (31, 4), (31, 7)),
]
)]
#[tokio::test(flavor = "current_thread")]
async fn document_highlight(
#[case] input: &str,
#[case] position: Position,
#[case] highlights: &[Highlight],
) {
// Arrange
let mut service =
initialize_server(&[(TEST_URI.clone(), input)], &Options::default()).await;
// Act
let refs = service
.request::<DocumentHighlightRequest>(DocumentHighlightParams {
partial_result_params: PartialResultParams {
partial_result_token: None,
},
work_done_progress_params: WorkDoneProgressParams::default(),
text_document_position_params: TextDocumentPositionParams {
text_document: TextDocumentIdentifier {
uri: TEST_URI.clone(),
},
position,
},
})
.await;
// Assert
let expected = if highlights.is_empty() {
None
} else {
Some(
highlights
.iter()
.map(|(kind, p0, p1)| DocumentHighlight {
kind: Some(*kind),
range: Range {
start: Position {
line: p0.0,
character: p0.1,
},
end: Position {
line: p1.0,
character: p1.1,
},
},
})
.collect(),
)
};
assert_eq!(expected, refs);
}
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/handlers/goto_definition.rs | src/handlers/goto_definition.rs | use tower_lsp::lsp_types::{GotoDefinitionParams, GotoDefinitionResponse, Location, Range};
use tracing::{info, warn};
use tree_sitter::QueryCursor;
use crate::{
Backend, LspClient,
util::{
CAPTURES_QUERY, NodeUtil, PosUtil, TextProviderRope, get_current_capture_node,
get_imported_module_under_cursor, get_references,
},
};
pub fn goto_definition<C: LspClient>(
backend: &Backend<C>,
params: &GotoDefinitionParams,
) -> Option<GotoDefinitionResponse> {
info!("ts_query_ls goto_definition: {params:?}");
let uri = ¶ms.text_document_position_params.text_document.uri;
let Some(doc) = backend.document_map.get(uri) else {
warn!("No document found for URI: {uri} when handling goto_definition");
return None;
};
let rope = &doc.rope;
let tree = &doc.tree;
let cur_pos = params.text_document_position_params.position;
if let Some(module) = get_imported_module_under_cursor(&doc, cur_pos) {
return module.uri.clone().map(|uri| {
Location {
uri,
range: Range::default(),
}
.into()
});
}
let current_node = get_current_capture_node(tree.root_node(), cur_pos.to_ts_point(rope))?;
let query = &CAPTURES_QUERY;
let mut cursor = QueryCursor::new();
let provider = TextProviderRope(rope);
let defs = get_references(
&tree.root_node(),
¤t_node,
query,
&mut cursor,
&provider,
rope,
)
.filter(|node| node.parent().is_none_or(|p| p.kind() != "parameters"))
.map(|node| Location {
uri: uri.clone(),
range: node.lsp_range(rope),
})
.collect::<Vec<Location>>();
Some(GotoDefinitionResponse::Array(defs))
}
#[cfg(test)]
mod test {
use pretty_assertions::assert_eq;
use rstest::rstest;
use tower_lsp::lsp_types::{
GotoDefinitionParams, GotoDefinitionResponse, Location, PartialResultParams, Position,
Range, TextDocumentIdentifier, TextDocumentPositionParams, Url, WorkDoneProgressParams,
request::GotoDefinition,
};
use crate::{
Options,
test_helpers::helpers::{
COMPLEX_FILE, SIMPLE_FILE, TEST_URI, TestService, initialize_server,
},
};
type Coordinate = ((u32, u32), (u32, u32));
#[rstest]
#[case(
&SIMPLE_FILE,
Position { line: 0, character: 4 },
(TEST_URI.clone(), [].as_slice())
)]
#[case(
&SIMPLE_FILE,
Position { line: 0, character: 20 },
(TEST_URI.clone(), [((0, 14), (0, 23))].as_slice())
)]
#[case(
&COMPLEX_FILE,
Position { line: 12, character: 14 },
(TEST_URI.clone(), [((8, 24), (8, 42)), ((9, 22), (9, 40))].as_slice())
)]
#[tokio::test(flavor = "current_thread")]
async fn goto_definition(
#[case] input: &str,
#[case] position: Position,
#[case] locations: (Url, &[Coordinate]),
) {
// Arrange
let mut service =
initialize_server(&[(TEST_URI.clone(), input)], &Options::default()).await;
// Act
let refs = service
.request::<GotoDefinition>(GotoDefinitionParams {
partial_result_params: PartialResultParams {
partial_result_token: None,
},
work_done_progress_params: WorkDoneProgressParams::default(),
text_document_position_params: TextDocumentPositionParams {
text_document: TextDocumentIdentifier {
uri: TEST_URI.clone(),
},
position,
},
})
.await;
// Assert
let expected = if locations.1.is_empty() {
None
} else {
Some(GotoDefinitionResponse::Array(
locations
.1
.iter()
.map(|r| Location {
uri: locations.0.clone(),
range: Range {
start: Position {
line: r.0.0,
character: r.0.1,
},
end: Position {
line: r.1.0,
character: r.1.1,
},
},
})
.collect(),
))
};
assert_eq!(expected, refs);
}
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/handlers/did_change.rs | src/handlers/did_change.rs | use tower_lsp::lsp_types::{DidChangeTextDocumentParams, Position, Range};
use tracing::warn;
use crate::{
Backend, LspClient,
util::{ByteUtil, TextDocChangeUtil, edit_rope, get_imported_uris, parse, push_diagnostics},
};
use super::did_open::populate_import_documents;
pub async fn did_change<C: LspClient>(backend: &Backend<C>, params: DidChangeTextDocumentParams) {
let uri = params.text_document.uri;
let Some(mut document) = backend.document_map.get_mut(&uri) else {
warn!("No document found for URI: {uri} when handling did_change");
return;
};
let version = params.text_document.version;
document.version = Some(version);
let mut edits = vec![];
let mut recalculate_imports = false;
for change in ¶ms.content_changes {
let rope = &mut document.rope;
let new_text = change.text.as_str();
let range = change.range.unwrap_or_else(|| {
let start = Position::new(0, 0);
let end = (rope.len_bytes() - 1).to_lsp_pos(rope);
Range { start, end }
});
if range.start.line == 0 {
recalculate_imports = true;
}
edits.push(change.to_tsedit(rope));
edit_rope(rope, range, new_text);
}
for edit in edits {
document.tree.edit(&edit);
}
let rope = document.rope.clone();
let tree = parse(&rope, (&document.tree).into());
document.tree = tree.clone();
// We must not hold a reference to something in the `document_map` while populating the import
// documents.
drop(document);
if recalculate_imports {
let workspace_uris = backend.workspace_paths.read().unwrap().clone();
let options = backend.options.read().await;
let uris = get_imported_uris(&workspace_uris, &options, &uri, &rope, &tree);
populate_import_documents(&backend.document_map, &workspace_uris, &options, &uris);
if let Some(mut document) = backend.document_map.get_mut(&uri) {
// Remove previous import URIs.
for import_uri in document
.imported_uris
.iter()
.filter_map(|import| import.uri.clone().filter(|url| url != &uri))
{
backend
.dependents
.entry(import_uri)
.or_default()
.remove(&uri);
}
// Add new import URIs.
for import_uri in uris
.iter()
.filter_map(|import| import.uri.clone().filter(|url| url != &uri))
{
backend
.dependents
.entry(import_uri)
.or_default()
.insert(uri.clone());
}
document.imported_uris = uris;
}
}
push_diagnostics(backend, uri).await;
}
#[cfg(test)]
mod test {
use pretty_assertions::assert_eq;
use rstest::rstest;
use tower_lsp::lsp_types::{
DidChangeTextDocumentParams, TextDocumentContentChangeEvent,
VersionedTextDocumentIdentifier, notification::DidChangeTextDocument,
};
use crate::{
Options,
test_helpers::helpers::{TEST_URI, TestEdit, TestService, initialize_server},
};
#[rstest]
#[case(
r#"(node_name) @hello
";" @semicolon"#,
r#"(identifier) @goodbye
";" @punctuation.delimiter"#,
&[
TestEdit::new("goodbye", (0, 13), (0, 18)),
TestEdit::new("identifier", (0, 1), (0, 10)),
TestEdit::new("punctuation.delimiter", (1, 5), (1, 14)),
]
)]
#[case(
r#"; Some comment with emojis 🚀🛳️🫡
(node_name) @hello
";" @semicolon"#,
r#"; Some comment with emojis 🚀🛳️🫡
(identifier) @goodbye
";" @punctuation.delimiter"#,
&[
TestEdit::new("goodbye", (1, 13), (1, 18)),
TestEdit::new("identifier", (1, 1), (1, 10)),
TestEdit::new("punctuation.delimiter", (2, 5), (2, 14)),
]
)]
#[case(
r#"; Some comment with emojis 🚀🛳️🫡
(node_name) @hello
";" @semicolon"#,
r#"; Some comment with emojis 🚀🛳️🫡(node_name) @hello
";" @semicolon"#,
&[
TestEdit::new("", (0, 34), (0, 35)),
]
)]
#[tokio::test(flavor = "current_thread")]
async fn server_did_change(
#[case] original: &str,
#[case] expected: &str,
#[case] edits: &[TestEdit],
) {
// Arrange
let mut service =
initialize_server(&[(TEST_URI.clone(), original)], &Options::default()).await;
// Act
service
.notify::<DidChangeTextDocument>(DidChangeTextDocumentParams {
text_document: VersionedTextDocumentIdentifier {
uri: TEST_URI.clone(),
version: 1,
},
content_changes: edits
.iter()
.map(Into::<TextDocumentContentChangeEvent>::into)
.collect(),
})
.await;
// Assert
let doc = service.inner().document_map.get(&TEST_URI).unwrap();
let rope = &doc.rope;
assert_eq!(rope.to_string(), expected);
let tree = &doc.tree;
assert_eq!(
tree.root_node().utf8_text(expected.as_bytes()).unwrap(),
expected
);
assert_eq!(2, service.inner().client.get_notifications().len());
}
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/handlers/formatting.rs | src/handlers/formatting.rs | use std::collections::HashSet;
use std::sync::LazyLock;
use regex::Regex;
use ropey::Rope;
use tower_lsp::lsp_types::{
DocumentFormattingParams, DocumentRangeFormattingParams, Range, TextEdit,
};
use tracing::warn;
use tree_sitter::{
Node, Query, QueryCursor, QueryMatch, QueryPredicateArg, StreamingIterator as _, TreeCursor,
};
use ts_query_ls::FormattingOptions;
use crate::QUERY_LANGUAGE;
use crate::util::{ByteUtil, NodeUtil as _, TextProviderRope};
use crate::{Backend, LspClient};
pub async fn formatting<C: LspClient>(
backend: &Backend<C>,
params: &DocumentFormattingParams,
) -> Option<Vec<TextEdit>> {
let uri = ¶ms.text_document.uri;
let Some(doc) = backend.document_map.get(uri) else {
warn!("No document found for URI: {uri} when handling formatting");
return None;
};
let rope = &doc.rope;
let root = &doc.tree.root_node();
let fmt_options = backend.options.read().await.formatting_options;
format_document(rope, root, fmt_options).map(|formatted_doc| {
diffs(rope.to_string().as_str(), &formatted_doc, rope.clone()).collect()
})
}
pub async fn range_formatting<C: LspClient>(
backend: &Backend<C>,
params: &DocumentRangeFormattingParams,
) -> Option<Vec<TextEdit>> {
let uri = ¶ms.text_document.uri;
let Some(doc) = backend.document_map.get(uri) else {
warn!("No document found for URI: {uri} when handling formatting");
return None;
};
let rope = &doc.rope;
let root = &doc.tree.root_node();
let range = params.range;
let fmt_options = backend.options.read().await.formatting_options;
format_document(rope, root, fmt_options).map(|formatted_doc| {
diffs(rope.to_string().as_str(), &formatted_doc, rope.clone())
.filter(|d| d.range.end >= range.start && d.range.start <= range.end)
.collect()
})
}
static LINE_START: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"^([^\S\r\n]*)").unwrap());
static NEWLINES: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"\n+").unwrap());
static COMMENT_PAT: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"^;+(\s*.*?)\s*$").unwrap());
static CRLF: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"\r\n?").unwrap());
static FORMAT_QUERY: LazyLock<Query> = LazyLock::new(|| {
Query::new(
&QUERY_LANGUAGE,
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/queries/query/formatting.scm"
)),
)
.unwrap()
});
pub fn diffs<'a>(left: &'a str, right: &'a str, rope: Rope) -> impl Iterator<Item = TextEdit> + 'a {
use dissimilar::Chunk;
let mut offset = 0;
let mut chunks = dissimilar::diff(left, right).into_iter().peekable();
std::iter::from_fn(move || {
loop {
let chunk = chunks.next()?;
if let (Chunk::Delete(deleted), Some(&Chunk::Insert(inserted))) = (chunk, chunks.peek())
{
chunks.next().unwrap();
let deleted_len = deleted.len();
let start = offset.to_lsp_pos(&rope);
let end = (offset + deleted_len).to_lsp_pos(&rope);
offset += deleted_len;
return Some(TextEdit {
new_text: inserted.to_owned(),
range: Range { start, end },
});
}
match chunk {
Chunk::Equal(text) => {
offset += text.len();
}
Chunk::Delete(deleted) => {
let deleted_len = deleted.len();
let start = offset.to_lsp_pos(&rope);
let end = (offset + deleted_len).to_lsp_pos(&rope);
offset += deleted_len;
return Some(TextEdit {
new_text: String::new(),
range: Range { start, end },
});
}
Chunk::Insert(inserted) => {
let pos = offset.to_lsp_pos(&rope);
return Some(TextEdit {
new_text: inserted.to_owned(),
range: Range {
start: pos,
end: pos,
},
});
}
}
}
})
}
const INDENT_STR: &str = " ";
const TEXT_WIDTH: usize = 100;
fn append_lines(lines: &mut Vec<String>, lines_to_append: &[String]) {
for (i, line) in lines_to_append.iter().enumerate() {
lines.last_mut().unwrap().push_str(line);
if i != lines_to_append.len() - 1 {
lines.push(String::new());
}
}
}
#[derive(Default)]
struct FormatMap {
ignore: HashSet<usize>,
indent_begin: HashSet<usize>,
indent_dedent: HashSet<usize>,
prepend_space: HashSet<usize>,
prepend_newline: HashSet<usize>,
append_space: HashSet<usize>,
append_newline: HashSet<usize>,
cancel_append: HashSet<usize>,
cancel_prepend: HashSet<usize>,
conditional_newline: HashSet<usize>,
lookahead_newline: HashSet<usize>,
comment_fix: HashSet<usize>,
make_prefix: HashSet<usize>,
remove: HashSet<usize>,
}
pub fn format_document(rope: &Rope, root: &Node, options: FormattingOptions) -> Option<String> {
if root.has_error() {
return None;
}
let mut map = FormatMap::default();
let mut cursor = QueryCursor::new();
let provider = TextProviderRope(rope);
let mut matches = cursor.matches(&FORMAT_QUERY, *root, &provider);
'matches: while let Some(match_) = matches.next() {
for predicate in FORMAT_QUERY.general_predicates(match_.pattern_index) {
let keep = handle_predicate(match_, &predicate.operator, &predicate.args, rope);
if !keep {
continue 'matches;
}
}
for capture in match_.captures {
let name = FORMAT_QUERY.capture_names()[capture.index as usize];
if name.starts_with('_') {
continue;
}
let capture_set = match name {
"format.ignore" => &mut map.ignore,
"format.indent.begin" => &mut map.indent_begin,
"format.indent.dedent" => &mut map.indent_dedent,
"format.prepend-space" => &mut map.prepend_space,
"format.prepend-newline" => &mut map.prepend_newline,
"format.append-space" => &mut map.append_space,
"format.append-newline" => &mut map.append_newline,
"format.cancel-append" => &mut map.cancel_append,
"format.cancel-prepend" => &mut map.cancel_prepend,
"format.conditional-newline" => &mut map.conditional_newline,
"format.lookahead-newline" => &mut map.lookahead_newline,
"format.comment-fix" => &mut map.comment_fix,
"format.make-prefix" => &mut map.make_prefix,
"format.remove" => &mut map.remove,
_ => panic!("Unsupported format &mut capture"),
};
capture_set.insert(capture.node.id());
}
}
let mut lines = vec![String::new()];
format_iter(rope, root, &mut lines, &map, 0, &mut root.walk(), options);
Some(lines.join("\n") + "\n")
}
fn format_iter<'a>(
rope: &Rope,
node: &Node<'a>,
lines: &mut Vec<String>,
map: &FormatMap,
mut level: usize,
cursor: &mut TreeCursor<'a>,
options: FormattingOptions,
) {
if !cursor.goto_first_child() {
return;
}
// Sometimes 2 queries apply append twice. This is to prevent the case from happening
let mut apply_newline = false;
loop {
let child = cursor.node();
let id = &child.id();
if apply_newline {
apply_newline = false;
lines.push(INDENT_STR.repeat(level));
}
if map.ignore.contains(id) {
let text = CRLF
.replace_all(child.text(rope).as_str(), "\n")
.trim_matches('\n')
.split('\n')
.map(ToOwned::to_owned)
.collect::<Vec<String>>();
append_lines(lines, &text);
} else if !map.remove.contains(id) {
if !map.cancel_prepend.contains(id) {
if map.prepend_newline.contains(id) {
lines.push(INDENT_STR.repeat(level));
} else if map.prepend_space.contains(id) {
let byte_length = child.end_byte() - child.start_byte();
let broader_byte_length = node.end_byte() - child.start_byte();
if !map.conditional_newline.contains(id) {
lines.last_mut().unwrap().push(' ');
} else if byte_length + 1 + lines.last().unwrap().len() > TEXT_WIDTH
|| (map.lookahead_newline.contains(id)
&& broader_byte_length + lines.last().unwrap().len() > TEXT_WIDTH)
{
lines.push(INDENT_STR.repeat(level));
} else {
lines.last_mut().unwrap().push(' ');
}
}
}
if map.comment_fix.contains(id) {
if let Some(mat) = COMMENT_PAT.captures(&child.text(rope)) {
lines
.last_mut()
.unwrap()
.push_str([";", mat.get(1).unwrap().as_str()].concat().as_str());
}
} else if map.make_prefix.contains(id) {
lines
.last_mut()
.unwrap()
.push(if options.dot_prefix_predicates {
'.'
} else {
'#'
});
// Stop recursively formatting on leaf nodes (or strings, which should not have their
// inner content touched)
} else if child.child_count() == 0 || child.kind() == "string" {
let text = NEWLINES
.split(
CRLF.replace_all(child.text(rope).as_str(), "\n")
.trim_matches('\n'),
)
.map(ToOwned::to_owned)
.collect::<Vec<String>>();
append_lines(lines, &text);
} else {
format_iter(rope, &child, lines, map, level, cursor, options);
}
if map.indent_begin.contains(id) {
level += 1;
apply_newline = true;
} else if map.indent_dedent.contains(id) {
lines.last_mut().unwrap().drain(0..2);
}
}
if map.cancel_append.contains(id) {
apply_newline = false;
} else if map.append_newline.contains(id) {
apply_newline = true;
} else if map.append_space.contains(id) {
lines.last_mut().unwrap().push(' ');
}
if !cursor.goto_next_sibling() {
break;
}
}
cursor.goto_parent();
}
fn handle_predicate(
match_: &QueryMatch,
directive: &str,
args: &std::boxed::Box<[tree_sitter::QueryPredicateArg]>,
rope: &Rope,
) -> bool {
match directive {
"is-start-of-line?" | "not-is-start-of-line?" => {
if let QueryPredicateArg::Capture(cap_idx) = &args[0] {
let range = match_
.nodes_for_capture_index(*cap_idx)
.next()
.unwrap()
.range();
let line = rope.line(range.start_point.row).to_string();
let pre_whitespace = LINE_START
.captures(line.as_str())
.and_then(|c| c.get(1))
.map_or(0, |m| m.len());
let is_start = pre_whitespace == range.start_point.column;
if directive == "not-is-start-of-line?" {
return !is_start;
}
return is_start;
}
true
}
"not-kind-eq?" => {
if let QueryPredicateArg::Capture(cap_idx) = &args[0] {
let node_type = match match_.nodes_for_capture_index(*cap_idx).next() {
None => return true,
Some(node) => node.kind(),
};
for arg in &args[1..] {
if let QueryPredicateArg::String(kind) = arg
&& node_type == &**kind
{
return false;
}
}
}
true
}
&_ => false,
}
}
#[cfg(test)]
mod test {
use pretty_assertions::assert_eq;
use rstest::rstest;
use tower_lsp::lsp_types::{
DidChangeTextDocumentParams, DocumentFormattingParams, DocumentRangeFormattingParams,
Position, Range, TextDocumentContentChangeEvent, TextDocumentIdentifier,
VersionedTextDocumentIdentifier, WorkDoneProgressParams,
notification::DidChangeTextDocument,
request::{Formatting, RangeFormatting},
};
use ts_query_ls::FormattingOptions;
use crate::{
Options,
test_helpers::helpers::{TEST_URI, TestService, initialize_server},
};
#[rstest]
#[case(
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/before_trailing_whitespace.scm")),
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/after_trailing_whitespace.scm")),
FormattingOptions::default(),
)]
#[case(
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/before_predicates.scm")),
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/after_predicates.scm")),
FormattingOptions::default(),
)]
#[case(
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/before_missing.scm")),
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/after_missing.scm")),
FormattingOptions::default(),
)]
#[case(
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/before_syntax_error.scm")),
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/after_syntax_error.scm")),
FormattingOptions::default(),
)]
#[case(
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/before_complex.scm")),
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/after_complex.scm")),
FormattingOptions::default(),
)]
#[case(
"(#foo? bar)\n",
"(.foo? bar)\n",
FormattingOptions {
dot_prefix_predicates: true
},
)]
#[tokio::test(flavor = "current_thread")]
async fn server_formatting(
#[case] before: &str,
#[case] after: &str,
#[case] fmt_options: FormattingOptions,
) {
// Arrange
let mut service = initialize_server(
&[(TEST_URI.clone(), before)],
&Options {
formatting_options: fmt_options,
..Default::default()
},
)
.await;
// Act
let mut edits = service
.request::<Formatting>(DocumentFormattingParams {
text_document: TextDocumentIdentifier {
uri: TEST_URI.clone(),
},
work_done_progress_params: WorkDoneProgressParams::default(),
options: tower_lsp::lsp_types::FormattingOptions::default(),
})
.await
.unwrap_or_default();
edits.sort_by(|a, b| {
let range_a = a.range;
let range_b = b.range;
range_b.start.cmp(&range_a.start)
});
service
.notify::<DidChangeTextDocument>(DidChangeTextDocumentParams {
text_document: VersionedTextDocumentIdentifier {
uri: TEST_URI.clone(),
version: 1,
},
content_changes: edits
.iter()
.map(|e| TextDocumentContentChangeEvent {
range: Some(e.range),
text: e.new_text.clone(),
range_length: None,
})
.collect(),
})
.await;
// Assert
let doc = service.inner().document_map.get(&TEST_URI).unwrap();
assert_eq!(doc.rope.to_string(), String::from(after));
}
#[rstest]
#[case(
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/before_syntax_error.scm")),
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/after_syntax_error.scm")),
Range::new(Position::new(0, 0), Position::new(1, 0)),
)]
#[case(
r#" (
( identifier )
@type
( .lua-match? @type"^[A-Z]"_ asdf ))
"#,
r#"((identifier) @type
( .lua-match? @type"^[A-Z]"_ asdf ))
"#,
Range::new(Position::new(0, 0), Position::new(1, 0))
)]
#[case(
r#" (
( identifier )
@type
( .lua-match? @type"^[A-Z]"_ asdf ))
"#,
r#" (
( identifier )
@type
(#lua-match? @type "^[A-Z]" _ asdf))
"#,
Range::new(Position::new(3, 0), Position::new(4, 0))
)]
#[case(
r#" (
( identifier )
@type
( .lua-match? @type"^[A-Z]"_ asdf ))
"#,
r#"((identifier) @type
(#lua-match? @type "^[A-Z]" _ asdf))
"#,
Range::new(Position::new(0, 0), Position::new(4, 0))
)]
#[case(
" (idented
(indented) @cap)
(indented_again) @cap
(indented
(yet
(again) @cap))
",
" (idented
(indented) @cap)
(indented_again) @cap
(indented
(yet
(again) @cap))
",
Range::new(Position::new(4, 0), Position::new(5, 0))
)]
#[case(
" (idented
(indented) @cap)
(indented_again) @cap
(indented
(yet
(again) @cap))
",
" (idented
(indented) @cap)
(indented_again) @cap
(indented
(yet
(again) @cap))
",
Range::new(Position::new(7, 0), Position::new(7, 6))
)]
#[case(
"(expand (expand (expand (expand) @cap)))
(no_expand) @cap
",
"(expand (expand (expand (expand) @cap)))
(no_expand) @cap
",
Range::new(Position::new(2, 0), Position::new(3, 0))
)]
#[case(
"(expand (expand (expand (expand) @cap)) )
(no_expand) @cap
",
"(expand
(expand
(expand
(expand) @cap)))
(no_expand) @cap
",
Range::new(Position::new(0, 0), Position::new(1, 0))
)]
#[tokio::test(flavor = "current_thread")]
async fn server_range_formatting(
#[case] before: &str,
#[case] after: &str,
#[case] range: Range,
) {
// Arrange
let mut service =
initialize_server(&[(TEST_URI.clone(), before)], &Options::default()).await;
// Act
let mut edits = service
.request::<RangeFormatting>(DocumentRangeFormattingParams {
text_document: TextDocumentIdentifier {
uri: TEST_URI.clone(),
},
work_done_progress_params: WorkDoneProgressParams::default(),
range,
options: tower_lsp::lsp_types::FormattingOptions::default(),
})
.await
.unwrap_or_default();
edits.sort_by(|a, b| {
let range_a = a.range;
let range_b = b.range;
range_b.start.cmp(&range_a.start)
});
service
.notify::<DidChangeTextDocument>(DidChangeTextDocumentParams {
text_document: VersionedTextDocumentIdentifier {
uri: TEST_URI.clone(),
version: 1,
},
content_changes: edits
.iter()
.map(|e| TextDocumentContentChangeEvent {
range: Some(e.range),
text: e.new_text.clone(),
range_length: None,
})
.collect(),
})
.await;
// Assert
let doc = service.inner().document_map.get(&TEST_URI).unwrap();
assert_eq!(doc.rope.to_string(), String::from(after));
}
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/handlers/did_close.rs | src/handlers/did_close.rs | use tower_lsp::lsp_types::DidCloseTextDocumentParams;
use tracing::{info, warn};
use crate::{Backend, LspClient};
pub fn did_close<C: LspClient>(backend: &Backend<C>, params: &DidCloseTextDocumentParams) {
let uri = ¶ms.text_document.uri;
info!("ts_query_ls did_close: {uri}");
if backend.document_map.remove(uri).is_none() {
warn!("Document with URI: {uri} was not being tracked");
}
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/handlers/code_action.rs | src/handlers/code_action.rs | use std::{collections::HashMap, vec};
use ropey::Rope;
use serde::{Deserialize, Serialize};
use tower_lsp::lsp_types::{
CodeAction, CodeActionKind, CodeActionOrCommand, CodeActionParams, CodeActionResponse,
Diagnostic, Position, Range, TextEdit, Url, WorkspaceEdit,
};
use tree_sitter::{QueryCursor, Tree};
use crate::{
Backend, LspClient,
util::{
CAPTURES_QUERY, NodeUtil, PosUtil, RangeUtil, TextProviderRope, get_current_capture_node,
get_references,
},
};
#[repr(u8)]
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(into = "u8", try_from = "u8")]
pub enum CodeActions {
RemoveBackslash,
PrefixUnderscore,
Remove,
Trim,
Enquote,
}
impl From<CodeActions> for serde_json::Value {
fn from(value: CodeActions) -> Self {
serde_json::to_value(value).expect("Invalid code action value")
}
}
impl From<CodeActions> for u8 {
fn from(e: CodeActions) -> Self {
e as Self
}
}
impl TryFrom<u8> for CodeActions {
type Error = &'static str;
fn try_from(value: u8) -> std::result::Result<Self, Self::Error> {
match value {
0 => Ok(Self::RemoveBackslash),
1 => Ok(Self::PrefixUnderscore),
2 => Ok(Self::Remove),
3 => Ok(Self::Trim),
4 => Ok(Self::Enquote),
_ => Err("Invalid value"),
}
}
}
pub fn diag_to_code_action(
tree: &Tree,
rope: &Rope,
diagnostic: Diagnostic,
uri: &Url,
) -> Option<CodeActionOrCommand> {
match serde_json::from_value::<CodeActions>(diagnostic.data.clone()?) {
Ok(CodeActions::RemoveBackslash) => Some(CodeActionOrCommand::CodeAction(CodeAction {
title: String::from("Remove unnecessary backslash"),
kind: Some(CodeActionKind::QUICKFIX),
is_preferred: Some(true),
edit: Some(WorkspaceEdit {
changes: Some(HashMap::from([(
uri.clone(),
vec![TextEdit {
new_text: String::new(),
range: Range::new(
diagnostic.range.start,
Position::new(
diagnostic.range.start.line,
diagnostic.range.start.character + 1,
),
),
}],
)])),
..Default::default()
}),
diagnostics: Some(vec![diagnostic]),
..Default::default()
})),
Ok(CodeActions::PrefixUnderscore) => {
let root = tree.root_node();
let current_node =
get_current_capture_node(root, diagnostic.range.start.to_ts_point(rope))?;
let mut cursor = QueryCursor::new();
let provider = TextProviderRope(rope);
let refs = get_references(
&root,
¤t_node,
&CAPTURES_QUERY,
&mut cursor,
&provider,
rope,
);
let edits = refs
.into_iter()
.map(|node| {
let mut range = node.lsp_range(rope);
range.start.character += 1;
range.end.character = range.start.character;
TextEdit {
new_text: String::from("_"),
range,
}
})
.collect();
Some(CodeActionOrCommand::CodeAction(CodeAction {
title: String::from("Prefix capture name with underscore"),
kind: Some(CodeActionKind::QUICKFIX),
is_preferred: Some(true),
edit: Some(WorkspaceEdit {
changes: Some(HashMap::from([(uri.clone(), edits)])),
..Default::default()
}),
diagnostics: Some(vec![diagnostic]),
..Default::default()
}))
}
Ok(CodeActions::Remove) => Some(CodeActionOrCommand::CodeAction(CodeAction {
title: String::from("Remove pattern"),
kind: Some(CodeActionKind::QUICKFIX),
is_preferred: Some(true),
edit: Some(WorkspaceEdit {
changes: Some(HashMap::from([(
uri.clone(),
vec![TextEdit {
new_text: String::new(),
range: diagnostic.range,
}],
)])),
..Default::default()
}),
diagnostics: Some(vec![diagnostic]),
..Default::default()
})),
Ok(CodeActions::Trim) => {
let mut range = diagnostic.range;
range.start.character += 1;
range.end.character -= 1;
let new_text = range.text(rope);
Some(CodeActionOrCommand::CodeAction(CodeAction {
title: String::from("Trim quotations from string"),
kind: Some(CodeActionKind::QUICKFIX),
is_preferred: Some(true),
edit: Some(WorkspaceEdit {
changes: Some(HashMap::from([(
uri.clone(),
vec![TextEdit {
new_text,
range: diagnostic.range,
}],
)])),
..Default::default()
}),
diagnostics: Some(vec![diagnostic]),
..Default::default()
}))
}
Ok(CodeActions::Enquote) => {
let new_text = diagnostic.range.text(rope);
let new_text = format!("\"{new_text}\"");
Some(CodeActionOrCommand::CodeAction(CodeAction {
title: String::from("Add quotations"),
kind: Some(CodeActionKind::QUICKFIX),
is_preferred: Some(true),
edit: Some(WorkspaceEdit {
changes: Some(HashMap::from([(
uri.clone(),
vec![TextEdit {
new_text,
range: diagnostic.range,
}],
)])),
..Default::default()
}),
diagnostics: Some(vec![diagnostic]),
..Default::default()
}))
}
Err(_) => None,
}
}
pub fn code_action<C: LspClient>(
backend: &Backend<C>,
params: CodeActionParams,
) -> Option<CodeActionResponse> {
let uri = ¶ms.text_document.uri;
let diagnostics = params.context.diagnostics;
let doc = backend.document_map.get(uri)?;
let actions: Vec<CodeActionOrCommand> = diagnostics
.into_iter()
.filter_map(|diagnostic| diag_to_code_action(&doc.tree, &doc.rope, diagnostic, uri))
.collect();
if actions.is_empty() {
None
} else {
Some(actions)
}
}
#[cfg(test)]
mod test {
use std::collections::HashMap;
use pretty_assertions::assert_eq;
use rstest::rstest;
use tower_lsp::lsp_types::{
CodeAction, CodeActionContext, CodeActionKind, Diagnostic, Position, Range,
TextDocumentIdentifier, TextEdit, WorkspaceEdit,
};
use tower_lsp::lsp_types::{
CodeActionOrCommand, CodeActionParams, PartialResultParams, WorkDoneProgressParams,
request::CodeActionRequest,
};
use crate::{
Options,
handlers::code_action::CodeActions,
test_helpers::helpers::{TEST_URI, TestService, initialize_server},
};
#[rstest]
#[case(r#""\p" @_somecap"#, Options::default(), Position::new(0, 2), CodeActionContext {
diagnostics: vec![Diagnostic {
message: String::from("bad escape"),
range: Range::new(Position::new(0, 1), Position::new(0, 3)),
data: Some(serde_json::to_value(CodeActions::RemoveBackslash).unwrap()),
..Default::default()
}],
..Default::default()
}, &[CodeActionOrCommand::CodeAction(CodeAction {
title: String::from("Remove unnecessary backslash"),
kind: Some(CodeActionKind::QUICKFIX),
is_preferred: Some(true),
diagnostics: Some(vec![Diagnostic {
message: String::from("bad escape"),
range: Range::new(Position::new(0, 1), Position::new(0, 3)),
data: Some(serde_json::to_value(CodeActions::RemoveBackslash).unwrap()),
..Default::default()
}]),
edit: Some(WorkspaceEdit {
changes: Some(HashMap::from([(
TEST_URI.clone(),
vec![TextEdit {
range: Range::new(Position::new(0, 1), Position::new(0, 2)),
new_text: String::new()
}]
)])),
..Default::default()
}),
..Default::default()
})])]
#[case(r#"((comment) @jsdoc_comment
(#lua-match? @jsdoc_comment ".*"))"#, Options::default(), Position::new(0, 15), CodeActionContext {
diagnostics: vec![Diagnostic {
message: String::from("bad cap"),
range: Range::new(Position::new(0, 11), Position::new(0, 24)),
data: Some(serde_json::to_value(CodeActions::PrefixUnderscore).unwrap()),
..Default::default()
}],
..Default::default()
}, &[CodeActionOrCommand::CodeAction(CodeAction {
title: String::from("Prefix capture name with underscore"),
kind: Some(CodeActionKind::QUICKFIX),
is_preferred: Some(true),
diagnostics: Some(vec![Diagnostic {
message: String::from("bad cap"),
range: Range::new(Position::new(0, 11), Position::new(0, 24)),
data: Some(serde_json::to_value(CodeActions::PrefixUnderscore).unwrap()),
..Default::default()
}]),
edit: Some(WorkspaceEdit {
changes: Some(HashMap::from([(
TEST_URI.clone(),
vec![TextEdit {
range: Range::new(Position::new(0, 12), Position::new(0, 12)),
new_text: String::from("_")
}, TextEdit {
range: Range::new(Position::new(1, 16), Position::new(1, 16)),
new_text: String::from("_")
}]
), ])),
..Default::default()
}),
..Default::default()
})])]
#[case(r#"((comment) @jsdoc_comment
(#lua-match? @jsdoc_comment "asdf"))"#, Options::default(), Position::new(1, 32), CodeActionContext {
diagnostics: vec![Diagnostic {
message: String::from("Unnecessary string quotation"),
range: Range::new(Position::new(1, 30), Position::new(1, 36)),
data: Some(serde_json::to_value(CodeActions::Trim).unwrap()),
..Default::default()
}],
..Default::default()
}, &[CodeActionOrCommand::CodeAction(CodeAction {
title: String::from("Trim quotations from string"),
kind: Some(CodeActionKind::QUICKFIX),
is_preferred: Some(true),
diagnostics: Some(vec![Diagnostic {
message: String::from("Unnecessary string quotation"),
range: Range::new(Position::new(1, 30), Position::new(1, 36)),
data: Some(serde_json::to_value(CodeActions::Trim).unwrap()),
..Default::default()
}]),
edit: Some(WorkspaceEdit {
changes: Some(HashMap::from([(
TEST_URI.clone(),
vec![TextEdit {
range: Range::new(Position::new(1, 30), Position::new(1, 36)),
new_text: String::from("asdf")
}]
), ])),
..Default::default()
}),
..Default::default()
})])]
#[tokio::test(flavor = "current_thread")]
async fn server_code_action(
#[case] source: &str,
#[case] options: Options,
#[case] cursor: Position,
#[case] context: CodeActionContext,
#[case] expected_code_actions: &[CodeActionOrCommand],
) {
// Arrange
let mut service = initialize_server(&[(TEST_URI.clone(), source)], &options).await;
// Act
let code_actions = service
.request::<CodeActionRequest>(CodeActionParams {
context,
range: Range::new(cursor, cursor),
text_document: TextDocumentIdentifier {
uri: TEST_URI.clone(),
},
work_done_progress_params: WorkDoneProgressParams::default(),
partial_result_params: PartialResultParams::default(),
})
.await;
// Assert
let expected_code_actions = Some(expected_code_actions.to_vec());
assert_eq!(expected_code_actions, code_actions);
}
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/handlers/diagnostic.rs | src/handlers/diagnostic.rs | use std::{
collections::{HashMap, HashSet},
sync::{Arc, LazyLock},
};
use dashmap::DashMap;
use regex::Regex;
use ropey::Rope;
use tower_lsp::{
jsonrpc::{Error, ErrorCode, Result},
lsp_types::{
Diagnostic, DiagnosticRelatedInformation, DiagnosticSeverity, DiagnosticTag,
DocumentDiagnosticParams, DocumentDiagnosticReport, DocumentDiagnosticReportKind,
DocumentDiagnosticReportResult, FullDocumentDiagnosticReport, Location, NumberOrString,
Position, Range, RelatedFullDocumentDiagnosticReport, Url,
},
};
use tree_sitter::{
Language, Node, Query, QueryCursor, QueryError, QueryErrorKind, StreamingIterator as _,
TreeCursor,
};
use ts_query_ls::{
Options, ParameterConstraint, PredicateParameter, PredicateParameterArity,
PredicateParameterType, StringArgumentStyle,
};
use crate::{
Backend, DocumentData, ImportedUri, LanguageData, LspClient, QUERY_LANGUAGE, SymbolInfo,
util::{
CAPTURES_QUERY, NodeUtil as _, TextProviderRope, remove_unnecessary_escapes,
uri_to_basename,
},
};
use super::code_action::CodeActions;
pub enum DiagnosticCode {
// Errors
InvalidPatternStructure,
InvalidNode,
InvalidSubtype,
InvalidSupertype,
InvalidField,
InvalidSyntax,
MissingToken,
UndeclaredCapture,
// Warnings
NoLanguageObject,
NoLanguageName,
InvalidAbi,
InvalidCaptureName,
UnusedAuxiliaryCapture,
UnrecognizedPredicate,
UnrecognizedDirective,
UnnecessaryEscapeSequence,
UnnecessaryPattern,
ImportNameMissing,
ImportNotFound,
ParameterTypeMismatch,
InvalidNamedNode,
InvalidInteger,
InvalidEnumMember,
UnexpectedParameter,
MissingParameter,
// Hints
UnnecessaryQuotations,
UnquotedString,
RedundantAlternant,
// Special
ImportIssues,
}
impl From<DiagnosticCode> for Option<NumberOrString> {
fn from(value: DiagnosticCode) -> Self {
let string_slice = match value {
DiagnosticCode::NoLanguageObject => "no-language-object",
DiagnosticCode::NoLanguageName => "no-language-name",
DiagnosticCode::InvalidAbi => "invalid-abi",
DiagnosticCode::InvalidPatternStructure => "invalid-pattern-structure",
DiagnosticCode::InvalidNode => "invalid-node",
DiagnosticCode::InvalidSubtype => "invalid-subtype",
DiagnosticCode::InvalidSupertype => "invalid-supertype",
DiagnosticCode::InvalidField => "invalid-field",
DiagnosticCode::InvalidSyntax => "invalid-syntax",
DiagnosticCode::MissingToken => "missing-token",
DiagnosticCode::UndeclaredCapture => "undeclared-capture",
DiagnosticCode::InvalidCaptureName => "invalid-capture-name",
DiagnosticCode::UnusedAuxiliaryCapture => "unused-auxiliary-capture",
DiagnosticCode::UnrecognizedPredicate => "unrecognized-predicate",
DiagnosticCode::UnrecognizedDirective => "unrecognized-directive",
DiagnosticCode::UnnecessaryEscapeSequence => "unnecessary-escape-sequence",
DiagnosticCode::UnnecessaryPattern => "unnecessary-pattern",
DiagnosticCode::UnnecessaryQuotations => "unnecessary-quotations",
DiagnosticCode::UnquotedString => "unquoted-string",
DiagnosticCode::ImportIssues => "import-issues",
DiagnosticCode::ImportNameMissing => "import-name-missing",
DiagnosticCode::ImportNotFound => "import-not-found",
DiagnosticCode::ParameterTypeMismatch => "parameter-type-mismatch",
DiagnosticCode::InvalidNamedNode => "invalid-named-node",
DiagnosticCode::InvalidInteger => "invalid-integer",
DiagnosticCode::InvalidEnumMember => "invalid-enum-member",
DiagnosticCode::UnexpectedParameter => "unexpected-parameter",
DiagnosticCode::MissingParameter => "missing-parameter",
DiagnosticCode::RedundantAlternant => "redundant-alternant",
};
Some(NumberOrString::String(String::from(string_slice)))
}
}
static DIAGNOSTICS_QUERY: LazyLock<Query> = LazyLock::new(|| {
Query::new(
&QUERY_LANGUAGE,
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/queries/query/diagnostics.scm"
)),
)
.unwrap()
});
static DEFINITIONS_QUERY: LazyLock<Query> =
LazyLock::new(|| Query::new(&QUERY_LANGUAGE, "(program (definition) @def)").unwrap());
static CAPTURE_DEFINITIONS_QUERY: LazyLock<Query> = LazyLock::new(|| {
Query::new(
&QUERY_LANGUAGE,
"
(named_node
(capture) @capture.definition)
(list
(capture) @capture.definition)
(anonymous_node
(capture) @capture.definition)
(grouping
(capture) @capture.definition)
(missing_node
(capture) @capture.definition)
",
)
.unwrap()
});
static CAPTURE_REFERENCES_QUERY: LazyLock<Query> = LazyLock::new(|| {
Query::new(&QUERY_LANGUAGE, "(parameters (capture) @capture.reference)").unwrap()
});
pub static IDENTIFIER_REGEX: LazyLock<Regex> =
LazyLock::new(|| Regex::new(r"^[a-zA-Z0-9_-][a-zA-Z0-9_.-]*$").unwrap());
pub static INTEGER_REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"^-?\d+$").unwrap());
pub async fn diagnostic<C: LspClient>(
backend: &Backend<C>,
params: DocumentDiagnosticParams,
) -> Result<DocumentDiagnosticReportResult> {
let uri = ¶ms.text_document.uri;
let Some(document) = backend.document_map.get(uri).as_deref().cloned() else {
return Err(Error {
code: ErrorCode::InternalError,
message: format!("Document not found for URI '{uri}'").into(),
data: None,
});
};
let full_document_diagnostic_report = create_diagnostic_report(backend, document, uri).await;
let mut seen = HashSet::from([uri.clone()]);
let mut related_documents = HashMap::new();
add_related_diagnostics(&mut related_documents, &mut seen, backend, uri).await;
let related_documents = if related_documents.is_empty() {
None
} else {
Some(related_documents)
};
Ok(DocumentDiagnosticReportResult::Report(
DocumentDiagnosticReport::Full(RelatedFullDocumentDiagnosticReport {
related_documents,
full_document_diagnostic_report,
}),
))
}
static QUERY_SCAN_CACHE: LazyLock<DashMap<(String, String), Option<usize>>> =
LazyLock::new(DashMap::new);
async fn create_diagnostic_report<C: LspClient>(
backend: &Backend<C>,
document: DocumentData,
uri: &Url,
) -> FullDocumentDiagnosticReport {
let language_data = document
.language_name
.as_ref()
.and_then(|name| backend.language_map.get(name))
.as_deref()
.cloned();
let ignore_missing_language = false;
let cache = true;
let items = get_diagnostics(
uri,
&backend.document_map,
document,
language_data,
backend.options.clone(),
ignore_missing_language,
cache,
)
.await;
FullDocumentDiagnosticReport {
result_id: None,
items,
}
}
async fn add_related_diagnostics<C: LspClient>(
related_documents: &mut HashMap<Url, DocumentDiagnosticReportKind>,
seen: &mut HashSet<Url>,
backend: &Backend<C>,
uri: &Url,
) {
let deps = backend
.dependents
.get(uri)
.map(|deps| deps.clone())
.unwrap_or_default();
for uri in deps {
if seen.contains(&uri) {
continue;
}
seen.insert(uri.clone());
if let Some(document) = backend.document_map.get(&uri).map(|doc| doc.clone()) {
related_documents.insert(
uri.clone(),
DocumentDiagnosticReportKind::Full(
create_diagnostic_report(backend, document, &uri).await,
),
);
}
Box::pin(add_related_diagnostics(
related_documents,
seen,
backend,
&uri,
))
.await;
}
}
fn get_pattern_diagnostic_cached(
pattern_node: Node,
rope: &Rope,
language_name: String,
language: &Language,
) -> Option<usize> {
let pattern_text = pattern_node.text(rope);
let pattern_key = (language_name, pattern_text);
if let Some(cached_diag) = QUERY_SCAN_CACHE.get(&pattern_key) {
return *cached_diag;
}
let byte_offset = get_pattern_diagnostic(&pattern_key.1, language);
QUERY_SCAN_CACHE.insert(pattern_key, byte_offset);
byte_offset
}
fn get_pattern_diagnostic(pattern_text: &str, language: &Language) -> Option<usize> {
match Query::new(language, pattern_text) {
Err(QueryError {
kind: QueryErrorKind::Structure,
offset,
..
}) => Some(offset),
_ => None,
}
}
const ERROR_SEVERITY: Option<DiagnosticSeverity> = Some(DiagnosticSeverity::ERROR);
const WARNING_SEVERITY: Option<DiagnosticSeverity> = Some(DiagnosticSeverity::WARNING);
const HINT_SEVERITY: Option<DiagnosticSeverity> = Some(DiagnosticSeverity::HINT);
pub async fn get_diagnostics(
uri: &Url,
document_map: &DashMap<Url, DocumentData>,
document: DocumentData,
language_data: Option<Arc<LanguageData>>,
options_arc: Arc<tokio::sync::RwLock<Options>>,
ignore_missing_language: bool,
cache: bool,
) -> Vec<Diagnostic> {
let missing_language_diag = if !ignore_missing_language && language_data.is_none() {
let (message, code) = if let Some(language_name) = document.language_name.as_ref() {
(
format!("Language object for {language_name:?} not found"),
DiagnosticCode::NoLanguageObject.into(),
)
} else {
(
String::from("Language name could not be determined"),
DiagnosticCode::NoLanguageName.into(),
)
};
Some(Diagnostic {
message,
severity: WARNING_SEVERITY,
code,
..Default::default()
})
} else {
None
};
let mut full_report = get_diagnostics_recursively(
uri,
document_map,
document,
language_data.clone(),
options_arc.clone(),
cache,
&mut HashSet::new(),
)
.await;
if let Some(diagnostic) = missing_language_diag {
full_report.push(diagnostic);
}
// Check ABI version
let options = options_arc.read().await;
if let (Some(language_info), Some(abi_range)) =
(language_data.as_deref(), &options.supported_abi_versions)
{
let abi = language_info.language.abi_version() as u32;
if !abi_range.contains(&abi) {
let start = abi_range.start();
let end = abi_range.end();
let range_str = if start == end {
start.to_string()
} else {
format!("{start} through {end}")
};
full_report.push(Diagnostic {
message: format!("Unsupported parser ABI {abi}, expected {range_str}"),
severity: WARNING_SEVERITY,
code: DiagnosticCode::InvalidAbi.into(),
..Default::default()
});
}
}
full_report
}
async fn get_diagnostics_recursively(
uri: &Url,
document_map: &DashMap<Url, DocumentData>,
document: DocumentData,
language_data: Option<Arc<LanguageData>>,
options_arc: Arc<tokio::sync::RwLock<Options>>,
cache: bool,
seen: &mut HashSet<Url>,
) -> Vec<Diagnostic> {
let mut diagnostics = Box::pin(get_imported_query_diagnostics(
document_map,
options_arc.clone(),
&document.imported_uris,
language_data.clone(),
seen,
))
.await;
let tree = document.tree.clone();
let rope = document.rope.clone();
let ld = language_data.clone();
// Separately iterate over pattern definitions since this step can be costly and we want to
// wrap in `spawn_blocking`. We can't merge this with the main iteration loop because it would
// cause a race condition, due to holding the `options` lock while `await`ing.
let handle = tokio::task::spawn_blocking(move || {
let Some(LanguageData {
language,
name: language_name,
..
}) = ld.as_deref()
else {
return Vec::new();
};
let provider = TextProviderRope(&rope);
let mut cursor = QueryCursor::new();
let mut matches = cursor.matches(&DEFINITIONS_QUERY, tree.root_node(), &provider);
let mut diagnostics = Vec::new();
while let Some(match_) = matches.next() {
for capture in match_.captures {
if let Some(offset) = if cache {
get_pattern_diagnostic_cached(
capture.node,
&rope,
language_name.clone(),
language,
)
} else {
get_pattern_diagnostic(&capture.node.text(&rope), &language.clone())
} {
let true_offset = offset + capture.node.start_byte();
diagnostics.push(Diagnostic {
message: String::from("Invalid pattern structure"),
severity: ERROR_SEVERITY,
range: tree
.root_node()
.named_descendant_for_byte_range(true_offset, true_offset)
.map(|node| node.lsp_range(&rope))
.unwrap_or_default(),
code: DiagnosticCode::InvalidPatternStructure.into(),
..Default::default()
});
}
}
}
diagnostics
})
.await;
diagnostics.append(&mut handle.unwrap_or_default());
let options = options_arc.read().await;
let valid_captures = options
.valid_captures
.get(&uri_to_basename(uri).unwrap_or_default());
let rope = &document.rope;
let tree = &document.tree;
let valid_predicates = &options.valid_predicates;
let valid_directives = &options.valid_directives;
let string_arg_style = &options.diagnostic_options.string_argument_style;
let warn_unused_underscore_caps = options.diagnostic_options.warn_unused_underscore_captures;
let symbols = language_data.as_deref().map(|ld| &ld.symbols_set);
let fields = language_data.as_deref().map(|ld| &ld.fields_set);
let supertypes = language_data.as_deref().map(|ld| &ld.supertype_map);
let mut cursor = QueryCursor::new();
let mut helper_cursor = QueryCursor::new();
let mut tree_cursor = tree.root_node().walk();
let provider = &TextProviderRope(rope);
let mut matches = cursor.matches(&DIAGNOSTICS_QUERY, tree.root_node(), provider);
while let Some(match_) = matches.next() {
for capture in match_.captures {
let capture_name = DIAGNOSTICS_QUERY.capture_names()[capture.index as usize];
let capture_text = capture.node.text(rope);
let range = capture.node.lsp_range(rope);
match capture_name {
capture_name if capture_name.starts_with("node.") => {
let Some(symbols) = symbols else {
continue;
};
let named = capture_name == "node.named";
let capture_text = if named {
capture_text
} else {
remove_unnecessary_escapes(&capture_text)
};
let sym = SymbolInfo {
label: capture_text,
named,
};
if !symbols.contains(&sym) {
diagnostics.push(Diagnostic {
message: format!("Invalid node type: \"{}\"", sym.label),
severity: ERROR_SEVERITY,
range,
code: DiagnosticCode::InvalidNode.into(),
..Default::default()
});
}
}
"supertype" => {
let (Some(supertypes), Some(symbols)) = (supertypes, symbols) else {
continue;
};
let supertype_text = capture_text;
let sym = SymbolInfo {
label: supertype_text,
named: true,
};
if let Some(subtypes) = supertypes.get(&sym) {
let subtype = capture.node.next_named_sibling().unwrap();
let named = subtype.kind() == "identifier";
let subtype_text = subtype.text(rope);
let subtype_text = if named {
subtype_text
} else {
remove_unnecessary_escapes(&subtype_text[1..subtype_text.len() - 1])
};
let subtype_sym = SymbolInfo {
label: subtype_text,
named,
};
let range = subtype.lsp_range(rope);
// Only run this check when subtypes is not empty, to account for parsers
// generated with ABI < 15
if !subtypes.is_empty() && !subtypes.contains(&subtype_sym) {
diagnostics.push(Diagnostic {
message: format!(
"Node \"{}\" is not a subtype of \"{}\"",
subtype_sym.label, sym.label
),
severity: ERROR_SEVERITY,
range,
code: DiagnosticCode::InvalidSubtype.into(),
..Default::default()
});
} else if subtypes.is_empty() && !symbols.contains(&subtype_sym) {
diagnostics.push(Diagnostic {
message: format!("Invalid node type: \"{}\"", subtype_sym.label),
severity: ERROR_SEVERITY,
range,
code: DiagnosticCode::InvalidNode.into(),
..Default::default()
});
}
} else {
diagnostics.push(Diagnostic {
message: format!("Node \"{}\" is not a supertype", sym.label),
severity: ERROR_SEVERITY,
range,
code: DiagnosticCode::InvalidSupertype.into(),
..Default::default()
});
}
}
"field" => {
let Some(fields) = fields else {
continue;
};
let field = capture_text;
if !fields.contains(&field) {
diagnostics.push(Diagnostic {
message: format!("Invalid field name: \"{field}\""),
severity: ERROR_SEVERITY,
range,
code: DiagnosticCode::InvalidField.into(),
..Default::default()
});
}
}
"alternation" => {
let mut seen = HashSet::new();
for child in capture.node.named_children(&mut tree_cursor) {
let kind = child.kind();
let named = match kind {
"named_node" => true,
"anonymous_node" => false,
"capture" => break,
_ => continue,
};
if named && child.named_child(1).is_some_and(|c| c.kind() != "capture") {
continue;
}
let content_node = child.named_child(0).unwrap_or(child);
let id = (content_node.text(rope), named);
if seen.contains(&id) {
diagnostics.push(Diagnostic {
message: format!(
"{} already captured by this alternation (fix available)",
if named {
["(", id.0.as_str(), ")"].concat()
} else {
id.0
},
),
severity: HINT_SEVERITY,
range: child.lsp_range(rope),
data: Some(CodeActions::Remove.into()),
code: DiagnosticCode::RedundantAlternant.into(),
..Default::default()
});
} else {
seen.insert(id);
}
}
}
"error" => diagnostics.push(Diagnostic {
message: "Invalid syntax".to_owned(),
severity: ERROR_SEVERITY,
range,
code: DiagnosticCode::InvalidSyntax.into(),
..Default::default()
}),
"missing" => diagnostics.push(Diagnostic {
message: format!("Missing \"{}\"", capture.node.kind()),
severity: ERROR_SEVERITY,
range,
code: DiagnosticCode::MissingToken.into(),
..Default::default()
}),
"capture.reference" => {
let mut matches = helper_cursor.matches(
&CAPTURE_DEFINITIONS_QUERY,
tree.root_node()
.child_with_descendant(capture.node)
.unwrap(),
provider,
);
let mut valid = false;
while let Some(m) = matches.next() {
if m.captures
.iter()
.any(|cap| cap.node.text(rope) == capture_text)
{
valid = true;
break;
}
}
if !valid {
diagnostics.push(Diagnostic {
message: format!("Undeclared capture: \"{capture_text}\""),
severity: ERROR_SEVERITY,
range,
code: DiagnosticCode::UndeclaredCapture.into(),
..Default::default()
});
}
}
"capture.definition" => {
if let Some(suffix) = capture_text.strip_prefix("@") {
if !suffix.starts_with('_')
&& valid_captures
.is_some_and(|c| !c.contains_key(&String::from(suffix)))
{
diagnostics.push(Diagnostic {
message: format!(
"Invalid capture name \"{capture_text}\" (fix available)"
),
severity: WARNING_SEVERITY,
range,
data: Some(CodeActions::PrefixUnderscore.into()),
code: DiagnosticCode::InvalidCaptureName.into(),
..Default::default()
});
} else if suffix.starts_with('_') && warn_unused_underscore_caps {
let mut matches = helper_cursor.matches(
&CAPTURE_REFERENCES_QUERY,
tree.root_node()
.child_with_descendant(capture.node)
.unwrap(),
provider,
);
let mut valid = false;
while let Some(m) = matches.next() {
if m.captures
.iter()
.any(|cap| cap.node.text(rope) == capture_text)
{
valid = true;
break;
}
}
if !valid {
diagnostics.push(Diagnostic {
message: String::from(
"Unused `_`-prefixed capture (fix available)",
),
severity: WARNING_SEVERITY,
range,
tags: Some(vec![DiagnosticTag::UNNECESSARY]),
data: Some(CodeActions::Remove.into()),
code: DiagnosticCode::UnusedAuxiliaryCapture.into(),
..Default::default()
});
}
}
}
}
"predicate" | "directive" => {
let (validator, code) = if capture_name == "predicate" {
(valid_predicates, DiagnosticCode::UnrecognizedPredicate)
} else {
(valid_directives, DiagnosticCode::UnrecognizedDirective)
};
if validator.is_empty() {
continue;
}
if let Some(predicate) = validator.get(&capture_text) {
validate_predicate(
&mut diagnostics,
&mut tree_cursor,
rope,
language_data.as_ref(),
&predicate.parameters,
capture.node,
);
} else {
diagnostics.push(Diagnostic {
message: format!("Unrecognized {capture_name} \"{capture_text}\""),
severity: WARNING_SEVERITY,
range,
code: code.into(),
..Default::default()
});
}
}
"escape" => match capture_text.chars().nth(1) {
None | Some('"' | '\\' | 'n' | 'r' | 't' | '0') => {}
_ => {
diagnostics.push(Diagnostic {
message: String::from("Unnecessary escape sequence (fix available)"),
severity: WARNING_SEVERITY,
range,
data: Some(CodeActions::RemoveBackslash.into()),
code: DiagnosticCode::UnnecessaryEscapeSequence.into(),
..Default::default()
});
}
},
"pattern" => {
let mut matches =
helper_cursor.matches(&CAPTURES_QUERY, capture.node, provider);
if matches.next().is_none() {
diagnostics.push(Diagnostic {
message: String::from(
"This pattern has no captures, and will not be processed (fix available)",
),
range,
severity: WARNING_SEVERITY,
tags: Some(vec![DiagnosticTag::UNNECESSARY]),
data: Some(CodeActions::Remove.into()),
code: DiagnosticCode::UnnecessaryPattern.into(),
..Default::default()
});
}
}
"string" => {
if *string_arg_style != StringArgumentStyle::PreferUnquoted {
continue;
}
// String contains escape sequences
if capture.node.named_child_count() > 0
|| !IDENTIFIER_REGEX.is_match(&capture_text)
{
continue;
}
let mut range = range;
range.start.character -= 1;
range.end.character += 1;
diagnostics.push(Diagnostic {
message: String::from("Unnecessary quotations (fix available)"),
range,
severity: HINT_SEVERITY,
data: Some(CodeActions::Trim.into()),
code: DiagnosticCode::UnnecessaryQuotations.into(),
..Default::default()
});
}
"identifier" => {
if *string_arg_style != StringArgumentStyle::PreferQuoted {
continue;
}
diagnostics.push(Diagnostic {
message: String::from("Unquoted string argument (fix available)"),
range,
severity: HINT_SEVERITY,
data: Some(CodeActions::Enquote.into()),
code: DiagnosticCode::UnquotedString.into(),
..Default::default()
});
}
_ => {}
}
}
}
diagnostics
}
async fn get_imported_query_diagnostics(
document_map: &DashMap<Url, DocumentData>,
options_arc: Arc<tokio::sync::RwLock<Options>>,
imported_uris: &Vec<ImportedUri>,
language_data: Option<Arc<LanguageData>>,
seen: &mut HashSet<Url>,
) -> Vec<Diagnostic> {
let mut items = Vec::new();
for ImportedUri {
start_col,
end_col,
name,
uri,
} in imported_uris
{
let range = Range {
start: Position::new(0, *start_col),
end: Position::new(0, *end_col),
};
if let Some(uri) = uri {
if seen.contains(uri) {
continue;
}
seen.insert(uri.clone());
if let Some(document) = document_map.get(uri).map(|doc| doc.clone()) {
let mut severity = DiagnosticSeverity::HINT;
let inner_diags = get_diagnostics_recursively(
uri,
document_map,
document,
language_data.clone(),
options_arc.clone(),
true,
seen,
)
.await;
let inner_diags: Vec<DiagnosticRelatedInformation> = inner_diags
.into_iter()
.map(|diag| {
if let Some(sev) = diag.severity {
// This misleadingly computes the maximum severity
severity = std::cmp::min(severity, sev);
}
DiagnosticRelatedInformation {
message: diag.message,
location: Location {
uri: uri.clone(),
range: diag.range,
},
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | true |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/cli/check.rs | src/cli/check.rs | use std::{
env, fs,
path::PathBuf,
sync::{Arc, LazyLock, atomic::AtomicI32},
};
use dashmap::DashMap;
use futures::future::join_all;
use tower_lsp::lsp_types::Url;
use crate::{
LanguageData, Options,
cli::lint::LintOptions,
handlers::did_open::init_language_data,
util::{self, get_scm_files},
};
use super::{format::format_directories, lint::lint_file};
static LANGUAGE_CACHE: LazyLock<DashMap<String, Arc<LanguageData>>> = LazyLock::new(DashMap::new);
pub async fn check_directories(
directories: &[PathBuf],
config: String,
workspace: Option<PathBuf>,
format: bool,
fix: bool,
) -> i32 {
let Ok(options) = serde_json::from_str::<Options>(&config) else {
eprintln!("Could not parse the provided configuration");
return 1;
};
let options_arc: Arc<tokio::sync::RwLock<Options>> = Arc::new(options.clone().into());
let exit_code = Arc::new(AtomicI32::new(0));
// If directories are not specified, check all files in the current directory
let directories = if directories.is_empty() {
&[env::current_dir().expect("Failed to get current directory")]
} else {
directories
};
let workspace = workspace
.unwrap_or_else(|| env::current_dir().expect("Failed to get current directory"))
.canonicalize()
.expect("Workspace path should be valid");
let workspace = Arc::new(workspace);
let scm_files = get_scm_files(directories);
let tasks = scm_files.into_iter().filter_map(|path| {
let options_arc = options_arc.clone();
let exit_code = exit_code.clone();
let absolute_path = path.canonicalize().expect("Path should be valid");
let uri = Url::from_file_path(&absolute_path).expect("Path should be absolute");
let language_name = util::get_language_name(&uri, &options);
let language_data = language_name.and_then(|name| {
LANGUAGE_CACHE.get(&name).as_deref().cloned().or_else(|| {
util::get_language(&name, &options)
.map(|lang| Arc::new(init_language_data(lang, name)))
})
});
let Ok(source) = fs::read_to_string(&path) else {
eprintln!("Failed to read {}", absolute_path.display());
exit_code.store(1, std::sync::atomic::Ordering::Relaxed);
return None;
};
let workspace = workspace.clone();
let ignore_missing_language = false;
let lint_opts = LintOptions::new(fix, ignore_missing_language);
Some(tokio::spawn(async move {
if let Some(new_source) = lint_file(
absolute_path.as_path(),
&workspace,
&source,
options_arc.clone(),
lint_opts,
language_data,
&exit_code,
)
.await
&& fs::write(&path, new_source).is_err()
{
eprintln!("Failed to write {}", absolute_path.display());
exit_code.store(1, std::sync::atomic::Ordering::Relaxed);
}
}))
});
join_all(tasks).await;
if format && format_directories(directories, true, options.formatting_options).await != 0 {
exit_code.store(1, std::sync::atomic::Ordering::Relaxed);
}
exit_code.load(std::sync::atomic::Ordering::Relaxed)
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/cli/lint.rs | src/cli/lint.rs | use std::{
env, fs,
path::{Path, PathBuf},
sync::{Arc, atomic::AtomicI32},
};
use dashmap::DashMap;
use futures::future::join_all;
use ropey::Rope;
use tower_lsp::lsp_types::{CodeAction, CodeActionOrCommand, DiagnosticSeverity, Url};
use crate::{
DocumentData, LanguageData, Options,
handlers::{
code_action::diag_to_code_action,
diagnostic::{DiagnosticCode, get_diagnostics},
did_open::populate_import_documents,
},
util::{edit_rope, get_imported_uris, get_language_name, get_scm_files, parse},
};
#[derive(Debug, Copy, Clone)]
pub struct LintOptions {
pub fix: bool,
pub ignore_missing_language: bool,
}
impl LintOptions {
#[must_use]
pub const fn new(fix: bool, ignore_missing_language: bool) -> Self {
Self {
fix,
ignore_missing_language,
}
}
}
pub(super) async fn lint_file(
absolute_path: &Path,
workspace: &Path,
source: &str,
server_options: Arc<tokio::sync::RwLock<Options>>,
lint_options: LintOptions,
language_data: Option<Arc<LanguageData>>,
exit_code: &AtomicI32,
) -> Option<String> {
let rope = Rope::from(source);
let tree = parse(&rope, None);
let uri = Url::from_file_path(absolute_path).expect("Path should be absolute");
let options_val = server_options.clone().read().await.clone();
let language_name = get_language_name(&uri, &options_val);
let workspace_uris = &[workspace.to_owned()];
let imported_uris = get_imported_uris(workspace_uris, &options_val, &uri, &rope, &tree);
let document_map = DashMap::new();
populate_import_documents(&document_map, workspace_uris, &options_val, &imported_uris);
let doc = DocumentData {
tree,
rope,
language_name,
version: Option::default(),
imported_uris,
};
let cache = false;
// The query construction already validates node names, fields, supertypes,
// etc.
let diagnostics = get_diagnostics(
&uri,
&document_map,
doc.clone(),
language_data,
server_options,
lint_options.ignore_missing_language,
cache,
)
.await;
if diagnostics.is_empty() {
return None;
}
let mut edits = Vec::with_capacity(if lint_options.fix {
diagnostics.len()
} else {
0
});
if !lint_options.fix {
exit_code.store(1, std::sync::atomic::Ordering::Relaxed);
}
let mut unfixed_issues = 0;
for diagnostic in diagnostics {
if lint_options.fix {
let is_module_diagnostic = diagnostic.code == DiagnosticCode::ImportIssues.into();
let Some(action) = diag_to_code_action(&doc.tree, &doc.rope, diagnostic, &uri) else {
if !is_module_diagnostic {
unfixed_issues += 1;
}
continue;
};
let CodeActionOrCommand::CodeAction(CodeAction {
edit: Some(edit), ..
}) = action
else {
continue;
};
let Some(mut changes) = edit.changes.and_then(|mut changes| changes.remove(&uri))
else {
continue;
};
edits.append(&mut changes);
} else {
let kind = match diagnostic.severity {
Some(DiagnosticSeverity::ERROR) => "Error",
Some(DiagnosticSeverity::WARNING) => "Warning",
Some(DiagnosticSeverity::INFORMATION) => "Info",
Some(DiagnosticSeverity::HINT) => "Hint",
_ => "Diagnostic",
};
eprintln!(
"{} in \"{}\" on line {}, col {}:\n {}",
kind,
absolute_path.display(),
diagnostic.range.start.line + 1,
diagnostic.range.start.character + 1,
diagnostic.message
);
for related_info in diagnostic.related_information.unwrap_or_default() {
eprintln!(
" ‣ {}:{}:{}: {}",
related_info
.location
.uri
.to_file_path()
.expect("Related information URI should be a valid file path")
.strip_prefix(workspace)
.expect("Related information URI should be within the workspace")
.to_string_lossy(),
related_info.location.range.start.line + 1,
related_info.location.range.start.character + 1,
related_info.message
);
}
}
}
if unfixed_issues > 0 {
let plurality = if unfixed_issues > 1 { "s" } else { "" };
println!(
"{}: {unfixed_issues} issue{plurality} could not be fixed automatically",
absolute_path.display()
);
}
if !lint_options.fix || edits.is_empty() {
return None;
}
edits.sort_unstable_by(|a, b| b.range.start.cmp(&a.range.start));
let mut rope = doc.rope;
for edit in edits {
let range = edit.range;
let new_text = edit.new_text;
edit_rope(&mut rope, range, &new_text);
}
Some(rope.to_string())
}
/// Lint all the given directories according to the given configuration. Linting covers things like
/// invalid capture names or predicate signatures, but not errors like invalid node names or
/// impossible patterns.
pub async fn lint_directories(
directories: &[PathBuf],
config: String,
workspace: Option<PathBuf>,
fix: bool,
) -> i32 {
let Ok(options) = serde_json::from_str::<Options>(&config) else {
eprintln!("Could not parse the provided configuration");
return 1;
};
let options: Arc<tokio::sync::RwLock<Options>> = Arc::new(options.into());
let exit_code = Arc::new(AtomicI32::new(0));
// If directories are not specified, lint all files in the current directory
let directories = if directories.is_empty() {
&[env::current_dir().expect("Failed to get current directory")]
} else {
directories
};
let workspace = workspace
.unwrap_or_else(|| env::current_dir().expect("Failed to get current directory"))
.canonicalize()
.expect("Workspace path should be valid");
let workspace = Arc::new(workspace);
let scm_files = get_scm_files(directories);
let tasks = scm_files.into_iter().filter_map(|path| {
let absolute_path = path.canonicalize().expect("Path should be valid");
let exit_code = exit_code.clone();
let options = options.clone();
if let Ok(source) = fs::read_to_string(&path) {
let workspace = workspace.clone();
let lint_opts = LintOptions::new(fix, true);
Some(tokio::spawn(async move {
if let Some(new_source) = lint_file(
absolute_path.as_path(),
&workspace,
&source,
options,
lint_opts,
None,
&exit_code,
)
.await
&& fs::write(&path, new_source).is_err()
{
eprintln!("Failed to write {}", absolute_path.display());
exit_code.store(1, std::sync::atomic::Ordering::Relaxed);
}
}))
} else {
eprintln!("Failed to read {}", absolute_path.display());
exit_code.store(1, std::sync::atomic::Ordering::Relaxed);
None
}
});
join_all(tasks).await;
exit_code.load(std::sync::atomic::Ordering::Relaxed)
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/cli/mod.rs | src/cli/mod.rs | pub mod check;
pub mod format;
pub mod lint;
pub mod profile;
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/cli/format.rs | src/cli/format.rs | use std::{
fs,
path::PathBuf,
sync::{Arc, atomic::AtomicI32},
};
use anstyle::{AnsiColor, Color, Style};
use futures::future::join_all;
use ropey::Rope;
use ts_query_ls::FormattingOptions;
use crate::{
handlers::formatting,
util::{get_scm_files, parse},
};
pub async fn format_directories(
directories: &[PathBuf],
check: bool,
fmt_options: FormattingOptions,
) -> i32 {
if directories.is_empty() {
eprintln!("No directories were specified to be formatted. No work was done.");
return 1;
}
let scm_files = get_scm_files(directories);
let exit_code = Arc::new(AtomicI32::new(0));
let use_color = std::env::var("NO_COLOR").map_or(true, |v| v.is_empty());
let (red, green, blue, purple) = if use_color {
(
Some(AnsiColor::Red),
Some(AnsiColor::Green),
Some(AnsiColor::Blue),
Some(AnsiColor::Magenta),
)
} else {
(None, None, None, None)
};
let tasks = scm_files.into_iter().map(|path| {
let exit_code = exit_code.clone();
tokio::spawn(async move {
let path_str = path.to_string_lossy();
let Ok(contents) = fs::read_to_string(&path) else {
eprintln!("Failed to read {path_str:?}");
exit_code.store(1, std::sync::atomic::Ordering::Relaxed);
return;
};
let rope = Rope::from(contents.as_str());
let tree = parse(&rope, None);
let Some(formatted) =
formatting::format_document(&rope, &tree.root_node(), fmt_options)
else {
exit_code.store(1, std::sync::atomic::Ordering::Relaxed);
eprintln!("No formatting performed -- invalid syntax detected at {path_str:?}");
return;
};
if check {
let mut edits = formatting::diffs(&contents, &formatted, rope);
if edits.next().is_some() {
exit_code.store(1, std::sync::atomic::Ordering::Relaxed);
eprintln!("{}", paint(purple, &format!("{path_str:?}:")));
let patch = diffy::create_patch(&contents, &formatted).to_string();
for line in patch.lines() {
if line.starts_with("@@") {
eprintln!("{}", paint(blue, line));
} else if line.starts_with('-') {
eprintln!("{}", paint(red, line));
} else if line.starts_with('+') {
eprintln!("{}", paint(green, line));
} else {
eprintln!("{line}");
}
}
eprintln!();
}
} else if fs::write(&path, formatted).is_err() {
exit_code.store(1, std::sync::atomic::Ordering::Relaxed);
eprint!("Failed to write to {path_str:?}");
}
})
});
join_all(tasks).await;
exit_code.load(std::sync::atomic::Ordering::Relaxed)
}
pub fn paint(color: Option<impl Into<Color>>, text: &str) -> String {
let style = Style::new().fg_color(color.map(Into::into));
format!("{style}{text}{style:#}")
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/src/cli/profile.rs | src/cli/profile.rs | use std::{
env, fs,
path::PathBuf,
sync::{Arc, LazyLock},
time::Instant,
};
use dashmap::DashMap;
use futures::future::join_all;
use tower_lsp::lsp_types::Url;
use tree_sitter::{Parser, Query, QueryCursor, StreamingIterator as _};
use crate::{
LanguageData, Options, QUERY_LANGUAGE,
handlers::did_open::init_language_data,
util::{self, get_scm_files},
};
static LANGUAGE_CACHE: LazyLock<DashMap<String, Arc<LanguageData>>> = LazyLock::new(DashMap::new);
static PATTERN_DEFINITION_QUERY: LazyLock<Query> =
LazyLock::new(|| Query::new(&QUERY_LANGUAGE, "(program (definition) @def)").unwrap());
pub async fn profile_directories(directories: &[PathBuf], config: String, per_file: bool) {
let Ok(options) = serde_json::from_str::<Options>(&config) else {
eprintln!("Could not parse the provided configuration");
return;
};
let directories = if directories.is_empty() {
&[env::current_dir().expect("Failed to get current directory")]
} else {
directories
};
let scm_files = get_scm_files(directories);
let tasks = scm_files.into_iter().filter_map(|path| {
let uri = Url::from_file_path(path.canonicalize().unwrap()).unwrap();
let path_str = path.to_string_lossy().to_string();
let language_name = util::get_language_name(&uri, &options);
let language_data = language_name.and_then(|name| {
LANGUAGE_CACHE.get(&name).as_deref().cloned().or_else(|| {
util::get_language(&name, &options)
.map(|lang| Arc::new(init_language_data(lang, name)))
})
});
let Some(lang_data) = language_data else {
eprintln!(
"Could not retrieve language for {}",
path.canonicalize().unwrap().display()
);
return None;
};
let lang = lang_data.language.clone();
let Ok(source) = fs::read_to_string(&path) else {
eprintln!("Failed to read {}", path.canonicalize().unwrap().display());
return None;
};
Some(tokio::spawn(async move {
if per_file {
let now = Instant::now();
let _ = Query::new(&lang, &source);
return vec![(path_str.clone(), 1, now.elapsed().as_micros())];
}
let mut results = Vec::new();
let mut parser = Parser::new();
parser.set_language(&QUERY_LANGUAGE).unwrap();
let tree = parser.parse(&source, None).expect("Tree should exist");
let mut cursor = QueryCursor::new();
let source_bytes = source.as_bytes();
let mut matches =
cursor.matches(&PATTERN_DEFINITION_QUERY, tree.root_node(), source_bytes);
while let Some(match_) = matches.next() {
for capture in match_.captures {
let now = Instant::now();
let _ = Query::new(
&lang,
capture
.node
.utf8_text(source_bytes)
.expect("Source should be UTF-8"),
);
results.push((
path_str.clone(),
capture.node.start_position().row + 1,
now.elapsed().as_micros(),
));
}
}
results
}))
});
let results = join_all(tasks).await;
let mut results = results
.into_iter()
.flat_map(|r| r.unwrap_or_default())
.collect::<Vec<_>>();
results.sort_unstable_by(|a, b| a.2.cmp(&b.2));
for (path, row, time) in results {
let time = format!("{:.2}ms", time as f64 / 1000.0);
if per_file {
println!("{time:<10} {path}");
} else {
println!("{time:<10} {path}:{row}");
}
}
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/tests/lint.rs | tests/lint.rs | #[cfg(test)]
mod test {
use rstest::rstest;
use std::{
collections::{BTreeMap, HashMap},
path::Path,
process::Command,
sync::LazyLock,
};
use ts_query_ls::{Options, Predicate, PredicateParameter};
static CONFIG: LazyLock<Options> = LazyLock::new(|| Options {
valid_predicates: BTreeMap::from([
(
String::from("pred-name"),
Predicate {
description: String::from("A predicate"),
parameters: vec![PredicateParameter {
description: None,
type_: ts_query_ls::PredicateParameterType::Any,
arity: ts_query_ls::PredicateParameterArity::Variadic,
..Default::default()
}],
},
),
(
String::from("match"),
Predicate {
description: String::from("Check match"),
parameters: vec![PredicateParameter {
description: None,
type_: ts_query_ls::PredicateParameterType::Any,
arity: ts_query_ls::PredicateParameterArity::Variadic,
..Default::default()
}],
},
),
]),
valid_captures: HashMap::from([
(
String::from("after_trailing_whitespace"),
BTreeMap::from([(String::from("capture"), String::from("A capture."))]),
),
(
String::from("before_predicates"),
BTreeMap::from([(String::from("type"), String::from("A type."))]),
),
]),
..Default::default()
});
#[rstest]
#[case(
concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/after_trailing_whitespace.scm"),
Some(["Invalid capture name \"@cap\" (fix available)"].as_slice())
)]
#[case(
concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/before_predicates.scm"),
Some(["Unrecognized predicate \"lua-match\""].as_slice())
)]
#[case(
concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/before_missing.scm"),
Some(["This pattern has no captures, and will not be processed"].as_slice())
)]
#[case(
concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/before_syntax_error.scm"),
Some(["Invalid syntax"].as_slice())
)]
#[case(
concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/example_test_files/simple.scm"),
None
)]
fn cli_lint(#[case] path_str: &str, #[case] warning_messages: Option<&[&str]>) {
// Arrange
let path = Path::new(path_str);
// Act
let output = Command::new(env!("CARGO_BIN_EXE_ts_query_ls"))
.arg("lint")
.arg(path)
.arg("--config")
.arg(serde_json::to_string::<Options>(&CONFIG).unwrap())
.output()
.expect("Failed to wait on ts-query-ls format command");
// Assert
let string_output = String::from_utf8(output.stderr).unwrap();
if let Some(messages) = warning_messages {
for message in messages {
assert!(string_output.contains(message));
}
assert_eq!(output.status.code(), Some(1));
} else {
assert_eq!(string_output, "");
assert_eq!(output.status.code(), Some(0));
}
}
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/tests/format.rs | tests/format.rs | #[cfg(test)]
mod test {
use rstest::{Context, rstest};
use std::{env::temp_dir, fs, path::Path, process::Command};
#[rstest]
#[case(
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/before_trailing_whitespace.scm")),
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/after_trailing_whitespace.scm")),
)]
#[case(
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/before_predicates.scm")),
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/after_predicates.scm")),
)]
#[case(
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/before_missing.scm")),
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/after_missing.scm")),
)]
#[case(
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/before_syntax_error.scm")),
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/after_syntax_error.scm")),
)]
#[case(
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/before_complex.scm")),
include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/after_complex.scm")),
)]
fn cli_format_write(#[context] ctx: Context, #[case] before: &str, #[case] after: &str) {
// Arrange
let path = temp_dir()
.join("ts-query-ls")
.join(ctx.case.unwrap().to_string())
.join("test.scm");
fs::create_dir_all(path.parent().unwrap()).expect("Failed to create test case directory");
fs::write(&path, before).expect("Failed to write test file");
// Act
Command::new(env!("CARGO_BIN_EXE_ts_query_ls"))
.arg("format")
.arg(&path)
.output()
.expect("Failed to wait on ts-query-ls format command");
// Assert
let formatted = fs::read_to_string(&path).expect("Failed to read test file");
_ = fs::remove_file(path); // ignore cleanup errors
assert_eq!(after, formatted);
}
enum FormatValidity {
Valid,
Invalid,
SyntaxError,
}
#[rstest]
#[case(
concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/before_trailing_whitespace.scm"),
FormatValidity::Invalid
)]
#[case(
concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/after_trailing_whitespace.scm"),
FormatValidity::Valid
)]
#[case(
concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/before_predicates.scm"),
FormatValidity::Invalid
)]
#[case(
concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/before_missing.scm"),
FormatValidity::Invalid
)]
#[case(
concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/before_syntax_error.scm"),
FormatValidity::SyntaxError // NOTE: Files containing syntax errors cannot be formatted
)]
#[case(
concat!(env!("CARGO_MANIFEST_DIR"), "/tests/fixtures/formatting_test_files/after_complex.scm"),
FormatValidity::Valid
)]
fn cli_format_validate(#[case] path_str: &str, #[case] valid: FormatValidity) {
// Arrange
let path = Path::new(path_str);
// Act
let output = Command::new(env!("CARGO_BIN_EXE_ts_query_ls"))
.arg("format")
.arg("--check")
.arg(path)
.output()
.expect("Failed to wait on ts-query-ls format command");
// Assert
match valid {
FormatValidity::Valid => {
assert!(output.stderr.is_empty());
assert_eq!(output.status.code(), Some(0));
}
FormatValidity::Invalid => {
assert!(String::from_utf8(output.stderr).unwrap().contains(path_str));
assert_eq!(output.status.code(), Some(1));
}
FormatValidity::SyntaxError => {
assert!(
String::from_utf8(output.stderr)
.unwrap()
.contains("No formatting performed -- invalid syntax detected at")
);
assert_eq!(output.status.code(), Some(1));
}
}
}
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
ribru17/ts_query_ls | https://github.com/ribru17/ts_query_ls/blob/40593cb9158dbafb6c1f2e89b24629d8b1d16a8f/xtask/src/main.rs | xtask/src/main.rs | use std::{fs::File, path::Path};
use clap::{Parser, Subcommand};
use serde_json::to_writer_pretty;
use ts_query_ls::Options;
#[derive(Parser)]
#[command(author, version, about)]
struct Args {
#[command(subcommand)]
command: Commands,
}
#[derive(Subcommand)]
enum Commands {
/// Generate a JSON schema for the config file.
Schema,
}
fn main() {
let args = Args::parse();
match args.command {
Commands::Schema => {
let schema = schemars::schema_for!(Options);
let xtask_path = env!("CARGO_MANIFEST_DIR");
let schema_out_dir = Path::new(&xtask_path)
.parent()
.unwrap()
.join("schemas/config.json");
let Ok(mut json_file) = File::create(&schema_out_dir) else {
panic!("Cannot create file: {}", schema_out_dir.display());
};
to_writer_pretty(&mut json_file, &schema).unwrap();
}
}
}
| rust | MIT | 40593cb9158dbafb6c1f2e89b24629d8b1d16a8f | 2026-01-04T20:20:11.073589Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/benchmarks/from_iterator.rs | benchmarks/from_iterator.rs | use criterion::{black_box, criterion_group, criterion_main, Criterion};
use enumflags2::{bitflags, BitFlags};
#[bitflags]
#[repr(u16)]
#[derive(Clone, Copy)]
pub enum Test {
Flag1 = 1 << 0,
Flag2 = 1 << 1,
Flag3 = 1 << 2,
Flag4 = 1 << 3,
Flag5 = 1 << 4,
Flag6 = 1 << 5,
Flag7 = 1 << 6,
Flag8 = 1 << 7,
Flag9 = 1 << 8,
Flag10 = 1 << 9,
Flag11 = 1 << 10,
}
pub fn iterators(c: &mut Criterion) {
let v = vec![Test::Flag3, Test::Flag7, Test::Flag5, Test::Flag11];
let v2 = vec![Test::Flag10, Test::Flag3, Test::Flag1, Test::Flag4];
c.bench_function("simple iterator collect", |b| {
b.iter(|| black_box(&v).iter().copied().collect::<BitFlags<_>>())
});
c.bench_function("chained iterator collect", |b| {
b.iter(|| {
black_box(&v)
.iter()
.chain(black_box(&v2).iter())
.copied()
.collect::<BitFlags<_>>()
})
});
c.bench_function("simple iterator extend", |b| {
b.iter(|| {
let mut flags = BitFlags::empty();
flags.extend(black_box(&v).iter().copied());
flags
})
});
c.bench_function("chained iterator extend", |b| {
b.iter(|| {
let mut flags = BitFlags::empty();
flags.extend(black_box(&v).iter().chain(black_box(&v2).iter()).copied());
flags
})
});
}
criterion_group!(benches, iterators);
criterion_main!(benches);
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/benchmarks/iterator.rs | benchmarks/iterator.rs | use criterion::{black_box, criterion_group, criterion_main, Criterion};
use enumflags2::{bitflags, BitFlags};
#[bitflags]
#[repr(u16)]
#[derive(Clone, Copy, Debug)]
pub enum Test {
Flag1 = 1 << 0,
Flag2 = 1 << 1,
Flag3 = 1 << 2,
Flag4 = 1 << 3,
Flag5 = 1 << 4,
Flag6 = 1 << 5,
Flag7 = 1 << 6,
Flag8 = 1 << 7,
Flag9 = 1 << 8,
Flag10 = 1 << 9,
Flag11 = 1 << 10,
Flag12 = 1 << 11,
}
pub fn iterators(c: &mut Criterion) {
let v1 = BitFlags::<Test>::from_bits(0x003).unwrap();
let v2 = BitFlags::<Test>::from_bits(0x691).unwrap();
let v3 = BitFlags::<Test>::from_bits(0xfed).unwrap();
c.bench_function("iterate (2/12)", |b| {
b.iter(|| black_box(&v1).iter().collect::<Vec<_>>())
});
c.bench_function("iterate (5/12)", |b| {
b.iter(|| black_box(&v2).iter().collect::<Vec<_>>())
});
c.bench_function("iterate (10/12)", |b| {
b.iter(|| black_box(&v3).iter().collect::<Vec<_>>())
});
}
criterion_group!(benches, iterators);
criterion_main!(benches);
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/src/lib.rs | src/lib.rs | //! # Enum Flags
//! `enumflags2` implements the classic bitflags datastructure. Annotate an enum
//! with `#[bitflags]`, and `BitFlags<YourEnum>` will be able to hold arbitrary combinations
//! of your enum within the space of a single integer.
//!
//! Unlike other crates, `enumflags2` makes the type-level distinction between
//! a single flag (`YourEnum`) and a set of flags (`BitFlags<YourEnum>`).
//! This allows idiomatic handling of bitflags, such as with `match` and `iter`.
//!
//! ## Example
//! ```
//! use enumflags2::{bitflags, make_bitflags, BitFlags};
//!
//! #[bitflags]
//! #[repr(u8)]
//! #[derive(Copy, Clone, Debug, PartialEq)]
//! enum Test {
//! A = 0b0001,
//! B = 0b0010,
//! C, // unspecified variants pick unused bits automatically
//! D = 0b1000,
//! }
//!
//! // Flags can be combined with |, this creates a BitFlags of your type:
//! let a_b: BitFlags<Test> = Test::A | Test::B;
//! let a_c = Test::A | Test::C;
//! let b_c_d = make_bitflags!(Test::{B | C | D});
//!
//! // The debug output lets you inspect both the numeric value and
//! // the actual flags:
//! assert_eq!(format!("{:?}", a_b), "BitFlags<Test>(0b11, A | B)");
//!
//! // But if you'd rather see only one of those, that's available too:
//! assert_eq!(format!("{}", a_b), "A | B");
//! assert_eq!(format!("{:04b}", a_b), "0011");
//!
//! // Iterate over the flags like a normal set
//! assert_eq!(a_b.iter().collect::<Vec<_>>(), &[Test::A, Test::B]);
//!
//! // Query the contents with contains and intersects
//! assert!(a_b.contains(Test::A));
//! assert!(b_c_d.contains(Test::B | Test::C));
//! assert!(!(b_c_d.contains(a_b)));
//!
//! assert!(a_b.intersects(a_c));
//! assert!(!(a_b.intersects(Test::C | Test::D)));
//! ```
//!
//! ## Optional Feature Flags
//!
//! - [`serde`](https://serde.rs/) implements `Serialize` and `Deserialize`
//! for `BitFlags<T>`.
//! - `std` implements `std::error::Error` for `FromBitsError`.
//!
//! ## `const fn`-compatible APIs
//!
//! **Background:** The subset of `const fn` features currently stabilized is pretty limited.
//! Most notably, [const traits are still at the RFC stage][const-trait-rfc],
//! which makes it impossible to use any overloaded operators in a const
//! context.
//!
//! **Naming convention:** If a separate, more limited function is provided
//! for usage in a `const fn`, the name is suffixed with `_c`.
//!
//! Apart from functions whose name ends with `_c`, the [`make_bitflags!`] macro
//! is often useful for many `const` and `const fn` usecases.
//!
//! **Blanket implementations:** If you attempt to write a `const fn` ranging
//! over `T: BitFlag`, you will be met with an error explaining that currently,
//! the only allowed trait bound for a `const fn` is `?Sized`. You will probably
//! want to write a separate implementation for `BitFlags<T, u8>`,
//! `BitFlags<T, u16>`, etc — best accomplished by a simple macro.
//!
//! **Documentation considerations:** The strategy described above is often used
//! by `enumflags2` itself. To avoid clutter in the auto-generated documentation,
//! the implementations for widths other than `u8` are marked with `#[doc(hidden)]`.
//!
//! ## Customizing `Default`
//!
//! By default, creating an instance of `BitFlags<T>` with `Default` will result in an empty
//! set. If that's undesirable, you may customize this:
//!
//! ```
//! # use enumflags2::{BitFlags, bitflags};
//! #[bitflags(default = B | C)]
//! #[repr(u8)]
//! #[derive(Copy, Clone, Debug, PartialEq)]
//! enum Test {
//! A = 0b0001,
//! B = 0b0010,
//! C = 0b0100,
//! D = 0b1000,
//! }
//!
//! assert_eq!(BitFlags::default(), Test::B | Test::C);
//! ```
//!
//! [const-trait-rfc]: https://github.com/rust-lang/rfcs/pull/2632
#![warn(missing_docs)]
#![cfg_attr(all(not(test), not(feature = "std")), no_std)]
use core::hash::{Hash, Hasher};
use core::marker::PhantomData;
use core::{cmp, ops};
#[allow(unused_imports)]
#[macro_use]
extern crate enumflags2_derive;
#[doc(hidden)]
pub use enumflags2_derive::bitflags_internal as bitflags;
// Internal macro: expand into a separate copy for each supported numeric type.
macro_rules! for_each_uint {
( $d:tt $tyvar:ident $dd:tt $docattr:ident => $($input:tt)* ) => {
macro_rules! implement {
( $d $tyvar:ty => $d($d $docattr:meta)? ) => {
$($input)*
}
}
implement! { u8 => }
implement! { u16 => doc(hidden) }
implement! { u32 => doc(hidden) }
implement! { u64 => doc(hidden) }
implement! { u128 => doc(hidden) }
}
}
/// A trait automatically implemented by `#[bitflags]` to make the enum
/// a valid type parameter for `BitFlags<T>`.
pub trait BitFlag: Copy + Clone + 'static + _internal::RawBitFlags {
/// Create a `BitFlags` with no flags set (in other words, with a value of 0).
///
/// This is a convenience reexport of [`BitFlags::empty`]. It can be called with
/// `MyFlag::empty()`, thus bypassing the need for type hints in some situations.
///
/// ```
/// # use enumflags2::{bitflags, BitFlags};
/// #[bitflags]
/// #[repr(u8)]
/// #[derive(Clone, Copy, PartialEq, Eq)]
/// enum MyFlag {
/// One = 1 << 0,
/// Two = 1 << 1,
/// Three = 1 << 2,
/// }
///
/// use enumflags2::BitFlag;
///
/// let empty = MyFlag::empty();
/// assert!(empty.is_empty());
/// assert_eq!(empty.contains(MyFlag::One), false);
/// assert_eq!(empty.contains(MyFlag::Two), false);
/// assert_eq!(empty.contains(MyFlag::Three), false);
/// ```
#[inline]
fn empty() -> BitFlags<Self> {
BitFlags::empty()
}
/// Create a `BitFlags` with all flags set.
///
/// This is a convenience reexport of [`BitFlags::all`]. It can be called with
/// `MyFlag::all()`, thus bypassing the need for type hints in some situations.
///
/// ```
/// # use enumflags2::{bitflags, BitFlags};
/// #[bitflags]
/// #[repr(u8)]
/// #[derive(Clone, Copy, PartialEq, Eq)]
/// enum MyFlag {
/// One = 1 << 0,
/// Two = 1 << 1,
/// Three = 1 << 2,
/// }
///
/// use enumflags2::BitFlag;
///
/// let all = MyFlag::all();
/// assert!(all.is_all());
/// assert_eq!(all.contains(MyFlag::One), true);
/// assert_eq!(all.contains(MyFlag::Two), true);
/// assert_eq!(all.contains(MyFlag::Three), true);
/// ```
#[inline]
fn all() -> BitFlags<Self> {
BitFlags::all()
}
/// Create a `BitFlags` if the raw value provided does not contain
/// any illegal flags.
///
/// This is a convenience reexport of [`BitFlags::from_bits`]. It can be called
/// with `MyFlag::from_bits(bits)`, thus bypassing the need for type hints in
/// some situations.
///
/// ```
/// # use enumflags2::{bitflags, BitFlags};
/// #[bitflags]
/// #[repr(u8)]
/// #[derive(Clone, Copy, PartialEq, Eq, Debug)]
/// enum MyFlag {
/// One = 1 << 0,
/// Two = 1 << 1,
/// Three = 1 << 2,
/// }
///
/// use enumflags2::BitFlag;
///
/// let flags = MyFlag::from_bits(0b11).unwrap();
/// assert_eq!(flags.contains(MyFlag::One), true);
/// assert_eq!(flags.contains(MyFlag::Two), true);
/// assert_eq!(flags.contains(MyFlag::Three), false);
/// let invalid = MyFlag::from_bits(1 << 3);
/// assert!(invalid.is_err());
/// ```
#[inline]
fn from_bits(bits: Self::Numeric) -> Result<BitFlags<Self>, FromBitsError<Self>> {
BitFlags::from_bits(bits)
}
/// Create a `BitFlags` from an underlying bitwise value. If any
/// invalid bits are set, ignore them.
///
/// This is a convenience reexport of [`BitFlags::from_bits_truncate`]. It can be
/// called with `MyFlag::from_bits_truncate(bits)`, thus bypassing the need for
/// type hints in some situations.
///
/// ```
/// # use enumflags2::{bitflags, BitFlags};
/// #[bitflags]
/// #[repr(u8)]
/// #[derive(Clone, Copy, PartialEq, Eq)]
/// enum MyFlag {
/// One = 1 << 0,
/// Two = 1 << 1,
/// Three = 1 << 2,
/// }
///
/// use enumflags2::BitFlag;
///
/// let flags = MyFlag::from_bits_truncate(0b1_1011);
/// assert_eq!(flags.contains(MyFlag::One), true);
/// assert_eq!(flags.contains(MyFlag::Two), true);
/// assert_eq!(flags.contains(MyFlag::Three), false);
/// ```
#[inline]
fn from_bits_truncate(bits: Self::Numeric) -> BitFlags<Self> {
BitFlags::from_bits_truncate(bits)
}
/// Create a `BitFlags` unsafely, without checking if the bits form
/// a valid bit pattern for the type.
///
/// Consider using [`from_bits`][BitFlag::from_bits]
/// or [`from_bits_truncate`][BitFlag::from_bits_truncate] instead.
///
/// # Safety
///
/// All bits set in `val` must correspond to a value of the enum.
///
/// # Example
///
/// This is a convenience reexport of [`BitFlags::from_bits_unchecked`]. It can be
/// called with `MyFlag::from_bits_unchecked(bits)`, thus bypassing the need for
/// type hints in some situations.
///
/// ```
/// # use enumflags2::{bitflags, BitFlags};
/// #[bitflags]
/// #[repr(u8)]
/// #[derive(Clone, Copy, PartialEq, Eq)]
/// enum MyFlag {
/// One = 1 << 0,
/// Two = 1 << 1,
/// Three = 1 << 2,
/// }
///
/// use enumflags2::BitFlag;
///
/// let flags = unsafe {
/// MyFlag::from_bits_unchecked(0b011)
/// };
///
/// assert_eq!(flags.contains(MyFlag::One), true);
/// assert_eq!(flags.contains(MyFlag::Two), true);
/// assert_eq!(flags.contains(MyFlag::Three), false);
/// ```
#[inline]
unsafe fn from_bits_unchecked(bits: Self::Numeric) -> BitFlags<Self> {
BitFlags::from_bits_unchecked(bits)
}
}
/// While the module is public, this is only the case because it needs to be
/// accessed by the macro. Do not use this directly. Stability guarantees
/// don't apply.
#[doc(hidden)]
pub mod _internal {
/// A trait automatically implemented by `#[bitflags]` to make the enum
/// a valid type parameter for `BitFlags<T>`.
///
/// # Safety
///
/// The values should reflect reality, like they do if the implementation
/// is generated by the procmacro.
///
/// `bits` must return the same value as
/// [`transmute_copy`][std::mem::transmute_copy].
///
/// Representations for all values of `T` must have exactly one bit set.
pub unsafe trait RawBitFlags: Copy + Clone + 'static {
/// The underlying integer type.
type Numeric: BitFlagNum;
/// A value with no bits set.
const EMPTY: Self::Numeric;
/// The value used by the Default implementation. Equivalent to EMPTY, unless
/// customized.
const DEFAULT: Self::Numeric;
/// A value with all flag bits set.
const ALL_BITS: Self::Numeric;
/// The name of the type for debug formatting purposes.
///
/// This is typically `BitFlags<EnumName>`
const BITFLAGS_TYPE_NAME: &'static str;
/// Return the bits as a number type.
fn bits(self) -> Self::Numeric;
}
use ::core::fmt;
use ::core::ops::{BitAnd, BitOr, BitXor, Not, Sub};
use ::core::hash::Hash;
pub trait BitFlagNum:
Default
+ BitOr<Self, Output = Self>
+ BitAnd<Self, Output = Self>
+ BitXor<Self, Output = Self>
+ Sub<Self, Output = Self>
+ Not<Output = Self>
+ PartialOrd<Self>
+ Ord
+ Hash
+ fmt::Debug
+ fmt::Binary
+ Copy
+ Clone
{
const ONE: Self;
fn is_power_of_two(self) -> bool;
fn count_ones(self) -> u32;
fn wrapping_neg(self) -> Self;
}
for_each_uint! { $ty $hide_docs =>
impl BitFlagNum for $ty {
const ONE: Self = 1;
fn is_power_of_two(self) -> bool {
<$ty>::is_power_of_two(self)
}
fn count_ones(self) -> u32 {
<$ty>::count_ones(self)
}
fn wrapping_neg(self) -> Self {
<$ty>::wrapping_neg(self)
}
}
}
// Re-export libcore so the macro doesn't inject "extern crate" downstream.
pub mod core {
pub use core::{convert, ops, option};
}
pub struct AssertionSucceeded;
pub struct AssertionFailed;
pub trait ExactlyOneBitSet {
type X;
}
impl ExactlyOneBitSet for AssertionSucceeded {
type X = ();
}
pub trait AssertionHelper {
type Status;
}
impl AssertionHelper for [(); 1] {
type Status = AssertionSucceeded;
}
impl AssertionHelper for [(); 0] {
type Status = AssertionFailed;
}
pub const fn next_bit(x: u128) -> u128 {
1 << x.trailing_ones()
}
}
use _internal::BitFlagNum;
// Internal debug formatting implementations
mod formatting;
// impl TryFrom<T::Numeric> for BitFlags<T>
mod fallible;
pub use crate::fallible::FromBitsError;
mod iter;
pub use crate::iter::Iter;
mod const_api;
pub use crate::const_api::ConstToken;
/// Represents a set of flags of some type `T`.
/// `T` must have the `#[bitflags]` attribute applied.
///
/// A `BitFlags<T>` is as large as the `T` itself,
/// and stores one flag per bit.
///
/// ## Comparison operators, [`PartialOrd`] and [`Ord`]
///
/// To make it possible to use `BitFlags` as the key of a
/// [`BTreeMap`][std::collections::BTreeMap], `BitFlags` implements
/// [`Ord`]. There is no meaningful total order for bitflags,
/// so the implementation simply compares the integer values of the bits.
///
/// Unfortunately, this means that comparing `BitFlags` with an operator
/// like `<=` will compile, and return values that are probably useless
/// and not what you expect. In particular, `<=` does *not* check whether
/// one value is a subset of the other. Use [`BitFlags::contains`] for that.
///
/// ## Customizing `Default`
///
/// By default, creating an instance of `BitFlags<T>` with `Default` will result
/// in an empty set. If that's undesirable, you may customize this:
///
/// ```
/// # use enumflags2::{BitFlags, bitflags};
/// #[bitflags(default = B | C)]
/// #[repr(u8)]
/// #[derive(Copy, Clone, Debug, PartialEq)]
/// enum MyFlag {
/// A = 0b0001,
/// B = 0b0010,
/// C = 0b0100,
/// D = 0b1000,
/// }
///
/// assert_eq!(BitFlags::default(), MyFlag::B | MyFlag::C);
/// ```
///
/// ## Memory layout
///
/// `BitFlags<T>` is marked with the `#[repr(transparent)]` trait, meaning
/// it can be safely transmuted into the corresponding numeric type.
///
/// Usually, the same can be achieved by using [`BitFlags::bits`] in one
/// direction, and [`BitFlags::from_bits`], [`BitFlags::from_bits_truncate`],
/// or [`BitFlags::from_bits_unchecked`] in the other direction. However,
/// transmuting might still be useful if, for example, you're dealing with
/// an entire array of `BitFlags`.
///
/// When transmuting *into* a `BitFlags`, make sure that each set bit
/// corresponds to an existing flag
/// (cf. [`from_bits_unchecked`][BitFlags::from_bits_unchecked]).
///
/// For example:
///
/// ```
/// # use enumflags2::{BitFlags, bitflags};
/// #[bitflags]
/// #[repr(u8)] // <-- the repr determines the numeric type
/// #[derive(Copy, Clone)]
/// enum TransmuteMe {
/// One = 1 << 0,
/// Two = 1 << 1,
/// }
///
/// # use std::slice;
/// // NOTE: we use a small, self-contained function to handle the slice
/// // conversion to make sure the lifetimes are right.
/// fn transmute_slice<'a>(input: &'a [BitFlags<TransmuteMe>]) -> &'a [u8] {
/// unsafe {
/// slice::from_raw_parts(input.as_ptr() as *const u8, input.len())
/// }
/// }
///
/// let many_flags = &[
/// TransmuteMe::One.into(),
/// TransmuteMe::One | TransmuteMe::Two,
/// ];
///
/// let as_nums = transmute_slice(many_flags);
/// assert_eq!(as_nums, &[0b01, 0b11]);
/// ```
///
/// ## Implementation notes
///
/// You might expect this struct to be defined as
///
/// ```ignore
/// struct BitFlags<T: BitFlag> {
/// value: T::Numeric
/// }
/// ```
///
/// Ideally, that would be the case. However, because `const fn`s cannot
/// have trait bounds in current Rust, this would prevent us from providing
/// most `const fn` APIs. As a workaround, we define `BitFlags` with two
/// type parameters, with a default for the second one:
///
/// ```ignore
/// struct BitFlags<T, N = <T as BitFlag>::Numeric> {
/// value: N,
/// marker: PhantomData<T>,
/// }
/// ```
///
/// Manually providing a type for the `N` type parameter shouldn't ever
/// be necessary.
///
/// The types substituted for `T` and `N` must always match, creating a
/// `BitFlags` value where that isn't the case is only possible with
/// incorrect unsafe code.
#[derive(Copy, Clone)]
#[repr(transparent)]
pub struct BitFlags<T, N = <T as _internal::RawBitFlags>::Numeric> {
val: N,
marker: PhantomData<T>,
}
/// `make_bitflags!` provides a succint syntax for creating instances of
/// `BitFlags<T>`. Instead of repeating the name of your type for each flag
/// you want to add, try `make_bitflags!(Flags::{Foo | Bar})`.
/// ```
/// # use enumflags2::{bitflags, BitFlags, make_bitflags};
/// # #[bitflags]
/// # #[repr(u8)]
/// # #[derive(Clone, Copy, Debug)]
/// # enum Test {
/// # A = 1 << 0,
/// # B = 1 << 1,
/// # C = 1 << 2,
/// # }
/// let x = make_bitflags!(Test::{A | C});
/// assert_eq!(x, Test::A | Test::C);
///
/// // Also works in const contexts:
/// const X: BitFlags<Test> = make_bitflags!(Test::A);
/// ```
#[macro_export]
macro_rules! make_bitflags {
( $enum:ident ::{ $($variant:ident)|* } ) => {
{
let mut n = 0;
$(
{
let flag: $enum = $enum::$variant;
n |= flag as <$enum as $crate::_internal::RawBitFlags>::Numeric;
}
)*
// SAFETY: The value has been created from numeric values of the underlying
// enum, so only valid bits are set.
unsafe { $crate::BitFlags::<$enum>::from_bits_unchecked_c(
n, $crate::BitFlags::CONST_TOKEN) }
}
};
( $enum:ident :: $variant:ident ) => {
{
let flag: $enum = $enum::$variant;
let n = flag as <$enum as $crate::_internal::RawBitFlags>::Numeric;
// SAFETY: The value has been created from the numeric value of
// the underlying enum, so only valid bits are set.
unsafe { $crate::BitFlags::<$enum>::from_bits_unchecked_c(
n, $crate::BitFlags::CONST_TOKEN) }
}
};
}
/// The default value returned is one with all flags unset, i. e. [`empty`][Self::empty],
/// unless [customized](index.html#customizing-default).
impl<T> Default for BitFlags<T>
where
T: BitFlag,
{
#[inline(always)]
fn default() -> Self {
BitFlags {
val: T::DEFAULT,
marker: PhantomData,
}
}
}
impl<T: BitFlag> From<T> for BitFlags<T> {
#[inline(always)]
fn from(t: T) -> BitFlags<T> {
Self::from_flag(t)
}
}
impl<T> BitFlags<T>
where
T: BitFlag,
{
/// Create a `BitFlags` if the raw value provided does not contain
/// any illegal flags.
///
/// See also: [a convenience re-export in the `BitFlag` trait][BitFlag::from_bits],
/// which can help avoid the need for type hints.
///
/// ```
/// # use enumflags2::{bitflags, BitFlags};
/// #[bitflags]
/// #[repr(u8)]
/// #[derive(Clone, Copy, PartialEq, Eq, Debug)]
/// enum MyFlag {
/// One = 1 << 0,
/// Two = 1 << 1,
/// Three = 1 << 2,
/// }
///
/// let flags: BitFlags<MyFlag> = BitFlags::from_bits(0b11).unwrap();
/// assert_eq!(flags.contains(MyFlag::One), true);
/// assert_eq!(flags.contains(MyFlag::Two), true);
/// assert_eq!(flags.contains(MyFlag::Three), false);
/// let invalid = BitFlags::<MyFlag>::from_bits(1 << 3);
/// assert!(invalid.is_err());
/// ```
#[inline]
pub fn from_bits(bits: T::Numeric) -> Result<Self, FromBitsError<T>> {
let flags = Self::from_bits_truncate(bits);
if flags.bits() == bits {
Ok(flags)
} else {
Err(FromBitsError {
flags,
invalid: bits & !flags.bits(),
})
}
}
/// Create a `BitFlags` from an underlying bitwise value. If any
/// invalid bits are set, ignore them.
///
/// See also: [a convenience re-export in the `BitFlag` trait][BitFlag::from_bits_truncate],
/// which can help avoid the need for type hints.
///
/// ```
/// # use enumflags2::{bitflags, BitFlags};
/// #[bitflags]
/// #[repr(u8)]
/// #[derive(Clone, Copy, PartialEq, Eq)]
/// enum MyFlag {
/// One = 1 << 0,
/// Two = 1 << 1,
/// Three = 1 << 2,
/// }
///
/// let flags: BitFlags<MyFlag> = BitFlags::from_bits_truncate(0b1_1011);
/// assert_eq!(flags.contains(MyFlag::One), true);
/// assert_eq!(flags.contains(MyFlag::Two), true);
/// assert_eq!(flags.contains(MyFlag::Three), false);
/// ```
#[must_use]
#[inline(always)]
pub fn from_bits_truncate(bits: T::Numeric) -> Self {
// SAFETY: We're truncating out all the invalid bits, so the remaining
// ones must be valid.
unsafe { BitFlags::from_bits_unchecked(bits & T::ALL_BITS) }
}
/// Create a new BitFlags unsafely, without checking if the bits form
/// a valid bit pattern for the type.
///
/// Consider using [`from_bits`][BitFlags::from_bits]
/// or [`from_bits_truncate`][BitFlags::from_bits_truncate] instead.
///
/// # Safety
///
/// All bits set in `val` must correspond to a value of the enum.
///
/// # Example
///
/// ```
/// # use enumflags2::{bitflags, BitFlags};
/// #[bitflags]
/// #[repr(u8)]
/// #[derive(Clone, Copy, PartialEq, Eq)]
/// enum MyFlag {
/// One = 1 << 0,
/// Two = 1 << 1,
/// Three = 1 << 2,
/// }
///
/// let flags: BitFlags<MyFlag> = unsafe {
/// BitFlags::from_bits_unchecked(0b011)
/// };
///
/// assert_eq!(flags.contains(MyFlag::One), true);
/// assert_eq!(flags.contains(MyFlag::Two), true);
/// assert_eq!(flags.contains(MyFlag::Three), false);
/// ```
#[must_use]
#[inline(always)]
pub unsafe fn from_bits_unchecked(val: T::Numeric) -> Self {
BitFlags {
val,
marker: PhantomData,
}
}
/// Turn a `T` into a `BitFlags<T>`. Also available as `flag.into()`.
#[must_use]
#[inline(always)]
pub fn from_flag(flag: T) -> Self {
// SAFETY: A value of the underlying enum is valid by definition.
unsafe { Self::from_bits_unchecked(flag.bits()) }
}
/// Create a `BitFlags` with no flags set (in other words, with a value of `0`).
///
/// See also: [`BitFlag::empty`], a convenience reexport;
/// [`BitFlags::EMPTY`], the same functionality available
/// as a constant for `const fn` code.
///
/// ```
/// # use enumflags2::{bitflags, BitFlags};
/// #[bitflags]
/// #[repr(u8)]
/// #[derive(Clone, Copy, PartialEq, Eq)]
/// enum MyFlag {
/// One = 1 << 0,
/// Two = 1 << 1,
/// Three = 1 << 2,
/// }
///
/// let empty: BitFlags<MyFlag> = BitFlags::empty();
/// assert!(empty.is_empty());
/// assert_eq!(empty.contains(MyFlag::One), false);
/// assert_eq!(empty.contains(MyFlag::Two), false);
/// assert_eq!(empty.contains(MyFlag::Three), false);
/// ```
#[inline(always)]
pub fn empty() -> Self {
Self::EMPTY
}
/// Create a `BitFlags` with all flags set.
///
/// See also: [`BitFlag::all`], a convenience reexport;
/// [`BitFlags::ALL`], the same functionality available
/// as a constant for `const fn` code.
///
/// ```
/// # use enumflags2::{bitflags, BitFlags};
/// #[bitflags]
/// #[repr(u8)]
/// #[derive(Clone, Copy, PartialEq, Eq)]
/// enum MyFlag {
/// One = 1 << 0,
/// Two = 1 << 1,
/// Three = 1 << 2,
/// }
///
/// let empty: BitFlags<MyFlag> = BitFlags::all();
/// assert!(empty.is_all());
/// assert_eq!(empty.contains(MyFlag::One), true);
/// assert_eq!(empty.contains(MyFlag::Two), true);
/// assert_eq!(empty.contains(MyFlag::Three), true);
/// ```
#[inline(always)]
pub fn all() -> Self {
Self::ALL
}
/// Returns true if all flags are set
#[inline(always)]
pub fn is_all(self) -> bool {
self.val == T::ALL_BITS
}
/// Returns true if no flag is set
#[inline(always)]
pub fn is_empty(self) -> bool {
self.val == T::EMPTY
}
/// Returns the number of flags set.
#[inline(always)]
pub fn len(self) -> usize {
self.val.count_ones() as usize
}
/// If exactly one flag is set, the flag is returned. Otherwise, returns `None`.
///
/// See also [`Itertools::exactly_one`](https://docs.rs/itertools/latest/itertools/trait.Itertools.html#method.exactly_one).
#[inline(always)]
pub fn exactly_one(self) -> Option<T> {
if self.val.is_power_of_two() {
// SAFETY: By the invariant of the BitFlags type, all bits are valid
// in isolation for the underlying enum.
Some(unsafe { core::mem::transmute_copy(&self.val) })
} else {
None
}
}
/// Returns the underlying bitwise value.
///
/// ```
/// # use enumflags2::{bitflags, BitFlags};
/// #[bitflags]
/// #[repr(u8)]
/// #[derive(Clone, Copy)]
/// enum Flags {
/// Foo = 1 << 0,
/// Bar = 1 << 1,
/// }
///
/// let both_flags = Flags::Foo | Flags::Bar;
/// assert_eq!(both_flags.bits(), 0b11);
/// ```
#[inline(always)]
pub fn bits(self) -> T::Numeric {
self.val
}
/// Returns true if at least one flag is shared.
#[inline(always)]
pub fn intersects<B: Into<BitFlags<T>>>(self, other: B) -> bool {
(self.bits() & other.into().bits()) != Self::EMPTY.val
}
/// Returns true if all flags are contained.
#[inline(always)]
pub fn contains<B: Into<BitFlags<T>>>(self, other: B) -> bool {
let other = other.into();
(self.bits() & other.bits()) == other.bits()
}
/// Toggles the matching bits
#[inline(always)]
pub fn toggle<B: Into<BitFlags<T>>>(&mut self, other: B) {
*self ^= other.into();
}
/// Inserts the flags into the BitFlag
#[inline(always)]
pub fn insert<B: Into<BitFlags<T>>>(&mut self, other: B) {
*self |= other.into();
}
/// Removes the matching flags
#[inline(always)]
pub fn remove<B: Into<BitFlags<T>>>(&mut self, other: B) {
*self &= !other.into();
}
/// Inserts if `cond` holds, else removes
///
/// ```
/// # use enumflags2::bitflags;
/// #[bitflags]
/// #[derive(Clone, Copy, PartialEq, Debug)]
/// #[repr(u8)]
/// enum MyFlag {
/// A = 1 << 0,
/// B = 1 << 1,
/// C = 1 << 2,
/// }
///
/// let mut state = MyFlag::A | MyFlag::C;
/// state.set(MyFlag::A | MyFlag::B, false);
///
/// // Because the condition was false, both
/// // `A` and `B` are removed from the set
/// assert_eq!(state, MyFlag::C);
/// ```
#[inline(always)]
pub fn set<B: Into<BitFlags<T>>>(&mut self, other: B, cond: bool) {
if cond {
self.insert(other);
} else {
self.remove(other);
}
}
}
impl<T, N: PartialEq> PartialEq for BitFlags<T, N> {
#[inline(always)]
fn eq(&self, other: &Self) -> bool {
self.val == other.val
}
}
impl<T, N: Eq> Eq for BitFlags<T, N> {}
impl<T, N: PartialOrd> PartialOrd for BitFlags<T, N> {
#[inline(always)]
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
self.val.partial_cmp(&other.val)
}
}
impl<T, N: Ord> Ord for BitFlags<T, N> {
#[inline(always)]
fn cmp(&self, other: &Self) -> cmp::Ordering {
self.val.cmp(&other.val)
}
}
// Clippy complains when Hash is derived while PartialEq is implemented manually
impl<T, N: Hash> Hash for BitFlags<T, N> {
#[inline(always)]
fn hash<H: Hasher>(&self, state: &mut H) {
self.val.hash(state)
}
}
impl<T> cmp::PartialEq<T> for BitFlags<T>
where
T: BitFlag,
{
#[inline(always)]
fn eq(&self, other: &T) -> bool {
self.bits() == Into::<Self>::into(*other).bits()
}
}
impl<T, B> ops::BitOr<B> for BitFlags<T>
where
T: BitFlag,
B: Into<BitFlags<T>>,
{
type Output = BitFlags<T>;
#[inline(always)]
fn bitor(self, other: B) -> BitFlags<T> {
// SAFETY: The two operands are known to be composed of valid bits,
// and 0 | 0 = 0 in the columns of the invalid bits.
unsafe { BitFlags::from_bits_unchecked(self.bits() | other.into().bits()) }
}
}
impl<T, B> ops::BitAnd<B> for BitFlags<T>
where
T: BitFlag,
B: Into<BitFlags<T>>,
{
type Output = BitFlags<T>;
#[inline(always)]
fn bitand(self, other: B) -> BitFlags<T> {
// SAFETY: The two operands are known to be composed of valid bits,
// and 0 & 0 = 0 in the columns of the invalid bits.
unsafe { BitFlags::from_bits_unchecked(self.bits() & other.into().bits()) }
}
}
impl<T, B> ops::BitXor<B> for BitFlags<T>
where
T: BitFlag,
B: Into<BitFlags<T>>,
{
type Output = BitFlags<T>;
#[inline(always)]
fn bitxor(self, other: B) -> BitFlags<T> {
// SAFETY: The two operands are known to be composed of valid bits,
// and 0 ^ 0 = 0 in the columns of the invalid bits.
unsafe { BitFlags::from_bits_unchecked(self.bits() ^ other.into().bits()) }
}
}
impl<T, B> ops::BitOrAssign<B> for BitFlags<T>
where
T: BitFlag,
B: Into<BitFlags<T>>,
{
#[inline(always)]
fn bitor_assign(&mut self, other: B) {
*self = *self | other;
}
}
impl<T, B> ops::BitAndAssign<B> for BitFlags<T>
where
T: BitFlag,
B: Into<BitFlags<T>>,
{
#[inline(always)]
fn bitand_assign(&mut self, other: B) {
*self = *self & other;
}
}
impl<T, B> ops::BitXorAssign<B> for BitFlags<T>
where
T: BitFlag,
B: Into<BitFlags<T>>,
{
#[inline(always)]
fn bitxor_assign(&mut self, other: B) {
*self = *self ^ other;
}
}
impl<T> ops::Not for BitFlags<T>
where
T: BitFlag,
{
type Output = BitFlags<T>;
#[inline(always)]
fn not(self) -> BitFlags<T> {
BitFlags::from_bits_truncate(!self.bits())
}
}
#[cfg(feature = "serde")]
mod impl_serde {
use super::{BitFlag, BitFlags};
use serde::de::{Error, Unexpected};
use serde::{Deserialize, Serialize};
impl<'a, T> Deserialize<'a> for BitFlags<T>
where
T: BitFlag,
T::Numeric: Deserialize<'a> + Into<u64>,
{
fn deserialize<D: serde::Deserializer<'a>>(d: D) -> Result<Self, D::Error> {
let val = T::Numeric::deserialize(d)?;
Self::from_bits(val).map_err(|_| {
D::Error::invalid_value(
Unexpected::Unsigned(val.into()),
&"valid bit representation",
)
})
}
}
impl<T> Serialize for BitFlags<T>
where
T: BitFlag,
T::Numeric: Serialize,
{
fn serialize<S: serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
T::Numeric::serialize(&self.val, s)
}
}
}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/src/const_api.rs | src/const_api.rs | use crate::{BitFlags, BitFlag};
use core::marker::PhantomData;
/// Workaround for `const fn` limitations.
///
/// Some `const fn`s in this crate will need an instance of this type
/// for some type-level information usually provided by traits.
///
/// A token can be obtained from [`BitFlags::CONST_TOKEN`]. The relevant types
/// should be readily inferred from context.
///
/// For an example of usage, see [`not_c`][BitFlags::not_c].
pub struct ConstToken<T, N>(BitFlags<T, N>);
impl<T> BitFlags<T>
where
T: BitFlag,
{
/// An empty `BitFlags`. Equivalent to [`empty()`][BitFlags::empty],
/// but works in a const context.
pub const EMPTY: Self = BitFlags {
val: T::EMPTY,
marker: PhantomData,
};
/// A `BitFlags` with all flags set. Equivalent to [`all()`][BitFlags::all],
/// but works in a const context.
pub const ALL: Self = BitFlags {
val: T::ALL_BITS,
marker: PhantomData,
};
/// A [`ConstToken`] for this type of flag.
pub const CONST_TOKEN: ConstToken<T, T::Numeric> = ConstToken(Self::ALL);
}
for_each_uint! { $ty $hide_docs =>
impl<T> BitFlags<T, $ty> {
/// Create a new BitFlags unsafely, without checking if the bits form
/// a valid bit pattern for the type.
///
/// Const variant of
/// [`from_bits_unchecked`][BitFlags::from_bits_unchecked].
///
/// Consider using
/// [`from_bits_truncate_c`][BitFlags::from_bits_truncate_c] instead.
///
/// # Safety
///
/// All bits set in `val` must correspond to a value of the enum.
#[must_use]
#[inline(always)]
$(#[$hide_docs])?
pub const unsafe fn from_bits_unchecked_c(
val: $ty, const_token: ConstToken<T, $ty>
) -> Self {
let _ = const_token;
BitFlags {
val,
marker: PhantomData,
}
}
/// Create a `BitFlags<T>` from an underlying bitwise value. If any
/// invalid bits are set, ignore them.
///
/// ```
/// # use enumflags2::{bitflags, BitFlags};
/// #[bitflags]
/// #[repr(u8)]
/// #[derive(Clone, Copy, Debug, PartialEq, Eq)]
/// enum MyFlag {
/// One = 1 << 0,
/// Two = 1 << 1,
/// Three = 1 << 2,
/// }
///
/// const FLAGS: BitFlags<MyFlag> =
/// BitFlags::<MyFlag>::from_bits_truncate_c(0b10101010, BitFlags::CONST_TOKEN);
/// assert_eq!(FLAGS, MyFlag::Two);
/// ```
#[must_use]
#[inline(always)]
$(#[$hide_docs])?
pub const fn from_bits_truncate_c(
bits: $ty, const_token: ConstToken<T, $ty>
) -> Self {
BitFlags {
val: bits & const_token.0.val,
marker: PhantomData,
}
}
/// Bitwise OR — return value contains flag if either argument does.
///
/// Also available as `a | b`, but operator overloads are not usable
/// in `const fn`s at the moment.
#[must_use]
#[inline(always)]
$(#[$hide_docs])?
pub const fn union_c(self, other: Self) -> Self {
BitFlags {
val: self.val | other.val,
marker: PhantomData,
}
}
/// Bitwise AND — return value contains flag if both arguments do.
///
/// Also available as `a & b`, but operator overloads are not usable
/// in `const fn`s at the moment.
#[must_use]
#[inline(always)]
$(#[$hide_docs])?
pub const fn intersection_c(self, other: Self) -> Self {
BitFlags {
val: self.val & other.val,
marker: PhantomData,
}
}
/// Bitwise NOT — return value contains flag if argument doesn't.
///
/// Also available as `!a`, but operator overloads are not usable
/// in `const fn`s at the moment.
///
/// Moreover, due to `const fn` limitations, `not_c` needs a
/// [`ConstToken`] as an argument.
///
/// ```
/// # use enumflags2::{bitflags, BitFlags, make_bitflags};
/// #[bitflags]
/// #[repr(u8)]
/// #[derive(Clone, Copy, Debug, PartialEq, Eq)]
/// enum MyFlag {
/// One = 1 << 0,
/// Two = 1 << 1,
/// Three = 1 << 2,
/// }
///
/// const FLAGS: BitFlags<MyFlag> = make_bitflags!(MyFlag::{One | Two});
/// const NEGATED: BitFlags<MyFlag> = FLAGS.not_c(BitFlags::CONST_TOKEN);
/// assert_eq!(NEGATED, MyFlag::Three);
/// ```
#[must_use]
#[inline(always)]
$(#[$hide_docs])?
pub const fn not_c(self, const_token: ConstToken<T, $ty>) -> Self {
BitFlags {
val: !self.val & const_token.0.val,
marker: PhantomData,
}
}
/// Returns the underlying bitwise value.
///
/// `const` variant of [`bits`][BitFlags::bits].
#[inline(always)]
$(#[$hide_docs])?
pub const fn bits_c(self) -> $ty {
self.val
}
}
}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/src/iter.rs | src/iter.rs | use crate::{BitFlag, BitFlags, BitFlagNum};
use core::iter::{FromIterator, FusedIterator};
impl<T> BitFlags<T>
where
T: BitFlag,
{
/// Iterate over the `BitFlags`.
///
/// ```
/// # use enumflags2::{bitflags, make_bitflags};
/// # #[bitflags]
/// # #[derive(Clone, Copy, PartialEq, Debug)]
/// # #[repr(u8)]
/// # enum MyFlag {
/// # A = 1 << 0,
/// # B = 1 << 1,
/// # C = 1 << 2,
/// # }
/// let flags = make_bitflags!(MyFlag::{A | C});
///
/// flags.iter()
/// .for_each(|flag| println!("{:?}", flag));
/// ```
#[inline]
pub fn iter(self) -> Iter<T> {
Iter { rest: self }
}
}
impl<T: BitFlag> IntoIterator for BitFlags<T> {
type IntoIter = Iter<T>;
type Item = T;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
/// Iterator that yields each flag set in a `BitFlags`.
#[derive(Clone, Debug)]
pub struct Iter<T: BitFlag> {
rest: BitFlags<T>,
}
impl<T> Iterator for Iter<T>
where
T: BitFlag,
{
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
if self.rest.is_empty() {
None
} else {
// SAFETY: `flag` will be a single bit, because
// x & -x = x & (~x + 1), and the increment causes only one 0 -> 1 transition.
// The invariant of `from_bits_unchecked` is satisfied, because bits & x
// is a subset of bits, which we know are the valid bits.
unsafe {
let bits = self.rest.bits();
let flag: T::Numeric = bits & bits.wrapping_neg();
let flag: T = core::mem::transmute_copy(&flag);
self.rest = BitFlags::from_bits_unchecked(bits & (bits - BitFlagNum::ONE));
Some(flag)
}
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let l = self.rest.len();
(l, Some(l))
}
}
impl<T> ExactSizeIterator for Iter<T>
where
T: BitFlag,
{
fn len(&self) -> usize {
self.rest.len()
}
}
impl<T: BitFlag> FusedIterator for Iter<T> {}
impl<T, B> FromIterator<B> for BitFlags<T>
where
T: BitFlag,
B: Into<BitFlags<T>>,
{
#[inline]
fn from_iter<I>(it: I) -> BitFlags<T>
where
I: IntoIterator<Item = B>,
{
it.into_iter()
.fold(BitFlags::empty(), |acc, flag| acc | flag)
}
}
impl<T, B> Extend<B> for BitFlags<T>
where
T: BitFlag,
B: Into<BitFlags<T>>,
{
#[inline]
fn extend<I>(&mut self, it: I)
where
I: IntoIterator<Item = B>,
{
*self = it.into_iter().fold(*self, |acc, flag| acc | flag)
}
}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/src/formatting.rs | src/formatting.rs | use crate::{BitFlag, BitFlags};
use core::fmt::{self, Binary, Debug};
impl<T> fmt::Debug for BitFlags<T>
where
T: BitFlag + fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
let name = T::BITFLAGS_TYPE_NAME;
let bits = DebugBinaryFormatter(&self.val);
let iter = if !self.is_empty() {
Some(FlagFormatter(self.iter()))
} else {
None
};
if !fmt.alternate() {
// Concise tuple formatting is a better default
let mut debug = fmt.debug_tuple(name);
debug.field(&bits);
if let Some(iter) = iter {
debug.field(&iter);
}
debug.finish()
} else {
// Pretty-printed tuples are ugly and hard to read, so use struct format
let mut debug = fmt.debug_struct(name);
debug.field("bits", &bits);
if let Some(iter) = iter {
debug.field("flags", &iter);
}
debug.finish()
}
}
}
impl<T> fmt::Display for BitFlags<T>
where
T: BitFlag + fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&FlagFormatter(self.iter()), fmt)
}
}
impl<T> fmt::Binary for BitFlags<T>
where
T: BitFlag,
T::Numeric: fmt::Binary,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Binary::fmt(&self.bits(), fmt)
}
}
impl<T> fmt::Octal for BitFlags<T>
where
T: BitFlag,
T::Numeric: fmt::Octal,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Octal::fmt(&self.bits(), fmt)
}
}
impl<T> fmt::LowerHex for BitFlags<T>
where
T: BitFlag,
T::Numeric: fmt::LowerHex,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::LowerHex::fmt(&self.bits(), fmt)
}
}
impl<T> fmt::UpperHex for BitFlags<T>
where
T: BitFlag,
T::Numeric: fmt::UpperHex,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::UpperHex::fmt(&self.bits(), fmt)
}
}
// Format an iterator of flags into "A | B | etc"
struct FlagFormatter<I>(I);
impl<T: Debug, I: Clone + Iterator<Item = T>> Debug for FlagFormatter<I> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut iter = self.0.clone();
if let Some(val) = iter.next() {
Debug::fmt(&val, fmt)?;
for val in iter {
fmt.write_str(" | ")?;
Debug::fmt(&val, fmt)?;
}
Ok(())
} else {
fmt.write_str("<empty>")
}
}
}
// A formatter that obeys format arguments but falls back to binary when
// no explicit format is requested. Supports {:08?}, {:08x?}, etc.
struct DebugBinaryFormatter<'a, F>(&'a F);
impl<'a, F: Debug + Binary + 'a> Debug for DebugBinaryFormatter<'a, F> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check if {:x?} or {:X?} was used; this is determined via the
// discriminator of core::fmt::FlagV1::{DebugLowerHex, DebugUpperHex},
// which is not an accessible type: https://github.com/rust-lang/rust/blob/d65e272a9fe3e61aa5f229c5358e35a909435575/src/libcore/fmt/mod.rs#L306
// See also: https://github.com/rust-lang/rfcs/pull/2226
#[allow(deprecated)]
let format_hex = fmt.flags() >> 4;
let width = fmt.width().unwrap_or(0);
if format_hex & 1 != 0 {
// FlagV1::DebugLowerHex
write!(fmt, "{:#0width$x?}", &self.0, width = width)
} else if format_hex & 2 != 0 {
// FlagV1::DebugUpperHex
write!(fmt, "{:#0width$X?}", &self.0, width = width)
} else {
// Fall back to binary otheriwse
write!(fmt, "{:#0width$b}", &self.0, width = width)
}
}
}
#[test]
fn flag_formatter() {
use core::iter;
macro_rules! assert_fmt {
($fmt:expr, $expr:expr, $expected:expr) => {
assert_eq!(format!($fmt, FlagFormatter($expr)), $expected)
};
}
assert_fmt!("{:?}", iter::empty::<u8>(), "<empty>");
assert_fmt!("{:?}", iter::once(1), "1");
assert_fmt!("{:?}", [1, 2].iter(), "1 | 2");
assert_fmt!("{:?}", [1, 2, 10].iter(), "1 | 2 | 10");
assert_fmt!("{:02x?}", [1, 2, 10].iter(), "01 | 02 | 0a");
assert_fmt!("{:#04X?}", [1, 2, 10].iter(), "0x01 | 0x02 | 0x0A");
}
#[test]
fn debug_binary_formatter() {
macro_rules! assert_fmt {
($fmt:expr, $expr:expr, $expected:expr) => {
assert_eq!(format!($fmt, DebugBinaryFormatter(&$expr)), $expected)
};
}
assert_fmt!("{:?}", 10, "0b1010");
assert_fmt!("{:#?}", 10, "0b1010");
assert_fmt!("{:010?}", 10, "0b00001010");
assert_fmt!("{:010x?}", 10, "0x0000000a");
assert_fmt!("{:#010X?}", 10, "0x0000000A");
}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/src/fallible.rs | src/fallible.rs | use super::BitFlag;
use super::BitFlags;
use core::convert::TryFrom;
use core::fmt;
// Coherence doesn't let us use a generic type here. Work around by implementing
// for each integer type manually.
for_each_uint! { $ty $hide_docs =>
impl<T> TryFrom<$ty> for BitFlags<T>
where
T: BitFlag<Numeric=$ty>,
{
type Error = FromBitsError<T>;
fn try_from(bits: T::Numeric) -> Result<Self, Self::Error> {
Self::from_bits(bits)
}
}
}
/// The error struct used by [`BitFlags::from_bits`]
/// and the [`TryFrom`] implementation for invalid values.
///
/// Note that the implementation of [`std::error::Error`]
/// for this type is gated on the `std` feature flag.
///
/// ```
/// # use std::convert::TryInto;
/// # use enumflags2::{bitflags, BitFlags};
/// #[bitflags]
/// #[derive(Clone, Copy, Debug)]
/// #[repr(u8)]
/// enum MyFlags {
/// A = 0b0001,
/// B = 0b0010,
/// C = 0b0100,
/// D = 0b1000,
/// }
///
/// let result: Result<BitFlags<MyFlags>, _> = 0b10101u8.try_into();
/// assert!(result.is_err());
/// let error = result.unwrap_err();
/// assert_eq!(error.truncate(), MyFlags::C | MyFlags::A);
/// assert_eq!(error.invalid_bits(), 0b10000);
/// ```
#[derive(Debug, Copy, Clone)]
pub struct FromBitsError<T: BitFlag> {
pub(crate) flags: BitFlags<T>,
pub(crate) invalid: T::Numeric,
}
impl<T: BitFlag> FromBitsError<T> {
/// Return the truncated result of the conversion.
pub fn truncate(self) -> BitFlags<T> {
self.flags
}
/// Return the bits that didn't correspond to any flags.
pub fn invalid_bits(self) -> T::Numeric {
self.invalid
}
}
impl<T: BitFlag + fmt::Debug> fmt::Display for FromBitsError<T> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
fmt,
"Invalid bits for {:?}: {:#b}",
self.flags, self.invalid
)
}
}
#[cfg(feature = "std")]
impl<T: BitFlag + fmt::Debug> std::error::Error for FromBitsError<T> {
fn description(&self) -> &str {
"invalid bitflags representation"
}
}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/ui_tests.rs | test_suite/ui_tests.rs | use glob::glob;
#[test]
fn ui() {
let t = trybuild::TestCases::new();
for test in glob("ui/*.rs").unwrap() {
let path = test.unwrap();
if path == std::path::Path::new("ui/must_use_warning.rs") {
t.pass(path)
} else {
t.compile_fail(path)
}
}
}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/common.rs | test_suite/common.rs | #[bitflags]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[repr(u8)]
enum Test {
A = 1 << 0,
B = 1 << 1,
C = 1 << 2,
D = 1 << 3,
}
#[bitflags]
#[derive(Copy, Clone, Debug)]
#[repr(u64)]
enum Test1 {
A = 1 << 0,
B = 1 << 1,
C = 1 << 2,
D = 1 << 3,
E = 1 << 34,
}
#[bitflags(default = B | C)]
#[derive(Copy, Clone, Debug)]
#[repr(u8)]
enum Default6 {
A = 1 << 0,
B = 1 << 1,
C = 1 << 2,
D = 1 << 3,
}
#[bitflags]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[repr(u8)]
enum AssociatedTypeNameConflict {
Stringy = 1 << 0,
Numeric = 1 << 1,
}
#[test]
fn test_ctors() {
use enumflags2::BitFlags;
assert_eq!(
BitFlags::<Test>::all(),
Test::A | Test::B | Test::C | Test::D
);
assert_eq!(BitFlags::<Test>::all() & Test::A, Test::A);
assert_eq!(BitFlags::<Test>::from_bits_truncate(4), Test::C);
assert_eq!(BitFlags::<Test>::from_bits_truncate(5), Test::A | Test::C);
assert_eq!(
BitFlags::<Test>::from_bits_truncate(16),
BitFlags::<Test>::empty()
);
assert_eq!(BitFlags::<Test>::from_bits_truncate(17), Test::A);
assert!(BitFlags::<Test>::from_bits(17).is_err());
assert_eq!(
BitFlags::<Test>::from_bits(15).unwrap(),
BitFlags::<Test>::all()
);
assert_eq!((Test::A | Test::B).bits(), 3);
assert_eq!((!(Test::A | Test::B)).bits(), 12);
assert_eq!(BitFlags::<Test>::all().bits(), 15);
assert_eq!(BitFlags::<Default6>::default(), Default6::B | Default6::C);
}
#[test]
fn test_ops() {
assert_eq!(!Test::A, Test::B | Test::C | Test::D);
assert_eq!((Test::A | Test::C) ^ (Test::C | Test::B), Test::A | Test::B);
assert!((Test::A | Test::B).intersects(Test::B));
assert!(!(Test::A | Test::B).intersects(Test::C));
assert!((Test::A | Test::B | Test::C).contains(Test::A | Test::B));
assert!(!(Test::A | Test::B | Test::C).contains(Test::A | Test::D));
assert_eq!(!(Test::A | Test::B), Test::C | Test::D);
assert_eq!((Test::A ^ Test::B), Test::A | Test::B);
}
#[test]
fn test_mutation() {
{
let mut b = Test::A | Test::B;
b.insert(Test::C);
assert_eq!(b, Test::A | Test::B | Test::C);
}
{
let mut b = Test::A | Test::B | Test::C;
b.remove(Test::B);
assert_eq!(b, Test::A | Test::C);
}
}
#[test]
fn test_exactly_one() {
use enumflags2::BitFlags;
assert_eq!(BitFlags::<Test>::empty().exactly_one(), None);
assert_eq!(BitFlags::<Test>::from(Test::B).exactly_one(), Some(Test::B));
assert_eq!((Test::A | Test::C).exactly_one(), None);
}
#[test]
fn test_len() {
use enumflags2::BitFlags;
assert_eq!(BitFlags::<Test>::empty().len(), 0);
assert_eq!(BitFlags::<Test>::all().len(), 4);
assert_eq!((Test::A | Test::D).len(), 2);
}
#[test]
fn iterator() {
use enumflags2::BitFlags;
// it's a separate statement because type ascription is nightly
let tests: &[(BitFlags<Test>, &[Test])] = &[
(BitFlags::empty(), &[]),
(Test::A.into(), &[Test::A]),
(Test::A | Test::B, &[Test::A, Test::B]),
];
for &(bitflag, expected) in tests {
assert!(bitflag.iter().zip(expected.iter().copied()).all(|(a, b)| a == b));
// If cloned, the iterator will yield the same elements.
let it = bitflag.iter();
assert!(it.clone().zip(it).all(|(a, b)| a == b));
// The ExactLenIterator implementation should always yield the
// correct remaining length.
let mut it = bitflag.iter();
for rest in (0..=expected.len()).rev() {
assert_eq!(it.len(), rest);
assert_eq!(it.size_hint(), (rest, Some(rest)));
it.next();
}
}
}
#[test]
fn assign_ops() {
let mut x = Test::A | Test::B;
x |= Test::C;
assert_eq!(x, Test::A | Test::B | Test::C);
let mut x = Test::A | Test::B;
x &= Test::B | Test::C;
assert_eq!(x, Test::B);
let mut x = Test::A | Test::B;
x ^= Test::B | Test::C;
assert_eq!(x, Test::A | Test::C);
}
#[test]
const fn fn_derive() {
#[bitflags]
#[derive(Copy, Clone, Debug)]
#[repr(u8)]
enum TestFn {
A = 1 << 0,
}
}
#[test]
const fn module() {
mod some_modules {
#[enumflags2::bitflags]
#[derive(Copy, Clone, Debug)]
#[repr(u8)]
enum Test2 {
A = 1 << 0,
B = 1 << 1,
C = 1 << 2,
D = 1 << 3,
}
}
}
#[test]
fn inferred_values() {
#[bitflags]
#[derive(Copy, Clone, Debug)]
#[repr(u8)]
enum Inferred {
Infer2,
SpecifiedA = 1,
Infer8,
SpecifiedB = 4,
}
#[bitflags]
#[derive(Copy, Clone, Debug)]
#[repr(u8)]
enum OnlyInferred {
Infer1,
Infer2,
Infer4,
Infer8,
}
assert_eq!(Inferred::Infer2 as u8, 2);
assert_eq!(Inferred::Infer8 as u8, 8);
assert_eq!(OnlyInferred::Infer1 as u8, 1);
assert_eq!(OnlyInferred::Infer2 as u8, 2);
assert_eq!(OnlyInferred::Infer4 as u8, 4);
assert_eq!(OnlyInferred::Infer8 as u8, 8);
}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/tests/serde.rs | test_suite/tests/serde.rs | use enumflags2::{bitflags, BitFlags};
use serde::{Deserialize, Serialize};
#[test]
fn serde_compile() {
#[bitflags]
#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
#[repr(u8)]
enum Test {
A = 1 << 0,
B = 1 << 1,
C = 1 << 2,
D = 1 << 3,
}
type TestBitFlags = BitFlags<Test>;
#[derive(Clone, Debug, Serialize, Deserialize)]
struct TestStructContainsFlags {
flags: TestBitFlags,
}
}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/tests/no_implicit_prelude_2018.rs | test_suite/tests/no_implicit_prelude_2018.rs | #![no_implicit_prelude]
#[::enumflags2::bitflags]
#[derive(Copy, Clone, Debug, PartialEq)]
#[repr(u8)]
pub enum Test {
A = 1 << 0,
B = 1 << 1,
C = 1 << 2,
D = 1 << 3,
}
#[test]
fn test_foo() {
// assert!() doesn't even work in no_implicit_prelude!
use ::enumflags2::BitFlags;
let _ = BitFlags::<Test>::all();
}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/tests/requires_std.rs | test_suite/tests/requires_std.rs | #![allow(dead_code)]
use enumflags2::{bitflags, BitFlag, BitFlags};
include!("../common.rs");
#[test]
fn debug_format() {
// Assert that our Debug output format meets expectations
assert_eq!(
format!("{:?}", BitFlags::<Test>::all()),
"BitFlags<Test>(0b1111, A | B | C | D)"
);
assert_eq!(
format!("{:?}", BitFlags::<Test>::empty()),
"BitFlags<Test>(0b0)"
);
assert_eq!(
format!("{:?}", BitFlags::from_flag(Test::B)),
"BitFlags<Test>(0b10, B)"
);
assert_eq!(
format!("{:04x?}", BitFlags::<Test>::all()),
"BitFlags<Test>(0x0f, A | B | C | D)"
);
assert_eq!(
format!("{:04X?}", BitFlags::<Test>::all()),
"BitFlags<Test>(0x0F, A | B | C | D)"
);
}
#[test]
fn debug_format_alternate() {
/// Handle the slight difference in alternate debug output on rustc 1.34.2.
fn compare(mut actual: String, expected: &str) {
if actual.ends_with("\n}") && !actual.ends_with(",\n}") {
actual.replace_range(actual.len() - 2.., ",\n}");
}
assert_eq!(actual, expected);
}
compare(
format!("{:#010?}", BitFlags::<Test>::all()),
"BitFlags<Test> {
bits: 0b00001111,
flags: A | B | C | D,
}",
);
compare(
format!("{:#?}", BitFlags::<Test>::empty()),
"BitFlags<Test> {
bits: 0b0,
}",
);
}
#[test]
fn display_format() {
// Assert that our Debug output format meets expectations
assert_eq!(
format!("{}", BitFlags::<Test>::all()),
"A | B | C | D"
);
assert_eq!(
format!("{}", BitFlags::<Test>::empty()),
"<empty>"
);
assert_eq!(
format!("{}", BitFlags::from_flag(Test::B)),
"B"
);
}
#[test]
fn format() {
// Assert BitFlags<T> impls fmt::{Binary, Octal, LowerHex, UpperHex}
assert_eq!(format!("{:b}", BitFlags::<Test>::all()), "1111");
assert_eq!(format!("{:o}", BitFlags::<Test>::all()), "17");
assert_eq!(format!("{:x}", BitFlags::<Test>::all()), "f");
assert_eq!(format!("{:#04X}", BitFlags::<Test>::all()), "0x0F");
}
#[test]
fn debug_generic() {
#[derive(Debug)]
struct Debug<T: BitFlag>(BitFlags<T>);
let _ = format!("{:?}", Debug(BitFlags::<Test>::all()));
}
fn works_in_maps() {
// Assert that BitFlags<T> implements Hash and Ord.
use std::collections::{BTreeSet, HashSet};
let mut map: BTreeSet<BitFlags<Test>> = BTreeSet::new();
map.insert(BitFlags::empty());
let mut map: HashSet<BitFlags<Test>> = HashSet::new();
map.insert(BitFlags::empty());
}
fn works_in_maps_generic<T: BitFlag>() {
// Assert that BitFlags<T> implements Hash and Ord.
use std::collections::{BTreeSet, HashSet};
let mut map: BTreeSet<BitFlags<T>> = BTreeSet::new();
map.insert(BitFlags::empty());
let mut map: HashSet<BitFlags<T>> = HashSet::new();
map.insert(BitFlags::empty());
}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/tests/not_literal.rs | test_suite/tests/not_literal.rs | #![forbid(trivial_numeric_casts)]
const FOO_BAR: u8 = 1;
const FOO_BAZ: u8 = 2;
#[enumflags2::bitflags]
#[derive(Clone, Copy)]
#[repr(u8)]
enum Foo {
Bar = FOO_BAR,
Baz = FOO_BAZ,
}
#[enumflags2::bitflags]
#[derive(Clone, Copy)]
#[repr(u8)]
enum SingleTest {
Hello = FOO_BAR,
}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/tests/no_std.rs | test_suite/tests/no_std.rs | #![no_std]
include!("bitflag_tests.rs");
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/tests/no_std_2018.rs | test_suite/tests/no_std_2018.rs | #![no_std]
include!("bitflag_tests_2018.rs");
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/tests/bitflag_tests.rs | test_suite/tests/bitflag_tests.rs | #[macro_use]
extern crate enumflags2;
include!("../common.rs");
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/tests/bitflag_tests_2018.rs | test_suite/tests/bitflag_tests_2018.rs | // "an inner attribute is not permitted in this context" :/
#[deny(clippy::all, clippy::pedantic, clippy::nursery)]
mod everything {
use enumflags2::bitflags;
include!("../common.rs");
}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/tests/no_implicit_prelude.rs | test_suite/tests/no_implicit_prelude.rs | #![no_implicit_prelude]
#[macro_use]
extern crate enumflags2;
use enumflags2::BitFlags;
#[bitflags]
#[derive(Copy, Clone, Debug, PartialEq)]
#[repr(u8)]
pub enum Test {
A = 1 << 0,
B = 1 << 1,
C = 1 << 2,
D = 1 << 3,
}
#[test]
fn test_foo() {
// assert!() doesn't even work in no_implicit_prelude!
let _ = BitFlags::<Test>::all();
}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/ui/sneaky_make_bitflags.rs | test_suite/ui/sneaky_make_bitflags.rs | use enumflags2::{bitflags, make_bitflags};
#[bitflags]
#[repr(u8)]
#[derive(Copy, Clone, Debug)]
enum Test {
A = 1,
B = 2,
}
impl Test {
const C: u8 = 69;
}
fn main() {
let x = make_bitflags!(Test::{C});
dbg!(x);
}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/ui/invalid_repr.rs | test_suite/ui/invalid_repr.rs | #[enumflags2::bitflags]
#[repr(C)]
#[derive(Clone, Copy)]
enum NotAType {
Bar = 1,
Baz = 2,
}
#[enumflags2::bitflags]
#[repr(i32)]
#[derive(Clone, Copy)]
enum SignedType {
Bar = 1,
Baz = 2,
}
#[enumflags2::bitflags]
#[repr(usize)]
#[derive(Clone, Copy)]
enum Usize {
Bar = 1,
Baz = 2,
}
fn main() {}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/ui/with_fields.rs | test_suite/ui/with_fields.rs | #[enumflags2::bitflags]
#[repr(u8)]
#[derive(Copy, Clone)]
enum Foo {
Bar(u32),
}
fn main() {}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/ui/invalid_attribute_syntax.rs | test_suite/ui/invalid_attribute_syntax.rs | use enumflags2::bitflags;
#[bitflags(default = A + B)]
enum Test {
A = 1,
B = 2,
}
#[bitflags(default = A |)]
enum Test {
A = 1,
B = 2,
}
#[bitflags(default =)]
enum Test {
A = 1,
B = 2,
}
#[bitflags(default)]
enum Test {
A = 1,
B = 2,
}
#[bitflags(yes)]
enum Test {
A = 1,
B = 2,
}
fn main() {}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/ui/literal_out_of_range.rs | test_suite/ui/literal_out_of_range.rs | //#[enumflags2::bitflags]
#[repr(u64)]
#[derive(Copy, Clone)]
enum Foo {
BigNumber = 0xdeadbeefcafebabe1337,
}
fn main() {}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/ui/shift_out_of_range.rs | test_suite/ui/shift_out_of_range.rs | #[enumflags2::bitflags]
#[repr(u64)]
#[derive(Copy, Clone)]
enum Foo {
BigNumber = 1 << 69,
}
#[enumflags2::bitflags]
#[repr(u16)]
#[derive(Copy, Clone)]
enum Bar {
BigNumber = 1 << 20,
}
#[enumflags2::bitflags]
#[repr(u16)]
#[derive(Copy, Clone)]
enum Baz {
BigNumber = (1 << 10) << 10,
}
fn main() {}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/ui/with_generics.rs | test_suite/ui/with_generics.rs | #[enumflags2::bitflags]
#[repr(u8)]
enum Foo<A> {
Bar,
}
fn main() {}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/ui/multiple_bits_deferred.rs | test_suite/ui/multiple_bits_deferred.rs | const THREE: u8 = 3;
#[enumflags2::bitflags]
#[derive(Copy, Clone)]
#[repr(u8)]
enum Foo {
Three = THREE,
}
fn main() {}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/ui/invalid_name_in_default.rs | test_suite/ui/invalid_name_in_default.rs | use enumflags2::bitflags;
#[bitflags(default = A | C)]
#[repr(u8)]
#[derive(Clone, Copy)]
enum Test {
A = 1,
B = 2,
}
fn main() {}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/ui/zero_discriminant_deferred.rs | test_suite/ui/zero_discriminant_deferred.rs | const ZERO: u8 = 0;
#[enumflags2::bitflags]
#[derive(Copy, Clone)]
#[repr(u8)]
enum Foo {
Zero = ZERO,
}
#[enumflags2::bitflags]
#[derive(Copy, Clone)]
#[repr(u8)]
enum Bar {
Overflown = (ZERO + 2) << 7,
}
fn main() {}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/ui/overlapping_flags.rs | test_suite/ui/overlapping_flags.rs | #[enumflags2::bitflags]
#[repr(u8)]
#[derive(Copy, Clone)]
enum Foo {
SomeFlag = 1 << 0,
OverlappingFlag = 1 << 0,
}
fn main() {}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/ui/multiple_bits.rs | test_suite/ui/multiple_bits.rs | #[enumflags2::bitflags]
#[repr(u8)]
#[derive(Copy, Clone)]
enum Foo {
SingleBit = 1,
MultipleBits = 6,
}
fn main() {}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/ui/zero_disciminant.rs | test_suite/ui/zero_disciminant.rs | #[enumflags2::bitflags]
#[repr(u8)]
#[derive(Copy, Clone)]
enum Foo {
Zero = 0,
}
fn main() {}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/test_suite/ui/not_enum.rs | test_suite/ui/not_enum.rs | #[enumflags2::bitflags]
#[derive(Copy, Clone)]
struct Foo(u16);
#[enumflags2::bitflags]
const WTF: u8 = 42;
fn main() {}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
meithecatte/enumflags2 | https://github.com/meithecatte/enumflags2/blob/8a2dc61708e58a85ac9cb4975eddd3f45a5067a9/enumflags_derive/src/lib.rs | enumflags_derive/src/lib.rs | #![recursion_limit = "2048"]
extern crate proc_macro;
#[macro_use]
extern crate quote;
use proc_macro2::{Span, TokenStream};
use std::convert::TryFrom;
use syn::{
parse::{Parse, ParseStream},
parse_macro_input,
spanned::Spanned,
Expr, Ident, DeriveInput, Data, Token, Variant,
};
struct Flag<'a> {
name: Ident,
span: Span,
value: FlagValue<'a>,
}
enum FlagValue<'a> {
Literal(u128),
Deferred,
Inferred(&'a mut Variant),
}
impl FlagValue<'_> {
fn is_inferred(&self) -> bool {
matches!(self, FlagValue::Inferred(_))
}
}
struct Parameters {
default: Vec<Ident>,
}
impl Parse for Parameters {
fn parse(input: ParseStream) -> syn::parse::Result<Self> {
if input.is_empty() {
return Ok(Parameters { default: vec![] });
}
input.parse::<Token![default]>()?;
input.parse::<Token![=]>()?;
let mut default = vec![input.parse()?];
while !input.is_empty() {
input.parse::<Token![|]>()?;
default.push(input.parse()?);
}
Ok(Parameters { default })
}
}
#[proc_macro_attribute]
pub fn bitflags_internal(
attr: proc_macro::TokenStream,
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
let Parameters { default } = parse_macro_input!(attr as Parameters);
let mut ast = parse_macro_input!(input as DeriveInput);
let output = gen_enumflags(&mut ast, default);
output
.unwrap_or_else(|err| {
let error = err.to_compile_error();
quote! {
#ast
#error
}
})
.into()
}
/// Try to evaluate the expression given.
fn fold_expr(expr: &syn::Expr) -> Option<u128> {
match expr {
Expr::Lit(ref expr_lit) => match expr_lit.lit {
syn::Lit::Int(ref lit_int) => lit_int.base10_parse().ok(),
_ => None,
},
Expr::Binary(ref expr_binary) => {
let l = fold_expr(&expr_binary.left)?;
let r = fold_expr(&expr_binary.right)?;
match &expr_binary.op {
syn::BinOp::Shl(_) => u32::try_from(r).ok().and_then(|r| l.checked_shl(r)),
_ => None,
}
}
Expr::Paren(syn::ExprParen { expr, .. }) | Expr::Group(syn::ExprGroup { expr, .. }) => {
fold_expr(expr)
}
_ => None,
}
}
fn collect_flags<'a>(
variants: impl Iterator<Item = &'a mut Variant>,
) -> Result<Vec<Flag<'a>>, syn::Error> {
variants
.map(|variant| {
if !matches!(variant.fields, syn::Fields::Unit) {
return Err(syn::Error::new_spanned(
&variant.fields,
"Bitflag variants cannot contain additional data",
));
}
let name = variant.ident.clone();
let span = variant.span();
let value = if let Some(ref expr) = variant.discriminant {
if let Some(n) = fold_expr(&expr.1) {
FlagValue::Literal(n)
} else {
FlagValue::Deferred
}
} else {
FlagValue::Inferred(variant)
};
Ok(Flag { name, span, value })
})
.collect()
}
fn inferred_value(type_name: &Ident, previous_variants: &[Ident], repr: &Ident) -> Expr {
let tokens = if previous_variants.is_empty() {
quote!(1)
} else {
quote!(::enumflags2::_internal::next_bit(
#(#type_name::#previous_variants as u128)|*
) as #repr)
};
syn::parse2(tokens).expect("couldn't parse inferred value")
}
fn infer_values(flags: &mut [Flag], type_name: &Ident, repr: &Ident) {
let mut previous_variants: Vec<Ident> = flags
.iter()
.filter(|flag| !flag.value.is_inferred())
.map(|flag| flag.name.clone())
.collect();
for flag in flags {
if let FlagValue::Inferred(ref mut variant) = flag.value {
variant.discriminant = Some((
<Token![=]>::default(),
inferred_value(type_name, &previous_variants, repr),
));
previous_variants.push(flag.name.clone());
}
}
}
/// Given a list of attributes, find the `repr`, if any, and return the integer
/// type specified.
fn extract_repr(attrs: &[syn::Attribute]) -> Result<Option<Ident>, syn::Error> {
let mut res = None;
for attr in attrs {
if attr.path().is_ident("repr") {
attr.parse_nested_meta(|meta| {
if let Some(ident) = meta.path.get_ident() {
res = Some(ident.clone());
}
Ok(())
})?;
}
}
Ok(res)
}
/// Check the repr and return the number of bits available
fn type_bits(ty: &Ident) -> Result<u8, syn::Error> {
// This would be so much easier if we could just match on an Ident...
if ty == "usize" {
Err(syn::Error::new_spanned(
ty,
"#[repr(usize)] is not supported. Use u32 or u64 instead.",
))
} else if ty == "i8"
|| ty == "i16"
|| ty == "i32"
|| ty == "i64"
|| ty == "i128"
|| ty == "isize"
{
Err(syn::Error::new_spanned(
ty,
"Signed types in a repr are not supported.",
))
} else if ty == "u8" {
Ok(8)
} else if ty == "u16" {
Ok(16)
} else if ty == "u32" {
Ok(32)
} else if ty == "u64" {
Ok(64)
} else if ty == "u128" {
Ok(128)
} else {
Err(syn::Error::new_spanned(
ty,
"repr must be an integer type for #[bitflags].",
))
}
}
/// Returns deferred checks
fn check_flag(type_name: &Ident, flag: &Flag, bits: u8) -> Result<Option<TokenStream>, syn::Error> {
use FlagValue::*;
match flag.value {
Literal(n) => {
if !n.is_power_of_two() {
Err(syn::Error::new(
flag.span,
"Flags must have exactly one set bit",
))
} else if bits < 128 && n >= 1 << bits {
Err(syn::Error::new(
flag.span,
format!("Flag value out of range for u{}", bits),
))
} else {
Ok(None)
}
}
Inferred(_) => Ok(None),
Deferred => {
let variant_name = &flag.name;
Ok(Some(quote_spanned!(flag.span =>
const _:
<<[(); (
(#type_name::#variant_name as u128).is_power_of_two()
) as usize] as ::enumflags2::_internal::AssertionHelper>
::Status as ::enumflags2::_internal::ExactlyOneBitSet>::X
= ();
)))
}
}
}
fn gen_enumflags(ast: &mut DeriveInput, default: Vec<Ident>) -> Result<TokenStream, syn::Error> {
let ident = &ast.ident;
let span = Span::call_site();
let ast_variants = match &mut ast.data {
Data::Enum(ref mut data) => &mut data.variants,
Data::Struct(data) => {
return Err(syn::Error::new_spanned(&data.struct_token,
"expected enum for #[bitflags], found struct"));
}
Data::Union(data) => {
return Err(syn::Error::new_spanned(&data.union_token,
"expected enum for #[bitflags], found union"));
}
};
if ast.generics.lt_token.is_some() || ast.generics.where_clause.is_some() {
return Err(syn::Error::new_spanned(&ast.generics,
"bitflags cannot be generic"));
}
let repr = extract_repr(&ast.attrs)?
.ok_or_else(|| syn::Error::new_spanned(ident,
"repr attribute missing. Add #[repr(u64)] or a similar attribute to specify the size of the bitfield."))?;
let bits = type_bits(&repr)?;
let mut variants = collect_flags(ast_variants.iter_mut())?;
let deferred = variants
.iter()
.flat_map(|variant| check_flag(ident, variant, bits).transpose())
.collect::<Result<Vec<_>, _>>()?;
infer_values(&mut variants, ident, &repr);
if (bits as usize) < variants.len() {
return Err(syn::Error::new_spanned(
&repr,
format!("Not enough bits for {} flags", variants.len()),
));
}
let std = quote_spanned!(span => ::enumflags2::_internal::core);
let ast_variants = match &ast.data {
Data::Enum(ref data) => &data.variants,
_ => unreachable!(),
};
let variant_names = ast_variants.iter().map(|v| &v.ident).collect::<Vec<_>>();
Ok(quote_spanned! {
span =>
#ast
#(#deferred)*
impl #std::ops::Not for #ident {
type Output = ::enumflags2::BitFlags<Self>;
#[inline(always)]
fn not(self) -> Self::Output {
use ::enumflags2::BitFlags;
BitFlags::from_flag(self).not()
}
}
impl #std::ops::BitOr for #ident {
type Output = ::enumflags2::BitFlags<Self>;
#[inline(always)]
fn bitor(self, other: Self) -> Self::Output {
use ::enumflags2::BitFlags;
BitFlags::from_flag(self) | other
}
}
impl #std::ops::BitAnd for #ident {
type Output = ::enumflags2::BitFlags<Self>;
#[inline(always)]
fn bitand(self, other: Self) -> Self::Output {
use ::enumflags2::BitFlags;
BitFlags::from_flag(self) & other
}
}
impl #std::ops::BitXor for #ident {
type Output = ::enumflags2::BitFlags<Self>;
#[inline(always)]
fn bitxor(self, other: Self) -> Self::Output {
use ::enumflags2::BitFlags;
BitFlags::from_flag(self) ^ other
}
}
unsafe impl ::enumflags2::_internal::RawBitFlags for #ident {
type Numeric = #repr;
const EMPTY: <Self as ::enumflags2::_internal::RawBitFlags>::Numeric = 0;
const DEFAULT: <Self as ::enumflags2::_internal::RawBitFlags>::Numeric =
0 #(| (Self::#default as #repr))*;
const ALL_BITS: <Self as ::enumflags2::_internal::RawBitFlags>::Numeric =
0 #(| (Self::#variant_names as #repr))*;
const BITFLAGS_TYPE_NAME : &'static str =
concat!("BitFlags<", stringify!(#ident), ">");
fn bits(self) -> <Self as ::enumflags2::_internal::RawBitFlags>::Numeric {
self as #repr
}
}
impl ::enumflags2::BitFlag for #ident {}
})
}
| rust | Apache-2.0 | 8a2dc61708e58a85ac9cb4975eddd3f45a5067a9 | 2026-01-04T20:20:11.889172Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/errors.rs | src/errors.rs | use std::io;
use thiserror::Error;
#[derive(Debug, Error)]
#[non_exhaustive]
/// Errors returned from taskchampion operations
pub enum Error {
/// A server-related error
#[error("Server Error: {0}")]
Server(String),
/// A task-database-related error
#[error("Task Database Error: {0}")]
Database(String),
/// An error specifically indicating that the local replica cannot
/// be synchronized with the sever, due to being out of date or some
/// other irrecoverable error.
#[error("Local replica is out of sync with the server")]
OutOfSync,
/// A usage error
#[error("Usage Error: {0}")]
Usage(String),
/// A general error.
#[error(transparent)]
Other(#[from] anyhow::Error),
}
/// Convert private and third party errors into Error::Other.
macro_rules! other_error {
( $error:ty ) => {
impl From<$error> for Error {
fn from(err: $error) -> Self {
Self::Other(err.into())
}
}
};
}
other_error!(io::Error);
other_error!(serde_json::Error);
other_error!(tokio::sync::oneshot::error::RecvError);
#[cfg(feature = "storage-sqlite")]
other_error!(rusqlite::Error);
#[cfg(feature = "storage-sqlite")]
other_error!(crate::storage::sqlite::SqliteError);
#[cfg(feature = "server-gcp")]
other_error!(google_cloud_storage::http::Error);
#[cfg(feature = "server-gcp")]
other_error!(google_cloud_storage::client::google_cloud_auth::error::Error);
#[cfg(feature = "server-aws")]
other_error!(aws_sdk_s3::Error);
#[cfg(feature = "server-aws")]
other_error!(aws_sdk_s3::primitives::ByteStreamError);
impl<T: Sync + Send + 'static> From<tokio::sync::mpsc::error::SendError<T>> for Error {
fn from(err: tokio::sync::mpsc::error::SendError<T>) -> Self {
Self::Other(err.into())
}
}
/// Convert [`idb::Error`] into [`Error::Database`]
#[cfg(all(target_arch = "wasm32", feature = "storage-indexeddb"))]
impl From<idb::Error> for Error {
fn from(err: idb::Error) -> Self {
Error::Database(err.to_string())
}
}
/// Convert [`serde_wasm_bindgen::Error`] into [`Error::Database`]
#[cfg(all(target_arch = "wasm32", feature = "storage-indexeddb"))]
impl From<serde_wasm_bindgen::Error> for Error {
fn from(err: serde_wasm_bindgen::Error) -> Self {
Error::Database(err.to_string())
}
}
/// Convert reqwest errors more carefully
#[cfg(feature = "http")]
impl From<reqwest::Error> for Error {
fn from(err: reqwest::Error) -> Self {
if let Some(status_code) = err.status() {
let msg = format!(
"{} responded with {} {}",
err.url().map(|u| u.as_str()).unwrap_or("unknown"),
status_code.as_u16(),
status_code.canonical_reason().unwrap_or("unknown"),
);
return Self::Server(msg);
}
Self::Server(err.to_string())
}
}
pub(crate) type Result<T> = std::result::Result<T, Error>;
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/lib.rs | src/lib.rs | #![deny(clippy::all)]
#![deny(unreachable_pub)]
#![deny(unnameable_types)]
#![deny(clippy::dbg_macro)]
#![doc = include_str!("crate-doc.md")]
mod depmap;
mod errors;
mod operation;
mod replica;
pub mod server;
pub mod storage;
mod task;
mod taskdb;
mod utils;
mod workingset;
pub use depmap::DependencyMap;
pub use errors::Error;
pub use operation::{Operation, Operations};
pub use replica::Replica;
pub use server::{Server, ServerConfig};
#[cfg(all(target_arch = "wasm32", feature = "storage-indexeddb"))]
pub use storage::indexeddb::IndexedDbStorage;
#[cfg(feature = "storage-sqlite")]
pub use storage::sqlite::SqliteStorage;
pub use task::{utc_timestamp, Annotation, Status, Tag, Task, TaskData};
pub use workingset::WorkingSet;
/// Re-exported type from the `uuid` crate, for ease of compatibility for consumers of this crate.
pub use uuid::Uuid;
/// Re-exported chrono module.
pub use chrono;
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/replica.rs | src/replica.rs | use crate::depmap::DependencyMap;
use crate::errors::Result;
use crate::operation::{Operation, Operations};
use crate::server::Server;
use crate::storage::{Storage, TaskMap};
use crate::task::{Status, Task};
use crate::taskdb::TaskDb;
use crate::workingset::WorkingSet;
use crate::{Error, TaskData};
use anyhow::Context;
use chrono::{DateTime, Duration, Utc};
use log::trace;
use std::collections::HashMap;
use std::sync::Arc;
use uuid::Uuid;
/// A replica represents an instance of a user's task data, providing an easy interface
/// for querying and modifying that data.
///
/// ## Tasks
///
/// Tasks are uniquely identified by UUIDs. Most task modifications are performed via the
/// [`TaskData`](crate::TaskData) or [`Task`](crate::Task) types. The first is a lower-level type
/// that wraps the key-value store representing a task, while the second is a higher-level type
/// that supports methods to update specific properties, maintain dependencies and tags, and so on.
///
/// ## Operations
///
/// Changes to a replica occur by committing [`Operations`]s. All methods that change a replica
/// take an argument of type `&mut Operations`, and the necessary operations are added to that
/// sequence. Those changes may be reflected locally, such as in a [`Task`] or [`TaskData`] value, but
/// are not reflected in the Replica's storage until committed with [`Replica::commit_operations`].
/**
```rust
# #[cfg(feature = "storage-sqlite")]
# {
# use taskchampion::chrono::{TimeZone, Utc};
# use taskchampion::{storage::AccessMode, Operations, Replica, Status, Uuid, SqliteStorage};
# use tempfile::TempDir;
# async fn main() -> anyhow::Result<()> {
# let tmp_dir = TempDir::new()?;
# let mut replica = Replica::new(SqliteStorage::new(
# tmp_dir.path().to_path_buf(),
# AccessMode::ReadWrite,
# true
# ).await?);
// Create a new task, recording the required operations.
let mut ops = Operations::new();
let uuid = Uuid::new_v4();
let mut t = replica.create_task(uuid, &mut ops).await?;
t.set_description("my first task".into(), &mut ops)?;
t.set_status(Status::Pending, &mut ops)?;
t.set_entry(Some(Utc::now()), &mut ops)?;
// Commit those operations to storage.
replica.commit_operations(ops).await?;
#
# Ok(())
# }
# }
```
**/
/// Undo is supported by producing an [`Operations`] value representing the operations to be
/// undone. These are then committed with [`Replica::commit_reversed_operations`].
///
/// ## Working Set
///
/// A replica maintains a "working set" of tasks that are of current concern to the user,
/// specifically pending tasks. These are indexed with small, easy-to-type integers. Newly
/// pending tasks are automatically added to the working set, and the working set can be
/// "renumbered" when necessary.
pub struct Replica<S: Storage> {
taskdb: TaskDb<S>,
/// If true, this replica has already added an undo point.
added_undo_point: bool,
/// The dependency map for this replica, if it has been calculated.
depmap: Option<Arc<DependencyMap>>,
}
impl<S: Storage> Replica<S> {
pub fn new(storage: S) -> Replica<S> {
Replica {
taskdb: TaskDb::new(storage),
added_undo_point: false,
depmap: None,
}
}
/// Update an existing task. If the value is Some, the property is added or updated. If the
/// value is None, the property is deleted. It is not an error to delete a nonexistent
/// property.
#[deprecated(since = "0.7.0", note = "please use TaskData instead")]
pub async fn update_task<S1, S2>(
&mut self,
uuid: Uuid,
property: S1,
value: Option<S2>,
) -> Result<TaskMap>
where
S1: Into<String>,
S2: Into<String>,
{
let value = value.map(|v| v.into());
let property = property.into();
let mut ops = self.make_operations();
let Some(mut task) = self.get_task_data(uuid).await? else {
return Err(Error::Database(format!("Task {uuid} does not exist")));
};
task.update(property, value, &mut ops);
self.commit_operations(ops).await?;
Ok(self
.taskdb
.get_task(uuid)
.await?
.expect("task should exist after an update"))
}
/// Get all tasks represented as a map keyed by UUID
pub async fn all_tasks(&mut self) -> Result<HashMap<Uuid, Task>> {
let depmap = self.dependency_map(false).await?;
let mut res = HashMap::new();
for (uuid, tm) in self.taskdb.all_tasks().await?.drain(..) {
res.insert(uuid, Task::new(TaskData::new(uuid, tm), depmap.clone()));
}
Ok(res)
}
/// Get all task represented as a map of [`TaskData`] keyed by UUID
pub async fn all_task_data(&mut self) -> Result<HashMap<Uuid, TaskData>> {
let mut res = HashMap::new();
for (uuid, tm) in self.taskdb.all_tasks().await?.drain(..) {
res.insert(uuid, TaskData::new(uuid, tm));
}
Ok(res)
}
/// Get the UUIDs of all tasks
pub async fn all_task_uuids(&mut self) -> Result<Vec<Uuid>> {
self.taskdb.all_task_uuids().await
}
/// Get an array containing all pending tasks
pub async fn pending_tasks(&mut self) -> Result<Vec<Task>> {
let depmap = self.dependency_map(false).await?;
let res = self
.pending_task_data()
.await?
.into_iter()
.map(|taskdata| Task::new(taskdata, depmap.clone()))
.collect();
Ok(res)
}
pub async fn pending_task_data(&mut self) -> Result<Vec<TaskData>> {
let res = self
.taskdb
.get_pending_tasks()
.await?
.into_iter()
.map(|(uuid, taskmap)| TaskData::new(uuid, taskmap))
.collect::<Vec<_>>();
Ok(res)
}
/// Get the "working set" for this replica. This is a snapshot of the current state,
/// and it is up to the caller to decide how long to store this value.
pub async fn working_set(&mut self) -> Result<WorkingSet> {
Ok(WorkingSet::new(self.taskdb.working_set().await?))
}
/// Get the dependency map for all pending tasks.
///
/// A task dependency is recognized when a task in the working set depends on a task with
/// status equal to Pending.
///
/// The data in this map is cached when it is first requested and may not contain modifications
/// made locally in this Replica instance. The result is reference-counted and may
/// outlive the Replica.
///
/// If `force` is true, then the result is re-calculated from the current state of the replica,
/// although previously-returned dependency maps are not updated.
///
/// Calculating this value requires a scan of the full working set and may not be performant.
/// The [`TaskData`] API avoids generating this value.
pub async fn dependency_map(&mut self, force: bool) -> Result<Arc<DependencyMap>> {
if force || self.depmap.is_none() {
// note: we can't use self.get_task here, as that depends on a
// DependencyMap
let mut dm = DependencyMap::new();
// temporary cache tracking whether tasks are considered Pending or not.
let mut is_pending_cache: HashMap<Uuid, bool> = HashMap::new();
let ws = self.working_set().await?;
// for each task in the working set
for i in 1..=ws.largest_index() {
// get the task UUID
if let Some(u) = ws.by_index(i) {
// get the task
if let Some(taskmap) = self.taskdb.get_task(u).await? {
// search the task's keys
for p in taskmap.keys() {
// for one matching `dep_..`
if let Some(dep_str) = p.strip_prefix("dep_") {
// and extract the UUID from the key
if let Ok(dep) = Uuid::parse_str(dep_str) {
// the dependency is pending if
let dep_pending = {
// we've determined this before and cached the result
if let Some(dep_pending) = is_pending_cache.get(&dep) {
*dep_pending
} else if let Some(dep_taskmap) =
// or if we get the task
self.taskdb.get_task(dep).await?
{
// and its status is "pending"
let dep_pending = matches!(
dep_taskmap
.get("status")
.map(|tm| Status::from_taskmap(tm)),
Some(Status::Pending)
);
is_pending_cache.insert(dep, dep_pending);
dep_pending
} else {
false
}
};
if dep_pending {
dm.add_dependency(u, dep);
}
}
}
}
}
}
}
self.depmap = Some(Arc::new(dm));
}
// at this point self.depmap is guaranteed to be Some(_)
Ok(self.depmap.as_ref().unwrap().clone())
}
/// Get an existing task by its UUID
pub async fn get_task(&mut self, uuid: Uuid) -> Result<Option<Task>> {
let depmap = self.dependency_map(false).await?;
Ok(self
.taskdb
.get_task(uuid)
.await?
.map(move |tm| Task::new(TaskData::new(uuid, tm), depmap)))
}
/// Get an existing task by its UUID, as a [`TaskData`](crate::TaskData).
pub async fn get_task_data(&mut self, uuid: Uuid) -> Result<Option<TaskData>> {
Ok(self
.taskdb
.get_task(uuid)
.await?
.map(move |tm| TaskData::new(uuid, tm)))
}
/// Get the operations that led to the given task.
///
/// This set of operations is suitable for providing an overview of the task history, but does
/// not satisfy any invariants around operations and task state. That is, it is not guaranteed
/// that the returned operations, if applied in order, would generate the current task state.
///
/// It is also not guaranteed to be the same on every replica. Differences can occur when
/// conflicting operations were performed on different replicas. The "losing" operations in
/// those conflicts may not appear on all replicas. In practice, conflicts are rare and the
/// results of this function will be the same on all replicas for most tasks.
pub async fn get_task_operations(&mut self, uuid: Uuid) -> Result<Operations> {
self.taskdb.get_task_operations(uuid).await
}
/// Create a new task, setting `modified`, `description`, `status`, and `entry`.
///
/// This uses the high-level task interface. To create a task with the low-level
/// interface, use [`TaskData::create`](crate::TaskData::create).
#[deprecated(
since = "0.7.0",
note = "please use `create_task` and call `Task` methods `set_status`, `set_description`, and `set_entry`"
)]
pub async fn new_task(&mut self, status: Status, description: String) -> Result<Task> {
let uuid = Uuid::new_v4();
let mut ops = self.make_operations();
let now = format!("{}", Utc::now().timestamp());
let mut task = TaskData::create(uuid, &mut ops);
task.update("modified", Some(now.clone()), &mut ops);
task.update("description", Some(description), &mut ops);
task.update("status", Some(status.to_taskmap().to_string()), &mut ops);
task.update("entry", Some(now), &mut ops);
self.commit_operations(ops).await?;
trace!("task {} created", uuid);
Ok(self
.get_task(uuid)
.await?
.expect("Task should exist after creation"))
}
/// Create a new task.
///
/// Use [Uuid::new_v4] to invent a new task ID, if necessary. If the task already
/// exists, it is returned.
pub async fn create_task(&mut self, uuid: Uuid, ops: &mut Operations) -> Result<Task> {
if let Some(task) = self.get_task(uuid).await? {
return Ok(task);
}
let depmap = self.dependency_map(false).await?;
Ok(Task::new(TaskData::create(uuid, ops), depmap))
}
/// Create a new, empty task with the given UUID. This is useful for importing tasks, but
/// otherwise should be avoided in favor of `create_task`. If the task already exists, this
/// does nothing and returns the existing task.
#[deprecated(since = "0.7.0", note = "please use TaskData instead")]
pub async fn import_task_with_uuid(&mut self, uuid: Uuid) -> Result<Task> {
let mut ops = self.make_operations();
TaskData::create(uuid, &mut ops);
self.commit_operations(ops).await?;
Ok(self
.get_task(uuid)
.await?
.expect("Task should exist after creation"))
}
/// Delete a task. The task must exist. Note that this is different from setting status to
/// Deleted; this is the final purge of the task.
///
/// Deletion may interact poorly with modifications to the same task on other replicas. For
/// example, if a task is deleted on replica 1 and its description modified on replica 1, then
/// after both replicas have fully synced, the resulting task will only have a `description`
/// property.
#[deprecated(since = "0.7.0", note = "please use TaskData::delete")]
pub async fn delete_task(&mut self, uuid: Uuid) -> Result<()> {
let Some(mut task) = self.get_task_data(uuid).await? else {
return Err(Error::Database(format!("Task {uuid} does not exist")));
};
let mut ops = self.make_operations();
task.delete(&mut ops);
self.commit_operations(ops).await?;
trace!("task {} deleted", uuid);
Ok(())
}
/// Commit a set of operations to the replica.
///
/// All local state on the replica will be updated accordingly, including the working set and
/// and temporarily cached data.
pub async fn commit_operations(&mut self, operations: Operations) -> Result<()> {
if operations.is_empty() {
return Ok(());
}
// Add tasks to the working set when the status property is updated from anything other
// than pending or recurring to one of those two statuses.
let pending = Status::Pending.to_taskmap();
let recurring = Status::Recurring.to_taskmap();
let is_p_or_r = |val: &Option<String>| {
if let Some(val) = val {
val == pending || val == recurring
} else {
false
}
};
let add_to_working_set = |op: &Operation| match op {
Operation::Update {
property,
value,
old_value,
..
} => property == "status" && !is_p_or_r(old_value) && is_p_or_r(value),
_ => false,
};
self.taskdb
.commit_operations(operations, add_to_working_set)
.await?;
// The cached dependency map may now be invalid, do not retain it. Any existing Task values
// will continue to use the old map.
self.depmap = None;
Ok(())
}
/// Synchronize this replica against the given server. The working set is rebuilt after
/// this occurs, but without renumbering, so any newly-pending tasks should appear in
/// the working set.
///
/// If `avoid_snapshots` is true, the sync operations produces a snapshot only when the server
/// indicate it is urgent (snapshot urgency "high"). This allows time for other replicas to
/// create a snapshot before this one does.
///
/// Set this to true on systems more constrained in CPU, memory, or bandwidth than a typical desktop
/// system
pub async fn sync(
&mut self,
server: &mut Box<dyn Server>,
avoid_snapshots: bool,
) -> Result<()> {
self.taskdb
.sync(server, avoid_snapshots)
.await
.context("Failed to synchronize with server")?;
self.rebuild_working_set(false)
.await
.context("Failed to rebuild working set after sync")?;
Ok(())
}
/// Return the operations back to and including the last undo point, or since the last sync if
/// no undo point is found.
///
/// The operations are returned in the order they were applied. Use
/// [`Replica::commit_reversed_operations`] to "undo" them.
pub async fn get_undo_operations(&mut self) -> Result<Operations> {
self.taskdb.get_undo_operations().await
}
/// Commit the reverse of the given operations, beginning with the last operation in the given
/// operations and proceeding to the first.
///
/// This method only supports reversing operations if they precisely match local operations
/// that have not yet been synchronized, and will return `false` if this is not the case.
pub async fn commit_reversed_operations(&mut self, operations: Operations) -> Result<bool> {
if !self.taskdb.commit_reversed_operations(operations).await? {
return Ok(false);
}
// Both the dependency map and the working set are potentially now invalid.
self.depmap = None;
self.rebuild_working_set(false)
.await
.context("Failed to rebuild working set after committing reversed operations")?;
Ok(true)
}
/// Rebuild this replica's working set, based on whether tasks are pending or not. If
/// `renumber` is true, then existing tasks may be moved to new working-set indices; in any
/// case, on completion all pending and recurring tasks are in the working set and all tasks
/// with other statuses are not.
pub async fn rebuild_working_set(&mut self, renumber: bool) -> Result<()> {
let pending = String::from(Status::Pending.to_taskmap());
let recurring = String::from(Status::Recurring.to_taskmap());
self.taskdb
.rebuild_working_set(
|t| {
if let Some(st) = t.get("status") {
st == &pending || st == &recurring
} else {
false
}
},
renumber,
)
.await?;
Ok(())
}
/// Expire old, deleted tasks.
///
/// Expiration entails removal of tasks from the replica. Any modifications that occur after
/// the deletion (such as operations synchronized from other replicas) will do nothing.
///
/// Tasks are eligible for expiration when they have status Deleted and have not been modified
/// for 180 days (about six months). Note that completed tasks are not eligible.
pub async fn expire_tasks(&mut self) -> Result<()> {
let six_mos_ago = Utc::now() - Duration::days(180);
let mut ops = Operations::new();
let deleted = Status::Deleted.to_taskmap();
self.all_task_data()
.await?
.drain()
.filter(|(_, t)| t.get("status") == Some(deleted))
.filter(|(_, t)| {
t.get("modified").is_some_and(|m| {
m.parse().is_ok_and(|time_sec| {
DateTime::from_timestamp(time_sec, 0).is_some_and(|dt| dt < six_mos_ago)
})
})
})
.for_each(|(_, mut t)| t.delete(&mut ops));
self.commit_operations(ops).await
}
/// Add an UndoPoint, if one has not already been added by this Replica. This occurs
/// automatically when a change is made. The `force` flag allows forcing a new UndoPoint
/// even if one has already been created by this Replica, and may be useful when a Replica
/// instance is held for a long time and used to apply more than one user-visible change.
#[deprecated(
since = "0.7.0",
note = "Push an `Operation::UndoPoint` onto your `Operations` instead."
)]
pub async fn add_undo_point(&mut self, force: bool) -> Result<()> {
if force || !self.added_undo_point {
let ops = vec![Operation::UndoPoint];
self.commit_operations(ops).await?;
self.added_undo_point = true;
}
Ok(())
}
/// Make a new `Operations`, with an undo operation if one has not already been added by
/// this `Replica` insance
fn make_operations(&mut self) -> Operations {
let mut ops = Operations::new();
if !self.added_undo_point {
ops.push(Operation::UndoPoint);
self.added_undo_point = true;
}
ops
}
/// Get the number of operations local to this replica and not yet synchronized to the server.
pub async fn num_local_operations(&mut self) -> Result<usize> {
self.taskdb.num_operations().await
}
/// Get the number of undo points available (number of times `undo` will succeed).
pub async fn num_undo_points(&mut self) -> Result<usize> {
self.taskdb.num_undo_points().await
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{storage::inmemory::InMemoryStorage, task::Status};
use chrono::{DateTime, TimeZone};
use pretty_assertions::assert_eq;
use std::collections::HashSet;
use uuid::Uuid;
const JUST_NOW: Option<DateTime<Utc>> = DateTime::from_timestamp(1800000000, 0);
/// Rewrite automatically-created dates to "just-now" or `JUST_NOW` for ease of testing.
fn clean_op(op: Operation) -> Operation {
if let Operation::Update {
uuid,
property,
mut old_value,
mut value,
..
} = op
{
if property == "modified" || property == "end" || property == "entry" {
if value.is_some() {
value = Some("just-now".into());
}
if old_value.is_some() {
old_value = Some("just-now".into());
}
}
Operation::Update {
uuid,
property,
old_value,
value,
timestamp: JUST_NOW.unwrap(),
}
} else {
op
}
}
#[tokio::test]
async fn new_task() {
let mut rep = Replica::new(InMemoryStorage::new());
#[allow(deprecated)]
let t = rep
.new_task(Status::Pending, "a task".into())
.await
.unwrap();
assert_eq!(t.get_description(), String::from("a task"));
assert_eq!(t.get_status(), Status::Pending);
assert!(t.get_modified().is_some());
}
#[tokio::test]
async fn modify_task() {
let mut rep = Replica::new(InMemoryStorage::new());
// Further test the deprecated `new_task` method.
#[allow(deprecated)]
let mut t = rep
.new_task(Status::Pending, "a task".into())
.await
.unwrap();
let mut ops = Operations::new();
t.set_description(String::from("past tense"), &mut ops)
.unwrap();
t.set_status(Status::Completed, &mut ops).unwrap();
// check that values have changed on the Task
assert_eq!(t.get_description(), "past tense");
assert_eq!(t.get_status(), Status::Completed);
// check that values have not changed in storage, yet
let t = rep.get_task(t.get_uuid()).await.unwrap().unwrap();
assert_eq!(t.get_description(), "a task");
assert_eq!(t.get_status(), Status::Pending);
// check that values have changed in storage after commit
rep.commit_operations(ops).await.unwrap();
let t = rep.get_task(t.get_uuid()).await.unwrap().unwrap();
assert_eq!(t.get_description(), "past tense");
assert_eq!(t.get_status(), Status::Completed);
// and check for the corresponding operations, cleaning out the timestamps
// and modified properties as these are based on the current time
assert_eq!(
rep.taskdb
.operations()
.await
.into_iter()
.map(clean_op)
.collect::<Vec<_>>(),
vec![
Operation::UndoPoint,
Operation::Create { uuid: t.get_uuid() },
Operation::Update {
uuid: t.get_uuid(),
property: "modified".into(),
old_value: None,
value: Some("just-now".into()),
timestamp: JUST_NOW.unwrap(),
},
Operation::Update {
uuid: t.get_uuid(),
property: "description".into(),
old_value: None,
value: Some("a task".into()),
timestamp: JUST_NOW.unwrap(),
},
Operation::Update {
uuid: t.get_uuid(),
property: "status".into(),
old_value: None,
value: Some("pending".into()),
timestamp: JUST_NOW.unwrap(),
},
Operation::Update {
uuid: t.get_uuid(),
property: "entry".into(),
old_value: None,
value: Some("just-now".into()),
timestamp: JUST_NOW.unwrap(),
},
Operation::Update {
uuid: t.get_uuid(),
property: "modified".into(),
old_value: Some("just-now".into()),
value: Some("just-now".into()),
timestamp: JUST_NOW.unwrap(),
},
Operation::Update {
uuid: t.get_uuid(),
property: "description".into(),
old_value: Some("a task".into()),
value: Some("past tense".into()),
timestamp: JUST_NOW.unwrap(),
},
Operation::Update {
uuid: t.get_uuid(),
property: "end".into(),
old_value: None,
value: Some("just-now".into()),
timestamp: JUST_NOW.unwrap(),
},
Operation::Update {
uuid: t.get_uuid(),
property: "status".into(),
old_value: Some("pending".into()),
value: Some("completed".into()),
timestamp: JUST_NOW.unwrap(),
},
]
);
// num_local_operations includes all but the undo point
assert_eq!(rep.num_local_operations().await.unwrap(), 9);
// num_undo_points includes only the undo point
assert_eq!(rep.num_undo_points().await.unwrap(), 1);
// A second undo point is counted.
let ops = vec![Operation::UndoPoint];
rep.commit_operations(ops).await.unwrap();
assert_eq!(rep.num_undo_points().await.unwrap(), 2);
}
#[tokio::test]
async fn delete_task() {
let mut rep = Replica::new(InMemoryStorage::new());
let uuid = Uuid::new_v4();
let mut ops = Operations::new();
rep.create_task(uuid, &mut ops).await.unwrap();
rep.commit_operations(ops).await.unwrap();
#[allow(deprecated)]
rep.delete_task(uuid).await.unwrap();
assert_eq!(rep.get_task(uuid).await.unwrap(), None);
}
#[tokio::test]
async fn all_tasks() {
let mut rep = Replica::new(InMemoryStorage::new());
let (uuid1, uuid2) = (Uuid::new_v4(), Uuid::new_v4());
let mut ops = Operations::new();
rep.create_task(uuid1, &mut ops).await.unwrap();
rep.create_task(uuid2, &mut ops).await.unwrap();
rep.commit_operations(ops).await.unwrap();
let all_tasks = rep.all_tasks().await.unwrap();
assert_eq!(all_tasks.len(), 2);
assert_eq!(all_tasks.get(&uuid1).unwrap().get_uuid(), uuid1);
assert_eq!(all_tasks.get(&uuid2).unwrap().get_uuid(), uuid2);
let all_tasks = rep.all_task_data().await.unwrap();
assert_eq!(all_tasks.len(), 2);
assert_eq!(all_tasks.get(&uuid1).unwrap().get_uuid(), uuid1);
assert_eq!(all_tasks.get(&uuid2).unwrap().get_uuid(), uuid2);
let mut all_uuids = rep.all_task_uuids().await.unwrap();
all_uuids.sort();
let mut exp_uuids = vec![uuid1, uuid2];
exp_uuids.sort();
assert_eq!(all_uuids.len(), 2);
assert_eq!(all_uuids, exp_uuids);
}
#[tokio::test]
async fn pending_tasks() {
let mut rep = Replica::new(InMemoryStorage::new());
let (uuid1, uuid2, uuid3) = (Uuid::new_v4(), Uuid::new_v4(), Uuid::new_v4());
let mut ops = Operations::new();
let mut t1 = rep.create_task(uuid1, &mut ops).await.unwrap();
t1.set_status(Status::Pending, &mut ops).unwrap();
let mut t2 = rep.create_task(uuid2, &mut ops).await.unwrap();
t2.set_status(Status::Pending, &mut ops).unwrap();
let mut t3 = rep.create_task(uuid3, &mut ops).await.unwrap();
t3.set_status(Status::Completed, &mut ops).unwrap();
rep.commit_operations(ops).await.unwrap();
let pending_tasks = rep.pending_tasks().await.unwrap();
assert_eq!(pending_tasks.len(), 2);
assert_eq!(pending_tasks.first().unwrap().get_uuid(), uuid1);
assert_eq!(pending_tasks.get(1).unwrap().get_uuid(), uuid2);
}
#[tokio::test]
async fn commit_operations() -> Result<()> {
// This mostly tests the working-set callback, as `TaskDB::commit_operations` has
// tests for the remaining functionality.
let mut rep = Replica::new(InMemoryStorage::new());
// Generate the depmap so later assertions can verify it is reset.
rep.dependency_map(true).await.unwrap();
assert!(rep.depmap.is_some());
let mut ops = Operations::new();
let uuid1 = Uuid::new_v4();
let mut t = rep.create_task(uuid1, &mut ops).await.unwrap();
t.set_status(Status::Pending, &mut ops).unwrap();
// uuid2 is created and deleted, but this does not affect the
// working set.
let uuid2 = Uuid::new_v4();
ops.push(Operation::Create { uuid: uuid2 });
ops.push(Operation::Delete {
uuid: uuid2,
old_task: TaskMap::new(),
});
let update_op = |uuid, property: &str, old_value: Option<&str>, value: Option<&str>| {
Operation::Update {
uuid,
property: property.to_string(),
value: value.map(|v| v.to_string()),
timestamp: Utc::now(),
old_value: old_value.map(|v| v.to_string()),
}
};
// uuid3 has status deleted, so is not added to the working set.
let uuid3 = Uuid::new_v4();
ops.push(update_op(uuid3, "status", None, Some("deleted")));
// uuid4 goes from pending to pending, so is not added to the working set.
let uuid4 = Uuid::new_v4();
ops.push(update_op(uuid4, "status", Some("pending"), Some("pending")));
// uuid5 goes from recurring to recurring, so is not added to the working set.
let uuid5 = Uuid::new_v4();
ops.push(update_op(
uuid5,
"status",
Some("recurring"),
Some("recurring"),
));
// uuid6 goes from recurring to pending, so is not added to the working set.
let uuid6 = Uuid::new_v4();
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | true |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/utils.rs | src/utils.rs | use std::convert::TryInto;
use uuid::Uuid;
/// A representation of a UUID as a key. This is just a newtype wrapping the 128-bit packed form
/// of a UUID.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub(crate) struct Key(uuid::Bytes);
impl From<&[u8]> for Key {
fn from(bytes: &[u8]) -> Key {
Key(bytes.try_into().expect("expected 16 bytes"))
}
}
impl From<&Uuid> for Key {
fn from(uuid: &Uuid) -> Key {
let key = Key(*uuid.as_bytes());
key
}
}
impl From<Uuid> for Key {
fn from(uuid: Uuid) -> Key {
let key = Key(*uuid.as_bytes());
key
}
}
impl From<Key> for Uuid {
fn from(key: Key) -> Uuid {
Uuid::from_bytes(key.0)
}
}
impl AsRef<[u8]> for Key {
fn as_ref(&self) -> &[u8] {
&self.0[..]
}
}
#[cfg(test)]
mod test {
use super::*;
use pretty_assertions::assert_eq;
#[test]
fn test_from_bytes() {
let k: Key = (&[1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16][..]).into();
let u: Uuid = k.into();
assert_eq!(
u,
Uuid::parse_str("01020304-0506-0708-090a-0b0c0d0e0f10").unwrap()
);
}
#[test]
#[should_panic]
fn test_from_bytes_bad_len() {
let _: Key = (&[1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11][..]).into();
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/depmap.rs | src/depmap.rs | use uuid::Uuid;
/// DependencyMap stores information on task dependencies between pending tasks.
///
/// This information requires a scan of the working set to generate, so it is
/// typically calculated once and re-used.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct DependencyMap {
/// Edges of the dependency graph. If (a, b) is in this array, then task a depends on task b.
edges: Vec<(Uuid, Uuid)>,
}
impl DependencyMap {
/// Create a new, empty DependencyMap.
pub(super) fn new() -> Self {
Self { edges: Vec::new() }
}
/// Add a dependency of a on b.
pub(super) fn add_dependency(&mut self, a: Uuid, b: Uuid) {
self.edges.push((a, b));
}
/// Return an iterator of Uuids on which task `deps_of` depends. This is equivalent to
/// `task.get_dependencies()`.
pub fn dependencies(&self, dep_of: Uuid) -> impl Iterator<Item = Uuid> + '_ {
self.edges
.iter()
.filter_map(move |(a, b)| if a == &dep_of { Some(*b) } else { None })
}
/// Return an iterator of Uuids of tasks that depend on `dep_on`
/// `task.get_dependencies()`.
pub fn dependents(&self, dep_on: Uuid) -> impl Iterator<Item = Uuid> + '_ {
self.edges
.iter()
.filter_map(move |(a, b)| if b == &dep_on { Some(*a) } else { None })
}
}
#[cfg(test)]
mod test {
use super::*;
use pretty_assertions::assert_eq;
use std::collections::HashSet;
#[test]
fn dependencies() {
let t = Uuid::new_v4();
let uuid1 = Uuid::new_v4();
let uuid2 = Uuid::new_v4();
let mut dm = DependencyMap::new();
dm.add_dependency(t, uuid1);
dm.add_dependency(t, uuid2);
dm.add_dependency(Uuid::new_v4(), t);
dm.add_dependency(Uuid::new_v4(), uuid1);
dm.add_dependency(uuid2, Uuid::new_v4());
assert_eq!(
dm.dependencies(t).collect::<HashSet<_>>(),
HashSet::from([uuid1, uuid2])
);
}
#[test]
fn dependents() {
let t = Uuid::new_v4();
let uuid1 = Uuid::new_v4();
let uuid2 = Uuid::new_v4();
let mut dm = DependencyMap::new();
dm.add_dependency(uuid1, t);
dm.add_dependency(uuid2, t);
dm.add_dependency(t, Uuid::new_v4());
dm.add_dependency(Uuid::new_v4(), uuid1);
dm.add_dependency(uuid2, Uuid::new_v4());
assert_eq!(
dm.dependents(t).collect::<HashSet<_>>(),
HashSet::from([uuid1, uuid2])
);
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/workingset.rs | src/workingset.rs | use std::collections::HashMap;
use uuid::Uuid;
/// A WorkingSet represents a snapshot of the working set from a replica.
///
/// A replica's working set is a mapping from small integers to task uuids for all pending tasks.
/// The small integers are meant to be stable, easily-typed identifiers for users to interact with
/// important tasks.
///
/// IMPORTANT: the content of the working set may change at any time that a DB transaction is not
/// in progress, and the data in this type will not be updated automatically. It is up to the
/// caller to decide how long to keep this value, and how much to trust the accuracy of its
/// contents. In practice, the answers are usually "a few milliseconds" and treating unexpected
/// results as non-fatal.
#[derive(Debug)]
pub struct WorkingSet {
by_index: Vec<Option<Uuid>>,
by_uuid: HashMap<Uuid, usize>,
}
impl WorkingSet {
/// Create a new WorkingSet. Typically this is acquired via `replica.working_set()`
pub(crate) fn new(by_index: Vec<Option<Uuid>>) -> Self {
let mut by_uuid = HashMap::new();
// working sets are 1-indexed, so element 0 should always be None
assert!(by_index.is_empty() || by_index[0].is_none());
for (index, uuid) in by_index.iter().enumerate() {
if let Some(uuid) = uuid {
by_uuid.insert(*uuid, index);
}
}
Self { by_index, by_uuid }
}
/// Get the "length" of the working set: the total number of uuids in the set.
pub fn len(&self) -> usize {
self.by_index.iter().filter(|e| e.is_some()).count()
}
/// Get the largest index in the working set, or zero if the set is empty.
pub fn largest_index(&self) -> usize {
self.by_index.len().saturating_sub(1)
}
/// True if the length is zero
pub fn is_empty(&self) -> bool {
self.by_index.iter().all(|e| e.is_none())
}
/// Get the uuid with the given index, if any exists.
pub fn by_index(&self, index: usize) -> Option<Uuid> {
if let Some(Some(uuid)) = self.by_index.get(index) {
Some(*uuid)
} else {
None
}
}
/// Get the index for the given uuid, if any
pub fn by_uuid(&self, uuid: Uuid) -> Option<usize> {
self.by_uuid.get(&uuid).copied()
}
/// Iterate over pairs (index, uuid), in order by index.
pub fn iter(&self) -> impl Iterator<Item = (usize, Uuid)> + '_ {
self.by_index
.iter()
.enumerate()
.filter_map(|(index, uuid)| uuid.as_ref().map(|uuid| (index, *uuid)))
}
}
#[cfg(test)]
mod test {
use super::*;
use pretty_assertions::assert_eq;
fn make() -> (Uuid, Uuid, WorkingSet) {
let uuid1 = Uuid::new_v4();
let uuid2 = Uuid::new_v4();
(
uuid1,
uuid2,
WorkingSet::new(vec![None, Some(uuid1), None, Some(uuid2), None]),
)
}
#[test]
fn test_new() {
let (_, uuid2, ws) = make();
assert_eq!(ws.by_index[3], Some(uuid2));
assert_eq!(ws.by_uuid.get(&uuid2), Some(&3));
}
#[test]
fn test_len_and_is_empty() {
let (_, _, ws) = make();
assert_eq!(ws.len(), 2);
assert_eq!(ws.is_empty(), false);
let ws = WorkingSet::new(vec![]);
assert_eq!(ws.len(), 0);
assert_eq!(ws.is_empty(), true);
let ws = WorkingSet::new(vec![None, None, None]);
assert_eq!(ws.len(), 0);
assert_eq!(ws.is_empty(), true);
}
#[test]
fn test_largest_index() {
let uuid1 = Uuid::new_v4();
let uuid2 = Uuid::new_v4();
let ws = WorkingSet::new(vec![]);
assert_eq!(ws.largest_index(), 0);
let ws = WorkingSet::new(vec![None, Some(uuid1)]);
assert_eq!(ws.largest_index(), 1);
let ws = WorkingSet::new(vec![None, Some(uuid1), None, Some(uuid2)]);
assert_eq!(ws.largest_index(), 3);
let ws = WorkingSet::new(vec![None, Some(uuid1), None, Some(uuid2), None]);
assert_eq!(ws.largest_index(), 4);
}
#[test]
fn test_by_index() {
let (uuid1, uuid2, ws) = make();
assert_eq!(ws.by_index(0), None);
assert_eq!(ws.by_index(1), Some(uuid1));
assert_eq!(ws.by_index(2), None);
assert_eq!(ws.by_index(3), Some(uuid2));
assert_eq!(ws.by_index(4), None);
assert_eq!(ws.by_index(100), None); // past the end of the vector
}
#[test]
fn test_by_uuid() {
let (uuid1, uuid2, ws) = make();
let nosuch = Uuid::new_v4();
assert_eq!(ws.by_uuid(uuid1), Some(1));
assert_eq!(ws.by_uuid(uuid2), Some(3));
assert_eq!(ws.by_uuid(nosuch), None);
}
#[test]
fn test_iter() {
let (uuid1, uuid2, ws) = make();
assert_eq!(ws.iter().collect::<Vec<_>>(), vec![(1, uuid1), (3, uuid2),]);
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/operation.rs | src/operation.rs | use crate::storage::TaskMap;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::cmp::{Ord, Ordering};
use uuid::Uuid;
/// An Operation defines a single change to the task database, as stored locally in the replica.
///
/// Operations are the means by which changes are made to the database, typically batched together
/// into [`Operations`] and committed to the replica.
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub enum Operation {
/// Create a new task.
///
/// On undo, the task is deleted.
Create { uuid: Uuid },
/// Delete an existing task.
///
/// On undo, the task's data is restored from old_task.
Delete { uuid: Uuid, old_task: TaskMap },
/// Update an existing task, setting the given property to the given value. If the value is
/// None, then the corresponding property is deleted.
///
/// On undo, the property is set back to its previous value.
Update {
uuid: Uuid,
property: String,
old_value: Option<String>,
value: Option<String>,
timestamp: DateTime<Utc>,
},
/// Mark a point in the operations history to which the user might like to undo. Users
/// typically want to undo more than one operation at a time (for example, most changes update
/// both the `modified` property and some other task property -- the user would like to "undo"
/// both updates at the same time). Applying an UndoPoint does nothing.
UndoPoint,
}
impl Operation {
/// Determine whether this is an undo point.
pub fn is_undo_point(&self) -> bool {
self == &Self::UndoPoint
}
/// Get the UUID for this function, if it has one.
pub fn get_uuid(&self) -> Option<Uuid> {
match self {
Operation::Create { uuid: u } => Some(*u),
Operation::Delete { uuid: u, .. } => Some(*u),
Operation::Update { uuid: u, .. } => Some(*u),
Operation::UndoPoint => None,
}
}
}
impl PartialOrd for Operation {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for Operation {
/// Define an order for operations.
///
/// First, orders by Uuid, with all UndoPoints first. Then, by type, with Creates, then Updates,
/// then Deletes. Updates are ordered by timestamp and, where that is equal, by the remaining
/// fields. This ordering is intended to be "human-readable", even in confusing situations like
/// multiple creations of the same task.
fn cmp(&self, other: &Self) -> Ordering {
use Operation::*;
use Ordering::*;
fn type_idx(op: &Operation) -> u8 {
match op {
UndoPoint => 0,
Create { .. } => 1,
Update { .. } => 2,
Delete { .. } => 3,
}
}
Equal
// First sort by UUID. UndoPoint's have `None` as uuid, and are thus sorted first.
.then(self.get_uuid().cmp(&other.get_uuid()))
// Then sort by type.
.then(type_idx(self).cmp(&type_idx(other)))
// Then sort within the same type. Only match arms with `self` and `other` the same
// type are possible, as we have already sorted by type.
.then_with(|| {
match (self, other) {
(Create { uuid: uuid1 }, Create { uuid: uuid2 }) => uuid1.cmp(uuid2),
(
Delete {
uuid: uuid1,
old_task: old_task1,
},
Delete {
uuid: uuid2,
old_task: old_task2,
},
) => uuid1.cmp(uuid2).then_with(|| {
let mut old_task1 = old_task1.iter().collect::<Vec<_>>();
old_task1.sort();
let mut old_task2 = old_task2.iter().collect::<Vec<_>>();
old_task2.sort();
old_task1.cmp(&old_task2)
}),
(
Update {
uuid: uuid1,
property: property1,
value: value1,
old_value: old_value1,
timestamp: timestamp1,
},
Update {
uuid: uuid2,
property: property2,
value: value2,
old_value: old_value2,
timestamp: timestamp2,
},
) => Equal
// Sort Updates principally by timestamp.
.then(uuid1.cmp(uuid2))
.then(timestamp1.cmp(timestamp2))
.then(property1.cmp(property2))
.then(value1.cmp(value2))
.then(old_value1.cmp(old_value2)),
(UndoPoint, UndoPoint) => Equal,
_ => unreachable!(),
}
})
}
}
/// Operations are a sequence of [`Operation`] values, which can be committed in a single
/// transaction with [`Replica::commit_operations`](crate::Replica::commit_operations).
pub type Operations = Vec<Operation>;
#[cfg(test)]
mod test {
use super::*;
use crate::errors::Result;
use chrono::Utc;
use pretty_assertions::assert_eq;
use Operation::*;
#[test]
fn test_json_create() -> Result<()> {
let uuid = Uuid::new_v4();
let op = Create { uuid };
let json = serde_json::to_string(&op)?;
assert_eq!(json, format!(r#"{{"Create":{{"uuid":"{}"}}}}"#, uuid));
let deser: Operation = serde_json::from_str(&json)?;
assert_eq!(deser, op);
Ok(())
}
#[test]
fn test_json_delete() -> Result<()> {
let uuid = Uuid::new_v4();
let old_task = vec![("foo".into(), "bar".into())].drain(..).collect();
let op = Delete { uuid, old_task };
let json = serde_json::to_string(&op)?;
assert_eq!(
json,
format!(
r#"{{"Delete":{{"uuid":"{}","old_task":{{"foo":"bar"}}}}}}"#,
uuid
)
);
let deser: Operation = serde_json::from_str(&json)?;
assert_eq!(deser, op);
Ok(())
}
#[test]
fn test_json_update() -> Result<()> {
let uuid = Uuid::new_v4();
let timestamp = Utc::now();
let op = Update {
uuid,
property: "abc".into(),
old_value: Some("true".into()),
value: Some("false".into()),
timestamp,
};
let json = serde_json::to_string(&op)?;
assert_eq!(
json,
format!(
r#"{{"Update":{{"uuid":"{}","property":"abc","old_value":"true","value":"false","timestamp":"{:?}"}}}}"#,
uuid, timestamp,
)
);
let deser: Operation = serde_json::from_str(&json)?;
assert_eq!(deser, op);
Ok(())
}
#[test]
fn test_json_update_none() -> Result<()> {
let uuid = Uuid::new_v4();
let timestamp = Utc::now();
let op = Update {
uuid,
property: "abc".into(),
old_value: None,
value: None,
timestamp,
};
let json = serde_json::to_string(&op)?;
assert_eq!(
json,
format!(
r#"{{"Update":{{"uuid":"{}","property":"abc","old_value":null,"value":null,"timestamp":"{:?}"}}}}"#,
uuid, timestamp,
)
);
let deser: Operation = serde_json::from_str(&json)?;
assert_eq!(deser, op);
Ok(())
}
#[test]
fn op_order() {
let mut uuid1 = Uuid::new_v4();
let mut uuid2 = Uuid::new_v4();
if uuid2 < uuid1 {
(uuid1, uuid2) = (uuid2, uuid1);
}
let now1 = Utc::now();
let now2 = now1 + chrono::Duration::seconds(1);
let create1 = Operation::Create { uuid: uuid1 };
let create2 = Operation::Create { uuid: uuid2 };
let update1 = Operation::Update {
uuid: uuid1,
property: "prop1".into(),
old_value: None,
value: None,
timestamp: now1,
};
let update2_now1_prop1_val1 = Operation::Update {
uuid: uuid2,
property: "prop1".into(),
old_value: None,
value: None,
timestamp: now1,
};
let update2_now1_prop1_val2 = Operation::Update {
uuid: uuid2,
property: "prop1".into(),
old_value: None,
value: Some("v".into()),
timestamp: now1,
};
let update2_now1_prop2_val1 = Operation::Update {
uuid: uuid2,
property: "prop2".into(),
old_value: None,
value: None,
timestamp: now1,
};
let update2_now1_prop2_val2 = Operation::Update {
uuid: uuid2,
property: "prop2".into(),
old_value: None,
value: Some("v".into()),
timestamp: now1,
};
let update2_now2_prop1_val1 = Operation::Update {
uuid: uuid2,
property: "prop1".into(),
old_value: None,
value: None,
timestamp: now2,
};
let update2_now2_prop1_val2 = Operation::Update {
uuid: uuid2,
property: "prop1".into(),
old_value: None,
value: Some("v".into()),
timestamp: now2,
};
let update2_now2_prop2_val1 = Operation::Update {
uuid: uuid2,
property: "prop2".into(),
old_value: None,
value: None,
timestamp: now2,
};
let update2_now2_prop2_val2_oldval1 = Operation::Update {
uuid: uuid2,
property: "prop2".into(),
old_value: None,
value: Some("v".into()),
timestamp: now2,
};
let update2_now2_prop2_val2_oldval2 = Operation::Update {
uuid: uuid2,
property: "prop2".into(),
old_value: Some("v2".into()),
value: Some("v".into()),
timestamp: now2,
};
let update2_now2_prop2_val2_oldval3 = Operation::Update {
uuid: uuid2,
property: "prop2".into(),
old_value: Some("v3".into()),
value: Some("v".into()),
timestamp: now2,
};
let delete1 = Operation::Delete {
uuid: uuid1,
old_task: TaskMap::from([("a".to_string(), "a".to_string())]),
};
let delete1b = Operation::Delete {
uuid: uuid1,
old_task: TaskMap::from([("b".to_string(), "b".to_string())]),
};
let delete2 = Operation::Delete {
uuid: uuid2,
old_task: TaskMap::from([("a".to_string(), "a".to_string())]),
};
let undo_point = Operation::UndoPoint;
// Specify order all of these operations should be in.
let total_order = vec![
undo_point,
create1,
update1,
delete1,
delete1b,
create2,
update2_now1_prop1_val1,
update2_now1_prop1_val2,
update2_now1_prop2_val1,
update2_now1_prop2_val2,
update2_now2_prop1_val1,
update2_now2_prop1_val2,
update2_now2_prop2_val1,
update2_now2_prop2_val2_oldval1,
update2_now2_prop2_val2_oldval2,
update2_now2_prop2_val2_oldval3,
delete2,
];
// Check that each operation compares the same as the comparison of its index. This is more
// thorough than just sorting a list, which would not perform every pairwise comparison.
for i in 0..total_order.len() {
for j in 0..total_order.len() {
let a = &total_order[i];
let b = &total_order[j];
assert_eq!(a.cmp(b), i.cmp(&j), "{a:?} <??> {b:?} ([{i}] <??> [{j}])");
}
}
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/storage/config.rs | src/storage/config.rs | #[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AccessMode {
ReadOnly,
ReadWrite,
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/storage/test.rs | src/storage/test.rs | //! Tests for storage backends. This tests consistency across multiple method calls, to ensure that
//! all implementations are consistent.
use super::{Storage, TaskMap};
use crate::errors::Result;
use crate::storage::{taskmap_with, DEFAULT_BASE_VERSION};
use crate::Operation;
use chrono::Utc;
use pretty_assertions::assert_eq;
use uuid::Uuid;
/// Define a collection of storage tests that apply to all storage implementations.
macro_rules! storage_tests_base {
($storage:expr, $macro:meta) => {
#[$macro]
async fn get_working_set_empty() -> $crate::errors::Result<()> {
$crate::storage::test::get_working_set_empty($storage).await
}
#[$macro]
async fn add_to_working_set() -> $crate::errors::Result<()> {
$crate::storage::test::add_to_working_set($storage).await
}
#[$macro]
async fn clear_working_set() -> $crate::errors::Result<()> {
$crate::storage::test::clear_working_set($storage).await
}
#[$macro]
async fn drop_transaction() -> $crate::errors::Result<()> {
$crate::storage::test::drop_transaction($storage).await
}
#[$macro]
async fn create() -> $crate::errors::Result<()> {
$crate::storage::test::create($storage).await
}
#[$macro]
async fn create_exists() -> $crate::errors::Result<()> {
$crate::storage::test::create_exists($storage).await
}
#[$macro]
async fn get_missing() -> $crate::errors::Result<()> {
$crate::storage::test::get_missing($storage).await
}
#[$macro]
async fn set_task() -> $crate::errors::Result<()> {
$crate::storage::test::set_task($storage).await
}
#[$macro]
async fn delete_task_missing() -> $crate::errors::Result<()> {
$crate::storage::test::delete_task_missing($storage).await
}
#[$macro]
async fn delete_task_exists() -> $crate::errors::Result<()> {
$crate::storage::test::delete_task_exists($storage).await
}
#[$macro]
async fn all_tasks_empty() -> $crate::errors::Result<()> {
$crate::storage::test::all_tasks_empty($storage).await
}
#[$macro]
async fn all_tasks_and_uuids() -> $crate::errors::Result<()> {
$crate::storage::test::all_tasks_and_uuids($storage).await
}
#[$macro]
async fn pending_tasks_empty() -> $crate::errors::Result<()> {
$crate::storage::test::pending_tasks_empty($storage).await
}
#[$macro]
async fn pending_tasks() -> $crate::errors::Result<()> {
$crate::storage::test::pending_tasks($storage).await
}
#[$macro]
async fn base_version_default() -> Result<()> {
$crate::storage::test::base_version_default($storage).await
}
#[$macro]
async fn base_version_setting() -> Result<()> {
$crate::storage::test::base_version_setting($storage).await
}
#[$macro]
async fn unsynced_operations() -> Result<()> {
$crate::storage::test::unsynced_operations($storage).await
}
#[$macro]
async fn remove_operations() -> Result<()> {
$crate::storage::test::remove_operations($storage).await
}
#[$macro]
async fn task_operations() -> Result<()> {
$crate::storage::test::task_operations($storage).await
}
#[$macro]
async fn sync_complete() -> Result<()> {
$crate::storage::test::sync_complete($storage).await
}
#[$macro]
async fn set_working_set_item() -> Result<()> {
$crate::storage::test::set_working_set_item($storage).await
}
};
}
pub(crate) use storage_tests_base;
/// Invoke storage_tests_base with the regular `#[tokio::test]` macro.
macro_rules! storage_tests {
($storage:expr) => {
use $crate::storage::test::storage_tests_base;
storage_tests_base!($storage, tokio::test);
};
}
pub(crate) use storage_tests;
/// Invoke storage_tests_base with the `#[wasm_bindgen_test]` macro.
#[cfg(target_arch = "wasm32")]
macro_rules! storage_tests_wasm {
($storage:expr) => {
use $crate::storage::test::storage_tests_base;
storage_tests_base!($storage, wasm_bindgen_test);
};
}
#[cfg(target_arch = "wasm32")]
pub(crate) use storage_tests_wasm;
pub(super) async fn get_working_set_empty(mut storage: impl Storage) -> Result<()> {
{
let mut txn = storage.txn().await?;
let ws = txn.get_working_set().await?;
assert_eq!(ws, vec![None]);
}
Ok(())
}
pub(super) async fn add_to_working_set(mut storage: impl Storage) -> Result<()> {
let uuid1 = Uuid::new_v4();
let uuid2 = Uuid::new_v4();
{
let mut txn = storage.txn().await?;
txn.add_to_working_set(uuid1).await?;
txn.add_to_working_set(uuid2).await?;
txn.commit().await?;
}
{
let mut txn = storage.txn().await?;
let ws = txn.get_working_set().await?;
assert_eq!(ws, vec![None, Some(uuid1), Some(uuid2)]);
}
Ok(())
}
pub(super) async fn clear_working_set(mut storage: impl Storage) -> Result<()> {
let uuid1 = Uuid::new_v4();
let uuid2 = Uuid::new_v4();
{
let mut txn = storage.txn().await?;
txn.add_to_working_set(uuid1).await?;
txn.add_to_working_set(uuid2).await?;
txn.commit().await?;
}
{
let mut txn = storage.txn().await?;
txn.clear_working_set().await?;
txn.add_to_working_set(uuid2).await?;
txn.add_to_working_set(uuid1).await?;
txn.commit().await?;
}
{
let mut txn = storage.txn().await?;
let ws = txn.get_working_set().await?;
assert_eq!(ws, vec![None, Some(uuid2), Some(uuid1)]);
}
Ok(())
}
pub(super) async fn drop_transaction(mut storage: impl Storage) -> Result<()> {
let uuid1 = Uuid::new_v4();
let uuid2 = Uuid::new_v4();
{
let mut txn = storage.txn().await?;
assert!(txn.create_task(uuid1).await?);
txn.commit().await?;
}
{
let mut txn = storage.txn().await?;
assert!(txn.create_task(uuid2).await?);
std::mem::drop(txn); // Unnecessary explicit drop of transaction
}
{
let mut txn = storage.txn().await?;
let uuids = txn.all_task_uuids().await?;
assert_eq!(uuids, [uuid1]);
}
Ok(())
}
pub(super) async fn create(mut storage: impl Storage) -> Result<()> {
let uuid = Uuid::new_v4();
{
let mut txn = storage.txn().await?;
assert!(txn.create_task(uuid).await?);
txn.commit().await?;
}
{
let mut txn = storage.txn().await?;
let task = txn.get_task(uuid).await?;
assert_eq!(task, Some(taskmap_with(vec![])));
}
Ok(())
}
pub(super) async fn create_exists(mut storage: impl Storage) -> Result<()> {
let uuid = Uuid::new_v4();
{
let mut txn = storage.txn().await?;
assert!(txn.create_task(uuid).await?);
txn.commit().await?;
}
{
let mut txn = storage.txn().await?;
assert!(!txn.create_task(uuid).await?);
txn.commit().await?;
}
Ok(())
}
pub(super) async fn get_missing(mut storage: impl Storage) -> Result<()> {
let uuid = Uuid::new_v4();
{
let mut txn = storage.txn().await?;
let task = txn.get_task(uuid).await?;
assert_eq!(task, None);
}
Ok(())
}
pub(super) async fn set_task(mut storage: impl Storage) -> Result<()> {
let uuid = Uuid::new_v4();
{
let mut txn = storage.txn().await?;
txn.set_task(uuid, taskmap_with(vec![("k".to_string(), "v".to_string())]))
.await?;
txn.commit().await?;
}
{
let mut txn = storage.txn().await?;
let task = txn.get_task(uuid).await?;
assert_eq!(
task,
Some(taskmap_with(vec![("k".to_string(), "v".to_string())]))
);
}
Ok(())
}
pub(super) async fn delete_task_missing(mut storage: impl Storage) -> Result<()> {
let uuid = Uuid::new_v4();
{
let mut txn = storage.txn().await?;
assert!(!txn.delete_task(uuid).await?);
}
Ok(())
}
pub(super) async fn delete_task_exists(mut storage: impl Storage) -> Result<()> {
let uuid = Uuid::new_v4();
{
let mut txn = storage.txn().await?;
assert!(txn.create_task(uuid).await?);
txn.commit().await?;
}
{
let mut txn = storage.txn().await?;
assert!(txn.delete_task(uuid).await?);
}
Ok(())
}
pub(super) async fn all_tasks_empty(mut storage: impl Storage) -> Result<()> {
{
let mut txn = storage.txn().await?;
let tasks = txn.all_tasks().await?;
assert_eq!(tasks, vec![]);
}
Ok(())
}
pub(super) async fn all_tasks_and_uuids(mut storage: impl Storage) -> Result<()> {
let uuid1 = Uuid::new_v4();
let uuid2 = Uuid::new_v4();
{
let mut txn = storage.txn().await?;
assert!(txn.create_task(uuid1).await?);
txn.set_task(
uuid1,
taskmap_with(vec![("num".to_string(), "1".to_string())]),
)
.await?;
assert!(txn.create_task(uuid2).await?);
txn.set_task(
uuid2,
taskmap_with(vec![("num".to_string(), "2".to_string())]),
)
.await?;
txn.commit().await?;
}
{
let mut txn = storage.txn().await?;
let mut tasks = txn.all_tasks().await?;
// order is nondeterministic, so sort by uuid
tasks.sort_by(|a, b| a.0.cmp(&b.0));
let mut exp = vec![
(
uuid1,
taskmap_with(vec![("num".to_string(), "1".to_string())]),
),
(
uuid2,
taskmap_with(vec![("num".to_string(), "2".to_string())]),
),
];
exp.sort_by(|a, b| a.0.cmp(&b.0));
assert_eq!(tasks, exp);
}
{
let mut txn = storage.txn().await?;
let mut uuids = txn.all_task_uuids().await?;
uuids.sort();
let mut exp = vec![uuid1, uuid2];
exp.sort();
assert_eq!(uuids, exp);
}
Ok(())
}
pub(super) async fn pending_tasks_empty(mut storage: impl Storage) -> Result<()> {
{
let mut txn = storage.txn().await?;
let tasks = txn.get_pending_tasks().await?;
assert_eq!(tasks, vec![]);
}
Ok(())
}
pub(super) async fn pending_tasks(mut storage: impl Storage) -> Result<()> {
let uuids = [Uuid::new_v4(), Uuid::new_v4(), Uuid::new_v4()];
{
let mut txn = storage.txn().await?;
for (i, uuid) in uuids.iter().enumerate() {
assert!(txn.create_task(*uuid).await?);
txn.set_task(
*uuid,
taskmap_with(vec![("num".to_string(), i.to_string())]),
)
.await?;
}
// Put only uuids[1] and [2] and a UUID with no matching task in the working set.
txn.add_to_working_set(uuids[0]).await?;
txn.add_to_working_set(uuids[1]).await?;
txn.add_to_working_set(uuids[2]).await?;
txn.add_to_working_set(Uuid::new_v4()).await?;
txn.set_working_set_item(1, None).await?;
txn.commit().await?;
}
{
let mut txn = storage.txn().await?;
let mut tasks = txn.get_pending_tasks().await?;
// order is nondeterministic, so sort by uuid
tasks.sort_by(|a, b| a.0.cmp(&b.0));
let mut exp = vec![
(
uuids[1],
taskmap_with(vec![("num".to_string(), "1".to_string())]),
),
(
uuids[2],
taskmap_with(vec![("num".to_string(), "2".to_string())]),
),
];
exp.sort_by(|a, b| a.0.cmp(&b.0));
assert_eq!(tasks, exp);
}
Ok(())
}
pub(super) async fn base_version_default(mut storage: impl Storage) -> Result<()> {
{
let mut txn = storage.txn().await?;
assert_eq!(txn.base_version().await?, DEFAULT_BASE_VERSION);
}
Ok(())
}
pub(super) async fn base_version_setting(mut storage: impl Storage) -> Result<()> {
let u = Uuid::new_v4();
{
let mut txn = storage.txn().await?;
txn.set_base_version(u).await?;
txn.commit().await?;
}
{
let mut txn = storage.txn().await?;
assert_eq!(txn.base_version().await?, u);
}
Ok(())
}
pub(super) async fn unsynced_operations(mut storage: impl Storage) -> Result<()> {
let uuid1 = Uuid::new_v4();
let uuid2 = Uuid::new_v4();
let uuid3 = Uuid::new_v4();
// create some operations
{
let mut txn = storage.txn().await?;
txn.add_operation(Operation::Create { uuid: uuid1 }).await?;
txn.add_operation(Operation::Create { uuid: uuid2 }).await?;
txn.commit().await?;
}
// read them back
{
let mut txn = storage.txn().await?;
let ops = txn.unsynced_operations().await?;
assert_eq!(
ops,
vec![
Operation::Create { uuid: uuid1 },
Operation::Create { uuid: uuid2 },
]
);
assert_eq!(txn.num_unsynced_operations().await?, 2);
}
// Sync them.
{
let mut txn = storage.txn().await?;
txn.sync_complete().await?;
txn.commit().await?;
}
// create some more operations (to test adding operations after sync)
{
let mut txn = storage.txn().await?;
txn.add_operation(Operation::Create { uuid: uuid3 }).await?;
txn.add_operation(Operation::Delete {
uuid: uuid3,
old_task: TaskMap::new(),
})
.await?;
txn.commit().await?;
}
// read them back
{
let mut txn = storage.txn().await?;
let ops = txn.unsynced_operations().await?;
assert_eq!(
ops,
vec![
Operation::Create { uuid: uuid3 },
Operation::Delete {
uuid: uuid3,
old_task: TaskMap::new()
},
]
);
assert_eq!(txn.num_unsynced_operations().await?, 2);
}
// Remove the right one
{
let mut txn = storage.txn().await?;
txn.remove_operation(Operation::Delete {
uuid: uuid3,
old_task: TaskMap::new(),
})
.await?;
txn.commit().await?;
}
// read the remaining op back
{
let mut txn = storage.txn().await?;
let ops = txn.unsynced_operations().await?;
assert_eq!(ops, vec![Operation::Create { uuid: uuid3 },]);
assert_eq!(txn.num_unsynced_operations().await?, 1);
}
Ok(())
}
pub(super) async fn remove_operations(mut storage: impl Storage) -> Result<()> {
let uuid1 = Uuid::new_v4();
let uuid2 = Uuid::new_v4();
let uuid3 = Uuid::new_v4();
let uuid4 = Uuid::new_v4();
// Create some tasks and operations.
{
let mut txn = storage.txn().await?;
txn.create_task(uuid1).await?;
txn.create_task(uuid2).await?;
txn.create_task(uuid3).await?;
txn.add_operation(Operation::Create { uuid: uuid1 }).await?;
txn.add_operation(Operation::Create { uuid: uuid2 }).await?;
txn.add_operation(Operation::Create { uuid: uuid3 }).await?;
txn.commit().await?;
}
// Remove the uuid3 operation.
{
let mut txn = storage.txn().await?;
txn.remove_operation(Operation::Create { uuid: uuid3 })
.await?;
assert_eq!(txn.num_unsynced_operations().await?, 2);
txn.commit().await?;
}
// Remove a nonexistent operation
{
let mut txn = storage.txn().await?;
assert!(txn
.remove_operation(Operation::Create { uuid: uuid4 })
.await
.is_err());
}
// Remove an operation that is not most recent.
{
let mut txn = storage.txn().await?;
assert!(txn
.remove_operation(Operation::Create { uuid: uuid1 })
.await
.is_err());
}
// Mark operations as synced.
{
let mut txn = storage.txn().await?;
txn.sync_complete().await?;
txn.commit().await?;
}
// Try to remove the synced operation.
{
let mut txn = storage.txn().await?;
assert!(txn
.remove_operation(Operation::Create { uuid: uuid2 })
.await
.is_err());
}
Ok(())
}
pub(super) async fn task_operations(mut storage: impl Storage) -> Result<()> {
let uuid1 = Uuid::new_v4();
let uuid2 = Uuid::new_v4();
let uuid3 = Uuid::new_v4();
let now = Utc::now();
// Create some tasks and operations.
{
let mut txn = storage.txn().await?;
txn.create_task(uuid1).await?;
txn.create_task(uuid2).await?;
txn.create_task(uuid3).await?;
txn.add_operation(Operation::UndoPoint).await?;
txn.add_operation(Operation::Create { uuid: uuid1 }).await?;
txn.add_operation(Operation::Create { uuid: uuid1 }).await?;
txn.add_operation(Operation::UndoPoint).await?;
txn.add_operation(Operation::Delete {
uuid: uuid2,
old_task: TaskMap::new(),
})
.await?;
txn.add_operation(Operation::Update {
uuid: uuid3,
property: "p".into(),
old_value: None,
value: Some("P".into()),
timestamp: now,
})
.await?;
txn.add_operation(Operation::Delete {
uuid: uuid3,
old_task: TaskMap::new(),
})
.await?;
txn.commit().await?;
}
// remove the last operation to verify it doesn't appear
{
let mut txn = storage.txn().await?;
txn.remove_operation(Operation::Delete {
uuid: uuid3,
old_task: TaskMap::new(),
})
.await?;
txn.commit().await?;
}
// read them back
{
let mut txn = storage.txn().await?;
let ops = txn.get_task_operations(uuid1).await?;
assert_eq!(
ops,
vec![
Operation::Create { uuid: uuid1 },
Operation::Create { uuid: uuid1 },
]
);
let ops = txn.get_task_operations(uuid2).await?;
assert_eq!(
ops,
vec![Operation::Delete {
uuid: uuid2,
old_task: TaskMap::new()
}]
);
let ops = txn.get_task_operations(uuid3).await?;
assert_eq!(
ops,
vec![Operation::Update {
uuid: uuid3,
property: "p".into(),
old_value: None,
value: Some("P".into()),
timestamp: now,
}]
);
}
// Sync and verify the task operations still exist.
{
let mut txn = storage.txn().await?;
txn.sync_complete().await?;
let ops = txn.get_task_operations(uuid1).await?;
assert_eq!(ops.len(), 2);
let ops = txn.get_task_operations(uuid2).await?;
assert_eq!(ops.len(), 1);
let ops = txn.get_task_operations(uuid3).await?;
assert_eq!(ops.len(), 1);
}
Ok(())
}
pub(super) async fn sync_complete(mut storage: impl Storage) -> Result<()> {
let uuid1 = Uuid::new_v4();
let uuid2 = Uuid::new_v4();
// Create some tasks and operations.
{
let mut txn = storage.txn().await?;
txn.create_task(uuid1).await?;
txn.create_task(uuid2).await?;
txn.add_operation(Operation::Create { uuid: uuid1 }).await?;
txn.add_operation(Operation::Create { uuid: uuid2 }).await?;
txn.commit().await?;
}
// Sync and verify the task operations still exist.
{
let mut txn = storage.txn().await?;
txn.sync_complete().await?;
let ops = txn.get_task_operations(uuid1).await?;
assert_eq!(ops.len(), 1);
let ops = txn.get_task_operations(uuid2).await?;
assert_eq!(ops.len(), 1);
}
// Delete uuid2.
{
let mut txn = storage.txn().await?;
txn.delete_task(uuid2).await?;
txn.add_operation(Operation::Delete {
uuid: uuid2,
old_task: TaskMap::new(),
})
.await?;
txn.commit().await?;
}
// Sync and verify that uuid1's operations still exist, but uuid2's do not.
{
let mut txn = storage.txn().await?;
txn.sync_complete().await?;
let ops = txn.get_task_operations(uuid1).await?;
assert_eq!(ops.len(), 1);
let ops = txn.get_task_operations(uuid2).await?;
assert_eq!(ops.len(), 0);
}
Ok(())
}
pub(super) async fn set_working_set_item(mut storage: impl Storage) -> Result<()> {
let uuid1 = Uuid::new_v4();
let uuid2 = Uuid::new_v4();
{
let mut txn = storage.txn().await?;
txn.add_to_working_set(uuid1).await?;
txn.add_to_working_set(uuid2).await?;
txn.commit().await?;
}
{
let mut txn = storage.txn().await?;
let ws = txn.get_working_set().await?;
assert_eq!(ws, vec![None, Some(uuid1), Some(uuid2)]);
}
// Clear one item
{
let mut txn = storage.txn().await?;
txn.set_working_set_item(1, None).await?;
txn.commit().await?;
}
{
let mut txn = storage.txn().await?;
let ws = txn.get_working_set().await?;
assert_eq!(ws, vec![None, None, Some(uuid2)]);
}
// Override item
{
let mut txn = storage.txn().await?;
txn.set_working_set_item(2, Some(uuid1)).await?;
txn.commit().await?;
}
{
let mut txn = storage.txn().await?;
let ws = txn.get_working_set().await?;
assert_eq!(ws, vec![None, None, Some(uuid1)]);
}
// Set the last item to None
{
let mut txn = storage.txn().await?;
txn.set_working_set_item(1, Some(uuid1)).await?;
txn.set_working_set_item(2, None).await?;
txn.commit().await?;
}
{
let mut txn = storage.txn().await?;
let ws = txn.get_working_set().await?;
// Note no trailing `None`.
assert_eq!(ws, vec![None, Some(uuid1)]);
}
Ok(())
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/storage/mod.rs | src/storage/mod.rs | /*!
This module defines the backend storage used by [`Replica`](crate::Replica).
It defines a [trait](crate::storage::Storage) for storage implementations, and provides a default
on-disk implementation as well as an in-memory implementation for testing.
Typical uses of this crate do not interact directly with this module. However, users who wish to
implement their own storage backends can implement the traits defined here and pass the result to
[`Replica`](crate::Replica).
*/
use crate::errors::Result;
use crate::operation::Operation;
use async_trait::async_trait;
use std::collections::HashMap;
use uuid::Uuid;
mod config;
#[cfg(all(target_arch = "wasm32", feature = "storage-indexeddb"))]
pub mod indexeddb;
pub mod inmemory;
#[cfg(feature = "storage-sqlite")]
pub mod sqlite;
#[cfg(test)]
mod test;
pub use config::AccessMode;
#[cfg(any(
feature = "storage-sqlite",
all(target_arch = "wasm32", feature = "storage-indexeddb")
))]
mod send_wrapper;
#[doc(hidden)]
/// For compatibility with 0.6 and earlier, [`Operation`] is re-exported here.
pub use crate::Operation as ReplicaOp;
/// An in-memory representation of a task as a simple hashmap
pub type TaskMap = HashMap<String, String>;
#[cfg(test)]
pub(crate) fn taskmap_with(mut properties: Vec<(String, String)>) -> TaskMap {
let mut rv = TaskMap::new();
for (p, v) in properties.drain(..) {
rv.insert(p, v);
}
rv
}
/// The type of VersionIds
use crate::server::VersionId;
/// The default for base_version, if none exists in the DB.
const DEFAULT_BASE_VERSION: Uuid = crate::server::NIL_VERSION_ID;
/// A Storage transaction, in which storage operations are performed.
///
/// # Concurrency
///
/// Serializable consistency must be maintained. Concurrent access is unusual
/// and some implementations may simply apply a mutex to limit access to
/// one transaction at a time.
///
/// # Commiting and Aborting
///
/// A transaction is not visible to other readers until it is committed with
/// [`crate::storage::StorageTxn::commit`]. Transactions are aborted if they are dropped.
/// It is safe and performant to drop transactions that did not modify any data without committing.
#[async_trait]
pub trait StorageTxn: Send {
/// Get an (immutable) task, if it is in the storage
async fn get_task(&mut self, uuid: Uuid) -> Result<Option<TaskMap>>;
/// Get a vector of all pending tasks from the working_set
async fn get_pending_tasks(&mut self) -> Result<Vec<(Uuid, TaskMap)>>;
/// Create an (empty) task, only if it does not already exist. Returns true if
/// the task was created (did not already exist).
async fn create_task(&mut self, uuid: Uuid) -> Result<bool>;
/// Set a task, overwriting any existing task. If the task does not exist, this implicitly
/// creates it (use `get_task` to check first, if necessary).
async fn set_task(&mut self, uuid: Uuid, task: TaskMap) -> Result<()>;
/// Delete a task, if it exists. Returns true if the task was deleted (already existed)
async fn delete_task(&mut self, uuid: Uuid) -> Result<bool>;
/// Get the uuids and bodies of all tasks in the storage, in undefined order.
async fn all_tasks(&mut self) -> Result<Vec<(Uuid, TaskMap)>>;
/// Get the uuids of all tasks in the storage, in undefined order.
async fn all_task_uuids(&mut self) -> Result<Vec<Uuid>>;
/// Get the current base_version for this storage -- the last version synced from the server.
/// If no version has been set, this returns the nil version.
async fn base_version(&mut self) -> Result<VersionId>;
/// Set the current base_version for this storage.
async fn set_base_version(&mut self, version: VersionId) -> Result<()>;
/// Get the set of operations for the given task.
async fn get_task_operations(&mut self, uuid: Uuid) -> Result<Vec<Operation>>;
/// Get the current set of outstanding operations (operations that have not been synced to the
/// server yet)
async fn unsynced_operations(&mut self) -> Result<Vec<Operation>>;
/// Get the current set of outstanding operations (operations that have not been synced to the
/// server yet)
async fn num_unsynced_operations(&mut self) -> Result<usize>;
/// Add an operation to the end of the list of operations in the storage. Note that this
/// merely *stores* the operation; it is up to the TaskDb to apply it.
async fn add_operation(&mut self, op: Operation) -> Result<()>;
/// Remove an operation from the end of the list of operations in the storage. The operation
/// must exactly match the most recent operation, and must not be synced. Note that like
/// `add_operation` this only affects the list of operations.
async fn remove_operation(&mut self, op: Operation) -> Result<()>;
/// A sync has been completed, so all operations should be marked as synced. The storage
/// may perform additional cleanup at this time.
async fn sync_complete(&mut self) -> Result<()>;
/// Get the entire working set, with each task UUID at its appropriate (1-based) index.
/// Element 0 is always None.
async fn get_working_set(&mut self) -> Result<Vec<Option<Uuid>>>;
/// Add a task to the working set and return its (one-based) index. This index will be one greater
/// than the highest used index.
async fn add_to_working_set(&mut self, uuid: Uuid) -> Result<usize>;
/// Update the working set task at the given index. This cannot add a new item to the
/// working set.
async fn set_working_set_item(&mut self, index: usize, uuid: Option<Uuid>) -> Result<()>;
/// Clear all tasks from the working set in preparation for a renumbering operation.
/// Note that this is the only way items are removed from the set.
async fn clear_working_set(&mut self) -> Result<()>;
/// Check whether this storage is entirely empty
#[allow(clippy::wrong_self_convention)] // mut is required here for storage access
async fn is_empty(&mut self) -> Result<bool> {
let mut empty = true;
empty = empty && self.all_tasks().await?.is_empty();
empty = empty && self.get_working_set().await? == vec![None];
empty = empty && self.base_version().await? == Uuid::nil();
empty = empty && self.unsynced_operations().await?.is_empty();
Ok(empty)
}
/// Commit any changes made in the transaction. It is an error to call this more than
/// once.
async fn commit(&mut self) -> Result<()>;
}
/// A trait for objects able to act as task storage. Most of the interesting behavior is in the
/// [`crate::storage::StorageTxn`] trait.
#[async_trait]
pub trait Storage: Send {
/// Begin a transaction
async fn txn<'a>(&'a mut self) -> Result<Box<dyn StorageTxn + Send + 'a>>;
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/storage/inmemory.rs | src/storage/inmemory.rs | #![allow(clippy::new_without_default)]
use crate::errors::{Error, Result};
use crate::operation::Operation;
use crate::storage::{Storage, StorageTxn, TaskMap, VersionId, DEFAULT_BASE_VERSION};
use async_trait::async_trait;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use uuid::Uuid;
#[derive(PartialEq, Debug, Clone)]
struct Data {
tasks: HashMap<Uuid, TaskMap>,
base_version: VersionId,
operations: Vec<(bool, Operation)>,
working_set: Vec<Option<Uuid>>,
}
struct Txn<'t> {
storage: &'t mut InMemoryStorage,
new_data: Option<Data>,
}
impl Txn<'_> {
fn mut_data_ref(&mut self) -> &mut Data {
if self.new_data.is_none() {
self.new_data = Some(self.storage.data.clone());
}
if let Some(ref mut data) = self.new_data {
data
} else {
unreachable!();
}
}
fn data_ref(&mut self) -> &Data {
if let Some(ref data) = self.new_data {
data
} else {
&self.storage.data
}
}
// Remove any "None" items from the end of the working set.
fn normalize_working_set(&mut self) {
let working_set = &mut self.mut_data_ref().working_set;
while let Some(None) = &working_set[1..].last() {
working_set.pop();
}
}
}
#[async_trait]
impl StorageTxn for Txn<'_> {
async fn get_task(&mut self, uuid: Uuid) -> Result<Option<TaskMap>> {
match self.data_ref().tasks.get(&uuid) {
None => Ok(None),
Some(t) => Ok(Some(t.clone())),
}
}
async fn get_pending_tasks(&mut self) -> Result<Vec<(Uuid, TaskMap)>> {
let res = self
.get_working_set()
.await?
.iter()
.filter_map(|uuid| {
// Since uuid is wrapped in an Option and get(&inner_uuid)
// also returns an Option, the resulting type will be
// Option<Option<(Uuid, TaskMap)>>. To turn that into
// an Option<(Uuid, TaskMap)>, flatten is called
uuid.map(|inner_uuid| {
self.data_ref()
.tasks
.get(&inner_uuid)
.map(|taskmap| (inner_uuid, taskmap.clone()))
})
.flatten()
})
.collect::<Vec<_>>();
Ok(res)
}
async fn create_task(&mut self, uuid: Uuid) -> Result<bool> {
if let ent @ Entry::Vacant(_) = self.mut_data_ref().tasks.entry(uuid) {
ent.or_insert_with(TaskMap::new);
Ok(true)
} else {
Ok(false)
}
}
async fn set_task(&mut self, uuid: Uuid, task: TaskMap) -> Result<()> {
self.mut_data_ref().tasks.insert(uuid, task);
Ok(())
}
async fn delete_task(&mut self, uuid: Uuid) -> Result<bool> {
Ok(self.mut_data_ref().tasks.remove(&uuid).is_some())
}
async fn all_tasks(&mut self) -> Result<Vec<(Uuid, TaskMap)>> {
Ok(self
.data_ref()
.tasks
.iter()
.map(|(u, t)| (*u, t.clone()))
.collect())
}
async fn all_task_uuids(&mut self) -> Result<Vec<Uuid>> {
Ok(self.data_ref().tasks.keys().copied().collect())
}
async fn base_version(&mut self) -> Result<VersionId> {
Ok(self.data_ref().base_version)
}
async fn set_base_version(&mut self, version: VersionId) -> Result<()> {
self.mut_data_ref().base_version = version;
Ok(())
}
async fn get_task_operations(&mut self, uuid: Uuid) -> Result<Vec<Operation>> {
Ok(self
.data_ref()
.operations
.iter()
.filter(|(_, op)| op.get_uuid() == Some(uuid))
.map(|(_, op)| op.clone())
.collect())
}
async fn unsynced_operations(&mut self) -> Result<Vec<Operation>> {
Ok(self
.data_ref()
.operations
.iter()
.filter(|(synced, _)| !synced)
.map(|(_, op)| op.clone())
.collect())
}
async fn num_unsynced_operations(&mut self) -> Result<usize> {
Ok(self
.data_ref()
.operations
.iter()
.filter(|(synced, _)| !synced)
.count())
}
async fn add_operation(&mut self, op: Operation) -> Result<()> {
self.mut_data_ref().operations.push((false, op));
Ok(())
}
async fn remove_operation(&mut self, op: Operation) -> Result<()> {
if let Some((synced, last_op)) = self.data_ref().operations.last() {
if *synced {
return Err(Error::Database(
"Last operation has been synced -- cannot remove".to_string(),
));
}
if last_op == &op {
self.mut_data_ref().operations.pop();
return Ok(());
}
}
Err(Error::Database(
"Last operation does not match -- cannot remove".to_string(),
))
}
async fn sync_complete(&mut self) -> Result<()> {
let data = self.data_ref();
// Mark all operations as synced, but drop operations which no longer have a
// corresponding task.
let new_operations = data
.operations
.iter()
.filter(|(_, op)| {
if let Some(uuid) = op.get_uuid() {
data.tasks.contains_key(&uuid)
} else {
true
}
})
.map(|(_, op)| (true, op.clone()))
.collect();
self.mut_data_ref().operations = new_operations;
Ok(())
}
async fn get_working_set(&mut self) -> Result<Vec<Option<Uuid>>> {
Ok(self.data_ref().working_set.clone())
}
async fn add_to_working_set(&mut self, uuid: Uuid) -> Result<usize> {
let working_set = &mut self.mut_data_ref().working_set;
working_set.push(Some(uuid));
Ok(working_set.len())
}
async fn set_working_set_item(&mut self, index: usize, uuid: Option<Uuid>) -> Result<()> {
let working_set = &mut self.mut_data_ref().working_set;
if index >= working_set.len() {
return Err(Error::Database(format!(
"Index {index} is not in the working set"
)));
}
working_set[index] = uuid;
self.normalize_working_set();
Ok(())
}
async fn clear_working_set(&mut self) -> Result<()> {
self.mut_data_ref().working_set = vec![None];
Ok(())
}
async fn commit(&mut self) -> Result<()> {
// copy the new_data back into storage to commit the transaction
if let Some(data) = self.new_data.take() {
self.storage.data = data;
}
Ok(())
}
}
/// InMemoryStorage is a simple in-memory task storage implementation. It is not useful for
/// production data, but is useful for testing purposes.
#[derive(PartialEq, Debug, Clone)]
pub struct InMemoryStorage {
data: Data,
}
impl InMemoryStorage {
pub fn new() -> InMemoryStorage {
InMemoryStorage {
data: Data {
tasks: HashMap::new(),
base_version: DEFAULT_BASE_VERSION,
operations: vec![],
working_set: vec![None],
},
}
}
}
#[async_trait]
impl Storage for InMemoryStorage {
async fn txn<'a>(&'a mut self) -> Result<Box<dyn StorageTxn + Send + 'a>> {
Ok(Box::new(Txn {
storage: self,
new_data: None,
}))
}
}
#[cfg(test)]
mod test {
use super::*;
async fn storage() -> InMemoryStorage {
InMemoryStorage::new()
}
crate::storage::test::storage_tests!(storage().await);
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/storage/sqlite/inner.rs | src/storage/sqlite/inner.rs | use crate::errors::{Error, Result};
use crate::operation::Operation;
use crate::storage::config::AccessMode;
use crate::storage::send_wrapper::{WrappedStorage, WrappedStorageTxn};
use crate::storage::sqlite::{schema, SqliteError, StoredUuid};
use crate::storage::{TaskMap, VersionId, DEFAULT_BASE_VERSION};
use anyhow::Context;
use async_trait::async_trait;
use rusqlite::types::{FromSql, ToSql};
use rusqlite::{params, Connection, OpenFlags, OptionalExtension, TransactionBehavior};
use std::path::Path;
use uuid::Uuid;
/// Wraps [`TaskMap`] (type alias for HashMap) so we can implement rusqlite conversion traits for it
struct StoredTaskMap(TaskMap);
/// Parses TaskMap stored as JSON in string column
impl FromSql for StoredTaskMap {
fn column_result(value: rusqlite::types::ValueRef<'_>) -> rusqlite::types::FromSqlResult<Self> {
let o: TaskMap = serde_json::from_str(value.as_str()?)
.map_err(|_| rusqlite::types::FromSqlError::InvalidType)?;
Ok(StoredTaskMap(o))
}
}
/// Stores TaskMap in string column
impl ToSql for StoredTaskMap {
fn to_sql(&self) -> rusqlite::Result<rusqlite::types::ToSqlOutput<'_>> {
let s = serde_json::to_string(&self.0)
.map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e)))?;
Ok(s.into())
}
}
/// Stores [`Operation`] in SQLite
impl FromSql for Operation {
fn column_result(value: rusqlite::types::ValueRef<'_>) -> rusqlite::types::FromSqlResult<Self> {
let o: Operation = serde_json::from_str(value.as_str()?)
.map_err(|_| rusqlite::types::FromSqlError::InvalidType)?;
Ok(o)
}
}
/// Parses Operation stored as JSON in string column
impl ToSql for Operation {
fn to_sql(&self) -> rusqlite::Result<rusqlite::types::ToSqlOutput<'_>> {
let s = serde_json::to_string(&self)
.map_err(|e| rusqlite::Error::ToSqlConversionFailure(Box::new(e)))?;
Ok(s.into())
}
}
/// SqliteStorage is an on-disk storage backed by SQLite3.
pub(super) struct SqliteStorageInner {
con: Connection,
access_mode: AccessMode,
}
impl SqliteStorageInner {
pub(super) fn new<P: AsRef<Path>>(
directory: P,
access_mode: AccessMode,
create_if_missing: bool,
) -> Result<SqliteStorageInner> {
let directory = directory.as_ref();
if create_if_missing {
// Ensure parent folder exists
std::fs::create_dir_all(directory).map_err(|e| {
Error::Database(format!("Cannot create directory {directory:?}: {e}"))
})?;
}
// Open (or create) database
let db_file = directory.join("taskchampion.sqlite3");
let mut flags = OpenFlags::default();
// Determine the mode in which to open the DB itself, using read-write mode
// for a non-existent DB to allow opening an empty DB in read-only mode.
let mut open_access_mode = access_mode;
if create_if_missing && access_mode == AccessMode::ReadOnly && !db_file.exists() {
open_access_mode = AccessMode::ReadWrite;
}
// default contains SQLITE_OPEN_CREATE, so remove it if we are not to
// create a DB when missing.
if !create_if_missing {
flags.remove(OpenFlags::SQLITE_OPEN_CREATE);
}
if open_access_mode == AccessMode::ReadOnly {
flags.remove(OpenFlags::SQLITE_OPEN_READ_WRITE);
flags.insert(OpenFlags::SQLITE_OPEN_READ_ONLY);
// SQLite does not allow create when opening read-only
flags.remove(OpenFlags::SQLITE_OPEN_CREATE);
}
let mut con = Connection::open_with_flags(db_file, flags)?;
// Initialize database
con.query_row("PRAGMA journal_mode=WAL", [], |_row| Ok(()))
.context("Setting journal_mode=WAL")?;
if open_access_mode == AccessMode::ReadWrite {
schema::upgrade_db(&mut con)?;
}
Ok(Self { access_mode, con })
}
}
#[async_trait(?Send)]
impl WrappedStorage for SqliteStorageInner {
async fn txn<'a>(&'a mut self) -> Result<Box<dyn WrappedStorageTxn + 'a>> {
let txn = self
.con
.transaction_with_behavior(TransactionBehavior::Immediate)?;
Ok(Box::new(Txn {
txn: Some(txn),
access_mode: self.access_mode,
}))
}
}
pub(super) struct Txn<'t> {
txn: Option<rusqlite::Transaction<'t>>,
access_mode: AccessMode,
}
impl<'t> Txn<'t> {
fn check_write_access(&self) -> std::result::Result<(), SqliteError> {
if self.access_mode != AccessMode::ReadWrite {
Err(SqliteError::ReadOnlyStorage)
} else {
Ok(())
}
}
fn get_txn(&self) -> std::result::Result<&rusqlite::Transaction<'t>, SqliteError> {
self.txn
.as_ref()
.ok_or(SqliteError::TransactionAlreadyCommitted)
}
fn get_next_working_set_number(&self) -> Result<usize> {
let t = self.get_txn()?;
let next_id: Option<usize> = t
.query_row(
"SELECT COALESCE(MAX(id), 0) + 1 FROM working_set",
[],
|r| r.get(0),
)
.optional()
.context("Getting highest working set ID")?;
Ok(next_id.unwrap_or(0))
}
}
#[async_trait(?Send)]
impl WrappedStorageTxn for Txn<'_> {
async fn get_task(&mut self, uuid: Uuid) -> Result<Option<TaskMap>> {
let t = self.get_txn()?;
let result: Option<StoredTaskMap> = t
.query_row(
"SELECT data FROM tasks WHERE uuid = ? LIMIT 1",
[&StoredUuid(uuid)],
|r| r.get("data"),
)
.optional()?;
// Get task from "stored" wrapper
Ok(result.map(|t| t.0))
}
async fn get_pending_tasks(&mut self) -> Result<Vec<(Uuid, TaskMap)>> {
let t = self.get_txn()?;
let mut q = t.prepare(
"SELECT tasks.* FROM tasks JOIN working_set ON tasks.uuid = working_set.uuid",
)?;
let rows = q.query_map([], |r| {
let uuid: StoredUuid = r.get("uuid")?;
let data: StoredTaskMap = r.get("data")?;
Ok((uuid.0, data.0))
})?;
let mut res = Vec::new();
for row in rows {
res.push(row?)
}
Ok(res)
}
async fn create_task(&mut self, uuid: Uuid) -> Result<bool> {
self.check_write_access()?;
let t = self.get_txn()?;
let count: usize = t.query_row(
"SELECT count(uuid) FROM tasks WHERE uuid = ?",
[&StoredUuid(uuid)],
|x| x.get(0),
)?;
if count > 0 {
return Ok(false);
}
let data = TaskMap::default();
t.execute(
"INSERT INTO tasks (uuid, data) VALUES (?, ?)",
params![&StoredUuid(uuid), &StoredTaskMap(data)],
)
.context("Create task query")?;
Ok(true)
}
async fn set_task(&mut self, uuid: Uuid, task: TaskMap) -> Result<()> {
self.check_write_access()?;
let t = self.get_txn()?;
t.execute(
"INSERT OR REPLACE INTO tasks (uuid, data) VALUES (?, ?)",
params![&StoredUuid(uuid), &StoredTaskMap(task)],
)
.context("Update task query")?;
Ok(())
}
async fn delete_task(&mut self, uuid: Uuid) -> Result<bool> {
self.check_write_access()?;
let t = self.get_txn()?;
let changed = t
.execute("DELETE FROM tasks WHERE uuid = ?", [&StoredUuid(uuid)])
.context("Delete task query")?;
Ok(changed > 0)
}
async fn all_tasks(&mut self) -> Result<Vec<(Uuid, TaskMap)>> {
let t = self.get_txn()?;
let mut q = t.prepare("SELECT uuid, data FROM tasks")?;
let rows = q.query_map([], |r| {
let uuid: StoredUuid = r.get("uuid")?;
let data: StoredTaskMap = r.get("data")?;
Ok((uuid.0, data.0))
})?;
let mut ret = vec![];
for r in rows {
ret.push(r?);
}
Ok(ret)
}
async fn all_task_uuids(&mut self) -> Result<Vec<Uuid>> {
let t = self.get_txn()?;
let mut q = t.prepare("SELECT uuid FROM tasks")?;
let rows = q.query_map([], |r| {
let uuid: StoredUuid = r.get("uuid")?;
Ok(uuid.0)
})?;
let mut ret = vec![];
for r in rows {
ret.push(r?);
}
Ok(ret)
}
async fn base_version(&mut self) -> Result<VersionId> {
let t = self.get_txn()?;
let version: Option<StoredUuid> = t
.query_row(
"SELECT value FROM sync_meta WHERE key = 'base_version'",
[],
|r| r.get("value"),
)
.optional()?;
Ok(version.map(|u| u.0).unwrap_or(DEFAULT_BASE_VERSION))
}
async fn set_base_version(&mut self, version: VersionId) -> Result<()> {
self.check_write_access()?;
let t = self.get_txn()?;
t.execute(
"INSERT OR REPLACE INTO sync_meta (key, value) VALUES (?, ?)",
params!["base_version", &StoredUuid(version)],
)
.context("Set base version")?;
Ok(())
}
async fn get_task_operations(&mut self, uuid: Uuid) -> Result<Vec<Operation>> {
let t = self.get_txn()?;
let mut q = t.prepare("SELECT data FROM operations where uuid=? ORDER BY id ASC")?;
let rows = q.query_map([&StoredUuid(uuid)], |r| {
let data: Operation = r.get("data")?;
Ok(data)
})?;
let mut ret = vec![];
for r in rows {
ret.push(r?);
}
Ok(ret)
}
async fn unsynced_operations(&mut self) -> Result<Vec<Operation>> {
let t = self.get_txn()?;
let mut q = t.prepare("SELECT data FROM operations WHERE NOT synced ORDER BY id ASC")?;
let rows = q.query_map([], |r| {
let data: Operation = r.get("data")?;
Ok(data)
})?;
let mut ret = vec![];
for r in rows {
ret.push(r?);
}
Ok(ret)
}
async fn num_unsynced_operations(&mut self) -> Result<usize> {
let t = self.get_txn()?;
let count: usize = t.query_row(
"SELECT count(*) FROM operations WHERE NOT synced",
[],
|x| x.get(0),
)?;
Ok(count)
}
async fn add_operation(&mut self, op: Operation) -> Result<()> {
self.check_write_access()?;
let t = self.get_txn()?;
t.execute("INSERT INTO operations (data) VALUES (?)", params![&op])
.context("Add operation query")?;
Ok(())
}
async fn remove_operation(&mut self, op: Operation) -> Result<()> {
self.check_write_access()?;
let t = self.get_txn()?;
let last: Option<(u32, Operation)> = t
.query_row(
"SELECT id, data FROM operations WHERE NOT synced ORDER BY id DESC LIMIT 1",
[],
|x| Ok((x.get(0)?, x.get(1)?)),
)
.optional()?;
// If there is a "last" operation, and it matches the given operation,
// remove it.
if let Some((last_id, last_op)) = last {
if last_op == op {
t.execute("DELETE FROM operations where id = ?", [last_id])
.context("Removing operation")?;
return Ok(());
}
}
Err(Error::Database(
"Last operation does not match -- cannot remove".to_string(),
))
}
async fn sync_complete(&mut self) -> Result<()> {
self.check_write_access()?;
let t = self.get_txn()?;
t.execute(
"UPDATE operations SET synced = true WHERE synced = false",
[],
)
.context("Marking operations as synced")?;
// Delete all operations for non-existent (usually, deleted) tasks.
t.execute(
r#"DELETE from operations
WHERE uuid IN (
SELECT operations.uuid FROM operations LEFT JOIN tasks ON operations.uuid = tasks.uuid WHERE tasks.uuid IS NULL
)"#,
[],
)
.context("Deleting orphaned operations")?;
Ok(())
}
async fn get_working_set(&mut self) -> Result<Vec<Option<Uuid>>> {
let t = self.get_txn()?;
let mut q = t.prepare("SELECT id, uuid FROM working_set ORDER BY id ASC")?;
let rows = q
.query_map([], |r| {
let id: usize = r.get("id")?;
let uuid: StoredUuid = r.get("uuid")?;
Ok((id, uuid.0))
})
.context("Get working set query")?;
let rows: Vec<std::result::Result<(usize, Uuid), _>> = rows.collect();
let mut res = Vec::with_capacity(rows.len());
for _ in 0..self
.get_next_working_set_number()
.context("Getting working set number")?
{
res.push(None);
}
for r in rows {
let (id, uuid) = r?;
res[id] = Some(uuid);
}
Ok(res)
}
async fn add_to_working_set(&mut self, uuid: Uuid) -> Result<usize> {
self.check_write_access()?;
let t = self.get_txn()?;
let next_working_id = self.get_next_working_set_number()?;
t.execute(
"INSERT INTO working_set (id, uuid) VALUES (?, ?)",
params![next_working_id, &StoredUuid(uuid)],
)
.context("Create task query")?;
Ok(next_working_id)
}
async fn set_working_set_item(&mut self, index: usize, uuid: Option<Uuid>) -> Result<()> {
self.check_write_access()?;
let t = self.get_txn()?;
match uuid {
// Add or override item
Some(uuid) => t.execute(
"INSERT OR REPLACE INTO working_set (id, uuid) VALUES (?, ?)",
params![index, &StoredUuid(uuid)],
),
// Setting to None removes the row from database
None => t.execute("DELETE FROM working_set WHERE id = ?", [index]),
}
.context("Set working set item query")?;
Ok(())
}
async fn clear_working_set(&mut self) -> Result<()> {
self.check_write_access()?;
let t = self.get_txn()?;
t.execute("DELETE FROM working_set", [])
.context("Clear working set query")?;
Ok(())
}
async fn commit(&mut self) -> Result<()> {
self.check_write_access()?;
let t = self
.txn
.take()
.ok_or(SqliteError::TransactionAlreadyCommitted)?;
t.commit().context("Committing transaction")?;
Ok(())
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::storage::taskmap_with;
use chrono::Utc;
use pretty_assertions::assert_eq;
use rstest::rstest;
use std::thread;
use std::time::Duration;
use tempfile::TempDir;
/// Manually create a 0_8_0 db, as based on a dump from an actual (test) user.
/// This is used to test in-place upgrading.
fn create_0_8_0_db(path: &Path) -> Result<()> {
let db_file = path.join("taskchampion.sqlite3");
let con = Connection::open(db_file)?;
con.query_row("PRAGMA journal_mode=WAL", [], |_row| Ok(()))
.context("Setting journal_mode=WAL")?;
let queries = vec![
r#"CREATE TABLE operations (id INTEGER PRIMARY KEY AUTOINCREMENT, data STRING);"#,
r#"INSERT INTO operations VALUES(1,'"UndoPoint"');"#,
r#"INSERT INTO operations VALUES(2,
'{"Create":{"uuid":"e2956511-fd47-4e40-926a-52616229c2fa"}}');"#,
r#"INSERT INTO operations VALUES(3,
'{"Update":{"uuid":"e2956511-fd47-4e40-926a-52616229c2fa",
"property":"description",
"old_value":null,
"value":"one",
"timestamp":"2024-08-25T19:06:11.840482523Z"}}');"#,
r#"INSERT INTO operations VALUES(4,
'{"Update":{"uuid":"e2956511-fd47-4e40-926a-52616229c2fa",
"property":"entry",
"old_value":null,
"value":"1724612771",
"timestamp":"2024-08-25T19:06:11.840497662Z"}}');"#,
r#"INSERT INTO operations VALUES(5,
'{"Update":{"uuid":"e2956511-fd47-4e40-926a-52616229c2fa",
"property":"modified",
"old_value":null,
"value":"1724612771",
"timestamp":"2024-08-25T19:06:11.840498973Z"}}');"#,
r#"INSERT INTO operations VALUES(6,
'{"Update":{"uuid":"e2956511-fd47-4e40-926a-52616229c2fa",
"property":"status",
"old_value":null,
"value":"pending",
"timestamp":"2024-08-25T19:06:11.840505346Z"}}');"#,
r#"INSERT INTO operations VALUES(7,'"UndoPoint"');"#,
r#"INSERT INTO operations VALUES(8,
'{"Create":{"uuid":"1d125b41-ee1d-49a7-9319-0506dee414f8"}}');"#,
r#"INSERT INTO operations VALUES(9,
'{"Update":{"uuid":"1d125b41-ee1d-49a7-9319-0506dee414f8",
"property":"dep_e2956511-fd47-4e40-926a-52616229c2fa",
"old_value":null,
"value":"x",
"timestamp":"2024-08-25T19:06:15.880952492Z"}}');"#,
r#"INSERT INTO operations VALUES(10,
'{"Update":{"uuid":"1d125b41-ee1d-49a7-9319-0506dee414f8",
"property":"depends",
"old_value":null,
"value":"e2956511-fd47-4e40-926a-52616229c2fa",
"timestamp":"2024-08-25T19:06:15.880969429Z"}}');"#,
r#"INSERT INTO operations VALUES(11,
'{"Update":{"uuid":"1d125b41-ee1d-49a7-9319-0506dee414f8",
"property":"description",
"old_value":null,
"value":"two",
"timestamp":"2024-08-25T19:06:15.880970972Z"}}');"#,
r#"INSERT INTO operations VALUES(12,
'{"Update":{"uuid":"1d125b41-ee1d-49a7-9319-0506dee414f8",
"property":"entry",
"old_value":null,
"value":"1724612775",
"timestamp":"2024-08-25T19:06:15.880974948Z"}}');"#,
r#"INSERT INTO operations VALUES(13,
'{"Update":{"uuid":"1d125b41-ee1d-49a7-9319-0506dee414f8",
"property":"modified",
"old_value":null,
"value":"1724612775",
"timestamp":"2024-08-25T19:06:15.880976160Z"}}');"#,
r#"INSERT INTO operations VALUES(14,
'{"Update":{"uuid":"1d125b41-ee1d-49a7-9319-0506dee414f8",
"property":"status",
"old_value":null,
"value":"pending",
"timestamp":"2024-08-25T19:06:15.880977255Z"}}');"#,
r#"CREATE TABLE sync_meta (key STRING PRIMARY KEY, value STRING);"#,
r#"CREATE TABLE tasks (uuid STRING PRIMARY KEY, data STRING);"#,
r#"INSERT INTO tasks VALUES('e2956511-fd47-4e40-926a-52616229c2fa',
'{"status":"pending",
"entry":"1724612771",
"modified":"1724612771",
"description":"one"}');"#,
r#"INSERT INTO tasks VALUES('1d125b41-ee1d-49a7-9319-0506dee414f8',
'{"modified":"1724612775",
"status":"pending",
"description":"two",
"dep_e2956511-fd47-4e40-926a-52616229c2fa":"x",
"entry":"1724612775",
"depends":"e2956511-fd47-4e40-926a-52616229c2fa"}');"#,
r#"CREATE TABLE working_set (id INTEGER PRIMARY KEY, uuid STRING);"#,
r#"INSERT INTO working_set VALUES(1,'e2956511-fd47-4e40-926a-52616229c2fa');"#,
r#"INSERT INTO working_set VALUES(2,'1d125b41-ee1d-49a7-9319-0506dee414f8');"#,
r#"DELETE FROM sqlite_sequence;"#,
r#"INSERT INTO sqlite_sequence VALUES('operations',14);"#,
];
for q in queries {
con.execute(q, [])
.with_context(|| format!("executing {q}"))?;
}
Ok(())
}
#[tokio::test]
async fn test_empty_dir() -> Result<()> {
let tmp_dir = TempDir::new()?;
let non_existant = tmp_dir.path().join("subdir");
let mut storage =
SqliteStorageInner::new(non_existant.clone(), AccessMode::ReadWrite, true)?;
let uuid = Uuid::new_v4();
{
let mut txn = storage.txn().await?;
assert!(txn.create_task(uuid).await?);
txn.commit().await?;
}
{
let mut txn = storage.txn().await?;
let task = txn.get_task(uuid).await?;
assert_eq!(task, Some(taskmap_with(vec![])));
}
// Re-open the DB.
let mut storage = SqliteStorageInner::new(non_existant, AccessMode::ReadWrite, true)?;
{
let mut txn = storage.txn().await?;
let task = txn.get_task(uuid).await?;
assert_eq!(task, Some(taskmap_with(vec![])));
}
Ok(())
}
/// Test upgrading from a TaskChampion-0.8.0 database, ensuring that some basic task data
/// remains intact from that version. This provides a basic coverage test of all schema
/// upgrade functions.
#[tokio::test]
async fn test_0_8_0_db() -> Result<()> {
let tmp_dir = TempDir::new()?;
create_0_8_0_db(tmp_dir.path())?;
let mut storage = SqliteStorageInner::new(tmp_dir.path(), AccessMode::ReadWrite, true)?;
assert_eq!(
schema::get_db_version(&mut storage.con)?,
schema::LATEST_VERSION,
);
let one = Uuid::parse_str("e2956511-fd47-4e40-926a-52616229c2fa").unwrap();
let two = Uuid::parse_str("1d125b41-ee1d-49a7-9319-0506dee414f8").unwrap();
{
let mut txn = storage.txn().await?;
let mut task_one = txn.get_task(one).await?.unwrap();
assert_eq!(task_one.get("description").unwrap(), "one");
let task_two = txn.get_task(two).await?.unwrap();
assert_eq!(task_two.get("description").unwrap(), "two");
let ops = txn.unsynced_operations().await?;
assert_eq!(ops.len(), 14);
assert_eq!(ops[0], Operation::UndoPoint);
task_one.insert("description".into(), "updated".into());
txn.set_task(one, task_one).await?;
txn.add_operation(Operation::Update {
uuid: one,
property: "description".into(),
old_value: Some("one".into()),
value: Some("updated".into()),
timestamp: Utc::now(),
})
.await?;
txn.commit().await?;
}
// Read back the modification.
{
let mut txn = storage.txn().await?;
let task_one = txn.get_task(one).await?.unwrap();
assert_eq!(task_one.get("description").unwrap(), "updated");
let ops = txn.unsynced_operations().await?;
assert_eq!(ops.len(), 15);
}
// Check the UUID fields on the operations directly in the DB.
{
let t = storage
.con
.transaction_with_behavior(TransactionBehavior::Immediate)?;
let mut q = t.prepare("SELECT data, uuid FROM operations ORDER BY id ASC")?;
let mut num_ops = 0;
for row in q
.query_map([], |r| {
let uuid: Option<StoredUuid> = r.get("uuid")?;
let operation: Operation = r.get("data")?;
Ok((uuid.map(|su| su.0), operation))
})
.context("Get all operations")?
{
let (uuid, operation) = row?;
assert_eq!(uuid, operation.get_uuid());
num_ops += 1;
}
assert_eq!(num_ops, 15);
}
Ok(())
}
#[test]
fn test_concurrent_access() -> Result<()> {
let tmp_dir = TempDir::new()?;
let path = tmp_dir.path();
// Initialize the DB once, as schema modifications are not isolated by transactions.
SqliteStorageInner::new(path, AccessMode::ReadWrite, true).unwrap();
// First thread begins a transaction, writes immediately, waits 100ms, and commits it.
thread::scope(|scope| {
scope.spawn(|| {
let rt = tokio::runtime::Builder::new_current_thread()
.enable_time()
.build()
.expect("Failed to create current-thread runtime");
rt.block_on(async move {
let mut storage =
SqliteStorageInner::new(path, AccessMode::ReadWrite, true).unwrap();
let u = Uuid::new_v4();
let mut txn = storage.txn().await.unwrap();
txn.set_base_version(u).await.unwrap();
tokio::time::sleep(Duration::from_millis(100)).await;
txn.commit().await.unwrap();
});
});
// Second thread waits 50ms, and begins a transaction. This
// should wait for the first to complete, but the regression would be a SQLITE_BUSY
// failure.
scope.spawn(|| {
let rt = tokio::runtime::Builder::new_current_thread()
.enable_time()
.build()
.expect("Failed to create current-thread runtime");
rt.block_on(async move {
tokio::time::sleep(Duration::from_millis(50)).await;
let mut storage =
SqliteStorageInner::new(path, AccessMode::ReadWrite, true).unwrap();
let u = Uuid::new_v4();
let mut txn = storage.txn().await.unwrap();
txn.set_base_version(u).await.unwrap();
txn.commit().await.unwrap();
});
});
});
Ok(())
}
/// Verify that mutating methods fail in the read-only access mode, but read methods succeed,
/// both with and without the database having been created before being opened.
#[rstest]
#[case::create_non_existent(false, true)]
#[case::create_exists(true, true)]
#[case::exists_dont_create(true, false)]
#[tokio::test]
async fn test_read_only(#[case] exists: bool, #[case] create: bool) -> Result<()> {
let tmp_dir = TempDir::new()?;
// If the DB should already exist, create it.
if exists {
SqliteStorageInner::new(tmp_dir.path(), AccessMode::ReadWrite, true)?;
}
let mut storage = SqliteStorageInner::new(tmp_dir.path(), AccessMode::ReadOnly, create)?;
fn is_read_only_err<T: std::fmt::Debug>(res: Result<T>) -> bool {
&res.unwrap_err().to_string() == "Task storage was opened in read-only mode"
}
let mut txn = storage.txn().await?;
let taskmap = TaskMap::new();
let op = Operation::UndoPoint;
// Mutating things fail.
assert!(is_read_only_err(txn.create_task(Uuid::new_v4()).await));
assert!(is_read_only_err(
txn.set_task(Uuid::new_v4(), taskmap).await
));
assert!(is_read_only_err(txn.delete_task(Uuid::new_v4()).await));
assert!(is_read_only_err(txn.set_base_version(Uuid::new_v4()).await));
assert!(is_read_only_err(txn.add_operation(op.clone()).await));
assert!(is_read_only_err(txn.remove_operation(op).await));
assert!(is_read_only_err(txn.sync_complete().await));
assert!(is_read_only_err(
txn.add_to_working_set(Uuid::new_v4()).await
));
assert!(is_read_only_err(txn.set_working_set_item(1, None).await));
assert!(is_read_only_err(txn.clear_working_set().await));
assert!(is_read_only_err(txn.commit().await));
// Read-only things succeed.
assert_eq!(txn.get_task(Uuid::new_v4()).await?, None);
assert_eq!(txn.get_pending_tasks().await?.len(), 0);
assert_eq!(txn.all_tasks().await?.len(), 0);
assert_eq!(txn.base_version().await?, Uuid::nil());
assert_eq!(txn.get_task_operations(Uuid::new_v4()).await?.len(), 0);
assert_eq!(txn.unsynced_operations().await?.len(), 0);
assert_eq!(txn.num_unsynced_operations().await?, 0);
assert_eq!(txn.get_working_set().await?.len(), 1);
Ok(())
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/storage/sqlite/schema.rs | src/storage/sqlite/schema.rs | use crate::errors::{Error, Result};
use anyhow::Context;
use rusqlite::{params, Connection, OptionalExtension, Transaction};
/// A database schema version.
///
/// The first value is the major version, with different major versions completely incompatible
/// with one another.
///
/// The second is the minor version, with all minor versions in the same major version being
/// compatible with one another. That is, a TaskChampion binary with a latest version of (MAJ, MIN)
/// can safely operate on any DB with major version MAJ, whether its minor version is greater or
/// smaller than MIN, and can upgrade that DB to (MAJ, MIN) if necessary.
///
/// For example, a new index would trigger an increment of the minor version, as older versions of
/// TaskChampion can still safely use the DB with the addition of the index.
#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub(super) struct DbVersion(pub u32, pub u32);
type UpgradeFn = fn(&Transaction) -> Result<()>;
/// DB Versions and functions to upgrade to them.
///
/// Add new vesions here, in order. Prefer minor version bumps for better compatibility, using
/// techniques like virtual columns where possible.
const VERSIONS: &[(DbVersion, UpgradeFn)] = &[
(DbVersion(0, 1), upgrade_to_0_1),
(DbVersion(0, 2), upgrade_to_0_2),
];
pub(super) const LATEST_VERSION: DbVersion = VERSIONS[VERSIONS.len() - 1].0;
pub(super) fn upgrade_db(con: &mut Connection) -> Result<()> {
let mut current_version = get_db_version(con)?;
if current_version.0 > LATEST_VERSION.0 {
return Err(Error::Database(
"Database is too new for this version of TaskChampion".into(),
));
}
for (version, upgrade) in VERSIONS {
if current_version < *version {
let t = con.transaction()?;
upgrade(&t)?;
t.commit()?;
current_version = *version;
}
}
Ok(())
}
/// Update to DbVersion(0, 1).
///
/// This function can upgrade from any schema before (0, 1), including those of TaskChampion
/// 0.8 and 0.9, as all of its operations are performed only if the schema element does not
/// already exist.
fn upgrade_to_0_1(t: &Transaction) -> Result<()> {
let create_tables = vec![
"CREATE TABLE IF NOT EXISTS operations (id INTEGER PRIMARY KEY AUTOINCREMENT, data STRING);",
"CREATE TABLE IF NOT EXISTS sync_meta (key STRING PRIMARY KEY, value STRING);",
"CREATE TABLE IF NOT EXISTS tasks (uuid STRING PRIMARY KEY, data STRING);",
"CREATE TABLE IF NOT EXISTS working_set (id INTEGER PRIMARY KEY, uuid STRING);",
];
for q in create_tables {
t.execute(q, []).context("Creating table")?;
}
// -- At this point the DB schema is that of TaskChampion 0.8.0.
// Check for and add the `operations.uuid` column.
if !has_column(t, "operations", "uuid")? {
t.execute(
r#"ALTER TABLE operations ADD COLUMN uuid GENERATED ALWAYS AS (
coalesce(json_extract(data, "$.Update.uuid"),
json_extract(data, "$.Create.uuid"),
json_extract(data, "$.Delete.uuid"))) VIRTUAL"#,
[],
)
.context("Adding operations.uuid")?;
t.execute("CREATE INDEX operations_by_uuid ON operations (uuid)", [])
.context("Creating operations_by_uuid")?;
}
if !has_column(t, "operations", "synced")? {
t.execute(
"ALTER TABLE operations ADD COLUMN synced bool DEFAULT false",
[],
)
.context("Adding operations.synced")?;
t.execute(
"CREATE INDEX operations_by_synced ON operations (synced)",
[],
)
.context("Creating operations_by_synced")?;
}
// -- At this point the DB schema is that of TaskChampion 0.9.0.
create_version_table(t)?;
set_db_version(t, DbVersion(0, 1))?;
Ok(())
}
/// Update to DbVersion(0, 2).
///
/// This fixes some bad syntax in the schema in DbVersion(0, 1) -- use of double quotes in the
/// JSON paths passed to `json_extract`. This syntax works with SQLite 3.45.1 but not 3.50.4, but
/// is invalid per the grammar in both versions.
fn upgrade_to_0_2(t: &Transaction) -> Result<()> {
// Fix the `operations.uuid` column. Note that this column is virtual and
// thus contains no data, so dropping it is not a lossy operation.
t.execute(r#"DROP INDEX operations_by_uuid"#, [])
.context("Dropping index operatoins_by_uuid")?;
t.execute(r#"ALTER TABLE operations DROP COLUMN uuid"#, [])
.context("Removing incorrect operations.uuid")?;
t.execute(
r#"ALTER TABLE operations ADD COLUMN uuid GENERATED ALWAYS AS (
coalesce(json_extract(data, '$.Update.uuid'),
json_extract(data, '$.Create.uuid'),
json_extract(data, '$.Delete.uuid'))) VIRTUAL"#,
[],
)
.context("Creating correct operations.uuid")?;
t.execute("CREATE INDEX operations_by_uuid ON operations (uuid)", [])
.context("Creating index operations_by_uuid")?;
set_db_version(t, DbVersion(0, 2))?;
Ok(())
}
fn create_version_table(t: &Transaction) -> Result<()> {
// The `singleton` column constrains this table to have no more than one row.
t.execute(
r#"CREATE TABLE IF NOT EXISTS version (
singleton INTEGER PRIMARY KEY CHECK (singleton = 0),
major INTEGER,
minor INTEGER)"#,
[],
)
.context("Creating table")?;
Ok(())
}
/// Get the current DB version, from the `version` table. If the table or row does not exist, that
/// is considered version (0, 0).
///
/// This takes a connection for efficiency: this is called every time a Storage instance is
/// created, so the overhead of BEGIN and COMMIT for a transaction is unnecessary in the happy
/// path.
pub(super) fn get_db_version(con: &mut Connection) -> Result<DbVersion> {
let version: Option<(u32, u32)> = match con
.query_row("SELECT major, minor FROM version", [], |r| {
Ok((r.get("major")?, r.get("minor")?))
})
.optional()
{
Ok(v) => v,
Err(err @ rusqlite::Error::SqliteFailure(_, _)) => {
// This error may have occurred because the "version" table does not exist, in which
// case the version is (0, 0).
if has_column(&con.transaction()?, "version", "major")? {
return Err(err.into());
}
None
}
Err(err) => return Err(err.into()),
};
let (major, minor) = version.unwrap_or((0, 0));
Ok(DbVersion(major, minor))
}
/// Set the current DB version.
fn set_db_version(t: &Transaction, version: DbVersion) -> Result<()> {
let DbVersion(major, minor) = version;
t.execute(
r#"INSERT INTO version (singleton, major, minor) VALUES (0, ?, ?)
ON CONFLICT(singleton) do UPDATE SET major=?, minor=?"#,
params![major, minor, major, minor],
)?;
Ok(())
}
fn has_column(t: &Transaction, table: &str, column: &str) -> Result<bool> {
let res: u32 = t
.query_row(
"SELECT COUNT(*) AS c FROM pragma_table_xinfo(?) WHERE name=?",
[table, column],
|r| r.get(0),
)
.with_context(|| format!("Checking for {table}.{column}"))?;
Ok(res > 0)
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn get_db_version_no_table() -> Result<()> {
let mut con = Connection::open_in_memory()?;
assert_eq!(get_db_version(&mut con)?, DbVersion(0, 0));
Ok(())
}
#[test]
fn get_db_version_empty() -> Result<()> {
let mut con = Connection::open_in_memory()?;
let t = con.transaction()?;
create_version_table(&t)?;
t.commit()?;
assert_eq!(get_db_version(&mut con)?, DbVersion(0, 0));
Ok(())
}
#[test]
fn get_db_version_set() -> Result<()> {
let mut con = Connection::open_in_memory()?;
let t = con.transaction()?;
create_version_table(&t)?;
set_db_version(&t, DbVersion(3, 5))?;
t.commit()?;
assert_eq!(get_db_version(&mut con)?, DbVersion(3, 5));
Ok(())
}
#[test]
fn get_db_version_set_twice() -> Result<()> {
let mut con = Connection::open_in_memory()?;
let t = con.transaction()?;
create_version_table(&t)?;
set_db_version(&t, DbVersion(3, 5))?;
set_db_version(&t, DbVersion(4, 7))?;
t.commit()?;
assert_eq!(get_db_version(&mut con)?, DbVersion(4, 7));
Ok(())
}
#[test]
fn test_upgrade_to_0_1() -> Result<()> {
let mut con = Connection::open_in_memory()?;
{
let t = con.transaction()?;
upgrade_to_0_1(&t)?;
t.commit()?;
}
{
let t = con.transaction()?;
assert!(has_column(&t, "operations", "id")?);
assert!(has_column(&t, "operations", "data")?);
assert!(has_column(&t, "operations", "uuid")?);
assert!(has_column(&t, "sync_meta", "key")?);
assert!(has_column(&t, "sync_meta", "value")?);
assert!(has_column(&t, "tasks", "uuid")?);
assert!(has_column(&t, "tasks", "data")?);
assert!(has_column(&t, "working_set", "id")?);
assert!(has_column(&t, "working_set", "uuid")?);
}
assert_eq!(get_db_version(&mut con)?, DbVersion(0, 1));
Ok(())
}
#[test]
fn test_upgrade_to_0_2() -> Result<()> {
let mut con = Connection::open_in_memory()?;
{
let t = con.transaction()?;
upgrade_to_0_1(&t)?;
upgrade_to_0_2(&t)?;
t.commit()?;
}
{
let t = con.transaction()?;
assert!(has_column(&t, "operations", "id")?);
assert!(has_column(&t, "operations", "data")?);
assert!(has_column(&t, "operations", "uuid")?);
assert!(has_column(&t, "sync_meta", "key")?);
assert!(has_column(&t, "sync_meta", "value")?);
assert!(has_column(&t, "tasks", "uuid")?);
assert!(has_column(&t, "tasks", "data")?);
assert!(has_column(&t, "working_set", "id")?);
assert!(has_column(&t, "working_set", "uuid")?);
}
assert_eq!(get_db_version(&mut con)?, DbVersion(0, 2));
Ok(())
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/storage/sqlite/mod.rs | src/storage/sqlite/mod.rs | use crate::errors::Result;
use crate::storage::config::AccessMode;
use crate::storage::send_wrapper::Wrapper;
use crate::storage::sqlite::inner::SqliteStorageInner;
use crate::storage::{Storage, StorageTxn};
use async_trait::async_trait;
use rusqlite::types::FromSql;
use rusqlite::ToSql;
use std::path::Path;
use uuid::Uuid;
mod inner;
mod schema;
#[derive(Debug, thiserror::Error, PartialEq, Eq)]
pub(crate) enum SqliteError {
#[error("SQLite transaction already committted")]
TransactionAlreadyCommitted,
#[error("Task storage was opened in read-only mode")]
ReadOnlyStorage,
}
/// Newtype to allow implementing `FromSql` for foreign `uuid::Uuid`
pub(crate) struct StoredUuid(pub(crate) Uuid);
/// Conversion from Uuid stored as a string (rusqlite's uuid feature stores as binary blob)
impl FromSql for StoredUuid {
fn column_result(value: rusqlite::types::ValueRef<'_>) -> rusqlite::types::FromSqlResult<Self> {
let u = Uuid::parse_str(value.as_str()?)
.map_err(|_| rusqlite::types::FromSqlError::InvalidType)?;
Ok(StoredUuid(u))
}
}
/// Store Uuid as string in database
impl ToSql for StoredUuid {
fn to_sql(&self) -> rusqlite::Result<rusqlite::types::ToSqlOutput<'_>> {
let s = self.0.to_string();
Ok(s.into())
}
}
/// SqliteStorage stores task data in a file on disk.
#[derive(Clone)]
pub struct SqliteStorage(Wrapper);
impl SqliteStorage {
pub async fn new<P: AsRef<Path>>(
path: P,
access_mode: AccessMode,
create_if_missing: bool,
) -> Result<Self> {
let path = path.as_ref().to_path_buf();
Ok(Self(
Wrapper::new(async move || {
let inner = SqliteStorageInner::new(&path, access_mode, create_if_missing)?;
Ok(inner)
})
.await?,
))
}
}
#[async_trait]
impl Storage for SqliteStorage {
async fn txn<'a>(&'a mut self) -> Result<Box<dyn StorageTxn + Send + 'a>> {
Ok(self.0.txn().await?)
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::errors::Error;
use crate::storage::config::AccessMode;
use pretty_assertions::assert_eq;
use tempfile::TempDir;
async fn storage() -> Result<SqliteStorage> {
let tmp_dir = TempDir::new()?;
SqliteStorage::new(tmp_dir.path(), AccessMode::ReadWrite, true).await
}
crate::storage::test::storage_tests!(storage().await?);
#[tokio::test]
async fn test_implicit_rollback() -> Result<()> {
let mut storage = storage().await?;
let uuid = Uuid::new_v4();
// Begin a transaction, create a task, but do not commit.
// The transaction will go out of scope, triggering Drop.
{
let mut txn = storage.txn().await?;
assert!(txn.create_task(uuid).await?);
// txn is dropped here, which should trigger a rollback message.
}
// Begin a new transaction and verify the task does not exist.
let mut txn = storage.txn().await?;
let task = txn.get_task(uuid).await?;
assert_eq!(task, None, "Task should not exist after implicit rollback");
Ok(())
}
#[tokio::test]
async fn test_init_failure() -> Result<()> {
let tmp_dir = TempDir::new()?;
let file_path = tmp_dir.path().join("a_file");
std::fs::write(&file_path, "I am a file, not a directory")?;
// Try to create the storage inside a path that is a file, not a directory.
// This should cause SqliteStorage::new to fail inside the actor thread.
let result = SqliteStorage::new(&file_path, AccessMode::ReadWrite, true).await;
assert!(
result.is_err(),
"Initialization should fail for an invalid path"
);
// Check for the expected error message propagated from the actor thread.
if let Err(Error::Database(msg)) = result {
assert!(
msg.contains("Cannot create directory"),
"Error message should indicate a directory creation problem"
);
} else {
panic!("Expected a Database error");
}
Ok(())
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/storage/indexeddb/storage.rs | src/storage/indexeddb/storage.rs | use super::schema;
use crate::errors::{Error, Result};
use crate::operation::Operation;
use crate::storage::send_wrapper::{WrappedStorage, WrappedStorageTxn, Wrapper};
use crate::storage::{Storage, StorageTxn, TaskMap, VersionId, DEFAULT_BASE_VERSION};
use async_trait::async_trait;
use idb::{CursorDirection, DatabaseEvent, Query, Transaction, TransactionMode};
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use uuid::Uuid;
use wasm_bindgen::JsValue;
struct Inner {
db: idb::Database,
}
#[async_trait(?Send)]
impl WrappedStorage for Inner {
async fn txn<'a>(&'a mut self) -> Result<Box<dyn WrappedStorageTxn + 'a>> {
Ok(Box::new(InnerTxn(Some(self.db.transaction(
&[
schema::TASKS,
schema::OPERATIONS,
schema::SYNC_META,
schema::WORKING_SET,
],
TransactionMode::ReadWrite,
)?))))
}
}
/// Return a new error stating the data is invalid.
fn invalid() -> Error {
Error::Database("invalid data in IndexedDB".into())
}
/// Wrap the given error stating the data is invalid.
fn invalid_err(e: impl std::error::Error) -> Error {
Error::Database(format!("invalid data in IndexedDB: {e}"))
}
/// Return a new error indicating this transaction has been committed.
fn already_committed() -> Error {
Error::Database("transaction is already committed".into())
}
/// Return a new error indicating the operation was not found.
fn operation_not_found() -> Error {
Error::Database("operation not found".into())
}
/// Return a new error indicating the operation has already been synced.
fn operation_synced() -> Error {
Error::Database("operation has been synced".into())
}
/// Convert a JsValue to a Uuid.
fn js2uuid(js: JsValue) -> Result<Uuid> {
let json = js.as_string().ok_or_else(invalid)?;
let uuid = Uuid::parse_str(json.as_str()).map_err(invalid_err)?;
Ok(uuid)
}
/// Convert a UUID to a JsValue.
fn uuid2js(uuid: Uuid) -> Result<JsValue> {
let string = uuid.to_string();
let js = JsValue::from_str(string.as_str());
Ok(js)
}
/// Convert a JsValue to a Task.
fn js2task(js: JsValue) -> Result<TaskMap> {
Ok(serde_wasm_bindgen::from_value(js)?)
}
/// Convert a Task to a JsValue.
fn task2js(task: TaskMap) -> Result<JsValue> {
Ok(serde_wasm_bindgen::to_value(&task)?)
}
fn unsynced_is_zero(v: &u8) -> bool {
*v == 0
}
/// Operations are stored with a separate UUID (for IndexedDB indexing) and
/// a flag indicating whether they have been synced.
#[derive(Serialize, Deserialize)]
struct StoredOperation {
uuid: Option<Uuid>,
operation: Operation,
// IndexedDB does not support indexing by a boolean value, so
// we store a number here instead. Since we want to index un-synced
// operations, this field is only present on such entries.
#[serde(skip_serializing_if = "unsynced_is_zero")]
#[serde(default)]
unsynced: u8,
}
/// Convert a JsValue to an Operation.
fn js2op(js: JsValue) -> Result<Operation> {
Ok(serde_wasm_bindgen::from_value::<StoredOperation>(js)?.operation)
}
/// Convert an Operation to a JsValue.
fn op2js(operation: Operation, unsynced: bool) -> Result<JsValue> {
let operation = StoredOperation {
uuid: operation.get_uuid(),
operation,
unsynced: unsynced as u8,
};
Ok(serde_wasm_bindgen::to_value(&operation)?)
}
struct InnerTxn(Option<Transaction>);
impl InnerTxn {
fn idb_txn(&self) -> Result<&idb::Transaction> {
if let Some(transaction) = &self.0 {
Ok(transaction)
} else {
Err(already_committed())
}
}
async fn get_next_working_set_number(&self) -> Result<usize> {
let working_set = self.idb_txn()?.object_store(schema::WORKING_SET)?;
let mut max: usize = 0;
let mut maybe_cursor = working_set.open_key_cursor(None, None)?.await?;
while let Some(cursor) = maybe_cursor {
let i = cursor.key()?.as_f64().ok_or_else(invalid)? as usize;
if i > max {
max = i;
}
maybe_cursor = cursor.next(None)?.await?;
}
Ok(max + 1)
}
}
#[async_trait(?Send)]
impl WrappedStorageTxn for InnerTxn {
async fn get_task(&mut self, uuid: Uuid) -> Result<Option<TaskMap>> {
let tasks = self.idb_txn()?.object_store(schema::TASKS)?;
if let Some(task) = tasks.get(Query::Key(uuid2js(uuid)?))?.await? {
Ok(Some(js2task(task)?))
} else {
Ok(None)
}
}
async fn get_pending_tasks(&mut self) -> Result<Vec<(Uuid, TaskMap)>> {
let tasks = self.idb_txn()?.object_store(schema::TASKS)?;
let working_set = self.idb_txn()?.object_store(schema::WORKING_SET)?;
let mut maybe_cursor = working_set
.open_cursor(None, Some(CursorDirection::Prev))?
.await?;
let mut res = Vec::new();
while let Some(cursor) = maybe_cursor {
let jsuuid = cursor.value()?;
let uuid = js2uuid(jsuuid.clone())?;
if let Some(task) = tasks.get(Query::Key(jsuuid))?.await? {
res.push((uuid, js2task(task)?));
}
maybe_cursor = cursor.next(None)?.await?;
}
Ok(res)
}
async fn create_task(&mut self, uuid: Uuid) -> Result<bool> {
let tasks = self.idb_txn()?.object_store(schema::TASKS)?;
match tasks
.add(&task2js(HashMap::new())?, Some(&uuid2js(uuid)?))?
.await
{
Ok(_) => Ok(true),
Err(idb::Error::DomException(e)) if e.name() == "ConstraintError" => {
// IndexedDB returns ConstraintError when the entry already exists,
// which `create_task` indicates by returning false.
Ok(false)
}
Err(e) => Err(e.into()),
}
}
async fn set_task(&mut self, uuid: Uuid, task: TaskMap) -> Result<()> {
let tasks = self.idb_txn()?.object_store(schema::TASKS)?;
tasks.put(&task2js(task)?, Some(&uuid2js(uuid)?))?.await?;
Ok(())
}
async fn delete_task(&mut self, uuid: Uuid) -> Result<bool> {
let tasks = self.idb_txn()?.object_store(schema::TASKS)?;
if tasks.get_key(Query::Key(uuid2js(uuid)?))?.await?.is_some() {
tasks.delete(Query::Key(uuid2js(uuid)?))?.await?;
Ok(true)
} else {
Ok(false)
}
}
async fn all_tasks(&mut self) -> Result<Vec<(Uuid, TaskMap)>> {
let tasks = self.idb_txn()?.object_store(schema::TASKS)?;
let mut res = Vec::new();
let mut maybe_cursor = tasks.open_cursor(None, None)?.await?;
while let Some(cursor) = maybe_cursor {
res.push((js2uuid(cursor.key()?)?, js2task(cursor.value()?)?));
maybe_cursor = cursor.next(None)?.await?;
}
Ok(res)
}
async fn all_task_uuids(&mut self) -> Result<Vec<Uuid>> {
let tasks = self.idb_txn()?.object_store(schema::TASKS)?;
let mut res = Vec::new();
let mut maybe_cursor = tasks.open_key_cursor(None, None)?.await?;
while let Some(cursor) = maybe_cursor {
res.push(js2uuid(cursor.key()?)?);
maybe_cursor = cursor.next(None)?.await?;
}
Ok(res)
}
async fn base_version(&mut self) -> Result<VersionId> {
let sync_meta = self.idb_txn()?.object_store(schema::SYNC_META)?;
let base_version: JsValue = "base_version".into();
if let Some(version) = sync_meta.get(Query::Key(base_version))?.await? {
Ok(js2uuid(version)?)
} else {
Ok(DEFAULT_BASE_VERSION)
}
}
async fn set_base_version(&mut self, version: VersionId) -> Result<()> {
let sync_meta = self.idb_txn()?.object_store(schema::SYNC_META)?;
let base_version: JsValue = "base_version".into();
sync_meta
.put(&uuid2js(version)?, Some(&base_version))?
.await?;
Ok(())
}
async fn get_task_operations(&mut self, uuid: Uuid) -> Result<Vec<Operation>> {
let ops_by_uuid = self
.idb_txn()?
.object_store(schema::OPERATIONS)?
.index(schema::OPERATIONS_BY_UUID)?;
let mut res = Vec::new();
let mut maybe_cursor = ops_by_uuid
.open_cursor(Some(Query::Key(uuid2js(uuid)?)), None)?
.await?;
while let Some(cursor) = maybe_cursor {
res.push(js2op(cursor.value()?)?);
maybe_cursor = cursor.next(None)?.await?;
}
Ok(res)
}
async fn unsynced_operations(&mut self) -> Result<Vec<Operation>> {
let ops_by_synced = self
.idb_txn()?
.object_store(schema::OPERATIONS)?
.index(schema::OPERATIONS_BY_UNSYNCED)?;
let mut res = Vec::new();
// Using no query here returns only values with the `unsynced` property set.
let mut maybe_cursor = ops_by_synced.open_cursor(None, None)?.await?;
while let Some(cursor) = maybe_cursor {
res.push(js2op(cursor.value()?)?);
maybe_cursor = cursor.next(None)?.await?;
}
Ok(res)
}
async fn num_unsynced_operations(&mut self) -> Result<usize> {
let ops_by_synced = self
.idb_txn()?
.object_store(schema::OPERATIONS)?
.index(schema::OPERATIONS_BY_UNSYNCED)?;
let mut count = 0;
// Using no query here returns only values with the `unsynced` property set.
let mut maybe_cursor = ops_by_synced.open_cursor(None, None)?.await?;
while let Some(cursor) = maybe_cursor {
count += 1;
maybe_cursor = cursor.next(None)?.await?;
}
Ok(count)
}
async fn add_operation(&mut self, op: Operation) -> Result<()> {
let operations = self.idb_txn()?.object_store(schema::OPERATIONS)?;
operations.add(&op2js(op, true)?, None)?.await?;
Ok(())
}
async fn remove_operation(&mut self, op: Operation) -> Result<()> {
// Iterate operations in reverse, to get the highest index.
let operations = self.idb_txn()?.object_store(schema::OPERATIONS)?;
let maybe_cursor = operations
.open_cursor(None, Some(CursorDirection::Prev))?
.await?;
let Some(cursor) = maybe_cursor else {
return Err(operation_not_found());
};
let found_op = serde_wasm_bindgen::from_value::<StoredOperation>(cursor.value()?)?;
if found_op.unsynced == 0 {
return Err(operation_synced());
}
if found_op.operation != op {
return Err(operation_not_found());
}
cursor.delete()?.await?;
Ok(())
}
async fn sync_complete(&mut self) -> Result<()> {
let ops = self.idb_txn()?.object_store(schema::OPERATIONS)?;
let ops_by_synced = ops.index(schema::OPERATIONS_BY_UNSYNCED)?;
// Update all operations to indicate they are sync'd. Using no query here returns only
// values with the `unsynced` property set.
let mut maybe_cursor = ops_by_synced.open_cursor(None, None)?.await?;
while let Some(cursor) = maybe_cursor {
let op = js2op(cursor.value()?)?;
cursor.update(&op2js(op, false)?)?.await?;
maybe_cursor = cursor.next(None)?.await?;
}
// Now delete all operations for which no task exists (usually deleted tasks).
let task_uuids: HashSet<Uuid, std::hash::RandomState> =
HashSet::from_iter(self.all_task_uuids().await?.drain(..));
let mut maybe_cursor = ops.open_cursor(None, None)?.await?;
while let Some(cursor) = maybe_cursor {
let stored_op = serde_wasm_bindgen::from_value::<StoredOperation>(cursor.value()?)?;
if let Some(uuid) = stored_op.uuid {
if !task_uuids.contains(&uuid) {
cursor.delete()?.await?;
}
}
maybe_cursor = cursor.next(None)?.await?;
}
Ok(())
}
async fn get_working_set(&mut self) -> Result<Vec<Option<Uuid>>> {
let working_set = self.idb_txn()?.object_store(schema::WORKING_SET)?;
let mut res = vec![None];
let mut maybe_cursor = working_set
.open_cursor(None, Some(CursorDirection::Prev))?
.await?;
while let Some(cursor) = maybe_cursor {
let id = cursor.key()?.as_f64().ok_or_else(invalid)? as usize;
let uuid = js2uuid(cursor.value()?)?;
res.resize_with(res.len().max(id + 1), Default::default);
res[id] = Some(uuid);
maybe_cursor = cursor.next(None)?.await?;
}
Ok(res)
}
async fn add_to_working_set(&mut self, uuid: Uuid) -> Result<usize> {
let next_working_set_id = self.get_next_working_set_number().await?;
let working_set = self.idb_txn()?.object_store(schema::WORKING_SET)?;
working_set
.add(&uuid2js(uuid)?, Some(&next_working_set_id.into()))?
.await?;
Ok(next_working_set_id)
}
async fn set_working_set_item(&mut self, id: usize, uuid: Option<Uuid>) -> Result<()> {
let working_set = self.idb_txn()?.object_store(schema::WORKING_SET)?;
if let Some(uuid) = uuid {
working_set.put(&uuid2js(uuid)?, Some(&id.into()))?.await?;
} else {
working_set.delete(Query::Key(id.into()))?.await?;
}
Ok(())
}
async fn clear_working_set(&mut self) -> Result<()> {
let working_set = self.idb_txn()?.object_store(schema::WORKING_SET)?;
working_set.clear()?.await?;
Ok(())
}
async fn commit(&mut self) -> Result<()> {
self.0.take().ok_or_else(already_committed)?.commit()?;
Ok(())
}
}
impl Drop for InnerTxn {
fn drop(&mut self) {
if let Some(txn) = self.0.take() {
// Make an attempt to abort the transaction, but without any recourse
// if it fails.
let _ = txn.abort();
}
}
}
/// IndexedDbStorage uses the [IndexedDB
/// API](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API) to store tasks, and is
/// suitable for use when running in a browser.
///
/// Although TaskChampion can initialize itself by synchronizing to a server, it is recommended to
/// use the
/// [`StorageManager.persist()`](https://developer.mozilla.org/en-US/docs/Web/API/StorageManager/persist)
/// method to ensure the browser does not discard the database under storage pressure.
///
/// WARNING: Do not read or write the database except via this storage implementation. Later
/// versions of this library may change the schema incompatibly. pub struct
/// IndexedDbStorage(Wrapper);
pub struct IndexedDbStorage(Wrapper);
impl IndexedDbStorage {
/// Create a new IndexedDbStorage, using the given name for the database.
pub async fn new(db_name: impl Into<String>) -> Result<IndexedDbStorage> {
let db_name = db_name.into();
Ok(IndexedDbStorage(
Wrapper::new(async move || {
let factory = idb::Factory::new()?;
let mut open_request = factory.open(&db_name, Some(schema::DB_VERSION))?;
open_request.on_upgrade_needed(|event| {
// It's unclear what to do with errors here. Abort the transaction?? The
// callback is 'static so we can't capture with &mut some local variable
// or anything of the sort. For now, `unwrap()`, which the browser will
// handle as a JS exception.
let old_version = event.old_version().unwrap();
let db = event.database().unwrap();
schema::upgrade(db, old_version).unwrap()
});
let db = open_request.await?;
Ok(Inner { db })
})
.await?,
))
}
}
#[async_trait]
impl Storage for IndexedDbStorage {
async fn txn<'a>(&'a mut self) -> Result<Box<dyn StorageTxn + Send + 'a>> {
Ok(self.0.txn().await?)
}
}
#[cfg(test)]
mod test {
use super::*;
use wasm_bindgen_test::*;
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
async fn storage() -> IndexedDbStorage {
// Use a random name for the DB, as tests run in parallel in the same
// browser profile and would otherwise interfere.
IndexedDbStorage::new(Uuid::new_v4().to_string().as_str())
.await
.expect("IndexedDB setup failed")
}
crate::storage::test::storage_tests_wasm!(storage().await);
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/storage/indexeddb/schema.rs | src/storage/indexeddb/schema.rs | use crate::errors::Result;
use idb::{Database, KeyPath};
/// The IndexedDB database version. This can be incremented to trigger calls to
/// `upgrade`.
pub(super) const DB_VERSION: u32 = 1;
pub(super) const TASKS: &str = "tasks";
pub(super) const OPERATIONS: &str = "operations";
pub(super) const OPERATIONS_BY_UUID: &str = "operations_by_uuid";
pub(super) const OPERATIONS_BY_UNSYNCED: &str = "operations_by_unsynced";
pub(super) const SYNC_META: &str = "sync_meta";
pub(super) const WORKING_SET: &str = "working_set";
/* Current Schema:
*
* - Object store `tasks` stores TaskMaps as values, in the form of a JS object. Keys are UUIDs in
* the form of a string.
*
* - Object store `operations` stores values {uuid, operation, unsynced}, with `unsynced` omitted
* for synced operations. Keys are auto-incrementing integers. The `uuid` and `unsynced`
* properties are indexed.
*
* - Object store `sync_meta` stores string values with string keys. At the moment it only stores
* the base revision.
*
* - Object store `working_set` stores UUIDs as strings, keyed by the working set ID as an integer.
*/
fn upgrade_0_to_1(db: Database) -> Result<()> {
// Create the `tasks` table
let mut params = idb::ObjectStoreParams::new();
params.key_path(None);
params.auto_increment(false);
db.create_object_store(TASKS, params)?;
// Create the `operations` table
let mut params = idb::ObjectStoreParams::new();
params.key_path(None);
params.auto_increment(true);
let ops = db.create_object_store(OPERATIONS, params)?;
ops.create_index(OPERATIONS_BY_UUID, KeyPath::new_single("uuid"), None)?;
ops.create_index(
OPERATIONS_BY_UNSYNCED,
KeyPath::new_single("unsynced"),
None,
)?;
// Create the `sync_meta` table
let mut params = idb::ObjectStoreParams::new();
params.key_path(None);
params.auto_increment(false);
db.create_object_store(SYNC_META, params)?;
// Create the `working_set` table
let mut params = idb::ObjectStoreParams::new();
params.key_path(None);
params.auto_increment(false);
db.create_object_store(WORKING_SET, params)?;
Ok(())
}
pub(super) fn upgrade(db: Database, old_version: u32) -> Result<()> {
if old_version == 0 {
upgrade_0_to_1(db)
} else {
unreachable!("old_version should be less than DB_VERSION");
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/storage/indexeddb/mod.rs | src/storage/indexeddb/mod.rs | #[cfg(not(target_arch = "wasm32"))]
compile_error!("IndexdDBStorage is only available on WASM targets");
mod storage;
pub use storage::IndexedDbStorage;
mod schema;
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/storage/send_wrapper/test.rs | src/storage/send_wrapper/test.rs | use super::Wrapper;
use crate::errors::{Error, Result};
use crate::operation::Operation;
use crate::storage::inmemory::InMemoryStorage;
use crate::storage::send_wrapper::{WrappedStorage, WrappedStorageTxn};
use crate::storage::{Storage, StorageTxn, TaskMap, VersionId};
use async_trait::async_trait;
use pretty_assertions::assert_eq;
use uuid::Uuid;
// Implement WrappedStorage(Txn) for InMemoryStorage and a boxed StorageTxn
#[async_trait(?Send)]
impl WrappedStorageTxn for Box<dyn StorageTxn + Send + '_> {
async fn get_task(&mut self, uuid: Uuid) -> Result<Option<TaskMap>> {
self.as_mut().get_task(uuid).await
}
async fn get_pending_tasks(&mut self) -> Result<Vec<(Uuid, TaskMap)>> {
self.as_mut().get_pending_tasks().await
}
async fn create_task(&mut self, uuid: Uuid) -> Result<bool> {
self.as_mut().create_task(uuid).await
}
async fn set_task(&mut self, uuid: Uuid, task: TaskMap) -> Result<()> {
self.as_mut().set_task(uuid, task).await
}
async fn delete_task(&mut self, uuid: Uuid) -> Result<bool> {
self.as_mut().delete_task(uuid).await
}
async fn all_tasks(&mut self) -> Result<Vec<(Uuid, TaskMap)>> {
self.as_mut().all_tasks().await
}
async fn all_task_uuids(&mut self) -> Result<Vec<Uuid>> {
self.as_mut().all_task_uuids().await
}
async fn base_version(&mut self) -> Result<VersionId> {
self.as_mut().base_version().await
}
async fn set_base_version(&mut self, version: VersionId) -> Result<()> {
self.as_mut().set_base_version(version).await
}
async fn get_task_operations(&mut self, uuid: Uuid) -> Result<Vec<Operation>> {
self.as_mut().get_task_operations(uuid).await
}
async fn unsynced_operations(&mut self) -> Result<Vec<Operation>> {
self.as_mut().unsynced_operations().await
}
async fn num_unsynced_operations(&mut self) -> Result<usize> {
self.as_mut().num_unsynced_operations().await
}
async fn add_operation(&mut self, op: Operation) -> Result<()> {
self.as_mut().add_operation(op).await
}
async fn remove_operation(&mut self, op: Operation) -> Result<()> {
self.as_mut().remove_operation(op).await
}
async fn sync_complete(&mut self) -> Result<()> {
self.as_mut().sync_complete().await
}
async fn get_working_set(&mut self) -> Result<Vec<Option<Uuid>>> {
self.as_mut().get_working_set().await
}
async fn add_to_working_set(&mut self, uuid: Uuid) -> Result<usize> {
self.as_mut().add_to_working_set(uuid).await
}
async fn set_working_set_item(&mut self, index: usize, uuid: Option<Uuid>) -> Result<()> {
self.as_mut().set_working_set_item(index, uuid).await
}
async fn clear_working_set(&mut self) -> Result<()> {
self.as_mut().clear_working_set().await
}
#[allow(clippy::wrong_self_convention)] // mut is required here for storage access
async fn is_empty(&mut self) -> Result<bool> {
self.as_mut().is_empty().await
}
async fn commit(&mut self) -> Result<()> {
self.as_mut().commit().await
}
}
#[async_trait(?Send)]
impl WrappedStorage for InMemoryStorage {
async fn txn<'a>(&'a mut self) -> Result<Box<dyn WrappedStorageTxn + 'a>> {
Ok(Box::new(<InMemoryStorage as Storage>::txn(self).await?))
}
}
async fn storage() -> Result<Wrapper> {
Wrapper::new(async || Ok(InMemoryStorage::new())).await
}
crate::storage::test::storage_tests!(storage().await.unwrap());
#[tokio::test]
async fn test_implicit_rollback() -> Result<()> {
let mut storage = storage().await?;
let uuid = Uuid::new_v4();
// Begin a transaction, create a task, but do not commit.
// The transaction will go out of scope, triggering Drop.
{
let mut txn = storage.txn().await?;
assert!(txn.create_task(uuid).await?);
// txn is dropped here, which should trigger a rollback message.
}
// Begin a new transaction and verify the task does not exist.
let mut txn = storage.txn().await?;
let task = txn.get_task(uuid).await?;
assert_eq!(task, None, "Task should not exist after implicit rollback");
Ok(())
}
#[tokio::test]
async fn test_init_failure() -> Result<()> {
// The constructor runs in the thread, and its failure must be transmitted back via channels to
// appear here.
assert!(
Wrapper::new::<InMemoryStorage, _, _>(async || Err(Error::Database("uhoh!".into())))
.await
.is_err()
);
Ok(())
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/storage/send_wrapper/actor.rs | src/storage/send_wrapper/actor.rs | use super::{WrappedStorage, WrappedStorageTxn};
use crate::errors::Result;
use crate::operation::Operation;
use crate::storage::{TaskMap, VersionId};
use tokio::sync::{mpsc, oneshot};
use uuid::Uuid;
/// An enum for messages sent to the sync thread actor.
pub(super) enum ActorMessage {
// Transaction control
BeginTxn(oneshot::Sender<Result<mpsc::UnboundedSender<TxnMessage>>>),
}
pub(super) enum TxnMessage {
Commit(oneshot::Sender<Result<()>>),
Rollback,
// Transactional operations
GetTask(Uuid, oneshot::Sender<Result<Option<TaskMap>>>),
GetPendingTasks(oneshot::Sender<Result<Vec<(Uuid, TaskMap)>>>),
CreateTask(Uuid, oneshot::Sender<Result<bool>>),
SetTask(Uuid, TaskMap, oneshot::Sender<Result<()>>),
DeleteTask(Uuid, oneshot::Sender<Result<bool>>),
AllTasks(oneshot::Sender<Result<Vec<(Uuid, TaskMap)>>>),
AllTaskUuids(oneshot::Sender<Result<Vec<Uuid>>>),
BaseVersion(oneshot::Sender<Result<VersionId>>),
SetBaseVersion(VersionId, oneshot::Sender<Result<()>>),
GetTaskOperations(Uuid, oneshot::Sender<Result<Vec<Operation>>>),
UnsyncedOperations(oneshot::Sender<Result<Vec<Operation>>>),
NumUnsyncedOperations(oneshot::Sender<Result<usize>>),
AddOperation(Operation, oneshot::Sender<Result<()>>),
RemoveOperation(Operation, oneshot::Sender<Result<()>>),
SyncComplete(oneshot::Sender<Result<()>>),
GetWorkingSet(oneshot::Sender<Result<Vec<Option<Uuid>>>>),
AddToWorkingSet(Uuid, oneshot::Sender<Result<usize>>),
SetWorkingSetItem(usize, Option<Uuid>, oneshot::Sender<Result<()>>),
ClearWorkingSet(oneshot::Sender<Result<()>>),
IsEmpty(oneshot::Sender<Result<bool>>),
}
/// State owned by the dedicated thread. It handles the various channels and
/// delegates to the inner storage.
pub(super) struct ActorImpl<S: WrappedStorage> {
storage: S,
receiver: mpsc::UnboundedReceiver<ActorMessage>,
}
impl<S: WrappedStorage> ActorImpl<S> {
pub(super) fn new(storage: S, receiver: mpsc::UnboundedReceiver<ActorMessage>) -> Self {
Self { storage, receiver }
}
pub(super) async fn run(&mut self) {
// The outer loop waits for a BeginTxn message. If the channel is disconnected, the thread
// will exit gracefully. Note that this loop blocks until the transaction is complete,
// effectively ensuring serialized access (and simplifying management of ownership).
while let Some(ActorMessage::BeginTxn(reply_sender)) = self.receiver.recv().await {
let (txn_sender, mut txn_receiver) = mpsc::unbounded_channel::<TxnMessage>();
match self.storage.txn().await {
Ok(mut txn) => {
// Send the new transaction channel sender back
if reply_sender.send(Ok(txn_sender)).is_err() {
continue; // Don't handle the txn if the client is gone.
}
Self::handle_transaction(&mut txn_receiver, &mut txn).await;
}
Err(e) => {
// Send the database error back to the caller
let _ = reply_sender.send(Err(e));
}
}
}
}
/// The inner loop for handling messages within an active transaction.
async fn handle_transaction(
receiver: &mut mpsc::UnboundedReceiver<TxnMessage>,
txn: &mut Box<dyn WrappedStorageTxn + '_>,
) {
while let Some(msg) = receiver.recv().await {
match msg {
TxnMessage::Commit(resp) => {
let _ = resp.send(txn.commit().await);
return; // Transaction over, return to the outer loop.
}
TxnMessage::Rollback => {
return; // Transaction over, return to the outer loop.
}
TxnMessage::GetTask(uuid, resp) => {
let _ = resp.send(txn.get_task(uuid).await);
}
TxnMessage::GetPendingTasks(resp) => {
let _ = resp.send(txn.get_pending_tasks().await);
}
TxnMessage::CreateTask(uuid, resp) => {
let _ = resp.send(txn.create_task(uuid).await);
}
TxnMessage::SetTask(uuid, t, resp) => {
let _ = resp.send(txn.set_task(uuid, t).await);
}
TxnMessage::DeleteTask(uuid, resp) => {
let _ = resp.send(txn.delete_task(uuid).await);
}
TxnMessage::AllTasks(resp) => {
let _ = resp.send(txn.all_tasks().await);
}
TxnMessage::AllTaskUuids(resp) => {
let _ = resp.send(txn.all_task_uuids().await);
}
TxnMessage::BaseVersion(resp) => {
let _ = resp.send(txn.base_version().await);
}
TxnMessage::SetBaseVersion(v, resp) => {
let _ = resp.send(txn.set_base_version(v).await);
}
TxnMessage::GetTaskOperations(u, resp) => {
let _ = resp.send(txn.get_task_operations(u).await);
}
TxnMessage::UnsyncedOperations(resp) => {
let _ = resp.send(txn.unsynced_operations().await);
}
TxnMessage::NumUnsyncedOperations(resp) => {
let _ = resp.send(txn.num_unsynced_operations().await);
}
TxnMessage::AddOperation(o, resp) => {
let _ = resp.send(txn.add_operation(o).await);
}
TxnMessage::RemoveOperation(o, resp) => {
let _ = resp.send(txn.remove_operation(o).await);
}
TxnMessage::SyncComplete(resp) => {
let _ = resp.send(txn.sync_complete().await);
}
TxnMessage::GetWorkingSet(resp) => {
let _ = resp.send(txn.get_working_set().await);
}
TxnMessage::AddToWorkingSet(u, resp) => {
let _ = resp.send(txn.add_to_working_set(u).await);
}
TxnMessage::SetWorkingSetItem(i, u, resp) => {
let _ = resp.send(txn.set_working_set_item(i, u).await);
}
TxnMessage::ClearWorkingSet(resp) => {
let _ = resp.send(txn.clear_working_set().await);
}
TxnMessage::IsEmpty(resp) => {
let _ = resp.send(txn.is_empty().await);
}
};
}
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/storage/send_wrapper/wrapper.rs | src/storage/send_wrapper/wrapper.rs | use super::actor::{ActorImpl, ActorMessage, TxnMessage};
use super::WrappedStorage;
use crate::errors::Result;
use crate::operation::Operation;
use crate::storage::{Storage, StorageTxn, TaskMap, VersionId};
use async_trait::async_trait;
use std::future::Future;
use tokio::sync::{mpsc, oneshot};
use uuid::Uuid;
/// Wrapper wraps a `!Send` storage implementation (specifically, implementing [`WrappedStorage`]
/// to make it Send ([`Storage`]).
///
/// Async runtimes like Tokio can move a future between threads to support efficient execution.
/// This requires the future to also implement `Send`. For most purposes, this is not an issue, but
/// a few types are `!Send` and any async function handling such types are also `!Send`.
///
/// On WASM, the wrapped storage runs in an async task, but not in a thread.
#[derive(Clone)]
pub(in crate::storage) struct Wrapper {
sender: mpsc::UnboundedSender<ActorMessage>,
}
impl Wrapper {
/// Create a new wrapper.
///
/// The constructor is called in a dedicated single-threaded runtime, and all operations on the
/// resulting WrappedStorage implementation will also occur in that thread. As such, neither
/// the constructor nor any of the [`WrappedStorage`] or [`super::traits::WrappedStorageTxn`]
/// methods are required to implement `Send`.
pub(in crate::storage) async fn new<S, FN, FUT>(constructor: FN) -> Result<Self>
where
S: WrappedStorage,
FUT: Future<Output = Result<S>>,
FN: FnOnce() -> FUT + Send + 'static,
{
let (sender, receiver) = mpsc::unbounded_channel();
// Use a oneshot channel to block until the thread has initialized.
let (init_sender, init_receiver): (oneshot::Sender<Result<_>>, _) = oneshot::channel();
let in_thread = async move |init_sender: oneshot::Sender<Result<_>>| {
match constructor().await {
Ok(storage) => {
// Send Ok back to the caller of `new` and then start the actor loop.
let _ = init_sender.send(Ok(()));
let mut actor = ActorImpl::new(storage, receiver);
actor.run().await;
}
Err(e) => {
// Send the initialization error back.
let _ = init_sender.send(Err(e));
}
}
};
// On WASM, we do not have threads, so spawn the constructor in the current thread.
#[cfg(target_arch = "wasm32")]
{
wasm_bindgen_futures::spawn_local(in_thread(init_sender));
}
// Otherwise, spawn a new thread, and within that a local Tokio RT that can handle !Send
// futures.
#[cfg(not(target_arch = "wasm32"))]
{
use std::thread;
use tokio::runtime;
thread::spawn(move || {
let rt = match runtime::Builder::new_current_thread().build() {
Ok(rt) => rt,
Err(e) => {
let _ = init_sender.send(Err(e.into()));
return;
}
};
rt.block_on(in_thread(init_sender));
});
}
// Wait until the thread sends its initialization result.
init_receiver.await??;
Ok(Self { sender })
}
}
#[async_trait]
impl Storage for Wrapper {
// Sends the BeginTxn message to the underlying ActorImpl. Now that the txn has
// begun, this async txn obj can be passed around and operated upon, as it
// communicates with the underlying sync txn.
async fn txn<'a>(&'a mut self) -> Result<Box<dyn StorageTxn + Send + 'a>> {
let (reply_tx, reply_rx) = oneshot::channel();
self.sender.send(ActorMessage::BeginTxn(reply_tx))?;
let txn_sender = reply_rx.await??;
Ok(Box::new(WrapperTxn::new(txn_sender)))
}
}
/// An async proxy for a transaction running on the sync actor thread.
struct WrapperTxn {
sender: mpsc::UnboundedSender<TxnMessage>,
committed: bool,
}
impl WrapperTxn {
fn new(sender: mpsc::UnboundedSender<TxnMessage>) -> Self {
Self {
sender,
committed: false,
}
}
async fn call<R, F>(&self, f: F) -> Result<R>
where
F: FnOnce(oneshot::Sender<Result<R>>) -> TxnMessage,
R: Send + 'static,
{
let (tx, rx) = oneshot::channel();
self.sender.send(f(tx))?;
rx.await?
}
}
impl Drop for WrapperTxn {
fn drop(&mut self) {
if !self.committed {
// If the transaction proxy is dropped without being committed,
// we send a Rollback message. There's nothing we can do if this
// fails, so ignore the result and do not use a channel to wait
// for a response.
let _ = self.sender.send(TxnMessage::Rollback);
}
}
}
#[async_trait]
impl StorageTxn for WrapperTxn {
async fn commit(&mut self) -> Result<()> {
let res = self.call(TxnMessage::Commit).await;
if res.is_ok() {
self.committed = true;
}
res
}
async fn get_task(&mut self, uuid: Uuid) -> Result<Option<TaskMap>> {
self.call(|tx| TxnMessage::GetTask(uuid, tx)).await
}
async fn create_task(&mut self, uuid: Uuid) -> Result<bool> {
self.call(|tx| TxnMessage::CreateTask(uuid, tx)).await
}
async fn set_task(&mut self, uuid: Uuid, task: TaskMap) -> Result<()> {
self.call(|tx| TxnMessage::SetTask(uuid, task, tx)).await
}
async fn delete_task(&mut self, uuid: Uuid) -> Result<bool> {
self.call(|tx| TxnMessage::DeleteTask(uuid, tx)).await
}
async fn get_pending_tasks(&mut self) -> Result<Vec<(Uuid, TaskMap)>> {
self.call(TxnMessage::GetPendingTasks).await
}
async fn all_tasks(&mut self) -> Result<Vec<(Uuid, TaskMap)>> {
self.call(TxnMessage::AllTasks).await
}
async fn all_task_uuids(&mut self) -> Result<Vec<Uuid>> {
self.call(TxnMessage::AllTaskUuids).await
}
async fn base_version(&mut self) -> Result<VersionId> {
self.call(TxnMessage::BaseVersion).await
}
async fn set_base_version(&mut self, version: VersionId) -> Result<()> {
self.call(|tx| TxnMessage::SetBaseVersion(version, tx))
.await
}
async fn get_task_operations(&mut self, uuid: Uuid) -> Result<Vec<Operation>> {
self.call(|tx| TxnMessage::GetTaskOperations(uuid, tx))
.await
}
async fn unsynced_operations(&mut self) -> Result<Vec<Operation>> {
self.call(TxnMessage::UnsyncedOperations).await
}
async fn num_unsynced_operations(&mut self) -> Result<usize> {
self.call(TxnMessage::NumUnsyncedOperations).await
}
async fn add_operation(&mut self, op: Operation) -> Result<()> {
self.call(|tx| TxnMessage::AddOperation(op, tx)).await
}
async fn remove_operation(&mut self, op: Operation) -> Result<()> {
self.call(|tx| TxnMessage::RemoveOperation(op, tx)).await
}
async fn sync_complete(&mut self) -> Result<()> {
self.call(TxnMessage::SyncComplete).await
}
async fn get_working_set(&mut self) -> Result<Vec<Option<Uuid>>> {
self.call(TxnMessage::GetWorkingSet).await
}
async fn add_to_working_set(&mut self, uuid: Uuid) -> Result<usize> {
self.call(|tx| TxnMessage::AddToWorkingSet(uuid, tx)).await
}
async fn set_working_set_item(&mut self, index: usize, uuid: Option<Uuid>) -> Result<()> {
self.call(|tx| TxnMessage::SetWorkingSetItem(index, uuid, tx))
.await
}
async fn clear_working_set(&mut self) -> Result<()> {
self.call(TxnMessage::ClearWorkingSet).await
}
async fn is_empty(&mut self) -> Result<bool> {
self.call(TxnMessage::IsEmpty).await
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/storage/send_wrapper/mod.rs | src/storage/send_wrapper/mod.rs | //! This module implements a wrapper around a non-Send version of the Storage and StorageTxn
//! traits, exposing implementations of those traits.
//!
//! The wrapper uses an actor model: the wrapper uses channels to communicate with a single
//! instance of the wrapped type, running in a dedicated thread.
mod actor;
mod traits;
pub(super) use traits::*;
mod wrapper;
pub(super) use wrapper::Wrapper;
#[cfg(test)]
mod test;
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/storage/send_wrapper/traits.rs | src/storage/send_wrapper/traits.rs | use crate::errors::Result;
use crate::operation::Operation;
use crate::storage::{TaskMap, VersionId};
use async_trait::async_trait;
use uuid::Uuid;
/// This trait is identical to [`crate::storage::StorageTxn`] except that it is not Send.
#[async_trait(?Send)]
pub(in crate::storage) trait WrappedStorageTxn {
async fn get_task(&mut self, uuid: Uuid) -> Result<Option<TaskMap>>;
async fn get_pending_tasks(&mut self) -> Result<Vec<(Uuid, TaskMap)>>;
async fn create_task(&mut self, uuid: Uuid) -> Result<bool>;
async fn set_task(&mut self, uuid: Uuid, task: TaskMap) -> Result<()>;
async fn delete_task(&mut self, uuid: Uuid) -> Result<bool>;
async fn all_tasks(&mut self) -> Result<Vec<(Uuid, TaskMap)>>;
async fn all_task_uuids(&mut self) -> Result<Vec<Uuid>>;
async fn base_version(&mut self) -> Result<VersionId>;
async fn set_base_version(&mut self, version: VersionId) -> Result<()>;
async fn get_task_operations(&mut self, uuid: Uuid) -> Result<Vec<Operation>>;
async fn unsynced_operations(&mut self) -> Result<Vec<Operation>>;
async fn num_unsynced_operations(&mut self) -> Result<usize>;
async fn add_operation(&mut self, op: Operation) -> Result<()>;
async fn remove_operation(&mut self, op: Operation) -> Result<()>;
async fn sync_complete(&mut self) -> Result<()>;
async fn get_working_set(&mut self) -> Result<Vec<Option<Uuid>>>;
async fn add_to_working_set(&mut self, uuid: Uuid) -> Result<usize>;
async fn set_working_set_item(&mut self, index: usize, uuid: Option<Uuid>) -> Result<()>;
async fn clear_working_set(&mut self) -> Result<()>;
#[allow(clippy::wrong_self_convention)] // mut is required here for storage access
async fn is_empty(&mut self) -> Result<bool> {
let mut empty = true;
empty = empty && self.all_tasks().await?.is_empty();
empty = empty && self.get_working_set().await? == vec![None];
empty = empty && self.base_version().await? == Uuid::nil();
empty = empty && self.unsynced_operations().await?.is_empty();
Ok(empty)
}
async fn commit(&mut self) -> Result<()>;
}
/// This trait is similar to [`crate::storage::Storage`] except that it is not Send.
#[async_trait(?Send)]
pub(in crate::storage) trait WrappedStorage {
async fn txn<'a>(&'a mut self) -> Result<Box<dyn WrappedStorageTxn + 'a>>;
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/server/op.rs | src/server/op.rs | use crate::operation::Operation;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
/// A SyncOp defines a single change to the task database, that can be synchronized
/// via a server.
#[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
pub(crate) enum SyncOp {
/// Create a new task.
///
/// On application, if the task already exists, the operation does nothing.
Create { uuid: Uuid },
/// Delete an existing task.
///
/// On application, if the task does not exist, the operation does nothing.
Delete { uuid: Uuid },
/// Update an existing task, setting the given property to the given value. If the value is
/// None, then the corresponding property is deleted.
///
/// If the given task does not exist, the operation does nothing.
Update {
uuid: Uuid,
property: String,
value: Option<String>,
timestamp: DateTime<Utc>,
},
}
use SyncOp::*;
impl SyncOp {
// Transform takes two operations A and B that happened concurrently and produces two
// operations A' and B' such that `apply(apply(S, A), B') = apply(apply(S, B), A')`. This
// function is used to serialize operations in a process similar to a Git "rebase".
//
// *
// / \
// op1 / \ op2
// / \
// * *
//
// this function "completes the diamond:
//
// * *
// \ /
// op2' \ / op1'
// \ /
// *
//
// such that applying op2' after op1 has the same effect as applying op1' after op2. This
// allows two different systems which have already applied op1 and op2, respectively, and thus
// reached different states, to return to the same state by applying op2' and op1',
// respectively.
pub(crate) fn transform(
operation1: SyncOp,
operation2: SyncOp,
) -> (Option<SyncOp>, Option<SyncOp>) {
match (&operation1, &operation2) {
// Two creations or deletions of the same uuid reach the same state, so there's no need
// for any further operations to bring the state together.
(&Create { uuid: uuid1 }, &Create { uuid: uuid2 }) if uuid1 == uuid2 => (None, None),
(&Delete { uuid: uuid1 }, &Delete { uuid: uuid2 }) if uuid1 == uuid2 => (None, None),
// Given a create and a delete of the same task, one of the operations is invalid: the
// create implies the task does not exist, but the delete implies it exists. Somewhat
// arbitrarily, we prefer the Create
(&Create { uuid: uuid1 }, &Delete { uuid: uuid2 }) if uuid1 == uuid2 => {
(Some(operation1), None)
}
(&Delete { uuid: uuid1 }, &Create { uuid: uuid2 }) if uuid1 == uuid2 => {
(None, Some(operation2))
}
// And again from an Update and a Create, prefer the Update
(&Update { uuid: uuid1, .. }, &Create { uuid: uuid2 }) if uuid1 == uuid2 => {
(Some(operation1), None)
}
(&Create { uuid: uuid1 }, &Update { uuid: uuid2, .. }) if uuid1 == uuid2 => {
(None, Some(operation2))
}
// Given a delete and an update, prefer the delete
(&Update { uuid: uuid1, .. }, &Delete { uuid: uuid2 }) if uuid1 == uuid2 => {
(None, Some(operation2))
}
(&Delete { uuid: uuid1 }, &Update { uuid: uuid2, .. }) if uuid1 == uuid2 => {
(Some(operation1), None)
}
// Two updates to the same property of the same task might conflict.
(
Update {
uuid: uuid1,
property: property1,
value: value1,
timestamp: timestamp1,
},
Update {
uuid: uuid2,
property: property2,
value: value2,
timestamp: timestamp2,
},
) if uuid1 == uuid2 && property1 == property2 => {
// if the value is the same, there's no conflict
if value1 == value2 {
(None, None)
} else if timestamp1 < timestamp2 {
// prefer the later modification
(None, Some(operation2))
} else {
// prefer the later modification or, if the modifications are the same,
// just choose one of them
(Some(operation1), None)
}
}
// anything else is not a conflict of any sort, so return the operations unchanged
(_, _) => (Some(operation1), Some(operation2)),
}
}
/// Convert the public Operation type into a SyncOp. `UndoPoint` operations are converted to
/// `None`.
pub(crate) fn from_op(op: Operation) -> Option<Self> {
match op {
Operation::Create { uuid } => Some(SyncOp::Create { uuid }),
Operation::Delete { uuid, .. } => Some(SyncOp::Delete { uuid }),
Operation::Update {
uuid,
property,
value,
timestamp,
..
} => Some(SyncOp::Update {
uuid,
property,
value,
timestamp,
}),
Operation::UndoPoint => None,
}
}
/// Convert a SyncOp to an [`Operation`], lossily.
///
/// The `Operation` type keeps old values to support undoing operations, but this information
/// is not preserved in `SyncOp`. This function makes those values (`old_task` for `Delete` and
/// `old_value` for `Update`) to empty.
pub(crate) fn into_op(self) -> Operation {
match self {
Create { uuid } => Operation::Create { uuid },
Delete { uuid } => Operation::Delete {
uuid,
old_task: crate::storage::TaskMap::new(),
},
Update {
uuid,
property,
value,
timestamp,
} => Operation::Update {
uuid,
property,
value,
timestamp,
old_value: None,
},
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::taskdb::TaskDb;
use crate::Operations;
use crate::{errors::Result, storage::inmemory::InMemoryStorage};
use chrono::{Duration, Utc};
use pretty_assertions::assert_eq;
#[test]
fn test_json_create() -> Result<()> {
let uuid = Uuid::new_v4();
let op = Create { uuid };
let json = serde_json::to_string(&op)?;
assert_eq!(json, format!(r#"{{"Create":{{"uuid":"{}"}}}}"#, uuid));
let deser: SyncOp = serde_json::from_str(&json)?;
assert_eq!(deser, op);
Ok(())
}
#[test]
fn test_json_delete() -> Result<()> {
let uuid = Uuid::new_v4();
let op = Delete { uuid };
let json = serde_json::to_string(&op)?;
assert_eq!(json, format!(r#"{{"Delete":{{"uuid":"{}"}}}}"#, uuid));
let deser: SyncOp = serde_json::from_str(&json)?;
assert_eq!(deser, op);
Ok(())
}
#[test]
fn test_json_update() -> Result<()> {
let uuid = Uuid::new_v4();
let timestamp = Utc::now();
let op = Update {
uuid,
property: "abc".into(),
value: Some("false".into()),
timestamp,
};
let json = serde_json::to_string(&op)?;
assert_eq!(
json,
format!(
r#"{{"Update":{{"uuid":"{}","property":"abc","value":"false","timestamp":"{:?}"}}}}"#,
uuid, timestamp,
)
);
let deser: SyncOp = serde_json::from_str(&json)?;
assert_eq!(deser, op);
Ok(())
}
#[test]
fn test_json_update_none() -> Result<()> {
let uuid = Uuid::new_v4();
let timestamp = Utc::now();
let op = Update {
uuid,
property: "abc".into(),
value: None,
timestamp,
};
let json = serde_json::to_string(&op)?;
assert_eq!(
json,
format!(
r#"{{"Update":{{"uuid":"{}","property":"abc","value":null,"timestamp":"{:?}"}}}}"#,
uuid, timestamp,
)
);
let deser: SyncOp = serde_json::from_str(&json)?;
assert_eq!(deser, op);
Ok(())
}
async fn test_transform(
setup: Option<SyncOp>,
o1: SyncOp,
o2: SyncOp,
exp1p: Option<SyncOp>,
exp2p: Option<SyncOp>,
) {
let (o1p, o2p) = SyncOp::transform(o1.clone(), o2.clone());
assert_eq!((&o1p, &o2p), (&exp1p, &exp2p));
// check that the two operation sequences have the same effect, enforcing the invariant of
// the transform function.
let mut db1 = TaskDb::new(InMemoryStorage::new());
let mut ops1 = Operations::new();
if let Some(o) = setup.clone() {
ops1.push(o.into_op());
}
ops1.push(o1.into_op());
if let Some(o) = o2p {
ops1.push(o.into_op());
}
db1.commit_operations(ops1, |_| false).await.unwrap();
let mut db2 = TaskDb::new(InMemoryStorage::new());
let mut ops2 = Operations::new();
if let Some(o) = setup {
ops2.push(o.into_op());
}
ops2.push(o2.into_op());
if let Some(o) = o1p {
ops2.push(o.into_op());
}
db2.commit_operations(ops2, |_| false).await.unwrap();
assert_eq!(db1.sorted_tasks().await, db2.sorted_tasks().await);
}
#[tokio::test]
async fn test_unrelated_create() {
let uuid1 = Uuid::new_v4();
let uuid2 = Uuid::new_v4();
test_transform(
None,
Create { uuid: uuid1 },
Create { uuid: uuid2 },
Some(Create { uuid: uuid1 }),
Some(Create { uuid: uuid2 }),
)
.await;
}
#[tokio::test]
async fn test_related_updates_different_props() {
let uuid = Uuid::new_v4();
let timestamp = Utc::now();
test_transform(
Some(Create { uuid }),
Update {
uuid,
property: "abc".into(),
value: Some("true".into()),
timestamp,
},
Update {
uuid,
property: "def".into(),
value: Some("false".into()),
timestamp,
},
Some(Update {
uuid,
property: "abc".into(),
value: Some("true".into()),
timestamp,
}),
Some(Update {
uuid,
property: "def".into(),
value: Some("false".into()),
timestamp,
}),
)
.await;
}
#[tokio::test]
async fn test_related_updates_same_prop() {
let uuid = Uuid::new_v4();
let timestamp1 = Utc::now();
let timestamp2 = timestamp1 + Duration::seconds(10);
test_transform(
Some(Create { uuid }),
Update {
uuid,
property: "abc".into(),
value: Some("true".into()),
timestamp: timestamp1,
},
Update {
uuid,
property: "abc".into(),
value: Some("false".into()),
timestamp: timestamp2,
},
None,
Some(Update {
uuid,
property: "abc".into(),
value: Some("false".into()),
timestamp: timestamp2,
}),
)
.await;
}
#[tokio::test]
async fn test_related_updates_same_prop_same_time() {
let uuid = Uuid::new_v4();
let timestamp = Utc::now();
test_transform(
Some(Create { uuid }),
Update {
uuid,
property: "abc".into(),
value: Some("true".into()),
timestamp,
},
Update {
uuid,
property: "abc".into(),
value: Some("false".into()),
timestamp,
},
Some(Update {
uuid,
property: "abc".into(),
value: Some("true".into()),
timestamp,
}),
None,
)
.await;
}
}
#[cfg(all(test, not(target_arch = "wasm32")))]
mod proptest_test {
use super::*;
use crate::storage::inmemory::InMemoryStorage;
use crate::storage::TaskMap;
use crate::taskdb::TaskDb;
use crate::Operations;
use chrono::Utc;
use pretty_assertions::assert_eq;
use proptest::prelude::*;
fn uuid_strategy() -> impl Strategy<Value = Uuid> {
prop_oneof![
Just(Uuid::parse_str("83a2f9ef-f455-4195-b92e-a54c161eebfc").unwrap()),
Just(Uuid::parse_str("56e0be07-c61f-494c-a54c-bdcfdd52d2a7").unwrap()),
Just(Uuid::parse_str("4b7ed904-f7b0-4293-8a10-ad452422c7b3").unwrap()),
Just(Uuid::parse_str("9bdd0546-07c8-4e1f-a9bc-9d6299f4773b").unwrap()),
]
}
fn operation_strategy() -> impl Strategy<Value = SyncOp> {
prop_oneof![
uuid_strategy().prop_map(|uuid| Create { uuid }),
uuid_strategy().prop_map(|uuid| Delete { uuid }),
(uuid_strategy(), "(title|project|status)").prop_map(|(uuid, property)| {
Update {
uuid,
property,
value: Some("true".into()),
timestamp: Utc::now(),
}
}),
]
}
proptest! {
#![proptest_config(ProptestConfig {
cases: 1024, .. ProptestConfig::default()
})]
#[test]
/// Check that, given two operations, their transform produces the same result, as
/// required by the invariant.
fn transform_invariant_holds(o1 in operation_strategy(), o2 in operation_strategy()) {
tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap()
.block_on(async {
let (o1p, o2p) = SyncOp::transform(o1.clone(), o2.clone());
let mut ops1 = Operations::new();
let mut ops2 = Operations::new();
let mut db1 = TaskDb::new(InMemoryStorage::new());
let mut db2 = TaskDb::new(InMemoryStorage::new());
// Ensure that any expected tasks already exist
for o in [&o1, &o2] {
match o {
Update { uuid, .. } | Delete { uuid } => {
ops1.push(Operation::Create { uuid: *uuid });
ops2.push(Operation::Create { uuid: *uuid });
}
_ => {},
}
}
ops1.push(o1.into_op());
ops2.push(o2.into_op());
if let Some(o2p) = o2p {
ops1.push(o2p.into_op());
}
if let Some(o1p) = o1p {
ops2.push(o1p.into_op());
}
db1.commit_operations(ops1, |_| false).await.unwrap();
db2.commit_operations(ops2, |_| false).await.unwrap();
assert_eq!(db1.sorted_tasks().await, db2.sorted_tasks().await);
});
}
}
#[test]
fn test_from_op_create() {
let uuid = Uuid::new_v4();
assert_eq!(
SyncOp::from_op(Operation::Create { uuid }),
Some(SyncOp::Create { uuid })
);
}
#[test]
fn test_from_op_delete() {
let uuid = Uuid::new_v4();
assert_eq!(
SyncOp::from_op(Operation::Delete {
uuid,
old_task: TaskMap::new()
}),
Some(SyncOp::Delete { uuid })
);
}
#[test]
fn test_from_op_update() {
let uuid = Uuid::new_v4();
let timestamp = Utc::now();
assert_eq!(
SyncOp::from_op(Operation::Update {
uuid,
property: "prop".into(),
old_value: Some("foo".into()),
value: Some("v".into()),
timestamp,
}),
Some(SyncOp::Update {
uuid,
property: "prop".into(),
value: Some("v".into()),
timestamp,
})
);
}
#[test]
fn test_from_op_undo_point() {
assert_eq!(SyncOp::from_op(Operation::UndoPoint), None);
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/server/config.rs | src/server/config.rs | use super::types::Server;
use crate::errors::Result;
#[cfg(feature = "server-aws")]
pub use crate::server::cloud::aws::AwsCredentials;
#[cfg(feature = "server-aws")]
use crate::server::cloud::aws::AwsService;
#[cfg(feature = "server-gcp")]
use crate::server::cloud::gcp::GcpService;
#[cfg(feature = "cloud")]
use crate::server::cloud::CloudServer;
#[cfg(feature = "server-local")]
use crate::server::local::LocalServer;
#[cfg(feature = "server-sync")]
use crate::server::sync::SyncServer;
#[cfg(feature = "server-local")]
use std::path::PathBuf;
#[cfg(feature = "server-sync")]
use uuid::Uuid;
/// The configuration for a replica's access to a sync server.
///
/// This enum is non-exhaustive, as users should only be constructing required
/// variants, not matching on it.
#[non_exhaustive]
pub enum ServerConfig {
/// A local task database, for situations with a single replica.
#[cfg(feature = "server-local")]
Local {
/// Path containing the server's DB
server_dir: PathBuf,
},
/// A remote taskchampion-sync-server instance
#[cfg(feature = "server-sync")]
Remote {
/// The base URL of the Sync server
url: String,
/// Client ID to identify and authenticate this replica to the server
client_id: Uuid,
/// Private encryption secret used to encrypt all data sent to the server. This can
/// be any suitably un-guessable string of bytes.
encryption_secret: Vec<u8>,
},
/// A Google Cloud Platform storage bucket.
#[cfg(feature = "server-gcp")]
Gcp {
/// Bucket in which to store the task data. This bucket must not be used for any other
/// purpose.
///
/// No special bucket configuration is reqiured.
bucket: String,
/// Path to a GCP credential file, in JSON format.
///
/// If `None`, then [Application Default
/// Credentials](https://cloud.google.com/docs/authentication/application-default-credentials)
/// are used. Typically these are associated with the user's Google Cloud account.
///
/// If `Some(path)`, then the path must be to a service account key. The service account
/// must have a role with the following permissions:
///
/// - storage.buckets.create
/// - storage.buckets.get
/// - storage.buckets.update
/// - storage.objects.create
/// - storage.objects.get
/// - storage.objects.list
/// - storage.objects.update
/// - storage.objects.delete
///
/// See the following GCP resources for more information:
/// - <https://cloud.google.com/docs/authentication#service-accounts>
/// - <https://cloud.google.com/iam/docs/keys-create-delete#creating>
credential_path: Option<String>,
/// Private encryption secret used to encrypt all data sent to the server. This can
/// be any suitably un-guessable string of bytes.
encryption_secret: Vec<u8>,
},
/// An Amazon Web Services storage bucket.
///
/// This configuration supports S3-compatibile services, by specifying an endpoint URL, forcing
/// path style URLs, and omitting the region. In particular, support has been confirmed for
/// minio. Contributions are welcome to document tested support for additional S3-compatible
/// services.
#[cfg(feature = "server-aws")]
Aws {
/// Region in which the bucket is located.
/// If `None`, the default region is used.
/// The `default` region is based on the AWS SDK
/// - <https://docs.aws.amazon.com/sdk-for-rust/latest/dg/region.html>
///
/// following, in order:
/// 1. `AWS_REGION` environment variable,
/// 2. `AWS_CONFIG_FILE` environment variable and the `region` in that file
/// 3. `AWS_PROFILE` variable and the region for that file in the config file
///
/// Failing all of those, we will default to `us-east-1`.
///
/// Note that instance metadata (IMDS) is not included here.
region: Option<String>,
/// Bucket in which to store the task data.
///
/// This bucket must not be used for any other purpose. No special bucket configuration is
/// required.
bucket: String,
/// An optional URL to specify the hostname of an s3-compatible service. When endpoint_url
/// is used, region is ignored by the underlying S3 client.
endpoint_url: Option<String>,
/// If set, force the S3 client to use path-style URLs instead of virtual-hosted-style
/// (subdomain) URLs for the bucket.
force_path_style: bool,
/// Credential configuration for access to the bucket.
credentials: AwsCredentials,
/// Private encryption secret used to encrypt all data sent to the server. This can
/// be any suitably un-guessable string of bytes.
encryption_secret: Vec<u8>,
},
}
impl ServerConfig {
/// Get a server based on this configuration
pub async fn into_server(self) -> Result<Box<dyn Server>> {
// This expression is unreachable if no server features are enabled.
#[allow(unreachable_code)]
Ok(match self {
#[cfg(feature = "server-local")]
ServerConfig::Local { server_dir } => Box::new(LocalServer::new(server_dir)?),
#[cfg(feature = "server-sync")]
ServerConfig::Remote {
url,
client_id,
encryption_secret,
} => Box::new(SyncServer::new(url, client_id, encryption_secret)?),
#[cfg(feature = "server-gcp")]
ServerConfig::Gcp {
bucket,
credential_path,
encryption_secret,
} => Box::new(
CloudServer::new(
GcpService::new(bucket, credential_path).await?,
encryption_secret,
)
.await?,
),
#[cfg(feature = "server-aws")]
ServerConfig::Aws {
region,
bucket,
credentials,
encryption_secret,
endpoint_url,
force_path_style,
} => Box::new(
CloudServer::new(
AwsService::new(region, bucket, credentials, endpoint_url, force_path_style)
.await?,
encryption_secret,
)
.await?,
),
})
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/server/test.rs | src/server/test.rs | use crate::errors::Result;
use crate::server::{
AddVersionResult, GetVersionResult, HistorySegment, Server, Snapshot, SnapshotUrgency,
VersionId, NIL_VERSION_ID,
};
use async_trait::async_trait;
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use uuid::Uuid;
struct Version {
version_id: VersionId,
parent_version_id: VersionId,
history_segment: HistorySegment,
}
/// TestServer implements the Server trait with a test implementation.
#[derive(Clone)]
pub(crate) struct TestServer(Arc<Mutex<Inner>>);
pub(crate) struct Inner {
latest_version_id: VersionId,
// NOTE: indexed by parent_version_id!
versions: HashMap<VersionId, Version>,
snapshot_urgency: SnapshotUrgency,
snapshot: Option<(VersionId, Snapshot)>,
}
impl TestServer {
/// A test server has no notion of clients, signatures, encryption, etc.
pub(crate) fn new() -> TestServer {
TestServer(Arc::new(Mutex::new(Inner {
latest_version_id: NIL_VERSION_ID,
versions: HashMap::new(),
snapshot_urgency: SnapshotUrgency::None,
snapshot: None,
})))
}
// feel free to add any test utility functions here
/// Get a boxed Server implementation referring to this TestServer
pub(crate) fn server(&self) -> Box<dyn Server> {
Box::new(self.clone())
}
pub(crate) fn set_snapshot_urgency(&self, urgency: SnapshotUrgency) {
let mut inner = self.0.lock().unwrap();
inner.snapshot_urgency = urgency;
}
/// Get the latest snapshot added to this server
pub(crate) fn snapshot(&self) -> Option<(VersionId, Snapshot)> {
let inner = self.0.lock().unwrap();
inner.snapshot.as_ref().cloned()
}
/// Delete a version from storage
pub(crate) fn delete_version(&mut self, parent_version_id: VersionId) {
let mut inner = self.0.lock().unwrap();
inner.versions.remove(&parent_version_id);
}
pub(crate) fn versions_len(&self) -> usize {
let inner = self.0.lock().unwrap();
inner.versions.len()
}
}
#[async_trait(?Send)]
impl Server for TestServer {
/// Add a new version. If the given version number is incorrect, this responds with the
/// appropriate version and expects the caller to try again.
async fn add_version(
&mut self,
parent_version_id: VersionId,
history_segment: HistorySegment,
) -> Result<(AddVersionResult, SnapshotUrgency)> {
let mut inner = self.0.lock().unwrap();
// no client lookup
// no signature validation
// check the parent_version_id for linearity
if inner.latest_version_id != NIL_VERSION_ID && parent_version_id != inner.latest_version_id
{
return Ok((
AddVersionResult::ExpectedParentVersion(inner.latest_version_id),
SnapshotUrgency::None,
));
}
// invent a new ID for this version
let version_id = Uuid::new_v4();
inner.versions.insert(
parent_version_id,
Version {
version_id,
parent_version_id,
history_segment,
},
);
inner.latest_version_id = version_id;
// reply with the configured urgency and reset it to None
let urgency = inner.snapshot_urgency;
inner.snapshot_urgency = SnapshotUrgency::None;
Ok((AddVersionResult::Ok(version_id), urgency))
}
/// Get a vector of all versions after `since_version`
async fn get_child_version(
&mut self,
parent_version_id: VersionId,
) -> Result<GetVersionResult> {
let inner = self.0.lock().unwrap();
if let Some(version) = inner.versions.get(&parent_version_id) {
Ok(GetVersionResult::Version {
version_id: version.version_id,
parent_version_id: version.parent_version_id,
history_segment: version.history_segment.clone(),
})
} else {
Ok(GetVersionResult::NoSuchVersion)
}
}
async fn add_snapshot(&mut self, version_id: VersionId, snapshot: Snapshot) -> Result<()> {
let mut inner = self.0.lock().unwrap();
// test implementation -- does not perform any validation
inner.snapshot = Some((version_id, snapshot));
Ok(())
}
async fn get_snapshot(&mut self) -> Result<Option<(VersionId, Snapshot)>> {
let inner = self.0.lock().unwrap();
Ok(inner.snapshot.clone())
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/server/http.rs | src/server/http.rs | //! Common support for HTTP client
//!
//! This contains some utilities to make using `reqwest` easier, including getting
//! the correct TLS certificate store.
use crate::errors::Result;
#[cfg(all(
not(target_arch = "wasm32"),
not(any(feature = "tls-native-roots", feature = "tls-webpki-roots"))
))]
compile_error!(
"Either feature \"tls-native-roots\" or \"tls-webpki-roots\" must be enabled for HTTP client support."
);
static USER_AGENT: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"),);
/// Create a new [`reqwest::Client`] with configuration appropriate to this library.
#[cfg(not(target_arch = "wasm32"))]
pub(super) fn client() -> Result<reqwest::Client> {
use std::time::Duration;
let client = reqwest::Client::builder()
.use_rustls_tls()
.user_agent(USER_AGENT)
.connect_timeout(Duration::from_secs(10))
.read_timeout(Duration::from_secs(60));
// Select native or webpki certs depending on features
let client = client.tls_built_in_root_certs(false);
#[cfg(feature = "tls-native-roots")]
let client = client.tls_built_in_native_certs(true);
#[cfg(all(feature = "tls-webpki-roots", not(feature = "tls-native-roots")))]
let client = client.tls_built_in_webpki_certs(true);
Ok(client.build()?)
}
/// Create a new [`reqwest::Client`] with configuration appropriate to this library.
///
/// On WASM, this uses the Fetch API.
#[cfg(target_arch = "wasm32")]
pub(super) fn client() -> Result<reqwest::Client> {
let client = reqwest::Client::builder().user_agent(USER_AGENT);
// Timeouts and TLS cannot be configured via the Fetch API.
Ok(client.build()?)
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/server/encryption.rs | src/server/encryption.rs | /// This module implements the encryption specified in the sync-protocol
/// document.
use crate::errors::{Error, Result};
use ring::{aead, pbkdf2, rand, rand::SecureRandom};
use uuid::Uuid;
const PBKDF2_ITERATIONS: u32 = 600000;
const ENVELOPE_VERSION: u8 = 1;
const AAD_LEN: usize = 17;
const TASK_APP_ID: u8 = 1;
/// An Cryptor stores a secret and allows sealing and unsealing. It derives a key from the secret,
/// which takes a nontrivial amount of time, so it should be created once and re-used for the given
/// context.
#[derive(Clone)]
pub(super) struct Cryptor {
key: aead::LessSafeKey,
rng: rand::SystemRandom,
}
impl Cryptor {
pub(super) fn new(salt: impl AsRef<[u8]>, secret: &Secret) -> Result<Self> {
Ok(Cryptor {
key: Self::derive_key(salt, secret)?,
rng: rand::SystemRandom::new(),
})
}
/// Generate a suitable random salt.
#[cfg(any(test, feature = "cloud"))] // server-sync uses the clientId as the salt.
pub(super) fn gen_salt() -> Result<Vec<u8>> {
let rng = rand::SystemRandom::new();
let mut salt = [0u8; 16];
rng.fill(&mut salt)
.map_err(|e| anyhow::anyhow!("error generating random salt: {e}"))?;
Ok(salt.to_vec())
}
/// Derive a key as specified for version 1. Note that this may take 10s of ms.
fn derive_key(salt: impl AsRef<[u8]>, secret: &Secret) -> Result<aead::LessSafeKey> {
let mut key_bytes = vec![0u8; aead::CHACHA20_POLY1305.key_len()];
pbkdf2::derive(
pbkdf2::PBKDF2_HMAC_SHA256,
std::num::NonZeroU32::new(PBKDF2_ITERATIONS).unwrap(),
salt.as_ref(),
secret.as_ref(),
&mut key_bytes,
);
let unbound_key = aead::UnboundKey::new(&aead::CHACHA20_POLY1305, &key_bytes)
.map_err(|e| anyhow::anyhow!("error while creating AEAD key: {e}"))?;
Ok(aead::LessSafeKey::new(unbound_key))
}
/// Encrypt the given payload.
pub(super) fn seal(&self, payload: Unsealed) -> Result<Sealed> {
let Unsealed {
version_id,
mut payload,
} = payload;
let mut nonce_buf = [0u8; aead::NONCE_LEN];
self.rng
.fill(&mut nonce_buf)
.map_err(|e| anyhow::anyhow!("error generating random nonce: {e}"))?;
let nonce = aead::Nonce::assume_unique_for_key(nonce_buf);
let aad = self.make_aad(version_id);
let tag = self
.key
.seal_in_place_separate_tag(nonce, aad, &mut payload)
.map_err(|e| anyhow::anyhow!("error while sealing: {e}"))?;
payload.extend_from_slice(tag.as_ref());
let env = Envelope {
nonce: &nonce_buf,
payload: payload.as_ref(),
};
Ok(Sealed {
version_id,
payload: env.to_bytes(),
})
}
/// Decrypt the given payload, verifying it was created for the given version_id
pub(super) fn unseal(&self, payload: Sealed) -> Result<Unsealed> {
let Sealed {
version_id,
payload,
} = payload;
let env = Envelope::from_bytes(&payload)?;
let mut nonce = [0u8; aead::NONCE_LEN];
nonce.copy_from_slice(env.nonce);
let nonce = aead::Nonce::assume_unique_for_key(nonce);
let aad = self.make_aad(version_id);
let mut payload = env.payload.to_vec();
let plaintext = self
.key
.open_in_place(nonce, aad, payload.as_mut())
.map_err(|e| anyhow::anyhow!("error while unsealing encrypted value: {e}"))?;
Ok(Unsealed {
version_id,
payload: plaintext.to_vec(),
})
}
fn make_aad(&self, version_id: Uuid) -> aead::Aad<[u8; AAD_LEN]> {
let mut aad = [0u8; AAD_LEN];
aad[0] = TASK_APP_ID;
aad[1..].copy_from_slice(version_id.as_bytes());
aead::Aad::from(aad)
}
}
/// Secret represents a secret key as used for encryption and decryption.
pub(super) struct Secret(pub(super) Vec<u8>);
impl From<Vec<u8>> for Secret {
fn from(bytes: Vec<u8>) -> Self {
Self(bytes)
}
}
impl AsRef<[u8]> for Secret {
fn as_ref(&self) -> &[u8] {
&self.0
}
}
/// Envelope for the data stored on the server, containing the information
/// required to decrypt.
#[derive(Debug, PartialEq, Eq)]
struct Envelope<'a> {
nonce: &'a [u8],
payload: &'a [u8],
}
impl<'a> Envelope<'a> {
fn from_bytes(buf: &'a [u8]) -> Result<Envelope<'a>> {
if buf.len() <= 1 + aead::NONCE_LEN {
return Err(Error::Server(String::from("envelope is too small")));
}
let version = buf[0];
if version != ENVELOPE_VERSION {
return Err(Error::Server(format!(
"unrecognized encryption envelope version {version}"
)));
}
Ok(Envelope {
nonce: &buf[1..1 + aead::NONCE_LEN],
payload: &buf[1 + aead::NONCE_LEN..],
})
}
fn to_bytes(&self) -> Vec<u8> {
let mut buf = Vec::with_capacity(1 + self.nonce.len() + self.payload.len());
buf.push(ENVELOPE_VERSION);
buf.extend_from_slice(self.nonce);
buf.extend_from_slice(self.payload);
buf
}
}
/// A unsealed payload with an attached version_id. The version_id is used to
/// validate the context of the payload on unsealing.
pub(super) struct Unsealed {
pub(super) version_id: Uuid,
pub(super) payload: Vec<u8>,
}
impl From<Unsealed> for Vec<u8> {
fn from(val: Unsealed) -> Self {
val.payload
}
}
/// An encrypted payload
pub(super) struct Sealed {
pub(super) version_id: Uuid,
pub(super) payload: Vec<u8>,
}
impl AsRef<[u8]> for Sealed {
fn as_ref(&self) -> &[u8] {
self.payload.as_ref()
}
}
impl From<Sealed> for Vec<u8> {
fn from(val: Sealed) -> Self {
val.payload
}
}
#[cfg(test)]
mod test {
use super::*;
use pretty_assertions::assert_eq;
fn make_salt() -> Vec<u8> {
Cryptor::gen_salt().unwrap()
}
#[test]
fn envelope_round_trip() {
let env = Envelope {
nonce: &[2; 12],
payload: b"HELLO",
};
let bytes = env.to_bytes();
let env2 = Envelope::from_bytes(&bytes).unwrap();
assert_eq!(env, env2);
}
#[test]
fn envelope_bad_version() {
let env = Envelope {
nonce: &[2; 12],
payload: b"HELLO",
};
let mut bytes = env.to_bytes();
bytes[0] = 99;
assert!(Envelope::from_bytes(&bytes).is_err());
}
#[test]
fn envelope_too_short() {
let env = Envelope {
nonce: &[2; 12],
payload: b"HELLO",
};
let bytes = env.to_bytes();
let bytes = &bytes[..10];
assert!(Envelope::from_bytes(bytes).is_err());
}
#[test]
fn round_trip() {
let version_id = Uuid::new_v4();
let payload = b"HISTORY REPEATS ITSELF".to_vec();
let secret = Secret(b"SEKRIT".to_vec());
let cryptor = Cryptor::new(make_salt(), &secret).unwrap();
let unsealed = Unsealed {
version_id,
payload: payload.clone(),
};
let sealed = cryptor.seal(unsealed).unwrap();
let unsealed = cryptor.unseal(sealed).unwrap();
assert_eq!(unsealed.payload, payload);
assert_eq!(unsealed.version_id, version_id);
}
#[test]
fn round_trip_bad_key() {
let version_id = Uuid::new_v4();
let payload = b"HISTORY REPEATS ITSELF".to_vec();
let salt = make_salt();
let secret = Secret(b"SEKRIT".to_vec());
let cryptor = Cryptor::new(&salt, &secret).unwrap();
let unsealed = Unsealed {
version_id,
payload,
};
let sealed = cryptor.seal(unsealed).unwrap();
let secret = Secret(b"DIFFERENT_SECRET".to_vec());
let cryptor = Cryptor::new(&salt, &secret).unwrap();
assert!(cryptor.unseal(sealed).is_err());
}
#[test]
fn round_trip_bad_version() {
let version_id = Uuid::new_v4();
let payload = b"HISTORY REPEATS ITSELF".to_vec();
let secret = Secret(b"SEKRIT".to_vec());
let cryptor = Cryptor::new(make_salt(), &secret).unwrap();
let unsealed = Unsealed {
version_id,
payload,
};
let mut sealed = cryptor.seal(unsealed).unwrap();
sealed.version_id = Uuid::new_v4(); // change the version_id
assert!(cryptor.unseal(sealed).is_err());
}
#[test]
fn round_trip_bad_salt() {
let version_id = Uuid::new_v4();
let payload = b"HISTORY REPEATS ITSELF".to_vec();
let secret = Secret(b"SEKRIT".to_vec());
let cryptor = Cryptor::new(make_salt(), &secret).unwrap();
let unsealed = Unsealed {
version_id,
payload,
};
let sealed = cryptor.seal(unsealed).unwrap();
let cryptor = Cryptor::new(make_salt(), &secret).unwrap();
assert!(cryptor.unseal(sealed).is_err());
}
mod externally_valid {
// validate data generated by generate-test-data.py. The intent is to
// validate that this format matches the specification by implementing
// the specification in a second language
use super::*;
use pretty_assertions::assert_eq;
/// The values in generate-test-data.py
fn defaults() -> (Uuid, Vec<u8>, Vec<u8>) {
let version_id = Uuid::parse_str("b0517957-f912-4d49-8330-f612e73030c4").unwrap();
let encryption_secret = b"b4a4e6b7b811eda1dc1a2693ded".to_vec();
let client_id = Uuid::parse_str("0666d464-418a-4a08-ad53-6f15c78270cd").unwrap();
let salt = client_id.as_bytes().to_vec();
(version_id, salt, encryption_secret)
}
#[test]
fn good() {
let (version_id, salt, encryption_secret) = defaults();
let sealed = Sealed {
version_id,
payload: include_bytes!("test-good.data").to_vec(),
};
let cryptor = Cryptor::new(salt, &Secret(encryption_secret)).unwrap();
let unsealed = cryptor.unseal(sealed).unwrap();
assert_eq!(unsealed.payload, b"SUCCESS");
assert_eq!(unsealed.version_id, version_id);
}
#[test]
fn bad_version_id() {
let (version_id, salt, encryption_secret) = defaults();
let sealed = Sealed {
version_id,
payload: include_bytes!("test-bad-version-id.data").to_vec(),
};
let cryptor = Cryptor::new(salt, &Secret(encryption_secret)).unwrap();
assert!(cryptor.unseal(sealed).is_err());
}
#[test]
fn bad_salt() {
let (version_id, salt, encryption_secret) = defaults();
let sealed = Sealed {
version_id,
payload: include_bytes!("test-bad-client-id.data").to_vec(),
};
let cryptor = Cryptor::new(salt, &Secret(encryption_secret)).unwrap();
assert!(cryptor.unseal(sealed).is_err());
}
#[test]
fn bad_secret() {
let (version_id, salt, encryption_secret) = defaults();
let sealed = Sealed {
version_id,
payload: include_bytes!("test-bad-secret.data").to_vec(),
};
let cryptor = Cryptor::new(salt, &Secret(encryption_secret)).unwrap();
assert!(cryptor.unseal(sealed).is_err());
}
#[test]
fn bad_version() {
let (version_id, salt, encryption_secret) = defaults();
let sealed = Sealed {
version_id,
payload: include_bytes!("test-bad-version.data").to_vec(),
};
let cryptor = Cryptor::new(salt, &Secret(encryption_secret)).unwrap();
assert!(cryptor.unseal(sealed).is_err());
}
#[test]
fn bad_app_id() {
let (version_id, salt, encryption_secret) = defaults();
let sealed = Sealed {
version_id,
payload: include_bytes!("test-bad-app-id.data").to_vec(),
};
let cryptor = Cryptor::new(salt, &Secret(encryption_secret)).unwrap();
assert!(cryptor.unseal(sealed).is_err());
}
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/server/types.rs | src/server/types.rs | use crate::errors::Result;
use async_trait::async_trait;
use uuid::Uuid;
/// Versions are referred to with UUIDs.
pub type VersionId = Uuid;
/// The distinguished value for "no version"
pub const NIL_VERSION_ID: VersionId = Uuid::nil();
/// A segment in the history of this task database, in the form of a sequence of operations. This
/// data is pre-encoded, and from the protocol level appears as a sequence of bytes.
pub type HistorySegment = Vec<u8>;
/// A snapshot of the state of the task database. This is encoded by the taskdb implementation
/// and treated as a sequence of bytes by the server implementation.
pub type Snapshot = Vec<u8>;
/// AddVersionResult is the response type from [`crate::server::Server::add_version`].
#[derive(Debug, PartialEq, Eq)]
pub enum AddVersionResult {
/// OK, version added with the given ID
Ok(VersionId),
/// Rejected; expected a version with the given parent version
ExpectedParentVersion(VersionId),
}
/// SnapshotUrgency indicates how much the server would like this replica to send a snapshot.
#[derive(PartialEq, Debug, Clone, Copy, Eq, PartialOrd, Ord)]
pub enum SnapshotUrgency {
/// Don't need a snapshot right now.
None,
/// A snapshot would be good, but can wait for other replicas to provide it.
Low,
/// A snapshot is needed right now.
High,
}
/// A version as downloaded from the server
#[derive(Debug, PartialEq, Eq)]
pub enum GetVersionResult {
/// No such version exists
NoSuchVersion,
/// The requested version
Version {
version_id: VersionId,
parent_version_id: VersionId,
history_segment: HistorySegment,
},
}
/// A value implementing this trait can act as a server against which a replica can sync.
#[async_trait(?Send)]
pub trait Server {
/// Add a new version.
///
/// This must ensure that the new version is the only version with the given
/// `parent_version_id`, and that all versions form a single parent-child chain. Inductively,
/// this means that if there are any versions on the server, then `parent_version_id` must be
/// the only version that does not already have a child.
async fn add_version(
&mut self,
parent_version_id: VersionId,
history_segment: HistorySegment,
) -> Result<(AddVersionResult, SnapshotUrgency)>;
/// Get the version with the given parent VersionId
async fn get_child_version(&mut self, parent_version_id: VersionId)
-> Result<GetVersionResult>;
/// Add a snapshot on the server
async fn add_snapshot(&mut self, version_id: VersionId, snapshot: Snapshot) -> Result<()>;
async fn get_snapshot(&mut self) -> Result<Option<(VersionId, Snapshot)>>;
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/server/mod.rs | src/server/mod.rs | /*!
This module defines the client interface to TaskChampion sync servers.
It defines a [trait](crate::server::Server) for servers, and implements both local and remote
servers.
Typical uses of this crate do not interact directly with this module; [`ServerConfig`] is
sufficient. However, users who wish to implement their own server interfaces can implement the
traits defined here and pass the result to [`Replica`](crate::Replica).
*/
#[cfg(test)]
pub(crate) mod test;
mod config;
mod op;
mod types;
#[cfg(feature = "encryption")]
mod encryption;
#[cfg(feature = "http")]
mod http;
#[cfg(feature = "server-local")]
mod local;
#[cfg(feature = "server-sync")]
mod sync;
#[cfg(feature = "cloud")]
mod cloud;
pub use config::*;
pub use types::*;
pub(crate) use op::SyncOp;
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/server/cloud/gcp.rs | src/server/cloud/gcp.rs | use super::service::{validate_object_name, ObjectInfo, Service};
use crate::errors::Result;
use crate::server::{cloud::iter::AsyncObjectIterator, http};
use async_trait::async_trait;
use google_cloud_storage::client::google_cloud_auth::credentials::CredentialsFile;
use google_cloud_storage::client::{Client, ClientConfig};
use google_cloud_storage::http::error::ErrorResponse;
use google_cloud_storage::http::objects;
use google_cloud_storage::http::Error as GcsError;
#[cfg(not(any(feature = "tls-native-roots", feature = "tls-webpki-roots")))]
compile_error!(
"Either feature \"tls-native-roots\" or \"tls-webpki-roots\" must be enabled for TLS support."
);
/// A [`Service`] implementation based on the Google Cloud Storage service.
pub(in crate::server) struct GcpService {
client: Client,
bucket: String,
}
/// Determine whether the given result contains an HTTP error with the given code.
fn is_http_error<T>(query: u16, res: &std::result::Result<T, GcsError>) -> bool {
match res {
// Errors from RPC's.
Err(GcsError::Response(ErrorResponse { code, .. })) => *code == query,
// Errors from reqwest (downloads, uploads).
Err(GcsError::HttpClient(e)) => e.status().map(|s| s.as_u16()) == Some(query),
_ => false,
}
}
impl GcpService {
pub(in crate::server) async fn new(
bucket: String,
credential_path: Option<String>,
) -> Result<Self> {
#![allow(unused)]
let mut config = ClientConfig {
http: Some(reqwest_middleware::ClientBuilder::new(http::client()?).build()),
..ClientConfig::default()
};
// Set up the credentials after the HTTP client has been configured, so that the client is used to
// validate the credentials.
if let Some(credentials) = credential_path {
let credentials = CredentialsFile::new_from_file(credentials).await?;
config = config.with_credentials(credentials).await?
} else {
config = config.with_auth().await?
};
Ok(Self {
client: Client::new(config),
bucket,
})
}
}
#[async_trait]
impl Service for GcpService {
async fn put(&mut self, name: &str, value: &[u8]) -> Result<()> {
validate_object_name(name);
let upload_type =
objects::upload::UploadType::Simple(objects::upload::Media::new(name.to_string()));
self.client
.upload_object(
&objects::upload::UploadObjectRequest {
bucket: self.bucket.clone(),
..Default::default()
},
value.to_vec(),
&upload_type,
)
.await?;
Ok(())
}
async fn get(&mut self, name: &str) -> Result<Option<Vec<u8>>> {
validate_object_name(name);
let download_res = self
.client
.download_object(
&objects::get::GetObjectRequest {
bucket: self.bucket.clone(),
object: name.to_string(),
..Default::default()
},
&objects::download::Range::default(),
)
.await;
if is_http_error(404, &download_res) {
Ok(None)
} else {
Ok(Some(download_res?))
}
}
async fn del(&mut self, name: &str) -> Result<()> {
validate_object_name(name);
let del_res = self
.client
.delete_object(&objects::delete::DeleteObjectRequest {
bucket: self.bucket.clone(),
object: name.to_string(),
..Default::default()
})
.await;
if !is_http_error(404, &del_res) {
del_res?;
}
Ok(())
}
async fn list<'a>(&'a mut self, prefix: &'a str) -> Box<dyn AsyncObjectIterator + Send + 'a> {
validate_object_name(prefix);
Box::new(ObjectIterator {
service: self,
prefix: prefix.to_string(),
last_response: None,
next_index: 0,
})
}
async fn compare_and_swap(
&mut self,
name: &str,
existing_value: Option<Vec<u8>>,
new_value: Vec<u8>,
) -> Result<bool> {
validate_object_name(name);
let get_res = self
.client
.get_object(&objects::get::GetObjectRequest {
bucket: self.bucket.clone(),
object: name.to_string(),
..Default::default()
})
.await;
// Determine the object's generation. See https://cloud.google.com/storage/docs/metadata#generation-number
let generation = if is_http_error(404, &get_res) {
// If a value was expected, that expectation has not been met.
if existing_value.is_some() {
return Ok(false);
}
// Generation 0 indicates that the object does not yet exist.
0
} else {
get_res?.generation
};
// If the file existed, then verify its contents.
if generation > 0 {
let data = self
.client
.download_object(
&objects::get::GetObjectRequest {
bucket: self.bucket.clone(),
object: name.to_string(),
// Fetch the same generation.
generation: Some(generation),
..Default::default()
},
&objects::download::Range::default(),
)
.await?;
if Some(data) != existing_value {
return Ok(false);
}
}
// When testing, an object named "$pfx-racing-delete" is deleted between get_object and
// put_object.
#[cfg(test)]
if name.ends_with("-racing-delete") {
println!("deleting object {name}");
let del_res = self
.client
.delete_object(&objects::delete::DeleteObjectRequest {
bucket: self.bucket.clone(),
object: name.to_string(),
..Default::default()
})
.await;
if !is_http_error(404, &del_res) {
del_res?;
}
}
// When testing, if the object is named "$pfx-racing-put" then the value "CHANGED" is
// written to it between get_object and put_object.
#[cfg(test)]
if name.ends_with("-racing-put") {
println!("changing object {name}");
let upload_type =
objects::upload::UploadType::Simple(objects::upload::Media::new(name.to_string()));
self.client
.upload_object(
&objects::upload::UploadObjectRequest {
bucket: self.bucket.clone(),
..Default::default()
},
b"CHANGED".to_vec(),
&upload_type,
)
.await?;
}
// Finally, put the new value with a condition that the generation hasn't changed.
let upload_type =
objects::upload::UploadType::Simple(objects::upload::Media::new(name.to_string()));
let upload_res = self
.client
.upload_object(
&objects::upload::UploadObjectRequest {
bucket: self.bucket.clone(),
if_generation_match: Some(generation),
..Default::default()
},
new_value.to_vec(),
&upload_type,
)
.await;
if is_http_error(412, &upload_res) {
// A 412 indicates the precondition was not satisfied: the given generation
// is no longer the latest.
Ok(false)
} else {
upload_res?;
Ok(true)
}
}
}
/// An Iterator returning names of objects from `list_objects`.
///
/// This handles response pagination by fetching one page at a time.
struct ObjectIterator<'a> {
service: &'a mut GcpService,
prefix: String,
last_response: Option<objects::list::ListObjectsResponse>,
next_index: usize,
}
impl ObjectIterator<'_> {
async fn fetch_batch(&mut self) -> Result<()> {
let mut page_token = None;
if let Some(ref resp) = self.last_response {
page_token.clone_from(&resp.next_page_token);
}
self.last_response = Some(
self.service
.client
.list_objects(&objects::list::ListObjectsRequest {
bucket: self.service.bucket.clone(),
prefix: Some(self.prefix.clone()),
page_token,
#[cfg(test)] // For testing, use a small page size.
max_results: Some(6),
..Default::default()
})
.await?,
);
self.next_index = 0;
Ok(())
}
}
#[async_trait]
impl AsyncObjectIterator for ObjectIterator<'_> {
async fn next(&mut self) -> Option<Result<ObjectInfo>> {
// If the iterator is just starting, fetch the first response.
if self.last_response.is_none() {
if let Err(e) = self.fetch_batch().await {
return Some(Err(e));
}
}
if let Some(ref result) = self.last_response {
if let Some(ref items) = result.items {
if self.next_index < items.len() {
// Return a result from the existing response.
let obj = &items[self.next_index];
self.next_index += 1;
// It's unclear when `time_created` would be None, so default to 0 in that case
// or when the timestamp is not a valid u64 (before 1970).
let creation = obj.time_created.map(|t| t.unix_timestamp()).unwrap_or(0);
let creation: u64 = creation.try_into().unwrap_or(0);
return Some(Ok(ObjectInfo {
name: obj.name.clone(),
creation,
}));
} else if result.next_page_token.is_some() {
// Fetch the next page and try again.
if let Err(e) = self.fetch_batch().await {
return Some(Err(e));
}
return self.next().await;
}
}
}
None
}
}
#[cfg(test)]
mod tests {
use super::*;
/// Make a service if `GCP_TEST_BUCKET` is set, as well as a function to put a unique prefix on
/// an object name, so that tests do not interfere with one another.
///
/// Set up this bucket with a lifecyle policy to delete objects with age > 1 day. While passing
/// tests should correctly clean up after themselves, failing tests may leave objects in the
/// bucket.
///
/// When the environment variable is not set, this returns false and the test does not run.
/// Note that the Rust test runner will still show "ok" for the test, as there is no way to
/// indicate anything else.
async fn make_service() -> Option<GcpService> {
let Ok(bucket) = std::env::var("GCP_TEST_BUCKET") else {
return None;
};
let Ok(credential_path) = std::env::var("GCP_TEST_CREDENTIAL_PATH") else {
return None;
};
Some(
GcpService::new(bucket, Some(credential_path))
.await
.unwrap(),
)
}
crate::server::cloud::test::service_tests!(make_service().await);
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/server/cloud/aws.rs | src/server/cloud/aws.rs | use super::service::{validate_object_name, ObjectInfo, Service};
use crate::{
errors::Result,
server::{cloud::iter::AsyncObjectIterator, http},
};
use async_trait::async_trait;
use aws_config::{
environment::EnvironmentVariableRegionProvider,
meta::region::RegionProviderChain,
profile::{self, ProfileFileCredentialsProvider},
BehaviorVersion, Region,
};
use aws_credential_types::Credentials;
use aws_sdk_s3::{
self as s3,
config::http::{HttpRequest, HttpResponse},
error::ProvideErrorMetadata,
operation::{get_object::GetObjectOutput, list_objects_v2::ListObjectsV2Output},
};
use aws_smithy_runtime_api::{
client::{
http::{HttpClient, HttpConnector, HttpConnectorFuture, SharedHttpConnector},
result::ConnectorError,
retries::ErrorKind,
},
http::{Headers, StatusCode},
};
use reqwest::Method;
/// A [`Service`] implementation based on AWS S3.
pub(in crate::server) struct AwsService {
client: s3::Client,
bucket: String,
}
/// Credential configuration for access to the AWS service.
///
/// These credentials must have a least the following policy, with BUCKETNAME replaced by
/// the bucket name:
///
/// ```json
/// {
/// "Version": "2012-10-17",
/// "Statement": [
/// {
/// "Sid": "TaskChampion",
/// "Effect": "Allow",
/// "Action": [
/// "s3:PutObject",
/// "s3:GetObject",
/// "s3:ListBucket",
/// "s3:DeleteObject"
/// ],
/// "Resource": [
/// "arn:aws:s3:::BUCKETNAME",
/// "arn:aws:s3:::BUCKETNAME/*"
/// ]
/// }
/// ]
/// }
/// ```
#[non_exhaustive]
pub enum AwsCredentials {
/// A pair of access key ID and secret access key.
AccessKey {
access_key_id: String,
secret_access_key: String,
},
/// A named profile from the profile files in the user's home directory.
Profile { profile_name: String },
/// Use the [default credential
/// sources](https://docs.rs/aws-config/latest/aws_config/default_provider/credentials/struct.DefaultCredentialsChain.html),
/// such as enviroment variables, the default profile, or the task/instance IAM role.
Default,
}
impl AwsService {
pub(in crate::server) async fn new(
region: Option<String>,
bucket: String,
creds: AwsCredentials,
endpoint_url: Option<String>,
force_path_style: bool,
) -> Result<Self> {
let mut config_provider = aws_config::defaults(BehaviorVersion::latest());
match creds {
AwsCredentials::AccessKey {
access_key_id,
secret_access_key,
} => {
config_provider = config_provider.credentials_provider(Credentials::from_keys(
access_key_id,
secret_access_key,
None,
));
}
AwsCredentials::Profile { profile_name } => {
config_provider = config_provider.credentials_provider(
ProfileFileCredentialsProvider::builder()
.profile_name(profile_name)
.build(),
);
}
AwsCredentials::Default => {
// Just use the default.
}
}
config_provider = config_provider.http_client(ReqwestClient::new()?);
// This will:
// 1. If a region is set, use it
// 2. If No region is set, try environment variables and profiles.
// (Instance metadata (IMDS) is not used because it requires an HTTPS client)
// 3. If no region is discovered, hardcode to "us-east-1"
//
// If there's a region specified we will always prefer that
// Next, the default provider chain will look at things like AWS_REGION environment
// variables, the profile file, etc.
//
// we provide the hardcoded fallback because a region MUST be set
// but, a region being set does _not_ make sense if endpoint_url is set, because
// the endpoint URL would include a region.
// realistically, endpoint_url is more useful for S3-compatible services
// and would not use a separate region in addition to endpoint_url.
config_provider = config_provider.region(
RegionProviderChain::first_try(region.map(Region::new))
.or_else(EnvironmentVariableRegionProvider::new())
.or_else(profile::region::Builder::default().build())
.or_else(Region::new("us-east-1")),
);
if let Some(url) = endpoint_url {
config_provider = config_provider.endpoint_url(url)
};
let config = config_provider.load().await;
let s3_config = aws_sdk_s3::config::Builder::from(&config)
.force_path_style(force_path_style)
.build();
let client = aws_sdk_s3::Client::from_conf(s3_config);
Ok(Self { client, bucket })
}
}
/// An [`HttpClient`] implementation wrapping Reqwest.
#[derive(Debug)]
struct ReqwestClient {
connector: SharedHttpConnector,
}
impl ReqwestClient {
fn new() -> Result<Self> {
let client = http::client()?;
Ok(ReqwestClient {
connector: SharedHttpConnector::new(ReqwestConnector { client }),
})
}
}
impl HttpClient for ReqwestClient {
fn http_connector(
&self,
_settings: &aws_smithy_runtime_api::client::http::HttpConnectorSettings,
_components: &aws_sdk_s3::config::RuntimeComponents,
) -> SharedHttpConnector {
self.connector.clone()
}
}
/// An [`HttpConnector`] implementation wrapping Reqwest.
#[derive(Debug)]
struct ReqwestConnector {
client: reqwest::Client,
}
/// Convert a [`reqwest::Error`] into an AWS ['ConnectorError'].
fn reqwest_error_to_connector(err: reqwest::Error) -> ConnectorError {
let mut kind = None;
if err.is_connect() || err.is_timeout() {
kind = Some(ErrorKind::TransientError);
}
if err.is_request() {
kind = Some(ErrorKind::ClientError);
}
ConnectorError::other(Box::new(err), kind)
}
impl HttpConnector for ReqwestConnector {
fn call(&self, request: HttpRequest) -> HttpConnectorFuture {
use std::str::FromStr;
// This `from_str` only fails if it cannot allocate. For the methods we will
// see from the AWS SDK, this will not occur.
let method = Method::from_str(request.method()).unwrap();
let mut reqwest_req = self.client.request(method, request.uri());
for (h, v) in request.headers() {
reqwest_req = reqwest_req.header(h, v);
}
if let Some(b) = request.into_body().bytes().map(|b| b.to_vec()) {
reqwest_req = reqwest_req.body(b);
}
HttpConnectorFuture::new(async {
let reqwest_resp = reqwest_req
.send()
.await
.map_err(reqwest_error_to_connector)?;
let status_code = reqwest_resp.status().as_u16();
// Gather headers before consuming reqwest_resp to get the body.
let mut aws_headers = Headers::new();
for (h, v) in reqwest_resp.headers() {
if let Ok(v) = v.to_str() {
aws_headers.insert(h.to_string(), v.to_owned());
}
}
// Collect the body in memory
let body = reqwest_resp
.bytes()
.await
.map_err(reqwest_error_to_connector)?;
// Combine all of that into an AWS HttpResponse
let mut resp = HttpResponse::new(
StatusCode::try_from(status_code)
.map_err(|e| ConnectorError::other(Box::new(e), None))?,
body.into(),
);
*resp.headers_mut() = aws_headers;
Ok(resp)
})
}
}
/// Convert an error that can be converted to `s3::Error` (but not [`crate::Error`]) into
/// `s3::Error`. One such error is SdkError, which has type parameters that are difficult to
/// constrain in order to write `From<SdkError<..>> for crate::Error`.
fn aws_err<E: Into<s3::Error>>(err: E) -> s3::Error {
err.into()
}
/// Convert a `NoSuchKey` error into `Ok(None)`, and `Ok(..)` into `Ok(Some(..))`.
#[allow(clippy::result_large_err)] // s3::Error is large, it's not our fault!
fn if_key_exists<T>(
res: std::result::Result<T, s3::Error>,
) -> std::result::Result<Option<T>, s3::Error> {
res
// convert Result<T, E> to Result<Option<T>, E>
.map(Some)
// handle NoSuchKey
.or_else(|err| match err {
s3::Error::NoSuchKey(_) => Ok(None),
err => Err(err),
})
}
/// Get the body of a `get_object` result.
async fn get_body(get_res: GetObjectOutput) -> Result<Vec<u8>> {
Ok(get_res.body.collect().await?.to_vec())
}
#[async_trait]
impl Service for AwsService {
async fn put(&mut self, name: &str, value: &[u8]) -> Result<()> {
validate_object_name(name);
self.client
.put_object()
.bucket(self.bucket.clone())
.key(name)
.body(value.to_vec().into())
.send()
.await
.map_err(aws_err)?;
Ok(())
}
async fn get(&mut self, name: &str) -> Result<Option<Vec<u8>>> {
validate_object_name(name);
let Some(get_res) = if_key_exists(
self.client
.get_object()
.bucket(self.bucket.clone())
.key(name)
.send()
.await
.map_err(aws_err),
)?
else {
return Ok(None);
};
Ok(Some(get_body(get_res).await?))
}
async fn del(&mut self, name: &str) -> Result<()> {
validate_object_name(name);
self.client
.delete_object()
.bucket(self.bucket.clone())
.key(name)
.send()
.await
.map_err(aws_err)?;
Ok(())
}
async fn list<'a>(&'a mut self, prefix: &'a str) -> Box<dyn AsyncObjectIterator + Send + 'a> {
validate_object_name(prefix);
Box::new(ObjectIterator {
service: self,
prefix: prefix.to_string(),
last_response: None,
next_index: 0,
})
}
async fn compare_and_swap(
&mut self,
name: &str,
existing_value: Option<Vec<u8>>,
new_value: Vec<u8>,
) -> Result<bool> {
validate_object_name(name);
let get_res = if_key_exists(
self.client
.get_object()
.bucket(self.bucket.clone())
.key(name)
.send()
.await
.map_err(aws_err),
)?;
// Check the expectation and gather the e_tag for the existing value.
let e_tag;
if let Some(get_res) = get_res {
// If a value was not expected but one exists, that expectation has not been met.
let Some(existing_value) = existing_value else {
return Ok(false);
};
e_tag = get_res.e_tag.clone();
let body = get_body(get_res).await?;
if body != existing_value {
return Ok(false);
}
} else {
// If a value was expected but none exists, that expectation has not been met.
if existing_value.is_some() {
return Ok(false);
}
e_tag = None;
};
// When testing, an object named "$pfx-racing-delete" is deleted between get_object and
// put_object.
#[cfg(test)]
if name.ends_with("-racing-delete") {
println!("deleting object {name}");
self.client
.delete_object()
.bucket(self.bucket.clone())
.key(name)
.send()
.await
.map_err(aws_err)?;
}
// When testing, if the object is named "$pfx-racing-put" then the value "CHANGED" is
// written to it between get_object and put_object.
#[cfg(test)]
if name.ends_with("-racing-put") {
println!("changing object {name}");
self.client
.put_object()
.bucket(self.bucket.clone())
.key(name)
.body(b"CHANGED".to_vec().into())
.send()
.await
.map_err(aws_err)?;
}
// Try to put the object, using an appropriate conditional.
let mut put_builder = self.client.put_object();
if let Some(e_tag) = e_tag {
put_builder = put_builder.if_match(e_tag);
} else {
put_builder = put_builder.if_none_match("*");
}
match put_builder
.bucket(self.bucket.clone())
.key(name)
.body(new_value.to_vec().into())
.send()
.await
.map_err(aws_err)
{
Ok(_) => Ok(true),
// If the key disappears, S3 returns 404.
Err(err) if err.code() == Some("NoSuchKey") => Ok(false),
// PreconditionFailed occurs if the file changed unexpectedly
Err(err) if err.code() == Some("PreconditionFailed") => Ok(false),
// Docs describe this as a "conflicting operation" with no further details.
Err(err) if err.code() == Some("ConditionalRequestConflict") => Ok(false),
Err(e) => Err(e.into()),
}
}
}
/// An Iterator returning names of objects from `list_objects_v2`.
///
/// This handles response pagination by fetching one page at a time.
struct ObjectIterator<'a> {
service: &'a mut AwsService,
prefix: String,
last_response: Option<ListObjectsV2Output>,
next_index: usize,
}
impl ObjectIterator<'_> {
async fn fetch_batch(&mut self) -> Result<()> {
let mut continuation_token = None;
if let Some(ref resp) = self.last_response {
continuation_token.clone_from(&resp.next_continuation_token);
}
// Use the default max_keys in production, but a smaller value in testing so
// we can test the pagination.
#[cfg(test)]
let max_keys = Some(8);
#[cfg(not(test))]
let max_keys = None;
self.last_response = None;
self.last_response = Some(
self.service
.client
.list_objects_v2()
.bucket(self.service.bucket.clone())
.prefix(self.prefix.clone())
.set_max_keys(max_keys)
.set_continuation_token(continuation_token)
.send()
.await
.map_err(aws_err)?,
);
self.next_index = 0;
Ok(())
}
}
#[async_trait]
impl AsyncObjectIterator for ObjectIterator<'_> {
async fn next(&mut self) -> Option<Result<ObjectInfo>> {
// If the iterator is just starting, fetch the first response.
if self.last_response.is_none() {
if let Err(e) = self.fetch_batch().await {
return Some(Err(e));
}
}
if let Some(ref result) = self.last_response {
if let Some(ref items) = result.contents {
if self.next_index < items.len() {
// Return a result from the existing response.
let obj = &items[self.next_index];
self.next_index += 1;
// Use `last_modified` as a proxy for creation time, since most objects
// are not updated after they are created.
let creation = obj.last_modified.map(|t| t.secs()).unwrap_or(0);
let creation: u64 = creation.try_into().unwrap_or(0);
let name = obj.key.as_ref().expect("object has no key").clone();
return Some(Ok(ObjectInfo {
name: name.clone(),
creation,
}));
} else if result.next_continuation_token.is_some() {
// Fetch the next page and try again.
if let Err(e) = self.fetch_batch().await {
return Some(Err(e));
}
return self.next().await;
}
}
}
None
}
}
#[cfg(test)]
mod tests {
use super::*;
/// Make a service.
///
/// The service is only created if the following environment variables are set:
/// * `AWS_TEST_REGION` - region containing the test bucket
/// * `AWS_TEST_BUCKET` - test bucket
/// * `AWS_TEST_ACCESS_KEY_ID` / `AWS_TEST_SECRET_ACCESS_KEY` - credentials for access to the
/// bucket.
///
/// Additionally, the following environment variables are optional and control
/// the created S3 client
/// * `AWS_TEST_ENDPOINT_URL` - endpoint URL to use, potentially for an S3-compatible API,
/// or potentially just to ensure this feature works with AWS (e.g. s3.us-east-1.amazonaws.com)
/// * `AWS_TEST_FORCE_PATH_STYLE` - if set to "1" or "true", uses "path-style" S3
/// urls ($AWS_TEST_ENDPOINT_URL/$AWS_TEST_BUCKET vs $AWS_TEST_BUCKET.$AWS_TEST_ENDPOINT_URL)
///
///
/// Set up the bucket with a lifecyle policy to delete objects with age > 1 day. While passing
/// tests should correctly clean up after themselves, failing tests may leave objects in the
/// bucket.
///
/// When the environment variables are not set, this returns false and the test does not run.
/// Note that the Rust test runner will still show "ok" for the test, as there is no way to
/// indicate anything else.
async fn make_service() -> Option<AwsService> {
let fail_if_not_set = std::env::var("AWS_FAIL_IF_NOT_SET").is_ok();
let Ok(region) = std::env::var("AWS_TEST_REGION") else {
if fail_if_not_set {
panic!("AWS_TEST_REGION not set");
}
return None;
};
let Ok(bucket) = std::env::var("AWS_TEST_BUCKET") else {
if fail_if_not_set {
panic!("AWS_TEST_BUCKET not set");
}
return None;
};
let Ok(access_key_id) = std::env::var("AWS_TEST_ACCESS_KEY_ID") else {
if fail_if_not_set {
panic!("AWS_TEST_ACCESS_KEY_ID not set");
}
return None;
};
let Ok(secret_access_key) = std::env::var("AWS_TEST_SECRET_ACCESS_KEY") else {
if fail_if_not_set {
panic!("AWS_TEST_SECRET_ACCESS_KEY not set");
}
return None;
};
let endpoint_url = std::env::var("AWS_TEST_ENDPOINT_URL").ok();
let force_path_style = std::env::var("AWS_TEST_FORCE_PATH_STYLE")
.map(|f| f == "1" || f == "true")
.unwrap_or(false);
Some(
AwsService::new(
Some(region),
bucket,
AwsCredentials::AccessKey {
access_key_id,
secret_access_key,
},
endpoint_url,
force_path_style,
)
.await
.unwrap(),
)
}
crate::server::cloud::test::service_tests!(make_service().await);
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/server/cloud/test.rs | src/server/cloud/test.rs | //! Tests for cloud services.
//!
//! This tests that the various service methods, and especially `compare_and_swap`,
//! satisfy their requirements.
//!
//! The server must also satisfy:
//! - `list`: Use a page size of 6 for n `#[cfg(test)]`
//! - `compare_and_swap`: if the object name ends with `-racing-delete`, delete the
//! object between the "compare" and "swap" phases of the operation
//! - `compare_and_swap`: if the object name ends with `-racing-put`, put the
//! object between the "compare" and "swap" phases of the operation, with value
//! `b"CHANGED"`.
use crate::errors::Result;
use crate::server::cloud::service::Service;
use pretty_assertions::assert_eq;
/// Define a collection of cloud service tests that apply to all service implementations.
macro_rules! service_tests {
($service:expr) => {
fn make_pfx() -> impl Fn(&str) -> String {
let prefix = uuid::Uuid::new_v4();
move |n: &_| format!("{}-{}", prefix.as_simple(), n)
}
#[tokio::test]
async fn put_and_get() -> $crate::errors::Result<()> {
let Some(service) = $service else {
return Ok(());
};
$crate::server::cloud::test::put_and_get(service, make_pfx()).await
}
#[tokio::test]
async fn get_missing() -> $crate::errors::Result<()> {
let Some(service) = $service else {
return Ok(());
};
$crate::server::cloud::test::get_missing(service, make_pfx()).await
}
#[tokio::test]
async fn del() -> $crate::errors::Result<()> {
let Some(service) = $service else {
return Ok(());
};
$crate::server::cloud::test::del(service, make_pfx()).await
}
#[tokio::test]
async fn del_missing() -> $crate::errors::Result<()> {
let Some(service) = $service else {
return Ok(());
};
$crate::server::cloud::test::del_missing(service, make_pfx()).await
}
#[tokio::test]
async fn list() -> $crate::errors::Result<()> {
let Some(service) = $service else {
return Ok(());
};
$crate::server::cloud::test::list(service, make_pfx()).await
}
#[tokio::test]
async fn compare_and_swap_create() -> $crate::errors::Result<()> {
let Some(service) = $service else {
return Ok(());
};
$crate::server::cloud::test::compare_and_swap_create(service, make_pfx()).await
}
#[tokio::test]
async fn compare_and_swap_matches() -> $crate::errors::Result<()> {
let Some(service) = $service else {
return Ok(());
};
$crate::server::cloud::test::compare_and_swap_matches(service, make_pfx()).await
}
#[tokio::test]
async fn compare_and_swap_expected_no_file() -> $crate::errors::Result<()> {
let Some(service) = $service else {
return Ok(());
};
$crate::server::cloud::test::compare_and_swap_expected_no_file(service, make_pfx())
.await
}
#[tokio::test]
async fn compare_and_swap_old_value() -> $crate::errors::Result<()> {
let Some(service) = $service else {
return Ok(());
};
$crate::server::cloud::test::compare_and_swap_old_value(service, make_pfx()).await
}
#[tokio::test]
async fn compare_and_swap_changes() -> $crate::errors::Result<()> {
let Some(service) = $service else {
return Ok(());
};
$crate::server::cloud::test::compare_and_swap_changes(service, make_pfx()).await
}
#[tokio::test]
async fn compare_and_swap_disappears() -> $crate::errors::Result<()> {
let Some(service) = $service else {
return Ok(());
};
$crate::server::cloud::test::compare_and_swap_disappears(service, make_pfx()).await
}
#[tokio::test]
async fn compare_and_swap_appears() -> $crate::errors::Result<()> {
let Some(service) = $service else {
return Ok(());
};
$crate::server::cloud::test::compare_and_swap_appears(service, make_pfx()).await
}
};
}
pub(crate) use service_tests;
pub(super) async fn put_and_get(mut svc: impl Service, pfx: impl Fn(&str) -> String) -> Result<()> {
svc.put(&pfx("testy"), b"foo").await?;
let got = svc.get(&pfx("testy")).await?;
assert_eq!(got, Some(b"foo".to_vec()));
// Clean up.
svc.del(&pfx("testy")).await?;
Ok(())
}
pub(super) async fn get_missing(mut svc: impl Service, pfx: impl Fn(&str) -> String) -> Result<()> {
let got = svc.get(&pfx("testy")).await?;
assert_eq!(got, None);
Ok(())
}
pub(super) async fn del(mut svc: impl Service, pfx: impl Fn(&str) -> String) -> Result<()> {
svc.put(&pfx("testy"), b"data").await?;
svc.del(&pfx("testy")).await?;
let got = svc.get(&pfx("testy")).await?;
assert_eq!(got, None);
Ok(())
}
pub(super) async fn del_missing(mut svc: impl Service, pfx: impl Fn(&str) -> String) -> Result<()> {
// Deleting an object that does not exist is not an error.
assert!(svc.del(&pfx("testy")).await.is_ok());
Ok(())
}
pub(super) async fn list(mut svc: impl Service, pfx: impl Fn(&str) -> String) -> Result<()> {
let mut names: Vec<_> = (0..20).map(|i| pfx(&format!("pp-{i:02}"))).collect();
names.sort();
// Create 20 objects that will be listed.
for n in &names {
svc.put(n, b"data").await?;
}
// And another object that should not be included in the list.
svc.put(&pfx("xxx"), b"data").await?;
let mut got_names: Vec<_> = {
let mut got_names = Vec::new();
let prefix = &pfx("pp-");
let mut iterator = svc.list(prefix).await;
while let Some(res) = iterator.next().await {
match res {
Ok(o) => got_names.push(o.name),
Err(e) => return Err(e),
}
}
got_names
};
got_names.sort();
assert_eq!(got_names, names);
// Clean up.
for n in got_names {
svc.del(&n).await?;
}
svc.del(&pfx("xxx")).await?;
Ok(())
}
pub(super) async fn compare_and_swap_create(
mut svc: impl Service,
pfx: impl Fn(&str) -> String,
) -> Result<()> {
assert!(
svc.compare_and_swap(&pfx("testy"), None, b"bar".to_vec())
.await?
);
let got = svc.get(&pfx("testy")).await?;
assert_eq!(got, Some(b"bar".to_vec()));
// Clean up.
svc.del(&pfx("testy")).await?;
Ok(())
}
pub(super) async fn compare_and_swap_matches(
mut svc: impl Service,
pfx: impl Fn(&str) -> String,
) -> Result<()> {
// Create the existing file, with two different values over time.
svc.put(&pfx("testy"), b"foo1").await?;
svc.put(&pfx("testy"), b"foo2").await?;
// A compare_and_swap for the latest value succeeds.
assert!(
svc.compare_and_swap(&pfx("testy"), Some(b"foo2".to_vec()), b"bar".to_vec())
.await?
);
let got = svc.get(&pfx("testy")).await?;
assert_eq!(got, Some(b"bar".to_vec()));
// Clean up.
svc.del(&pfx("testy")).await?;
Ok(())
}
pub(super) async fn compare_and_swap_expected_no_file(
mut svc: impl Service,
pfx: impl Fn(&str) -> String,
) -> Result<()> {
svc.put(&pfx("testy"), b"foo1").await?;
assert!(
!svc.compare_and_swap(&pfx("testy"), None, b"bar".to_vec())
.await?
);
let got = svc.get(&pfx("testy")).await?;
assert_eq!(got, Some(b"foo1".to_vec()));
// Clean up.
svc.del(&pfx("testy")).await?;
Ok(())
}
pub(super) async fn compare_and_swap_old_value(
mut svc: impl Service,
pfx: impl Fn(&str) -> String,
) -> Result<()> {
// Create the existing file, with two different values over time.
svc.put(&pfx("testy"), b"foo1").await?;
svc.put(&pfx("testy"), b"foo2").await?;
// A compare_and_swap for the old value fails.
assert!(
!svc.compare_and_swap(&pfx("testy"), Some(b"foo1".to_vec()), b"bar".to_vec())
.await?
);
let got = svc.get(&pfx("testy")).await?;
assert_eq!(got, Some(b"foo2".to_vec()));
// Clean up.
svc.del(&pfx("testy")).await?;
Ok(())
}
pub(super) async fn compare_and_swap_changes(
mut svc: impl Service,
pfx: impl Fn(&str) -> String,
) -> Result<()> {
// Create the existing object, but since it is named "racing-put" its value will change
// just before the `put_object` call. This tests the "compare" part of `compare_and_swap`.
svc.put(&pfx("racing-put"), b"foo1").await?;
assert!(
!svc.compare_and_swap(&pfx("racing-put"), Some(b"foo1".to_vec()), b"bar".to_vec())
.await?
);
let got = svc.get(&pfx("racing-put")).await?;
assert_eq!(got, Some(b"CHANGED".to_vec()));
Ok(())
}
pub(super) async fn compare_and_swap_disappears(
mut svc: impl Service,
pfx: impl Fn(&str) -> String,
) -> Result<()> {
// Create the existing object, but since it is named "racing-delete" it will disappear just
// before the `put_object` call. This tests the case where the object exists when
// `compare_and_swap` calls `get_object` but is deleted when it calls `put_object`.
svc.put(&pfx("racing-delete"), b"foo1").await?;
assert!(
!svc.compare_and_swap(
&pfx("racing-delete"),
Some(b"foo1".to_vec()),
b"bar".to_vec()
)
.await?
);
let got = svc.get(&pfx("racing-delete")).await?;
assert_eq!(got, None);
Ok(())
}
pub(super) async fn compare_and_swap_appears(
mut svc: impl Service,
pfx: impl Fn(&str) -> String,
) -> Result<()> {
// Create the existing object, but since it is named "racing-put" the object will appear just
// before the `put_object` call. This tests the case where the object does not exist when
// `compare_and_swap` calls `get_object`, but does exist when it calls `put_object`.
assert!(
!svc.compare_and_swap(&pfx("racing-put"), None, b"bar".to_vec())
.await?
);
let got = svc.get(&pfx("racing-put")).await?;
assert_eq!(got, Some(b"CHANGED".to_vec()));
Ok(())
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/server/cloud/service.rs | src/server/cloud/service.rs | use crate::{errors::Result, server::cloud::iter::AsyncObjectIterator};
use async_trait::async_trait;
/// Information about an object as returned from `Service::list`
pub(in crate::server) struct ObjectInfo {
/// Name of the object.
pub(in crate::server) name: String,
/// Creation time of the object, in seconds since the UNIX epoch.
pub(in crate::server) creation: u64,
}
/// An abstraction of a cloud-storage service.
///
/// The underlying cloud storage is assumed to be a map from object names to object values,
/// similar to a HashMap, with the addition of a compare-and-swap operation. Object names
/// are always simple strings from the character set `[a-zA-Z0-9-]`, no more than 100 characters
/// in length.
#[async_trait]
pub(in crate::server) trait Service {
/// Put an object into cloud storage. If the object exists, it is overwritten.
async fn put(&mut self, name: &str, value: &[u8]) -> Result<()>;
/// Get an object from cloud storage, or None if the object does not exist.
async fn get(&mut self, name: &str) -> Result<Option<Vec<u8>>>;
/// Delete an object. Does nothing if the object does not exist.
async fn del(&mut self, name: &str) -> Result<()>;
/// Enumerate objects with the given prefix.
async fn list<'a>(&'a mut self, prefix: &'a str) -> Box<dyn AsyncObjectIterator + Send + 'a>;
/// Compare the existing object's value with `existing_value`, and replace with `new_value`
/// only if the values match. Returns true if the replacement occurred.
async fn compare_and_swap(
&mut self,
name: &str,
existing_value: Option<Vec<u8>>,
new_value: Vec<u8>,
) -> Result<bool>;
}
/// Enforce the limits on `name` described for [`Service`].
///
/// Names are generated by `server.rs` in a way that is not affected by user input, so this only
/// asserts in debug builds.
pub(in crate::server) fn validate_object_name(name: &str) {
debug_assert!(name.is_ascii());
debug_assert!(name.len() <= 100);
debug_assert!(name.chars().all(|c| c.is_ascii_alphanumeric() || c == '-'));
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/server/cloud/mod.rs | src/server/cloud/mod.rs | /*!
* Support for cloud-service-backed sync.
*
* All of these operate using a similar approach, with specific patterns of object names. The
* process of adding a new version requires a compare-and-swap operation that sets a new version
* as the "latest" only if the existing "latest" has the expected value. This ensures a continuous
* chain of versions, even if multiple replicas attempt to sync at the same time.
*/
#[cfg(target_arch = "wasm32")]
compile_error!("Cloud servers are not available on WASM targets");
mod iter;
mod server;
mod service;
pub(in crate::server) use server::CloudServer;
#[cfg(feature = "server-gcp")]
pub(in crate::server) mod gcp;
#[cfg(feature = "server-aws")]
pub(in crate::server) mod aws;
#[cfg(all(test, feature = "cloud"))]
mod test;
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/server/cloud/server.rs | src/server/cloud/server.rs | use super::service::{ObjectInfo, Service};
use crate::errors::{Error, Result};
use crate::server::encryption::{Cryptor, Sealed, Unsealed};
use crate::server::{
AddVersionResult, GetVersionResult, HistorySegment, Server, Snapshot, SnapshotUrgency,
VersionId,
};
use async_trait::async_trait;
use ring::rand;
use std::collections::{HashMap, HashSet};
#[cfg(test)]
use std::future::Future;
#[cfg(test)]
use std::pin::Pin;
#[cfg(not(test))]
use std::time::{SystemTime, UNIX_EPOCH};
use uuid::Uuid;
#[cfg(test)]
type InterceptFn<S> =
Box<dyn for<'a> FnOnce(&'a mut S) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> + Send>;
/// Implement the Server trait for a cloud service implemented by [`Service`].
///
/// This type implements a TaskChampion server over a basic object-storage service. It encapsulates
/// all of the logic to ensure a linear sequence of versions, encrypt and decrypt data, and clean
/// up old data so that this can be supported on a variety of cloud services.
///
/// ## Encryption
///
/// The encryption scheme is described in `sync-protocol.md`. The salt value used for key
/// derivation is stored in "salt", which is created if it does not exist. Object names are not
/// encrypted, by the nature of key/value stores. Since the content of the "latest" object can
/// usually be inferred from object names, it, too, is not encrypted.
///
/// ## Object Organization
///
/// UUIDs emebedded in names and values appear in their "simple" form: lower-case hexadecimal with
/// no hyphens.
///
/// Versions are stored as objects with name `v-PARENT-VERSION` where `PARENT` is the parent
/// version's UUID and `VERSION` is the version's UUID. The object value is the raw history
/// segment. These objects are created with simple `put` requests, as the name uniquely identifies
/// the content.
///
/// The latest version is stored as an object with name "latest", containing the UUID of the latest
/// version. This file is updated with `compare_and_swap`. After a successful update of this
/// object, the version is considered committed.
///
/// Since there are no strong constraints on creation of version objects, it is possible
/// to have multiple such files with the same `PARENT`. However, only one such object will be
/// contained in the chain of parent-child relationships beginning with the value in "latest".
/// All other objects are invalid and not visible outside this type.
///
/// Snapshots are stored as objects with name `s-VERSION` where `VERSION` is the version at which
/// the snapshot was made. These objects are created with simple `put` requests, as any snapshot
/// for a given version is functionally equivalent to any other.
///
/// ## Cleanup
///
/// Cleanup of unnecessary data is performed probabalistically after `add_version`, although any
/// errors are ignored.
///
/// - Any versions not reachable from "latest" and which cannot become "latest" are deleted.
/// - Any snapshots older than the most recent are deleted.
/// - Any versions older than [`MAX_VERSION_AGE_SECS`] which are incorporated into a snapshot
/// are deleted.
pub(in crate::server) struct CloudServer<SVC: Service> {
service: SVC,
/// The Cryptor supporting encryption and decryption of objects in this server.
cryptor: Cryptor,
/// The probability (0..255) that this run will perform cleanup.
cleanup_probability: u8,
/// For testing, a function that is called in the middle of `add_version` to simulate
/// a concurrent change in the service.
#[cfg(test)]
add_version_intercept: Option<InterceptFn<SVC>>,
}
const LATEST: &str = "latest";
const DEFAULT_CLEANUP_PROBABILITY: u8 = 13; // about 5%
#[cfg(not(test))]
const MAX_VERSION_AGE_SECS: u64 = 3600 * 24 * 180; // about half a year
fn version_to_bytes(v: VersionId) -> Vec<u8> {
v.as_simple().to_string().into_bytes()
}
impl<SVC: Service> CloudServer<SVC> {
pub(in crate::server) async fn new(
mut service: SVC,
encryption_secret: Vec<u8>,
) -> Result<Self> {
let salt = Self::get_salt(&mut service).await?;
let cryptor = Cryptor::new(salt, &encryption_secret.into())?;
Ok(Self {
service,
cryptor,
cleanup_probability: DEFAULT_CLEANUP_PROBABILITY,
#[cfg(test)]
add_version_intercept: None,
})
}
/// Get the salt value stored in the service, creating a new random one if necessary.
async fn get_salt(service: &mut SVC) -> Result<Vec<u8>> {
const SALT_NAME: &str = "salt";
loop {
if let Some(salt) = service.get(SALT_NAME).await? {
return Ok(salt);
}
service
.compare_and_swap(SALT_NAME, None, Cryptor::gen_salt()?)
.await?;
}
}
/// Generate an object name for the given parent and child versions.
fn version_name(parent_version_id: &VersionId, child_version_id: &VersionId) -> String {
format!(
"v-{}-{}",
parent_version_id.as_simple(),
child_version_id.as_simple()
)
}
/// Parse a version name as generated by `version_name`, returning None if the name does not
/// have a valid format.
fn parse_version_name(name: &str) -> Option<(VersionId, VersionId)> {
debug_assert!(name.is_ascii());
let dash = 2 + 32;
if name.len() != 2 + 32 + 1 + 32 || !name.starts_with("v-") || &name[dash..dash + 1] != "-"
{
return None;
}
let Ok(parent_version_id) = VersionId::try_parse(&name[2..2 + 32]) else {
return None;
};
let Ok(child_version_id) = VersionId::try_parse(&name[2 + 32 + 1..]) else {
return None;
};
Some((parent_version_id, child_version_id))
}
/// Generate an object name for a snapshot at the given version.
fn snapshot_name(version_id: &VersionId) -> String {
format!("s-{}", version_id.as_simple())
}
/// Parse a snapshot name as generated by `snapshot_name`, returning None if the name does not
/// have a valid format.
fn parse_snapshot_name(name: &str) -> Option<VersionId> {
if name.len() != 2 + 32 || !name.starts_with("s-") {
return None;
}
let Ok(version_id) = VersionId::try_parse(&name[2..2 + 32]) else {
return None;
};
Some(version_id)
}
/// Generate a random integer in (0..255) for use in probabalistic decisions.
fn randint(&self) -> Result<u8> {
use rand::SecureRandom;
let mut randint = [0u8];
rand::SystemRandom::new()
.fill(&mut randint)
.map_err(|_| Error::Server("Random number generator failure".into()))?;
Ok(randint[0])
}
/// Get the version from "latest", or None if the object does not exist. This always fetches a fresh
/// value from storage.
async fn get_latest(&mut self) -> Result<Option<VersionId>> {
let Some(latest) = self.service.get(LATEST).await? else {
return Ok(None);
};
let latest = VersionId::try_parse_ascii(&latest)
.map_err(|_| Error::Server("'latest' object contains invalid data".into()))?;
Ok(Some(latest))
}
/// Get the possible child versions of the given parent version, based only on the object
/// names.
async fn get_child_versions(
&mut self,
parent_version_id: &VersionId,
) -> Result<Vec<VersionId>> {
let mut versions = Vec::new();
let prefix = &format!("v-{}-", parent_version_id.as_simple());
let mut iterator = self.service.list(prefix).await;
while let Some(res) = iterator.next().await {
match res {
Ok(ObjectInfo { name, .. }) => {
if let Some((_, c)) = Self::parse_version_name(&name) {
versions.push(c);
}
}
Err(e) => {
return Err(e);
}
}
}
Ok(versions)
}
/// Determine the snapshot urgency. This is done probabalistically:
/// - High urgency approximately 1% of the time.
/// - Low urgency approximately 10% of the time.
fn snapshot_urgency(&self) -> Result<SnapshotUrgency> {
let r = self.randint()?;
if r < 2 {
Ok(SnapshotUrgency::High)
} else if r < 25 {
Ok(SnapshotUrgency::Low)
} else {
Ok(SnapshotUrgency::None)
}
}
/// Maybe call `cleanup` depending on `cleanup_probability`.
async fn maybe_cleanup(&mut self) -> Result<()> {
if self.randint()? < self.cleanup_probability {
self.cleanup_probability = DEFAULT_CLEANUP_PROBABILITY;
self.cleanup().await
} else {
Ok(())
}
}
/// Perform cleanup, deleting unnecessary data.
async fn cleanup(&mut self) -> Result<()> {
// Construct a vector containing all (child, parent, creation) tuples
let mut versions = {
let mut versions = Vec::new();
let mut iterator = self.service.list("v-").await;
while let Some(res) = iterator.next().await {
match res {
Ok(ObjectInfo { name, creation }) => {
if let Some((p, c)) = Self::parse_version_name(&name) {
versions.push((c, p, creation));
}
}
Err(e) => return Err(e),
}
}
versions
};
versions.sort();
// Function to find the parent of a given child version in `versions`, taking
// advantage of having sorted the vector by child version ID.
let parent_of = |c| match versions.binary_search_by_key(&c, |tup| tup.0) {
Ok(idx) => Some(versions[idx].1),
Err(_) => None,
};
// Create chains mapping forward (parent -> child) and backward (child -> parent), starting
// at "latest".
let mut rev_chain = HashMap::new();
let mut iterations = versions.len() + 1; // For cycle detection.
let latest = self.get_latest().await?;
if let Some(mut c) = latest {
while let Some(p) = parent_of(c) {
rev_chain.insert(c, p);
c = p;
iterations -= 1;
if iterations == 0 {
return Err(Error::Server("Version cycle detected".into()));
}
}
}
// Collect all versions older than MAX_VERSION_AGE_SECS
#[cfg(not(test))]
let age_threshold = {
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.map(|t| t.as_secs())
.unwrap_or(0);
now.saturating_sub(MAX_VERSION_AGE_SECS)
};
// In testing, cutoff age is 1000.
#[cfg(test)]
let age_threshold = 1000;
let old_versions: HashSet<Uuid> = versions
.iter()
.filter_map(|(c, _, creation)| {
if *creation < age_threshold {
Some(*c)
} else {
None
}
})
.collect();
// Now, any pair not present in that chain can be deleted. However, another replica
// may be in the state where it has uploaded a version but not changed "latest" yet,
// so any pair with parent equal to latest is allowed to stay.
for (c, p, _) in versions {
if rev_chain.get(&c) != Some(&p) && Some(p) != latest {
self.service.del(&Self::version_name(&p, &c)).await?;
}
}
// Collect a set of all snapshots.
let snapshots = {
let mut snapshots = HashSet::new();
let mut iterator = self.service.list("s-").await;
while let Some(res) = iterator.next().await {
match res {
Ok(ObjectInfo { name, .. }) => {
if let Some(parsed_name) = Self::parse_snapshot_name(&name) {
snapshots.insert(parsed_name);
}
}
Err(e) => return Err(e),
}
}
snapshots
};
// Find the latest snapshot by iterating back from "latest". Note that this iteration is
// guaranteed not to be cyclical, as that was checked above.
let mut latest_snapshot = None;
if let Some(mut version) = latest {
loop {
if snapshots.contains(&version) {
latest_snapshot = Some(version);
break;
}
if let Some(v) = rev_chain.get(&version) {
version = *v;
} else {
break;
}
}
}
// If there's a latest snapshot, delete all other snapshots.
let Some(latest_snapshot) = latest_snapshot else {
// If there's no snapshot, no further cleanup is possible.
return Ok(());
};
for version in snapshots {
if version != latest_snapshot {
self.service.del(&Self::snapshot_name(&version)).await?;
}
}
// Now continue iterating backward from that version; any version in `old_versions` can be
// deleted.
let mut version = latest_snapshot;
while let Some(parent) = rev_chain.get(&version) {
if old_versions.contains(&version) {
self.service
.del(&Self::version_name(parent, &version))
.await?;
}
version = *parent;
}
Ok(())
}
}
#[async_trait(?Send)]
impl<SVC: Service + Send> Server for CloudServer<SVC> {
async fn add_version(
&mut self,
parent_version_id: VersionId,
history_segment: HistorySegment,
) -> Result<(AddVersionResult, SnapshotUrgency)> {
let latest = self.get_latest().await?;
if let Some(l) = latest {
if l != parent_version_id {
return Ok((
AddVersionResult::ExpectedParentVersion(l),
self.snapshot_urgency()?,
));
}
}
// Invent a new version ID and upload the version data.
let version_id = VersionId::new_v4();
let new_name = Self::version_name(&parent_version_id, &version_id);
let sealed = self.cryptor.seal(Unsealed {
version_id,
payload: history_segment,
})?;
self.service.put(&new_name, sealed.as_ref()).await?;
#[cfg(test)]
if let Some(f) = self.add_version_intercept.take() {
f(&mut self.service).await;
}
// Try to compare-and-swap this value into LATEST
let old_value = latest.map(version_to_bytes);
let new_value = version_to_bytes(version_id);
if !self
.service
.compare_and_swap(LATEST, old_value, new_value)
.await?
{
// Delete the version data, since it was not latest.
self.service.del(&new_name).await?;
let latest = self.get_latest().await?;
let latest = latest.unwrap_or(Uuid::nil());
return Ok((
AddVersionResult::ExpectedParentVersion(latest),
self.snapshot_urgency()?,
));
}
// Attempt a cleanup, but ignore errors.
let _ = self.maybe_cleanup().await;
Ok((AddVersionResult::Ok(version_id), self.snapshot_urgency()?))
}
async fn get_child_version(
&mut self,
parent_version_id: VersionId,
) -> Result<GetVersionResult> {
// The `get_child_versions` function may return several possible children, only one of
// those will lead to `latest`, and importantly the others will not have their own
// children. So we can detect the "true" child as the one that is equal to "latest" or has
// children. Note that even if `get_child_versions` returns a single version, that version
// may not be valid and the appropriate result may be NoSuchVersion.
let version_id = match &(self.get_child_versions(&parent_version_id).await?)[..] {
[] => return Ok(GetVersionResult::NoSuchVersion),
children => {
// There are some extra version objects, so a cleanup is warranted.
self.cleanup_probability = 255;
let latest = self.get_latest().await?;
let mut true_child = None;
for child in children {
if Some(*child) == latest {
true_child = Some(*child);
break;
}
}
if true_child.is_none() {
for child in children {
if !self.get_child_versions(child).await?.is_empty() {
true_child = Some(*child)
}
}
}
match true_child {
Some(true_child) => true_child,
None => return Ok(GetVersionResult::NoSuchVersion),
}
}
};
let Some(sealed) = self
.service
.get(&Self::version_name(&parent_version_id, &version_id))
.await?
else {
// This really shouldn't happen, since the chain was derived from object names, but
// perhaps the object was deleted.
return Ok(GetVersionResult::NoSuchVersion);
};
let unsealed = self.cryptor.unseal(Sealed {
version_id,
payload: sealed,
})?;
Ok(GetVersionResult::Version {
version_id,
parent_version_id,
history_segment: unsealed.into(),
})
}
async fn add_snapshot(&mut self, version_id: VersionId, snapshot: Snapshot) -> Result<()> {
let name = Self::snapshot_name(&version_id);
let sealed = self.cryptor.seal(Unsealed {
version_id,
payload: snapshot,
})?;
self.service.put(&name, sealed.as_ref()).await?;
Ok(())
}
async fn get_snapshot(&mut self) -> Result<Option<(VersionId, Snapshot)>> {
// Pick the first snapshot we find.
let Some(name) = self.service.list("s-").await.next().await else {
return Ok(None);
};
let ObjectInfo { name, .. } = name?;
let Some(version_id) = Self::parse_snapshot_name(&name) else {
return Ok(None);
};
let Some(payload) = self.service.get(&name).await? else {
return Ok(None);
};
let unsealed = self.cryptor.unseal(Sealed {
version_id,
payload,
})?;
Ok(Some((version_id, unsealed.payload)))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::server::{
cloud::iter::{AsyncObjectIterator, SyncIteratorWrapper},
NIL_VERSION_ID,
};
/// A simple in-memory service for testing. All insertions via Service methods occur at time
/// `INSERTION_TIME`. All versions older that 1000 are considered "old".
#[derive(Clone)]
struct MockService(HashMap<String, (u64, Vec<u8>)>);
const INSERTION_TIME: u64 = 9999999999;
impl MockService {
fn new() -> Self {
let mut map = HashMap::new();
// Use a fixed salt for consistent results
map.insert("salt".into(), (0, "abcdefghabcdefgh".into()));
Self(map)
}
}
#[async_trait]
impl Service for MockService {
async fn put(&mut self, name: &str, value: &[u8]) -> Result<()> {
self.0.insert(name.into(), (INSERTION_TIME, value.into()));
Ok(())
}
async fn get(&mut self, name: &str) -> Result<Option<Vec<u8>>> {
Ok(self.0.get(name).map(|(_, data)| data.clone()))
}
async fn del(&mut self, name: &str) -> Result<()> {
self.0.remove(name);
Ok(())
}
async fn compare_and_swap(
&mut self,
name: &str,
existing_value: Option<Vec<u8>>,
new_value: Vec<u8>,
) -> Result<bool> {
if self.0.get(name).map(|(_, d)| d) == existing_value.as_ref() {
self.0.insert(name.into(), (INSERTION_TIME, new_value));
return Ok(true);
}
Ok(false)
}
async fn list<'a>(
&'a mut self,
prefix: &'a str,
) -> Box<dyn AsyncObjectIterator + Send + 'a> {
let inner = self
.0
.iter()
.filter(move |(k, _)| k.starts_with(prefix))
.map(|(k, (t, _))| {
Ok(ObjectInfo {
name: k.to_string(),
creation: *t,
})
});
Box::new(SyncIteratorWrapper { inner })
}
}
impl std::fmt::Debug for MockService {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_map().entries(self.0.iter()).finish()
}
}
// Add some testing utilities to CloudServer.
impl CloudServer<MockService> {
fn mock_add_version(
&mut self,
parent: VersionId,
child: VersionId,
creation: u64,
data: &[u8],
) {
let name = Self::version_name(&parent, &child);
let sealed = self
.cryptor
.seal(Unsealed {
version_id: child,
payload: data.into(),
})
.unwrap();
self.service.0.insert(name, (creation, sealed.into()));
}
fn mock_add_snapshot(&mut self, version: VersionId, creation: u64, snapshot: &[u8]) {
let name = Self::snapshot_name(&version);
let sealed = self
.cryptor
.seal(Unsealed {
version_id: version,
payload: snapshot.into(),
})
.unwrap();
self.service.0.insert(name, (creation, sealed.into()));
}
fn mock_set_latest(&mut self, latest: VersionId) {
let latest = version_to_bytes(latest);
self.service
.0
.insert(LATEST.into(), (INSERTION_TIME, latest));
}
/// Create a copy of this server without any data; used for creating a MockService
/// to compare to with `assert_eq!`
fn empty_clone(&self) -> Self {
Self {
cryptor: self.cryptor.clone(),
cleanup_probability: 0,
service: MockService::new(),
add_version_intercept: None,
}
}
/// Get a decrypted, string-y copy of the data in the HashMap.
fn unencrypted(&self) -> HashMap<String, (u64, String)> {
self.service
.0
.iter()
.map(|(k, v)| {
let k = k.clone();
if k == "latest" {
return (k, (v.0, String::from_utf8(v.1.to_vec()).unwrap()));
}
let version_id;
if let Some((_, v)) = Self::parse_version_name(&k) {
version_id = v;
} else if let Some(v) = Self::parse_snapshot_name(&k) {
version_id = v;
} else {
return (k, (v.0, format!("{:?}", v.1)));
}
let unsealed = self
.cryptor
.unseal(Sealed {
version_id,
payload: v.1.to_vec(),
})
.unwrap();
let vstr = String::from_utf8(unsealed.into()).unwrap();
(k, (v.0, vstr))
})
.collect()
}
}
impl Clone for CloudServer<MockService> {
fn clone(&self) -> Self {
Self {
cryptor: self.cryptor.clone(),
cleanup_probability: self.cleanup_probability,
service: self.service.clone(),
add_version_intercept: None,
}
}
}
const SECRET: &[u8] = b"testing";
async fn make_server() -> CloudServer<MockService> {
let mut server = CloudServer::new(MockService::new(), SECRET.into())
.await
.unwrap();
// Prevent cleanup during tests.
server.cleanup_probability = 0;
server
}
#[test]
fn version_name() {
let p = Uuid::parse_str("a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8").unwrap();
let c = Uuid::parse_str("adcf4e350fa54e4aaf9d3f20f3ba5a32").unwrap();
assert_eq!(
CloudServer::<MockService>::version_name(&p, &c),
"v-a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8-adcf4e350fa54e4aaf9d3f20f3ba5a32"
);
}
#[test]
fn version_name_round_trip() {
let p = Uuid::new_v4();
let c = Uuid::new_v4();
assert_eq!(
CloudServer::<MockService>::parse_version_name(
&CloudServer::<MockService>::version_name(&p, &c)
),
Some((p, c))
);
}
#[test]
fn parse_version_name_bad_prefix() {
assert_eq!(
CloudServer::<MockService>::parse_version_name(
"X-a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8-adcf4e350fa54e4aaf9d3f20f3ba5a32"
),
None
);
}
#[test]
fn parse_version_name_bad_separator() {
assert_eq!(
CloudServer::<MockService>::parse_version_name(
"v-a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8xadcf4e350fa54e4aaf9d3f20f3ba5a32"
),
None
);
}
#[test]
fn parse_version_name_too_short() {
assert_eq!(
CloudServer::<MockService>::parse_version_name(
"v-a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8-adcf4e350fa54e4aaf9d3f20f3ba5a3"
),
None
);
}
#[test]
fn parse_version_name_too_long() {
assert_eq!(
CloudServer::<MockService>::parse_version_name(
"v-a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8-adcf4e350fa54e4aaf9d3f20f3ba5a320"
),
None
);
}
#[test]
fn snapshot_name_round_trip() {
let v = Uuid::new_v4();
assert_eq!(
CloudServer::<MockService>::parse_snapshot_name(
&CloudServer::<MockService>::snapshot_name(&v)
),
Some(v)
);
}
#[test]
fn parse_snapshot_name_invalid() {
assert_eq!(
CloudServer::<MockService>::parse_snapshot_name("s-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"),
None
);
}
#[test]
fn parse_snapshot_name_bad_prefix() {
assert_eq!(
CloudServer::<MockService>::parse_snapshot_name("s:a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8"),
None
);
}
#[test]
fn parse_snapshot_name_too_short() {
assert_eq!(
CloudServer::<MockService>::parse_snapshot_name("s-a1a2a3a4b1b2c1c2d1d2d3d4d5d6"),
None
);
}
#[test]
fn parse_snapshot_name_too_long() {
assert_eq!(
CloudServer::<MockService>::parse_snapshot_name(
"s-a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8000"
),
None
);
}
#[tokio::test]
async fn get_salt_existing() {
let mut service = MockService::new();
assert_eq!(
CloudServer::<MockService>::get_salt(&mut service)
.await
.unwrap(),
b"abcdefghabcdefgh".to_vec()
);
}
#[tokio::test]
async fn get_salt_create() {
let mut service = MockService::new();
service.del("salt").await.unwrap();
let got_salt = CloudServer::<MockService>::get_salt(&mut service)
.await
.unwrap();
let salt_obj = service.get("salt").await.unwrap().unwrap();
assert_eq!(got_salt, salt_obj);
}
#[tokio::test]
async fn get_latest_empty() {
let mut server = make_server().await;
assert_eq!(server.get_latest().await.unwrap(), None);
}
#[tokio::test]
async fn get_latest_exists() {
let mut server = make_server().await;
let latest = Uuid::new_v4();
server.mock_set_latest(latest);
assert_eq!(server.get_latest().await.unwrap(), Some(latest));
}
#[tokio::test]
async fn get_latest_invalid() {
let mut server = make_server().await;
server
.service
.0
.insert(LATEST.into(), (999, b"not-a-uuid".to_vec()));
assert!(server.get_latest().await.is_err());
}
#[tokio::test]
async fn get_child_versions_empty() {
let mut server = make_server().await;
assert_eq!(
server
.get_child_versions(&Uuid::new_v4())
.await
.unwrap()
.len(),
0
);
}
#[tokio::test]
async fn get_child_versions_single() {
let mut server = make_server().await;
let (v1, v2) = (Uuid::new_v4(), Uuid::new_v4());
server.mock_add_version(v2, v1, 1000, b"first");
assert_eq!(server.get_child_versions(&v1).await.unwrap().len(), 0);
assert_eq!(server.get_child_versions(&v2).await.unwrap(), vec![v1]);
}
#[tokio::test]
async fn get_child_versions_multiple() {
let mut server = make_server().await;
let (v1, v2, v3) = (Uuid::new_v4(), Uuid::new_v4(), Uuid::new_v4());
server.mock_add_version(v3, v1, 1000, b"first");
server.mock_add_version(v3, v2, 1000, b"second");
assert_eq!(server.get_child_versions(&v1).await.unwrap().len(), 0);
assert_eq!(server.get_child_versions(&v2).await.unwrap().len(), 0);
let versions = server.get_child_versions(&v3).await.unwrap();
assert!(versions == vec![v1, v2] || versions == vec![v2, v1]);
}
#[tokio::test]
async fn add_version_empty() {
let mut server = make_server().await;
let parent = Uuid::new_v4();
let (res, _) = server
.add_version(parent, b"history".to_vec())
.await
.unwrap();
assert!(matches!(res, AddVersionResult::Ok(_)));
}
#[tokio::test]
async fn add_version_good() {
let mut server = make_server().await;
let (v1, v2) = (Uuid::new_v4(), Uuid::new_v4());
server.mock_add_version(v1, v2, 1000, b"first");
server.mock_set_latest(v2);
let (res, _) = server.add_version(v2, b"history".to_vec()).await.unwrap();
let AddVersionResult::Ok(new_version) = res else {
panic!("expected OK");
};
let mut expected = server.empty_clone();
expected.mock_add_version(v1, v2, 1000, b"first");
expected.mock_add_version(v2, new_version, INSERTION_TIME, b"history");
expected.mock_set_latest(new_version);
assert_eq!(server.unencrypted(), expected.unencrypted());
}
#[tokio::test]
async fn add_version_not_latest() {
// The `add_version` method does nothing if the version is not latest.
let mut server = make_server().await;
let (v1, v2) = (Uuid::new_v4(), Uuid::new_v4());
server.mock_add_version(v1, v2, 1000, b"first");
server.mock_set_latest(v2);
let expected = server.clone();
let (res, _) = server.add_version(v1, b"history".to_vec()).await.unwrap();
assert_eq!(res, AddVersionResult::ExpectedParentVersion(v2));
assert_eq!(server.unencrypted(), expected.unencrypted());
}
#[tokio::test]
async fn add_version_not_latest_race() {
// The `add_version` function effectively checks twice for a conflict: once by just
// fetching "latest", returning early if the value is not as expected; and once in the
// compare-and-swap. This test uses `add_version_intercept` to force the first check to
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | true |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/server/cloud/iter.rs | src/server/cloud/iter.rs | use crate::{errors::Result, server::cloud::service::ObjectInfo};
use async_trait::async_trait;
#[async_trait]
pub(crate) trait AsyncObjectIterator {
async fn next(&mut self) -> Option<Result<ObjectInfo>>;
}
#[cfg(test)]
// This struct takes a synchronous iterator `I` and adapts it for async.
pub(crate) struct SyncIteratorWrapper<I>
where
I: Iterator<Item = Result<ObjectInfo>> + Send,
{
pub inner: I,
}
#[cfg(test)]
#[async_trait]
impl<I> AsyncObjectIterator for SyncIteratorWrapper<I>
where
I: Iterator<Item = Result<ObjectInfo>> + Send + Sync,
{
async fn next(&mut self) -> Option<Result<ObjectInfo>> {
self.inner.next()
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/server/local/mod.rs | src/server/local/mod.rs | use crate::errors::Result;
use crate::server::{
AddVersionResult, GetVersionResult, HistorySegment, Server, Snapshot, SnapshotUrgency,
VersionId, NIL_VERSION_ID,
};
use crate::storage::sqlite::StoredUuid;
use anyhow::Context;
use async_trait::async_trait;
use rusqlite::params;
use rusqlite::OptionalExtension;
use serde::{Deserialize, Serialize};
use std::path::Path;
use uuid::Uuid;
#[derive(Serialize, Deserialize, Debug)]
struct Version {
version_id: VersionId,
parent_version_id: VersionId,
history_segment: HistorySegment,
}
pub(crate) struct LocalServer {
con: rusqlite::Connection,
}
impl LocalServer {
fn txn(&'_ mut self) -> Result<rusqlite::Transaction<'_>> {
let txn = self.con.transaction()?;
Ok(txn)
}
/// A server which has no notion of clients, signatures, encryption, etc.
pub(crate) fn new<P: AsRef<Path>>(directory: P) -> Result<LocalServer> {
let db_file = directory
.as_ref()
.join("taskchampion-local-sync-server.sqlite3");
let con = rusqlite::Connection::open(db_file)?;
let queries = vec![
"CREATE TABLE IF NOT EXISTS data (key STRING PRIMARY KEY, value STRING);",
"CREATE TABLE IF NOT EXISTS versions (version_id STRING PRIMARY KEY, parent_version_id STRING, data STRING);",
];
for q in queries {
con.execute(q, []).context("Creating table")?;
}
Ok(LocalServer { con })
}
fn get_latest_version_id(&mut self) -> Result<VersionId> {
let t = self.txn()?;
let result: Option<StoredUuid> = t
.query_row(
"SELECT value FROM data WHERE key = 'latest_version_id' LIMIT 1",
rusqlite::params![],
|r| r.get(0),
)
.optional()?;
Ok(result.map(|x| x.0).unwrap_or(NIL_VERSION_ID))
}
fn set_latest_version_id(&mut self, version_id: VersionId) -> Result<()> {
let t = self.txn()?;
t.execute(
"INSERT OR REPLACE INTO data (key, value) VALUES ('latest_version_id', ?)",
params![&StoredUuid(version_id)],
)
.context("Update task query")?;
t.commit()?;
Ok(())
}
fn get_version_by_parent_version_id(
&mut self,
parent_version_id: VersionId,
) -> Result<Option<Version>> {
let t = self.txn()?;
let r = t.query_row(
"SELECT version_id, parent_version_id, data FROM versions WHERE parent_version_id = ?",
params![&StoredUuid(parent_version_id)],
|r| {
let version_id: StoredUuid = r.get("version_id")?;
let parent_version_id: StoredUuid = r.get("parent_version_id")?;
Ok(Version{
version_id: version_id.0,
parent_version_id: parent_version_id.0,
history_segment: r.get("data")?,
})}
)
.optional()
.context("Get version query")
?;
Ok(r)
}
fn add_version_by_parent_version_id(&mut self, version: Version) -> Result<()> {
let t = self.txn()?;
t.execute(
"INSERT INTO versions (version_id, parent_version_id, data) VALUES (?, ?, ?)",
params![
StoredUuid(version.version_id),
StoredUuid(version.parent_version_id),
version.history_segment
],
)?;
t.commit()?;
Ok(())
}
}
#[async_trait(?Send)]
impl Server for LocalServer {
// TODO: better transaction isolation for add_version (gets and sets should be in the same
// transaction)
async fn add_version(
&mut self,
parent_version_id: VersionId,
history_segment: HistorySegment,
) -> Result<(AddVersionResult, SnapshotUrgency)> {
// no client lookup
// no signature validation
// check the parent_version_id for linearity
let latest_version_id = self.get_latest_version_id()?;
if latest_version_id != NIL_VERSION_ID && parent_version_id != latest_version_id {
return Ok((
AddVersionResult::ExpectedParentVersion(latest_version_id),
SnapshotUrgency::None,
));
}
// invent a new ID for this version
let version_id = Uuid::new_v4();
self.add_version_by_parent_version_id(Version {
version_id,
parent_version_id,
history_segment,
})?;
self.set_latest_version_id(version_id)?;
Ok((AddVersionResult::Ok(version_id), SnapshotUrgency::None))
}
async fn get_child_version(
&mut self,
parent_version_id: VersionId,
) -> Result<GetVersionResult> {
if let Some(version) = self.get_version_by_parent_version_id(parent_version_id)? {
Ok(GetVersionResult::Version {
version_id: version.version_id,
parent_version_id: version.parent_version_id,
history_segment: version.history_segment,
})
} else {
Ok(GetVersionResult::NoSuchVersion)
}
}
async fn add_snapshot(&mut self, _version_id: VersionId, _snapshot: Snapshot) -> Result<()> {
// the local server never requests a snapshot, so it should never get one
unreachable!()
}
async fn get_snapshot(&mut self) -> Result<Option<(VersionId, Snapshot)>> {
Ok(None)
}
}
#[cfg(test)]
mod test {
use super::*;
use pretty_assertions::assert_eq;
use tempfile::TempDir;
#[tokio::test]
async fn test_empty() -> Result<()> {
let tmp_dir = TempDir::new()?;
let mut server = LocalServer::new(tmp_dir.path())?;
let child_version = server.get_child_version(NIL_VERSION_ID).await?;
assert_eq!(child_version, GetVersionResult::NoSuchVersion);
Ok(())
}
#[tokio::test]
async fn test_add_zero_base() -> Result<()> {
let tmp_dir = TempDir::new()?;
let mut server = LocalServer::new(tmp_dir.path())?;
let history = b"1234".to_vec();
match server.add_version(NIL_VERSION_ID, history.clone()).await?.0 {
AddVersionResult::ExpectedParentVersion(_) => {
panic!("should have accepted the version")
}
AddVersionResult::Ok(version_id) => {
let new_version = server.get_child_version(NIL_VERSION_ID).await?;
assert_eq!(
new_version,
GetVersionResult::Version {
version_id,
parent_version_id: NIL_VERSION_ID,
history_segment: history,
}
);
}
}
Ok(())
}
#[tokio::test]
async fn test_add_nonzero_base() -> Result<()> {
let tmp_dir = TempDir::new()?;
let mut server = LocalServer::new(tmp_dir.path())?;
let history = b"1234".to_vec();
let parent_version_id = Uuid::new_v4() as VersionId;
// This is OK because the server has no latest_version_id yet
match server
.add_version(parent_version_id, history.clone())
.await?
.0
{
AddVersionResult::ExpectedParentVersion(_) => {
panic!("should have accepted the version")
}
AddVersionResult::Ok(version_id) => {
let new_version = server.get_child_version(parent_version_id).await?;
assert_eq!(
new_version,
GetVersionResult::Version {
version_id,
parent_version_id,
history_segment: history,
}
);
}
}
Ok(())
}
#[tokio::test]
async fn test_add_nonzero_base_forbidden() -> Result<()> {
let tmp_dir = TempDir::new()?;
let mut server = LocalServer::new(tmp_dir.path())?;
let history = b"1234".to_vec();
let parent_version_id = Uuid::new_v4() as VersionId;
// add a version
if let (AddVersionResult::ExpectedParentVersion(_), SnapshotUrgency::None) = server
.add_version(parent_version_id, history.clone())
.await?
{
panic!("should have accepted the version")
}
// then add another, not based on that one
if let (AddVersionResult::Ok(_), SnapshotUrgency::None) =
server.add_version(parent_version_id, history).await?
{
panic!("should not have accepted the version")
}
Ok(())
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/server/sync/mod.rs | src/server/sync/mod.rs | use crate::errors::{Error, Result};
use crate::server::{
http, AddVersionResult, GetVersionResult, HistorySegment, Server, Snapshot, SnapshotUrgency,
VersionId,
};
use async_trait::async_trait;
use reqwest::StatusCode;
use url::Url;
use uuid::Uuid;
use super::encryption::{Cryptor, Sealed, Secret, Unsealed};
pub(crate) struct SyncServer {
base_url: Url,
client_id: Uuid,
cryptor: Cryptor,
client: reqwest::Client,
}
/// The content-type for history segments (opaque blobs of bytes)
const HISTORY_SEGMENT_CONTENT_TYPE: &str = "application/vnd.taskchampion.history-segment";
/// The content-type for snapshots (opaque blobs of bytes)
const SNAPSHOT_CONTENT_TYPE: &str = "application/vnd.taskchampion.snapshot";
/// A SyncServer communicates with a sync server over HTTP.
impl SyncServer {
/// Construct a new SyncServer.
///
/// The `url` parameter represents the base URL of the sync server, encompassing the protocol, hostname, and optional path
/// components where the server is hosted. When constructing URLs for server endpoints, the respective path components
/// will be appended to this base URL.
///
/// Pass a client_id to identify this client to the server. Multiple replicas synchronizing the same task history
/// should use the same client_id.
pub(crate) fn new(
url: String,
client_id: Uuid,
encryption_secret: Vec<u8>,
) -> Result<SyncServer> {
let mut url = Url::parse(&url)
.map_err(|_| Error::Server(format!("Could not parse {url} as a URL")))?;
// Ensure the path has a trailing slash, so that `Url::join` correctly appends
// additional path segments to it.
let path = url.path();
if !path.ends_with('/') {
url.set_path(&format!("{path}/"));
}
Ok(SyncServer {
base_url: url,
client_id,
cryptor: Cryptor::new(client_id, &Secret(encryption_secret.to_vec()))?,
client: http::client()?,
})
}
/// Construct a full endpoint URL by joining the base url with additional
/// path components.
fn construct_endpoint_url(&self, path_components: &str) -> Result<Url> {
self.base_url.join(path_components).map_err(|_| {
Error::Server(format!(
"Could not build url from base {} and path component(s) {}",
self.base_url, path_components
))
})
}
}
/// Read a UUID-bearing header or fail trying
fn get_uuid_header(resp: &reqwest::Response, name: &str) -> Result<Uuid> {
let value = resp
.headers()
.get(name)
.ok_or_else(|| anyhow::anyhow!("Response does not have {} header", name))?
.to_str()
.map_err(|_| anyhow::anyhow!("Response has invalid {} header", name))?;
let value = Uuid::parse_str(value)
.map_err(|e| anyhow::anyhow!("{} header is not a valid UUID: {}", name, e))?;
Ok(value)
}
/// Read the X-Snapshot-Request header and return a SnapshotUrgency
fn get_snapshot_urgency(resp: &reqwest::Response) -> SnapshotUrgency {
match resp.headers().get("X-Snapshot-Request") {
None => SnapshotUrgency::None,
Some(hdr) => match hdr.to_str() {
Ok("urgency=low") => SnapshotUrgency::Low,
Ok("urgency=high") => SnapshotUrgency::High,
_ => SnapshotUrgency::None,
},
}
}
/// Get the content-type header.
fn get_content_type(resp: &reqwest::Response) -> Option<&str> {
match resp.headers().get("Content-Type") {
None => None,
Some(hdr) => hdr.to_str().ok(),
}
}
async fn sealed_from_resp(
resp: reqwest::Response,
version_id: Uuid,
content_type: &str,
) -> Result<Sealed> {
if get_content_type(&resp) == Some(content_type) {
let payload = resp.bytes().await?;
Ok(Sealed {
version_id,
payload: payload.to_vec(),
})
} else {
Err(Error::Server(String::from(
"Response did not have expected content-type",
)))
}
}
#[async_trait(?Send)]
impl Server for SyncServer {
async fn add_version(
&mut self,
parent_version_id: VersionId,
history_segment: HistorySegment,
) -> Result<(AddVersionResult, SnapshotUrgency)> {
let url = self.construct_endpoint_url(
format!("v1/client/add-version/{parent_version_id}").as_str(),
)?;
let unsealed = Unsealed {
version_id: parent_version_id,
payload: history_segment,
};
let sealed = self.cryptor.seal(unsealed)?;
let resp = self
.client
.post(url)
.header("Content-Type", HISTORY_SEGMENT_CONTENT_TYPE)
.header("X-Client-Id", &self.client_id.to_string())
.body(sealed.payload)
.send()
.await?;
if resp.status() == StatusCode::CONFLICT {
let parent_version_id = get_uuid_header(&resp, "X-Parent-Version-Id")?;
return Ok((
AddVersionResult::ExpectedParentVersion(parent_version_id),
SnapshotUrgency::None,
));
}
match resp.error_for_status() {
Ok(resp) => {
let version_id = get_uuid_header(&resp, "X-Version-Id")?;
Ok((
AddVersionResult::Ok(version_id),
get_snapshot_urgency(&resp),
))
}
Err(err) => Err(err.into()),
}
}
async fn get_child_version(
&mut self,
parent_version_id: VersionId,
) -> Result<GetVersionResult> {
let url = self.construct_endpoint_url(
format!("v1/client/get-child-version/{parent_version_id}").as_str(),
)?;
match self
.client
.get(url)
.header("X-Client-Id", &self.client_id.to_string())
.send()
.await?
.error_for_status()
{
Ok(resp) => {
let parent_version_id = get_uuid_header(&resp, "X-Parent-Version-Id")?;
let version_id = get_uuid_header(&resp, "X-Version-Id")?;
let sealed =
sealed_from_resp(resp, parent_version_id, HISTORY_SEGMENT_CONTENT_TYPE).await?;
let history_segment = self.cryptor.unseal(sealed)?.payload;
Ok(GetVersionResult::Version {
version_id,
parent_version_id,
history_segment,
})
}
Err(err) if err.status() == Some(StatusCode::NOT_FOUND) => {
Ok(GetVersionResult::NoSuchVersion)
}
Err(err) => Err(err.into()),
}
}
async fn add_snapshot(&mut self, version_id: VersionId, snapshot: Snapshot) -> Result<()> {
let url =
self.construct_endpoint_url(format!("v1/client/add-snapshot/{version_id}").as_str())?;
let unsealed = Unsealed {
version_id,
payload: snapshot,
};
let sealed = self.cryptor.seal(unsealed)?;
Ok(self
.client
.post(url)
.header("Content-Type", SNAPSHOT_CONTENT_TYPE)
.header("X-Client-Id", &self.client_id.to_string())
.body(sealed.payload)
.send()
.await
.and_then(reqwest::Response::error_for_status)
.map(|_| ())?)
}
async fn get_snapshot(&mut self) -> Result<Option<(VersionId, Snapshot)>> {
let url = self.construct_endpoint_url("v1/client/snapshot")?;
match self
.client
.get(url)
.header("X-Client-Id", &self.client_id.to_string())
.send()
.await?
.error_for_status()
{
Ok(resp) => {
let version_id = get_uuid_header(&resp, "X-Version-Id")?;
let sealed = sealed_from_resp(resp, version_id, SNAPSHOT_CONTENT_TYPE).await?;
let snapshot = self.cryptor.unseal(sealed)?.payload;
Ok(Some((version_id, snapshot)))
}
Err(err) if err.status() == Some(StatusCode::NOT_FOUND) => Ok(None),
Err(err) => Err(err.into()),
}
}
}
// httptest is not available on WASM32, so do not build these tests
// on that platform (they wouldn't run anyway!).
#[cfg(all(not(target_arch = "wasm32"), test))]
mod test {
use super::*;
use crate::Server as ServerTrait;
use httptest::{matchers::*, responders::*, Expectation, Server};
use uuid::uuid;
#[test]
fn sync_server_url_construction() -> anyhow::Result<()> {
let client_id = Uuid::new_v4();
let encryption_secret = vec![];
let bare_domain = SyncServer::new(
"https://example.com".into(),
client_id,
encryption_secret.clone(),
)?;
let no_slash_path = SyncServer::new(
"https://example.com/foo/bar".into(),
client_id,
encryption_secret.clone(),
)?;
let slash_path = SyncServer::new(
"https://example.com/foo/bar/".into(),
client_id,
encryption_secret,
)?;
assert_eq!(
bare_domain.construct_endpoint_url("v1/a/b")?,
Url::parse("https://example.com/v1/a/b")?
);
assert_eq!(
no_slash_path.construct_endpoint_url("v1/a/b")?,
Url::parse("https://example.com/foo/bar/v1/a/b")?
);
assert_eq!(
slash_path.construct_endpoint_url("v1/a/b")?,
Url::parse("https://example.com/foo/bar/v1/a/b")?
);
Ok(())
}
const ENCRYPTION_SECRET: &[u8] = b"abc";
const CLIENT_ID: Uuid = uuid!("ea82d570-3d7e-494a-a581-babe65dc7b3b");
fn encrypt(version_id: Uuid, payload: impl Into<Vec<u8>>) -> anyhow::Result<Sealed> {
let cryptor = Cryptor::new(CLIENT_ID, &Secret(ENCRYPTION_SECRET.to_vec()))?;
let unsealed = Unsealed {
version_id,
payload: payload.into(),
};
Ok(cryptor.seal(unsealed)?)
}
#[tokio::test]
async fn add_version() -> anyhow::Result<()> {
let version_id = uuid!("e2ceb7df-706d-4a9d-ac26-09824e239092");
let parent_version_id = uuid!("785fd86c-c11f-48b6-9557-27ec78bb568c");
let server = Server::run();
server.expect(
Expectation::matching(all_of!(
request::method_path(
"POST",
format!("/v1/client/add-version/{parent_version_id}")
),
request::headers(contains(("x-client-id", CLIENT_ID.to_string()))),
))
.respond_with(
status_code(200)
.append_header("x-version-id", version_id.to_string())
.append_header("x-parent-version-id", parent_version_id.to_string())
.append_header("content-type", HISTORY_SEGMENT_CONTENT_TYPE),
),
);
let mut svr = SyncServer::new(server.url("/").to_string(), CLIENT_ID, b"abc".to_vec())?;
let res = svr
.add_version(parent_version_id, b"abc".to_vec())
.await
.unwrap();
assert_eq!(res.0, AddVersionResult::Ok(version_id));
Ok(())
}
#[tokio::test]
async fn add_version_conflict() -> anyhow::Result<()> {
let parent_version_id = uuid!("785fd86c-c11f-48b6-9557-27ec78bb568c");
let server = Server::run();
server.expect(
Expectation::matching(all_of!(
request::method_path(
"POST",
format!("/v1/client/add-version/{parent_version_id}")
),
request::headers(contains(("x-client-id", CLIENT_ID.to_string()))),
))
.respond_with(
status_code(409)
.append_header("x-parent-version-id", parent_version_id.to_string()),
),
);
let mut svr = SyncServer::new(server.url("/").to_string(), CLIENT_ID, b"abc".to_vec())?;
let res = svr
.add_version(parent_version_id, b"abc".to_vec())
.await
.unwrap();
assert_eq!(
res.0,
AddVersionResult::ExpectedParentVersion(parent_version_id)
);
Ok(())
}
#[tokio::test]
async fn add_version_error() -> anyhow::Result<()> {
let parent_version_id = uuid!("785fd86c-c11f-48b6-9557-27ec78bb568c");
let server = Server::run();
server.expect(
Expectation::matching(all_of!(
request::method_path(
"POST",
format!("/v1/client/add-version/{parent_version_id}")
),
request::headers(contains(("x-client-id", CLIENT_ID.to_string()))),
))
.respond_with(status_code(404)),
);
let mut svr = SyncServer::new(server.url("/").to_string(), CLIENT_ID, b"abc".to_vec())?;
assert!(svr
.add_version(parent_version_id, b"abc".to_vec())
.await
.is_err());
Ok(())
}
#[tokio::test]
async fn get_child_version() -> anyhow::Result<()> {
let version_id = uuid!("e2ceb7df-706d-4a9d-ac26-09824e239092");
let parent_version_id = uuid!("785fd86c-c11f-48b6-9557-27ec78bb568c");
let server = Server::run();
let sealed = encrypt(parent_version_id, b"abc")?;
server.expect(
Expectation::matching(all_of!(
request::method_path(
"GET",
format!("/v1/client/get-child-version/{parent_version_id}")
),
request::headers(contains(("x-client-id", CLIENT_ID.to_string()))),
))
.respond_with(
status_code(200)
.body(sealed.payload)
.append_header("x-version-id", version_id.to_string())
.append_header("x-parent-version-id", parent_version_id.to_string())
.append_header("content-type", HISTORY_SEGMENT_CONTENT_TYPE),
),
);
let mut svr = SyncServer::new(server.url("/").to_string(), CLIENT_ID, b"abc".to_vec())?;
let res = svr.get_child_version(parent_version_id).await.unwrap();
assert_eq!(
res,
GetVersionResult::Version {
version_id,
parent_version_id,
history_segment: b"abc".to_vec()
}
);
Ok(())
}
#[tokio::test]
async fn get_child_version_not_found() -> anyhow::Result<()> {
let version_id = uuid!("e2ceb7df-706d-4a9d-ac26-09824e239092");
let server = Server::run();
server.expect(
Expectation::matching(all_of!(
request::method_path("GET", format!("/v1/client/get-child-version/{version_id}")),
request::headers(contains(("x-client-id", CLIENT_ID.to_string()))),
))
.respond_with(status_code(404)),
);
let mut svr = SyncServer::new(server.url("/").to_string(), CLIENT_ID, b"abc".to_vec())?;
assert_eq!(
svr.get_child_version(version_id).await?,
GetVersionResult::NoSuchVersion
);
Ok(())
}
#[tokio::test]
async fn get_child_version_error() -> anyhow::Result<()> {
let version_id = uuid!("e2ceb7df-706d-4a9d-ac26-09824e239092");
let server = Server::run();
server.expect(
Expectation::matching(all_of!(
request::method_path("GET", format!("/v1/client/get-child-version/{version_id}")),
request::headers(contains(("x-client-id", CLIENT_ID.to_string()))),
))
.respond_with(status_code(502)),
);
let mut svr = SyncServer::new(server.url("/").to_string(), CLIENT_ID, b"abc".to_vec())?;
assert!(svr.get_child_version(version_id).await.is_err());
Ok(())
}
#[tokio::test]
async fn add_snapshot() -> anyhow::Result<()> {
let version_id = uuid!("e2ceb7df-706d-4a9d-ac26-09824e239092");
let server = Server::run();
server.expect(
Expectation::matching(all_of!(
request::method_path("POST", format!("/v1/client/add-snapshot/{version_id}")),
request::headers(contains(("x-client-id", CLIENT_ID.to_string()))),
request::headers(contains(("content-type", SNAPSHOT_CONTENT_TYPE))),
// The encrypted body is different every time, so not tested
))
.respond_with(status_code(200)),
);
let mut svr = SyncServer::new(server.url("/").to_string(), CLIENT_ID, b"abc".to_vec())?;
svr.add_snapshot(version_id, b"abc".to_vec()).await?;
Ok(())
}
#[tokio::test]
async fn add_snapshot_error() -> anyhow::Result<()> {
let version_id = uuid!("e2ceb7df-706d-4a9d-ac26-09824e239092");
let server = Server::run();
server.expect(
Expectation::matching(all_of!(
request::method_path("POST", format!("/v1/client/add-snapshot/{version_id}")),
request::headers(contains(("x-client-id", CLIENT_ID.to_string()))),
request::headers(contains(("content-type", SNAPSHOT_CONTENT_TYPE))),
// The encrypted body is different every time, so not tested
))
.respond_with(status_code(500)),
);
let mut svr = SyncServer::new(server.url("/").to_string(), CLIENT_ID, b"abc".to_vec())?;
assert!(svr.add_snapshot(version_id, b"abc".to_vec()).await.is_err());
Ok(())
}
#[tokio::test]
async fn get_snapshot_found() -> anyhow::Result<()> {
let version_id = Uuid::new_v4();
let sealed = encrypt(version_id, b"abc")?;
let server = Server::run();
server.expect(
Expectation::matching(all_of!(
request::method_path("GET", "/v1/client/snapshot"),
request::headers(contains(("x-client-id", CLIENT_ID.to_string()))),
))
.respond_with(
status_code(200)
.body(sealed.payload)
.append_header("x-version-id", version_id.to_string())
.append_header("content-type", SNAPSHOT_CONTENT_TYPE),
),
);
let mut svr = SyncServer::new(server.url("/").to_string(), CLIENT_ID, b"abc".to_vec())?;
assert_eq!(
svr.get_snapshot().await?,
Some((version_id, b"abc".to_vec()))
);
Ok(())
}
#[tokio::test]
async fn get_snapshot_not_found() -> anyhow::Result<()> {
let server = Server::run();
server.expect(
Expectation::matching(all_of!(
request::method_path("GET", "/v1/client/snapshot"),
request::headers(contains(("x-client-id", CLIENT_ID.to_string()))),
))
.respond_with(status_code(404)),
);
let mut svr = SyncServer::new(server.url("/").to_string(), CLIENT_ID, b"abc".to_vec())?;
assert_eq!(svr.get_snapshot().await?, None);
Ok(())
}
#[tokio::test]
async fn get_snapshot_error() -> anyhow::Result<()> {
let server = Server::run();
server.expect(
Expectation::matching(all_of!(
request::method_path("GET", "/v1/client/snapshot"),
request::headers(contains(("x-client-id", CLIENT_ID.to_string()))),
))
.respond_with(status_code(403)),
);
let mut svr = SyncServer::new(server.url("/").to_string(), CLIENT_ID, b"abc".to_vec())?;
assert!(svr.get_snapshot().await.is_err());
Ok(())
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/task/status.rs | src/task/status.rs | /// The status of a task, as defined by the task data model.
#[derive(Debug, PartialEq, Eq, Clone, strum_macros::Display)]
#[repr(C)]
pub enum Status {
Pending,
Completed,
Deleted,
Recurring,
/// Unknown signifies a status in the task DB that was not
/// recognized. This supports forward-compatibility if a
/// new status is added. Tasks with unknown status should
/// be ignored (but not deleted).
Unknown(String),
}
impl Status {
/// Get a Status from the string value in a TaskMap
pub(crate) fn from_taskmap(s: &str) -> Status {
match s {
"pending" => Status::Pending,
"completed" => Status::Completed,
"deleted" => Status::Deleted,
"recurring" => Status::Recurring,
v => Status::Unknown(v.to_string()),
}
}
/// Get the 1-character value for this status to use in the TaskMap.
pub(crate) fn to_taskmap(&self) -> &str {
match self {
Status::Pending => "pending",
Status::Completed => "completed",
Status::Deleted => "deleted",
Status::Recurring => "recurring",
Status::Unknown(v) => v.as_ref(),
}
}
}
#[cfg(test)]
mod test {
use super::*;
use pretty_assertions::assert_eq;
#[test]
fn to_taskmap() {
assert_eq!(Status::Pending.to_taskmap(), "pending");
assert_eq!(Status::Completed.to_taskmap(), "completed");
assert_eq!(Status::Deleted.to_taskmap(), "deleted");
assert_eq!(Status::Recurring.to_taskmap(), "recurring");
assert_eq!(Status::Unknown("wishful".into()).to_taskmap(), "wishful");
}
#[test]
fn from_taskmap() {
assert_eq!(Status::from_taskmap("pending"), Status::Pending);
assert_eq!(Status::from_taskmap("completed"), Status::Completed);
assert_eq!(Status::from_taskmap("deleted"), Status::Deleted);
assert_eq!(Status::from_taskmap("recurring"), Status::Recurring);
assert_eq!(
Status::from_taskmap("something-else"),
Status::Unknown("something-else".into())
);
}
#[test]
fn display() {
assert_eq!(format!("{}", Status::Pending), "Pending");
assert_eq!(format!("{}", Status::Completed), "Completed");
assert_eq!(format!("{}", Status::Deleted), "Deleted");
assert_eq!(format!("{}", Status::Recurring), "Recurring");
assert_eq!(format!("{}", Status::Unknown("wishful".into())), "Unknown");
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/task/annotation.rs | src/task/annotation.rs | use super::Timestamp;
/// An annotation for a task
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
pub struct Annotation {
/// Time the annotation was made
pub entry: Timestamp,
/// Content of the annotation
pub description: String,
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/task/time.rs | src/task/time.rs | use chrono::{offset::LocalResult, DateTime, TimeZone, Utc};
pub(crate) type Timestamp = DateTime<Utc>;
pub fn utc_timestamp(secs: i64) -> Timestamp {
match Utc.timestamp_opt(secs, 0) {
LocalResult::Single(tz) => tz,
// The other two variants are None and Ambiguous, which both are caused by DST.
_ => unreachable!("We're requesting UTC so daylight saving time isn't a factor."),
}
}
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
GothenburgBitFactory/taskchampion | https://github.com/GothenburgBitFactory/taskchampion/blob/45f5345daff60aba526db9e54dc03c8e0da37f14/src/task/mod.rs | src/task/mod.rs | #![allow(clippy::module_inception)]
mod annotation;
mod data;
mod status;
mod tag;
mod task;
mod time;
pub use annotation::Annotation;
pub use data::TaskData;
pub use status::Status;
pub use tag::Tag;
pub use task::Task;
pub use time::utc_timestamp;
pub(crate) use time::Timestamp;
| rust | MIT | 45f5345daff60aba526db9e54dc03c8e0da37f14 | 2026-01-04T20:19:44.628446Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.