repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/matchers.rs | lib/src/matchers.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::collections::HashSet;
use std::fmt::Debug;
use globset::Glob;
use itertools::Itertools as _;
use tracing::instrument;
use crate::repo_path::RepoPath;
use crate::repo_path::RepoPathComponentBuf;
use crate::repo_path::RepoPathTree;
#[derive(PartialEq, Eq, Debug)]
pub enum Visit {
    /// Everything in the directory is *guaranteed* to match, no need to check
    /// descendants
    AllRecursively,
    /// Only the listed directories and files within the directory may match;
    /// anything else can be skipped.
    Specific {
        dirs: VisitDirs,
        files: VisitFiles,
    },
    /// Nothing in the directory or its subdirectories will match.
    ///
    /// This is the same as `Specific` with no directories or files. Use
    /// `Visit::sets()` to create an instance that's `Specific` or
    /// `Nothing` depending on the values at runtime.
    Nothing,
}
impl Visit {
    /// All entries in the directory need to be visited, but they are not
    /// guaranteed to match.
    const SOME: Self = Self::Specific {
        dirs: VisitDirs::All,
        files: VisitFiles::All,
    };

    /// Builds a `Specific` visit from the given name sets, collapsing to
    /// `Nothing` when both sets are empty.
    fn sets(dirs: HashSet<RepoPathComponentBuf>, files: HashSet<RepoPathComponentBuf>) -> Self {
        match (dirs.is_empty(), files.is_empty()) {
            (true, true) => Self::Nothing,
            _ => Self::Specific {
                dirs: VisitDirs::Set(dirs),
                files: VisitFiles::Set(files),
            },
        }
    }

    /// Returns true if nothing in the directory can match.
    pub fn is_nothing(&self) -> bool {
        matches!(self, Self::Nothing)
    }
}
/// Which subdirectories of a directory need to be visited.
#[derive(PartialEq, Eq, Debug)]
pub enum VisitDirs {
    /// All subdirectories need to be visited.
    All,
    /// Only the named subdirectories need to be visited.
    Set(HashSet<RepoPathComponentBuf>),
}
/// Which files within a directory need to be visited.
#[derive(PartialEq, Eq, Debug)]
pub enum VisitFiles {
    /// All files need to be visited.
    All,
    /// Only the named files need to be visited.
    Set(HashSet<RepoPathComponentBuf>),
}
/// Decides which file paths match and which directory entries may contain
/// matching paths.
pub trait Matcher: Debug + Send + Sync {
    /// Returns true if the given file path matches.
    fn matches(&self, file: &RepoPath) -> bool;
    /// Returns which entries of the directory `dir` need to be visited.
    fn visit(&self, dir: &RepoPath) -> Visit;
}
// Forwarding impl so a borrowed matcher can be used wherever a matcher is
// expected.
impl<T: Matcher + ?Sized> Matcher for &T {
    fn matches(&self, file: &RepoPath) -> bool {
        (**self).matches(file)
    }

    fn visit(&self, dir: &RepoPath) -> Visit {
        (**self).visit(dir)
    }
}
// Forwarding impl so a boxed matcher can be used wherever a matcher is
// expected.
impl<T: Matcher + ?Sized> Matcher for Box<T> {
    fn matches(&self, file: &RepoPath) -> bool {
        (**self).matches(file)
    }

    fn visit(&self, dir: &RepoPath) -> Visit {
        (**self).visit(dir)
    }
}
/// Matcher that matches no paths at all.
#[derive(PartialEq, Eq, Debug)]
pub struct NothingMatcher;

impl Matcher for NothingMatcher {
    fn matches(&self, _file: &RepoPath) -> bool {
        false
    }

    fn visit(&self, _dir: &RepoPath) -> Visit {
        Visit::Nothing
    }
}
/// Matcher that matches every path.
#[derive(PartialEq, Eq, Debug)]
pub struct EverythingMatcher;

impl Matcher for EverythingMatcher {
    fn matches(&self, _file: &RepoPath) -> bool {
        true
    }

    fn visit(&self, _dir: &RepoPath) -> Visit {
        Visit::AllRecursively
    }
}
/// Matcher that matches an explicit set of file paths.
#[derive(PartialEq, Eq, Debug)]
pub struct FilesMatcher {
    // Each input file path is recorded as a node marked
    // `FilesNodeKind::File` in this path tree.
    tree: RepoPathTree<FilesNodeKind>,
}

impl FilesMatcher {
    /// Creates a matcher that matches exactly the given `files`.
    pub fn new(files: impl IntoIterator<Item = impl AsRef<RepoPath>>) -> Self {
        let mut tree = RepoPathTree::default();
        for f in files {
            tree.add(f.as_ref()).set_value(FilesNodeKind::File);
        }
        Self { tree }
    }
}
impl Matcher for FilesMatcher {
fn matches(&self, file: &RepoPath) -> bool {
self.tree
.get(file)
.is_some_and(|sub| *sub.value() == FilesNodeKind::File)
}
fn visit(&self, dir: &RepoPath) -> Visit {
self.tree
.get(dir)
.map_or(Visit::Nothing, files_tree_to_visit_sets)
}
}
/// Kind of a node in a [`FilesMatcher`]'s path tree.
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
enum FilesNodeKind {
    /// Represents an intermediate directory.
    #[default]
    Dir,
    /// Represents a file (which might also be an intermediate directory.)
    File,
}
/// Translates the children of a files tree node into visit sets.
fn files_tree_to_visit_sets(tree: &RepoPathTree<FilesNodeKind>) -> Visit {
    let mut dirs = HashSet::new();
    let mut files = HashSet::new();
    for (name, sub) in tree.children() {
        // Descend only into names that have file entries underneath them.
        if sub.has_children() {
            dirs.insert(name.to_owned());
        }
        // A node flagged as File is one of the matched paths.
        if matches!(sub.value(), FilesNodeKind::File) {
            files.insert(name.to_owned());
        }
    }
    Visit::sets(dirs, files)
}
/// Matcher that matches the given paths and everything below them.
#[derive(Debug)]
pub struct PrefixMatcher {
    tree: RepoPathTree<PrefixNodeKind>,
}

impl PrefixMatcher {
    /// Creates a matcher for the given `prefixes`.
    ///
    /// Each prefix path matches itself and all paths below it.
    #[instrument(skip(prefixes))]
    pub fn new(prefixes: impl IntoIterator<Item = impl AsRef<RepoPath>>) -> Self {
        let mut tree = RepoPathTree::default();
        for prefix in prefixes {
            tree.add(prefix.as_ref()).set_value(PrefixNodeKind::Prefix);
        }
        Self { tree }
    }
}
impl Matcher for PrefixMatcher {
    fn matches(&self, file: &RepoPath) -> bool {
        // 'file' matches if any node on the walk down to it is a prefix.
        self.tree
            .walk_to(file)
            .any(|(sub, _)| *sub.value() == PrefixNodeKind::Prefix)
    }

    fn visit(&self, dir: &RepoPath) -> Visit {
        for (sub, tail_path) in self.tree.walk_to(dir) {
            // ancestor of 'dir' matches prefix paths
            if *sub.value() == PrefixNodeKind::Prefix {
                return Visit::AllRecursively;
            }
            // 'dir' found, and is an ancestor of prefix paths
            if tail_path.is_root() {
                return prefix_tree_to_visit_sets(sub);
            }
        }
        // 'dir' diverged from the tree, so no prefix can apply.
        Visit::Nothing
    }
}
/// Kind of a node in a [`PrefixMatcher`]'s path tree.
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
enum PrefixNodeKind {
    /// Represents an intermediate directory.
    #[default]
    Dir,
    /// Represents a file and prefix directory.
    Prefix,
}
/// Translates the children of a prefix tree node into visit sets.
fn prefix_tree_to_visit_sets(tree: &RepoPathTree<PrefixNodeKind>) -> Visit {
    let mut dirs = HashSet::new();
    let mut files = HashSet::new();
    for (name, sub) in tree.children() {
        // Every child directory may lead to a prefix, so visit them all.
        dirs.insert(name.to_owned());
        // A prefix node also matches a plain file of that name.
        if matches!(sub.value(), PrefixNodeKind::Prefix) {
            files.insert(name.to_owned());
        }
    }
    Visit::sets(dirs, files)
}
/// Matches file or prefix paths with glob patterns.
#[derive(Clone, Debug)]
pub struct GlobsMatcher {
    // Maps each anchor directory to the compiled set of patterns evaluated
    // relative to it (None on purely intermediate nodes.)
    tree: RepoPathTree<Option<regex::bytes::RegexSet>>,
    // If true, a matched path also matches everything below it.
    matches_prefix_paths: bool,
}

impl GlobsMatcher {
    /// Returns new matcher builder.
    pub fn builder<'a>() -> GlobsMatcherBuilder<'a> {
        GlobsMatcherBuilder {
            dir_patterns: vec![],
            matches_prefix_paths: false,
        }
    }
}
impl Matcher for GlobsMatcher {
    fn matches(&self, file: &RepoPath) -> bool {
        // check if any ancestor (dir, patterns) matches 'file'
        self.tree
            .walk_to(file)
            .take_while(|(_, tail_path)| !tail_path.is_root()) // only dirs
            .any(|(sub, tail_path)| {
                // Patterns are tested against the path remainder relative to
                // the directory they are anchored at.
                let tail = tail_path.as_internal_file_string().as_bytes();
                sub.value().as_ref().is_some_and(|pat| pat.is_match(tail))
            })
    }

    fn visit(&self, dir: &RepoPath) -> Visit {
        let mut max_visit = Visit::Nothing;
        for (sub, tail_path) in self.tree.walk_to(dir) {
            // ancestor of 'dir' has patterns
            if let Some(pat) = &sub.value() {
                let tail = tail_path.as_internal_file_string().as_bytes();
                if self.matches_prefix_paths && pat.is_match(tail) {
                    // 'dir' matches prefix patterns
                    return Visit::AllRecursively;
                } else {
                    // Patterns exist at this anchor, so entries below 'dir'
                    // might still match.
                    max_visit = Visit::SOME;
                }
                if !self.matches_prefix_paths {
                    break; // can't narrow visit anymore
                }
            }
            // 'dir' found, and is an ancestor of pattern paths
            if tail_path.is_root() && max_visit == Visit::Nothing {
                // Only subdirectories leading to pattern anchors need to be
                // visited; no files at this level can match.
                let sub_dirs = sub.children().map(|(name, _)| name.to_owned()).collect();
                return Visit::sets(sub_dirs, HashSet::new());
            }
        }
        max_visit
    }
}
/// Constructs [`GlobsMatcher`] from patterns.
#[derive(Clone, Debug)]
pub struct GlobsMatcherBuilder<'a> {
    // (anchor directory, glob) pairs collected so far; compiled in build().
    dir_patterns: Vec<(&'a RepoPath, &'a Glob)>,
    matches_prefix_paths: bool,
}
impl<'a> GlobsMatcherBuilder<'a> {
    /// Whether or not the matcher will match prefix paths.
    pub fn prefix_paths(mut self, yes: bool) -> Self {
        self.matches_prefix_paths = yes;
        self
    }

    /// Returns true if no patterns have been added yet.
    pub fn is_empty(&self) -> bool {
        self.dir_patterns.is_empty()
    }

    /// Adds `pattern` that should be evaluated relative to `dir`.
    ///
    /// The `dir` should be the longest directory path that contains no glob
    /// meta characters.
    pub fn add(&mut self, dir: &'a RepoPath, pattern: &'a Glob) {
        self.dir_patterns.push((dir, pattern));
    }

    /// Compiles matcher.
    pub fn build(self) -> GlobsMatcher {
        let Self {
            mut dir_patterns,
            matches_prefix_paths,
        } = self;
        // Sort so patterns anchored at the same directory become adjacent and
        // can be compiled into a single RegexSet per directory below.
        dir_patterns.sort_unstable_by_key(|&(dir, _)| dir);
        let mut tree: RepoPathTree<Option<regex::bytes::RegexSet>> = Default::default();
        for (dir, chunk) in &dir_patterns.into_iter().chunk_by(|&(dir, _)| dir) {
            // Based on new_regex() in globset. We don't use GlobSet because
            // RepoPath separator should be "/" on all platforms.
            let mut regex_builder = if matches_prefix_paths {
                // Rewrite each pattern so it also matches any path below a
                // matched path.
                let regex_patterns = chunk.map(|(_, pattern)| glob_to_prefix_regex(pattern));
                regex::bytes::RegexSetBuilder::new(regex_patterns)
            } else {
                regex::bytes::RegexSetBuilder::new(chunk.map(|(_, pattern)| pattern.regex()))
            };
            let regex = regex_builder
                .dot_matches_new_line(true)
                .build()
                .expect("glob regex should be valid");
            let sub = tree.add(dir);
            // Sorting + chunking guarantees each directory is seen only once.
            assert!(sub.value().is_none());
            sub.set_value(Some(regex));
        }
        GlobsMatcher {
            tree,
            matches_prefix_paths,
        }
    }
}
/// Rewrites an anchored glob regex so that it also matches any path below a
/// matched path (prefix semantics).
fn glob_to_prefix_regex(glob: &Glob) -> String {
    // Here we rely on the implementation detail of the globset crate.
    // Alternatively, we can construct an anchored regex automaton and test
    // prefix matching by feeding characters one by one.
    let regex = glob.regex();
    let prefix = regex
        .strip_suffix('$')
        .expect("glob regex should be anchored");
    // Accept either an exact match or a match followed by a "/" separator.
    format!("{prefix}(?:/|$)")
}
/// Matches paths that are matched by any of the input matchers.
#[derive(Clone, Debug)]
pub struct UnionMatcher<M1, M2> {
    input1: M1,
    input2: M2,
}

impl<M1: Matcher, M2: Matcher> UnionMatcher<M1, M2> {
    /// Creates a matcher for paths matched by `input1` or `input2`.
    pub fn new(input1: M1, input2: M2) -> Self {
        Self { input1, input2 }
    }
}
impl<M1: Matcher, M2: Matcher> Matcher for UnionMatcher<M1, M2> {
    fn matches(&self, file: &RepoPath) -> bool {
        self.input1.matches(file) || self.input2.matches(file)
    }

    fn visit(&self, dir: &RepoPath) -> Visit {
        // Either side saying "everything" wins, and either side saying
        // "nothing" defers to the other side.
        let (dirs1, files1) = match self.input1.visit(dir) {
            Visit::AllRecursively => return Visit::AllRecursively,
            Visit::Nothing => return self.input2.visit(dir),
            Visit::Specific { dirs, files } => (dirs, files),
        };
        let (dirs2, files2) = match self.input2.visit(dir) {
            Visit::AllRecursively => return Visit::AllRecursively,
            Visit::Nothing => {
                return Visit::Specific {
                    dirs: dirs1,
                    files: files1,
                };
            }
            Visit::Specific { dirs, files } => (dirs, files),
        };
        // Both sides are specific: take the union of their sets, where `All`
        // on either side absorbs the other.
        let dirs = match (dirs1, dirs2) {
            (VisitDirs::All, _) | (_, VisitDirs::All) => VisitDirs::All,
            (VisitDirs::Set(set1), VisitDirs::Set(set2)) => {
                VisitDirs::Set(set1.iter().chain(&set2).cloned().collect())
            }
        };
        let files = match (files1, files2) {
            (VisitFiles::All, _) | (_, VisitFiles::All) => VisitFiles::All,
            (VisitFiles::Set(set1), VisitFiles::Set(set2)) => {
                VisitFiles::Set(set1.iter().chain(&set2).cloned().collect())
            }
        };
        Visit::Specific { dirs, files }
    }
}
/// Matches paths that are matched by the first input matcher but not by the
/// second.
#[derive(Clone, Debug)]
pub struct DifferenceMatcher<M1, M2> {
    /// The minuend
    wanted: M1,
    /// The subtrahend
    unwanted: M2,
}

impl<M1: Matcher, M2: Matcher> DifferenceMatcher<M1, M2> {
    /// Creates a matcher for paths matched by `wanted` but not `unwanted`.
    pub fn new(wanted: M1, unwanted: M2) -> Self {
        Self { wanted, unwanted }
    }
}
impl<M1: Matcher, M2: Matcher> Matcher for DifferenceMatcher<M1, M2> {
    fn matches(&self, file: &RepoPath) -> bool {
        self.wanted.matches(file) && !self.unwanted.matches(file)
    }

    fn visit(&self, dir: &RepoPath) -> Visit {
        match self.unwanted.visit(dir) {
            // Everything below 'dir' is excluded, so nothing can match.
            Visit::AllRecursively => Visit::Nothing,
            // No exclusions below 'dir'; defer entirely to the wanted side.
            Visit::Nothing => self.wanted.visit(dir),
            Visit::Specific { .. } => {
                // Some exclusions apply below, so a wanted "everything"
                // guarantee must be weakened to "maybe anything".
                let wanted_visit = self.wanted.visit(dir);
                if wanted_visit == Visit::AllRecursively {
                    Visit::SOME
                } else {
                    wanted_visit
                }
            }
        }
    }
}
/// Matches paths that are matched by both input matchers.
#[derive(Clone, Debug)]
pub struct IntersectionMatcher<M1, M2> {
    input1: M1,
    input2: M2,
}

impl<M1: Matcher, M2: Matcher> IntersectionMatcher<M1, M2> {
    /// Creates a matcher for paths matched by both `input1` and `input2`.
    pub fn new(input1: M1, input2: M2) -> Self {
        Self { input1, input2 }
    }
}
impl<M1: Matcher, M2: Matcher> Matcher for IntersectionMatcher<M1, M2> {
    fn matches(&self, file: &RepoPath) -> bool {
        self.input1.matches(file) && self.input2.matches(file)
    }

    fn visit(&self, dir: &RepoPath) -> Visit {
        match self.input1.visit(dir) {
            // input1 guarantees everything, so the result is whatever input2
            // says, and vice versa. Either side saying "nothing" wins.
            Visit::AllRecursively => self.input2.visit(dir),
            Visit::Nothing => Visit::Nothing,
            Visit::Specific {
                dirs: dirs1,
                files: files1,
            } => match self.input2.visit(dir) {
                Visit::AllRecursively => Visit::Specific {
                    dirs: dirs1,
                    files: files1,
                },
                Visit::Nothing => Visit::Nothing,
                Visit::Specific {
                    dirs: dirs2,
                    files: files2,
                } => {
                    // Intersect the directory sets; `All` is the identity.
                    let dirs = match (dirs1, dirs2) {
                        (VisitDirs::All, VisitDirs::All) => VisitDirs::All,
                        (dirs1, VisitDirs::All) => dirs1,
                        (VisitDirs::All, dirs2) => dirs2,
                        (VisitDirs::Set(dirs1), VisitDirs::Set(dirs2)) => {
                            VisitDirs::Set(dirs1.intersection(&dirs2).cloned().collect())
                        }
                    };
                    // Intersect the file sets the same way.
                    let files = match (files1, files2) {
                        (VisitFiles::All, VisitFiles::All) => VisitFiles::All,
                        (files1, VisitFiles::All) => files1,
                        (VisitFiles::All, files2) => files2,
                        (VisitFiles::Set(files1), VisitFiles::Set(files2)) => {
                            VisitFiles::Set(files1.intersection(&files2).cloned().collect())
                        }
                    };
                    // Normalize an empty intersection to `Nothing`.
                    match (&dirs, &files) {
                        (VisitDirs::Set(dirs), VisitFiles::Set(files))
                            if dirs.is_empty() && files.is_empty() =>
                        {
                            Visit::Nothing
                        }
                        _ => Visit::Specific { dirs, files },
                    }
                }
            },
        }
    }
}
#[cfg(test)]
mod tests {
use maplit::hashset;
use super::*;
use crate::fileset::parse_file_glob;
fn repo_path(value: &str) -> &RepoPath {
RepoPath::from_internal_string(value).unwrap()
}
fn repo_path_component_buf(value: &str) -> RepoPathComponentBuf {
RepoPathComponentBuf::new(value).unwrap()
}
fn glob(s: &str) -> Glob {
let icase = false;
parse_file_glob(s, icase).unwrap()
}
fn new_file_globs_matcher(dir_patterns: &[(&RepoPath, Glob)]) -> GlobsMatcher {
let mut builder = GlobsMatcher::builder();
for (dir, pattern) in dir_patterns {
builder.add(dir, pattern);
}
builder.build()
}
fn new_prefix_globs_matcher(dir_patterns: &[(&RepoPath, Glob)]) -> GlobsMatcher {
let mut builder = GlobsMatcher::builder().prefix_paths(true);
for (dir, pattern) in dir_patterns {
builder.add(dir, pattern);
}
builder.build()
}
#[test]
fn test_nothing_matcher() {
let m = NothingMatcher;
assert!(!m.matches(repo_path("file")));
assert!(!m.matches(repo_path("dir/file")));
assert_eq!(m.visit(RepoPath::root()), Visit::Nothing);
}
#[test]
fn test_files_matcher_empty() {
let m = FilesMatcher::new([] as [&RepoPath; 0]);
assert!(!m.matches(repo_path("file")));
assert!(!m.matches(repo_path("dir/file")));
assert_eq!(m.visit(RepoPath::root()), Visit::Nothing);
}
#[test]
fn test_files_matcher_nonempty() {
let m = FilesMatcher::new([
repo_path("dir1/subdir1/file1"),
repo_path("dir1/subdir1/file2"),
repo_path("dir1/subdir2/file3"),
repo_path("file4"),
]);
assert!(!m.matches(repo_path("dir1")));
assert!(!m.matches(repo_path("dir1/subdir1")));
assert!(m.matches(repo_path("dir1/subdir1/file1")));
assert!(m.matches(repo_path("dir1/subdir1/file2")));
assert!(!m.matches(repo_path("dir1/subdir1/file3")));
assert_eq!(
m.visit(RepoPath::root()),
Visit::sets(
hashset! {repo_path_component_buf("dir1")},
hashset! {repo_path_component_buf("file4")}
)
);
assert_eq!(
m.visit(repo_path("dir1")),
Visit::sets(
hashset! {
repo_path_component_buf("subdir1"),
repo_path_component_buf("subdir2"),
},
hashset! {}
)
);
assert_eq!(
m.visit(repo_path("dir1/subdir1")),
Visit::sets(
hashset! {},
hashset! {
repo_path_component_buf("file1"),
repo_path_component_buf("file2"),
},
)
);
assert_eq!(
m.visit(repo_path("dir1/subdir2")),
Visit::sets(hashset! {}, hashset! {repo_path_component_buf("file3")})
);
}
#[test]
fn test_prefix_matcher_empty() {
let m = PrefixMatcher::new([] as [&RepoPath; 0]);
assert!(!m.matches(repo_path("file")));
assert!(!m.matches(repo_path("dir/file")));
assert_eq!(m.visit(RepoPath::root()), Visit::Nothing);
}
#[test]
fn test_prefix_matcher_root() {
let m = PrefixMatcher::new([RepoPath::root()]);
// Matches all files
assert!(m.matches(repo_path("file")));
assert!(m.matches(repo_path("dir/file")));
// Visits all directories
assert_eq!(m.visit(RepoPath::root()), Visit::AllRecursively);
assert_eq!(m.visit(repo_path("foo/bar")), Visit::AllRecursively);
}
#[test]
fn test_prefix_matcher_single_prefix() {
let m = PrefixMatcher::new([repo_path("foo/bar")]);
// Parts of the prefix should not match
assert!(!m.matches(repo_path("foo")));
assert!(!m.matches(repo_path("bar")));
// A file matching the prefix exactly should match
assert!(m.matches(repo_path("foo/bar")));
// Files in subdirectories should match
assert!(m.matches(repo_path("foo/bar/baz")));
assert!(m.matches(repo_path("foo/bar/baz/qux")));
// Sibling files should not match
assert!(!m.matches(repo_path("foo/foo")));
// An unrooted "foo/bar" should not match
assert!(!m.matches(repo_path("bar/foo/bar")));
// The matcher should only visit directory foo/ in the root (file "foo"
// shouldn't be visited)
assert_eq!(
m.visit(RepoPath::root()),
Visit::sets(hashset! {repo_path_component_buf("foo")}, hashset! {})
);
// Inside parent directory "foo/", both subdirectory "bar" and file "bar" may
// match
assert_eq!(
m.visit(repo_path("foo")),
Visit::sets(
hashset! {repo_path_component_buf("bar")},
hashset! {repo_path_component_buf("bar")}
)
);
// Inside a directory that matches the prefix, everything matches recursively
assert_eq!(m.visit(repo_path("foo/bar")), Visit::AllRecursively);
// Same thing in subdirectories of the prefix
assert_eq!(m.visit(repo_path("foo/bar/baz")), Visit::AllRecursively);
// Nothing in directories that are siblings of the prefix can match, so don't
// visit
assert_eq!(m.visit(repo_path("bar")), Visit::Nothing);
}
#[test]
fn test_prefix_matcher_nested_prefixes() {
let m = PrefixMatcher::new([repo_path("foo"), repo_path("foo/bar/baz")]);
assert!(m.matches(repo_path("foo")));
assert!(!m.matches(repo_path("bar")));
assert!(m.matches(repo_path("foo/bar")));
// Matches because the "foo" pattern matches
assert!(m.matches(repo_path("foo/baz/foo")));
assert_eq!(
m.visit(RepoPath::root()),
Visit::sets(
hashset! {repo_path_component_buf("foo")},
hashset! {repo_path_component_buf("foo")}
)
);
// Inside a directory that matches the prefix, everything matches recursively
assert_eq!(m.visit(repo_path("foo")), Visit::AllRecursively);
// Same thing in subdirectories of the prefix
assert_eq!(m.visit(repo_path("foo/bar/baz")), Visit::AllRecursively);
}
#[test]
fn test_file_globs_matcher_rooted() {
let m = new_file_globs_matcher(&[(RepoPath::root(), glob("*.rs"))]);
assert!(!m.matches(repo_path("foo")));
assert!(m.matches(repo_path("foo.rs")));
assert!(m.matches(repo_path("foo\n.rs"))); // "*" matches newline
assert!(!m.matches(repo_path("foo.rss")));
assert!(!m.matches(repo_path("foo.rs/bar.rs")));
assert!(!m.matches(repo_path("foo/bar.rs")));
assert_eq!(m.visit(RepoPath::root()), Visit::SOME);
// Multiple patterns at the same directory
let m = new_file_globs_matcher(&[
(RepoPath::root(), glob("foo?")),
(repo_path("other"), glob("")),
(RepoPath::root(), glob("**/*.rs")),
]);
assert!(!m.matches(repo_path("foo")));
assert!(m.matches(repo_path("foo1")));
assert!(!m.matches(repo_path("Foo1")));
assert!(!m.matches(repo_path("foo1/foo2")));
assert!(m.matches(repo_path("foo.rs")));
assert!(m.matches(repo_path("foo.rs/bar.rs")));
assert!(m.matches(repo_path("foo/bar.rs")));
assert_eq!(m.visit(RepoPath::root()), Visit::SOME);
assert_eq!(m.visit(repo_path("foo")), Visit::SOME);
assert_eq!(m.visit(repo_path("bar/baz")), Visit::SOME);
}
#[test]
fn test_file_globs_matcher_nested() {
let m = new_file_globs_matcher(&[
(repo_path("foo"), glob("**/*.a")),
(repo_path("foo/bar"), glob("*.b")),
(repo_path("baz"), glob("?*")),
]);
assert!(!m.matches(repo_path("foo")));
assert!(m.matches(repo_path("foo/x.a")));
assert!(!m.matches(repo_path("foo/x.b")));
assert!(m.matches(repo_path("foo/bar/x.a")));
assert!(m.matches(repo_path("foo/bar/x.b")));
assert!(m.matches(repo_path("foo/bar/baz/x.a")));
assert!(!m.matches(repo_path("foo/bar/baz/x.b")));
assert!(!m.matches(repo_path("baz")));
assert!(m.matches(repo_path("baz/x")));
assert_eq!(
m.visit(RepoPath::root()),
Visit::Specific {
dirs: VisitDirs::Set(hashset! {
repo_path_component_buf("foo"),
repo_path_component_buf("baz"),
}),
files: VisitFiles::Set(hashset! {}),
}
);
assert_eq!(m.visit(repo_path("foo")), Visit::SOME);
assert_eq!(m.visit(repo_path("foo/bar")), Visit::SOME);
assert_eq!(m.visit(repo_path("foo/bar/baz")), Visit::SOME);
assert_eq!(m.visit(repo_path("bar")), Visit::Nothing);
assert_eq!(m.visit(repo_path("baz")), Visit::SOME);
}
#[test]
fn test_file_globs_matcher_wildcard_any() {
// It's not obvious whether "*" should match the root directory path.
// Since "<dir>/*" shouldn't match "<dir>" itself, we can consider that
// "*" has an implicit "<root>/" prefix, and therefore it makes sense
// that "*" doesn't match the root. OTOH, if we compare paths as literal
// strings, "*" matches "". The current implementation is the former.
let m = new_file_globs_matcher(&[(RepoPath::root(), glob("*"))]);
assert!(!m.matches(RepoPath::root()));
assert!(m.matches(repo_path("x")));
assert!(m.matches(repo_path("x.rs")));
assert!(!m.matches(repo_path("foo/bar.rs")));
assert_eq!(m.visit(RepoPath::root()), Visit::SOME);
// "foo/*" shouldn't match "foo"
let m = new_file_globs_matcher(&[(repo_path("foo"), glob("*"))]);
assert!(!m.matches(RepoPath::root()));
assert!(!m.matches(repo_path("foo")));
assert!(m.matches(repo_path("foo/x")));
assert!(!m.matches(repo_path("foo/bar/baz")));
assert_eq!(
m.visit(RepoPath::root()),
Visit::Specific {
dirs: VisitDirs::Set(hashset! {repo_path_component_buf("foo")}),
files: VisitFiles::Set(hashset! {}),
}
);
assert_eq!(m.visit(repo_path("foo")), Visit::SOME);
assert_eq!(m.visit(repo_path("bar")), Visit::Nothing);
}
#[test]
fn test_prefix_globs_matcher_rooted() {
let m = new_prefix_globs_matcher(&[(RepoPath::root(), glob("*.rs"))]);
assert!(!m.matches(repo_path("foo")));
assert!(m.matches(repo_path("foo.rs")));
assert!(m.matches(repo_path("foo\n.rs"))); // "*" matches newline
assert!(!m.matches(repo_path("foo.rss")));
assert!(m.matches(repo_path("foo.rs/bar")));
assert!(!m.matches(repo_path("foo/bar.rs")));
assert_eq!(m.visit(RepoPath::root()), Visit::SOME);
assert_eq!(m.visit(repo_path("foo.rs")), Visit::AllRecursively);
assert_eq!(m.visit(repo_path("foo.rs/bar")), Visit::AllRecursively);
assert_eq!(m.visit(repo_path("foo.rss")), Visit::SOME);
assert_eq!(m.visit(repo_path("foo.rss/bar")), Visit::SOME);
// Multiple patterns at the same directory
let m = new_prefix_globs_matcher(&[
(RepoPath::root(), glob("foo?")),
(repo_path("other"), glob("")),
(RepoPath::root(), glob("**/*.rs")),
]);
assert!(!m.matches(repo_path("foo")));
assert!(m.matches(repo_path("foo1")));
assert!(!m.matches(repo_path("Foo1")));
assert!(m.matches(repo_path("foo1/foo2")));
assert!(m.matches(repo_path("foo.rs")));
assert!(m.matches(repo_path("foo.rs/bar.rs")));
assert!(m.matches(repo_path("foo/bar.rs")));
assert_eq!(m.visit(RepoPath::root()), Visit::SOME);
assert_eq!(m.visit(repo_path("foo")), Visit::SOME);
assert_eq!(m.visit(repo_path("bar/baz")), Visit::SOME);
}
#[test]
fn test_prefix_globs_matcher_nested() {
let m = new_prefix_globs_matcher(&[
(repo_path("foo"), glob("**/*.a")),
(repo_path("foo/bar"), glob("*.b")),
(repo_path("baz"), glob("?*")),
]);
assert!(!m.matches(repo_path("foo")));
assert!(m.matches(repo_path("foo/x.a")));
assert!(!m.matches(repo_path("foo/x.b")));
assert!(m.matches(repo_path("foo/bar/x.a")));
assert!(m.matches(repo_path("foo/bar/x.b")));
assert!(m.matches(repo_path("foo/bar/x.b/y")));
assert!(m.matches(repo_path("foo/bar/baz/x.a")));
assert!(!m.matches(repo_path("foo/bar/baz/x.b")));
assert!(!m.matches(repo_path("baz")));
assert!(m.matches(repo_path("baz/x")));
assert!(m.matches(repo_path("baz/x/y")));
assert_eq!(
m.visit(RepoPath::root()),
Visit::Specific {
dirs: VisitDirs::Set(hashset! {
repo_path_component_buf("foo"),
repo_path_component_buf("baz"),
}),
files: VisitFiles::Set(hashset! {}),
}
);
assert_eq!(m.visit(repo_path("foo")), Visit::SOME);
assert_eq!(m.visit(repo_path("foo/x.a")), Visit::AllRecursively);
assert_eq!(m.visit(repo_path("foo/bar")), Visit::SOME);
assert_eq!(m.visit(repo_path("foo/bar/x.a")), Visit::AllRecursively);
assert_eq!(m.visit(repo_path("foo/bar/x.b")), Visit::AllRecursively);
assert_eq!(m.visit(repo_path("foo/bar/baz")), Visit::SOME);
assert_eq!(m.visit(repo_path("bar")), Visit::Nothing);
assert_eq!(m.visit(repo_path("baz")), Visit::SOME);
assert_eq!(m.visit(repo_path("baz/x")), Visit::AllRecursively);
assert_eq!(m.visit(repo_path("baz/x/y")), Visit::AllRecursively);
}
#[test]
fn test_prefix_globs_matcher_wildcard_any() {
// It's not obvious whether "*" should match the root directory path.
// Since "<dir>/*" shouldn't match "<dir>" itself, we can consider that
// "*" has an implicit "<root>/" prefix, and therefore it makes sense
// that "*" doesn't match the root. OTOH, if we compare paths as literal
// strings, "*" matches "". The current implementation is the former.
let m = new_prefix_globs_matcher(&[(RepoPath::root(), glob("*"))]);
assert!(!m.matches(RepoPath::root()));
assert!(m.matches(repo_path("x")));
assert!(m.matches(repo_path("x.rs")));
assert!(m.matches(repo_path("foo/bar.rs")));
assert_eq!(m.visit(RepoPath::root()), Visit::AllRecursively);
// "foo/*" shouldn't match "foo"
let m = new_prefix_globs_matcher(&[(repo_path("foo"), glob("*"))]);
assert!(!m.matches(RepoPath::root()));
assert!(!m.matches(repo_path("foo")));
assert!(m.matches(repo_path("foo/x")));
assert!(m.matches(repo_path("foo/bar/baz")));
assert_eq!(
m.visit(RepoPath::root()),
Visit::Specific {
dirs: VisitDirs::Set(hashset! {repo_path_component_buf("foo")}),
files: VisitFiles::Set(hashset! {}),
}
);
assert_eq!(m.visit(repo_path("foo")), Visit::AllRecursively);
assert_eq!(m.visit(repo_path("bar")), Visit::Nothing);
}
#[test]
fn test_prefix_globs_matcher_wildcard_suffix() {
// explicit "/**" in pattern
let m = new_prefix_globs_matcher(&[(repo_path("foo"), glob("**"))]);
assert!(!m.matches(repo_path("foo")));
assert!(m.matches(repo_path("foo/bar")));
assert!(m.matches(repo_path("foo/bar/baz")));
assert_eq!(m.visit(repo_path("foo")), Visit::AllRecursively);
assert_eq!(m.visit(repo_path("foo/bar")), Visit::AllRecursively);
assert_eq!(m.visit(repo_path("foo/bar/baz")), Visit::AllRecursively);
}
#[test]
fn test_union_matcher_concatenate_roots() {
let m1 = PrefixMatcher::new([repo_path("foo"), repo_path("bar")]);
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/str_util.rs | lib/src/str_util.rs | // Copyright 2021-2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! String helpers.
use std::borrow::Borrow;
use std::borrow::Cow;
use std::collections::BTreeMap;
use std::fmt;
use std::fmt::Debug;
use std::iter;
use std::ops::Deref;
use bstr::ByteSlice as _;
use either::Either;
use globset::Glob;
use globset::GlobBuilder;
use thiserror::Error;
/// Error occurred during pattern string parsing.
///
/// See [`StringPattern::from_str_kind`] for the recognized pattern kinds.
#[derive(Debug, Error)]
pub enum StringPatternParseError {
    /// Unknown pattern kind is specified.
    #[error("Invalid string pattern kind `{0}:`")]
    InvalidKind(String),
    /// Failed to parse glob pattern.
    #[error(transparent)]
    GlobPattern(globset::Error),
    /// Failed to parse regular expression.
    #[error(transparent)]
    Regex(regex::Error),
}
/// A wrapper for [`Glob`] with a more concise `Debug` impl.
#[derive(Clone)]
pub struct GlobPattern {
    // The underlying compiled glob; its regex form is exposed via to_regex().
    glob: Glob,
}
impl GlobPattern {
    /// Returns the original glob pattern.
    pub fn as_str(&self) -> &str {
        self.glob.glob()
    }

    /// Converts this glob pattern to a bytes regex.
    ///
    /// The regex is compiled from the glob's own regex translation, so the
    /// input is matched as a plain string, not as a normalized path.
    pub fn to_regex(&self) -> regex::bytes::Regex {
        // Based on new_regex() in globset. We don't use GlobMatcher::is_match(path)
        // because the input string shouldn't be normalized as path.
        regex::bytes::RegexBuilder::new(self.glob.regex())
            .dot_matches_new_line(true)
            .build()
            .expect("glob regex should be valid")
    }
}
impl Debug for GlobPattern {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Show only the original pattern string, not the compiled glob
        // internals.
        f.debug_tuple("GlobPattern").field(&self.as_str()).finish()
    }
}
/// Compiles `src` into a [`GlobPattern`], optionally case-insensitively.
fn parse_glob(src: &str, icase: bool) -> Result<GlobPattern, StringPatternParseError> {
    GlobBuilder::new(src)
        .case_insensitive(icase)
        // Don't use platform-dependent default. This pattern isn't meant for
        // testing file-system paths. If backslash escape were disabled, "\" in
        // pattern would be normalized to "/" on Windows.
        .backslash_escape(true)
        .build()
        .map(|glob| GlobPattern { glob })
        .map_err(StringPatternParseError::GlobPattern)
}
/// Returns true if `c` has a special meaning in glob pattern syntax.
fn is_glob_char(c: char) -> bool {
    // See globset::escape(). In addition to that, backslash is parsed as an
    // escape sequence on all platforms.
    "?*[]{}\\".contains(c)
}
/// Pattern to be tested against string property like commit description or
/// bookmark name.
#[derive(Clone, Debug)]
pub enum StringPattern {
    /// Matches strings exactly.
    Exact(String),
    /// Matches strings case‐insensitively.
    ExactI(String),
    /// Matches strings that contain a substring.
    Substring(String),
    /// Matches strings that case‐insensitively contain a substring.
    SubstringI(String),
    /// Matches with a Unix‐style shell wildcard pattern.
    ///
    /// The pattern is boxed, presumably to keep the enum small — TODO
    /// confirm.
    Glob(Box<GlobPattern>),
    /// Matches with a case‐insensitive Unix‐style shell wildcard pattern.
    GlobI(Box<GlobPattern>),
    /// Matches substrings with a regular expression.
    Regex(regex::bytes::Regex),
    /// Matches substrings with a case‐insensitive regular expression.
    RegexI(regex::bytes::Regex),
}
impl StringPattern {
/// Pattern that matches any string.
pub const fn all() -> Self {
    // An empty substring is contained in every string.
    Self::Substring(String::new())
}

/// Constructs a pattern that matches exactly.
pub fn exact(src: impl Into<String>) -> Self {
    Self::Exact(src.into())
}

/// Constructs a pattern that matches case‐insensitively.
pub fn exact_i(src: impl Into<String>) -> Self {
    Self::ExactI(src.into())
}

/// Constructs a pattern that matches a substring.
pub fn substring(src: impl Into<String>) -> Self {
    Self::Substring(src.into())
}

/// Constructs a pattern that case‐insensitively matches a substring.
pub fn substring_i(src: impl Into<String>) -> Self {
    Self::SubstringI(src.into())
}
/// Parses the given string as a glob pattern.
///
/// # Errors
///
/// Returns an error if the glob fails to compile.
pub fn glob(src: &str) -> Result<Self, StringPatternParseError> {
    // Fast path: a pattern without glob metacharacters is a plain literal.
    if !src.contains(is_glob_char) {
        return Ok(Self::exact(src));
    }
    Ok(Self::Glob(Box::new(parse_glob(src, false)?)))
}

/// Parses the given string as a case‐insensitive glob pattern.
pub fn glob_i(src: &str) -> Result<Self, StringPatternParseError> {
    // No special case for !src.contains(is_glob_char) because it's unclear
    // whether we'll use unicode case comparison for "exact-i" patterns.
    // "glob-i" should always be ASCII-based.
    Ok(Self::GlobI(Box::new(parse_glob(src, true)?)))
}

/// Parses the given string as a regular expression.
///
/// # Errors
///
/// Returns an error if the regex fails to compile.
pub fn regex(src: &str) -> Result<Self, StringPatternParseError> {
    let pattern = regex::bytes::Regex::new(src).map_err(StringPatternParseError::Regex)?;
    Ok(Self::Regex(pattern))
}

/// Parses the given string as a case-insensitive regular expression.
pub fn regex_i(src: &str) -> Result<Self, StringPatternParseError> {
    let pattern = regex::bytes::RegexBuilder::new(src)
        .case_insensitive(true)
        .build()
        .map_err(StringPatternParseError::Regex)?;
    Ok(Self::RegexI(pattern))
}
/// Parses the given string as a pattern of the specified `kind`.
pub fn from_str_kind(src: &str, kind: &str) -> Result<Self, StringPatternParseError> {
match kind {
"exact" => Ok(Self::exact(src)),
"exact-i" => Ok(Self::exact_i(src)),
"substring" => Ok(Self::substring(src)),
"substring-i" => Ok(Self::substring_i(src)),
"glob" => Self::glob(src),
"glob-i" => Self::glob_i(src),
"regex" => Self::regex(src),
"regex-i" => Self::regex_i(src),
_ => Err(StringPatternParseError::InvalidKind(kind.to_owned())),
}
}
/// Returns true if this pattern trivially matches any input strings.
fn is_all(&self) -> bool {
match self {
Self::Exact(_) | Self::ExactI(_) => false,
Self::Substring(needle) | Self::SubstringI(needle) => needle.is_empty(),
Self::Glob(pattern) | Self::GlobI(pattern) => pattern.as_str() == "*",
Self::Regex(pattern) | Self::RegexI(pattern) => pattern.as_str().is_empty(),
}
}
/// Returns true if this pattern matches input strings exactly.
pub fn is_exact(&self) -> bool {
self.as_exact().is_some()
}
/// Returns a literal pattern if this should match input strings exactly.
///
/// This can be used to optimize map lookup by exact key.
pub fn as_exact(&self) -> Option<&str> {
// TODO: Handle trivial case‐insensitive patterns here? It might make people
// expect they can use case‐insensitive patterns in contexts where they
// generally can’t.
match self {
Self::Exact(literal) => Some(literal),
_ => None,
}
}
/// Returns the original string of this pattern.
pub fn as_str(&self) -> &str {
match self {
Self::Exact(literal) => literal,
Self::ExactI(literal) => literal,
Self::Substring(needle) => needle,
Self::SubstringI(needle) => needle,
Self::Glob(pattern) => pattern.as_str(),
Self::GlobI(pattern) => pattern.as_str(),
Self::Regex(pattern) => pattern.as_str(),
Self::RegexI(pattern) => pattern.as_str(),
}
}
/// Converts this pattern to a glob string. Returns `None` if the pattern
/// can't be represented as a glob.
pub fn to_glob(&self) -> Option<Cow<'_, str>> {
// TODO: Handle trivial case‐insensitive patterns here? It might make people
// expect they can use case‐insensitive patterns in contexts where they
// generally can’t.
match self {
Self::Exact(literal) => Some(globset::escape(literal).into()),
Self::Substring(needle) => {
if needle.is_empty() {
Some("*".into())
} else {
Some(format!("*{}*", globset::escape(needle)).into())
}
}
Self::Glob(pattern) => Some(pattern.as_str().into()),
Self::ExactI(_) => None,
Self::SubstringI(_) => None,
Self::GlobI(_) => None,
Self::Regex(_) => None,
Self::RegexI(_) => None,
}
}
fn to_match_fn(&self) -> Box<DynMatchFn> {
// TODO: Unicode case folding is complicated and can be
// locale‐specific. The `globset` crate and Gitoxide only deal with
// ASCII case folding, so we do the same here; a more elaborate case
// folding system will require making sure those behave in a matching
// manner where relevant. That said, regex patterns are unicode-aware by
// default, so we already have some inconsistencies.
//
// Care will need to be taken regarding normalization and the choice of an
// appropriate case‐insensitive comparison scheme (`toNFKC_Casefold`?) to ensure
// that it is compatible with the standard case‐insensitivity of haystack
// components (like internationalized domain names in email addresses). The
// availability of normalization and case folding schemes in database backends
// will also need to be considered. A locale‐specific case folding
// scheme would likely not be appropriate for Jujutsu.
//
// For some discussion of this topic, see:
// <https://github.com/unicode-org/icu4x/issues/3151>
match self {
Self::Exact(literal) => {
let literal = literal.clone();
Box::new(move |haystack| haystack == literal.as_bytes())
}
Self::ExactI(literal) => {
let literal = literal.clone();
Box::new(move |haystack| haystack.eq_ignore_ascii_case(literal.as_bytes()))
}
Self::Substring(needle) => {
let needle = needle.clone();
Box::new(move |haystack| haystack.contains_str(&needle))
}
Self::SubstringI(needle) => {
let needle = needle.to_ascii_lowercase();
Box::new(move |haystack| haystack.to_ascii_lowercase().contains_str(&needle))
}
// (Glob, GlobI) and (Regex, RegexI) pairs are identical here, but
// callers might want to translate these to backend-specific query
// differently.
Self::Glob(pattern) | Self::GlobI(pattern) => {
let pattern = pattern.to_regex();
Box::new(move |haystack| pattern.is_match(haystack))
}
Self::Regex(pattern) | Self::RegexI(pattern) => {
let pattern = pattern.clone();
Box::new(move |haystack| pattern.is_match(haystack))
}
}
}
/// Creates matcher object from this pattern.
pub fn to_matcher(&self) -> StringMatcher {
if self.is_all() {
StringMatcher::All
} else if let Some(literal) = self.as_exact() {
StringMatcher::Exact(literal.to_owned())
} else {
StringMatcher::Fn(self.to_match_fn())
}
}
/// Converts the pattern into a bytes regex.
pub fn to_regex(&self) -> regex::bytes::Regex {
match self {
Self::Exact(literal) => {
regex::bytes::RegexBuilder::new(&format!("^{}$", regex::escape(literal)))
.build()
.expect("impossible to fail to compile regex of literal")
}
Self::ExactI(literal) => {
regex::bytes::RegexBuilder::new(&format!("^{}$", regex::escape(literal)))
.case_insensitive(true)
.build()
.expect("impossible to fail to compile regex of literal")
}
Self::Substring(literal) => regex::bytes::RegexBuilder::new(®ex::escape(literal))
.build()
.expect("impossible to fail to compile regex of literal"),
Self::SubstringI(literal) => regex::bytes::RegexBuilder::new(®ex::escape(literal))
.case_insensitive(true)
.build()
.expect("impossible to fail to compile regex of literal"),
Self::Glob(glob_pattern) => glob_pattern.to_regex(),
// The regex generated represents the case insensitivity itself
Self::GlobI(glob_pattern) => glob_pattern.to_regex(),
Self::Regex(regex) => regex.clone(),
Self::RegexI(regex) => regex.clone(),
}
}
}
impl fmt::Display for StringPattern {
    /// Shows the original string of this pattern.
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        formatter.write_str(self.as_str())
    }
}
/// AST-level representation of the string matcher expression.
///
/// Prefer the constructor methods (e.g. [`StringExpression::union_all()`]),
/// which take care of boxing sub-expressions.
#[derive(Clone, Debug)]
pub enum StringExpression {
    // None and All can be represented by using Pattern. Add them if needed.
    /// Matches pattern.
    Pattern(Box<StringPattern>),
    /// Matches anything other than the expression.
    NotIn(Box<Self>),
    /// Matches one of the expressions.
    ///
    /// Unions are binary; n-ary unions are built as a balanced tree by
    /// [`StringExpression::union_all()`].
    Union(Box<Self>, Box<Self>),
    /// Matches both expressions.
    Intersection(Box<Self>, Box<Self>),
}
impl StringExpression {
    /// Expression that matches nothing.
    pub fn none() -> Self {
        // Represented as the negation of the match-all expression.
        Self::all().negated()
    }
    /// Expression that matches everything.
    pub fn all() -> Self {
        Self::pattern(StringPattern::all())
    }
    /// Expression that matches the given pattern.
    pub fn pattern(pattern: StringPattern) -> Self {
        Self::Pattern(Box::new(pattern))
    }
    /// Expression that matches strings exactly.
    pub fn exact(src: impl Into<String>) -> Self {
        Self::pattern(StringPattern::exact(src))
    }
    /// Expression that matches substrings.
    pub fn substring(src: impl Into<String>) -> Self {
        Self::pattern(StringPattern::substring(src))
    }
    /// Expression that matches anything other than this expression.
    pub fn negated(self) -> Self {
        Self::NotIn(Box::new(self))
    }
    /// Expression that matches `self` or `other` (or both).
    pub fn union(self, other: Self) -> Self {
        Self::Union(Box::new(self), Box::new(other))
    }
    /// Expression that matches any of the given `expressions`.
    pub fn union_all(expressions: Vec<Self>) -> Self {
        // 0-ary unions normalize to none(), 1-ary to the expression itself.
        to_binary_expression(expressions, &Self::none, &Self::union)
    }
    /// Expression that matches both `self` and `other`.
    pub fn intersection(self, other: Self) -> Self {
        Self::Intersection(Box::new(self), Box::new(other))
    }
    /// Iterates sub-expressions in depth-first pre order: each node before its
    /// children, children left to right.
    fn dfs_pre(&self) -> impl Iterator<Item = &Self> {
        let mut stack: Vec<&Self> = vec![self];
        iter::from_fn(move || {
            let expr = stack.pop()?;
            match expr {
                Self::Pattern(_) => {}
                Self::NotIn(expr) => stack.push(expr),
                Self::Union(expr1, expr2) | Self::Intersection(expr1, expr2) => {
                    // Push right first so the left child is popped (visited)
                    // first.
                    stack.push(expr2);
                    stack.push(expr1);
                }
            }
            Some(expr)
        })
    }
    /// Iterates exact string patterns recursively from this expression.
    ///
    /// For example, `"a", "b", "c"` will be yielded in that order for
    /// expression `"a" | glob:"?" & "b" | ~"c"`.
    pub fn exact_strings(&self) -> impl Iterator<Item = &str> {
        // pre/post-ordering doesn't matter so long as children are visited from
        // left to right.
        self.dfs_pre().filter_map(|expr| match expr {
            Self::Pattern(pattern) => pattern.as_exact(),
            _ => None,
        })
    }
    /// Transforms the expression tree to matcher object.
    pub fn to_matcher(&self) -> StringMatcher {
        // Leaf patterns keep their cheap matcher representation; compound
        // expressions are composed as boxed predicates.
        match self {
            Self::Pattern(pattern) => pattern.to_matcher(),
            Self::NotIn(expr) => {
                let p = expr.to_matcher().into_match_fn();
                StringMatcher::Fn(Box::new(move |haystack| !p(haystack)))
            }
            Self::Union(expr1, expr2) => {
                let p1 = expr1.to_matcher().into_match_fn();
                let p2 = expr2.to_matcher().into_match_fn();
                StringMatcher::Fn(Box::new(move |haystack| p1(haystack) || p2(haystack)))
            }
            Self::Intersection(expr1, expr2) => {
                let p1 = expr1.to_matcher().into_match_fn();
                let p2 = expr2.to_matcher().into_match_fn();
                StringMatcher::Fn(Box::new(move |haystack| p1(haystack) && p2(haystack)))
            }
        }
    }
}
/// Constructs binary tree from `expressions` list, `unit` node, and associative
/// `binary` operation.
fn to_binary_expression<T>(
    expressions: Vec<T>,
    unit: &impl Fn() -> T,
    binary: &impl Fn(T, T) -> T,
) -> T {
    if expressions.is_empty() {
        return unit();
    }
    if expressions.len() == 1 {
        return expressions.into_iter().next().unwrap();
    }
    // Split roughly in half and recurse on each side, which builds a balanced
    // tree and keeps the recursion depth logarithmic.
    let mut lhs = expressions;
    let rhs = lhs.split_off(lhs.len() / 2);
    let left = to_binary_expression(lhs, unit, binary);
    let right = to_binary_expression(rhs, unit, binary);
    binary(left, right)
}
/// Dynamically-dispatched match predicate over a byte haystack, as stored in
/// [`StringMatcher::Fn`].
type DynMatchFn = dyn Fn(&[u8]) -> bool;
/// Matcher for strings and bytes.
///
/// Typically constructed via [`StringPattern::to_matcher()`] or
/// [`StringExpression::to_matcher()`].
pub enum StringMatcher {
    /// Matches any strings.
    All,
    /// Matches strings exactly.
    Exact(String),
    /// Tests matches by arbitrary function.
    // The boxed predicate is opaque, hence the hand-written `Debug` impl.
    Fn(Box<DynMatchFn>),
}
impl StringMatcher {
    /// Matcher that matches any strings.
    pub const fn all() -> Self {
        Self::All
    }
    /// Matcher that matches `src` exactly.
    pub fn exact(src: impl Into<String>) -> Self {
        Self::Exact(src.into())
    }
    /// Returns true if this matches the `haystack` string.
    pub fn is_match(&self, haystack: &str) -> bool {
        self.is_match_bytes(haystack.as_bytes())
    }
    /// Returns true if this matches the `haystack` bytes.
    pub fn is_match_bytes(&self, haystack: &[u8]) -> bool {
        match self {
            Self::All => true,
            Self::Exact(needle) => haystack == needle.as_bytes(),
            Self::Fn(predicate) => predicate(haystack),
        }
    }
    /// Iterates over matching lines in `text`.
    pub fn match_lines<'a>(&self, text: &'a [u8]) -> impl Iterator<Item = &'a [u8]> {
        // The pattern is matched line by line so that it can be anchored to line
        // start/end. For example, exact:"" will match blank lines.
        text.split_inclusive(|b| *b == b'\n').filter(|line| {
            // Match against the content without the trailing newline, but
            // yield the line as split (newline included.)
            let line = line.strip_suffix(b"\n").unwrap_or(line);
            self.is_match_bytes(line)
        })
    }
    /// Converts this matcher into a boxed predicate over byte haystacks.
    fn into_match_fn(self) -> Box<DynMatchFn> {
        match self {
            Self::All => Box::new(|_haystack| true),
            Self::Exact(needle) => Box::new(move |haystack| haystack == needle.as_bytes()),
            Self::Fn(predicate) => predicate,
        }
    }
    /// Iterates entries of the given `map` whose string keys match this.
    pub fn filter_btree_map<'a, K: Borrow<str> + Ord, V>(
        &self,
        map: &'a BTreeMap<K, V>,
    ) -> impl Iterator<Item = (&'a K, &'a V)> {
        self.filter_btree_map_with(map, |key| key, |key| key)
    }
    /// Iterates entries of the given `map` whose string-like keys match this.
    ///
    /// The borrowed key type is constrained by the `Deref::Target`. It must be
    /// convertible to/from `str`.
    pub fn filter_btree_map_as_deref<'a, K, V>(
        &self,
        map: &'a BTreeMap<K, V>,
    ) -> impl Iterator<Item = (&'a K, &'a V)>
    where
        K: Borrow<K::Target> + Deref + Ord,
        K::Target: AsRef<str> + Ord,
        str: AsRef<K::Target>,
    {
        self.filter_btree_map_with(map, AsRef::as_ref, AsRef::as_ref)
    }
    // `from_key` converts a borrowed map key to `&str` for predicate matching;
    // `to_key` converts an exact `&str` needle to the map's borrowed key type
    // for a direct lookup.
    fn filter_btree_map_with<'a, K, Q, V>(
        &self,
        map: &'a BTreeMap<K, V>,
        from_key: impl Fn(&Q) -> &str,
        to_key: impl Fn(&str) -> &Q,
    ) -> impl Iterator<Item = (&'a K, &'a V)>
    where
        K: Borrow<Q> + Ord,
        Q: Ord + ?Sized,
    {
        // Nested `Either`s unify the three arms into one iterator type. The
        // Exact arm avoids a full scan by doing a single keyed lookup.
        match self {
            Self::All => Either::Left(map.iter()),
            Self::Exact(key) => {
                Either::Right(Either::Left(map.get_key_value(to_key(key)).into_iter()))
            }
            Self::Fn(predicate) => {
                Either::Right(Either::Right(map.iter().filter(move |&(key, _)| {
                    predicate(from_key(key.borrow()).as_bytes())
                })))
            }
        }
    }
}
impl Debug for StringMatcher {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::All => f.write_str("All"),
            Self::Exact(needle) => f.debug_tuple("Exact").field(needle).finish(),
            // The boxed predicate cannot be printed, so elide the field.
            Self::Fn(_) => f.debug_tuple("Fn").finish_non_exhaustive(),
        }
    }
}
#[cfg(test)]
mod tests {
use assert_matches::assert_matches;
use itertools::Itertools as _;
use maplit::btreemap;
use super::*;
    // Snapshot settings shared by the insta-based tests below.
    fn insta_settings() -> insta::Settings {
        let mut settings = insta::Settings::clone_current();
        // Collapse short "Thing(_,)" repeatedly to save vertical space and make
        // the output more readable.
        for _ in 0..4 {
            settings.add_filter(
                r"(?x)
                \b([A-Z]\w*)\(\n
                \s*(.{1,60}),\n
                \s*\)",
                "$1($2)",
            );
        }
        settings
    }
#[test]
fn test_string_pattern_to_glob() {
assert_eq!(StringPattern::all().to_glob(), Some("*".into()));
assert_eq!(StringPattern::exact("a").to_glob(), Some("a".into()));
assert_eq!(StringPattern::exact("*").to_glob(), Some("[*]".into()));
assert_eq!(
StringPattern::glob("*").unwrap().to_glob(),
Some("*".into())
);
assert_eq!(
StringPattern::Substring("a".into()).to_glob(),
Some("*a*".into())
);
assert_eq!(
StringPattern::Substring("*".into()).to_glob(),
Some("*[*]*".into())
);
}
#[test]
fn test_parse() {
// Parse specific pattern kinds.
assert_matches!(
StringPattern::from_str_kind("foo", "exact"),
Ok(StringPattern::Exact(s)) if s == "foo"
);
assert_matches!(
StringPattern::from_str_kind("foo*", "glob"),
Ok(StringPattern::Glob(p)) if p.as_str() == "foo*"
);
assert_matches!(
StringPattern::from_str_kind("foo", "substring"),
Ok(StringPattern::Substring(s)) if s == "foo"
);
assert_matches!(
StringPattern::from_str_kind("foo", "substring-i"),
Ok(StringPattern::SubstringI(s)) if s == "foo"
);
assert_matches!(
StringPattern::from_str_kind("foo", "regex"),
Ok(StringPattern::Regex(p)) if p.as_str() == "foo"
);
assert_matches!(
StringPattern::from_str_kind("foo", "regex-i"),
Ok(StringPattern::RegexI(p)) if p.as_str() == "foo"
);
}
#[test]
fn test_glob_is_match() {
let glob = |src: &str| StringPattern::glob(src).unwrap().to_matcher();
let glob_i = |src: &str| StringPattern::glob_i(src).unwrap().to_matcher();
assert!(glob("foo").is_match("foo"));
assert!(!glob("foo").is_match("foobar"));
// "." in string isn't any special
assert!(glob("*").is_match(".foo"));
// "/" in string isn't any special
assert!(glob("*").is_match("foo/bar"));
assert!(glob(r"*/*").is_match("foo/bar"));
assert!(!glob(r"*/*").is_match(r"foo\bar"));
// "\" is an escape character
assert!(!glob(r"*\*").is_match("foo/bar"));
assert!(glob(r"*\*").is_match("foo*"));
assert!(glob(r"\\").is_match(r"\"));
// "*" matches newline
assert!(glob(r"*").is_match("foo\nbar"));
assert!(!glob("f?O").is_match("Foo"));
assert!(glob_i("f?O").is_match("Foo"));
}
#[test]
fn test_regex_is_match() {
let regex = |src: &str| StringPattern::regex(src).unwrap().to_matcher();
// Unicode mode is enabled by default
assert!(regex(r"^\w$").is_match("\u{c0}"));
assert!(regex(r"^.$").is_match("\u{c0}"));
// ASCII-compatible mode should also work
assert!(regex(r"^(?-u)\w$").is_match("a"));
assert!(!regex(r"^(?-u)\w$").is_match("\u{c0}"));
assert!(regex(r"^(?-u).{2}$").is_match("\u{c0}"));
}
#[test]
fn test_string_pattern_to_regex() {
let check = |pattern: StringPattern, match_to: &str| {
let regex = pattern.to_regex();
regex.is_match(match_to.as_bytes())
};
assert!(check(StringPattern::exact("$a"), "$a"));
assert!(!check(StringPattern::exact("$a"), "$A"));
assert!(!check(StringPattern::exact("a"), "aa"));
assert!(!check(StringPattern::exact("a"), "aa"));
assert!(check(StringPattern::exact_i("a"), "A"));
assert!(check(StringPattern::substring("$a"), "$abc"));
assert!(!check(StringPattern::substring("$a"), "$Abc"));
assert!(check(StringPattern::substring_i("$a"), "$Abc"));
assert!(!check(StringPattern::glob("a").unwrap(), "A"));
assert!(check(StringPattern::glob_i("a").unwrap(), "A"));
assert!(check(StringPattern::regex("^a{1,3}").unwrap(), "abcde"));
assert!(!check(StringPattern::regex("^a{1,3}").unwrap(), "Abcde"));
assert!(check(StringPattern::regex_i("^a{1,3}").unwrap(), "Abcde"));
}
#[test]
fn test_exact_pattern_to_matcher() {
assert_matches!(
StringPattern::exact("").to_matcher(),
StringMatcher::Exact(needle) if needle.is_empty()
);
assert_matches!(
StringPattern::exact("x").to_matcher(),
StringMatcher::Exact(needle) if needle == "x"
);
assert_matches!(
StringPattern::exact_i("").to_matcher(),
StringMatcher::Fn(_) // or Exact
);
assert_matches!(
StringPattern::exact_i("x").to_matcher(),
StringMatcher::Fn(_)
);
}
#[test]
fn test_substring_pattern_to_matcher() {
assert_matches!(
StringPattern::substring("").to_matcher(),
StringMatcher::All
);
assert_matches!(
StringPattern::substring("x").to_matcher(),
StringMatcher::Fn(_)
);
assert_matches!(
StringPattern::substring_i("").to_matcher(),
StringMatcher::All
);
assert_matches!(
StringPattern::substring_i("x").to_matcher(),
StringMatcher::Fn(_)
);
}
#[test]
fn test_glob_pattern_to_matcher() {
assert_matches!(
StringPattern::glob("").unwrap().to_matcher(),
StringMatcher::Exact(_)
);
assert_matches!(
StringPattern::glob("x").unwrap().to_matcher(),
StringMatcher::Exact(_)
);
assert_matches!(
StringPattern::glob("x?").unwrap().to_matcher(),
StringMatcher::Fn(_)
);
assert_matches!(
StringPattern::glob("*").unwrap().to_matcher(),
StringMatcher::All
);
assert_matches!(
StringPattern::glob(r"\\").unwrap().to_matcher(),
StringMatcher::Fn(_) // or Exact(r"\")
);
assert_matches!(
StringPattern::glob_i("").unwrap().to_matcher(),
StringMatcher::Fn(_) // or Exact
);
assert_matches!(
StringPattern::glob_i("x").unwrap().to_matcher(),
StringMatcher::Fn(_)
);
assert_matches!(
StringPattern::glob_i("x?").unwrap().to_matcher(),
StringMatcher::Fn(_)
);
assert_matches!(
StringPattern::glob_i("*").unwrap().to_matcher(),
StringMatcher::All
);
}
#[test]
fn test_regex_pattern_to_matcher() {
assert_matches!(
StringPattern::regex("").unwrap().to_matcher(),
StringMatcher::All
);
assert_matches!(
StringPattern::regex("x").unwrap().to_matcher(),
StringMatcher::Fn(_)
);
assert_matches!(
StringPattern::regex(".").unwrap().to_matcher(),
StringMatcher::Fn(_)
);
assert_matches!(
StringPattern::regex_i("").unwrap().to_matcher(),
StringMatcher::All
);
assert_matches!(
StringPattern::regex_i("x").unwrap().to_matcher(),
StringMatcher::Fn(_)
);
assert_matches!(
StringPattern::regex_i(".").unwrap().to_matcher(),
StringMatcher::Fn(_)
);
}
#[test]
fn test_union_all_expressions() {
let settings = insta_settings();
let _guard = settings.bind_to_scope();
insta::assert_debug_snapshot!(
StringExpression::union_all(vec![]),
@r#"NotIn(Pattern(Substring("")))"#);
insta::assert_debug_snapshot!(
StringExpression::union_all(vec![StringExpression::exact("a")]),
@r#"Pattern(Exact("a"))"#);
insta::assert_debug_snapshot!(
StringExpression::union_all(vec![
StringExpression::exact("a"),
StringExpression::exact("b"),
]),
@r#"
Union(
Pattern(Exact("a")),
Pattern(Exact("b")),
)
"#);
insta::assert_debug_snapshot!(
StringExpression::union_all(vec![
StringExpression::exact("a"),
StringExpression::exact("b"),
StringExpression::exact("c"),
]),
@r#"
Union(
Pattern(Exact("a")),
Union(
Pattern(Exact("b")),
Pattern(Exact("c")),
),
)
"#);
insta::assert_debug_snapshot!(
StringExpression::union_all(vec![
StringExpression::exact("a"),
StringExpression::exact("b"),
StringExpression::exact("c"),
StringExpression::exact("d"),
]),
@r#"
Union(
Union(
Pattern(Exact("a")),
Pattern(Exact("b")),
),
Union(
Pattern(Exact("c")),
Pattern(Exact("d")),
),
)
"#);
}
#[test]
fn test_exact_strings_in_expression() {
assert_eq!(
StringExpression::all().exact_strings().collect_vec(),
[""; 0]
);
assert_eq!(
StringExpression::union_all(vec![
StringExpression::exact("a"),
StringExpression::substring("b"),
StringExpression::intersection(
StringExpression::exact("c"),
StringExpression::exact("d").negated(),
),
])
.exact_strings()
.collect_vec(),
["a", "c", "d"]
);
}
#[test]
fn test_trivial_expression_to_matcher() {
assert_matches!(StringExpression::all().to_matcher(), StringMatcher::All);
assert_matches!(
StringExpression::exact("x").to_matcher(),
StringMatcher::Exact(needle) if needle == "x"
);
}
#[test]
fn test_compound_expression_to_matcher() {
let matcher = StringExpression::exact("foo").negated().to_matcher();
assert!(!matcher.is_match("foo"));
assert!(matcher.is_match("bar"));
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/fileset.rs | lib/src/fileset.rs | // Copyright 2024 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Functional language for selecting a set of paths.
use std::collections::HashMap;
use std::iter;
use std::path;
use std::slice;
use std::sync::LazyLock;
use globset::Glob;
use globset::GlobBuilder;
use itertools::Itertools as _;
use thiserror::Error;
use crate::dsl_util::collect_similar;
use crate::fileset_parser;
use crate::fileset_parser::BinaryOp;
use crate::fileset_parser::ExpressionKind;
use crate::fileset_parser::ExpressionNode;
pub use crate::fileset_parser::FilesetDiagnostics;
pub use crate::fileset_parser::FilesetParseError;
pub use crate::fileset_parser::FilesetParseErrorKind;
pub use crate::fileset_parser::FilesetParseResult;
use crate::fileset_parser::FunctionCallNode;
use crate::fileset_parser::UnaryOp;
use crate::matchers::DifferenceMatcher;
use crate::matchers::EverythingMatcher;
use crate::matchers::FilesMatcher;
use crate::matchers::GlobsMatcher;
use crate::matchers::IntersectionMatcher;
use crate::matchers::Matcher;
use crate::matchers::NothingMatcher;
use crate::matchers::PrefixMatcher;
use crate::matchers::UnionMatcher;
use crate::repo_path::RelativePathParseError;
use crate::repo_path::RepoPath;
use crate::repo_path::RepoPathBuf;
use crate::repo_path::RepoPathUiConverter;
use crate::repo_path::UiPathParseError;
/// Error occurred during file pattern parsing.
#[derive(Debug, Error)]
pub enum FilePatternParseError {
    /// Unknown pattern kind is specified. (A `kind:` prefix not listed in
    /// [`FilePattern::from_str_kind()`].)
    #[error("Invalid file pattern kind `{0}:`")]
    InvalidKind(String),
    /// Failed to parse input UI path.
    #[error(transparent)]
    UiPath(#[from] UiPathParseError),
    /// Failed to parse input workspace-relative path.
    #[error(transparent)]
    RelativePath(#[from] RelativePathParseError),
    /// Failed to parse glob pattern.
    #[error(transparent)]
    GlobPattern(#[from] globset::Error),
}
/// Basic pattern to match `RepoPath`.
///
/// Parsed from `kind:pattern` strings by [`FilePattern::from_str_kind()`].
#[derive(Clone, Debug)]
pub enum FilePattern {
    /// Matches file (or exact) path.
    FilePath(RepoPathBuf),
    /// Matches path prefix.
    PrefixPath(RepoPathBuf),
    /// Matches file (or exact) path with glob pattern.
    FileGlob {
        /// Prefix directory path where the `pattern` will be evaluated.
        dir: RepoPathBuf,
        /// Glob pattern relative to `dir`.
        pattern: Box<Glob>,
    },
    /// Matches path prefix with glob pattern.
    PrefixGlob {
        /// Prefix directory path where the `pattern` will be evaluated.
        dir: RepoPathBuf,
        /// Glob pattern relative to `dir`.
        pattern: Box<Glob>,
    },
    // TODO: add more patterns:
    // - FilesInPath: files in directory, non-recursively?
    // - NameGlob or SuffixGlob: file name with glob?
}
impl FilePattern {
    /// Parses the given `input` string as pattern of the specified `kind`.
    pub fn from_str_kind(
        path_converter: &RepoPathUiConverter,
        input: &str,
        kind: &str,
    ) -> Result<Self, FilePatternParseError> {
        // Naming convention:
        // * path normalization
        //   * cwd: cwd-relative path (default)
        //   * root: workspace-relative path
        // * where to anchor
        //   * file: exact file path
        //   * prefix: path prefix (files under directory recursively)
        //   * files-in: files in directory non-recursively
        //   * name: file name component (or suffix match?)
        //   * substring: substring match?
        // * string pattern syntax (+ case sensitivity?)
        //   * path: literal path (default) (default anchor: prefix)
        //   * glob: glob pattern (default anchor: file)
        //   * regex?
        match kind {
            "cwd" => Self::cwd_prefix_path(path_converter, input),
            "cwd-file" | "file" => Self::cwd_file_path(path_converter, input),
            "cwd-glob" | "glob" => Self::cwd_file_glob(path_converter, input),
            "cwd-glob-i" | "glob-i" => Self::cwd_file_glob_i(path_converter, input),
            "cwd-prefix-glob" | "prefix-glob" => Self::cwd_prefix_glob(path_converter, input),
            "cwd-prefix-glob-i" | "prefix-glob-i" => Self::cwd_prefix_glob_i(path_converter, input),
            "root" => Self::root_prefix_path(input),
            "root-file" => Self::root_file_path(input),
            "root-glob" => Self::root_file_glob(input),
            "root-glob-i" => Self::root_file_glob_i(input),
            "root-prefix-glob" => Self::root_prefix_glob(input),
            "root-prefix-glob-i" => Self::root_prefix_glob_i(input),
            _ => Err(FilePatternParseError::InvalidKind(kind.to_owned())),
        }
    }
    /// Pattern that matches cwd-relative file (or exact) path.
    pub fn cwd_file_path(
        path_converter: &RepoPathUiConverter,
        input: impl AsRef<str>,
    ) -> Result<Self, FilePatternParseError> {
        let path = path_converter.parse_file_path(input.as_ref())?;
        Ok(Self::FilePath(path))
    }
    /// Pattern that matches cwd-relative path prefix.
    pub fn cwd_prefix_path(
        path_converter: &RepoPathUiConverter,
        input: impl AsRef<str>,
    ) -> Result<Self, FilePatternParseError> {
        let path = path_converter.parse_file_path(input.as_ref())?;
        Ok(Self::PrefixPath(path))
    }
    /// Pattern that matches cwd-relative file path glob.
    pub fn cwd_file_glob(
        path_converter: &RepoPathUiConverter,
        input: impl AsRef<str>,
    ) -> Result<Self, FilePatternParseError> {
        // Only the trailing glob part is compiled; the leading literal
        // components are resolved as a cwd-relative directory path.
        let (dir, pattern) = split_glob_path(input.as_ref());
        let dir = path_converter.parse_file_path(dir)?;
        Self::file_glob_at(dir, pattern, false)
    }
    /// Pattern that matches cwd-relative file path glob (case-insensitive).
    pub fn cwd_file_glob_i(
        path_converter: &RepoPathUiConverter,
        input: impl AsRef<str>,
    ) -> Result<Self, FilePatternParseError> {
        let (dir, pattern) = split_glob_path_i(input.as_ref());
        let dir = path_converter.parse_file_path(dir)?;
        Self::file_glob_at(dir, pattern, true)
    }
    /// Pattern that matches cwd-relative path prefix by glob.
    pub fn cwd_prefix_glob(
        path_converter: &RepoPathUiConverter,
        input: impl AsRef<str>,
    ) -> Result<Self, FilePatternParseError> {
        let (dir, pattern) = split_glob_path(input.as_ref());
        let dir = path_converter.parse_file_path(dir)?;
        Self::prefix_glob_at(dir, pattern, false)
    }
    /// Pattern that matches cwd-relative path prefix by glob
    /// (case-insensitive).
    pub fn cwd_prefix_glob_i(
        path_converter: &RepoPathUiConverter,
        input: impl AsRef<str>,
    ) -> Result<Self, FilePatternParseError> {
        let (dir, pattern) = split_glob_path_i(input.as_ref());
        let dir = path_converter.parse_file_path(dir)?;
        Self::prefix_glob_at(dir, pattern, true)
    }
    /// Pattern that matches workspace-relative file (or exact) path.
    pub fn root_file_path(input: impl AsRef<str>) -> Result<Self, FilePatternParseError> {
        // TODO: Let caller pass in converter for root-relative paths too
        let path = RepoPathBuf::from_relative_path(input.as_ref())?;
        Ok(Self::FilePath(path))
    }
    /// Pattern that matches workspace-relative path prefix.
    pub fn root_prefix_path(input: impl AsRef<str>) -> Result<Self, FilePatternParseError> {
        let path = RepoPathBuf::from_relative_path(input.as_ref())?;
        Ok(Self::PrefixPath(path))
    }
    /// Pattern that matches workspace-relative file path glob.
    pub fn root_file_glob(input: impl AsRef<str>) -> Result<Self, FilePatternParseError> {
        let (dir, pattern) = split_glob_path(input.as_ref());
        let dir = RepoPathBuf::from_relative_path(dir)?;
        Self::file_glob_at(dir, pattern, false)
    }
    /// Pattern that matches workspace-relative file path glob
    /// (case-insensitive).
    pub fn root_file_glob_i(input: impl AsRef<str>) -> Result<Self, FilePatternParseError> {
        let (dir, pattern) = split_glob_path_i(input.as_ref());
        let dir = RepoPathBuf::from_relative_path(dir)?;
        Self::file_glob_at(dir, pattern, true)
    }
    /// Pattern that matches workspace-relative path prefix by glob.
    pub fn root_prefix_glob(input: impl AsRef<str>) -> Result<Self, FilePatternParseError> {
        let (dir, pattern) = split_glob_path(input.as_ref());
        let dir = RepoPathBuf::from_relative_path(dir)?;
        Self::prefix_glob_at(dir, pattern, false)
    }
    /// Pattern that matches workspace-relative path prefix by glob
    /// (case-insensitive).
    pub fn root_prefix_glob_i(input: impl AsRef<str>) -> Result<Self, FilePatternParseError> {
        let (dir, pattern) = split_glob_path_i(input.as_ref());
        let dir = RepoPathBuf::from_relative_path(dir)?;
        Self::prefix_glob_at(dir, pattern, true)
    }
    // Builds a FileGlob pattern rooted at `dir`. An empty `input` means the
    // whole original input was a literal path, so fall back to FilePath.
    fn file_glob_at(
        dir: RepoPathBuf,
        input: &str,
        icase: bool,
    ) -> Result<Self, FilePatternParseError> {
        if input.is_empty() {
            return Ok(Self::FilePath(dir));
        }
        // Normalize separator to '/', reject ".." which will never match
        let normalized = RepoPathBuf::from_relative_path(input)?;
        let pattern = Box::new(parse_file_glob(
            normalized.as_internal_file_string(),
            icase,
        )?);
        Ok(Self::FileGlob { dir, pattern })
    }
    // Same as file_glob_at(), but the resulting pattern matches a path prefix.
    fn prefix_glob_at(
        dir: RepoPathBuf,
        input: &str,
        icase: bool,
    ) -> Result<Self, FilePatternParseError> {
        if input.is_empty() {
            return Ok(Self::PrefixPath(dir));
        }
        // Normalize separator to '/', reject ".." which will never match
        let normalized = RepoPathBuf::from_relative_path(input)?;
        let pattern = Box::new(parse_file_glob(
            normalized.as_internal_file_string(),
            icase,
        )?);
        Ok(Self::PrefixGlob { dir, pattern })
    }
    /// Returns path if this pattern represents a literal path in a workspace.
    /// Returns `None` if this is a glob pattern for example.
    pub fn as_path(&self) -> Option<&RepoPath> {
        match self {
            Self::FilePath(path) => Some(path),
            Self::PrefixPath(path) => Some(path),
            Self::FileGlob { .. } | Self::PrefixGlob { .. } => None,
        }
    }
}
/// Compiles `input` into a [`Glob`], optionally case-insensitive.
pub(super) fn parse_file_glob(input: &str, icase: bool) -> Result<Glob, globset::Error> {
    let mut builder = GlobBuilder::new(input);
    builder.case_insensitive(icase);
    // "*" and "?" must not match across the path separator.
    builder.literal_separator(true);
    builder.build()
}
/// Checks if a character is a glob metacharacter.
fn is_glob_char(c: char) -> bool {
    // See globset::escape(). In addition to that, backslash is parsed as an
    // escape sequence on Unix.
    match c {
        '?' | '*' | '[' | ']' | '{' | '}' => true,
        '\\' => !cfg!(windows),
        _ => false,
    }
}
/// Splits `input` path into literal directory path and glob pattern.
fn split_glob_path(input: &str) -> (&str, &str) {
    // Accumulate the length of leading components (separators included) that
    // contain no glob metacharacter; everything after is the glob part.
    let mut prefix_len = 0;
    for component in input.split_inclusive(path::is_separator) {
        if component.contains(is_glob_char) {
            break;
        }
        prefix_len += component.len();
    }
    input.split_at(prefix_len)
}
/// Splits `input` path into literal directory path and glob pattern, for
/// case-insensitive patterns.
fn split_glob_path_i(input: &str) -> (&str, &str) {
    let mut prefix_len = 0;
    for component in input.split_inclusive(path::is_separator) {
        // An ASCII letter could match either case, so it cannot be part of
        // the literal directory prefix either.
        if component.contains(|c: char| c.is_ascii_alphabetic() || is_glob_char(c)) {
            break;
        }
        prefix_len += component.len();
    }
    input.split_at(prefix_len)
}
/// AST-level representation of the fileset expression.
///
/// Prefer the constructor methods on this type where normalization matters
/// (see [`FilesetExpression::union_all()`].)
#[derive(Clone, Debug)]
pub enum FilesetExpression {
    /// Matches nothing.
    None,
    /// Matches everything.
    All,
    /// Matches basic pattern.
    Pattern(FilePattern),
    /// Matches any of the expressions.
    ///
    /// Use `FilesetExpression::union_all()` to construct a union expression.
    /// It will normalize 0-ary or 1-ary union.
    UnionAll(Vec<Self>),
    /// Matches both expressions.
    Intersection(Box<Self>, Box<Self>),
    /// Matches the first expression, but not the second expression.
    Difference(Box<Self>, Box<Self>),
}
impl FilesetExpression {
    /// Expression that matches nothing.
    pub fn none() -> Self {
        Self::None
    }
    /// Expression that matches everything.
    pub fn all() -> Self {
        Self::All
    }
    /// Expression that matches the given `pattern`.
    pub fn pattern(pattern: FilePattern) -> Self {
        Self::Pattern(pattern)
    }
    /// Expression that matches file (or exact) path.
    pub fn file_path(path: RepoPathBuf) -> Self {
        Self::Pattern(FilePattern::FilePath(path))
    }
    /// Expression that matches path prefix.
    pub fn prefix_path(path: RepoPathBuf) -> Self {
        Self::Pattern(FilePattern::PrefixPath(path))
    }
    /// Expression that matches any of the given `expressions`.
    ///
    /// 0-ary and 1-ary unions are normalized to `none()` and the sole
    /// element respectively, so a `UnionAll` node has two or more operands.
    pub fn union_all(expressions: Vec<Self>) -> Self {
        match expressions.len() {
            0 => Self::none(),
            1 => expressions.into_iter().next().unwrap(),
            _ => Self::UnionAll(expressions),
        }
    }
    /// Expression that matches both `self` and `other`.
    pub fn intersection(self, other: Self) -> Self {
        Self::Intersection(Box::new(self), Box::new(other))
    }
    /// Expression that matches `self` but not `other`.
    pub fn difference(self, other: Self) -> Self {
        Self::Difference(Box::new(self), Box::new(other))
    }
    /// Flattens union expression at most one level.
    ///
    /// `None` is treated as the empty union; any other non-union expression
    /// is returned as a one-element slice.
    fn as_union_all(&self) -> &[Self] {
        match self {
            Self::None => &[],
            Self::UnionAll(exprs) => exprs,
            _ => slice::from_ref(self),
        }
    }
    /// Iterates sub-expressions in depth-first pre-order, starting at `self`.
    fn dfs_pre(&self) -> impl Iterator<Item = &Self> {
        let mut stack: Vec<&Self> = vec![self];
        iter::from_fn(move || {
            let expr = stack.pop()?;
            match expr {
                Self::None | Self::All | Self::Pattern(_) => {}
                // Children are pushed right-to-left so they pop left-to-right.
                Self::UnionAll(exprs) => stack.extend(exprs.iter().rev()),
                Self::Intersection(expr1, expr2) | Self::Difference(expr1, expr2) => {
                    stack.push(expr2);
                    stack.push(expr1);
                }
            }
            Some(expr)
        })
    }
    /// Iterates literal paths recursively from this expression.
    ///
    /// For example, `"a", "b", "c"` will be yielded in that order for
    /// expression `"a" | all() & "b" | ~"c"`.
    pub fn explicit_paths(&self) -> impl Iterator<Item = &RepoPath> {
        // pre/post-ordering doesn't matter so long as children are visited from
        // left to right.
        self.dfs_pre().filter_map(|expr| match expr {
            Self::Pattern(pattern) => pattern.as_path(),
            _ => None,
        })
    }
    /// Transforms the expression tree to `Matcher` object.
    pub fn to_matcher(&self) -> Box<dyn Matcher> {
        build_union_matcher(self.as_union_all())
    }
}
/// Transforms the union `expressions` to `Matcher` object.
///
/// Since `Matcher` typically accepts a set of patterns to be OR-ed, this
/// function takes a list of union `expressions` as input.
fn build_union_matcher(expressions: &[FilesetExpression]) -> Box<dyn Matcher> {
    // Basic patterns of the same kind are batched into a single matcher each
    // instead of one matcher per pattern.
    let mut file_paths = Vec::new();
    let mut prefix_paths = Vec::new();
    let mut file_globs = GlobsMatcher::builder().prefix_paths(false);
    let mut prefix_globs = GlobsMatcher::builder().prefix_paths(true);
    let mut matchers: Vec<Option<Box<dyn Matcher>>> = Vec::new();
    for expr in expressions {
        let matcher: Box<dyn Matcher> = match expr {
            // None and All are supposed to be simplified by caller.
            FilesetExpression::None => Box::new(NothingMatcher),
            FilesetExpression::All => Box::new(EverythingMatcher),
            FilesetExpression::Pattern(pattern) => {
                match pattern {
                    FilePattern::FilePath(path) => file_paths.push(path),
                    FilePattern::PrefixPath(path) => prefix_paths.push(path),
                    FilePattern::FileGlob { dir, pattern } => file_globs.add(dir, pattern),
                    FilePattern::PrefixGlob { dir, pattern } => prefix_globs.add(dir, pattern),
                }
                // Batched above; no per-pattern matcher to push.
                continue;
            }
            // UnionAll is supposed to be flattened by caller.
            FilesetExpression::UnionAll(exprs) => build_union_matcher(exprs),
            FilesetExpression::Intersection(expr1, expr2) => {
                let m1 = build_union_matcher(expr1.as_union_all());
                let m2 = build_union_matcher(expr2.as_union_all());
                Box::new(IntersectionMatcher::new(m1, m2))
            }
            FilesetExpression::Difference(expr1, expr2) => {
                let m1 = build_union_matcher(expr1.as_union_all());
                let m2 = build_union_matcher(expr2.as_union_all());
                Box::new(DifferenceMatcher::new(m1, m2))
            }
        };
        matchers.push(Some(matcher));
    }
    // Append the batched pattern matchers, skipping empty ones.
    if !file_paths.is_empty() {
        matchers.push(Some(Box::new(FilesMatcher::new(file_paths))));
    }
    if !prefix_paths.is_empty() {
        matchers.push(Some(Box::new(PrefixMatcher::new(prefix_paths))));
    }
    if !file_globs.is_empty() {
        matchers.push(Some(Box::new(file_globs.build())));
    }
    if !prefix_globs.is_empty() {
        matchers.push(Some(Box::new(prefix_globs.build())));
    }
    union_all_matchers(&mut matchers)
}
/// Concatenates all `matchers` as union.
///
/// Each matcher element must be wrapped in `Some` so the matchers can be moved
/// in arbitrary order.
fn union_all_matchers(matchers: &mut [Option<Box<dyn Matcher>>]) -> Box<dyn Matcher> {
    if matchers.is_empty() {
        return Box::new(NothingMatcher);
    }
    if matchers.len() == 1 {
        return matchers[0].take().expect("matcher should still be available");
    }
    // Split in half and recurse so the resulting union tree is balanced,
    // minimizing its depth.
    let mid = matchers.len() / 2;
    let (left, right) = matchers.split_at_mut(mid);
    let m1 = union_all_matchers(left);
    let m2 = union_all_matchers(right);
    Box::new(UnionMatcher::new(m1, m2))
}
/// Signature shared by all built-in fileset functions.
type FilesetFunction = fn(
    &mut FilesetDiagnostics,
    &RepoPathUiConverter,
    &FunctionCallNode,
) -> FilesetParseResult<FilesetExpression>;
/// Table of built-in fileset functions, keyed by function name.
static BUILTIN_FUNCTION_MAP: LazyLock<HashMap<&str, FilesetFunction>> = LazyLock::new(|| {
    // Not using maplit::hashmap!{} or custom declarative macro here because
    // code completion inside macro is quite restricted.
    let mut map: HashMap<&str, FilesetFunction> = HashMap::new();
    map.insert("none", |_diagnostics, _path_converter, function| {
        function.expect_no_arguments()?;
        Ok(FilesetExpression::none())
    });
    map.insert("all", |_diagnostics, _path_converter, function| {
        function.expect_no_arguments()?;
        Ok(FilesetExpression::all())
    });
    map
});
/// Resolves a fileset function call against the built-in function table.
fn resolve_function(
    diagnostics: &mut FilesetDiagnostics,
    path_converter: &RepoPathUiConverter,
    function: &FunctionCallNode,
) -> FilesetParseResult<FilesetExpression> {
    match BUILTIN_FUNCTION_MAP.get(function.name) {
        // Known function: delegate to its implementation.
        Some(func) => func(diagnostics, path_converter, function),
        // Unknown function: report an error with similarly-named candidates.
        None => Err(FilesetParseError::new(
            FilesetParseErrorKind::NoSuchFunction {
                name: function.name.to_owned(),
                candidates: collect_similar(function.name, BUILTIN_FUNCTION_MAP.keys()),
            },
            function.name_span,
        )),
    }
}
/// Resolves an AST `node` into a `FilesetExpression`.
///
/// Bare words and strings are converted via `FilePattern::cwd_prefix_glob()`;
/// "kind:value" patterns dispatch through `FilePattern::from_str_kind()`.
fn resolve_expression(
    diagnostics: &mut FilesetDiagnostics,
    path_converter: &RepoPathUiConverter,
    node: &ExpressionNode,
) -> FilesetParseResult<FilesetExpression> {
    // Attaches the node's span to pattern-parsing failures.
    let wrap_pattern_error =
        |err| FilesetParseError::expression("Invalid file pattern", node.span).with_source(err);
    match &node.kind {
        ExpressionKind::Identifier(name) => {
            let pattern =
                FilePattern::cwd_prefix_glob(path_converter, name).map_err(wrap_pattern_error)?;
            Ok(FilesetExpression::pattern(pattern))
        }
        ExpressionKind::String(name) => {
            let pattern =
                FilePattern::cwd_prefix_glob(path_converter, name).map_err(wrap_pattern_error)?;
            Ok(FilesetExpression::pattern(pattern))
        }
        ExpressionKind::StringPattern { kind, value } => {
            let pattern = FilePattern::from_str_kind(path_converter, value, kind)
                .map_err(wrap_pattern_error)?;
            Ok(FilesetExpression::pattern(pattern))
        }
        ExpressionKind::Unary(op, arg_node) => {
            let arg = resolve_expression(diagnostics, path_converter, arg_node)?;
            match op {
                // ~x is lowered to all() - x.
                UnaryOp::Negate => Ok(FilesetExpression::all().difference(arg)),
            }
        }
        ExpressionKind::Binary(op, lhs_node, rhs_node) => {
            let lhs = resolve_expression(diagnostics, path_converter, lhs_node)?;
            let rhs = resolve_expression(diagnostics, path_converter, rhs_node)?;
            match op {
                BinaryOp::Intersection => Ok(lhs.intersection(rhs)),
                BinaryOp::Difference => Ok(lhs.difference(rhs)),
            }
        }
        ExpressionKind::UnionAll(nodes) => {
            let expressions = nodes
                .iter()
                .map(|node| resolve_expression(diagnostics, path_converter, node))
                .try_collect()?;
            Ok(FilesetExpression::union_all(expressions))
        }
        ExpressionKind::FunctionCall(function) => {
            resolve_function(diagnostics, path_converter, function)
        }
    }
}
/// Parses text into `FilesetExpression` without bare string fallback.
pub fn parse(
    diagnostics: &mut FilesetDiagnostics,
    text: &str,
    path_converter: &RepoPathUiConverter,
) -> FilesetParseResult<FilesetExpression> {
    // TODO: add basic tree substitution pass to eliminate redundant expressions
    let parsed = fileset_parser::parse_program(text)?;
    resolve_expression(diagnostics, path_converter, &parsed)
}
/// Parses text into `FilesetExpression` with bare string fallback.
///
/// If the text can't be parsed as a fileset expression, and if it doesn't
/// contain any operator-like characters, it will be parsed as a file path.
pub fn parse_maybe_bare(
    diagnostics: &mut FilesetDiagnostics,
    text: &str,
    path_converter: &RepoPathUiConverter,
) -> FilesetParseResult<FilesetExpression> {
    // TODO: add basic tree substitution pass to eliminate redundant expressions
    let parsed = fileset_parser::parse_program_or_bare_string(text)?;
    resolve_expression(diagnostics, path_converter, &parsed)
}
#[cfg(test)]
mod tests {
use std::path::PathBuf;
use super::*;
fn repo_path_buf(value: impl Into<String>) -> RepoPathBuf {
RepoPathBuf::from_internal_string(value).unwrap()
}
fn insta_settings() -> insta::Settings {
let mut settings = insta::Settings::clone_current();
// Elide parsed glob options and tokens, which aren't interesting.
settings.add_filter(
r"(?m)^(\s{12}opts):\s*GlobOptions\s*\{\n(\s{16}.*\n)*\s{12}\},",
"$1: _,",
);
settings.add_filter(
r"(?m)^(\s{12}tokens):\s*Tokens\(\n(\s{16}.*\n)*\s{12}\),",
"$1: _,",
);
// Collapse short "Thing(_,)" repeatedly to save vertical space and make
// the output more readable.
for _ in 0..4 {
settings.add_filter(
r"(?x)
\b([A-Z]\w*)\(\n
\s*(.{1,60}),\n
\s*\)",
"$1($2)",
);
}
settings
}
#[test]
fn test_parse_file_pattern() {
let settings = insta_settings();
let _guard = settings.bind_to_scope();
let path_converter = RepoPathUiConverter::Fs {
cwd: PathBuf::from("/ws/cur"),
base: PathBuf::from("/ws"),
};
let parse = |text| parse_maybe_bare(&mut FilesetDiagnostics::new(), text, &path_converter);
// cwd-relative patterns
insta::assert_debug_snapshot!(
parse(".").unwrap(),
@r#"Pattern(PrefixPath("cur"))"#);
insta::assert_debug_snapshot!(
parse("..").unwrap(),
@r#"Pattern(PrefixPath(""))"#);
assert!(parse("../..").is_err());
insta::assert_debug_snapshot!(
parse("foo").unwrap(),
@r#"Pattern(PrefixPath("cur/foo"))"#);
insta::assert_debug_snapshot!(
parse("*.*").unwrap(),
@r#"
Pattern(
PrefixGlob {
dir: "cur",
pattern: Glob {
glob: "*.*",
re: "(?-u)^[^/]*\\.[^/]*$",
opts: _,
tokens: _,
},
},
)
"#);
insta::assert_debug_snapshot!(
parse("cwd:.").unwrap(),
@r#"Pattern(PrefixPath("cur"))"#);
insta::assert_debug_snapshot!(
parse("cwd-file:foo").unwrap(),
@r#"Pattern(FilePath("cur/foo"))"#);
insta::assert_debug_snapshot!(
parse("file:../foo/bar").unwrap(),
@r#"Pattern(FilePath("foo/bar"))"#);
// workspace-relative patterns
insta::assert_debug_snapshot!(
parse("root:.").unwrap(),
@r#"Pattern(PrefixPath(""))"#);
assert!(parse("root:..").is_err());
insta::assert_debug_snapshot!(
parse("root:foo/bar").unwrap(),
@r#"Pattern(PrefixPath("foo/bar"))"#);
insta::assert_debug_snapshot!(
parse("root-file:bar").unwrap(),
@r#"Pattern(FilePath("bar"))"#);
}
#[test]
fn test_parse_glob_pattern() {
let settings = insta_settings();
let _guard = settings.bind_to_scope();
let path_converter = RepoPathUiConverter::Fs {
// meta character in cwd path shouldn't be expanded
cwd: PathBuf::from("/ws/cur*"),
base: PathBuf::from("/ws"),
};
let parse = |text| parse_maybe_bare(&mut FilesetDiagnostics::new(), text, &path_converter);
// cwd-relative, without meta characters
insta::assert_debug_snapshot!(
parse(r#"cwd-glob:"foo""#).unwrap(),
@r#"Pattern(FilePath("cur*/foo"))"#);
// Strictly speaking, glob:"" shouldn't match a file named <cwd>, but
// file pattern doesn't distinguish "foo/" from "foo".
insta::assert_debug_snapshot!(
parse(r#"glob:"""#).unwrap(),
@r#"Pattern(FilePath("cur*"))"#);
insta::assert_debug_snapshot!(
parse(r#"glob:".""#).unwrap(),
@r#"Pattern(FilePath("cur*"))"#);
insta::assert_debug_snapshot!(
parse(r#"glob:"..""#).unwrap(),
@r#"Pattern(FilePath(""))"#);
// cwd-relative, with meta characters
insta::assert_debug_snapshot!(
parse(r#"glob:"*""#).unwrap(), @r#"
Pattern(
FileGlob {
dir: "cur*",
pattern: Glob {
glob: "*",
re: "(?-u)^[^/]*$",
opts: _,
tokens: _,
},
},
)
"#);
insta::assert_debug_snapshot!(
parse(r#"glob:"./*""#).unwrap(), @r#"
Pattern(
FileGlob {
dir: "cur*",
pattern: Glob {
glob: "*",
re: "(?-u)^[^/]*$",
opts: _,
tokens: _,
},
},
)
"#);
insta::assert_debug_snapshot!(
parse(r#"glob:"../*""#).unwrap(), @r#"
Pattern(
FileGlob {
dir: "",
pattern: Glob {
glob: "*",
re: "(?-u)^[^/]*$",
opts: _,
tokens: _,
},
},
)
"#);
// glob:"**" is equivalent to root-glob:"<cwd>/**", not root-glob:"**"
insta::assert_debug_snapshot!(
parse(r#"glob:"**""#).unwrap(), @r#"
Pattern(
FileGlob {
dir: "cur*",
pattern: Glob {
glob: "**",
re: "(?-u)^.*$",
opts: _,
tokens: _,
},
},
)
"#);
insta::assert_debug_snapshot!(
parse(r#"glob:"../foo/b?r/baz""#).unwrap(), @r#"
Pattern(
FileGlob {
dir: "foo",
pattern: Glob {
glob: "b?r/baz",
re: "(?-u)^b[^/]r/baz$",
opts: _,
tokens: _,
},
},
)
"#);
assert!(parse(r#"glob:"../../*""#).is_err());
assert!(parse(r#"glob-i:"../../*""#).is_err());
assert!(parse(r#"glob:"/*""#).is_err());
assert!(parse(r#"glob-i:"/*""#).is_err());
// no support for relative path component after glob meta character
assert!(parse(r#"glob:"*/..""#).is_err());
assert!(parse(r#"glob-i:"*/..""#).is_err());
if cfg!(windows) {
// cwd-relative, with Windows path separators
insta::assert_debug_snapshot!(
parse(r#"glob:"..\\foo\\*\\bar""#).unwrap(), @r#"
Pattern(
FileGlob {
dir: "foo",
pattern: Glob {
glob: "*/bar",
re: "(?-u)^[^/]*/bar$",
opts: _,
tokens: _,
},
},
)
"#);
} else {
// backslash is an escape character on Unix
insta::assert_debug_snapshot!(
parse(r#"glob:"..\\foo\\*\\bar""#).unwrap(), @r#"
Pattern(
FileGlob {
dir: "cur*",
pattern: Glob {
glob: "..\\foo\\*\\bar",
re: "(?-u)^\\.\\.foo\\*bar$",
opts: _,
tokens: _,
},
},
)
"#);
}
// workspace-relative, without meta characters
insta::assert_debug_snapshot!(
parse(r#"root-glob:"foo""#).unwrap(),
@r#"Pattern(FilePath("foo"))"#);
insta::assert_debug_snapshot!(
parse(r#"root-glob:"""#).unwrap(),
@r#"Pattern(FilePath(""))"#);
insta::assert_debug_snapshot!(
parse(r#"root-glob:".""#).unwrap(),
@r#"Pattern(FilePath(""))"#);
// workspace-relative, with meta characters
insta::assert_debug_snapshot!(
parse(r#"root-glob:"*""#).unwrap(), @r#"
Pattern(
FileGlob {
dir: "",
pattern: Glob {
glob: "*",
re: "(?-u)^[^/]*$",
opts: _,
tokens: _,
},
},
)
"#);
insta::assert_debug_snapshot!(
parse(r#"root-glob:"foo/bar/b[az]""#).unwrap(), @r#"
Pattern(
FileGlob {
dir: "foo/bar",
pattern: Glob {
glob: "b[az]",
re: "(?-u)^b[az]$",
opts: _,
tokens: _,
},
},
)
"#);
insta::assert_debug_snapshot!(
parse(r#"root-glob:"foo/bar/b{ar,az}""#).unwrap(), @r#"
Pattern(
FileGlob {
dir: "foo/bar",
pattern: Glob {
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/id_prefix.rs | lib/src/id_prefix.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::iter;
use std::marker::PhantomData;
use std::sync::Arc;
use itertools::Itertools as _;
use once_cell::sync::OnceCell;
use thiserror::Error;
use crate::backend::ChangeId;
use crate::backend::CommitId;
use crate::hex_util;
use crate::index::IndexResult;
use crate::index::ResolvedChangeTargets;
use crate::object_id::HexPrefix;
use crate::object_id::ObjectId;
use crate::object_id::PrefixResolution;
use crate::repo::Repo;
use crate::revset::RevsetEvaluationError;
use crate::revset::RevsetExtensions;
use crate::revset::RevsetResolutionError;
use crate::revset::SymbolResolver;
use crate::revset::SymbolResolverExtension;
use crate::revset::UserRevsetExpression;
use crate::view::View;
/// Error occurring while building the disambiguation index from the
/// configured short-prefixes revset.
#[derive(Debug, Error)]
pub enum IdPrefixIndexLoadError {
    /// Symbols in the disambiguation revset couldn't be resolved.
    #[error("Failed to resolve short-prefixes disambiguation revset")]
    Resolution(#[from] RevsetResolutionError),
    /// The resolved revset couldn't be evaluated against the repo.
    #[error("Failed to evaluate short-prefixes disambiguation revset")]
    Evaluation(#[from] RevsetEvaluationError),
}
// Disambiguation revset plus the indexes lazily built from it.
struct DisambiguationData {
    // Revset defining the commits whose IDs get short unique prefixes.
    expression: Arc<UserRevsetExpression>,
    // Built on first access; see DisambiguationData::indexes().
    indexes: OnceCell<Indexes>,
}
// Indexes over the commits in the disambiguation revset.
struct Indexes {
    // Source table of (commit ID, change ID) pairs, addressed by position.
    commit_change_ids: Vec<(CommitId, ChangeId)>,
    // Maps the first 4 bytes of a commit ID to positions in commit_change_ids.
    commit_index: IdIndex<CommitId, u32, 4>,
    // Maps the first 4 bytes of a change ID to positions in commit_change_ids.
    change_index: IdIndex<ChangeId, u32, 4>,
}
impl DisambiguationData {
    /// Returns the indexes over the disambiguation revset, building and
    /// caching them on first call.
    fn indexes(
        &self,
        repo: &dyn Repo,
        extensions: &[Box<dyn SymbolResolverExtension>],
    ) -> Result<&Indexes, IdPrefixIndexLoadError> {
        self.indexes.get_or_try_init(|| {
            let symbol_resolver = SymbolResolver::new(repo, extensions);
            let revset = self
                .expression
                .resolve_user_expression(repo, &symbol_resolver)?
                .evaluate(repo)?;
            let commit_change_ids: Vec<_> = revset.commit_change_ids().try_collect()?;
            let mut commit_index = IdIndex::with_capacity(commit_change_ids.len());
            let mut change_index = IdIndex::with_capacity(commit_change_ids.len());
            // Both indexes point into commit_change_ids by position.
            for (i, (commit_id, change_id)) in commit_change_ids.iter().enumerate() {
                let i: u32 = i.try_into().unwrap();
                commit_index.insert(commit_id, i);
                change_index.insert(change_id, i);
            }
            Ok(Indexes {
                commit_change_ids,
                commit_index: commit_index.build(),
                change_index: change_index.build(),
            })
        })
    }
}
// Adapters letting a (CommitId, ChangeId) slice serve as an IdIndex source:
// a u32 pointer is a position in the slice, and an entry can be keyed by
// either its commit ID or its change ID.
impl<'a> IdIndexSource<u32> for &'a [(CommitId, ChangeId)] {
    type Entry = &'a (CommitId, ChangeId);
    fn entry_at(&self, pointer: &u32) -> Self::Entry {
        &self[*pointer as usize]
    }
}
impl IdIndexSourceEntry<CommitId> for &'_ (CommitId, ChangeId) {
    fn to_key(&self) -> CommitId {
        let (commit_id, _) = self;
        commit_id.clone()
    }
}
impl IdIndexSourceEntry<ChangeId> for &'_ (CommitId, ChangeId) {
    fn to_key(&self) -> ChangeId {
        let (_, change_id) = self;
        change_id.clone()
    }
}
/// Manages configuration and cache of commit/change ID disambiguation index.
#[derive(Default)]
pub struct IdPrefixContext {
    // Set by disambiguate_within(); None means no narrowing revset.
    disambiguation: Option<DisambiguationData>,
    extensions: Arc<RevsetExtensions>,
}
impl IdPrefixContext {
    /// Creates a context with the given revset `extensions` and no
    /// disambiguation revset configured.
    pub fn new(extensions: Arc<RevsetExtensions>) -> Self {
        Self {
            disambiguation: None,
            extensions,
        }
    }

    /// Configures the revset within which ID prefixes are disambiguated.
    /// The index itself is built lazily on first use.
    pub fn disambiguate_within(mut self, expression: Arc<UserRevsetExpression>) -> Self {
        let indexes = OnceCell::new();
        self.disambiguation = Some(DisambiguationData { expression, indexes });
        self
    }

    /// Loads disambiguation index once, returns a borrowed index to
    /// disambiguate commit/change IDs.
    pub fn populate(&self, repo: &dyn Repo) -> Result<IdPrefixIndex<'_>, IdPrefixIndexLoadError> {
        let indexes = self
            .disambiguation
            .as_ref()
            .map(|disambiguation| disambiguation.indexes(repo, self.extensions.symbol_resolvers()))
            .transpose()?;
        Ok(IdPrefixIndex { indexes })
    }
}
/// Loaded index to disambiguate commit/change IDs.
pub struct IdPrefixIndex<'a> {
    // None means no disambiguation set is loaded; queries fall through to
    // the caller-provided repo.
    indexes: Option<&'a Indexes>,
}
impl IdPrefixIndex<'_> {
    /// Returns an empty index that just falls back to a provided `repo`.
    pub const fn empty() -> IdPrefixIndex<'static> {
        IdPrefixIndex { indexes: None }
    }
    /// Resolve an unambiguous commit ID prefix.
    ///
    /// The prefix is first resolved within the disambiguation set (if any);
    /// only on `NoMatch` there does resolution fall back to the whole repo.
    pub fn resolve_commit_prefix(
        &self,
        repo: &dyn Repo,
        prefix: &HexPrefix,
    ) -> IndexResult<PrefixResolution<CommitId>> {
        if let Some(indexes) = self.indexes {
            let resolution = indexes
                .commit_index
                .resolve_prefix_to_key(&*indexes.commit_change_ids, prefix);
            match resolution {
                PrefixResolution::NoMatch => {
                    // Fall back to resolving in entire repo
                }
                PrefixResolution::SingleMatch(id) => {
                    // The disambiguation set may be loaded from a different repo,
                    // and contain a commit that doesn't exist in the current repo.
                    if repo.index().has_id(&id)? {
                        return Ok(PrefixResolution::SingleMatch(id));
                    } else {
                        return Ok(PrefixResolution::NoMatch);
                    }
                }
                PrefixResolution::AmbiguousMatch => {
                    return Ok(PrefixResolution::AmbiguousMatch);
                }
            }
        }
        repo.index().resolve_commit_id_prefix(prefix)
    }
    /// Returns the shortest length of a prefix of `commit_id` that can still be
    /// resolved by `resolve_commit_prefix()` and [`SymbolResolver`].
    pub fn shortest_commit_prefix_len(
        &self,
        repo: &dyn Repo,
        commit_id: &CommitId,
    ) -> IndexResult<usize> {
        let len = self.shortest_commit_prefix_len_exact(repo, commit_id)?;
        // Extend the prefix past any collision with local tag/bookmark names,
        // which symbol resolution prioritizes over IDs.
        Ok(disambiguate_prefix_with_refs(
            repo.view(),
            &commit_id.to_string(),
            len,
        ))
    }
    /// Like `shortest_commit_prefix_len()`, but ignores conflicts with local
    /// tag and bookmark names.
    pub fn shortest_commit_prefix_len_exact(
        &self,
        repo: &dyn Repo,
        commit_id: &CommitId,
    ) -> IndexResult<usize> {
        if let Some(indexes) = self.indexes
            && let Some(lookup) = indexes
                .commit_index
                .lookup_exact(&*indexes.commit_change_ids, commit_id)
        {
            return Ok(lookup.shortest_unique_prefix_len());
        }
        // Not in the disambiguation set; compute against the whole repo.
        repo.index().shortest_unique_commit_id_prefix_len(commit_id)
    }
    /// Resolve an unambiguous change ID prefix to the commit IDs in the revset.
    pub fn resolve_change_prefix(
        &self,
        repo: &dyn Repo,
        prefix: &HexPrefix,
    ) -> IndexResult<PrefixResolution<ResolvedChangeTargets>> {
        if let Some(indexes) = self.indexes {
            let resolution = indexes
                .change_index
                .resolve_prefix_to_key(&*indexes.commit_change_ids, prefix);
            match resolution {
                PrefixResolution::NoMatch => {
                    // Fall back to resolving in entire repo
                }
                PrefixResolution::SingleMatch(change_id) => {
                    return match repo.resolve_change_id(&change_id)? {
                        // There may be more commits with this change id outside the narrower sets.
                        Some(commit_ids) => Ok(PrefixResolution::SingleMatch(commit_ids)),
                        // The disambiguation set may contain hidden commits.
                        None => Ok(PrefixResolution::NoMatch),
                    };
                }
                PrefixResolution::AmbiguousMatch => {
                    return Ok(PrefixResolution::AmbiguousMatch);
                }
            }
        }
        repo.resolve_change_id_prefix(prefix)
    }
    /// Returns the shortest length of a prefix of `change_id` that can still be
    /// resolved by `resolve_change_prefix()` and [`SymbolResolver`].
    pub fn shortest_change_prefix_len(
        &self,
        repo: &dyn Repo,
        change_id: &ChangeId,
    ) -> IndexResult<usize> {
        let len = self.shortest_change_prefix_len_exact(repo, change_id)?;
        // Extend the prefix past any collision with local tag/bookmark names.
        Ok(disambiguate_prefix_with_refs(
            repo.view(),
            &change_id.to_string(),
            len,
        ))
    }
    // Like shortest_change_prefix_len(), but ignores conflicts with local
    // tag and bookmark names.
    fn shortest_change_prefix_len_exact(
        &self,
        repo: &dyn Repo,
        change_id: &ChangeId,
    ) -> IndexResult<usize> {
        if let Some(indexes) = self.indexes
            && let Some(lookup) = indexes
                .change_index
                .lookup_exact(&*indexes.commit_change_ids, change_id)
        {
            return Ok(lookup.shortest_unique_prefix_len());
        }
        repo.shortest_unique_change_id_prefix_len(change_id)
    }
}
/// Extends `min_len` until the prefix of `id_sym` no longer collides with a
/// local tag or bookmark name, capped at the full symbol length.
fn disambiguate_prefix_with_refs(view: &View, id_sym: &str, min_len: usize) -> usize {
    debug_assert!(id_sym.is_ascii());
    for n in min_len..id_sym.len() {
        // Tags, bookmarks, and Git refs have higher priority, but Git refs
        // should include "/" char. Extension symbols have lower priority.
        let prefix = &id_sym[..n];
        if view.get_local_tag(prefix.as_ref()).is_absent()
            && view.get_local_bookmark(prefix.as_ref()).is_absent()
        {
            return n;
        }
    }
    // No need to test conflicts with the full ID. We have to return some
    // valid length anyway.
    id_sym.len()
}
/// In-memory immutable index to do prefix lookup of key `K` through `P`.
///
/// In a nutshell, this is a mapping of `K` -> `P` -> `S::Entry` where `S:
/// IdIndexSource<P>`. The source table `S` isn't owned by this index.
///
/// This index stores first `N` bytes of each key `K` associated with the
/// pointer `P`. `K` may be a heap-allocated object. `P` is supposed to be
/// a cheap value type like `u32` or `usize`. As the index entry of type
/// `([u8; N], P)` is small and has no indirect reference, constructing
/// the index should be faster than sorting the source `(K, _)` pairs.
///
/// A key `K` must be at least `N` bytes long.
///
/// Constructed via `IdIndex::builder()`/`with_capacity()` and
/// `IdIndexBuilder::build()`.
#[derive(Clone, Debug)]
pub struct IdIndex<K, P, const N: usize> {
    // Maybe better to build separate (keys, values) vectors, but there's no std function
    // to co-sort them.
    index: Vec<([u8; N], P)>,
    // Let's pretend [u8; N] above were of type K. It helps type inference, and ensures that
    // IdIndexSource has the same key type.
    phantom_key: PhantomData<K>,
}
/// Source table for `IdIndex` to map pointer of type `P` to entry.
pub trait IdIndexSource<P> {
    /// Entry type returned by `entry_at()`.
    type Entry;
    /// Dereferences `pointer` to the source entry it addresses.
    fn entry_at(&self, pointer: &P) -> Self::Entry;
}
/// Source table entry of `IdIndex`, which is conceptually a `(key, value)`
/// pair.
pub trait IdIndexSourceEntry<K> {
    /// Extracts the full-length key of this entry.
    fn to_key(&self) -> K;
}
/// Accumulates unsorted `(short key, pointer)` entries for an `IdIndex`.
#[derive(Clone, Debug)]
pub struct IdIndexBuilder<K, P, const N: usize> {
    // Entries in insertion order; sorted once in build().
    unsorted_index: Vec<([u8; N], P)>,
    phantom_key: PhantomData<K>,
}
impl<K, P, const N: usize> IdIndexBuilder<K, P, N>
where
    K: ObjectId + Ord,
{
    /// Inserts new entry. Multiple values can be associated with a single key.
    ///
    /// Only the first `N` bytes of `key` are stored; panics if shorter.
    pub fn insert(&mut self, key: &K, pointer: P) {
        let short_key = unwrap_as_short_key(key.as_bytes());
        self.unsorted_index.push((*short_key, pointer));
    }
    /// Sorts the accumulated entries by short key and returns the finished
    /// index.
    pub fn build(self) -> IdIndex<K, P, N> {
        let mut index = self.unsorted_index;
        index.sort_unstable_by_key(|(s, _)| *s);
        let phantom_key = self.phantom_key;
        IdIndex { index, phantom_key }
    }
}
impl<K, P, const N: usize> IdIndex<K, P, N>
where
    K: ObjectId + Ord,
{
    /// Returns an empty builder for this index type.
    pub fn builder() -> IdIndexBuilder<K, P, N> {
        IdIndexBuilder {
            unsorted_index: Vec::new(),
            phantom_key: PhantomData,
        }
    }
    /// Returns a builder preallocated for `capacity` entries.
    pub fn with_capacity(capacity: usize) -> IdIndexBuilder<K, P, N> {
        IdIndexBuilder {
            unsorted_index: Vec::with_capacity(capacity),
            phantom_key: PhantomData,
        }
    }
    /// Looks up entries with the given prefix, and collects values if matched
    /// entries have unambiguous keys.
    pub fn resolve_prefix_with<B, S, U>(
        &self,
        source: S,
        prefix: &HexPrefix,
        entry_mapper: impl FnMut(S::Entry) -> U,
    ) -> PrefixResolution<(K, B)>
    where
        B: FromIterator<U>,
        S: IdIndexSource<P>,
        S::Entry: IdIndexSourceEntry<K>,
    {
        // Collects (key, entry) pairs into SingleMatch if all keys equal the
        // first one; AmbiguousMatch otherwise; NoMatch if the range is empty.
        fn collect<B, K, E, U>(
            mut range: impl Iterator<Item = (K, E)>,
            mut entry_mapper: impl FnMut(E) -> U,
        ) -> PrefixResolution<(K, B)>
        where
            B: FromIterator<U>,
            K: Eq,
        {
            if let Some((first_key, first_entry)) = range.next() {
                // A key mismatch produces a None, which short-circuits the
                // Option-collecting below.
                let maybe_values: Option<B> = iter::once(Some(entry_mapper(first_entry)))
                    .chain(range.map(|(k, e)| (k == first_key).then(|| entry_mapper(e))))
                    .collect();
                if let Some(values) = maybe_values {
                    PrefixResolution::SingleMatch((first_key, values))
                } else {
                    PrefixResolution::AmbiguousMatch
                }
            } else {
                PrefixResolution::NoMatch
            }
        }
        let min_bytes = prefix.min_prefix_bytes();
        if min_bytes.is_empty() {
            // We consider an empty prefix ambiguous even if the index has a single entry.
            return PrefixResolution::AmbiguousMatch;
        }
        let to_key_entry_pair = |(_, pointer): &(_, P)| -> (K, S::Entry) {
            let entry = source.entry_at(pointer);
            (entry.to_key(), entry)
        };
        if min_bytes.len() > N {
            // If the min prefix (including odd byte) is longer than the stored short keys,
            // we are sure that min_bytes[..N] does not include the odd byte. Use it to
            // take contiguous range, then filter by (longer) prefix.matches().
            let short_bytes = unwrap_as_short_key(min_bytes);
            let pos = self.index.partition_point(|(s, _)| s < short_bytes);
            let range = self.index[pos..]
                .iter()
                .take_while(|(s, _)| s == short_bytes)
                .map(to_key_entry_pair)
                .filter(|(k, _)| prefix.matches(k));
            collect(range, entry_mapper)
        } else {
            // Otherwise, use prefix.matches() to deal with odd byte. Since the prefix is
            // covered by short key width, we're sure that the matching prefixes are sorted.
            let pos = self.index.partition_point(|(s, _)| &s[..] < min_bytes);
            let range = self.index[pos..]
                .iter()
                .map(to_key_entry_pair)
                .take_while(|(k, _)| prefix.matches(k));
            collect(range, entry_mapper)
        }
    }
    /// Looks up unambiguous key with the given prefix.
    pub fn resolve_prefix_to_key<S>(&self, source: S, prefix: &HexPrefix) -> PrefixResolution<K>
    where
        S: IdIndexSource<P>,
        S::Entry: IdIndexSourceEntry<K>,
    {
        self.resolve_prefix_with(source, prefix, |_| ())
            .map(|(key, ())| key)
    }
    /// Looks up entry for the key. Returns accessor to neighbors.
    ///
    /// Returns `None` if the full-length `key` isn't in the index.
    pub fn lookup_exact<'i, 'q, S>(
        &'i self,
        source: S,
        key: &'q K,
    ) -> Option<IdIndexLookup<'i, 'q, K, P, S, N>>
    where
        S: IdIndexSource<P>,
        S::Entry: IdIndexSourceEntry<K>,
    {
        let lookup = self.lookup_some(source, key);
        lookup.has_key().then_some(lookup)
    }
    // Positions a lookup cursor at the first entry with the key's short key,
    // whether or not the full key exists in the index.
    fn lookup_some<'i, 'q, S>(&'i self, source: S, key: &'q K) -> IdIndexLookup<'i, 'q, K, P, S, N>
    where
        S: IdIndexSource<P>,
    {
        let short_key = unwrap_as_short_key(key.as_bytes());
        let index = &self.index;
        let pos = index.partition_point(|(s, _)| s < short_key);
        IdIndexLookup {
            index,
            source,
            key,
            pos,
        }
    }
    /// This function returns the shortest length of a prefix of `key` that
    /// disambiguates it from every other key in the index.
    ///
    /// The length to be returned is a number of hexadecimal digits.
    ///
    /// This has some properties that we do not currently make much use of:
    ///
    /// - The algorithm works even if `key` itself is not in the index.
    ///
    /// - In the special case when there are keys in the trie for which our
    ///   `key` is an exact prefix, returns `key.len() + 1`. Conceptually, in
    ///   order to disambiguate, you need every letter of the key *and* the
    ///   additional fact that it's the entire key). This case is extremely
    ///   unlikely for hashes with 12+ hexadecimal characters.
    pub fn shortest_unique_prefix_len<S>(&self, source: S, key: &K) -> usize
    where
        S: IdIndexSource<P>,
        S::Entry: IdIndexSourceEntry<K>,
    {
        self.lookup_some(source, key).shortest_unique_prefix_len()
    }
}
/// Cursor into an `IdIndex`, created by `IdIndex::lookup_exact()` (or the
/// private `lookup_some()`), positioned at the key's short-key chunk.
#[derive(Clone, Copy, Debug)]
pub struct IdIndexLookup<'i, 'q, K, P, S, const N: usize> {
    index: &'i Vec<([u8; N], P)>,
    source: S,
    key: &'q K,
    pos: usize, // may be index.len()
}
impl<K, P, S, const N: usize> IdIndexLookup<'_, '_, K, P, S, N>
where
    K: ObjectId + Eq,
    S: IdIndexSource<P>,
    S::Entry: IdIndexSourceEntry<K>,
{
    // True if any entry in the current short-key chunk has exactly this
    // full-length key.
    fn has_key(&self) -> bool {
        let short_key = unwrap_as_short_key(self.key.as_bytes());
        self.index[self.pos..]
            .iter()
            .take_while(|(s, _)| s == short_key)
            .any(|(_, p)| self.source.entry_at(p).to_key() == *self.key)
    }
    /// Returns the number of hexadecimal digits needed to disambiguate the
    /// lookup key from every other key in the index (at least 1).
    pub fn shortest_unique_prefix_len(&self) -> usize {
        // Since entries having the same short key aren't sorted by the full-length key,
        // we need to scan all entries in the current chunk, plus left/right neighbors.
        // Typically, current.len() is 1.
        let short_key = unwrap_as_short_key(self.key.as_bytes());
        let left = self.pos.checked_sub(1).map(|p| &self.index[p]);
        let (current, right) = {
            let range = &self.index[self.pos..];
            let count = range.iter().take_while(|(s, _)| s == short_key).count();
            (&range[..count], range.get(count))
        };
        // Left/right neighbors should have unique short keys. For the current chunk,
        // we need to look up full-length keys.
        let unique_len = |a: &[u8], b: &[u8]| hex_util::common_hex_len(a, b) + 1;
        let neighbor_lens = left
            .iter()
            .chain(&right)
            .map(|(s, _)| unique_len(s, short_key));
        let current_lens = current
            .iter()
            .map(|(_, p)| self.source.entry_at(p).to_key())
            .filter(|key| key != self.key)
            .map(|key| unique_len(key.as_bytes(), self.key.as_bytes()));
        // Even if the key is the only one in the index, we require at least one digit.
        neighbor_lens.chain(current_lens).max().unwrap_or(1)
    }
}
/// Returns the first `N` bytes of `key_bytes` as a fixed-size array
/// reference, panicking with "key too short" if fewer bytes are available.
fn unwrap_as_short_key<const N: usize>(key_bytes: &[u8]) -> &[u8; N] {
    match key_bytes.get(..N) {
        Some(short_slice) => short_slice.try_into().unwrap(),
        None => panic!("key too short"),
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[derive(Clone, Copy, Eq, PartialEq)]
struct Position(usize);
impl<'a, V> IdIndexSource<Position> for &'a [(ChangeId, V)] {
type Entry = &'a (ChangeId, V);
fn entry_at(&self, pointer: &Position) -> Self::Entry {
&self[pointer.0]
}
}
impl<V> IdIndexSourceEntry<ChangeId> for &'_ (ChangeId, V) {
fn to_key(&self) -> ChangeId {
let (change_id, _) = self;
change_id.clone()
}
}
fn build_id_index<V, const N: usize>(
entries: &[(ChangeId, V)],
) -> IdIndex<ChangeId, Position, N> {
let mut builder = IdIndex::with_capacity(entries.len());
for (i, (k, _)) in entries.iter().enumerate() {
builder.insert(k, Position(i));
}
builder.build()
}
#[test]
fn test_id_index_resolve_prefix() {
let source = vec![
(ChangeId::from_hex("0000"), 0),
(ChangeId::from_hex("0099"), 1),
(ChangeId::from_hex("0099"), 2),
(ChangeId::from_hex("0aaa"), 3),
(ChangeId::from_hex("0aab"), 4),
];
// short_key.len() == full_key.len()
let id_index = build_id_index::<_, 2>(&source);
let resolve_prefix = |prefix: &HexPrefix| {
let resolution: PrefixResolution<(_, Vec<_>)> =
id_index.resolve_prefix_with(&*source, prefix, |(_, v)| *v);
resolution.map(|(key, mut values)| {
values.sort(); // order of values might not be preserved by IdIndex
(key, values)
})
};
assert_eq!(
resolve_prefix(&HexPrefix::try_from_hex("0").unwrap()),
PrefixResolution::AmbiguousMatch,
);
assert_eq!(
resolve_prefix(&HexPrefix::try_from_hex("00").unwrap()),
PrefixResolution::AmbiguousMatch,
);
assert_eq!(
resolve_prefix(&HexPrefix::try_from_hex("000").unwrap()),
PrefixResolution::SingleMatch((ChangeId::from_hex("0000"), vec![0])),
);
assert_eq!(
resolve_prefix(&HexPrefix::try_from_hex("0001").unwrap()),
PrefixResolution::NoMatch,
);
assert_eq!(
resolve_prefix(&HexPrefix::try_from_hex("009").unwrap()),
PrefixResolution::SingleMatch((ChangeId::from_hex("0099"), vec![1, 2])),
);
assert_eq!(
resolve_prefix(&HexPrefix::try_from_hex("0aa").unwrap()),
PrefixResolution::AmbiguousMatch,
);
assert_eq!(
resolve_prefix(&HexPrefix::try_from_hex("0aab").unwrap()),
PrefixResolution::SingleMatch((ChangeId::from_hex("0aab"), vec![4])),
);
assert_eq!(
resolve_prefix(&HexPrefix::try_from_hex("f").unwrap()),
PrefixResolution::NoMatch,
);
// short_key.len() < full_key.len()
let id_index = build_id_index::<_, 1>(&source);
let resolve_prefix = |prefix: &HexPrefix| {
let resolution: PrefixResolution<(_, Vec<_>)> =
id_index.resolve_prefix_with(&*source, prefix, |(_, v)| *v);
resolution.map(|(key, mut values)| {
values.sort(); // order of values might not be preserved by IdIndex
(key, values)
})
};
assert_eq!(
resolve_prefix(&HexPrefix::try_from_hex("00").unwrap()),
PrefixResolution::AmbiguousMatch,
);
assert_eq!(
resolve_prefix(&HexPrefix::try_from_hex("000").unwrap()),
PrefixResolution::SingleMatch((ChangeId::from_hex("0000"), vec![0])),
);
assert_eq!(
resolve_prefix(&HexPrefix::try_from_hex("0001").unwrap()),
PrefixResolution::NoMatch,
);
// For short key "00", ["0000", "0099", "0099"] would match. We shouldn't
// break at "009".matches("0000").
assert_eq!(
resolve_prefix(&HexPrefix::try_from_hex("009").unwrap()),
PrefixResolution::SingleMatch((ChangeId::from_hex("0099"), vec![1, 2])),
);
assert_eq!(
resolve_prefix(&HexPrefix::try_from_hex("0a").unwrap()),
PrefixResolution::AmbiguousMatch,
);
assert_eq!(
resolve_prefix(&HexPrefix::try_from_hex("0aa").unwrap()),
PrefixResolution::AmbiguousMatch,
);
assert_eq!(
resolve_prefix(&HexPrefix::try_from_hex("0aab").unwrap()),
PrefixResolution::SingleMatch((ChangeId::from_hex("0aab"), vec![4])),
);
}
#[test]
fn test_lookup_exact() {
// No crash if empty
let source: Vec<(ChangeId, ())> = vec![];
let id_index = build_id_index::<_, 1>(&source);
assert!(
id_index
.lookup_exact(&*source, &ChangeId::from_hex("00"))
.is_none()
);
let source = vec![
(ChangeId::from_hex("ab00"), ()),
(ChangeId::from_hex("ab01"), ()),
];
let id_index = build_id_index::<_, 1>(&source);
assert!(
id_index
.lookup_exact(&*source, &ChangeId::from_hex("aa00"))
.is_none()
);
assert!(
id_index
.lookup_exact(&*source, &ChangeId::from_hex("ab00"))
.is_some()
);
assert!(
id_index
.lookup_exact(&*source, &ChangeId::from_hex("ab01"))
.is_some()
);
assert!(
id_index
.lookup_exact(&*source, &ChangeId::from_hex("ab02"))
.is_none()
);
assert!(
id_index
.lookup_exact(&*source, &ChangeId::from_hex("ac00"))
.is_none()
);
}
#[test]
fn test_id_index_shortest_unique_prefix_len() {
// No crash if empty
let source: Vec<(ChangeId, ())> = vec![];
let id_index = build_id_index::<_, 1>(&source);
assert_eq!(
id_index.shortest_unique_prefix_len(&*source, &ChangeId::from_hex("00")),
1
);
let source = vec![
(ChangeId::from_hex("ab"), ()),
(ChangeId::from_hex("acd0"), ()),
(ChangeId::from_hex("acd0"), ()), // duplicated key is allowed
];
let id_index = build_id_index::<_, 1>(&source);
assert_eq!(
id_index.shortest_unique_prefix_len(&*source, &ChangeId::from_hex("acd0")),
2
);
assert_eq!(
id_index.shortest_unique_prefix_len(&*source, &ChangeId::from_hex("ac")),
3
);
let source = vec![
(ChangeId::from_hex("ab"), ()),
(ChangeId::from_hex("acd0"), ()),
(ChangeId::from_hex("acf0"), ()),
(ChangeId::from_hex("a0"), ()),
(ChangeId::from_hex("ba"), ()),
];
let id_index = build_id_index::<_, 1>(&source);
assert_eq!(
id_index.shortest_unique_prefix_len(&*source, &ChangeId::from_hex("a0")),
2
);
assert_eq!(
id_index.shortest_unique_prefix_len(&*source, &ChangeId::from_hex("ba")),
1
);
assert_eq!(
id_index.shortest_unique_prefix_len(&*source, &ChangeId::from_hex("ab")),
2
);
assert_eq!(
id_index.shortest_unique_prefix_len(&*source, &ChangeId::from_hex("acd0")),
3
);
// If it were there, the length would be 1.
assert_eq!(
id_index.shortest_unique_prefix_len(&*source, &ChangeId::from_hex("c0")),
1
);
let source = vec![
(ChangeId::from_hex("000000"), ()),
(ChangeId::from_hex("01ffff"), ()),
(ChangeId::from_hex("010000"), ()),
(ChangeId::from_hex("01fffe"), ()),
(ChangeId::from_hex("ffffff"), ()),
];
let id_index = build_id_index::<_, 1>(&source);
// Multiple candidates in the current chunk "01"
assert_eq!(
id_index.shortest_unique_prefix_len(&*source, &ChangeId::from_hex("01ffff")),
6
);
assert_eq!(
id_index.shortest_unique_prefix_len(&*source, &ChangeId::from_hex("010000")),
3
);
assert_eq!(
id_index.shortest_unique_prefix_len(&*source, &ChangeId::from_hex("01fffe")),
6
);
// Only right neighbor
assert_eq!(
id_index.shortest_unique_prefix_len(&*source, &ChangeId::from_hex("000000")),
2
);
// Only left neighbor
assert_eq!(
id_index.shortest_unique_prefix_len(&*source, &ChangeId::from_hex("ffffff")),
1
);
}
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/tree.rs | lib/src/tree.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::fmt::Debug;
use std::fmt::Error;
use std::fmt::Formatter;
use std::hash::Hash;
use std::hash::Hasher;
use std::sync::Arc;
use itertools::Itertools as _;
use crate::backend;
use crate::backend::BackendResult;
use crate::backend::TreeEntriesNonRecursiveIterator;
use crate::backend::TreeId;
use crate::backend::TreeValue;
use crate::matchers::Matcher;
use crate::repo_path::RepoPath;
use crate::repo_path::RepoPathBuf;
use crate::repo_path::RepoPathComponent;
use crate::store::Store;
#[derive(Clone)]
pub struct Tree {
store: Arc<Store>,
dir: RepoPathBuf,
id: TreeId,
data: Arc<backend::Tree>,
}
impl Debug for Tree {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
f.debug_struct("Tree")
.field("dir", &self.dir)
.field("id", &self.id)
.finish()
}
}
impl PartialEq for Tree {
fn eq(&self, other: &Self) -> bool {
self.id == other.id && self.dir == other.dir
}
}
impl Eq for Tree {}
impl Hash for Tree {
fn hash<H: Hasher>(&self, state: &mut H) {
self.dir.hash(state);
self.id.hash(state);
}
}
impl Tree {
pub fn new(store: Arc<Store>, dir: RepoPathBuf, id: TreeId, data: Arc<backend::Tree>) -> Self {
Self {
store,
dir,
id,
data,
}
}
pub fn empty(store: Arc<Store>, dir: RepoPathBuf) -> Self {
let id = store.empty_tree_id().clone();
Self {
store,
dir,
id,
data: Arc::new(backend::Tree::default()),
}
}
pub fn store(&self) -> &Arc<Store> {
&self.store
}
pub fn dir(&self) -> &RepoPath {
&self.dir
}
pub fn id(&self) -> &TreeId {
&self.id
}
pub fn data(&self) -> &backend::Tree {
&self.data
}
pub fn entries_non_recursive(&self) -> TreeEntriesNonRecursiveIterator<'_> {
self.data.entries()
}
pub fn entries_matching<'matcher>(
&self,
matcher: &'matcher dyn Matcher,
) -> TreeEntriesIterator<'matcher> {
TreeEntriesIterator::new(self.clone(), matcher)
}
pub fn value(&self, basename: &RepoPathComponent) -> Option<&TreeValue> {
self.data.value(basename)
}
pub fn path_value(&self, path: &RepoPath) -> BackendResult<Option<TreeValue>> {
assert_eq!(self.dir(), RepoPath::root());
match path.split() {
Some((dir, basename)) => {
let tree = self.sub_tree_recursive(dir)?;
Ok(tree.and_then(|tree| tree.data.value(basename).cloned()))
}
None => Ok(Some(TreeValue::Tree(self.id.clone()))),
}
}
pub fn sub_tree(&self, name: &RepoPathComponent) -> BackendResult<Option<Self>> {
if let Some(sub_tree) = self.data.value(name) {
match sub_tree {
TreeValue::Tree(sub_tree_id) => {
let subdir = self.dir.join(name);
let sub_tree = self.store.get_tree(subdir, sub_tree_id)?;
Ok(Some(sub_tree))
}
_ => Ok(None),
}
} else {
Ok(None)
}
}
fn known_sub_tree(&self, subdir: RepoPathBuf, id: &TreeId) -> Self {
self.store.get_tree(subdir, id).unwrap()
}
/// Look up the tree at the given path.
pub fn sub_tree_recursive(&self, path: &RepoPath) -> BackendResult<Option<Self>> {
let mut current_tree = self.clone();
for name in path.components() {
match current_tree.sub_tree(name)? {
None => {
return Ok(None);
}
Some(sub_tree) => {
current_tree = sub_tree;
}
}
}
// TODO: It would be nice to be able to return a reference here, but
// then we would have to figure out how to share Tree instances
// across threads.
Ok(Some(current_tree))
}
}
pub struct TreeEntriesIterator<'matcher> {
stack: Vec<TreeEntriesDirItem>,
matcher: &'matcher dyn Matcher,
}
struct TreeEntriesDirItem {
tree: Tree,
entries: Vec<(RepoPathBuf, TreeValue)>,
}
impl From<Tree> for TreeEntriesDirItem {
fn from(tree: Tree) -> Self {
let mut entries = tree
.entries_non_recursive()
.map(|entry| (tree.dir().join(entry.name()), entry.value().clone()))
.collect_vec();
entries.reverse();
Self { tree, entries }
}
}
impl<'matcher> TreeEntriesIterator<'matcher> {
fn new(tree: Tree, matcher: &'matcher dyn Matcher) -> Self {
// TODO: Restrict walk according to Matcher::visit()
Self {
stack: vec![TreeEntriesDirItem::from(tree)],
matcher,
}
}
}
impl Iterator for TreeEntriesIterator<'_> {
type Item = (RepoPathBuf, TreeValue);
fn next(&mut self) -> Option<Self::Item> {
while let Some(top) = self.stack.last_mut() {
if let Some((path, value)) = top.entries.pop() {
match value {
TreeValue::Tree(id) => {
// TODO: Handle the other cases (specific files and trees)
if self.matcher.visit(&path).is_nothing() {
continue;
}
let subtree = top.tree.known_sub_tree(path, &id);
self.stack.push(TreeEntriesDirItem::from(subtree));
}
value => {
if self.matcher.matches(&path) {
return Some((path, value));
}
}
};
} else {
self.stack.pop();
}
}
None
}
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/ref_name.rs | lib/src/ref_name.rs | // Copyright 2025 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Name types for commit references.
//!
//! Name types can be constructed from a string:
//! ```
//! # use jj_lib::ref_name::*;
//! let _: RefNameBuf = "main".into();
//! let _: &RemoteName = "origin".as_ref();
//! ```
//!
//! However, they cannot be converted to other name types:
//! ```compile_fail
//! # use jj_lib::ref_name::*;
//! let _: RefNameBuf = RemoteName::new("origin").into();
//! ```
//! ```compile_fail
//! # use jj_lib::ref_name::*;
//! let _: &RemoteName = RefName::new("main").as_ref();
//! ```
use std::borrow::Borrow;
use std::fmt;
use std::fmt::Display;
use std::ops::Deref;
use ref_cast::RefCastCustom;
use ref_cast::ref_cast_custom;
use crate::content_hash::ContentHash;
use crate::revset;
/// Owned Git ref name in fully-qualified form (e.g. `refs/heads/main`.)
///
/// Use `.as_str()` or `.as_symbol()` for displaying. Other than that, this can
/// be considered an immutable `String`.
// Eq, Hash, and Ord must be compatible with GitRefName.
#[derive(Clone, ContentHash, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct GitRefNameBuf(String);
/// Borrowed Git ref name in fully-qualified form (e.g. `refs/heads/main`.)
///
/// Use `.as_str()` or `.as_symbol()` for displaying. Other than that, this can
/// be considered an immutable `str`.
#[derive(ContentHash, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, RefCastCustom)]
#[repr(transparent)]
pub struct GitRefName(str);
/// Owned local (or local part of remote) bookmark or tag name.
///
/// Use `.as_str()` or `.as_symbol()` for displaying. Other than that, this can
/// be considered an immutable `String`.
// Eq, Hash, and Ord must be compatible with RefName.
#[derive(Clone, ContentHash, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct RefNameBuf(String);
/// Borrowed local (or local part of remote) bookmark or tag name.
///
/// Use `.as_str()` or `.as_symbol()` for displaying. Other than that, this can
/// be considered an immutable `str`.
#[derive(ContentHash, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, RefCastCustom)]
#[repr(transparent)]
pub struct RefName(str);
/// Owned remote name.
///
/// Use `.as_str()` or `.as_symbol()` for displaying. Other than that, this can
/// be considered an immutable `String`.
// Eq, Hash, and Ord must be compatible with RemoteName.
#[derive(Clone, ContentHash, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct RemoteNameBuf(String);
/// Borrowed remote name.
///
/// Use `.as_str()` or `.as_symbol()` for displaying. Other than that, this can
/// be considered an immutable `str`.
#[derive(ContentHash, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, RefCastCustom)]
#[repr(transparent)]
pub struct RemoteName(str);
/// Owned workspace name.
///
/// Use `.as_str()` or `.as_symbol()` for displaying. Other than that, this can
/// be considered an immutable `String`.
// Eq, Hash, and Ord must be compatible with WorkspaceName.
#[derive(Clone, ContentHash, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, serde::Serialize)]
#[serde(transparent)]
pub struct WorkspaceNameBuf(String);
/// Borrowed workspace name.
///
/// Use `.as_str()` or `.as_symbol()` for displaying. Other than that, this can
/// be considered an immutable `str`.
#[derive(
ContentHash, Debug, Eq, Hash, Ord, PartialEq, PartialOrd, RefCastCustom, serde::Serialize,
)]
#[serde(transparent)]
#[repr(transparent)]
pub struct WorkspaceName(str);
macro_rules! impl_partial_eq {
($borrowed_ty:ty, $lhs:ty, $rhs:ty) => {
impl PartialEq<$rhs> for $lhs {
fn eq(&self, other: &$rhs) -> bool {
<$borrowed_ty as PartialEq>::eq(self, other)
}
}
impl PartialEq<$lhs> for $rhs {
fn eq(&self, other: &$lhs) -> bool {
<$borrowed_ty as PartialEq>::eq(self, other)
}
}
};
}
macro_rules! impl_partial_eq_str {
($borrowed_ty:ty, $lhs:ty, $rhs:ty) => {
impl PartialEq<$rhs> for $lhs {
fn eq(&self, other: &$rhs) -> bool {
<$borrowed_ty as PartialEq>::eq(self, other.as_ref())
}
}
impl PartialEq<$lhs> for $rhs {
fn eq(&self, other: &$lhs) -> bool {
<$borrowed_ty as PartialEq>::eq(self.as_ref(), other)
}
}
};
}
macro_rules! impl_name_type {
($owned_ty:ident, $borrowed_ty:ident) => {
impl $owned_ty {
/// Consumes this and returns the underlying string.
pub fn into_string(self) -> String {
self.0
}
}
impl $borrowed_ty {
/// Wraps string name.
#[ref_cast_custom]
pub const fn new(name: &str) -> &Self;
/// Returns the underlying string.
pub const fn as_str(&self) -> &str {
&self.0
}
/// Converts to symbol for displaying.
pub fn as_symbol(&self) -> &RefSymbol {
RefSymbol::new(&self.0)
}
}
// Owned type can be constructed from (weakly-typed) string:
impl From<String> for $owned_ty {
fn from(value: String) -> Self {
$owned_ty(value)
}
}
impl From<&String> for $owned_ty {
fn from(value: &String) -> Self {
$owned_ty(value.clone())
}
}
impl From<&str> for $owned_ty {
fn from(value: &str) -> Self {
$owned_ty(value.to_owned())
}
}
// Owned type can be constructed from borrowed type:
impl From<&$owned_ty> for $owned_ty {
fn from(value: &$owned_ty) -> Self {
value.clone()
}
}
impl From<&$borrowed_ty> for $owned_ty {
fn from(value: &$borrowed_ty) -> Self {
value.to_owned()
}
}
// Borrowed type can be constructed from (weakly-typed) string:
impl AsRef<$borrowed_ty> for String {
fn as_ref(&self) -> &$borrowed_ty {
$borrowed_ty::new(self)
}
}
impl AsRef<$borrowed_ty> for str {
fn as_ref(&self) -> &$borrowed_ty {
$borrowed_ty::new(self)
}
}
// Types can be converted to (weakly-typed) string:
impl From<$owned_ty> for String {
fn from(value: $owned_ty) -> Self {
value.0
}
}
impl From<&$owned_ty> for String {
fn from(value: &$owned_ty) -> Self {
value.0.clone()
}
}
impl From<&$borrowed_ty> for String {
fn from(value: &$borrowed_ty) -> Self {
value.0.to_owned()
}
}
impl AsRef<str> for $owned_ty {
fn as_ref(&self) -> &str {
self.as_str()
}
}
impl AsRef<str> for $borrowed_ty {
fn as_ref(&self) -> &str {
self.as_str()
}
}
// Types can be converted to borrowed type, and back to owned type:
impl AsRef<$borrowed_ty> for $owned_ty {
fn as_ref(&self) -> &$borrowed_ty {
self
}
}
impl AsRef<$borrowed_ty> for $borrowed_ty {
fn as_ref(&self) -> &$borrowed_ty {
self
}
}
impl Borrow<$borrowed_ty> for $owned_ty {
fn borrow(&self) -> &$borrowed_ty {
self
}
}
impl Deref for $owned_ty {
type Target = $borrowed_ty;
fn deref(&self) -> &Self::Target {
$borrowed_ty::new(&self.0)
}
}
impl ToOwned for $borrowed_ty {
type Owned = $owned_ty;
fn to_owned(&self) -> Self::Owned {
$owned_ty(self.0.to_owned())
}
}
// Owned and borrowed types can be compared:
impl_partial_eq!($borrowed_ty, $owned_ty, $borrowed_ty);
impl_partial_eq!($borrowed_ty, $owned_ty, &$borrowed_ty);
// Types can be compared with (weakly-typed) string:
impl_partial_eq_str!($borrowed_ty, $owned_ty, str);
impl_partial_eq_str!($borrowed_ty, $owned_ty, &str);
impl_partial_eq_str!($borrowed_ty, $owned_ty, String);
impl_partial_eq_str!($borrowed_ty, $borrowed_ty, str);
impl_partial_eq_str!($borrowed_ty, $borrowed_ty, &str);
impl_partial_eq_str!($borrowed_ty, $borrowed_ty, String);
impl_partial_eq_str!($borrowed_ty, &$borrowed_ty, str);
impl_partial_eq_str!($borrowed_ty, &$borrowed_ty, String);
};
}
impl_name_type!(GitRefNameBuf, GitRefName);
// TODO: split RefName into BookmarkName and TagName? That will make sense at
// repo/view API surface, but we'll need generic RemoteRefSymbol type, etc.
impl_name_type!(RefNameBuf, RefName);
impl_name_type!(RemoteNameBuf, RemoteName);
impl_name_type!(WorkspaceNameBuf, WorkspaceName);
impl RefName {
/// Constructs a remote symbol with this local name.
pub fn to_remote_symbol<'a>(&'a self, remote: &'a RemoteName) -> RemoteRefSymbol<'a> {
RemoteRefSymbol { name: self, remote }
}
}
impl WorkspaceName {
/// Default workspace name.
pub const DEFAULT: &Self = Self::new("default");
}
/// Symbol for displaying.
///
/// This type can be displayed with quoting and escaping if necessary.
#[derive(Debug, RefCastCustom)]
#[repr(transparent)]
pub struct RefSymbol(str);
impl RefSymbol {
/// Wraps string name.
#[ref_cast_custom]
const fn new(name: &str) -> &Self;
}
impl Display for RefSymbol {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad(&revset::format_symbol(&self.0))
}
}
/// Owned remote bookmark or tag name.
///
/// This type can be displayed in `{name}@{remote}` form, with quoting and
/// escaping if necessary.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct RemoteRefSymbolBuf {
/// Local name.
pub name: RefNameBuf,
/// Remote name.
pub remote: RemoteNameBuf,
}
impl RemoteRefSymbolBuf {
/// Converts to reference type.
pub fn as_ref(&self) -> RemoteRefSymbol<'_> {
RemoteRefSymbol {
name: &self.name,
remote: &self.remote,
}
}
}
/// Borrowed remote bookmark or tag name.
///
/// This type can be displayed in `{name}@{remote}` form, with quoting and
/// escaping if necessary.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct RemoteRefSymbol<'a> {
/// Local name.
pub name: &'a RefName,
/// Remote name.
pub remote: &'a RemoteName,
}
impl RemoteRefSymbol<'_> {
/// Converts to owned type.
pub fn to_owned(self) -> RemoteRefSymbolBuf {
RemoteRefSymbolBuf {
name: self.name.to_owned(),
remote: self.remote.to_owned(),
}
}
}
impl From<RemoteRefSymbol<'_>> for RemoteRefSymbolBuf {
fn from(value: RemoteRefSymbol<'_>) -> Self {
value.to_owned()
}
}
impl PartialEq<RemoteRefSymbol<'_>> for RemoteRefSymbolBuf {
fn eq(&self, other: &RemoteRefSymbol) -> bool {
self.as_ref() == *other
}
}
impl PartialEq<RemoteRefSymbol<'_>> for &RemoteRefSymbolBuf {
fn eq(&self, other: &RemoteRefSymbol) -> bool {
self.as_ref() == *other
}
}
impl PartialEq<RemoteRefSymbolBuf> for RemoteRefSymbol<'_> {
fn eq(&self, other: &RemoteRefSymbolBuf) -> bool {
*self == other.as_ref()
}
}
impl PartialEq<&RemoteRefSymbolBuf> for RemoteRefSymbol<'_> {
fn eq(&self, other: &&RemoteRefSymbolBuf) -> bool {
*self == other.as_ref()
}
}
impl Display for RemoteRefSymbolBuf {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
Display::fmt(&self.as_ref(), f)
}
}
impl Display for RemoteRefSymbol<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let RemoteRefSymbol { name, remote } = self;
f.pad(&revset::format_remote_symbol(&name.0, &remote.0))
}
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/local_working_copy.rs | lib/src/local_working_copy.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::borrow::Cow;
use std::cmp::Ordering;
use std::collections::HashMap;
use std::collections::HashSet;
use std::error::Error;
use std::fs;
use std::fs::DirEntry;
use std::fs::File;
use std::fs::Metadata;
use std::fs::OpenOptions;
use std::io;
use std::io::Read as _;
use std::io::Write as _;
use std::iter;
use std::mem;
use std::ops::Range;
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt as _;
use std::path::Path;
use std::path::PathBuf;
use std::slice;
use std::sync::Arc;
use std::sync::OnceLock;
use std::sync::mpsc::Sender;
use std::sync::mpsc::channel;
use std::time::UNIX_EPOCH;
use async_trait::async_trait;
use either::Either;
use futures::StreamExt as _;
use itertools::EitherOrBoth;
use itertools::Itertools as _;
use once_cell::unsync::OnceCell;
use pollster::FutureExt as _;
use prost::Message as _;
use rayon::iter::IntoParallelIterator as _;
use rayon::prelude::IndexedParallelIterator as _;
use rayon::prelude::ParallelIterator as _;
use tempfile::NamedTempFile;
use thiserror::Error;
use tokio::io::AsyncRead;
use tokio::io::AsyncReadExt as _;
use tracing::instrument;
use tracing::trace_span;
use crate::backend::BackendError;
use crate::backend::CopyId;
use crate::backend::FileId;
use crate::backend::MillisSinceEpoch;
use crate::backend::SymlinkId;
use crate::backend::TreeId;
use crate::backend::TreeValue;
use crate::commit::Commit;
use crate::config::ConfigGetError;
use crate::conflict_labels::ConflictLabels;
use crate::conflicts;
use crate::conflicts::ConflictMarkerStyle;
use crate::conflicts::ConflictMaterializeOptions;
use crate::conflicts::MIN_CONFLICT_MARKER_LEN;
use crate::conflicts::MaterializedTreeValue;
use crate::conflicts::choose_materialized_conflict_marker_len;
use crate::conflicts::materialize_merge_result_to_bytes;
use crate::conflicts::materialize_tree_value;
pub use crate::eol::EolConversionMode;
use crate::eol::TargetEolStrategy;
use crate::file_util::BlockingAsyncReader;
use crate::file_util::check_symlink_support;
use crate::file_util::copy_async_to_sync;
use crate::file_util::persist_temp_file;
use crate::file_util::symlink_file;
use crate::fsmonitor::FsmonitorSettings;
#[cfg(feature = "watchman")]
use crate::fsmonitor::WatchmanConfig;
#[cfg(feature = "watchman")]
use crate::fsmonitor::watchman;
use crate::gitignore::GitIgnoreFile;
use crate::lock::FileLock;
use crate::matchers::DifferenceMatcher;
use crate::matchers::EverythingMatcher;
use crate::matchers::FilesMatcher;
use crate::matchers::IntersectionMatcher;
use crate::matchers::Matcher;
use crate::matchers::PrefixMatcher;
use crate::merge::Merge;
use crate::merge::MergeBuilder;
use crate::merge::MergedTreeValue;
use crate::merge::SameChange;
use crate::merged_tree::MergedTree;
use crate::merged_tree::MergedTreeBuilder;
use crate::merged_tree::TreeDiffEntry;
use crate::object_id::ObjectId as _;
use crate::op_store::OperationId;
use crate::ref_name::WorkspaceName;
use crate::ref_name::WorkspaceNameBuf;
use crate::repo_path::RepoPath;
use crate::repo_path::RepoPathBuf;
use crate::repo_path::RepoPathComponent;
use crate::settings::UserSettings;
use crate::store::Store;
use crate::working_copy::CheckoutError;
use crate::working_copy::CheckoutStats;
use crate::working_copy::LockedWorkingCopy;
use crate::working_copy::ResetError;
use crate::working_copy::SnapshotError;
use crate::working_copy::SnapshotOptions;
use crate::working_copy::SnapshotProgress;
use crate::working_copy::SnapshotStats;
use crate::working_copy::UntrackedReason;
use crate::working_copy::WorkingCopy;
use crate::working_copy::WorkingCopyFactory;
use crate::working_copy::WorkingCopyStateError;
fn symlink_target_convert_to_store(path: &Path) -> Option<Cow<'_, str>> {
let path = path.to_str()?;
if std::path::MAIN_SEPARATOR == '/' {
Some(Cow::Borrowed(path))
} else {
// When storing the symlink target on Windows, convert "\" to "/", so that the
// symlink remains valid on Unix.
//
// Note that we don't use std::path to handle the conversion, because it
// performs poorly with Windows verbatim paths like \\?\Global\C:\file.txt.
Some(Cow::Owned(path.replace(std::path::MAIN_SEPARATOR_STR, "/")))
}
}
fn symlink_target_convert_to_disk(path: &str) -> PathBuf {
let path = if std::path::MAIN_SEPARATOR == '/' {
Cow::Borrowed(path)
} else {
// Use the main separator to reformat the input path to avoid creating a broken
// symlink with the incorrect separator "/".
//
// See https://github.com/jj-vcs/jj/issues/6934 for the relevant bug.
Cow::Owned(path.replace("/", std::path::MAIN_SEPARATOR_STR))
};
PathBuf::from(path.as_ref())
}
/// How to propagate executable bit changes in file metadata to/from the repo.
///
/// On Windows, executable bits are always ignored, but on Unix they are
/// respected by default, but may be ignored by user settings or if we find
/// that the filesystem of the working copy doesn't support executable bits.
#[derive(Clone, Copy, Debug)]
enum ExecChangePolicy {
Ignore,
#[cfg_attr(windows, expect(dead_code))]
Respect,
}
/// The executable bit change setting as exposed to the user.
#[derive(Clone, Copy, Debug, Default, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum ExecChangeSetting {
Ignore,
Respect,
#[default]
Auto,
}
impl ExecChangePolicy {
/// Get the executable bit policy based on user settings and executable bit
/// support in the working copy's state path.
///
/// On Unix we check whether executable bits are supported in the working
/// copy to determine respect/ignorance, but we default to respect.
#[cfg_attr(windows, expect(unused_variables))]
fn new(exec_change_setting: ExecChangeSetting, state_path: &Path) -> Self {
#[cfg(windows)]
return Self::Ignore;
#[cfg(unix)]
return match exec_change_setting {
ExecChangeSetting::Ignore => Self::Ignore,
ExecChangeSetting::Respect => Self::Respect,
ExecChangeSetting::Auto => {
match crate::file_util::check_executable_bit_support(state_path) {
Ok(false) => Self::Ignore,
Ok(true) => Self::Respect,
Err(err) => {
tracing::warn!(?err, "Error when checking for executable bit support");
Self::Respect
}
}
}
};
}
}
/// On-disk state of file executable as cached in the file states. This does
/// *not* necessarily equal the `executable` field of [`TreeValue::File`]: the
/// two are allowed to diverge if and only if we're ignoring executable bit
/// changes.
///
/// This will only ever be true on Windows if the repo is also being accessed
/// from a Unix version of jj, such as when accessed from WSL.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct ExecBit(bool);
impl ExecBit {
/// Get the executable bit for a tree value to write to the repo store.
///
/// If we're ignoring the executable bit, then we fallback to the previous
/// in-repo executable bit if present.
fn for_tree_value(
self,
exec_policy: ExecChangePolicy,
prev_in_repo: impl FnOnce() -> Option<bool>,
) -> bool {
match exec_policy {
ExecChangePolicy::Ignore => prev_in_repo().unwrap_or(false),
ExecChangePolicy::Respect => self.0,
}
}
/// Set the on-disk executable bit to be written based on the in-repo bit or
/// the previous on-disk executable bit.
///
/// On Windows, we return `false` because when we later write files, we
/// always create them anew, and the executable bit will be `false` even if
/// shared with a Unix machine.
///
/// `prev_on_disk` is a closure because it is somewhat expensive and is only
/// used if ignoring the executable bit on Unix.
fn new_from_repo(
in_repo: bool,
exec_policy: ExecChangePolicy,
prev_on_disk: impl FnOnce() -> Option<Self>,
) -> Self {
match exec_policy {
_ if cfg!(windows) => Self(false),
ExecChangePolicy::Ignore => prev_on_disk().unwrap_or(Self(false)),
ExecChangePolicy::Respect => Self(in_repo),
}
}
/// Load the on-disk executable bit from file metadata.
#[cfg_attr(windows, expect(unused_variables))]
fn new_from_disk(metadata: &Metadata) -> Self {
#[cfg(unix)]
return Self(metadata.permissions().mode() & 0o111 != 0);
#[cfg(windows)]
return Self(false);
}
}
/// Set the executable bit of a file on-disk. This is a no-op on Windows.
///
/// On Unix, we manually set the executable bit to the previous value on-disk.
/// This is necessary because we write all files by creating them new, so files
/// won't preserve their permissions naturally.
#[cfg_attr(windows, expect(unused_variables))]
fn set_executable(exec_bit: ExecBit, disk_path: &Path) -> Result<(), io::Error> {
#[cfg(unix)]
{
let mode = if exec_bit.0 { 0o755 } else { 0o644 };
fs::set_permissions(disk_path, fs::Permissions::from_mode(mode))?;
}
Ok(())
}
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum FileType {
Normal { exec_bit: ExecBit },
Symlink,
GitSubmodule,
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub struct MaterializedConflictData {
pub conflict_marker_len: u32,
}
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct FileState {
pub file_type: FileType,
pub mtime: MillisSinceEpoch,
pub size: u64,
pub materialized_conflict_data: Option<MaterializedConflictData>,
/* TODO: What else do we need here? Git stores a lot of fields.
* TODO: Could possibly handle case-insensitive file systems keeping an
* Option<PathBuf> with the actual path here. */
}
impl FileState {
/// Check whether a file state appears clean compared to a previous file
/// state, ignoring materialized conflict data.
pub fn is_clean(&self, old_file_state: &Self) -> bool {
self.file_type == old_file_state.file_type
&& self.mtime == old_file_state.mtime
&& self.size == old_file_state.size
}
/// Indicates that a file exists in the tree but that it needs to be
/// re-stat'ed on the next snapshot.
fn placeholder() -> Self {
Self {
file_type: FileType::Normal {
exec_bit: ExecBit(false),
},
mtime: MillisSinceEpoch(0),
size: 0,
materialized_conflict_data: None,
}
}
fn for_file(exec_bit: ExecBit, size: u64, metadata: &Metadata) -> Self {
Self {
file_type: FileType::Normal { exec_bit },
mtime: mtime_from_metadata(metadata),
size,
materialized_conflict_data: None,
}
}
fn for_symlink(metadata: &Metadata) -> Self {
// When using fscrypt, the reported size is not the content size. So if
// we were to record the content size here (like we do for regular files), we
// would end up thinking the file has changed every time we snapshot.
Self {
file_type: FileType::Symlink,
mtime: mtime_from_metadata(metadata),
size: metadata.len(),
materialized_conflict_data: None,
}
}
fn for_gitsubmodule() -> Self {
Self {
file_type: FileType::GitSubmodule,
mtime: MillisSinceEpoch(0),
size: 0,
materialized_conflict_data: None,
}
}
}
/// Owned map of path to file states, backed by proto data.
#[derive(Clone, Debug)]
struct FileStatesMap {
data: Vec<crate::protos::local_working_copy::FileStateEntry>,
}
impl FileStatesMap {
fn new() -> Self {
Self { data: Vec::new() }
}
fn from_proto(
mut data: Vec<crate::protos::local_working_copy::FileStateEntry>,
is_sorted: bool,
) -> Self {
if !is_sorted {
data.sort_unstable_by(|entry1, entry2| {
let path1 = RepoPath::from_internal_string(&entry1.path).unwrap();
let path2 = RepoPath::from_internal_string(&entry2.path).unwrap();
path1.cmp(path2)
});
}
debug_assert!(is_file_state_entries_proto_unique_and_sorted(&data));
Self { data }
}
/// Merges changed and deleted entries into this map. The changed entries
/// must be sorted by path.
fn merge_in(
&mut self,
changed_file_states: Vec<(RepoPathBuf, FileState)>,
deleted_files: &HashSet<RepoPathBuf>,
) {
if changed_file_states.is_empty() && deleted_files.is_empty() {
return;
}
debug_assert!(
changed_file_states.is_sorted_by(|(path1, _), (path2, _)| path1 < path2),
"changed_file_states must be sorted and have no duplicates"
);
self.data = itertools::merge_join_by(
mem::take(&mut self.data),
changed_file_states,
|old_entry, (changed_path, _)| {
RepoPath::from_internal_string(&old_entry.path)
.unwrap()
.cmp(changed_path)
},
)
.filter_map(|diff| match diff {
EitherOrBoth::Both(_, (path, state)) | EitherOrBoth::Right((path, state)) => {
debug_assert!(!deleted_files.contains(&path));
Some(file_state_entry_to_proto(path, &state))
}
EitherOrBoth::Left(entry) => {
let present =
!deleted_files.contains(RepoPath::from_internal_string(&entry.path).unwrap());
present.then_some(entry)
}
})
.collect();
}
fn clear(&mut self) {
self.data.clear();
}
/// Returns read-only map containing all file states.
fn all(&self) -> FileStates<'_> {
FileStates::from_sorted(&self.data)
}
}
/// Read-only map of path to file states, possibly filtered by path prefix.
#[derive(Clone, Copy, Debug)]
pub struct FileStates<'a> {
data: &'a [crate::protos::local_working_copy::FileStateEntry],
}
impl<'a> FileStates<'a> {
fn from_sorted(data: &'a [crate::protos::local_working_copy::FileStateEntry]) -> Self {
debug_assert!(is_file_state_entries_proto_unique_and_sorted(data));
Self { data }
}
/// Returns file states under the given directory path.
pub fn prefixed(&self, base: &RepoPath) -> Self {
let range = self.prefixed_range(base);
Self::from_sorted(&self.data[range])
}
/// Faster version of `prefixed("<dir>/<base>")`. Requires that all entries
/// share the same prefix `dir`.
fn prefixed_at(&self, dir: &RepoPath, base: &RepoPathComponent) -> Self {
let range = self.prefixed_range_at(dir, base);
Self::from_sorted(&self.data[range])
}
/// Returns true if this contains no entries.
pub fn is_empty(&self) -> bool {
self.data.is_empty()
}
/// Returns true if the given `path` exists.
pub fn contains_path(&self, path: &RepoPath) -> bool {
self.exact_position(path).is_some()
}
/// Returns file state for the given `path`.
pub fn get(&self, path: &RepoPath) -> Option<FileState> {
let pos = self.exact_position(path)?;
let (_, state) = file_state_entry_from_proto(&self.data[pos]);
Some(state)
}
/// Returns the executable bit state if `path` is a normal file.
pub fn get_exec_bit(&self, path: &RepoPath) -> Option<ExecBit> {
match self.get(path)?.file_type {
FileType::Normal { exec_bit } => Some(exec_bit),
FileType::Symlink | FileType::GitSubmodule => None,
}
}
/// Faster version of `get("<dir>/<name>")`. Requires that all entries share
/// the same prefix `dir`.
fn get_at(&self, dir: &RepoPath, name: &RepoPathComponent) -> Option<FileState> {
let pos = self.exact_position_at(dir, name)?;
let (_, state) = file_state_entry_from_proto(&self.data[pos]);
Some(state)
}
fn exact_position(&self, path: &RepoPath) -> Option<usize> {
self.data
.binary_search_by(|entry| {
RepoPath::from_internal_string(&entry.path)
.unwrap()
.cmp(path)
})
.ok()
}
fn exact_position_at(&self, dir: &RepoPath, name: &RepoPathComponent) -> Option<usize> {
debug_assert!(self.paths().all(|path| path.starts_with(dir)));
let slash_len = usize::from(!dir.is_root());
let prefix_len = dir.as_internal_file_string().len() + slash_len;
self.data
.binary_search_by(|entry| {
let tail = entry.path.get(prefix_len..).unwrap_or("");
match tail.split_once('/') {
// "<name>/*" > "<name>"
Some((pre, _)) => pre.cmp(name.as_internal_str()).then(Ordering::Greater),
None => tail.cmp(name.as_internal_str()),
}
})
.ok()
}
fn prefixed_range(&self, base: &RepoPath) -> Range<usize> {
let start = self
.data
.partition_point(|entry| RepoPath::from_internal_string(&entry.path).unwrap() < base);
let len = self.data[start..].partition_point(|entry| {
RepoPath::from_internal_string(&entry.path)
.unwrap()
.starts_with(base)
});
start..(start + len)
}
fn prefixed_range_at(&self, dir: &RepoPath, base: &RepoPathComponent) -> Range<usize> {
debug_assert!(self.paths().all(|path| path.starts_with(dir)));
let slash_len = usize::from(!dir.is_root());
let prefix_len = dir.as_internal_file_string().len() + slash_len;
let start = self.data.partition_point(|entry| {
let tail = entry.path.get(prefix_len..).unwrap_or("");
let entry_name = tail.split_once('/').map_or(tail, |(name, _)| name);
entry_name < base.as_internal_str()
});
let len = self.data[start..].partition_point(|entry| {
let tail = entry.path.get(prefix_len..).unwrap_or("");
let entry_name = tail.split_once('/').map_or(tail, |(name, _)| name);
entry_name == base.as_internal_str()
});
start..(start + len)
}
/// Iterates file state entries sorted by path.
pub fn iter(&self) -> FileStatesIter<'a> {
self.data.iter().map(file_state_entry_from_proto)
}
/// Iterates sorted file paths.
pub fn paths(&self) -> impl ExactSizeIterator<Item = &'a RepoPath> + use<'a> {
self.data
.iter()
.map(|entry| RepoPath::from_internal_string(&entry.path).unwrap())
}
}
type FileStatesIter<'a> = iter::Map<
slice::Iter<'a, crate::protos::local_working_copy::FileStateEntry>,
fn(&crate::protos::local_working_copy::FileStateEntry) -> (&RepoPath, FileState),
>;
impl<'a> IntoIterator for FileStates<'a> {
type Item = (&'a RepoPath, FileState);
type IntoIter = FileStatesIter<'a>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
fn file_state_from_proto(proto: &crate::protos::local_working_copy::FileState) -> FileState {
let file_type = match proto.file_type() {
crate::protos::local_working_copy::FileType::Normal => FileType::Normal {
exec_bit: ExecBit(false),
},
// On Windows, `FileType::Executable` can exist if the repo is being
// shared with a Unix version of jj, such as when accessed from WSL.
crate::protos::local_working_copy::FileType::Executable => FileType::Normal {
exec_bit: ExecBit(true),
},
crate::protos::local_working_copy::FileType::Symlink => FileType::Symlink,
crate::protos::local_working_copy::FileType::Conflict => FileType::Normal {
exec_bit: ExecBit(false),
},
crate::protos::local_working_copy::FileType::GitSubmodule => FileType::GitSubmodule,
};
FileState {
file_type,
mtime: MillisSinceEpoch(proto.mtime_millis_since_epoch),
size: proto.size,
materialized_conflict_data: proto.materialized_conflict_data.as_ref().map(|data| {
MaterializedConflictData {
conflict_marker_len: data.conflict_marker_len,
}
}),
}
}
fn file_state_to_proto(file_state: &FileState) -> crate::protos::local_working_copy::FileState {
let mut proto = crate::protos::local_working_copy::FileState::default();
let file_type = match &file_state.file_type {
FileType::Normal { exec_bit } => {
if exec_bit.0 {
crate::protos::local_working_copy::FileType::Executable
} else {
crate::protos::local_working_copy::FileType::Normal
}
}
FileType::Symlink => crate::protos::local_working_copy::FileType::Symlink,
FileType::GitSubmodule => crate::protos::local_working_copy::FileType::GitSubmodule,
};
proto.file_type = file_type as i32;
proto.mtime_millis_since_epoch = file_state.mtime.0;
proto.size = file_state.size;
proto.materialized_conflict_data = file_state.materialized_conflict_data.map(|data| {
crate::protos::local_working_copy::MaterializedConflictData {
conflict_marker_len: data.conflict_marker_len,
}
});
proto
}
fn file_state_entry_from_proto(
proto: &crate::protos::local_working_copy::FileStateEntry,
) -> (&RepoPath, FileState) {
let path = RepoPath::from_internal_string(&proto.path).unwrap();
(path, file_state_from_proto(proto.state.as_ref().unwrap()))
}
fn file_state_entry_to_proto(
path: RepoPathBuf,
state: &FileState,
) -> crate::protos::local_working_copy::FileStateEntry {
crate::protos::local_working_copy::FileStateEntry {
path: path.into_internal_string(),
state: Some(file_state_to_proto(state)),
}
}
fn is_file_state_entries_proto_unique_and_sorted(
data: &[crate::protos::local_working_copy::FileStateEntry],
) -> bool {
data.iter()
.map(|entry| RepoPath::from_internal_string(&entry.path).unwrap())
.is_sorted_by(|path1, path2| path1 < path2)
}
fn sparse_patterns_from_proto(
proto: Option<&crate::protos::local_working_copy::SparsePatterns>,
) -> Vec<RepoPathBuf> {
let mut sparse_patterns = vec![];
if let Some(proto_sparse_patterns) = proto {
for prefix in &proto_sparse_patterns.prefixes {
sparse_patterns.push(RepoPathBuf::from_internal_string(prefix).unwrap());
}
} else {
// For compatibility with old working copies.
// TODO: Delete this is late 2022 or so.
sparse_patterns.push(RepoPathBuf::root());
}
sparse_patterns
}
/// Creates intermediate directories from the `working_copy_path` to the
/// `repo_path` parent. Returns disk path for the `repo_path` file.
///
/// If an intermediate directory exists and if it is a file or symlink, this
/// function returns `Ok(None)` to signal that the path should be skipped.
/// The `working_copy_path` directory may be a symlink.
///
/// If an existing or newly-created sub directory points to ".git" or ".jj",
/// this function returns an error.
///
/// Note that this does not prevent TOCTOU bugs caused by concurrent checkouts.
/// Another process may remove the directory created by this function and put a
/// symlink there.
fn create_parent_dirs(
working_copy_path: &Path,
repo_path: &RepoPath,
) -> Result<Option<PathBuf>, CheckoutError> {
let (parent_path, basename) = repo_path.split().expect("repo path shouldn't be root");
let mut dir_path = working_copy_path.to_owned();
for c in parent_path.components() {
// Ensure that the name is a normal entry of the current dir_path.
dir_path.push(c.to_fs_name().map_err(|err| err.with_path(repo_path))?);
// A directory named ".git" or ".jj" can be temporarily created. It
// might trick workspace path discovery, but is harmless so long as the
// directory is empty.
let new_dir_created = match fs::create_dir(&dir_path) {
Ok(()) => true, // New directory
Err(err) => match dir_path.symlink_metadata() {
Ok(m) if m.is_dir() => false, // Existing directory
Ok(_) => {
return Ok(None); // Skip existing file or symlink
}
Err(_) => {
return Err(CheckoutError::Other {
message: format!(
"Failed to create parent directories for {}",
repo_path.to_fs_path_unchecked(working_copy_path).display(),
),
err: err.into(),
});
}
},
};
// Invalid component (e.g. "..") should have been rejected.
// The current dir_path should be an entry of dir_path.parent().
reject_reserved_existing_path(&dir_path).inspect_err(|_| {
if new_dir_created {
fs::remove_dir(&dir_path).ok();
}
})?;
}
let mut file_path = dir_path;
file_path.push(
basename
.to_fs_name()
.map_err(|err| err.with_path(repo_path))?,
);
Ok(Some(file_path))
}
/// Removes existing file named `disk_path` if any. Returns `Ok(true)` if the
/// file was there and got removed, meaning that new file can be safely created.
///
/// If the existing file points to ".git" or ".jj", this function returns an
/// error.
fn remove_old_file(disk_path: &Path) -> Result<bool, CheckoutError> {
reject_reserved_existing_path(disk_path)?;
match fs::remove_file(disk_path) {
Ok(()) => Ok(true),
Err(err) if err.kind() == io::ErrorKind::NotFound => Ok(false),
// TODO: Use io::ErrorKind::IsADirectory if it gets stabilized
Err(_) if disk_path.symlink_metadata().is_ok_and(|m| m.is_dir()) => Ok(false),
Err(err) => Err(CheckoutError::Other {
message: format!("Failed to remove file {}", disk_path.display()),
err: err.into(),
}),
}
}
/// Checks if new file or symlink named `disk_path` can be created.
///
/// If the file already exists, this function return `Ok(false)` to signal
/// that the path should be skipped.
///
/// If the path may point to ".git" or ".jj" entry, this function returns an
/// error.
///
/// This function can fail if `disk_path.parent()` isn't a directory.
fn can_create_new_file(disk_path: &Path) -> Result<bool, CheckoutError> {
// New file or symlink will be created by caller. If it were pointed to by
// name ".git" or ".jj", git/jj CLI could be tricked to load configuration
// from an attacker-controlled location. So we first test the path by
// creating an empty file.
let new_file = match OpenOptions::new()
.write(true)
.create_new(true) // Don't overwrite, don't follow symlink
.open(disk_path)
{
Ok(file) => Some(file),
Err(err) if err.kind() == io::ErrorKind::AlreadyExists => None,
// Workaround for "Access is denied. (os error 5)" error on Windows.
Err(_) => match disk_path.symlink_metadata() {
Ok(_) => None,
Err(err) => {
return Err(CheckoutError::Other {
message: format!("Failed to stat {}", disk_path.display()),
err: err.into(),
});
}
},
};
let new_file_created = new_file.is_some();
if let Some(new_file) = new_file {
reject_reserved_existing_file(new_file, disk_path).inspect_err(|_| {
// We keep the error from `reject_reserved_existing_file`
fs::remove_file(disk_path).ok();
})?;
fs::remove_file(disk_path).map_err(|err| CheckoutError::Other {
message: format!("Failed to remove temporary file {}", disk_path.display()),
err: err.into(),
})?;
} else {
reject_reserved_existing_path(disk_path)?;
}
Ok(new_file_created)
}
const RESERVED_DIR_NAMES: &[&str] = &[".git", ".jj"];
fn same_file_handle_from_path(disk_path: &Path) -> io::Result<Option<same_file::Handle>> {
match same_file::Handle::from_path(disk_path) {
Ok(handle) => Ok(Some(handle)),
Err(err) if err.kind() == io::ErrorKind::NotFound => Ok(None),
Err(err) => Err(err),
}
}
/// Wrapper for [`reject_reserved_existing_handle`] which avoids a syscall
/// by converting the provided `file` to a `same_file::Handle` via its
/// file descriptor.
///
/// See [`reject_reserved_existing_handle`] for more info.
fn reject_reserved_existing_file(file: File, disk_path: &Path) -> Result<(), CheckoutError> {
// Note: since the file is open, we don't expect that it's possible for
// `io::ErrorKind::NotFound` to be a possible error returned here.
let file_handle = same_file::Handle::from_file(file).map_err(|err| CheckoutError::Other {
message: format!("Failed to validate path {}", disk_path.display()),
err: err.into(),
})?;
reject_reserved_existing_handle(file_handle, disk_path)
}
/// Wrapper for [`reject_reserved_existing_handle`] which converts
/// the provided `disk_path` to a `same_file::Handle`.
///
/// See [`reject_reserved_existing_handle`] for more info.
///
/// # Remarks
///
/// Incurs an additional syscall cost to open and close the file
/// descriptor/`HANDLE` for `disk_path`.
fn reject_reserved_existing_path(disk_path: &Path) -> Result<(), CheckoutError> {
let Some(disk_handle) =
same_file_handle_from_path(disk_path).map_err(|err| CheckoutError::Other {
message: format!("Failed to validate path {}", disk_path.display()),
err: err.into(),
})?
else {
// If the existing disk_path pointed to the reserved path, we would have
// gotten a handle back. Since we got nothing, the file does not exist
// and cannot be a reserved path name.
return Ok(());
};
reject_reserved_existing_handle(disk_handle, disk_path)
}
/// Suppose the `disk_path` exists, checks if the last component points to
/// ".git" or ".jj" in the same parent directory.
///
/// `disk_handle` is expected to be a handle to the file described by
/// `disk_path`.
///
/// # Remarks
///
/// Incurs a syscall cost to open and close a file descriptor/`HANDLE` for
/// each filename in `RESERVED_DIR_NAMES`.
fn reject_reserved_existing_handle(
disk_handle: same_file::Handle,
disk_path: &Path,
) -> Result<(), CheckoutError> {
let parent_dir_path = disk_path.parent().expect("content path shouldn't be root");
for name in RESERVED_DIR_NAMES {
let reserved_path = parent_dir_path.join(name);
let Some(reserved_handle) =
same_file_handle_from_path(&reserved_path).map_err(|err| CheckoutError::Other {
message: format!("Failed to validate path {}", disk_path.display()),
err: err.into(),
})?
else {
// If the existing disk_path pointed to the reserved path, we would have
// gotten a handle back. Since we got nothing, the file does not exist
// and cannot be a reserved path name.
continue;
};
if disk_handle == reserved_handle {
return Err(CheckoutError::ReservedPathComponent {
path: disk_path.to_owned(),
name,
});
}
}
Ok(())
}
fn mtime_from_metadata(metadata: &Metadata) -> MillisSinceEpoch {
let time = metadata
.modified()
.expect("File mtime not supported on this platform?");
let since_epoch = time
.duration_since(UNIX_EPOCH)
.expect("mtime before unix epoch");
MillisSinceEpoch(
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/revset.rs | lib/src/revset.rs | // Copyright 2021 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::any::Any;
use std::collections::HashMap;
use std::collections::hash_map;
use std::convert::Infallible;
use std::fmt;
use std::ops::ControlFlow;
use std::ops::Range;
use std::sync::Arc;
use std::sync::LazyLock;
use itertools::Itertools as _;
use thiserror::Error;
use crate::backend::BackendError;
use crate::backend::ChangeId;
use crate::backend::CommitId;
use crate::commit::Commit;
use crate::dsl_util;
use crate::dsl_util::collect_similar;
use crate::fileset;
use crate::fileset::FilesetDiagnostics;
use crate::fileset::FilesetExpression;
use crate::graph::GraphNode;
use crate::id_prefix::IdPrefixContext;
use crate::id_prefix::IdPrefixIndex;
use crate::index::ResolvedChangeTargets;
use crate::object_id::HexPrefix;
use crate::object_id::PrefixResolution;
use crate::op_store::RefTarget;
use crate::op_store::RemoteRefState;
use crate::op_walk;
use crate::ref_name::RemoteName;
use crate::ref_name::RemoteRefSymbol;
use crate::ref_name::RemoteRefSymbolBuf;
use crate::ref_name::WorkspaceName;
use crate::ref_name::WorkspaceNameBuf;
use crate::repo::ReadonlyRepo;
use crate::repo::Repo;
use crate::repo::RepoLoaderError;
use crate::repo_path::RepoPathUiConverter;
use crate::revset_parser;
pub use crate::revset_parser::BinaryOp;
pub use crate::revset_parser::ExpressionKind;
pub use crate::revset_parser::ExpressionNode;
pub use crate::revset_parser::FunctionCallNode;
pub use crate::revset_parser::RevsetAliasesMap;
pub use crate::revset_parser::RevsetDiagnostics;
pub use crate::revset_parser::RevsetParseError;
pub use crate::revset_parser::RevsetParseErrorKind;
pub use crate::revset_parser::UnaryOp;
pub use crate::revset_parser::expect_literal;
pub use crate::revset_parser::parse_program;
pub use crate::revset_parser::parse_program_with_modifier;
pub use crate::revset_parser::parse_symbol;
use crate::store::Store;
use crate::str_util::StringExpression;
use crate::str_util::StringPattern;
use crate::time_util::DatePattern;
use crate::time_util::DatePatternContext;
/// Error occurred during symbol resolution.
#[derive(Debug, Error)]
pub enum RevsetResolutionError {
#[error("Revision `{name}` doesn't exist")]
NoSuchRevision {
name: String,
candidates: Vec<String>,
},
#[error("Workspace `{}` doesn't have a working-copy commit", name.as_symbol())]
WorkspaceMissingWorkingCopy { name: WorkspaceNameBuf },
#[error("An empty string is not a valid revision")]
EmptyString,
#[error("Commit ID prefix `{0}` is ambiguous")]
AmbiguousCommitIdPrefix(String),
#[error("Change ID prefix `{0}` is ambiguous")]
AmbiguousChangeIdPrefix(String),
#[error("Change ID `{symbol}` is divergent")]
DivergentChangeId {
symbol: String,
visible_targets: Vec<(usize, CommitId)>,
},
#[error("Name `{symbol}` is conflicted")]
ConflictedRef {
kind: &'static str,
symbol: String,
targets: Vec<CommitId>,
},
#[error("Unexpected error from commit backend")]
Backend(#[source] BackendError),
#[error(transparent)]
Other(#[from] Box<dyn std::error::Error + Send + Sync>),
}
/// Error occurred during revset evaluation.
#[derive(Debug, Error)]
pub enum RevsetEvaluationError {
#[error("Unexpected error from commit backend")]
Backend(#[from] BackendError),
#[error(transparent)]
Other(Box<dyn std::error::Error + Send + Sync>),
}
impl RevsetEvaluationError {
// TODO: Create a higher-level error instead of putting non-BackendErrors in a
// BackendError
pub fn into_backend_error(self) -> BackendError {
match self {
Self::Backend(err) => err,
Self::Other(err) => BackendError::Other(err),
}
}
}
// assumes index has less than u64::MAX entries.
pub const GENERATION_RANGE_FULL: Range<u64> = 0..u64::MAX;
pub const GENERATION_RANGE_EMPTY: Range<u64> = 0..0;
pub const PARENTS_RANGE_FULL: Range<u32> = 0..u32::MAX;
/// Global flag applied to the entire expression.
///
/// The core revset engine doesn't use this value. It's up to caller to
/// interpret it to change the evaluation behavior.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum RevsetModifier {
/// Expression can be evaluated to multiple revisions even if a single
/// revision is expected by default.
All,
}
/// Symbol or function to be resolved to `CommitId`s.
#[derive(Clone, Debug)]
pub enum RevsetCommitRef {
WorkingCopy(WorkspaceNameBuf),
WorkingCopies,
Symbol(String),
RemoteSymbol(RemoteRefSymbolBuf),
ChangeId(HexPrefix),
CommitId(HexPrefix),
Bookmarks(StringExpression),
RemoteBookmarks {
bookmark: StringExpression,
remote: StringExpression,
remote_ref_state: Option<RemoteRefState>,
},
Tags(StringExpression),
GitRefs,
GitHead,
}
/// A custom revset filter expression, defined by an extension.
pub trait RevsetFilterExtension: std::fmt::Debug + Any + Send + Sync {
/// Returns true iff this filter matches the specified commit.
fn matches_commit(&self, commit: &Commit) -> bool;
}
impl dyn RevsetFilterExtension {
/// Returns reference of the implementation type.
pub fn downcast_ref<T: RevsetFilterExtension>(&self) -> Option<&T> {
(self as &dyn Any).downcast_ref()
}
}
#[derive(Clone, Debug)]
pub enum RevsetFilterPredicate {
/// Commits with number of parents in the range.
ParentCount(Range<u32>),
/// Commits with description matching the pattern.
Description(StringExpression),
/// Commits with first line of the description matching the pattern.
Subject(StringExpression),
/// Commits with author name matching the pattern.
AuthorName(StringExpression),
/// Commits with author email matching the pattern.
AuthorEmail(StringExpression),
/// Commits with author dates matching the given date pattern.
AuthorDate(DatePattern),
/// Commits with committer name matching the pattern.
CommitterName(StringExpression),
/// Commits with committer email matching the pattern.
CommitterEmail(StringExpression),
/// Commits with committer dates matching the given date pattern.
CommitterDate(DatePattern),
/// Commits modifying the paths specified by the fileset.
File(FilesetExpression),
/// Commits containing diffs matching the `text` pattern within the `files`.
DiffContains {
text: StringExpression,
files: FilesetExpression,
},
/// Commits with conflicts
HasConflict,
/// Commits that are cryptographically signed.
Signed,
/// Custom predicates provided by extensions
Extension(Arc<dyn RevsetFilterExtension>),
}
mod private {
/// Defines [`RevsetExpression`] variants depending on resolution state.
pub trait ExpressionState {
type CommitRef: Clone;
type Operation: Clone;
}
// Not constructible because these state types just define associated types.
#[derive(Debug)]
pub enum UserExpressionState {}
#[derive(Debug)]
pub enum ResolvedExpressionState {}
}
use private::ExpressionState;
use private::ResolvedExpressionState;
use private::UserExpressionState;
impl ExpressionState for UserExpressionState {
type CommitRef = RevsetCommitRef;
type Operation = String;
}
impl ExpressionState for ResolvedExpressionState {
type CommitRef = Infallible;
type Operation = Infallible;
}
/// [`RevsetExpression`] that may contain unresolved commit refs.
pub type UserRevsetExpression = RevsetExpression<UserExpressionState>;
/// [`RevsetExpression`] that never contains unresolved commit refs.
pub type ResolvedRevsetExpression = RevsetExpression<ResolvedExpressionState>;
/// Tree of revset expressions describing DAG operations.
///
/// Use [`UserRevsetExpression`] or [`ResolvedRevsetExpression`] to construct
/// expression of that state.
#[derive(Clone, Debug)]
pub enum RevsetExpression<St: ExpressionState> {
None,
All,
VisibleHeads,
/// Visible heads and all referenced commits within the current expression
/// scope. Used as the default of `Range`/`DagRange` heads.
VisibleHeadsOrReferenced,
Root,
Commits(Vec<CommitId>),
CommitRef(St::CommitRef),
Ancestors {
heads: Arc<Self>,
generation: Range<u64>,
parents_range: Range<u32>,
},
Descendants {
roots: Arc<Self>,
generation: Range<u64>,
},
// Commits that are ancestors of "heads" but not ancestors of "roots"
Range {
roots: Arc<Self>,
heads: Arc<Self>,
generation: Range<u64>,
// Parents range is only used for traversing heads, not roots
parents_range: Range<u32>,
},
// Commits that are descendants of "roots" and ancestors of "heads"
DagRange {
roots: Arc<Self>,
heads: Arc<Self>,
// TODO: maybe add generation_from_roots/heads?
},
// Commits reachable from "sources" within "domain"
Reachable {
sources: Arc<Self>,
domain: Arc<Self>,
},
Heads(Arc<Self>),
/// Heads of the set of commits which are ancestors of `heads` but are not
/// ancestors of `roots`, and which also are contained in `filter`.
HeadsRange {
roots: Arc<Self>,
heads: Arc<Self>,
parents_range: Range<u32>,
filter: Arc<Self>,
},
Roots(Arc<Self>),
ForkPoint(Arc<Self>),
Bisect(Arc<Self>),
HasSize {
candidates: Arc<Self>,
count: usize,
},
Latest {
candidates: Arc<Self>,
count: usize,
},
Filter(RevsetFilterPredicate),
/// Marker for subtree that should be intersected as filter.
AsFilter(Arc<Self>),
/// Resolves symbols and visibility at the specified operation.
AtOperation {
operation: St::Operation,
candidates: Arc<Self>,
},
/// Makes `All` include the commits and their ancestors in addition to the
/// visible heads.
WithinReference {
candidates: Arc<Self>,
/// Commits explicitly referenced within the scope.
commits: Vec<CommitId>,
},
/// Resolves visibility within the specified repo state.
WithinVisibility {
candidates: Arc<Self>,
/// Copy of `repo.view().heads()` at the operation.
visible_heads: Vec<CommitId>,
},
Coalesce(Arc<Self>, Arc<Self>),
Present(Arc<Self>),
NotIn(Arc<Self>),
Union(Arc<Self>, Arc<Self>),
Intersection(Arc<Self>, Arc<Self>),
Difference(Arc<Self>, Arc<Self>),
}
// Leaf expression that never contains unresolved commit refs, which can be
// either user or resolved expression
impl<St: ExpressionState> RevsetExpression<St> {
pub fn none() -> Arc<Self> {
Arc::new(Self::None)
}
/// Ancestors of visible heads and all referenced commits within the current
/// expression scope, which may include hidden commits.
pub fn all() -> Arc<Self> {
Arc::new(Self::All)
}
pub fn visible_heads() -> Arc<Self> {
Arc::new(Self::VisibleHeads)
}
fn visible_heads_or_referenced() -> Arc<Self> {
Arc::new(Self::VisibleHeadsOrReferenced)
}
pub fn root() -> Arc<Self> {
Arc::new(Self::Root)
}
pub fn commit(commit_id: CommitId) -> Arc<Self> {
Self::commits(vec![commit_id])
}
pub fn commits(commit_ids: Vec<CommitId>) -> Arc<Self> {
Arc::new(Self::Commits(commit_ids))
}
pub fn filter(predicate: RevsetFilterPredicate) -> Arc<Self> {
Arc::new(Self::Filter(predicate))
}
/// Find any empty commits.
pub fn is_empty() -> Arc<Self> {
Self::filter(RevsetFilterPredicate::File(FilesetExpression::all())).negated()
}
}
// Leaf expression that represents unresolved commit refs
impl<St: ExpressionState<CommitRef = RevsetCommitRef>> RevsetExpression<St> {
pub fn working_copy(name: WorkspaceNameBuf) -> Arc<Self> {
Arc::new(Self::CommitRef(RevsetCommitRef::WorkingCopy(name)))
}
pub fn working_copies() -> Arc<Self> {
Arc::new(Self::CommitRef(RevsetCommitRef::WorkingCopies))
}
pub fn symbol(value: String) -> Arc<Self> {
Arc::new(Self::CommitRef(RevsetCommitRef::Symbol(value)))
}
pub fn remote_symbol(value: RemoteRefSymbolBuf) -> Arc<Self> {
let commit_ref = RevsetCommitRef::RemoteSymbol(value);
Arc::new(Self::CommitRef(commit_ref))
}
pub fn change_id_prefix(prefix: HexPrefix) -> Arc<Self> {
let commit_ref = RevsetCommitRef::ChangeId(prefix);
Arc::new(Self::CommitRef(commit_ref))
}
pub fn commit_id_prefix(prefix: HexPrefix) -> Arc<Self> {
let commit_ref = RevsetCommitRef::CommitId(prefix);
Arc::new(Self::CommitRef(commit_ref))
}
pub fn bookmarks(expression: StringExpression) -> Arc<Self> {
Arc::new(Self::CommitRef(RevsetCommitRef::Bookmarks(expression)))
}
pub fn remote_bookmarks(
bookmark: StringExpression,
remote: StringExpression,
remote_ref_state: Option<RemoteRefState>,
) -> Arc<Self> {
Arc::new(Self::CommitRef(RevsetCommitRef::RemoteBookmarks {
bookmark,
remote,
remote_ref_state,
}))
}
pub fn tags(expression: StringExpression) -> Arc<Self> {
Arc::new(Self::CommitRef(RevsetCommitRef::Tags(expression)))
}
pub fn git_refs() -> Arc<Self> {
Arc::new(Self::CommitRef(RevsetCommitRef::GitRefs))
}
pub fn git_head() -> Arc<Self> {
Arc::new(Self::CommitRef(RevsetCommitRef::GitHead))
}
}
// Compound expression
impl<St: ExpressionState> RevsetExpression<St> {
    /// The latest `count` commits among the candidates in `self`.
    pub fn latest(self: &Arc<Self>, count: usize) -> Arc<Self> {
        Arc::new(Self::Latest {
            candidates: self.clone(),
            count,
        })
    }
    /// Commits in `self` that don't have descendants in `self`.
    pub fn heads(self: &Arc<Self>) -> Arc<Self> {
        Arc::new(Self::Heads(self.clone()))
    }
    /// Commits in `self` that don't have ancestors in `self`.
    pub fn roots(self: &Arc<Self>) -> Arc<Self> {
        Arc::new(Self::Roots(self.clone()))
    }
    /// Parents of `self`.
    pub fn parents(self: &Arc<Self>) -> Arc<Self> {
        self.ancestors_at(1)
    }
    /// Ancestors of `self`, including `self`.
    pub fn ancestors(self: &Arc<Self>) -> Arc<Self> {
        self.ancestors_range(GENERATION_RANGE_FULL)
    }
    /// Ancestors of `self` at an offset of `generation` behind `self`.
    /// The `generation` offset is zero-based starting from `self`.
    pub fn ancestors_at(self: &Arc<Self>, generation: u64) -> Arc<Self> {
        self.ancestors_range(generation..generation.saturating_add(1))
    }
    /// Ancestors of `self` in the given range.
    pub fn ancestors_range(self: &Arc<Self>, generation_range: Range<u64>) -> Arc<Self> {
        Arc::new(Self::Ancestors {
            heads: self.clone(),
            generation: generation_range,
            parents_range: PARENTS_RANGE_FULL,
        })
    }
    /// First-parent ancestors of `self`, including `self`.
    pub fn first_ancestors(self: &Arc<Self>) -> Arc<Self> {
        self.first_ancestors_range(GENERATION_RANGE_FULL)
    }
    /// First-parent ancestors of `self` at an offset of `generation` behind
    /// `self`. The `generation` offset is zero-based starting from `self`.
    pub fn first_ancestors_at(self: &Arc<Self>, generation: u64) -> Arc<Self> {
        self.first_ancestors_range(generation..generation.saturating_add(1))
    }
    /// First-parent ancestors of `self` in the given range.
    pub fn first_ancestors_range(self: &Arc<Self>, generation_range: Range<u64>) -> Arc<Self> {
        Arc::new(Self::Ancestors {
            heads: self.clone(),
            generation: generation_range,
            // Only follow the first parent of each commit
            parents_range: 0..1,
        })
    }
    /// Children of `self`.
    pub fn children(self: &Arc<Self>) -> Arc<Self> {
        self.descendants_at(1)
    }
    /// Descendants of `self`, including `self`.
    pub fn descendants(self: &Arc<Self>) -> Arc<Self> {
        self.descendants_range(GENERATION_RANGE_FULL)
    }
    /// Descendants of `self` at an offset of `generation` ahead of `self`.
    /// The `generation` offset is zero-based starting from `self`.
    pub fn descendants_at(self: &Arc<Self>, generation: u64) -> Arc<Self> {
        self.descendants_range(generation..generation.saturating_add(1))
    }
    /// Descendants of `self` in the given range.
    pub fn descendants_range(self: &Arc<Self>, generation_range: Range<u64>) -> Arc<Self> {
        Arc::new(Self::Descendants {
            roots: self.clone(),
            generation: generation_range,
        })
    }
    /// Fork point (best common ancestors) of `self`.
    pub fn fork_point(self: &Arc<Self>) -> Arc<Self> {
        Arc::new(Self::ForkPoint(self.clone()))
    }
    /// Commits with ~half of the descendants in `self`.
    pub fn bisect(self: &Arc<Self>) -> Arc<Self> {
        Arc::new(Self::Bisect(self.clone()))
    }
    /// Commits in `self`, the number of which must be exactly equal to `count`.
    pub fn has_size(self: &Arc<Self>, count: usize) -> Arc<Self> {
        Arc::new(Self::HasSize {
            candidates: self.clone(),
            count,
        })
    }
    /// Filter all commits by `predicate` in `self`.
    pub fn filtered(self: &Arc<Self>, predicate: RevsetFilterPredicate) -> Arc<Self> {
        self.intersection(&Self::filter(predicate))
    }
    /// Commits that are descendants of `self` and ancestors of `heads`, both
    /// inclusive.
    pub fn dag_range_to(self: &Arc<Self>, heads: &Arc<Self>) -> Arc<Self> {
        Arc::new(Self::DagRange {
            roots: self.clone(),
            heads: heads.clone(),
        })
    }
    /// Connects any ancestors and descendants in the set by adding the commits
    /// between them.
    pub fn connected(self: &Arc<Self>) -> Arc<Self> {
        self.dag_range_to(self)
    }
    /// All commits within `domain` reachable from this set of commits, by
    /// traversing either parent or child edges.
    pub fn reachable(self: &Arc<Self>, domain: &Arc<Self>) -> Arc<Self> {
        Arc::new(Self::Reachable {
            sources: self.clone(),
            domain: domain.clone(),
        })
    }
    /// Commits reachable from `heads` but not from `self`.
    pub fn range(self: &Arc<Self>, heads: &Arc<Self>) -> Arc<Self> {
        Arc::new(Self::Range {
            roots: self.clone(),
            heads: heads.clone(),
            generation: GENERATION_RANGE_FULL,
            parents_range: PARENTS_RANGE_FULL,
        })
    }
    /// Suppresses name resolution error within `self`.
    pub fn present(self: &Arc<Self>) -> Arc<Self> {
        Arc::new(Self::Present(self.clone()))
    }
    /// Commits that are not in `self`, i.e. the complement of `self`.
    pub fn negated(self: &Arc<Self>) -> Arc<Self> {
        Arc::new(Self::NotIn(self.clone()))
    }
    /// Commits that are in `self` or in `other` (or both).
    pub fn union(self: &Arc<Self>, other: &Arc<Self>) -> Arc<Self> {
        Arc::new(Self::Union(self.clone(), other.clone()))
    }
    /// Commits that are in any of the `expressions`.
    pub fn union_all(expressions: &[Arc<Self>]) -> Arc<Self> {
        to_binary_expression(expressions, &Self::none, &Self::union)
    }
    /// Commits that are in `self` and in `other`.
    pub fn intersection(self: &Arc<Self>, other: &Arc<Self>) -> Arc<Self> {
        Arc::new(Self::Intersection(self.clone(), other.clone()))
    }
    /// Commits that are in `self` but not in `other`.
    pub fn minus(self: &Arc<Self>, other: &Arc<Self>) -> Arc<Self> {
        Arc::new(Self::Difference(self.clone(), other.clone()))
    }
    /// Commits that are in the first expression in `expressions` that is not
    /// `none()`.
    pub fn coalesce(expressions: &[Arc<Self>]) -> Arc<Self> {
        to_binary_expression(expressions, &Self::none, &Self::coalesce2)
    }
    /// Binary building block of [`Self::coalesce()`].
    fn coalesce2(self: &Arc<Self>, other: &Arc<Self>) -> Arc<Self> {
        Arc::new(Self::Coalesce(self.clone(), other.clone()))
    }
}
impl<St: ExpressionState<CommitRef = RevsetCommitRef>> RevsetExpression<St> {
    /// Returns the symbol string if this expression is a plain symbol
    /// reference; `None` for any other expression kind.
    pub fn as_symbol(&self) -> Option<&str> {
        let Self::CommitRef(RevsetCommitRef::Symbol(name)) = self else {
            return None;
        };
        Some(name)
    }
}
impl UserRevsetExpression {
    /// Resolve a user-provided expression. Symbols will be resolved using the
    /// provided [`SymbolResolver`].
    pub fn resolve_user_expression(
        &self,
        repo: &dyn Repo,
        symbol_resolver: &SymbolResolver,
    ) -> Result<Arc<ResolvedRevsetExpression>, RevsetResolutionError> {
        // Resolution may fail (e.g. on unknown symbols), hence the dedicated
        // error type.
        resolve_symbols(repo, self, symbol_resolver)
    }
}
impl ResolvedRevsetExpression {
    /// Optimizes and evaluates this expression.
    pub fn evaluate<'index>(
        self: Arc<Self>,
        repo: &'index dyn Repo,
    ) -> Result<Box<dyn Revset + 'index>, RevsetEvaluationError> {
        let expr = optimize(self).to_backend_expression(repo);
        repo.index().evaluate_revset(&expr, repo.store())
    }
    /// Evaluates this expression without optimizing it.
    ///
    /// Use this function if `self` is already optimized, or to debug the
    /// optimization pass.
    pub fn evaluate_unoptimized<'index>(
        self: &Arc<Self>,
        repo: &'index dyn Repo,
    ) -> Result<Box<dyn Revset + 'index>, RevsetEvaluationError> {
        // Since referenced commits change the evaluation result, they must be
        // collected no matter if optimization is disabled.
        let expr = resolve_referenced_commits(self)
            .as_ref()
            .unwrap_or(self)
            .to_backend_expression(repo);
        repo.index().evaluate_revset(&expr, repo.store())
    }
    /// Transforms this expression to the form which the `Index` backend will
    /// process.
    pub fn to_backend_expression(&self, repo: &dyn Repo) -> ResolvedExpression {
        resolve_visibility(repo, self)
    }
}
#[derive(Clone, Debug)]
pub enum ResolvedPredicateExpression {
    /// Pure filter predicate.
    Filter(RevsetFilterPredicate),
    /// Set expression to be evaluated as filter. This is typically a subtree
    /// node of `Union` with a pure filter predicate.
    Set(Box<ResolvedExpression>),
    /// Negation of the inner predicate.
    NotIn(Box<Self>),
    /// Union (logical OR) of the two predicates.
    Union(Box<Self>, Box<Self>),
    /// Intersection (logical AND) of the two predicates.
    Intersection(Box<Self>, Box<Self>),
}
/// Describes evaluation plan of revset expression.
///
/// Unlike `RevsetExpression`, this doesn't contain unresolved symbols or `View`
/// properties.
///
/// Use `RevsetExpression` API to build a query programmatically.
// TODO: rename to BackendExpression?
#[derive(Clone, Debug)]
pub enum ResolvedExpression {
    /// Explicit list of commits.
    Commits(Vec<CommitId>),
    /// Ancestors of `heads` within the given generation range.
    Ancestors {
        heads: Box<Self>,
        generation: Range<u64>,
        parents_range: Range<u32>,
    },
    /// Commits that are ancestors of `heads` but not ancestors of `roots`.
    Range {
        roots: Box<Self>,
        heads: Box<Self>,
        generation: Range<u64>,
        // Parents range is only used for traversing heads, not roots
        parents_range: Range<u32>,
    },
    /// Commits that are descendants of `roots` and ancestors of `heads`.
    DagRange {
        roots: Box<Self>,
        heads: Box<Self>,
        generation_from_roots: Range<u64>,
    },
    /// Commits reachable from `sources` within `domain`.
    Reachable {
        sources: Box<Self>,
        domain: Box<Self>,
    },
    /// Commits in the set that have no descendants in the set.
    Heads(Box<Self>),
    /// Heads of the set of commits which are ancestors of `heads` but are not
    /// ancestors of `roots`, and which also are contained in `filter`.
    HeadsRange {
        roots: Box<Self>,
        heads: Box<Self>,
        parents_range: Range<u32>,
        filter: Option<ResolvedPredicateExpression>,
    },
    /// Commits in the set that have no ancestors in the set.
    Roots(Box<Self>),
    /// Fork point (best common ancestors) of the set.
    ForkPoint(Box<Self>),
    /// Commits with ~half of the descendants in the set.
    Bisect(Box<Self>),
    /// The `candidates`, required to contain exactly `count` commits.
    HasSize {
        candidates: Box<Self>,
        count: usize,
    },
    /// The latest `count` commits among `candidates`.
    Latest {
        candidates: Box<Self>,
        count: usize,
    },
    /// The first operand if it is non-empty, otherwise the second.
    Coalesce(Box<Self>, Box<Self>),
    /// Commits in either operand.
    Union(Box<Self>, Box<Self>),
    /// Intersects `candidates` with `predicate` by filtering.
    FilterWithin {
        candidates: Box<Self>,
        predicate: ResolvedPredicateExpression,
    },
    /// Intersects expressions by merging.
    Intersection(Box<Self>, Box<Self>),
    /// Commits in the first operand but not in the second.
    Difference(Box<Self>, Box<Self>),
}
/// Signature of a function that lowers a parsed revset function call into a
/// [`UserRevsetExpression`].
pub type RevsetFunction = fn(
    &mut RevsetDiagnostics,
    &FunctionCallNode,
    &LoweringContext,
) -> Result<Arc<UserRevsetExpression>, RevsetParseError>;
static BUILTIN_FUNCTION_MAP: LazyLock<HashMap<&str, RevsetFunction>> = LazyLock::new(|| {
// Not using maplit::hashmap!{} or custom declarative macro here because
// code completion inside macro is quite restricted.
let mut map: HashMap<&str, RevsetFunction> = HashMap::new();
map.insert("parents", |diagnostics, function, context| {
let ([arg], [depth_opt_arg]) = function.expect_arguments()?;
let expression = lower_expression(diagnostics, arg, context)?;
if let Some(depth_arg) = depth_opt_arg {
let depth = expect_literal("integer", depth_arg)?;
Ok(expression.ancestors_at(depth))
} else {
Ok(expression.parents())
}
});
map.insert("children", |diagnostics, function, context| {
let ([arg], [depth_opt_arg]) = function.expect_arguments()?;
let expression = lower_expression(diagnostics, arg, context)?;
if let Some(depth_arg) = depth_opt_arg {
let depth = expect_literal("integer", depth_arg)?;
Ok(expression.descendants_at(depth))
} else {
Ok(expression.children())
}
});
map.insert("ancestors", |diagnostics, function, context| {
let ([heads_arg], [depth_opt_arg]) = function.expect_arguments()?;
let heads = lower_expression(diagnostics, heads_arg, context)?;
let generation = if let Some(depth_arg) = depth_opt_arg {
let depth = expect_literal("integer", depth_arg)?;
0..depth
} else {
GENERATION_RANGE_FULL
};
Ok(heads.ancestors_range(generation))
});
map.insert("descendants", |diagnostics, function, context| {
let ([roots_arg], [depth_opt_arg]) = function.expect_arguments()?;
let roots = lower_expression(diagnostics, roots_arg, context)?;
let generation = if let Some(depth_arg) = depth_opt_arg {
let depth = expect_literal("integer", depth_arg)?;
0..depth
} else {
GENERATION_RANGE_FULL
};
Ok(roots.descendants_range(generation))
});
map.insert("first_parent", |diagnostics, function, context| {
let ([arg], [depth_opt_arg]) = function.expect_arguments()?;
let expression = lower_expression(diagnostics, arg, context)?;
let depth = if let Some(depth_arg) = depth_opt_arg {
expect_literal("integer", depth_arg)?
} else {
1
};
Ok(expression.first_ancestors_at(depth))
});
map.insert("first_ancestors", |diagnostics, function, context| {
let ([heads_arg], [depth_opt_arg]) = function.expect_arguments()?;
let heads = lower_expression(diagnostics, heads_arg, context)?;
let generation = if let Some(depth_arg) = depth_opt_arg {
let depth = expect_literal("integer", depth_arg)?;
0..depth
} else {
GENERATION_RANGE_FULL
};
Ok(heads.first_ancestors_range(generation))
});
map.insert("connected", |diagnostics, function, context| {
let [arg] = function.expect_exact_arguments()?;
let candidates = lower_expression(diagnostics, arg, context)?;
Ok(candidates.connected())
});
map.insert("reachable", |diagnostics, function, context| {
let [source_arg, domain_arg] = function.expect_exact_arguments()?;
let sources = lower_expression(diagnostics, source_arg, context)?;
let domain = lower_expression(diagnostics, domain_arg, context)?;
Ok(sources.reachable(&domain))
});
map.insert("none", |_diagnostics, function, _context| {
function.expect_no_arguments()?;
Ok(RevsetExpression::none())
});
map.insert("all", |_diagnostics, function, _context| {
function.expect_no_arguments()?;
Ok(RevsetExpression::all())
});
map.insert("working_copies", |_diagnostics, function, _context| {
function.expect_no_arguments()?;
Ok(RevsetExpression::working_copies())
});
map.insert("heads", |diagnostics, function, context| {
let [arg] = function.expect_exact_arguments()?;
let candidates = lower_expression(diagnostics, arg, context)?;
Ok(candidates.heads())
});
map.insert("roots", |diagnostics, function, context| {
let [arg] = function.expect_exact_arguments()?;
let candidates = lower_expression(diagnostics, arg, context)?;
Ok(candidates.roots())
});
map.insert("visible_heads", |_diagnostics, function, _context| {
function.expect_no_arguments()?;
Ok(RevsetExpression::visible_heads())
});
map.insert("root", |_diagnostics, function, _context| {
function.expect_no_arguments()?;
Ok(RevsetExpression::root())
});
map.insert("change_id", |diagnostics, function, _context| {
let [arg] = function.expect_exact_arguments()?;
let prefix = revset_parser::catch_aliases(diagnostics, arg, |_diagnostics, arg| {
let value = revset_parser::expect_string_literal("change ID prefix", arg)?;
HexPrefix::try_from_reverse_hex(value)
.ok_or_else(|| RevsetParseError::expression("Invalid change ID prefix", arg.span))
})?;
Ok(RevsetExpression::change_id_prefix(prefix))
});
map.insert("commit_id", |diagnostics, function, _context| {
let [arg] = function.expect_exact_arguments()?;
let prefix = revset_parser::catch_aliases(diagnostics, arg, |_diagnostics, arg| {
let value = revset_parser::expect_string_literal("commit ID prefix", arg)?;
HexPrefix::try_from_hex(value)
.ok_or_else(|| RevsetParseError::expression("Invalid commit ID prefix", arg.span))
})?;
Ok(RevsetExpression::commit_id_prefix(prefix))
});
map.insert("bookmarks", |diagnostics, function, context| {
let ([], [opt_arg]) = function.expect_arguments()?;
let expr = if let Some(arg) = opt_arg {
expect_string_expression(diagnostics, arg, context)?
} else {
StringExpression::all()
};
Ok(RevsetExpression::bookmarks(expr))
});
map.insert("remote_bookmarks", |diagnostics, function, context| {
parse_remote_bookmarks_arguments(diagnostics, function, None, context)
});
map.insert(
"tracked_remote_bookmarks",
|diagnostics, function, context| {
parse_remote_bookmarks_arguments(
diagnostics,
function,
Some(RemoteRefState::Tracked),
context,
)
},
);
map.insert(
"untracked_remote_bookmarks",
|diagnostics, function, context| {
parse_remote_bookmarks_arguments(
diagnostics,
function,
Some(RemoteRefState::New),
context,
)
},
);
map.insert("tags", |diagnostics, function, context| {
let ([], [opt_arg]) = function.expect_arguments()?;
let expr = if let Some(arg) = opt_arg {
expect_string_expression(diagnostics, arg, context)?
} else {
StringExpression::all()
};
Ok(RevsetExpression::tags(expr))
});
// TODO: Remove in jj 0.43+
map.insert("git_refs", |diagnostics, function, _context| {
diagnostics.add_warning(RevsetParseError::expression(
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/simple_backend.rs | lib/src/simple_backend.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::fmt::Debug;
use std::fs;
use std::fs::File;
use std::io::Cursor;
use std::io::Read as _;
use std::io::Write as _;
use std::path::Path;
use std::path::PathBuf;
use std::pin::Pin;
use std::time::SystemTime;
use async_trait::async_trait;
use blake2::Blake2b512;
use blake2::Digest as _;
use futures::stream;
use futures::stream::BoxStream;
use pollster::FutureExt as _;
use prost::Message as _;
use tempfile::NamedTempFile;
use tokio::io::AsyncRead;
use tokio::io::AsyncReadExt as _;
use crate::backend::Backend;
use crate::backend::BackendError;
use crate::backend::BackendResult;
use crate::backend::ChangeId;
use crate::backend::Commit;
use crate::backend::CommitId;
use crate::backend::CopyHistory;
use crate::backend::CopyId;
use crate::backend::CopyRecord;
use crate::backend::FileId;
use crate::backend::MillisSinceEpoch;
use crate::backend::SecureSig;
use crate::backend::Signature;
use crate::backend::SigningFn;
use crate::backend::SymlinkId;
use crate::backend::Timestamp;
use crate::backend::Tree;
use crate::backend::TreeId;
use crate::backend::TreeValue;
use crate::backend::make_root_commit;
use crate::conflict_labels::ConflictLabels;
use crate::content_hash::blake2b_hash;
use crate::file_util::persist_content_addressed_temp_file;
use crate::index::Index;
use crate::merge::MergeBuilder;
use crate::object_id::ObjectId;
use crate::repo_path::RepoPath;
use crate::repo_path::RepoPathBuf;
use crate::repo_path::RepoPathComponentBuf;
// Commit ids are Blake2b-512 digests (64 bytes); change ids are 16 bytes.
const COMMIT_ID_LENGTH: usize = 64;
const CHANGE_ID_LENGTH: usize = 16;
/// Translates an I/O error from reading object `id` into a `BackendError`:
/// `NotFound` becomes `ObjectNotFound`, anything else becomes `ReadObject`.
fn map_not_found_err(err: std::io::Error, id: &impl ObjectId) -> BackendError {
    let not_found = err.kind() == std::io::ErrorKind::NotFound;
    let object_type = id.object_type();
    let hash = id.hex();
    let source = Box::new(err);
    if not_found {
        BackendError::ObjectNotFound {
            object_type,
            hash,
            source,
        }
    } else {
        BackendError::ReadObject {
            object_type,
            hash,
            source,
        }
    }
}
/// Wraps an arbitrary error into `BackendError::Other`.
fn to_other_err(err: impl Into<Box<dyn std::error::Error + Send + Sync>>) -> BackendError {
    BackendError::Other(err.into())
}
/// File-system based backend storing each object as a content-addressed file
/// under `<store>/<object kind>/<hex id>`.
#[derive(Debug)]
pub struct SimpleBackend {
    /// Root directory of the store, containing per-object-kind subdirectories.
    path: PathBuf,
    /// Id of the virtual root commit (all zeros).
    root_commit_id: CommitId,
    /// Change id of the virtual root commit (all zeros).
    root_change_id: ChangeId,
    /// Id of the tree with no entries.
    empty_tree_id: TreeId,
}
impl SimpleBackend {
    /// Display name of this backend implementation.
    pub fn name() -> &'static str {
        "Simple"
    }
    /// Creates the store directory layout under `store_path` and writes the
    /// empty tree so that `empty_tree_id` refers to a real on-disk object.
    pub fn init(store_path: &Path) -> Self {
        fs::create_dir(store_path.join("commits")).unwrap();
        fs::create_dir(store_path.join("trees")).unwrap();
        fs::create_dir(store_path.join("files")).unwrap();
        fs::create_dir(store_path.join("symlinks")).unwrap();
        fs::create_dir(store_path.join("conflicts")).unwrap();
        let backend = Self::load(store_path);
        let empty_tree_id = backend
            .write_tree(RepoPath::root(), &Tree::default())
            .block_on()
            .unwrap();
        // Sanity check: the hard-coded hash in `load()` must match what the
        // hashing scheme actually produces for an empty tree.
        assert_eq!(empty_tree_id, backend.empty_tree_id);
        backend
    }
    /// Opens an existing store at `store_path`; does no disk I/O itself.
    pub fn load(store_path: &Path) -> Self {
        let root_commit_id = CommitId::from_bytes(&[0; COMMIT_ID_LENGTH]);
        let root_change_id = ChangeId::from_bytes(&[0; CHANGE_ID_LENGTH]);
        // Precomputed hash of the empty tree; verified by `init()`.
        let empty_tree_id = TreeId::from_hex(
            "482ae5a29fbe856c7272f2071b8b0f0359ee2d89ff392b8a900643fbd0836eccd067b8bf41909e206c90d45d6e7d8b6686b93ecaee5fe1a9060d87b672101310",
        );
        Self {
            path: store_path.to_path_buf(),
            root_commit_id,
            root_change_id,
            empty_tree_id,
        }
    }
    /// Path of the file object with the given id.
    fn file_path(&self, id: &FileId) -> PathBuf {
        self.path.join("files").join(id.hex())
    }
    /// Path of the symlink object with the given id.
    fn symlink_path(&self, id: &SymlinkId) -> PathBuf {
        self.path.join("symlinks").join(id.hex())
    }
    /// Path of the tree object with the given id.
    fn tree_path(&self, id: &TreeId) -> PathBuf {
        self.path.join("trees").join(id.hex())
    }
    /// Path of the commit object with the given id.
    fn commit_path(&self, id: &CommitId) -> PathBuf {
        self.path.join("commits").join(id.hex())
    }
}
#[async_trait]
impl Backend for SimpleBackend {
    fn name(&self) -> &str {
        Self::name()
    }
    fn commit_id_length(&self) -> usize {
        COMMIT_ID_LENGTH
    }
    fn change_id_length(&self) -> usize {
        CHANGE_ID_LENGTH
    }
    fn root_commit_id(&self) -> &CommitId {
        &self.root_commit_id
    }
    fn root_change_id(&self) -> &ChangeId {
        &self.root_change_id
    }
    fn empty_tree_id(&self) -> &TreeId {
        &self.empty_tree_id
    }
    fn concurrency(&self) -> usize {
        // Objects are plain local files; presumably no benefit from issuing
        // concurrent read requests.
        1
    }
    async fn read_file(
        &self,
        path: &RepoPath,
        id: &FileId,
    ) -> BackendResult<Pin<Box<dyn AsyncRead + Send>>> {
        let disk_path = self.file_path(id);
        let mut file = File::open(disk_path).map_err(|err| map_not_found_err(err, id))?;
        let mut buf = vec![];
        file.read_to_end(&mut buf)
            .map_err(|err| BackendError::ReadFile {
                path: path.to_owned(),
                id: id.clone(),
                source: err.into(),
            })?;
        // The whole file is buffered in memory; the returned reader replays
        // that buffer.
        Ok(Box::pin(Cursor::new(buf)))
    }
    async fn write_file(
        &self,
        _path: &RepoPath,
        contents: &mut (dyn AsyncRead + Send + Unpin),
    ) -> BackendResult<FileId> {
        // TODO: Write temporary file in the destination directory (#5712)
        let temp_file = NamedTempFile::new_in(&self.path).map_err(to_other_err)?;
        let mut file = temp_file.as_file();
        let mut hasher = Blake2b512::new();
        // Stream the contents into the temp file while hashing, so the
        // content-addressed name is known as soon as the copy finishes.
        let mut buff: Vec<u8> = vec![0; 1 << 14];
        loop {
            let bytes_read = contents.read(&mut buff).await.map_err(to_other_err)?;
            if bytes_read == 0 {
                break;
            }
            let bytes = &buff[..bytes_read];
            file.write_all(bytes).map_err(to_other_err)?;
            hasher.update(bytes);
        }
        file.flush().map_err(to_other_err)?;
        let id = FileId::new(hasher.finalize().to_vec());
        persist_content_addressed_temp_file(temp_file, self.file_path(&id))
            .map_err(to_other_err)?;
        Ok(id)
    }
    async fn read_symlink(&self, _path: &RepoPath, id: &SymlinkId) -> BackendResult<String> {
        let path = self.symlink_path(id);
        let target = fs::read_to_string(path).map_err(|err| map_not_found_err(err, id))?;
        Ok(target)
    }
    async fn write_symlink(&self, _path: &RepoPath, target: &str) -> BackendResult<SymlinkId> {
        // TODO: Write temporary file in the destination directory (#5712)
        let mut temp_file = NamedTempFile::new_in(&self.path).map_err(to_other_err)?;
        temp_file
            .write_all(target.as_bytes())
            .map_err(to_other_err)?;
        let mut hasher = Blake2b512::new();
        hasher.update(target.as_bytes());
        let id = SymlinkId::new(hasher.finalize().to_vec());
        persist_content_addressed_temp_file(temp_file, self.symlink_path(&id))
            .map_err(to_other_err)?;
        Ok(id)
    }
    async fn read_copy(&self, _id: &CopyId) -> BackendResult<CopyHistory> {
        Err(BackendError::Unsupported(
            "The simple backend doesn't support copies".to_string(),
        ))
    }
    async fn write_copy(&self, _contents: &CopyHistory) -> BackendResult<CopyId> {
        Err(BackendError::Unsupported(
            "The simple backend doesn't support copies".to_string(),
        ))
    }
    async fn get_related_copies(&self, _copy_id: &CopyId) -> BackendResult<Vec<CopyHistory>> {
        Err(BackendError::Unsupported(
            "The simple backend doesn't support copies".to_string(),
        ))
    }
    async fn read_tree(&self, _path: &RepoPath, id: &TreeId) -> BackendResult<Tree> {
        let path = self.tree_path(id);
        let buf = fs::read(path).map_err(|err| map_not_found_err(err, id))?;
        let proto = crate::protos::simple_store::Tree::decode(&*buf).map_err(to_other_err)?;
        Ok(tree_from_proto(proto))
    }
    async fn write_tree(&self, _path: &RepoPath, tree: &Tree) -> BackendResult<TreeId> {
        // TODO: Write temporary file in the destination directory (#5712)
        let temp_file = NamedTempFile::new_in(&self.path).map_err(to_other_err)?;
        let proto = tree_to_proto(tree);
        temp_file
            .as_file()
            .write_all(&proto.encode_to_vec())
            .map_err(to_other_err)?;
        // Note: the id is the hash of the in-memory Tree, not of the encoded
        // proto bytes.
        let id = TreeId::new(blake2b_hash(tree).to_vec());
        persist_content_addressed_temp_file(temp_file, self.tree_path(&id))
            .map_err(to_other_err)?;
        Ok(id)
    }
    async fn read_commit(&self, id: &CommitId) -> BackendResult<Commit> {
        // The root commit is virtual and never stored on disk.
        if *id == self.root_commit_id {
            return Ok(make_root_commit(
                self.root_change_id().clone(),
                self.empty_tree_id.clone(),
            ));
        }
        let path = self.commit_path(id);
        let buf = fs::read(path).map_err(|err| map_not_found_err(err, id))?;
        let proto = crate::protos::simple_store::Commit::decode(&*buf).map_err(to_other_err)?;
        Ok(commit_from_proto(proto))
    }
    async fn write_commit(
        &self,
        mut commit: Commit,
        sign_with: Option<&mut SigningFn>,
    ) -> BackendResult<(CommitId, Commit)> {
        assert!(commit.secure_sig.is_none(), "commit.secure_sig was set");
        if commit.parents.is_empty() {
            return Err(BackendError::Other(
                "Cannot write a commit with no parents".into(),
            ));
        }
        // TODO: Write temporary file in the destination directory (#5712)
        let temp_file = NamedTempFile::new_in(&self.path).map_err(to_other_err)?;
        let mut proto = commit_to_proto(&commit);
        if let Some(sign) = sign_with {
            // The signature covers the encoding *without* the secure_sig
            // field set; commit_from_proto() re-encodes the same way.
            let data = proto.encode_to_vec();
            let sig = sign(&data).map_err(to_other_err)?;
            proto.secure_sig = Some(sig.clone());
            commit.secure_sig = Some(SecureSig { data, sig });
        }
        temp_file
            .as_file()
            .write_all(&proto.encode_to_vec())
            .map_err(to_other_err)?;
        // Note: the id is the hash of the in-memory Commit, not of the
        // encoded proto bytes.
        let id = CommitId::new(blake2b_hash(&commit).to_vec());
        persist_content_addressed_temp_file(temp_file, self.commit_path(&id))
            .map_err(to_other_err)?;
        Ok((id, commit))
    }
    fn get_copy_records(
        &self,
        _paths: Option<&[RepoPathBuf]>,
        _root: &CommitId,
        _head: &CommitId,
    ) -> BackendResult<BoxStream<'_, BackendResult<CopyRecord>>> {
        // Copies are not tracked by this backend, so there are no records.
        Ok(Box::pin(stream::empty()))
    }
    fn gc(&self, _index: &dyn Index, _keep_newer: SystemTime) -> BackendResult<()> {
        // No-op: this backend never garbage-collects objects.
        Ok(())
    }
}
/// Converts an in-memory [`Commit`] to its protobuf representation.
#[expect(clippy::assigning_clones)]
pub fn commit_to_proto(commit: &Commit) -> crate::protos::simple_store::Commit {
    let mut proto = crate::protos::simple_store::Commit::default();
    proto.parents = commit.parents.iter().map(|id| id.to_bytes()).collect();
    proto.predecessors = commit
        .predecessors
        .iter()
        .map(|id| id.to_bytes())
        .collect();
    proto.root_tree = commit.root_tree.iter().map(|id| id.to_bytes()).collect();
    // Labels are only stored for conflicted trees.
    if !commit.conflict_labels.is_resolved() {
        proto.conflict_labels = commit.conflict_labels.as_slice().to_owned();
    }
    proto.change_id = commit.change_id.to_bytes();
    proto.description = commit.description.clone();
    proto.author = Some(signature_to_proto(&commit.author));
    proto.committer = Some(signature_to_proto(&commit.committer));
    proto
}
/// Reconstructs a [`Commit`] from its protobuf representation, recovering the
/// signed payload if a signature is present.
fn commit_from_proto(mut proto: crate::protos::simple_store::Commit) -> Commit {
    // Note how .take() sets the secure_sig field to None before we encode the data.
    // Needs to be done first since proto is partially moved a bunch below
    let secure_sig = proto.secure_sig.take().map(|sig| SecureSig {
        data: proto.encode_to_vec(),
        sig,
    });
    let parents = proto.parents.into_iter().map(CommitId::new).collect();
    let predecessors = proto.predecessors.into_iter().map(CommitId::new).collect();
    let merge_builder: MergeBuilder<_> = proto.root_tree.into_iter().map(TreeId::new).collect();
    let root_tree = merge_builder.build();
    let conflict_labels = ConflictLabels::from_vec(proto.conflict_labels);
    let change_id = ChangeId::new(proto.change_id);
    Commit {
        parents,
        predecessors,
        root_tree,
        conflict_labels: conflict_labels.into_merge(),
        change_id,
        description: proto.description,
        author: signature_from_proto(proto.author.unwrap_or_default()),
        committer: signature_from_proto(proto.committer.unwrap_or_default()),
        secure_sig,
    }
}
/// Converts an in-memory [`Tree`] to its protobuf representation.
fn tree_to_proto(tree: &Tree) -> crate::protos::simple_store::Tree {
    let mut proto = crate::protos::simple_store::Tree::default();
    proto.entries = tree
        .entries()
        .into_iter()
        .map(|entry| crate::protos::simple_store::tree::Entry {
            name: entry.name().as_internal_str().to_owned(),
            value: Some(tree_value_to_proto(entry.value())),
        })
        .collect();
    proto
}
/// Reconstructs a [`Tree`] from its protobuf representation.
fn tree_from_proto(proto: crate::protos::simple_store::Tree) -> Tree {
    // Serialized data should be sorted
    let entries = proto
        .entries
        .into_iter()
        .map(|proto_entry| {
            let value = tree_value_from_proto(proto_entry.value.unwrap());
            (RepoPathComponentBuf::new(proto_entry.name).unwrap(), value)
        })
        .collect();
    Tree::from_sorted_entries(entries)
}
/// Converts a [`TreeValue`] to its protobuf representation.
///
/// Panics on `GitSubmodule`, which this store cannot represent.
fn tree_value_to_proto(value: &TreeValue) -> crate::protos::simple_store::TreeValue {
    let mut proto = crate::protos::simple_store::TreeValue::default();
    match value {
        TreeValue::File {
            id,
            executable,
            copy_id,
        } => {
            proto.value = Some(crate::protos::simple_store::tree_value::Value::File(
                crate::protos::simple_store::tree_value::File {
                    id: id.to_bytes(),
                    executable: *executable,
                    copy_id: copy_id.to_bytes(),
                },
            ));
        }
        TreeValue::Symlink(id) => {
            proto.value = Some(crate::protos::simple_store::tree_value::Value::SymlinkId(
                id.to_bytes(),
            ));
        }
        TreeValue::GitSubmodule(_id) => {
            // There is no proto variant for submodules; hitting this is a bug
            // in the caller.
            panic!("cannot store git submodules");
        }
        TreeValue::Tree(id) => {
            proto.value = Some(crate::protos::simple_store::tree_value::Value::TreeId(
                id.to_bytes(),
            ));
        }
    }
    proto
}
/// Reconstructs a [`TreeValue`] from its protobuf representation.
fn tree_value_from_proto(proto: crate::protos::simple_store::TreeValue) -> TreeValue {
    match proto.value.unwrap() {
        crate::protos::simple_store::tree_value::Value::TreeId(id) => {
            TreeValue::Tree(TreeId::new(id))
        }
        crate::protos::simple_store::tree_value::Value::File(
            crate::protos::simple_store::tree_value::File {
                id,
                executable,
                copy_id,
            },
        ) => TreeValue::File {
            id: FileId::new(id),
            executable,
            copy_id: CopyId::new(copy_id),
        },
        crate::protos::simple_store::tree_value::Value::SymlinkId(id) => {
            TreeValue::Symlink(SymlinkId::new(id))
        }
    }
}
/// Converts a [`Signature`] to its protobuf representation.
fn signature_to_proto(signature: &Signature) -> crate::protos::simple_store::commit::Signature {
    let timestamp = crate::protos::simple_store::commit::Timestamp {
        millis_since_epoch: signature.timestamp.timestamp.0,
        tz_offset: signature.timestamp.tz_offset,
    };
    crate::protos::simple_store::commit::Signature {
        name: signature.name.clone(),
        email: signature.email.clone(),
        timestamp: Some(timestamp),
    }
}
/// Reconstructs a [`Signature`] from its protobuf representation. A missing
/// timestamp decodes as the default (zero) timestamp.
fn signature_from_proto(proto: crate::protos::simple_store::commit::Signature) -> Signature {
    let ts = proto.timestamp.unwrap_or_default();
    let timestamp = Timestamp {
        timestamp: MillisSinceEpoch(ts.millis_since_epoch),
        tz_offset: ts.tz_offset,
    };
    Signature {
        name: proto.name,
        email: proto.email,
        timestamp,
    }
}
#[cfg(test)]
mod tests {
    use assert_matches::assert_matches;
    use pollster::FutureExt as _;
    use super::*;
    use crate::merge::Merge;
    use crate::tests::new_temp_dir;
    /// Test that parents get written correctly
    #[test]
    fn write_commit_parents() {
        let temp_dir = new_temp_dir();
        let store_path = temp_dir.path();
        let backend = SimpleBackend::init(store_path);
        let mut commit = Commit {
            parents: vec![],
            predecessors: vec![],
            root_tree: Merge::resolved(backend.empty_tree_id().clone()),
            conflict_labels: Merge::resolved(String::new()),
            change_id: ChangeId::from_hex("abc123"),
            description: "".to_string(),
            author: create_signature(),
            committer: create_signature(),
            secure_sig: None,
        };
        // Helper to avoid repeating the block_on() boilerplate per case.
        let write_commit = |commit: Commit| -> BackendResult<(CommitId, Commit)> {
            backend.write_commit(commit, None).block_on()
        };
        // No parents
        commit.parents = vec![];
        assert_matches!(
            write_commit(commit.clone()),
            Err(BackendError::Other(err)) if err.to_string().contains("no parents")
        );
        // Only root commit as parent
        commit.parents = vec![backend.root_commit_id().clone()];
        let first_id = write_commit(commit.clone()).unwrap().0;
        let first_commit = backend.read_commit(&first_id).block_on().unwrap();
        assert_eq!(first_commit, commit);
        // Only non-root commit as parent
        commit.parents = vec![first_id.clone()];
        let second_id = write_commit(commit.clone()).unwrap().0;
        let second_commit = backend.read_commit(&second_id).block_on().unwrap();
        assert_eq!(second_commit, commit);
        // Merge commit
        commit.parents = vec![first_id.clone(), second_id.clone()];
        let merge_id = write_commit(commit.clone()).unwrap().0;
        let merge_commit = backend.read_commit(&merge_id).block_on().unwrap();
        assert_eq!(merge_commit, commit);
        // Merge commit with root as one parent
        commit.parents = vec![first_id, backend.root_commit_id().clone()];
        let root_merge_id = write_commit(commit.clone()).unwrap().0;
        let root_merge_commit = backend.read_commit(&root_merge_id).block_on().unwrap();
        assert_eq!(root_merge_commit, commit);
    }
    /// Fixed, deterministic signature used by the tests above.
    fn create_signature() -> Signature {
        Signature {
            name: "Someone".to_string(),
            email: "someone@example.com".to_string(),
            timestamp: Timestamp {
                timestamp: MillisSinceEpoch(0),
                tz_offset: 0,
            },
        }
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/hex_util.rs | lib/src/hex_util.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Hex string helpers.
/// Digit table for normal lowercase hex encoding (`0` = 0 … `f` = 15).
const FORWARD_HEX_CHARS: &[u8; 16] = b"0123456789abcdef";
/// Digit table for "reverse hex" encoding (`z` = 0 … `k` = 15).
const REVERSE_HEX_CHARS: &[u8; 16] = b"zyxwvutsrqponmlk";
/// Maps an ASCII hex digit (either case) to its numeric value, or `None`
/// for any non-hex byte.
fn forward_hex_value(b: u8) -> Option<u8> {
    if b.is_ascii_digit() {
        Some(b - b'0')
    } else if (b'a'..=b'f').contains(&b) {
        Some(b - b'a' + 10)
    } else if (b'A'..=b'F').contains(&b) {
        Some(b - b'A' + 10)
    } else {
        None
    }
}
/// Maps a "reverse hex" digit (`z`-`k` or `Z`-`K`, where `z`/`Z` is 0 and
/// `k`/`K` is 15) to its numeric value, or `None` for any other byte.
fn reverse_hex_value(b: u8) -> Option<u8> {
    if (b'k'..=b'z').contains(&b) {
        Some(b'z' - b)
    } else if (b'K'..=b'Z').contains(&b) {
        Some(b'Z' - b)
    } else {
        None
    }
}
/// Decodes `hex` as normal hex string.
///
/// Both uppercase and lowercase digits are accepted. Returns `None` if
/// the input has odd length or contains a non-hex byte.
pub fn decode_hex(hex: impl AsRef<[u8]>) -> Option<Vec<u8>> {
    decode_hex_inner(hex.as_ref(), forward_hex_value)
}
/// Decodes `hex` as normal hex string prefix. The output may have odd-length
/// byte. Returns `(bytes, has_odd_byte)`.
///
/// Unlike [`decode_hex`], odd-length input is accepted: the trailing digit
/// fills the high nibble of one extra output byte and `has_odd_byte` is
/// set. Returns `None` on any invalid digit.
pub fn decode_hex_prefix(hex: impl AsRef<[u8]>) -> Option<(Vec<u8>, bool)> {
    decode_hex_prefix_inner(hex.as_ref(), forward_hex_value)
}
/// Decodes `reverse_hex` as hex string using `z-k` "digits".
///
/// In this encoding `z` maps to 0 and `k` to 15 (both cases accepted).
/// Returns `None` if the input has odd length or contains an invalid digit.
pub fn decode_reverse_hex(reverse_hex: impl AsRef<[u8]>) -> Option<Vec<u8>> {
    decode_hex_inner(reverse_hex.as_ref(), reverse_hex_value)
}
/// Decodes `reverse_hex` as hex string prefix using `z-k` "digits". The output
/// may have odd-length byte. Returns `(bytes, has_odd_byte)`.
///
/// A trailing unpaired digit fills the high nibble of one extra output
/// byte, in which case `has_odd_byte` is true. Returns `None` on any
/// invalid digit.
pub fn decode_reverse_hex_prefix(reverse_hex: impl AsRef<[u8]>) -> Option<(Vec<u8>, bool)> {
    decode_hex_prefix_inner(reverse_hex.as_ref(), reverse_hex_value)
}
/// Shared decoder for full digit strings: rejects odd-length input, then
/// defers to `decode_hex_prefix_inner` with the given per-digit decoder.
fn decode_hex_inner(hex: &[u8], hex_value: impl Fn(u8) -> Option<u8>) -> Option<Vec<u8>> {
    if hex.len() % 2 != 0 {
        return None;
    }
    decode_hex_prefix_inner(hex, hex_value).map(|(decoded, _has_odd_byte)| decoded)
}
/// Shared decoder for digit strings of arbitrary length, using the given
/// per-digit decoder `hex_value`.
///
/// Digits are consumed in pairs (high nibble first). A trailing unpaired
/// digit fills the high nibble of one extra output byte, in which case the
/// returned flag is true. Returns `None` on any invalid digit.
fn decode_hex_prefix_inner(
    hex: &[u8],
    hex_value: impl Fn(u8) -> Option<u8>,
) -> Option<(Vec<u8>, bool)> {
    let mut decoded = Vec::with_capacity(hex.len().div_ceil(2));
    let mut pairs = hex.chunks_exact(2);
    for pair in &mut pairs {
        let hi = hex_value(pair[0])?;
        let lo = hex_value(pair[1])?;
        decoded.push(hi << 4 | lo);
    }
    // `chunks_exact` leaves at most one trailing digit in the remainder.
    match pairs.remainder() {
        &[hi] => {
            decoded.push(hex_value(hi)? << 4);
            Some((decoded, true))
        }
        _ => Some((decoded, false)),
    }
}
/// Encodes `data` as normal hex string.
///
/// Output is lowercase, two digits per input byte, high nibble first.
pub fn encode_hex(data: &[u8]) -> String {
    encode_hex_inner(data, FORWARD_HEX_CHARS)
}
/// Encodes `data` as hex string using `z-k` "digits".
///
/// Output uses lowercase `z` (0) through `k` (15), two digits per input
/// byte, high nibble first.
pub fn encode_reverse_hex(data: &[u8]) -> String {
    encode_hex_inner(data, REVERSE_HEX_CHARS)
}
/// Encodes `data` using the 16-entry digit table `chars`, emitting two
/// digits per byte (high nibble first).
fn encode_hex_inner(data: &[u8], chars: &[u8; 16]) -> String {
    let mut encoded = Vec::with_capacity(data.len() * 2);
    for &b in data {
        encoded.push(chars[usize::from(b >> 4)]);
        encoded.push(chars[usize::from(b & 0xf)]);
    }
    String::from_utf8(encoded).unwrap()
}
/// Calculates the length of the common prefix of two byte sequences,
/// measured in hexadecimal digits (nibbles).
pub fn common_hex_len(bytes_a: &[u8], bytes_b: &[u8]) -> usize {
    for (i, (a, b)) in std::iter::zip(bytes_a, bytes_b).enumerate() {
        let diff = a ^ b;
        if diff != 0 {
            // Equal high nibbles mean the first digit of this byte matches.
            return if diff & 0xf0 == 0 { i * 2 + 1 } else { i * 2 };
        }
    }
    // One sequence is a byte-level prefix of the other.
    bytes_a.len().min(bytes_b.len()) * 2
}
#[cfg(test)]
mod tests {
    use super::*;
    // Edge-case coverage for both codecs: empty input, odd-length
    // prefixes, the full digit alphabet in mixed case, and rejection of
    // invalid digits.
    #[test]
    fn test_forward_hex() {
        // Empty string
        assert_eq!(decode_hex(""), Some(vec![]));
        assert_eq!(decode_hex_prefix(""), Some((vec![], false)));
        assert_eq!(encode_hex(b""), "".to_string());
        // Single digit
        assert_eq!(decode_hex("0"), None);
        assert_eq!(decode_hex_prefix("f"), Some((vec![0xf0], true)));
        // All digits; decoding accepts both cases, encoding is lowercase
        assert_eq!(
            decode_hex("0123456789abcDEF"),
            Some(b"\x01\x23\x45\x67\x89\xab\xcd\xef".to_vec())
        );
        assert_eq!(
            decode_hex_prefix("0123456789ABCdef"),
            Some((b"\x01\x23\x45\x67\x89\xab\xcd\xef".to_vec(), false))
        );
        assert_eq!(
            encode_hex(b"\x01\x23\x45\x67\x89\xab\xcd\xef"),
            "0123456789abcdef".to_string()
        );
        // Invalid digit
        assert_eq!(decode_hex("gg"), None);
        assert_eq!(decode_hex_prefix("gg"), None);
    }
    #[test]
    fn test_reverse_hex() {
        // Empty string
        assert_eq!(decode_reverse_hex(""), Some(vec![]));
        assert_eq!(decode_reverse_hex_prefix(""), Some((vec![], false)));
        assert_eq!(encode_reverse_hex(b""), "".to_string());
        // Single digit
        assert_eq!(decode_reverse_hex("z"), None);
        assert_eq!(decode_reverse_hex_prefix("k"), Some((vec![0xf0], true)));
        // All digits; decoding accepts both cases, encoding is lowercase
        assert_eq!(
            decode_reverse_hex("zyxwvutsRQPONMLK"),
            Some(b"\x01\x23\x45\x67\x89\xab\xcd\xef".to_vec())
        );
        assert_eq!(
            decode_reverse_hex_prefix("ZYXWVUTSrqponmlk"),
            Some((b"\x01\x23\x45\x67\x89\xab\xcd\xef".to_vec(), false))
        );
        assert_eq!(
            encode_reverse_hex(b"\x01\x23\x45\x67\x89\xab\xcd\xef"),
            "zyxwvutsrqponmlk".to_string()
        );
        // Invalid digit ('j' is just outside the k-z range)
        assert_eq!(decode_reverse_hex("jj"), None);
        assert_eq!(decode_reverse_hex_prefix("jj"), None);
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/commit.rs | lib/src/commit.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::cmp::Ordering;
use std::fmt::Debug;
use std::fmt::Error;
use std::fmt::Formatter;
use std::hash::Hash;
use std::hash::Hasher;
use std::sync::Arc;
use futures::future::try_join_all;
use itertools::Itertools as _;
use pollster::FutureExt as _;
use crate::backend;
use crate::backend::BackendError;
use crate::backend::BackendResult;
use crate::backend::ChangeId;
use crate::backend::CommitId;
use crate::backend::Signature;
use crate::backend::TreeId;
use crate::conflict_labels::ConflictLabels;
use crate::index::IndexResult;
use crate::merge::Merge;
use crate::merged_tree::MergedTree;
use crate::repo::Repo;
use crate::rewrite::merge_commit_trees;
use crate::signing::SignResult;
use crate::signing::Verification;
use crate::store::Store;
#[derive(Clone, serde::Serialize)]
pub struct Commit {
#[serde(skip)]
store: Arc<Store>,
#[serde(rename = "commit_id")]
id: CommitId,
#[serde(flatten)]
data: Arc<backend::Commit>,
}
impl Debug for Commit {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
f.debug_struct("Commit").field("id", &self.id).finish()
// We intentionally don't print the `data` field. You can debug-print
// `commit.store_commit()` to get those details.
//
// The reason is that `Commit` objects are debug-printed as part of many
// other data structures and in tracing.
}
}
// The impls below identify a `Commit` by its id alone. This assumes a
// given id always maps to the same underlying commit data (NOTE(review):
// presumably the store is content-addressed — confirm), so comparing,
// ordering, and hashing by id is cheap and mutually consistent.
impl PartialEq for Commit {
    fn eq(&self, other: &Self) -> bool {
        self.id == other.id
    }
}
impl Eq for Commit {}
// Total order over commit id bytes; arbitrary but stable.
impl Ord for Commit {
    fn cmp(&self, other: &Self) -> Ordering {
        self.id.cmp(&other.id)
    }
}
impl PartialOrd for Commit {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
// Hash must agree with `PartialEq`, so it also uses only the id.
impl Hash for Commit {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.id.hash(state);
    }
}
impl Commit {
pub fn new(store: Arc<Store>, id: CommitId, data: Arc<backend::Commit>) -> Self {
Self { store, id, data }
}
pub fn store(&self) -> &Arc<Store> {
&self.store
}
pub fn id(&self) -> &CommitId {
&self.id
}
pub fn parent_ids(&self) -> &[CommitId] {
&self.data.parents
}
pub fn parents(&self) -> impl Iterator<Item = BackendResult<Self>> {
self.data.parents.iter().map(|id| self.store.get_commit(id))
}
pub async fn parents_async(&self) -> BackendResult<Vec<Self>> {
try_join_all(
self.data
.parents
.iter()
.map(|id| self.store.get_commit_async(id)),
)
.await
}
pub fn tree(&self) -> MergedTree {
MergedTree::new(
self.store.clone(),
self.data.root_tree.clone(),
ConflictLabels::from_merge(self.data.conflict_labels.clone()),
)
}
pub fn tree_ids(&self) -> &Merge<TreeId> {
&self.data.root_tree
}
pub fn parent_tree(&self, repo: &dyn Repo) -> BackendResult<MergedTree> {
self.parent_tree_async(repo).block_on()
}
/// Return the parent tree, merging the parent trees if there are multiple
/// parents.
pub async fn parent_tree_async(&self, repo: &dyn Repo) -> BackendResult<MergedTree> {
// Avoid merging parent trees if known to be empty. The index could be
// queried only when parents.len() > 1, but index query would be cheaper
// than extracting parent commit from the store.
if is_commit_empty_by_index(repo, &self.id)? == Some(true) {
return Ok(self.tree());
}
let parents: Vec<_> = self.parents_async().await?;
merge_commit_trees(repo, &parents).await
}
/// Returns whether commit's content is empty. Commit description is not
/// taken into consideration.
pub fn is_empty(&self, repo: &dyn Repo) -> BackendResult<bool> {
if let Some(empty) = is_commit_empty_by_index(repo, &self.id)? {
return Ok(empty);
}
is_backend_commit_empty(repo, &self.store, &self.data)
}
pub fn has_conflict(&self) -> bool {
!self.tree_ids().is_resolved()
}
pub fn change_id(&self) -> &ChangeId {
&self.data.change_id
}
pub fn store_commit(&self) -> &Arc<backend::Commit> {
&self.data
}
pub fn description(&self) -> &str {
&self.data.description
}
pub fn author(&self) -> &Signature {
&self.data.author
}
pub fn committer(&self) -> &Signature {
&self.data.committer
}
/// A commit is hidden if its commit id is not in the change id index.
pub fn is_hidden(&self, repo: &dyn Repo) -> IndexResult<bool> {
let maybe_targets = repo.resolve_change_id(self.change_id())?;
Ok(maybe_targets.is_none_or(|targets| !targets.has_visible(&self.id)))
}
/// A commit is discardable if it has no change from its parent, and an
/// empty description.
pub fn is_discardable(&self, repo: &dyn Repo) -> BackendResult<bool> {
Ok(self.description().is_empty() && self.is_empty(repo)?)
}
/// A quick way to just check if a signature is present.
pub fn is_signed(&self) -> bool {
self.data.secure_sig.is_some()
}
/// A slow (but cached) way to get the full verification.
pub fn verification(&self) -> SignResult<Option<Verification>> {
self.data
.secure_sig
.as_ref()
.map(|sig| self.store.signer().verify(&self.id, &sig.data, &sig.sig))
.transpose()
}
/// A string describing the commit to be used in conflict markers. If a
/// description is set, it will include the first line of the description.
pub fn conflict_label(&self) -> String {
if let Some(subject) = self.description().lines().next() {
// Example: nlqwxzwn 7dd24e73 "first line of description"
format!(
"{} \"{}\"",
self.conflict_label_short(),
// Control characters shouldn't be written in conflict markers, and '\0' isn't
// supported by the Git backend, so we just remove them. Unicode characters are
// supported, so we don't have to remove them.
subject.trim().replace(char::is_control, "")
)
} else {
self.conflict_label_short()
}
}
/// A short string describing the commit to be used in conflict markers.
/// Does not include the commit description.
fn conflict_label_short(&self) -> String {
// Example: nlqwxzwn 7dd24e73
format!("{:.8} {:.8}", self.change_id(), self.id())
}
/// A string describing the commit's parents to be used in conflict markers.
pub fn parents_conflict_label(&self) -> BackendResult<String> {
let parents: Vec<_> = self.parents().try_collect()?;
Ok(conflict_label_for_commits(&parents))
}
}
/// If there is a single commit, returns the detailed conflict label for that
/// commit. If there are multiple commits, joins the short conflict labels of
/// each commit.
pub fn conflict_label_for_commits(commits: &[Commit]) -> String {
    if commits.len() == 1 {
        commits[0].conflict_label()
    } else {
        commits.iter().map(Commit::conflict_label_short).join(", ")
    }
}
/// Returns whether `commit`'s tree equals its (merged) parent tree, i.e.
/// whether the commit introduces no content changes.
pub(crate) fn is_backend_commit_empty(
    repo: &dyn Repo,
    store: &Arc<Store>,
    commit: &backend::Commit,
) -> BackendResult<bool> {
    // Fast path: with exactly one parent there is nothing to merge.
    if let [parent_id] = &*commit.parents {
        return Ok(commit.root_tree == *store.get_commit(parent_id)?.tree_ids());
    }
    // Root commit (no parents) or merge commit: compare against the
    // merged parent tree.
    let parents: Vec<_> = commit
        .parents
        .iter()
        .map(|id| store.get_commit(id))
        .try_collect()?;
    let parent_tree = merge_commit_trees(repo, &parents).block_on()?;
    Ok(commit.root_tree == *parent_tree.tree_ids())
}
/// Checks commit emptiness via the index's changed-path records: the
/// commit is empty iff its changed-path list is empty. Returns `Ok(None)`
/// when the index has no changed-path data for this commit, in which case
/// the caller must fall back to comparing trees.
fn is_commit_empty_by_index(repo: &dyn Repo, id: &CommitId) -> BackendResult<Option<bool>> {
    let maybe_paths = repo
        .index()
        .changed_paths_in_commit(id)
        // TODO: index error shouldn't be a "BackendError"
        .map_err(|err| BackendError::Other(err.into()))?;
    Ok(maybe_paths.map(|mut paths| paths.next().is_none()))
}
/// Extension trait mapping an iterator of `&Commit` to an iterator of
/// `&CommitId`.
// NOTE(review): the `I` type parameter is not referenced by the trait
// body; it appears to exist only to keep the blanket impl below coherent
// — confirm before removing.
pub trait CommitIteratorExt<'c, I> {
    /// Borrows each commit's id.
    fn ids(self) -> impl Iterator<Item = &'c CommitId>;
}
impl<'c, I> CommitIteratorExt<'c, I> for I
where
    I: Iterator<Item = &'c Commit>,
{
    fn ids(self) -> impl Iterator<Item = &'c CommitId> {
        self.map(|commit| commit.id())
    }
}
/// Wrapper to sort `Commit` by committer timestamp.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub(crate) struct CommitByCommitterTimestamp(pub Commit);
// Primary key: the committer timestamp. Ties are broken by the commit's
// own ordering (by id) so that `cmp` returns `Equal` only for values that
// are also `Eq`-equal.
impl Ord for CommitByCommitterTimestamp {
    fn cmp(&self, other: &Self) -> Ordering {
        let self_timestamp = &self.0.committer().timestamp.timestamp;
        let other_timestamp = &other.0.committer().timestamp.timestamp;
        self_timestamp
            .cmp(other_timestamp)
            .then_with(|| self.0.cmp(&other.0)) // to comply with Eq
    }
}
impl PartialOrd for CommitByCommitterTimestamp {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/simple_op_heads_store.rs | lib/src/simple_op_heads_store.rs | // Copyright 2021-2022 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::fmt::Debug;
use std::fmt::Formatter;
use std::fs;
use std::io;
use std::path::Path;
use std::path::PathBuf;
use async_trait::async_trait;
use thiserror::Error;
use crate::backend::BackendInitError;
use crate::file_util::IoResultExt as _;
use crate::file_util::PathError;
use crate::hex_util;
use crate::lock::FileLock;
use crate::object_id::ObjectId as _;
use crate::op_heads_store::OpHeadsStore;
use crate::op_heads_store::OpHeadsStoreError;
use crate::op_heads_store::OpHeadsStoreLock;
use crate::op_store::OperationId;
/// Error that may occur during [`SimpleOpHeadsStore`] initialization.
#[derive(Debug, Error)]
#[error("Failed to initialize simple operation heads store")]
pub struct SimpleOpHeadsStoreInitError(#[from] pub PathError);
impl From<SimpleOpHeadsStoreInitError> for BackendInitError {
fn from(err: SimpleOpHeadsStoreInitError) -> Self {
Self(err.into())
}
}
pub struct SimpleOpHeadsStore {
dir: PathBuf,
}
impl Debug for SimpleOpHeadsStore {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("SimpleOpHeadsStore")
.field("dir", &self.dir)
.finish()
}
}
impl SimpleOpHeadsStore {
    /// Name identifying this op-heads-store implementation.
    pub fn name() -> &'static str {
        "simple_op_heads_store"
    }
    /// Creates the `heads` subdirectory under `dir` and returns a store
    /// backed by it. Fails if the directory cannot be created.
    pub fn init(dir: &Path) -> Result<Self, SimpleOpHeadsStoreInitError> {
        let op_heads_dir = dir.join("heads");
        fs::create_dir(&op_heads_dir).context(&op_heads_dir)?;
        Ok(Self { dir: op_heads_dir })
    }
    /// Opens an existing store rooted at `dir`. Performs no I/O; the
    /// directory is assumed to exist.
    pub fn load(dir: &Path) -> Self {
        let op_heads_dir = dir.join("heads");
        Self { dir: op_heads_dir }
    }
    // Records `id` as a head by creating an empty file named after its
    // hex representation.
    fn add_op_head(&self, id: &OperationId) -> io::Result<()> {
        std::fs::write(self.dir.join(id.hex()), "")
    }
    // Removes the head file for `id`; a missing file is deliberately not
    // an error (see comment below).
    fn remove_op_head(&self, id: &OperationId) -> io::Result<()> {
        std::fs::remove_file(self.dir.join(id.hex())).or_else(|err| {
            if err.kind() == io::ErrorKind::NotFound {
                // It's fine if the old head was not found. It probably means
                // that we're on a distributed file system where the locking
                // doesn't work. We'll probably end up with two current
                // heads. We'll detect that next time we load the view.
                Ok(())
            } else {
                Err(err)
            }
        })
    }
}
struct SimpleOpHeadsStoreLock {
_lock: FileLock,
}
impl OpHeadsStoreLock for SimpleOpHeadsStoreLock {}
#[async_trait]
impl OpHeadsStore for SimpleOpHeadsStore {
fn name(&self) -> &str {
Self::name()
}
async fn update_op_heads(
&self,
old_ids: &[OperationId],
new_id: &OperationId,
) -> Result<(), OpHeadsStoreError> {
assert!(!old_ids.contains(new_id));
self.add_op_head(new_id)
.map_err(|err| OpHeadsStoreError::Write {
new_op_id: new_id.clone(),
source: err.into(),
})?;
for old_id in old_ids {
self.remove_op_head(old_id)
.map_err(|err| OpHeadsStoreError::Write {
new_op_id: new_id.clone(),
source: err.into(),
})?;
}
Ok(())
}
async fn get_op_heads(&self) -> Result<Vec<OperationId>, OpHeadsStoreError> {
let mut op_heads = vec![];
for op_head_entry in
std::fs::read_dir(&self.dir).map_err(|err| OpHeadsStoreError::Read(err.into()))?
{
let op_head_file_name = op_head_entry
.map_err(|err| OpHeadsStoreError::Read(err.into()))?
.file_name();
let op_head_file_name = op_head_file_name.to_str().ok_or_else(|| {
OpHeadsStoreError::Read(
format!("Non-utf8 in op head file name: {op_head_file_name:?}").into(),
)
})?;
if let Some(op_head) = hex_util::decode_hex(op_head_file_name) {
op_heads.push(OperationId::new(op_head));
}
}
Ok(op_heads)
}
async fn lock(&self) -> Result<Box<dyn OpHeadsStoreLock + '_>, OpHeadsStoreError> {
let lock = FileLock::lock(self.dir.join("lock"))
.map_err(|err| OpHeadsStoreError::Lock(err.into()))?;
Ok(Box::new(SimpleOpHeadsStoreLock { _lock: lock }))
}
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/default_submodule_store.rs | lib/src/default_submodule_store.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::path::Path;
use std::path::PathBuf;
use crate::submodule_store::SubmoduleStore;
#[derive(Debug)]
pub struct DefaultSubmoduleStore {
    // Root path of the store. Reserved for future on-disk state; nothing
    // reads it today (hence the dead_code expectation).
    #[expect(dead_code)]
    path: PathBuf,
}
impl DefaultSubmoduleStore {
    /// Loads an existing submodule store rooted at `store_path`.
    /// Performs no I/O.
    pub fn load(store_path: &Path) -> Self {
        let path = store_path.to_path_buf();
        Self { path }
    }

    /// Initializes a new submodule store rooted at `store_path`.
    /// Currently identical to [`Self::load`]; no on-disk state is created.
    pub fn init(store_path: &Path) -> Self {
        let path = store_path.to_path_buf();
        Self { path }
    }

    /// Name identifying this submodule store implementation.
    pub fn name() -> &'static str {
        "default"
    }
}
// Trait-object entry point; delegates to the inherent associated
// function so both report the same implementation name.
impl SubmoduleStore for DefaultSubmoduleStore {
    fn name(&self) -> &str {
        Self::name()
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/evolution.rs | lib/src/evolution.rs | // Copyright 2025 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Utility for commit evolution history.
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::collections::hash_map::Entry;
use std::slice;
use itertools::Itertools as _;
use thiserror::Error;
use crate::backend::BackendError;
use crate::backend::BackendResult;
use crate::backend::CommitId;
use crate::commit::Commit;
use crate::dag_walk;
use crate::index::IndexError;
use crate::op_store::OpStoreError;
use crate::op_store::OpStoreResult;
use crate::op_walk;
use crate::operation::Operation;
use crate::repo::ReadonlyRepo;
use crate::repo::Repo as _;
/// Commit with predecessor information.
#[derive(Clone, Debug, serde::Serialize)]
pub struct CommitEvolutionEntry {
/// Commit id and metadata.
pub commit: Commit,
/// Operation where the commit was created or rewritten.
pub operation: Option<Operation>,
/// Reachable predecessor ids reconstructed from the commit metadata. This
/// should be set if the associated `operation` is unknown.
// TODO: remove with legacy commit.predecessors support
#[serde(skip)]
reachable_predecessors: Option<Vec<CommitId>>,
}
impl CommitEvolutionEntry {
/// Predecessor ids of this commit.
pub fn predecessor_ids(&self) -> &[CommitId] {
match &self.operation {
Some(op) => op.predecessors_for_commit(self.commit.id()).unwrap(),
None => self.reachable_predecessors.as_ref().unwrap(),
}
}
/// Predecessor commit objects of this commit.
pub fn predecessors(&self) -> impl ExactSizeIterator<Item = BackendResult<Commit>> {
let store = self.commit.store();
self.predecessor_ids().iter().map(|id| store.get_commit(id))
}
}
#[expect(missing_docs)]
#[derive(Debug, Error)]
pub enum WalkPredecessorsError {
#[error(transparent)]
Backend(#[from] BackendError),
#[error(transparent)]
Index(#[from] IndexError),
#[error(transparent)]
OpStore(#[from] OpStoreError),
#[error("Predecessors cycle detected around commit {0}")]
CycleDetected(CommitId),
}
/// Walks operations to emit commit predecessors in reverse topological order.
pub fn walk_predecessors<'repo>(
repo: &'repo ReadonlyRepo,
start_commits: &[CommitId],
) -> impl Iterator<Item = Result<CommitEvolutionEntry, WalkPredecessorsError>> + use<'repo> {
WalkPredecessors {
repo,
op_ancestors: op_walk::walk_ancestors(slice::from_ref(repo.operation())),
to_visit: start_commits.to_vec(),
queued: VecDeque::new(),
}
}
struct WalkPredecessors<'repo, I> {
repo: &'repo ReadonlyRepo,
op_ancestors: I,
to_visit: Vec<CommitId>,
queued: VecDeque<CommitEvolutionEntry>,
}
impl<I> WalkPredecessors<'_, I>
where
I: Iterator<Item = OpStoreResult<Operation>>,
{
fn try_next(&mut self) -> Result<Option<CommitEvolutionEntry>, WalkPredecessorsError> {
while !self.to_visit.is_empty() && self.queued.is_empty() {
let Some(op) = self.op_ancestors.next().transpose()? else {
// Scanned all operations, no fallback needed.
self.flush_commits()?;
break;
};
if !op.stores_commit_predecessors() {
// There may be concurrent ops, but let's simply switch to the
// legacy commit traversal. Operation history should be mostly
// linear.
self.scan_commits()?;
break;
}
self.visit_op(&op)?;
}
Ok(self.queued.pop_front())
}
/// Looks for predecessors within the given operation.
fn visit_op(&mut self, op: &Operation) -> Result<(), WalkPredecessorsError> {
let mut to_emit = Vec::new(); // transitive edges should be short
let mut has_dup = false;
let mut i = 0;
while let Some(cur_id) = self.to_visit.get(i) {
if let Some(next_ids) = op.predecessors_for_commit(cur_id) {
if to_emit.contains(cur_id) {
self.to_visit.remove(i);
has_dup = true;
continue;
}
to_emit.extend(self.to_visit.splice(i..=i, next_ids.iter().cloned()));
} else {
i += 1;
}
}
let store = self.repo.store();
let mut emit = |id: &CommitId| -> BackendResult<()> {
let commit = store.get_commit(id)?;
self.queued.push_back(CommitEvolutionEntry {
commit,
operation: Some(op.clone()),
reachable_predecessors: None,
});
Ok(())
};
match &*to_emit {
[] => {}
[id] if !has_dup => emit(id)?,
_ => {
let sorted_ids = dag_walk::topo_order_reverse_ok(
to_emit.iter().map(Ok),
|&id| id,
|&id| op.predecessors_for_commit(id).into_iter().flatten().map(Ok),
|id| id, // Err(&CommitId) if graph has cycle
)
.map_err(|id| WalkPredecessorsError::CycleDetected(id.clone()))?;
for &id in &sorted_ids {
if op.predecessors_for_commit(id).is_some() {
emit(id)?;
}
}
}
}
Ok(())
}
/// Traverses predecessors from remainder commits.
fn scan_commits(&mut self) -> Result<(), WalkPredecessorsError> {
let store = self.repo.store();
let index = self.repo.index();
let mut commit_predecessors: HashMap<CommitId, Vec<CommitId>> = HashMap::new();
let commits = dag_walk::topo_order_reverse_ok(
self.to_visit.drain(..).map(|id| {
store
.get_commit(&id)
.map_err(WalkPredecessorsError::Backend)
}),
|commit: &Commit| commit.id().clone(),
|commit: &Commit| {
let ids = match commit_predecessors.entry(commit.id().clone()) {
Entry::Occupied(entry) => entry.into_mut(),
Entry::Vacant(entry) => {
let mut filtered = vec![];
for id in &commit.store_commit().predecessors {
match index.has_id(id) {
Ok(true) => {
filtered.push(id.clone());
}
Ok(false) => {
// Ignore unreachable predecessors
}
Err(err) => {
return vec![Err(WalkPredecessorsError::Index(err))];
}
}
}
entry.insert(filtered)
}
};
ids.iter()
.map(|id| store.get_commit(id).map_err(WalkPredecessorsError::Backend))
.collect_vec()
},
|_| panic!("graph has cycle"),
)?;
self.queued.extend(commits.into_iter().map(|commit| {
let predecessors = commit_predecessors
.remove(commit.id())
.expect("commit must be visited once");
CommitEvolutionEntry {
commit,
operation: None,
reachable_predecessors: Some(predecessors),
}
}));
Ok(())
}
/// Moves remainder commits to output queue.
fn flush_commits(&mut self) -> BackendResult<()> {
self.queued.reserve(self.to_visit.len());
for id in self.to_visit.drain(..) {
let commit = self.repo.store().get_commit(&id)?;
self.queued.push_back(CommitEvolutionEntry {
commit,
operation: None,
// There were no legacy operations, so the commit should have no
// predecessors.
reachable_predecessors: Some(vec![]),
});
}
Ok(())
}
}
impl<I> Iterator for WalkPredecessors<'_, I>
where
I: Iterator<Item = OpStoreResult<Operation>>,
{
type Item = Result<CommitEvolutionEntry, WalkPredecessorsError>;
fn next(&mut self) -> Option<Self::Item> {
self.try_next().transpose()
}
}
/// Collects predecessor records from `new_ops` to `old_ops`, and resolves
/// transitive entries.
///
/// This function assumes that there exists a single greatest common ancestors
/// between `old_ops` and `new_ops`. If `old_ops` and `new_ops` have ancestors
/// and descendants each other, or if criss-crossed merges exist between these
/// operations, the returned mapping would be lossy.
pub fn accumulate_predecessors(
new_ops: &[Operation],
old_ops: &[Operation],
) -> Result<BTreeMap<CommitId, Vec<CommitId>>, WalkPredecessorsError> {
if new_ops.is_empty() || old_ops.is_empty() {
return Ok(BTreeMap::new()); // No common ancestor exists
}
// Fast path for the single forward operation case.
if let [op] = new_ops
&& op.parent_ids().iter().eq(old_ops.iter().map(|op| op.id()))
{
let Some(map) = &op.store_operation().commit_predecessors else {
return Ok(BTreeMap::new());
};
return resolve_transitive_edges(map, map.keys())
.map_err(|id| WalkPredecessorsError::CycleDetected(id.clone()));
}
// Follow reverse edges from the common ancestor to old_ops. Here we use
// BTreeMap to stabilize order of the reversed edges.
let mut accumulated = BTreeMap::new();
let reverse_ops = op_walk::walk_ancestors_range(old_ops, new_ops);
if !try_collect_predecessors_into(&mut accumulated, reverse_ops)? {
return Ok(BTreeMap::new());
}
let mut accumulated = reverse_edges(accumulated);
// Follow forward edges from new_ops to the common ancestor.
let forward_ops = op_walk::walk_ancestors_range(new_ops, old_ops);
if !try_collect_predecessors_into(&mut accumulated, forward_ops)? {
return Ok(BTreeMap::new());
}
let new_commit_ids = new_ops
.iter()
.filter_map(|op| op.store_operation().commit_predecessors.as_ref())
.flat_map(|map| map.keys());
resolve_transitive_edges(&accumulated, new_commit_ids)
.map_err(|id| WalkPredecessorsError::CycleDetected(id.clone()))
}
fn try_collect_predecessors_into(
collected: &mut BTreeMap<CommitId, Vec<CommitId>>,
ops: impl IntoIterator<Item = OpStoreResult<Operation>>,
) -> OpStoreResult<bool> {
for op in ops {
let op = op?;
let Some(map) = &op.store_operation().commit_predecessors else {
return Ok(false);
};
// Just insert. There should be no duplicate entries.
collected.extend(map.iter().map(|(k, v)| (k.clone(), v.clone())));
}
Ok(true)
}
/// Resolves transitive edges in `graph` starting from the `start` nodes,
/// returns new DAG. The returned DAG only includes edges reachable from the
/// `start` nodes.
fn resolve_transitive_edges<'a: 'b, 'b>(
graph: &'a BTreeMap<CommitId, Vec<CommitId>>,
start: impl IntoIterator<Item = &'b CommitId>,
) -> Result<BTreeMap<CommitId, Vec<CommitId>>, &'b CommitId> {
let mut new_graph: BTreeMap<CommitId, Vec<CommitId>> = BTreeMap::new();
let sorted_ids = dag_walk::topo_order_forward_ok(
start.into_iter().map(Ok),
|&id| id,
|&id| graph.get(id).into_iter().flatten().map(Ok),
|id| id, // Err(&CommitId) if graph has cycle
)?;
for cur_id in sorted_ids {
let Some(neighbors) = graph.get(cur_id) else {
continue;
};
let lookup = |id| new_graph.get(id).map_or(slice::from_ref(id), Vec::as_slice);
let new_neighbors = match &neighbors[..] {
[id] => lookup(id).to_vec(), // unique() not needed
ids => ids.iter().flat_map(lookup).unique().cloned().collect(),
};
new_graph.insert(cur_id.clone(), new_neighbors);
}
Ok(new_graph)
}
/// Builds the reversed graph: every edge `a -> b` in `graph` becomes
/// `b -> a` in the result. Nodes with no incoming edges in `graph` are
/// absent from the result.
fn reverse_edges(graph: BTreeMap<CommitId, Vec<CommitId>>) -> BTreeMap<CommitId, Vec<CommitId>> {
    let mut reversed: BTreeMap<CommitId, Vec<CommitId>> = BTreeMap::new();
    for (source, targets) in graph {
        for target in targets {
            reversed.entry(target).or_default().push(source.clone());
        }
    }
    reversed
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/conflict_labels.rs | lib/src/conflict_labels.rs | // Copyright 2025 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Labels for conflicted trees.
use std::fmt;
use crate::merge::Merge;
/// Optionally contains a set of labels for the terms of a conflict. Resolved
/// merges cannot be labeled.
#[derive(PartialEq, Eq, Clone)]
pub struct ConflictLabels {
// If the merge is resolved, the label must be empty.
labels: Merge<String>,
}
impl ConflictLabels {
/// Create a `ConflictLabels` with no labels.
pub const fn unlabeled() -> Self {
Self {
labels: Merge::resolved(String::new()),
}
}
/// Create a `ConflictLabels` from a `Merge<String>`. If the merge is
/// resolved, the labels will be discarded since resolved merges cannot have
/// labels.
pub fn from_merge(labels: Merge<String>) -> Self {
if labels.is_resolved() || labels.iter().all(|label| label.is_empty()) {
Self::unlabeled()
} else {
Self { labels }
}
}
/// Create a `ConflictLabels` from a `Vec<String>`, with an empty vec
/// representing no labels.
pub fn from_vec(labels: Vec<String>) -> Self {
if labels.is_empty() {
Self::unlabeled()
} else {
Self::from_merge(Merge::from_vec(labels))
}
}
/// Returns true if there are labels present.
pub fn has_labels(&self) -> bool {
!self.labels.is_resolved()
}
/// Returns the number of sides of the underlying merge if any terms have
/// labels, or `None` if there are no labels.
pub fn num_sides(&self) -> Option<usize> {
self.has_labels().then_some(self.labels.num_sides())
}
/// Returns the underlying `Merge<String>`.
pub fn as_merge(&self) -> &Merge<String> {
&self.labels
}
/// Extracts the underlying `Merge<String>`.
pub fn into_merge(self) -> Merge<String> {
self.labels
}
/// Returns the conflict labels as a slice. If there are no labels, returns
/// an empty slice.
pub fn as_slice(&self) -> &[String] {
if self.has_labels() {
self.labels.as_slice()
} else {
&[]
}
}
/// Get the label for a side at an index.
pub fn get_add(&self, add_index: usize) -> Option<&str> {
self.labels
.get_add(add_index)
.filter(|label| !label.is_empty())
.map(String::as_str)
}
/// Get the label for a base at an index.
pub fn get_remove(&self, remove_index: usize) -> Option<&str> {
self.labels
.get_remove(remove_index)
.filter(|label| !label.is_empty())
.map(String::as_str)
}
/// Simplify a merge with the same number of sides while preserving the
/// conflict labels corresponding to each side of the merge.
pub fn simplify_with<T: PartialEq + Clone>(&self, merge: &Merge<T>) -> (Self, Merge<T>) {
if self.has_labels() {
let (labels, simplified) = self
.labels
.as_ref()
.zip(merge.as_ref())
.simplify_by(|&(_label, item)| item)
.unzip();
(Self::from_merge(labels.cloned()), simplified.cloned())
} else {
let simplified = merge.simplify();
(Self::unlabeled(), simplified)
}
}
}
impl fmt::Debug for ConflictLabels {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.has_labels() {
f.debug_tuple("Labeled")
.field(&self.labels.as_slice())
.finish()
} else {
write!(f, "Unlabeled")
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_conflict_labels_from_vec() {
// From empty vec for unlabeled
assert_eq!(
ConflictLabels::from_vec(vec![]),
ConflictLabels::unlabeled()
);
// From non-empty vec of terms
assert_eq!(
ConflictLabels::from_vec(vec![
String::from("left"),
String::from("base"),
String::from("right")
]),
ConflictLabels::from_merge(Merge::from_vec(vec![
String::from("left"),
String::from("base"),
String::from("right")
]))
);
}
#[test]
fn test_conflict_labels_as_slice() {
// Empty slice for unlabeled
let empty: &[String] = &[];
assert_eq!(ConflictLabels::unlabeled().as_slice(), empty);
// Slice of terms for labeled
assert_eq!(
ConflictLabels::from_merge(Merge::from_vec(vec![
String::from("left"),
String::from("base"),
String::from("right")
]))
.as_slice(),
&[
String::from("left"),
String::from("base"),
String::from("right")
]
);
}
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/tree_builder.rs | lib/src/tree_builder.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::collections::BTreeMap;
use std::sync::Arc;
use pollster::FutureExt as _;
use crate::backend;
use crate::backend::BackendResult;
use crate::backend::TreeId;
use crate::backend::TreeValue;
use crate::repo_path::RepoPath;
use crate::repo_path::RepoPathBuf;
use crate::repo_path::RepoPathComponentBuf;
use crate::store::Store;
use crate::tree::Tree;
#[derive(Debug)]
enum Override {
Tombstone,
Replace(TreeValue),
}
#[derive(Debug)]
pub struct TreeBuilder {
store: Arc<Store>,
base_tree_id: TreeId,
overrides: BTreeMap<RepoPathBuf, Override>,
}
impl TreeBuilder {
pub fn new(store: Arc<Store>, base_tree_id: TreeId) -> Self {
let overrides = BTreeMap::new();
Self {
store,
base_tree_id,
overrides,
}
}
pub fn store(&self) -> &Store {
self.store.as_ref()
}
pub fn set(&mut self, path: RepoPathBuf, value: TreeValue) {
assert!(!path.is_root());
self.overrides.insert(path, Override::Replace(value));
}
pub fn remove(&mut self, path: RepoPathBuf) {
assert!(!path.is_root());
self.overrides.insert(path, Override::Tombstone);
}
pub fn set_or_remove(&mut self, path: RepoPathBuf, value: Option<TreeValue>) {
assert!(!path.is_root());
if let Some(value) = value {
self.overrides.insert(path, Override::Replace(value));
} else {
self.overrides.insert(path, Override::Tombstone);
}
}
pub fn write_tree(self) -> BackendResult<TreeId> {
if self.overrides.is_empty() {
return Ok(self.base_tree_id);
}
let mut trees_to_write = self.get_base_trees()?;
// Update entries in parent trees for file overrides
for (path, file_override) in self.overrides {
let (dir, basename) = path.split().unwrap();
let tree_entries = trees_to_write.get_mut(dir).unwrap();
match file_override {
Override::Replace(value) => {
tree_entries.insert(basename.to_owned(), value);
}
Override::Tombstone => {
tree_entries.remove(basename);
}
}
}
// Write trees in reverse lexicographical order, starting with trees without
// children.
// TODO: Writing trees concurrently should help on high-latency backends
let store = &self.store;
while let Some((dir, cur_entries)) = trees_to_write.pop_last() {
if let Some((parent, basename)) = dir.split() {
let parent_entries = trees_to_write.get_mut(parent).unwrap();
if cur_entries.is_empty() {
if let Some(TreeValue::Tree(_)) = parent_entries.get(basename) {
parent_entries.remove(basename);
} else {
// Entry would have been replaced with file (see above)
}
} else {
let data =
backend::Tree::from_sorted_entries(cur_entries.into_iter().collect());
let tree = store.write_tree(&dir, data).block_on()?;
parent_entries.insert(basename.to_owned(), TreeValue::Tree(tree.id().clone()));
}
} else {
// We're writing the root tree. Write it even if empty. Return its id.
assert!(trees_to_write.is_empty());
let data = backend::Tree::from_sorted_entries(cur_entries.into_iter().collect());
let written_tree = store.write_tree(&dir, data).block_on()?;
return Ok(written_tree.id().clone());
}
}
unreachable!("trees_to_write must contain the root tree");
}
fn get_base_trees(
&self,
) -> BackendResult<BTreeMap<RepoPathBuf, BTreeMap<RepoPathComponentBuf, TreeValue>>> {
let store = &self.store;
let mut tree_cache = {
let dir = RepoPathBuf::root();
let tree = store.get_tree(dir.clone(), &self.base_tree_id)?;
BTreeMap::from([(dir, tree)])
};
fn populate_trees<'a>(
tree_cache: &'a mut BTreeMap<RepoPathBuf, Tree>,
store: &Arc<Store>,
dir: &RepoPath,
) -> BackendResult<&'a Tree> {
// `if let Some(tree) = ...` doesn't pass lifetime check as of Rust 1.84.0
if tree_cache.contains_key(dir) {
return Ok(tree_cache.get(dir).unwrap());
}
let (parent, basename) = dir.split().expect("root must be populated");
let tree = populate_trees(tree_cache, store, parent)?
.sub_tree(basename)?
.unwrap_or_else(|| Tree::empty(store.clone(), dir.to_owned()));
Ok(tree_cache.entry(dir.to_owned()).or_insert(tree))
}
for path in self.overrides.keys() {
let parent = path.parent().unwrap();
populate_trees(&mut tree_cache, store, parent)?;
}
Ok(tree_cache
.into_iter()
.map(|(dir, tree)| {
let entries = tree
.data()
.entries()
.map(|entry| (entry.name().to_owned(), entry.value().clone()))
.collect();
(dir, entries)
})
.collect())
}
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/time_util.rs | lib/src/time_util.rs | // Copyright 2024 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Provides support for parsing and matching date ranges.
use chrono::DateTime;
use chrono::FixedOffset;
use chrono::Local;
use chrono::TimeZone;
use interim::DateError;
use interim::Dialect;
use interim::parse_date_string;
use thiserror::Error;
use crate::backend::MillisSinceEpoch;
use crate::backend::Timestamp;
/// Context needed to create a DatePattern during revset evaluation.
#[derive(Copy, Clone, Debug)]
pub enum DatePatternContext {
/// Interpret date patterns using the local machine's time zone
Local(DateTime<Local>),
/// Interpret date patterns using any FixedOffset time zone
Fixed(DateTime<FixedOffset>),
}
impl DatePatternContext {
/// Parses a DatePattern from the given string and kind.
pub fn parse_relative(
&self,
s: &str,
kind: &str,
) -> Result<DatePattern, DatePatternParseError> {
match *self {
Self::Local(dt) => DatePattern::from_str_kind(s, kind, dt),
Self::Fixed(dt) => DatePattern::from_str_kind(s, kind, dt),
}
}
}
impl From<DateTime<Local>> for DatePatternContext {
fn from(value: DateTime<Local>) -> Self {
Self::Local(value)
}
}
impl From<DateTime<FixedOffset>> for DatePatternContext {
fn from(value: DateTime<FixedOffset>) -> Self {
Self::Fixed(value)
}
}
/// Error occurred during date pattern parsing.
#[derive(Debug, Error)]
pub enum DatePatternParseError {
/// Unknown pattern kind is specified.
#[error("Invalid date pattern kind `{0}:`")]
InvalidKind(String),
/// Failed to parse timestamp.
#[error(transparent)]
ParseError(#[from] DateError),
}
/// Represents an range of dates that may be matched against.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum DatePattern {
/// Represents all dates at or after the given instant.
AtOrAfter(MillisSinceEpoch),
/// Represents all dates before, but not including, the given instant.
Before(MillisSinceEpoch),
}
impl DatePattern {
/// Parses a string into a DatePattern.
///
/// * `s` is the string to be parsed.
///
/// * `kind` must be either "after" or "before". This determines whether the
/// pattern will match dates after or before the parsed date.
///
/// * `now` is the user's current time. This is a [`DateTime<Tz>`] because
/// knowledge of offset changes is needed to correctly process relative
/// times like "today". For example, California entered DST on March 10,
/// 2024, shifting clocks from UTC-8 to UTC-7 at 2:00 AM. If the pattern
/// "today" was parsed at noon on that day, it should be interpreted as
/// 2024-03-10T00:00:00-08:00 even though the current offset is -07:00.
pub fn from_str_kind<Tz: TimeZone>(
s: &str,
kind: &str,
now: DateTime<Tz>,
) -> Result<Self, DatePatternParseError>
where
Tz::Offset: Copy,
{
let d =
parse_date_string(s, now, Dialect::Us).map_err(DatePatternParseError::ParseError)?;
let millis_since_epoch = MillisSinceEpoch(d.timestamp_millis());
match kind {
"after" => Ok(Self::AtOrAfter(millis_since_epoch)),
"before" => Ok(Self::Before(millis_since_epoch)),
kind => Err(DatePatternParseError::InvalidKind(kind.to_owned())),
}
}
/// Determines whether a given timestamp is matched by the pattern.
pub fn matches(&self, timestamp: &Timestamp) -> bool {
match self {
Self::AtOrAfter(earliest) => *earliest <= timestamp.timestamp,
Self::Before(latest) => timestamp.timestamp < *latest,
}
}
}
// @TODO ideally we would have this unified with the other parsing code. However
// we use the interim crate which does not handle explicitly given time zone
// information
/// Parse a string with time zone information into a `Timestamp`
pub fn parse_datetime(s: &str) -> chrono::ParseResult<Timestamp> {
Ok(Timestamp::from_datetime(
DateTime::parse_from_rfc2822(s).or_else(|_| DateTime::parse_from_rfc3339(s))?,
))
}
#[cfg(test)]
mod tests {
use super::*;
fn test_equal<Tz: TimeZone>(now: DateTime<Tz>, expression: &str, should_equal_time: &str)
where
Tz::Offset: Copy,
{
let expression = DatePattern::from_str_kind(expression, "after", now).unwrap();
assert_eq!(
expression,
DatePattern::AtOrAfter(MillisSinceEpoch(
DateTime::parse_from_rfc3339(should_equal_time)
.unwrap()
.timestamp_millis()
))
);
}
#[test]
fn test_date_pattern_parses_dates_without_times_as_the_date_at_local_midnight() {
let now = DateTime::parse_from_rfc3339("2024-01-01T00:00:00-08:00").unwrap();
test_equal(now, "2023-03-25", "2023-03-25T08:00:00Z");
test_equal(now, "3/25/2023", "2023-03-25T08:00:00Z");
test_equal(now, "3/25/23", "2023-03-25T08:00:00Z");
}
#[test]
fn test_date_pattern_parses_dates_with_times_without_specifying_an_offset() {
let now = DateTime::parse_from_rfc3339("2024-01-01T00:00:00-08:00").unwrap();
test_equal(now, "2023-03-25T00:00:00", "2023-03-25T08:00:00Z");
test_equal(now, "2023-03-25 00:00:00", "2023-03-25T08:00:00Z");
}
#[test]
fn test_date_pattern_parses_dates_with_a_specified_offset() {
let now = DateTime::parse_from_rfc3339("2024-01-01T00:00:00-08:00").unwrap();
test_equal(
now,
"2023-03-25T00:00:00-05:00",
"2023-03-25T00:00:00-05:00",
);
}
#[test]
fn test_date_pattern_parses_dates_with_the_z_offset() {
let now = DateTime::parse_from_rfc3339("2024-01-01T00:00:00-08:00").unwrap();
test_equal(now, "2023-03-25T00:00:00Z", "2023-03-25T00:00:00Z");
}
#[test]
fn test_date_pattern_parses_relative_durations() {
let now = DateTime::parse_from_rfc3339("2024-01-01T00:00:00-08:00").unwrap();
test_equal(now, "2 hours ago", "2024-01-01T06:00:00Z");
test_equal(now, "5 minutes", "2024-01-01T08:05:00Z");
test_equal(now, "1 week ago", "2023-12-25T08:00:00Z");
test_equal(now, "yesterday", "2023-12-31T08:00:00Z");
test_equal(now, "tomorrow", "2024-01-02T08:00:00Z");
}
#[test]
fn test_date_pattern_parses_relative_dates_with_times() {
let now = DateTime::parse_from_rfc3339("2024-01-01T08:00:00-08:00").unwrap();
test_equal(now, "yesterday 5pm", "2024-01-01T01:00:00Z");
test_equal(now, "yesterday 10am", "2023-12-31T18:00:00Z");
test_equal(now, "yesterday 10:30", "2023-12-31T18:30:00Z");
}
#[test]
fn test_parse_datetime_non_sense_yields_error() {
let parse_error = parse_datetime("aaaaa").err().unwrap();
assert_eq!(parse_error.kind(), chrono::format::ParseErrorKind::Invalid);
}
#[test]
fn test_parse_datetime_human_readable() {
// this is the example given in the help text for `jj metaedit
// --author-timestamp`
let timestamp = parse_datetime("2000-01-23T01:23:45-08:00").unwrap();
let human_readable = parse_datetime("Sun, 23 Jan 2000 01:23:45 PST").unwrap();
let human_readable_explicit = parse_datetime("Sun, 23 Jan 2000 01:23:45 -0800").unwrap();
assert_eq!(timestamp, human_readable);
assert_eq!(timestamp, human_readable_explicit);
}
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/op_store.rs | lib/src/op_store.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::any::Any;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::fmt::Debug;
use std::iter;
use std::sync::LazyLock;
use std::time::SystemTime;
use async_trait::async_trait;
use itertools::Itertools as _;
use thiserror::Error;
use crate::backend::CommitId;
use crate::backend::MillisSinceEpoch;
use crate::backend::Timestamp;
use crate::content_hash::ContentHash;
use crate::merge::Merge;
use crate::object_id::HexPrefix;
use crate::object_id::ObjectId as _;
use crate::object_id::PrefixResolution;
use crate::object_id::id_type;
use crate::ref_name::GitRefNameBuf;
use crate::ref_name::RefName;
use crate::ref_name::RefNameBuf;
use crate::ref_name::RemoteName;
use crate::ref_name::RemoteNameBuf;
use crate::ref_name::RemoteRefSymbol;
use crate::ref_name::WorkspaceNameBuf;
id_type!(pub ViewId { hex() });
id_type!(pub OperationId { hex() });
#[derive(ContentHash, PartialEq, Eq, Hash, Clone, Debug, serde::Serialize)]
#[serde(transparent)]
pub struct RefTarget {
merge: Merge<Option<CommitId>>,
}
impl Default for RefTarget {
fn default() -> Self {
Self::absent()
}
}
impl RefTarget {
/// Creates non-conflicting target pointing to no commit.
pub fn absent() -> Self {
Self::from_merge(Merge::absent())
}
/// Returns non-conflicting target pointing to no commit.
///
/// This will typically be used in place of `None` returned by map lookup.
pub fn absent_ref() -> &'static Self {
static TARGET: LazyLock<RefTarget> = LazyLock::new(RefTarget::absent);
&TARGET
}
/// Creates non-conflicting target that optionally points to a commit.
pub fn resolved(maybe_id: Option<CommitId>) -> Self {
Self::from_merge(Merge::resolved(maybe_id))
}
/// Creates non-conflicting target pointing to a commit.
pub fn normal(id: CommitId) -> Self {
Self::from_merge(Merge::normal(id))
}
/// Creates target from removed/added ids.
pub fn from_legacy_form(
removed_ids: impl IntoIterator<Item = CommitId>,
added_ids: impl IntoIterator<Item = CommitId>,
) -> Self {
Self::from_merge(Merge::from_legacy_form(removed_ids, added_ids))
}
pub fn from_merge(merge: Merge<Option<CommitId>>) -> Self {
Self { merge }
}
/// Returns the underlying value if this target is non-conflicting.
pub fn as_resolved(&self) -> Option<&Option<CommitId>> {
self.merge.as_resolved()
}
/// Returns id if this target is non-conflicting and points to a commit.
pub fn as_normal(&self) -> Option<&CommitId> {
self.merge.as_normal()
}
/// Returns true if this target points to no commit.
pub fn is_absent(&self) -> bool {
self.merge.is_absent()
}
/// Returns true if this target points to any commit. Conflicting target is
/// always "present" as it should have at least one commit id.
pub fn is_present(&self) -> bool {
self.merge.is_present()
}
/// Whether this target has conflicts.
pub fn has_conflict(&self) -> bool {
!self.merge.is_resolved()
}
pub fn removed_ids(&self) -> impl Iterator<Item = &CommitId> {
self.merge.removes().flatten()
}
pub fn added_ids(&self) -> impl Iterator<Item = &CommitId> {
self.merge.adds().flatten()
}
pub fn as_merge(&self) -> &Merge<Option<CommitId>> {
&self.merge
}
}
/// Remote bookmark or tag.
#[derive(ContentHash, Clone, Debug, Eq, Hash, PartialEq)]
pub struct RemoteRef {
pub target: RefTarget,
pub state: RemoteRefState,
}
impl RemoteRef {
/// Creates remote ref pointing to no commit.
pub fn absent() -> Self {
Self {
target: RefTarget::absent(),
state: RemoteRefState::New,
}
}
/// Returns remote ref pointing to no commit.
///
/// This will typically be used in place of `None` returned by map lookup.
pub fn absent_ref() -> &'static Self {
static TARGET: LazyLock<RemoteRef> = LazyLock::new(RemoteRef::absent);
&TARGET
}
/// Returns true if the target points to no commit.
pub fn is_absent(&self) -> bool {
self.target.is_absent()
}
/// Returns true if the target points to any commit.
pub fn is_present(&self) -> bool {
self.target.is_present()
}
/// Returns true if the ref is supposed to be merged in to the local ref.
pub fn is_tracked(&self) -> bool {
self.state == RemoteRefState::Tracked
}
/// Target that should have been merged in to the local ref.
///
/// Use this as the base or known target when merging new remote ref in to
/// local or pushing local ref to remote.
pub fn tracked_target(&self) -> &RefTarget {
if self.is_tracked() {
&self.target
} else {
RefTarget::absent_ref()
}
}
}
/// Whether the ref is tracked or not.
#[derive(ContentHash, Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum RemoteRefState {
/// Remote ref is not merged in to the local ref.
New,
/// Remote ref has been merged in to the local ref. Incoming ref will be
/// merged, too.
Tracked,
}
/// Helper to strip redundant `Option<T>` from `RefTarget` lookup result.
pub trait RefTargetOptionExt {
type Value;
fn flatten(self) -> Self::Value;
}
impl RefTargetOptionExt for Option<RefTarget> {
type Value = RefTarget;
fn flatten(self) -> Self::Value {
self.unwrap_or_else(RefTarget::absent)
}
}
impl<'a> RefTargetOptionExt for Option<&'a RefTarget> {
type Value = &'a RefTarget;
fn flatten(self) -> Self::Value {
self.unwrap_or_else(|| RefTarget::absent_ref())
}
}
impl RefTargetOptionExt for Option<RemoteRef> {
type Value = RemoteRef;
fn flatten(self) -> Self::Value {
self.unwrap_or_else(RemoteRef::absent)
}
}
impl<'a> RefTargetOptionExt for Option<&'a RemoteRef> {
type Value = &'a RemoteRef;
fn flatten(self) -> Self::Value {
self.unwrap_or_else(|| RemoteRef::absent_ref())
}
}
/// Local and remote refs of the same name.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct LocalRemoteRefTarget<'a> {
/// The commit the ref points to locally.
pub local_target: &'a RefTarget,
/// `(remote_name, remote_ref)` pairs in lexicographical order.
pub remote_refs: Vec<(&'a RemoteName, &'a RemoteRef)>,
}
/// Represents the way the repo looks at a given time, just like how a Tree
/// object represents how the file system looks at a given time.
#[derive(ContentHash, PartialEq, Eq, Clone, Debug)]
pub struct View {
/// All head commits. There should be at least one head commit.
pub head_ids: HashSet<CommitId>,
pub local_bookmarks: BTreeMap<RefNameBuf, RefTarget>,
pub local_tags: BTreeMap<RefNameBuf, RefTarget>,
pub remote_views: BTreeMap<RemoteNameBuf, RemoteView>,
pub git_refs: BTreeMap<GitRefNameBuf, RefTarget>,
/// The commit the Git HEAD points to.
// TODO: Support multiple Git worktrees?
// TODO: Do we want to store the current bookmark name too?
pub git_head: RefTarget,
// The commit that *should be* checked out in the workspace. Note that the working copy
// (.jj/working_copy/) has the source of truth about which commit *is* checked out (to be
// precise: the commit to which we most recently completed an update to).
pub wc_commit_ids: BTreeMap<WorkspaceNameBuf, CommitId>,
}
impl View {
/// Creates new (mostly empty) view containing the given commit as the head.
pub fn make_root(root_commit_id: CommitId) -> Self {
Self {
head_ids: HashSet::from([root_commit_id]),
local_bookmarks: BTreeMap::new(),
local_tags: BTreeMap::new(),
remote_views: BTreeMap::new(),
git_refs: BTreeMap::new(),
git_head: RefTarget::absent(),
wc_commit_ids: BTreeMap::new(),
}
}
}
/// Represents the state of the remote repo.
#[derive(ContentHash, Clone, Debug, Default, Eq, PartialEq)]
pub struct RemoteView {
// TODO: Do we need to support tombstones for remote bookmarks? For example, if the bookmark
// has been deleted locally and you pull from a remote, maybe it should make a difference
// whether the bookmark is known to have existed on the remote. We may not want to resurrect
// the bookmark if the bookmark's state on the remote was just not known.
pub bookmarks: BTreeMap<RefNameBuf, RemoteRef>,
pub tags: BTreeMap<RefNameBuf, RemoteRef>,
}
/// Iterates pair of local and remote refs by name.
pub(crate) fn merge_join_ref_views<'a>(
local_refs: &'a BTreeMap<RefNameBuf, RefTarget>,
remote_views: &'a BTreeMap<RemoteNameBuf, RemoteView>,
get_remote_refs: impl FnMut(&RemoteView) -> &BTreeMap<RefNameBuf, RemoteRef>,
) -> impl Iterator<Item = (&'a RefName, LocalRemoteRefTarget<'a>)> {
let mut local_refs_iter = local_refs
.iter()
.map(|(name, target)| (&**name, target))
.peekable();
let mut remote_refs_iter = flatten_remote_refs(remote_views, get_remote_refs).peekable();
iter::from_fn(move || {
// Pick earlier bookmark name
let (name, local_target) = if let Some((symbol, _)) = remote_refs_iter.peek() {
local_refs_iter
.next_if(|&(local_name, _)| local_name <= symbol.name)
.unwrap_or((symbol.name, RefTarget::absent_ref()))
} else {
local_refs_iter.next()?
};
let remote_refs = remote_refs_iter
.peeking_take_while(|(symbol, _)| symbol.name == name)
.map(|(symbol, remote_ref)| (symbol.remote, remote_ref))
.collect();
let local_remote_target = LocalRemoteRefTarget {
local_target,
remote_refs,
};
Some((name, local_remote_target))
})
}
/// Iterates `(symbol, remote_ref)`s in lexicographical order.
pub(crate) fn flatten_remote_refs(
remote_views: &BTreeMap<RemoteNameBuf, RemoteView>,
mut get_remote_refs: impl FnMut(&RemoteView) -> &BTreeMap<RefNameBuf, RemoteRef>,
) -> impl Iterator<Item = (RemoteRefSymbol<'_>, &RemoteRef)> {
remote_views
.iter()
.map(|(remote, remote_view)| {
get_remote_refs(remote_view)
.iter()
.map(move |(name, remote_ref)| (name.to_remote_symbol(remote), remote_ref))
})
.kmerge_by(|(symbol1, _), (symbol2, _)| symbol1 < symbol2)
}
#[derive(Clone, ContentHash, Debug, Eq, PartialEq, serde::Serialize)]
pub struct TimestampRange {
// Could be aliased to Range<Timestamp> if needed.
pub start: Timestamp,
pub end: Timestamp,
}
/// Represents an operation (transaction) on the repo view, just like how a
/// Commit object represents an operation on the tree.
///
/// Operations and views are not meant to be exchanged between repos or users;
/// they represent local state and history.
///
/// The operation history will almost always be linear. It will only have
/// forks when parallel operations occurred. The parent is determined when
/// the transaction starts. When the transaction commits, a lock will be
/// taken and it will be checked that the current head of the operation
/// graph is unchanged. If the current head has changed, there has been
/// concurrent operation.
#[derive(ContentHash, PartialEq, Eq, Clone, Debug, serde::Serialize)]
pub struct Operation {
#[serde(skip)] // TODO: should be exposed?
pub view_id: ViewId,
pub parents: Vec<OperationId>,
#[serde(flatten)]
pub metadata: OperationMetadata,
/// Mapping from new commit to its predecessors, or `None` if predecessors
/// weren't recorded when the operation was written.
///
/// * `commit_id: []` if the commit was newly created.
/// * `commit_id: [predecessor_id, ..]` if the commit was rewritten.
///
/// This mapping preserves all transitive predecessors if a commit was
/// rewritten multiple times within the same transaction. For example, if
/// `X` was rewritten as `Y`, then rebased as `Z`, these modifications are
/// recorded as `{Y: [X], Z: [Y]}`.
///
/// Existing commits (including commits imported from Git) aren't tracked
/// even if they became visible at this operation.
// BTreeMap for ease of deterministic serialization. If the deserialization
// cost matters, maybe this can be changed to sorted Vec.
#[serde(skip)] // TODO: should be exposed?
pub commit_predecessors: Option<BTreeMap<CommitId, Vec<CommitId>>>,
}
impl Operation {
pub fn make_root(root_view_id: ViewId) -> Self {
let timestamp = Timestamp {
timestamp: MillisSinceEpoch(0),
tz_offset: 0,
};
let metadata = OperationMetadata {
time: TimestampRange {
start: timestamp,
end: timestamp,
},
description: "".to_string(),
hostname: "".to_string(),
username: "".to_string(),
is_snapshot: false,
tags: HashMap::new(),
};
Self {
view_id: root_view_id,
parents: vec![],
metadata,
// The root operation is guaranteed to have no new commits. The root
// commit could be considered born at the root operation, but there
// may be other commits created within the abandoned operations.
// They don't have any predecessors records as well.
commit_predecessors: Some(BTreeMap::new()),
}
}
}
#[derive(ContentHash, PartialEq, Eq, Clone, Debug, serde::Serialize)]
pub struct OperationMetadata {
pub time: TimestampRange,
// Whatever is useful to the user, such as exact command line call
pub description: String,
pub hostname: String,
pub username: String,
/// Whether this operation represents a pure snapshotting of the working
/// copy.
pub is_snapshot: bool,
pub tags: HashMap<String, String>,
}
/// Data to be loaded into the root operation/view.
#[derive(Clone, Debug)]
pub struct RootOperationData {
/// The root commit ID, which should exist in the root view.
pub root_commit_id: CommitId,
}
#[derive(Debug, Error)]
pub enum OpStoreError {
#[error("Object {hash} of type {object_type} not found")]
ObjectNotFound {
object_type: String,
hash: String,
source: Box<dyn std::error::Error + Send + Sync>,
},
#[error("Error when reading object {hash} of type {object_type}")]
ReadObject {
object_type: String,
hash: String,
source: Box<dyn std::error::Error + Send + Sync>,
},
#[error("Could not write object of type {object_type}")]
WriteObject {
object_type: &'static str,
source: Box<dyn std::error::Error + Send + Sync>,
},
#[error(transparent)]
Other(Box<dyn std::error::Error + Send + Sync>),
}
pub type OpStoreResult<T> = Result<T, OpStoreError>;
#[async_trait]
pub trait OpStore: Any + Send + Sync + Debug {
fn name(&self) -> &str;
fn root_operation_id(&self) -> &OperationId;
async fn read_view(&self, id: &ViewId) -> OpStoreResult<View>;
async fn write_view(&self, contents: &View) -> OpStoreResult<ViewId>;
async fn read_operation(&self, id: &OperationId) -> OpStoreResult<Operation>;
async fn write_operation(&self, contents: &Operation) -> OpStoreResult<OperationId>;
/// Resolves an unambiguous operation ID prefix.
async fn resolve_operation_id_prefix(
&self,
prefix: &HexPrefix,
) -> OpStoreResult<PrefixResolution<OperationId>>;
/// Prunes unreachable operations and views.
///
/// All operations and views reachable from the `head_ids` won't be
/// removed. In addition to that, objects created after `keep_newer` will be
/// preserved. This mitigates a risk of deleting new heads created
/// concurrently by another process.
// TODO: return stats?
fn gc(&self, head_ids: &[OperationId], keep_newer: SystemTime) -> OpStoreResult<()>;
}
impl dyn OpStore {
/// Returns reference of the implementation type.
pub fn downcast_ref<T: OpStore>(&self) -> Option<&T> {
(self as &dyn Any).downcast_ref()
}
}
#[cfg(test)]
mod tests {
    use maplit::btreemap;

    use super::*;

    /// Checks that `merge_join_ref_views()` pairs each local bookmark with all
    /// of its remote counterparts, and yields remote-only and local-only
    /// bookmarks correctly.
    #[test]
    fn test_merge_join_bookmark_views() {
        let remote_ref = |target: &RefTarget| RemoteRef {
            target: target.clone(),
            state: RemoteRefState::Tracked, // doesn't matter
        };
        let local_bookmark1_target = RefTarget::normal(CommitId::from_hex("111111"));
        let local_bookmark2_target = RefTarget::normal(CommitId::from_hex("222222"));
        let git_bookmark1_remote_ref = remote_ref(&RefTarget::normal(CommitId::from_hex("333333")));
        let git_bookmark2_remote_ref = remote_ref(&RefTarget::normal(CommitId::from_hex("444444")));
        let remote1_bookmark1_remote_ref =
            remote_ref(&RefTarget::normal(CommitId::from_hex("555555")));
        let remote2_bookmark2_remote_ref =
            remote_ref(&RefTarget::normal(CommitId::from_hex("666666")));

        // Both local and remote sides present.
        let local_bookmarks = btreemap! {
            "bookmark1".into() => local_bookmark1_target.clone(),
            "bookmark2".into() => local_bookmark2_target.clone(),
        };
        let remote_views = btreemap! {
            "git".into() => RemoteView {
                bookmarks: btreemap! {
                    "bookmark1".into() => git_bookmark1_remote_ref.clone(),
                    "bookmark2".into() => git_bookmark2_remote_ref.clone(),
                },
                tags: btreemap! {},
            },
            "remote1".into() => RemoteView {
                bookmarks: btreemap! {
                    "bookmark1".into() => remote1_bookmark1_remote_ref.clone(),
                },
                tags: btreemap! {},
            },
            "remote2".into() => RemoteView {
                bookmarks: btreemap! {
                    "bookmark2".into() => remote2_bookmark2_remote_ref.clone(),
                },
                tags: btreemap! {},
            },
        };
        assert_eq!(
            merge_join_ref_views(&local_bookmarks, &remote_views, |view| &view.bookmarks)
                .collect_vec(),
            vec![
                (
                    "bookmark1".as_ref(),
                    LocalRemoteRefTarget {
                        local_target: &local_bookmark1_target,
                        remote_refs: vec![
                            ("git".as_ref(), &git_bookmark1_remote_ref),
                            ("remote1".as_ref(), &remote1_bookmark1_remote_ref),
                        ],
                    },
                ),
                (
                    "bookmark2".as_ref(),
                    LocalRemoteRefTarget {
                        // Borrow directly instead of `&...clone()`: the clone
                        // was redundant (it only created a borrowed temporary
                        // living for this statement anyway).
                        local_target: &local_bookmark2_target,
                        remote_refs: vec![
                            ("git".as_ref(), &git_bookmark2_remote_ref),
                            ("remote2".as_ref(), &remote2_bookmark2_remote_ref),
                        ],
                    },
                ),
            ],
        );

        // Local only
        let local_bookmarks = btreemap! {
            "bookmark1".into() => local_bookmark1_target.clone(),
        };
        let remote_views = btreemap! {};
        assert_eq!(
            merge_join_ref_views(&local_bookmarks, &remote_views, |view| &view.bookmarks)
                .collect_vec(),
            vec![(
                "bookmark1".as_ref(),
                LocalRemoteRefTarget {
                    local_target: &local_bookmark1_target,
                    remote_refs: vec![]
                },
            )],
        );

        // Remote only
        let local_bookmarks = btreemap! {};
        let remote_views = btreemap! {
            "remote1".into() => RemoteView {
                bookmarks: btreemap! {
                    "bookmark1".into() => remote1_bookmark1_remote_ref.clone(),
                },
                tags: btreemap! {},
            },
        };
        assert_eq!(
            merge_join_ref_views(&local_bookmarks, &remote_views, |view| &view.bookmarks)
                .collect_vec(),
            vec![(
                "bookmark1".as_ref(),
                LocalRemoteRefTarget {
                    local_target: RefTarget::absent_ref(),
                    remote_refs: vec![("remote1".as_ref(), &remote1_bookmark1_remote_ref)],
                },
            )],
        );
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/fsmonitor.rs | lib/src/fsmonitor.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Filesystem monitor tool interface.
//!
//! Interfaces with a filesystem monitor tool to efficiently query for
//! filesystem updates, without having to crawl the entire working copy. This is
//! particularly useful for large working copies, or for working copies for
//! which it's expensive to materialize files, such those backed by a network or
//! virtualized filesystem.
#![warn(missing_docs)]
use std::path::PathBuf;
use crate::config::ConfigGetError;
use crate::settings::UserSettings;
/// Config for Watchman filesystem monitor (<https://facebook.github.io/watchman/>).
#[derive(Eq, PartialEq, Clone, Debug)]
pub struct WatchmanConfig {
    /// Whether to use triggers to monitor for changes in the background.
    ///
    /// When enabled, `Fsmonitor::init()` registers a Watchman trigger that
    /// runs `jj debug snapshot`; when disabled, any such trigger is removed.
    pub register_trigger: bool,
}
/// The recognized kinds of filesystem monitors.
///
/// Constructed from the `fsmonitor.backend` config value via
/// [`FsmonitorSettings::from_settings()`].
#[derive(Eq, PartialEq, Clone, Debug)]
pub enum FsmonitorSettings {
    /// The Watchman filesystem monitor (<https://facebook.github.io/watchman/>).
    Watchman(WatchmanConfig),
    /// Only used in tests.
    Test {
        /// The set of changed files to pretend that the filesystem monitor is
        /// reporting.
        changed_files: Vec<PathBuf>,
    },
    /// No filesystem monitor. This is the default if nothing is configured, but
    /// also makes it possible to turn off the monitor on a case-by-case basis
    /// when the user gives an option like `--config=fsmonitor.backend=none`;
    /// useful when e.g. doing analysis of snapshot performance.
    None,
}
impl FsmonitorSettings {
    /// Creates an `FsmonitorSettings` from a `config`.
    ///
    /// Recognized `fsmonitor.backend` values are `"watchman"`, `"none"`, and
    /// (rejected outside of tests) `"test"`; anything else is a type error.
    pub fn from_settings(settings: &UserSettings) -> Result<Self, ConfigGetError> {
        const NAME: &str = "fsmonitor.backend";
        let backend = settings.get_string(NAME)?;
        match backend.as_ref() {
            "watchman" => {
                let register_trigger =
                    settings.get_bool("fsmonitor.watchman.register-snapshot-trigger")?;
                Ok(Self::Watchman(WatchmanConfig { register_trigger }))
            }
            // The test monitor cannot be configured from settings.
            "test" => Err(ConfigGetError::Type {
                name: NAME.to_owned(),
                error: "Cannot use test fsmonitor in real repository".into(),
                source_path: None,
            }),
            "none" => Ok(Self::None),
            other => Err(ConfigGetError::Type {
                name: NAME.to_owned(),
                error: format!("Unknown fsmonitor kind: {other}").into(),
                source_path: None,
            }),
        }
    }
}
/// Filesystem monitor integration using Watchman
/// (<https://facebook.github.io/watchman/>). Requires `watchman` to already be
/// installed on the system.
#[cfg(feature = "watchman")]
pub mod watchman {
use std::path::Path;
use std::path::PathBuf;
use itertools::Itertools as _;
use thiserror::Error;
use tracing::info;
use tracing::instrument;
use watchman_client::expr;
use watchman_client::prelude::Clock as InnerClock;
use watchman_client::prelude::ClockSpec;
use watchman_client::prelude::NameOnly;
use watchman_client::prelude::QueryRequestCommon;
use watchman_client::prelude::QueryResult;
use watchman_client::prelude::TriggerRequest;
/// Represents an instance in time from the perspective of the filesystem
/// monitor.
///
/// This can be used to perform incremental queries. When making a query,
/// the result will include an associated "clock" representing the time
/// that the query was made. By passing the same clock into a future
/// query, we inform the filesystem monitor that we only wish to get
/// changed files since the previous point in time.
///
/// Converted to/from `crate::protos::local_working_copy::WatchmanClock` via
/// the `From` impls below.
#[derive(Clone, Debug)]
pub struct Clock(InnerClock);
impl From<crate::protos::local_working_copy::WatchmanClock> for Clock {
    /// Deserializes a clock from its protobuf representation.
    fn from(clock: crate::protos::local_working_copy::WatchmanClock) -> Self {
        use crate::protos::local_working_copy::watchman_clock::WatchmanClock;
        // The oneof field is expected to be populated whenever a clock was
        // persisted.
        let inner = match clock.watchman_clock.unwrap() {
            WatchmanClock::StringClock(s) => InnerClock::Spec(ClockSpec::StringClock(s)),
            WatchmanClock::UnixTimestamp(ts) => InnerClock::Spec(ClockSpec::UnixTimestamp(ts)),
        };
        Self(inner)
    }
}
impl From<Clock> for crate::protos::local_working_copy::WatchmanClock {
    /// Serializes a clock into its protobuf representation.
    fn from(Clock(inner): Clock) -> Self {
        use crate::protos::local_working_copy::watchman_clock;
        let watchman_clock = match inner {
            InnerClock::Spec(ClockSpec::StringClock(s)) => {
                watchman_clock::WatchmanClock::StringClock(s)
            }
            InnerClock::Spec(ClockSpec::UnixTimestamp(ts)) => {
                watchman_clock::WatchmanClock::UnixTimestamp(ts)
            }
            InnerClock::ScmAware(_) => {
                unimplemented!("SCM-aware Watchman clocks not supported")
            }
        };
        Self {
            watchman_clock: Some(watchman_clock),
        }
    }
}
// Variant docs are intentionally omitted (`expect(missing_docs)`): the
// `#[error]` messages serve as the documentation.
#[expect(missing_docs)]
#[derive(Debug, Error)]
pub enum Error {
    #[error("Could not connect to Watchman")]
    WatchmanConnectError(#[source] watchman_client::Error),
    #[error("Could not canonicalize working copy root path")]
    CanonicalizeRootError(#[source] std::io::Error),
    #[error("Watchman failed to resolve the working copy root path")]
    ResolveRootError(#[source] watchman_client::Error),
    #[error("Failed to query Watchman")]
    WatchmanQueryError(#[source] watchman_client::Error),
    #[error("Failed to register Watchman trigger")]
    WatchmanTriggerError(#[source] watchman_client::Error),
}
/// Handle to the underlying Watchman instance.
pub struct Fsmonitor {
    /// Connected Watchman client (established in [`Fsmonitor::init()`]).
    client: watchman_client::Client,
    /// The working-copy root as resolved by the Watchman server.
    resolved_root: watchman_client::ResolvedRoot,
}
impl Fsmonitor {
/// Initialize the Watchman filesystem monitor. If it's not already
/// running, this will start it and have it crawl the working
/// copy to build up its in-memory representation of the
/// filesystem, which may take some time.
#[instrument]
pub async fn init(
    working_copy_path: &Path,
    config: &super::WatchmanConfig,
) -> Result<Self, Error> {
    info!("Initializing Watchman filesystem monitor...");
    let client = watchman_client::Connector::new()
        .connect()
        .await
        .map_err(Error::WatchmanConnectError)?;
    let canonical_root = watchman_client::CanonicalPath::canonicalize(working_copy_path)
        .map_err(Error::CanonicalizeRootError)?;
    let resolved_root = client
        .resolve_root(canonical_root)
        .await
        .map_err(Error::ResolveRootError)?;
    let monitor = Self {
        client,
        resolved_root,
    };
    // Registering the trigger causes an unconditional evaluation of the query,
    // so test if it is already registered first.
    if !config.register_trigger {
        monitor.unregister_trigger().await?;
    } else if !monitor.is_trigger_registered().await? {
        monitor.register_trigger().await?;
    }
    Ok(monitor)
}
/// Query for changed files since the previous point in time.
///
/// The returned list of paths is relative to the `working_copy_path`.
/// If it is `None`, then the caller must crawl the entire working copy
/// themselves.
#[instrument(skip(self))]
pub async fn query_changed_files(
    &self,
    previous_clock: Option<Clock>,
) -> Result<(Clock, Option<Vec<PathBuf>>), Error> {
    // TODO: might be better to specify query options by caller, but we
    // shouldn't expose the underlying watchman API too much.
    info!("Querying Watchman for changed files...");
    // Destructure every response field explicitly (no `..`) so that a new
    // field added by a client-library upgrade causes a compile error here
    // rather than being silently ignored.
    let QueryResult {
        version: _,
        is_fresh_instance,
        files,
        clock,
        state_enter: _,
        state_leave: _,
        state_metadata: _,
        saved_state_info: _,
        debug: _,
    }: QueryResult<NameOnly> = self
        .client
        .query(
            &self.resolved_root,
            QueryRequestCommon {
                // Incremental query when we have a previous clock; without
                // one, Watchman reports a fresh instance.
                since: previous_clock.map(|Clock(clock)| clock),
                expression: Some(self.build_exclude_expr()),
                ..Default::default()
            },
        )
        .await
        .map_err(Error::WatchmanQueryError)?;
    let clock = Clock(clock);
    if is_fresh_instance {
        // The Watchman documentation states that if it was a fresh
        // instance, we need to delete any tree entries that didn't appear
        // in the returned list of changed files. For now, the caller will
        // handle this by manually crawling the working copy again.
        Ok((clock, None))
    } else {
        let paths = files
            .unwrap_or_default()
            .into_iter()
            .map(|NameOnly { name }| name.into_inner())
            .collect_vec();
        Ok((clock, Some(paths)))
    }
}
/// Return whether or not a trigger has been registered already.
#[instrument(skip(self))]
pub async fn is_trigger_registered(&self) -> Result<bool, Error> {
info!("Checking for an existing Watchman trigger...");
Ok(self
.client
.list_triggers(&self.resolved_root)
.await
.map_err(Error::WatchmanTriggerError)?
.triggers
.iter()
.any(|t| t.name == "jj-background-monitor"))
}
/// Register trigger for changed files.
///
/// The trigger runs `jj debug snapshot` whenever a non-excluded file
/// changes.
#[instrument(skip(self))]
async fn register_trigger(&self) -> Result<(), Error> {
    info!("Registering Watchman trigger...");
    let request = TriggerRequest {
        name: "jj-background-monitor".to_string(),
        command: ["jj", "debug", "snapshot"]
            .iter()
            .map(|s| s.to_string())
            .collect(),
        expression: Some(self.build_exclude_expr()),
        ..Default::default()
    };
    self.client
        .register_trigger(&self.resolved_root, request)
        .await
        .map_err(Error::WatchmanTriggerError)?;
    Ok(())
}
/// Unregister the "jj-background-monitor" trigger previously created by
/// [`Self::register_trigger()`].
// (The doc comment previously said "Register", a copy-paste error.)
#[instrument(skip(self))]
async fn unregister_trigger(&self) -> Result<(), Error> {
    info!("Unregistering Watchman trigger...");
    self.client
        .remove_trigger(&self.resolved_root, "jj-background-monitor")
        .await
        .map_err(Error::WatchmanTriggerError)?;
    Ok(())
}
/// Build an exclude expr for `working_copy_path`.
///
/// Matches everything except the `.git` and `.jj` directories and their
/// contents.
fn build_exclude_expr(&self) -> expr::Expr {
    // TODO: consider parsing `.gitignore`.
    let exclude_dirs = [Path::new(".git"), Path::new(".jj")];
    // The directories themselves...
    let mut excludes = vec![expr::Expr::Name(expr::NameTerm {
        paths: exclude_dirs.iter().map(|&name| name.to_owned()).collect(),
        wholename: true,
    })];
    // ...and all files under the directories.
    excludes.extend(exclude_dirs.iter().map(|&name| {
        expr::Expr::DirName(expr::DirNameTerm {
            path: name.to_owned(),
            depth: None,
        })
    }));
    expr::Expr::Not(Box::new(expr::Expr::Any(excludes)))
}
}
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/file_util.rs | lib/src/file_util.rs | // Copyright 2021 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::borrow::Cow;
use std::ffi::OsString;
use std::fs;
use std::fs::File;
use std::io;
use std::io::ErrorKind;
use std::io::Read;
use std::io::Write;
use std::path::Component;
use std::path::Path;
use std::path::PathBuf;
use std::pin::Pin;
use std::task::Poll;
use tempfile::NamedTempFile;
use tempfile::PersistError;
use thiserror::Error;
use tokio::io::AsyncRead;
use tokio::io::AsyncReadExt as _;
use tokio::io::ReadBuf;
#[cfg(unix)]
pub use self::platform::check_executable_bit_support;
pub use self::platform::check_symlink_support;
pub use self::platform::symlink_dir;
pub use self::platform::symlink_file;
/// An I/O error annotated with the path being accessed.
///
/// Usually produced via [`IoResultExt::context()`].
#[derive(Debug, Error)]
#[error("Cannot access {path}")]
pub struct PathError {
    /// The path that could not be accessed.
    pub path: PathBuf,
    /// The underlying I/O error.
    pub source: io::Error,
}
/// Extension trait to attach a path to an [`io::Error`].
pub trait IoResultExt<T> {
    /// Wraps the error value, if any, in a [`PathError`] recording `path`.
    fn context(self, path: impl AsRef<Path>) -> Result<T, PathError>;
}
impl<T> IoResultExt<T> for io::Result<T> {
    fn context(self, path: impl AsRef<Path>) -> Result<T, PathError> {
        // Success values pass through untouched; errors gain the path.
        match self {
            Ok(value) => Ok(value),
            Err(source) => Err(PathError {
                path: path.as_ref().to_path_buf(),
                source,
            }),
        }
    }
}
/// Creates a directory or does nothing if the directory already exists.
///
/// Returns the underlying error if the directory can't be created.
/// The function will also fail if intermediate directories on the path do not
/// already exist.
pub fn create_or_reuse_dir(dirname: &Path) -> io::Result<()> {
    if let Err(err) = fs::create_dir(dirname) {
        // Racy creation by another process is fine as long as the path is now
        // a directory; any other failure is reported.
        if dirname.is_dir() { Ok(()) } else { Err(err) }
    } else {
        Ok(())
    }
}
/// Removes all files in the directory, but not the directory itself.
///
/// The directory must exist, and there should be no sub directories.
pub fn remove_dir_contents(dirname: &Path) -> Result<(), PathError> {
    let entries = dirname.read_dir().context(dirname)?;
    for entry in entries {
        let file_path = entry.context(dirname)?.path();
        fs::remove_file(&file_path).context(&file_path)?;
    }
    Ok(())
}
/// Checks if path points at an empty directory.
///
/// Returns `Ok(false)` if the path is missing or not a directory; any other
/// I/O error is propagated with the path attached.
pub fn is_empty_dir(path: &Path) -> Result<bool, PathError> {
    match path.read_dir() {
        Ok(mut entries) => Ok(entries.next().is_none()),
        Err(err) if matches!(err.kind(), ErrorKind::NotADirectory | ErrorKind::NotFound) => {
            Ok(false)
        }
        Err(err) => Err(err).context(path),
    }
}
/// Platform-specific path encoding error, returned by [`path_from_bytes()`]
/// and [`path_to_bytes()`].
///
/// On Unix the inner type is uninhabited (paths are raw bytes); on Windows it
/// indicates the data was not valid UTF-8.
#[derive(Debug, Error)]
#[error(transparent)]
pub struct BadPathEncoding(platform::BadOsStrEncoding);
/// Constructs [`Path`] from `bytes` in platform-specific manner.
///
/// On Unix, this function never fails because paths are just bytes. On Windows,
/// this may return error if the input wasn't well-formed UTF-8.
pub fn path_from_bytes(bytes: &[u8]) -> Result<&Path, BadPathEncoding> {
let s = platform::os_str_from_bytes(bytes).map_err(BadPathEncoding)?;
Ok(Path::new(s))
}
/// Converts `path` to bytes in platform-specific manner.
///
/// On Unix, this function never fails because paths are just bytes. On Windows,
/// this may return error if the input wasn't well-formed UTF-8.
///
/// The returned byte sequence can be considered a superset of ASCII (such as
/// UTF-8 bytes.)
pub fn path_to_bytes(path: &Path) -> Result<&[u8], BadPathEncoding> {
platform::os_str_to_bytes(path.as_ref()).map_err(BadPathEncoding)
}
/// Expands "~/" to "$HOME/".
///
/// If `$HOME` is unset, or the path doesn't start with "~/", the input is
/// returned as-is.
pub fn expand_home_path(path_str: &str) -> PathBuf {
    match path_str.strip_prefix("~/") {
        Some(remainder) => match std::env::var("HOME") {
            Ok(home_dir_str) => PathBuf::from(home_dir_str).join(remainder),
            Err(_) => PathBuf::from(path_str),
        },
        None => PathBuf::from(path_str),
    }
}
/// Turns the given `to` path into relative path starting from the `from` path.
///
/// Both `from` and `to` paths are supposed to be absolute and normalized in the
/// same manner.
pub fn relative_path(from: &Path, to: &Path) -> PathBuf {
    // Walk up from `from` until we find an ancestor that prefixes `to`.
    for (depth, base) in from.ancestors().enumerate() {
        let Ok(suffix) = to.strip_prefix(base) else {
            continue;
        };
        if depth == 0 && suffix.as_os_str().is_empty() {
            // `from` and `to` are identical.
            return ".".into();
        }
        // One ".." per level climbed, then the remaining suffix.
        let mut relative: PathBuf = (0..depth).map(|_| "..").collect();
        relative.push(suffix);
        return relative;
    }
    // No common prefix found. Return the original (absolute) path.
    to.to_owned()
}
/// Consumes as much `..` and `.` as possible without considering symlinks.
pub fn normalize_path(path: &Path) -> PathBuf {
    let mut normalized = PathBuf::new();
    for component in path.components() {
        match component {
            // "." contributes nothing.
            Component::CurDir => {}
            // ".." cancels a preceding normal component, but never another
            // ".." (we can't know what it resolves to without the filesystem).
            Component::ParentDir
                if matches!(normalized.components().next_back(), Some(Component::Normal(_))) =>
            {
                let popped = normalized.pop();
                assert!(popped);
            }
            other => normalized.push(other),
        }
    }
    if normalized.as_os_str().is_empty() {
        // Everything cancelled out; "." is the canonical empty relative path.
        PathBuf::from(".")
    } else {
        normalized
    }
}
/// Converts the given `path` to Unix-like path separated by "/".
///
/// The returned path might not work on Windows if it was canonicalized. On
/// Unix, this function is noop.
pub fn slash_path(path: &Path) -> Cow<'_, Path> {
    if !cfg!(windows) {
        // Unix already uses "/" separators; borrow as-is.
        return Cow::Borrowed(path);
    }
    Cow::Owned(PathBuf::from(to_slash_separated(path)))
}

// Re-joins the path's components with "/" regardless of the platform's
// native separator.
fn to_slash_separated(path: &Path) -> OsString {
    let mut result = OsString::with_capacity(path.as_os_str().len());
    for (i, component) in path.components().enumerate() {
        if i > 0 {
            result.push("/");
        }
        result.push(component);
    }
    result
}
/// Persists the temporary file after synchronizing the content.
///
/// After system crash, the persisted file should have a valid content if
/// existed. However, the persisted file name (or directory entry) could be
/// lost. It's up to caller to synchronize the directory entries.
///
/// See also <https://lwn.net/Articles/457667/> for the behavior on Linux.
pub fn persist_temp_file<P: AsRef<Path>>(
temp_file: NamedTempFile,
new_path: P,
) -> io::Result<File> {
// Ensure persisted file content is flushed to disk.
temp_file.as_file().sync_data()?;
temp_file
.persist(new_path)
.map_err(|PersistError { error, file: _ }| error)
}
/// Like [`persist_temp_file()`], but doesn't try to overwrite the existing
/// target on Windows.
pub fn persist_content_addressed_temp_file<P: AsRef<Path>>(
temp_file: NamedTempFile,
new_path: P,
) -> io::Result<File> {
// Ensure new file content is flushed to disk, so the old file content
// wouldn't be lost if existed at the same location.
temp_file.as_file().sync_data()?;
if cfg!(windows) {
// On Windows, overwriting file can fail if the file is opened without
// FILE_SHARE_DELETE for example. We don't need to take a risk if the
// file already exists.
match temp_file.persist_noclobber(&new_path) {
Ok(file) => Ok(file),
Err(PersistError { error, file: _ }) => {
if let Ok(existing_file) = File::open(new_path) {
// TODO: Update mtime to help GC keep this file
Ok(existing_file)
} else {
Err(error)
}
}
}
} else {
// On Unix, rename() is atomic and should succeed even if the
// destination file exists. Checking if the target exists might involve
// non-atomic operation, so don't use persist_noclobber().
temp_file
.persist(new_path)
.map_err(|PersistError { error, file: _ }| error)
}
}
/// Reads from an async source and writes to a sync destination. Does not spawn
/// a task, so writes will block.
///
/// Returns the total number of bytes copied.
pub async fn copy_async_to_sync<R: AsyncRead, W: Write + ?Sized>(
    reader: R,
    writer: &mut W,
) -> io::Result<usize> {
    let mut reader = std::pin::pin!(reader);
    // 16 KiB chunk buffer, reused across reads.
    let mut chunk = vec![0; 16 << 10];
    let mut total = 0;
    loop {
        let n = reader.read(&mut chunk).await?;
        if n == 0 {
            // EOF reached.
            break Ok(total);
        }
        writer.write_all(&chunk[..n])?;
        total += n;
    }
}
/// `AsyncRead` implementation backed by a `Read`. It is not actually async;
/// the goal is simply to avoid reading the full contents from the `Read` into
/// memory.
pub struct BlockingAsyncReader<R> {
    // The wrapped synchronous reader; each poll reads from it directly.
    reader: R,
}

impl<R: Read + Unpin> BlockingAsyncReader<R> {
    /// Creates a new `BlockingAsyncReader`
    pub fn new(reader: R) -> Self {
        Self { reader }
    }
}
impl<R: Read + Unpin> AsyncRead for BlockingAsyncReader<R> {
    // Performs a blocking read; this never returns `Poll::Pending`, so the
    // context's waker is never used.
    fn poll_read(
        mut self: Pin<&mut Self>,
        _cx: &mut std::task::Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        let num_bytes_read = self.reader.read(buf.initialize_unfilled())?;
        buf.advance(num_bytes_read);
        Poll::Ready(Ok(()))
    }
}
#[cfg(unix)]
mod platform {
    use std::convert::Infallible;
    use std::ffi::OsStr;
    use std::io;
    use std::os::unix::ffi::OsStrExt as _;
    use std::os::unix::fs::PermissionsExt;
    use std::os::unix::fs::symlink;
    use std::path::Path;

    // Unix paths are arbitrary bytes, so conversion can never fail.
    pub type BadOsStrEncoding = Infallible;

    pub fn os_str_from_bytes(data: &[u8]) -> Result<&OsStr, BadOsStrEncoding> {
        Ok(OsStr::from_bytes(data))
    }

    pub fn os_str_to_bytes(data: &OsStr) -> Result<&[u8], BadOsStrEncoding> {
        Ok(data.as_bytes())
    }

    /// Whether changing executable bits is permitted on the filesystem of this
    /// directory, and whether attempting to flip one has an observable effect.
    pub fn check_executable_bit_support(path: impl AsRef<Path>) -> io::Result<bool> {
        // Get current permissions and try to flip just the user's executable bit.
        let temp_file = tempfile::tempfile_in(path)?;
        let old_mode = temp_file.metadata()?.permissions().mode();
        let new_mode = old_mode ^ 0o100; // toggle only the owner-execute bit
        let result = temp_file.set_permissions(PermissionsExt::from_mode(new_mode));
        match result {
            // If permission was denied, we do not have executable bit support.
            Err(err) if err.kind() == io::ErrorKind::PermissionDenied => Ok(false),
            Err(err) => Err(err),
            Ok(()) => {
                // Verify that the permission change was not silently ignored.
                let mode = temp_file.metadata()?.permissions().mode();
                Ok(mode == new_mode)
            }
        }
    }

    /// Symlinks are always available on Unix.
    pub fn check_symlink_support() -> io::Result<bool> {
        Ok(true)
    }

    /// Creates a new symlink `link` pointing to the `original` path.
    ///
    /// On Unix, the `original` path doesn't have to be a directory.
    pub fn symlink_dir<P: AsRef<Path>, Q: AsRef<Path>>(original: P, link: Q) -> io::Result<()> {
        symlink(original, link)
    }

    /// Creates a new symlink `link` pointing to the `original` path.
    ///
    /// On Unix, the `original` path doesn't have to be a file.
    pub fn symlink_file<P: AsRef<Path>, Q: AsRef<Path>>(original: P, link: Q) -> io::Result<()> {
        symlink(original, link)
    }
}
#[cfg(windows)]
mod platform {
    use std::io;
    pub use std::os::windows::fs::symlink_dir;
    pub use std::os::windows::fs::symlink_file;

    use winreg::RegKey;
    use winreg::enums::HKEY_LOCAL_MACHINE;

    // Windows paths must be valid UTF-8; reuse the fallback codec which
    // enforces that.
    pub use super::fallback::BadOsStrEncoding;
    pub use super::fallback::os_str_from_bytes;
    pub use super::fallback::os_str_to_bytes;

    /// Symlinks may or may not be enabled on Windows. They require the
    /// Developer Mode setting, which is stored in the registry key below.
    ///
    /// Note: If developer mode is not enabled, the error code of symlink
    /// creation will be 1314, `ERROR_PRIVILEGE_NOT_HELD`.
    pub fn check_symlink_support() -> io::Result<bool> {
        let hklm = RegKey::predef(HKEY_LOCAL_MACHINE);
        let sideloading =
            hklm.open_subkey("SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\AppModelUnlock")?;
        let developer_mode: u32 = sideloading.get_value("AllowDevelopmentWithoutDevLicense")?;
        Ok(developer_mode == 1)
    }
}
#[cfg_attr(unix, expect(dead_code))]
mod fallback {
    use std::ffi::OsStr;

    use thiserror::Error;

    // Define error per platform so we can explicitly say UTF-8 is expected.
    #[derive(Debug, Error)]
    #[error("Invalid UTF-8 sequence")]
    pub struct BadOsStrEncoding;

    // Decodes `data` as UTF-8 and exposes it as an `OsStr`.
    pub fn os_str_from_bytes(data: &[u8]) -> Result<&OsStr, BadOsStrEncoding> {
        match str::from_utf8(data) {
            Ok(utf8) => Ok(utf8.as_ref()),
            Err(_) => Err(BadOsStrEncoding),
        }
    }

    // Encodes `data` as UTF-8 bytes, failing unless it's valid Unicode.
    pub fn os_str_to_bytes(data: &OsStr) -> Result<&[u8], BadOsStrEncoding> {
        data.to_str().map(str::as_bytes).ok_or(BadOsStrEncoding)
    }
}
#[cfg(test)]
mod tests {
    use std::io::Cursor;
    use std::io::Write as _;

    use itertools::Itertools as _;
    use pollster::FutureExt as _;
    use test_case::test_case;

    use super::*;
    use crate::tests::new_temp_dir;

    #[test]
    #[cfg(unix)]
    fn exec_bit_support_in_temp_dir() {
        // Temporary directories on Unix should always have executable support.
        // Note that it would be problematic to test in a non-temp directory, as
        // a developer's filesystem may or may not have executable bit support.
        let dir = new_temp_dir();
        let supported = check_executable_bit_support(dir.path()).unwrap();
        assert!(supported);
    }

    #[test]
    fn test_path_bytes_roundtrip() {
        // ASCII and well-formed UTF-8 round-trip on all platforms.
        let bytes = b"ascii";
        let path = path_from_bytes(bytes).unwrap();
        assert_eq!(path_to_bytes(path).unwrap(), bytes);

        let bytes = b"utf-8.\xc3\xa0";
        let path = path_from_bytes(bytes).unwrap();
        assert_eq!(path_to_bytes(path).unwrap(), bytes);

        // Non-UTF-8 bytes (Latin-1) only round-trip on Unix, where paths are
        // raw bytes; elsewhere decoding fails.
        let bytes = b"latin1.\xe0";
        if cfg!(unix) {
            let path = path_from_bytes(bytes).unwrap();
            assert_eq!(path_to_bytes(path).unwrap(), bytes);
        } else {
            assert!(path_from_bytes(bytes).is_err());
        }
    }

    #[test]
    fn normalize_too_many_dot_dot() {
        // ".." components that climb past the input are preserved, not
        // dropped.
        assert_eq!(normalize_path(Path::new("foo/..")), Path::new("."));
        assert_eq!(normalize_path(Path::new("foo/../..")), Path::new(".."));
        assert_eq!(
            normalize_path(Path::new("foo/../../..")),
            Path::new("../..")
        );
        assert_eq!(
            normalize_path(Path::new("foo/../../../bar/baz/..")),
            Path::new("../../bar")
        );
    }

    #[test]
    fn test_slash_path() {
        assert_eq!(slash_path(Path::new("")), Path::new(""));
        assert_eq!(slash_path(Path::new("foo")), Path::new("foo"));
        assert_eq!(slash_path(Path::new("foo/bar")), Path::new("foo/bar"));
        assert_eq!(slash_path(Path::new("foo/bar/..")), Path::new("foo/bar/.."));
        // Backslash is a separator only on Windows.
        assert_eq!(
            slash_path(Path::new(r"foo\bar")),
            if cfg!(windows) {
                Path::new("foo/bar")
            } else {
                Path::new(r"foo\bar")
            }
        );
        assert_eq!(
            slash_path(Path::new(r"..\foo\bar")),
            if cfg!(windows) {
                Path::new("../foo/bar")
            } else {
                Path::new(r"..\foo\bar")
            }
        );
    }

    #[test]
    fn test_persist_no_existing_file() {
        let temp_dir = new_temp_dir();
        let target = temp_dir.path().join("file");
        let mut temp_file = NamedTempFile::new_in(&temp_dir).unwrap();
        temp_file.write_all(b"contents").unwrap();
        assert!(persist_content_addressed_temp_file(temp_file, target).is_ok());
    }

    #[test_case(false ; "existing file open")]
    #[test_case(true ; "existing file closed")]
    fn test_persist_target_exists(existing_file_closed: bool) {
        // Persisting over an existing target must succeed whether or not the
        // existing file is still held open (the open case matters on Windows).
        let temp_dir = new_temp_dir();
        let target = temp_dir.path().join("file");
        let mut temp_file = NamedTempFile::new_in(&temp_dir).unwrap();
        temp_file.write_all(b"contents").unwrap();
        let mut file = File::create(&target).unwrap();
        file.write_all(b"contents").unwrap();
        if existing_file_closed {
            drop(file);
        }
        assert!(persist_content_addressed_temp_file(temp_file, &target).is_ok());
    }

    #[test]
    fn test_copy_async_to_sync_small() {
        let input = b"hello";
        let mut output = vec![];
        let result = copy_async_to_sync(Cursor::new(&input), &mut output).block_on();
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), 5);
        assert_eq!(output, input);
    }

    #[test]
    fn test_copy_async_to_sync_large() {
        // More than 1 buffer worth of data
        let input = (0..100u8).cycle().take(40000).collect_vec();
        let mut output = vec![];
        let result = copy_async_to_sync(Cursor::new(&input), &mut output).block_on();
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), 40000);
        assert_eq!(output, input);
    }

    #[test]
    fn test_blocking_async_reader() {
        let input = b"hello";
        let sync_reader = Cursor::new(&input);
        let mut async_reader = BlockingAsyncReader::new(sync_reader);

        // Partial reads resume where the previous read stopped.
        let mut buf = [0u8; 3];
        let num_bytes_read = async_reader.read(&mut buf).block_on().unwrap();
        assert_eq!(num_bytes_read, 3);
        assert_eq!(&buf, &input[0..3]);

        let num_bytes_read = async_reader.read(&mut buf).block_on().unwrap();
        assert_eq!(num_bytes_read, 2);
        assert_eq!(&buf[0..2], &input[3..5]);
    }

    #[test]
    fn test_blocking_async_reader_read_to_end() {
        let input = b"hello";
        let sync_reader = Cursor::new(&input);
        let mut async_reader = BlockingAsyncReader::new(sync_reader);
        let mut buf = vec![];
        let num_bytes_read = async_reader.read_to_end(&mut buf).block_on().unwrap();
        assert_eq!(num_bytes_read, input.len());
        assert_eq!(&buf, &input);
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/conflicts.rs | lib/src/conflicts.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::io;
use std::io::Write;
use std::iter::zip;
use std::pin::Pin;
use bstr::BStr;
use bstr::BString;
use bstr::ByteSlice as _;
use futures::Stream;
use futures::StreamExt as _;
use futures::stream::BoxStream;
use futures::try_join;
use itertools::Itertools as _;
use pollster::FutureExt as _;
use tokio::io::AsyncRead;
use tokio::io::AsyncReadExt as _;
use crate::backend::BackendError;
use crate::backend::BackendResult;
use crate::backend::CommitId;
use crate::backend::CopyId;
use crate::backend::FileId;
use crate::backend::SymlinkId;
use crate::backend::TreeId;
use crate::backend::TreeValue;
use crate::conflict_labels::ConflictLabels;
use crate::copies::CopiesTreeDiffEntry;
use crate::copies::CopiesTreeDiffEntryPath;
use crate::diff::ContentDiff;
use crate::diff::DiffHunk;
use crate::diff::DiffHunkKind;
use crate::files;
use crate::files::MergeResult;
use crate::merge::Diff;
use crate::merge::Merge;
use crate::merge::MergedTreeValue;
use crate::merge::SameChange;
use crate::repo_path::RepoPath;
use crate::store::Store;
use crate::tree_merge::MergeOptions;
/// Minimum length of conflict markers.
///
/// 7 matches the length of Git's standard markers (e.g. `<<<<<<<`).
pub const MIN_CONFLICT_MARKER_LEN: usize = 7;

/// If a file already contains lines which look like conflict markers of length
/// N, then the conflict markers we add will be of length (N + increment). This
/// number is chosen to make the conflict markers noticeably longer than the
/// existing markers.
const CONFLICT_MARKER_LEN_INCREMENT: usize = 4;

/// Comment for missing terminating newline in a term of a conflict.
// NOTE: this string is part of the materialized conflict format; changing it
// would change the output users see.
const NO_EOL_COMMENT: &str = " (no terminating newline)";
// Writes hunks in a diff-like text form: matching lines prefixed with a
// space, removed lines with "-", added lines with "+".
fn write_diff_hunks(hunks: &[DiffHunk], file: &mut dyn Write) -> io::Result<()> {
    for hunk in hunks {
        match hunk.kind {
            DiffHunkKind::Matching => {
                // All sides are identical for a matching hunk; emit one copy.
                debug_assert!(hunk.contents.iter().all_equal());
                for line in hunk.contents[0].lines_with_terminator() {
                    file.write_all(b" ")?;
                    write_and_ensure_newline(file, line)?;
                }
            }
            DiffHunkKind::Different => {
                // Left side ("-") first, then right side ("+").
                for (sign, content) in [("-", &hunk.contents[0]), ("+", &hunk.contents[1])] {
                    for line in content.lines_with_terminator() {
                        file.write_all(sign.as_bytes())?;
                        write_and_ensure_newline(file, line)?;
                    }
                }
            }
        }
    }
    Ok(())
}
// Reads the full content of one conflict term; a removed file (None) is
// treated as empty content.
async fn get_file_contents(
    store: &Store,
    path: &RepoPath,
    term: &Option<FileId>,
) -> BackendResult<BString> {
    let Some(id) = term else {
        // If the conflict had removed the file on one side, we pretend that
        // the file was empty there.
        return Ok(BString::new(vec![]));
    };
    let mut reader = store.read_file(path, id).await?;
    let mut content = vec![];
    reader
        .read_to_end(&mut content)
        .await
        .map_err(|err| BackendError::ReadFile {
            path: path.to_owned(),
            id: id.clone(),
            source: err.into(),
        })?;
    Ok(BString::new(content))
}
/// Reads the file content of every term of `merge` into memory.
///
/// Absent terms (removed files) become empty strings, mirroring
/// `get_file_contents()`.
pub async fn extract_as_single_hunk(
    merge: &Merge<Option<FileId>>,
    store: &Store,
    path: &RepoPath,
) -> BackendResult<Merge<BString>> {
    merge
        .try_map_async(|term| get_file_contents(store, path, term))
        .await
}
/// A type similar to `MergedTreeValue` but with associated data to include in
/// e.g. the working copy or in a diff.
pub enum MaterializedTreeValue {
    // No value exists at this path.
    Absent,
    // The backend denied read access; carries the underlying error.
    AccessDenied(Box<dyn std::error::Error + Send + Sync>),
    // Regular file with streaming content (see `MaterializedFileValue`).
    File(MaterializedFileValue),
    // Symlink with its resolved target string.
    Symlink {
        id: SymlinkId,
        target: String,
    },
    // Conflict between file values, with contents loaded for materialization.
    FileConflict(MaterializedFileConflictValue),
    // Conflict involving non-file values; only ids and labels are carried.
    OtherConflict {
        id: MergedTreeValue,
        labels: ConflictLabels,
    },
    // Git submodule pinned to the given commit.
    GitSubmodule(CommitId),
    // Directory; only the tree id is carried.
    Tree(TreeId),
}
impl MaterializedTreeValue {
    /// Whether this value represents an absent path.
    pub fn is_absent(&self) -> bool {
        match self {
            Self::Absent => true,
            _ => false,
        }
    }

    /// Whether this value is anything other than [`MaterializedTreeValue::Absent`].
    pub fn is_present(&self) -> bool {
        !matches!(self, Self::Absent)
    }
}
/// [`TreeValue::File`] with file content `reader`.
pub struct MaterializedFileValue {
    /// Id of the file content.
    pub id: FileId,
    /// Whether the executable bit is set.
    pub executable: bool,
    /// Copy id of the file.
    pub copy_id: CopyId,
    /// Streaming reader over the file content.
    pub reader: Pin<Box<dyn AsyncRead + Send>>,
}
impl MaterializedFileValue {
    /// Reads file content until EOF. The provided `path` is used only for error
    /// reporting purpose.
    ///
    /// Since this drains the underlying reader, a second call will yield no
    /// further data.
    pub async fn read_all(&mut self, path: &RepoPath) -> BackendResult<Vec<u8>> {
        let mut buf = Vec::new();
        self.reader
            .read_to_end(&mut buf)
            .await
            .map_err(|err| BackendError::ReadFile {
                path: path.to_owned(),
                id: self.id.clone(),
                source: err.into(),
            })?;
        Ok(buf)
    }
}
/// Conflicted [`TreeValue::File`]s with file contents.
///
/// Produced by [`try_materialize_file_conflict_value()`].
pub struct MaterializedFileConflictValue {
    /// File ids which preserve the shape of the tree conflict, to be used with
    /// [`Merge::update_from_simplified()`].
    pub unsimplified_ids: Merge<Option<FileId>>,
    /// Simplified file ids, in which redundant id pairs are dropped.
    pub ids: Merge<Option<FileId>>,
    /// Simplified conflict labels, matching `ids`.
    pub labels: ConflictLabels,
    /// File contents corresponding to the simplified `ids`.
    // TODO: or Vec<(FileId, Box<dyn Read>)> so that caller can stop reading
    // when null bytes found?
    pub contents: Merge<BString>,
    /// Merged executable bit. `None` if there are changes in both executable
    /// bit and file absence.
    pub executable: Option<bool>,
    /// Merged copy id. `None` if no single value could be determined.
    pub copy_id: Option<CopyId>,
}
/// Reads the data associated with a `MergedTreeValue` so it can be written to
/// e.g. the working copy or diff.
///
/// `ReadAccessDenied` backend errors are not propagated as `Err`; they are
/// returned as [`MaterializedTreeValue::AccessDenied`].
pub async fn materialize_tree_value(
    store: &Store,
    path: &RepoPath,
    value: MergedTreeValue,
    conflict_labels: &ConflictLabels,
) -> BackendResult<MaterializedTreeValue> {
    match materialize_tree_value_no_access_denied(store, path, value, conflict_labels).await {
        Err(BackendError::ReadAccessDenied { source, .. }) => {
            Ok(MaterializedTreeValue::AccessDenied(source))
        }
        result => result,
    }
}
/// Implementation of [`materialize_tree_value()`] that propagates
/// `ReadAccessDenied` errors instead of converting them.
async fn materialize_tree_value_no_access_denied(
    store: &Store,
    path: &RepoPath,
    value: MergedTreeValue,
    conflict_labels: &ConflictLabels,
) -> BackendResult<MaterializedTreeValue> {
    match value.into_resolved() {
        // No value at this path.
        Ok(None) => Ok(MaterializedTreeValue::Absent),
        Ok(Some(TreeValue::File {
            id,
            executable,
            copy_id,
        })) => {
            // Expose a streaming reader rather than reading the whole file
            // here; the caller decides how much to read.
            let reader = store.read_file(path, &id).await?;
            Ok(MaterializedTreeValue::File(MaterializedFileValue {
                id,
                executable,
                copy_id,
                reader,
            }))
        }
        Ok(Some(TreeValue::Symlink(id))) => {
            let target = store.read_symlink(path, &id).await?;
            Ok(MaterializedTreeValue::Symlink { id, target })
        }
        Ok(Some(TreeValue::GitSubmodule(id))) => Ok(MaterializedTreeValue::GitSubmodule(id)),
        Ok(Some(TreeValue::Tree(id))) => Ok(MaterializedTreeValue::Tree(id)),
        Err(conflict) => {
            // A conflict consisting only of files (or absent terms) can be
            // materialized as text; anything else stays an "other" conflict.
            match try_materialize_file_conflict_value(store, path, &conflict, conflict_labels)
                .await?
            {
                Some(file) => Ok(MaterializedTreeValue::FileConflict(file)),
                None => Ok(MaterializedTreeValue::OtherConflict {
                    id: conflict,
                    labels: conflict_labels.clone(),
                }),
            }
        }
    }
}
/// Suppose `conflict` contains only files or absent entries, reads the file
/// contents.
///
/// Returns `Ok(None)` when the conflict can't be viewed as a file merge (see
/// `to_file_merge()`/`to_executable_merge()`).
pub async fn try_materialize_file_conflict_value(
    store: &Store,
    path: &RepoPath,
    conflict: &MergedTreeValue,
    conflict_labels: &ConflictLabels,
) -> BackendResult<Option<MaterializedFileConflictValue>> {
    let (Some(unsimplified_ids), Some(executable_bits)) =
        (conflict.to_file_merge(), conflict.to_executable_merge())
    else {
        return Ok(None);
    };
    // Drop redundant id pairs, keeping the labels in sync with the ids.
    let (labels, ids) = conflict_labels.simplify_with(&unsimplified_ids);
    let contents = extract_as_single_hunk(&ids, store, path).await?;
    let executable = resolve_file_executable(&executable_bits);
    Ok(Some(MaterializedFileConflictValue {
        unsimplified_ids,
        ids,
        labels,
        contents,
        executable,
        // Copy id conflicts aren't resolved here; a placeholder is used.
        copy_id: Some(CopyId::placeholder()),
    }))
}
/// Resolves conflicts in file executable bit, returns the original state if the
/// file is deleted and executable bit is unchanged.
///
/// Returns `None` if the executable-bit merge can't be resolved trivially.
pub fn resolve_file_executable(merge: &Merge<Option<bool>>) -> Option<bool> {
    let resolved = merge.resolve_trivial(SameChange::Accept).copied()?;
    if resolved.is_some() {
        resolved
    } else {
        // If the merge is resolved to None (absent), there should be the same
        // number of Some(true) and Some(false). Pick the old state if
        // unambiguous, so the new file inherits the original executable bit.
        merge.removes().flatten().copied().all_equal_value().ok()
    }
}
/// Describes what style should be used when materializing conflicts.
// Parsed from config as "diff", "diff-experimental", "snapshot" or "git"
// (kebab-case variant names).
#[derive(Clone, Copy, PartialEq, Eq, Debug, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum ConflictMarkerStyle {
    /// Style which shows a snapshot and a series of diffs to apply.
    Diff,
    /// Similar to "diff", but always picks the first side as the snapshot. May
    /// become the default in a future version.
    DiffExperimental,
    /// Style which shows a snapshot for each base and side.
    Snapshot,
    /// Style which replicates Git's "diff3" style to support external tools.
    Git,
}
impl ConflictMarkerStyle {
    /// Returns true if this style allows `%%%%%%%` conflict markers.
    pub fn allows_diff(&self) -> bool {
        // Exhaustive match so a new style forces an explicit decision here.
        match self {
            Self::Diff | Self::DiffExperimental => true,
            Self::Snapshot | Self::Git => false,
        }
    }
}
/// Options for conflict materialization.
#[derive(Clone, Debug)]
pub struct ConflictMaterializeOptions {
    /// Which conflict marker style to emit.
    pub marker_style: ConflictMarkerStyle,
    /// Fixed conflict marker length to use. If `None`, a length is chosen
    /// from the content (see `choose_materialized_conflict_marker_len()`).
    pub marker_len: Option<usize>,
    /// Options controlling how the hunks are merged.
    pub merge: MergeOptions,
}
/// Characters which can be repeated to form a conflict marker line when
/// materializing and parsing conflicts.
///
/// Each variant's discriminant is the ASCII byte used for that marker.
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
enum ConflictMarkerLineChar {
    ConflictStart = b'<',
    ConflictEnd = b'>',
    Add = b'+',
    Remove = b'-',
    Diff = b'%',
    Note = b'\\',
    GitAncestor = b'|',
    GitSeparator = b'=',
}
impl ConflictMarkerLineChar {
    /// Get the ASCII byte used for this conflict marker.
    fn to_byte(self) -> u8 {
        self as u8
    }

    /// Parse a byte to see if it corresponds with any kind of conflict marker.
    fn parse_byte(byte: u8) -> Option<Self> {
        // Compare against each variant's own discriminant so the byte values
        // are defined in exactly one place (the enum declaration).
        [
            Self::ConflictStart,
            Self::ConflictEnd,
            Self::Add,
            Self::Remove,
            Self::Diff,
            Self::Note,
            Self::GitAncestor,
            Self::GitSeparator,
        ]
        .into_iter()
        .find(|kind| kind.to_byte() == byte)
    }
}
/// Represents a conflict marker line parsed from the file. Conflict marker
/// lines consist of a single ASCII character repeated for a certain length.
struct ConflictMarkerLine {
    /// Which marker character the line consists of.
    kind: ConflictMarkerLineChar,
    /// How many times the character is repeated.
    len: usize,
}
/// Write a conflict marker to an output file: `len` repetitions of the marker
/// character, optionally followed by a space and `suffix_text`.
fn write_conflict_marker(
    output: &mut dyn Write,
    kind: ConflictMarkerLineChar,
    len: usize,
    suffix_text: &str,
) -> io::Result<()> {
    let marker = BString::new(vec![kind.to_byte(); len]);
    match suffix_text {
        // No trailing space when there is no suffix.
        "" => writeln!(output, "{marker}"),
        _ => writeln!(output, "{marker} {suffix_text}"),
    }
}
/// Parse a conflict marker from a line of a file. The conflict marker may have
/// any length (even less than MIN_CONFLICT_MARKER_LEN).
fn parse_conflict_marker_any_len(line: &[u8]) -> Option<ConflictMarkerLine> {
    let first_byte = *line.first()?;
    let kind = ConflictMarkerLineChar::parse_byte(first_byte)?;
    // The marker is the run of identical bytes at the start of the line.
    let len = line.iter().take_while(|&&b| b == first_byte).count();
    if let Some(next_byte) = line.get(len) {
        // If there is a character after the marker, it must be ASCII whitespace
        if !next_byte.is_ascii_whitespace() {
            return None;
        }
    }
    Some(ConflictMarkerLine { kind, len })
}
/// Parse a conflict marker, expecting it to be at least a certain length. Any
/// shorter conflict markers are ignored.
fn parse_conflict_marker(line: &[u8], expected_len: usize) -> Option<ConflictMarkerLineChar> {
    let marker = parse_conflict_marker_any_len(line)?;
    (marker.len >= expected_len).then_some(marker.kind)
}
/// Given a Merge of files, choose the conflict marker length to use when
/// materializing conflicts.
///
/// The chosen length exceeds any marker-like run already present in the
/// content, so new markers can be told apart from pre-existing ones.
pub fn choose_materialized_conflict_marker_len<T: AsRef<[u8]>>(single_hunk: &Merge<T>) -> usize {
    // Longest marker-like line found across all terms of the merge.
    let max_existing_marker_len = single_hunk
        .iter()
        .flat_map(|file| file.as_ref().lines_with_terminator())
        .filter_map(parse_conflict_marker_any_len)
        .map(|marker| marker.len)
        .max()
        .unwrap_or_default();
    max_existing_marker_len
        .saturating_add(CONFLICT_MARKER_LEN_INCREMENT)
        .max(MIN_CONFLICT_MARKER_LEN)
}
/// Merges `single_hunk` and writes the result to `output`, rendering any
/// unresolved regions with conflict markers.
pub fn materialize_merge_result<T: AsRef<[u8]>>(
    single_hunk: &Merge<T>,
    labels: &ConflictLabels,
    output: &mut dyn Write,
    options: &ConflictMaterializeOptions,
) -> io::Result<()> {
    let merge_result = files::merge_hunks(single_hunk, &options.merge);
    match &merge_result {
        MergeResult::Resolved(content) => output.write_all(content),
        MergeResult::Conflict(hunks) => {
            // Use the configured marker length, or derive one from the content.
            let marker_len = options
                .marker_len
                .unwrap_or_else(|| choose_materialized_conflict_marker_len(single_hunk));
            materialize_conflict_hunks(hunks, options.marker_style, marker_len, labels, output)
        }
    }
}
/// Like [`materialize_merge_result()`], but returns the materialized content
/// as a byte string instead of writing it to a sink.
pub fn materialize_merge_result_to_bytes<T: AsRef<[u8]>>(
    single_hunk: &Merge<T>,
    labels: &ConflictLabels,
    options: &ConflictMaterializeOptions,
) -> BString {
    let merge_result = files::merge_hunks(single_hunk, &options.merge);
    match merge_result {
        MergeResult::Resolved(content) => content,
        MergeResult::Conflict(hunks) => {
            let marker_len = options
                .marker_len
                .unwrap_or_else(|| choose_materialized_conflict_marker_len(single_hunk));
            let mut output = Vec::new();
            materialize_conflict_hunks(
                &hunks,
                options.marker_style,
                marker_len,
                labels,
                &mut output,
            )
            .expect("writing to an in-memory buffer should never fail");
            output.into()
        }
    }
}
/// Writes the hunks of a partially merged file, rendering each unresolved
/// hunk with conflict markers in the requested style.
fn materialize_conflict_hunks(
    hunks: &[Merge<BString>],
    conflict_marker_style: ConflictMarkerStyle,
    conflict_marker_len: usize,
    labels: &ConflictLabels,
    output: &mut dyn Write,
) -> io::Result<()> {
    // Count conflicts up front so each marker can say "conflict i of n".
    let num_conflicts = hunks
        .iter()
        .filter(|hunk| hunk.as_resolved().is_none())
        .count();
    let mut conflict_index = 0;
    for hunk in hunks {
        if let Some(content) = hunk.as_resolved() {
            output.write_all(content)?;
        } else {
            conflict_index += 1;
            let conflict_info = format!("conflict {conflict_index} of {num_conflicts}");
            match (conflict_marker_style, hunk.as_slice()) {
                // 2-sided conflicts can use Git-style conflict markers
                (ConflictMarkerStyle::Git, [left, base, right]) => {
                    materialize_git_style_conflict(
                        left,
                        base,
                        right,
                        conflict_marker_len,
                        labels,
                        output,
                    )?;
                }
                // Everything else (including >2-sided conflicts) uses the
                // JJ-style format.
                _ => {
                    materialize_jj_style_conflict(
                        hunk,
                        &conflict_info,
                        conflict_marker_style,
                        conflict_marker_len,
                        labels,
                        output,
                    )?;
                }
            }
        }
    }
    Ok(())
}
/// Writes one conflict hunk using Git's "diff3"-style markers
/// (`<<<<<<<`, `|||||||`, `=======`, `>>>>>>>`).
fn materialize_git_style_conflict(
    left: &[u8],
    base: &[u8],
    right: &[u8],
    conflict_marker_len: usize,
    labels: &ConflictLabels,
    output: &mut dyn Write,
) -> io::Result<()> {
    write_conflict_marker(
        output,
        ConflictMarkerLineChar::ConflictStart,
        conflict_marker_len,
        labels.get_add(0).unwrap_or("side #1"),
    )?;
    write_and_ensure_newline(output, left)?;
    write_conflict_marker(
        output,
        ConflictMarkerLineChar::GitAncestor,
        conflict_marker_len,
        labels.get_remove(0).unwrap_or("base"),
    )?;
    write_and_ensure_newline(output, base)?;
    // VS Code doesn't seem to support any trailing text on the separator line
    write_conflict_marker(
        output,
        ConflictMarkerLineChar::GitSeparator,
        conflict_marker_len,
        "",
    )?;
    write_and_ensure_newline(output, right)?;
    write_conflict_marker(
        output,
        ConflictMarkerLineChar::ConflictEnd,
        conflict_marker_len,
        labels.get_add(1).unwrap_or("side #2"),
    )?;
    Ok(())
}
/// Writes one conflict hunk using JJ's marker format: a `<<<<<<<` header, then
/// one snapshot side plus a diff (or separate base/side snapshots, depending
/// on `conflict_marker_style`) for each base, and a `>>>>>>>` footer.
fn materialize_jj_style_conflict(
    hunk: &Merge<BString>,
    conflict_info: &str,
    conflict_marker_style: ConflictMarkerStyle,
    conflict_marker_len: usize,
    labels: &ConflictLabels,
    output: &mut dyn Write,
) -> io::Result<()> {
    // Label for a positive term, falling back to "side #N".
    let get_side_label = |add_index: usize| -> String {
        labels.get_add(add_index).map_or_else(
            || format!("side #{}", add_index + 1),
            |label| label.to_owned(),
        )
    };
    // Label for a negative term, falling back to "base"/"base #N".
    let get_base_label = |base_index: usize| -> String {
        labels
            .get_remove(base_index)
            .map(|label| label.to_owned())
            .unwrap_or_else(|| {
                // The vast majority of conflicts one actually tries to resolve manually have 1
                // base.
                if hunk.removes().len() == 1 {
                    "base".to_string()
                } else {
                    format!("base #{}", base_index + 1)
                }
            })
    };
    // Write a positive snapshot (side) of a conflict
    let write_side = |add_index: usize, data: &[u8], output: &mut dyn Write| {
        write_conflict_marker(
            output,
            ConflictMarkerLineChar::Add,
            conflict_marker_len,
            &(get_side_label(add_index) + maybe_no_eol_comment(data)),
        )?;
        write_and_ensure_newline(output, data)
    };
    // Write a negative snapshot (base) of a conflict
    let write_base = |base_index: usize, data: &[u8], output: &mut dyn Write| {
        write_conflict_marker(
            output,
            ConflictMarkerLineChar::Remove,
            conflict_marker_len,
            &(get_base_label(base_index) + maybe_no_eol_comment(data)),
        )?;
        write_and_ensure_newline(output, data)
    };
    // Write a diff from a negative term to a positive term
    let write_diff =
        |base_index: usize, add_index: usize, diff: &[DiffHunk], output: &mut dyn Write| {
            // Final hunk's contents, used to detect missing terminating
            // newlines on either side of the diff.
            let (remove_last_hunk, add_last_hunk) = diff
                .last()
                .map_or((BStr::new(""), BStr::new("")), |diff_hunk| {
                    (diff_hunk.contents[0], diff_hunk.contents[1])
                });
            if labels.get_add(add_index).is_none()
                && labels.get_remove(base_index).is_none()
                && !has_no_eol(remove_last_hunk)
                && !has_no_eol(add_last_hunk)
            {
                // TODO: remove this format when all conflicts have labels
                // Use simple conflict markers when there are no conflict labels and no comment
                // for missing terminating newline.
                write_conflict_marker(
                    output,
                    ConflictMarkerLineChar::Diff,
                    conflict_marker_len,
                    &format!(
                        "diff from {} to {}",
                        get_base_label(base_index),
                        get_side_label(add_index)
                    ),
                )?;
            } else {
                write_conflict_marker(
                    output,
                    ConflictMarkerLineChar::Diff,
                    conflict_marker_len,
                    &format!(
                        "diff from: {}{}",
                        get_base_label(base_index),
                        maybe_no_eol_comment(remove_last_hunk)
                    ),
                )?;
                write_conflict_marker(
                    output,
                    ConflictMarkerLineChar::Note,
                    conflict_marker_len,
                    &format!(
                        " to: {}{}",
                        get_side_label(add_index),
                        maybe_no_eol_comment(add_last_hunk)
                    ),
                )?;
            }
            write_diff_hunks(diff, output)
        };
    write_conflict_marker(
        output,
        ConflictMarkerLineChar::ConflictStart,
        conflict_marker_len,
        conflict_info,
    )?;
    let mut snapshot_written = false;
    // The only conflict marker style which can start with a diff is "diff".
    if conflict_marker_style != ConflictMarkerStyle::Diff {
        write_side(0, hunk.first(), output)?;
        snapshot_written = true;
    }
    for (base_index, left) in hunk.removes().enumerate() {
        // Once a snapshot is written, side i pairs with base i-1.
        let add_index = if snapshot_written {
            base_index + 1
        } else {
            base_index
        };
        let right1 = hunk.get_add(add_index).unwrap();
        // Write the base and side separately if the conflict marker style doesn't
        // support diffs.
        if !conflict_marker_style.allows_diff() {
            write_base(base_index, left, output)?;
            write_side(add_index, right1, output)?;
            continue;
        }
        let diff1 = ContentDiff::by_line([&left, &right1]).hunks().collect_vec();
        // If we haven't written a snapshot yet, then we need to decide whether to
        // format the current side as a snapshot or a diff. We write the current side as
        // a diff unless the next side has a smaller diff compared to the current base.
        if !snapshot_written {
            let right2 = hunk.get_add(add_index + 1).unwrap();
            let diff2 = ContentDiff::by_line([&left, &right2]).hunks().collect_vec();
            if diff_size(&diff2) < diff_size(&diff1) {
                // If the next positive term is a better match, emit the current positive term
                // as a snapshot and the next positive term as a diff.
                write_side(add_index, right1, output)?;
                write_diff(base_index, add_index + 1, &diff2, output)?;
                snapshot_written = true;
                continue;
            }
        }
        write_diff(base_index, add_index, &diff1, output)?;
    }
    // If we still didn't emit a snapshot, the last side is the snapshot.
    if !snapshot_written {
        let add_index = hunk.num_sides() - 1;
        write_side(add_index, hunk.get_add(add_index).unwrap(), output)?;
    }
    write_conflict_marker(
        output,
        ConflictMarkerLineChar::ConflictEnd,
        conflict_marker_len,
        &format!("{conflict_info} ends"),
    )?;
    Ok(())
}
/// Returns the " (no terminating newline)" comment when `slice` lacks a final
/// newline, and an empty string otherwise.
fn maybe_no_eol_comment(slice: &[u8]) -> &'static str {
    has_no_eol(slice).then_some(NO_EOL_COMMENT).unwrap_or("")
}
// Write a chunk of data, ensuring that it doesn't end with a line which is
// missing its terminating newline. The extra newline keeps whatever is
// written next (e.g. a conflict marker) on its own line.
fn write_and_ensure_newline(output: &mut dyn Write, data: &[u8]) -> io::Result<()> {
    output.write_all(data)?;
    if has_no_eol(data) {
        writeln!(output)?;
    }
    Ok(())
}
// Check whether a slice is missing its terminating newline character. An
// empty slice is considered terminated.
fn has_no_eol(slice: &[u8]) -> bool {
    match slice.last() {
        Some(&last) => last != b'\n',
        None => false,
    }
}
/// Measures a diff by the total byte length of its differing regions;
/// matching regions contribute nothing.
fn diff_size(hunks: &[DiffHunk]) -> usize {
    hunks
        .iter()
        .map(|hunk| match hunk.kind {
            DiffHunkKind::Matching => 0,
            DiffHunkKind::Different => hunk.contents.iter().map(|content| content.len()).sum(),
        })
        .sum()
}
/// One entry of a materialized tree diff: a path plus the materialized
/// before/after values (or the error encountered producing them).
pub struct MaterializedTreeDiffEntry {
    /// Path of the entry (tracks copy source and target).
    pub path: CopiesTreeDiffEntryPath,
    /// Materialized values before and after the change.
    pub values: BackendResult<Diff<MaterializedTreeValue>>,
}
/// Adapts `tree_diff` into a stream whose entries have both sides
/// materialized. Entries are processed concurrently; the output preserves the
/// input stream's order.
pub fn materialized_diff_stream(
    store: &Store,
    tree_diff: BoxStream<'_, CopiesTreeDiffEntry>,
    conflict_labels: Diff<&ConflictLabels>,
) -> impl Stream<Item = MaterializedTreeDiffEntry> {
    tree_diff
        .map(async |CopiesTreeDiffEntry { path, values }| match values {
            Err(err) => MaterializedTreeDiffEntry {
                path,
                values: Err(err),
            },
            Ok(values) => {
                // Materialize both sides of the entry concurrently.
                let before_future = materialize_tree_value(
                    store,
                    path.source(),
                    values.before,
                    conflict_labels.before,
                );
                let after_future = materialize_tree_value(
                    store,
                    path.target(),
                    values.after,
                    conflict_labels.after,
                );
                let values = try_join!(before_future, after_future)
                    .map(|(before, after)| Diff { before, after });
                MaterializedTreeDiffEntry { path, values }
            }
        })
        // Each buffered entry materializes two values, so use half the
        // store's concurrency limit (but at least 1).
        .buffered((store.concurrency() / 2).max(1))
}
/// Parses conflict markers from a slice.
///
/// Returns `None` if there were no valid conflict markers. The caller
/// has to provide the expected number of merge sides (adds). Conflict
/// markers that are otherwise valid will be considered invalid if
/// they don't have the expected arity.
///
/// All conflict markers in the file must be at least as long as the expected
/// length. Any shorter conflict markers will be ignored.
// TODO: "parse" is not usually the opposite of "materialize", so maybe we
// should rename them to "serialize" and "deserialize"?
pub fn parse_conflict(
    input: &[u8],
    num_sides: usize,
    expected_marker_len: usize,
) -> Option<Vec<Merge<BString>>> {
    if input.is_empty() {
        return None;
    }
    let mut hunks = vec![];
    // Byte offset of the current line within `input`.
    let mut pos = 0;
    // Start of the resolved (non-conflict) region currently being accumulated.
    let mut resolved_start = 0;
    // Position and line length of the most recent unmatched "<<<<<<<" marker.
    let mut conflict_start = None;
    let mut conflict_start_len = 0;
    for line in input.lines_with_terminator() {
        match parse_conflict_marker(line, expected_marker_len) {
            Some(ConflictMarkerLineChar::ConflictStart) => {
                conflict_start = Some(pos);
                conflict_start_len = line.len();
            }
            Some(ConflictMarkerLineChar::ConflictEnd) => {
                if let Some(conflict_start_index) = conflict_start.take() {
                    // Parse the text between the start and end markers.
                    let conflict_body = &input[conflict_start_index + conflict_start_len..pos];
                    let hunk = parse_conflict_hunk(conflict_body, expected_marker_len);
                    // Only accept hunks with the expected arity.
                    if hunk.num_sides() == num_sides {
                        // Flush any resolved text preceding the conflict.
                        let resolved_slice = &input[resolved_start..conflict_start_index];
                        if !resolved_slice.is_empty() {
                            hunks.push(Merge::resolved(BString::from(resolved_slice)));
                        }
                        hunks.push(hunk);
                        resolved_start = pos + line.len();
                    }
                }
            }
            _ => {}
        }
        pos += line.len();
    }
    if hunks.is_empty() {
        None
    } else {
        // Flush any resolved text after the final conflict.
        if resolved_start < input.len() {
            hunks.push(Merge::resolved(BString::from(&input[resolved_start..])));
        }
        Some(hunks)
    }
}
/// This method handles parsing both JJ-style and Git-style conflict markers,
/// meaning that switching conflict marker styles won't prevent existing files
/// with other conflict marker styles from being parsed successfully. The
/// conflict marker style to use for parsing is determined based on the first
/// line of the hunk.
///
/// A malformed hunk parses as an empty resolved merge (which the caller then
/// rejects via the arity check).
fn parse_conflict_hunk(input: &[u8], expected_marker_len: usize) -> Merge<BString> {
    // If the hunk starts with a conflict marker, find its first character
    let initial_conflict_marker = input
        .lines_with_terminator()
        .next()
        .and_then(|line| parse_conflict_marker(line, expected_marker_len));
    match initial_conflict_marker {
        // JJ-style conflicts must start with one of these 3 conflict marker lines
        Some(
            ConflictMarkerLineChar::Diff
            | ConflictMarkerLineChar::Remove
            | ConflictMarkerLineChar::Add,
        ) => parse_jj_style_conflict_hunk(input, expected_marker_len),
        // Git-style conflicts either must not start with a conflict marker line, or must start with
        // the "|||||||" conflict marker line (if the first side was empty)
        None | Some(ConflictMarkerLineChar::GitAncestor) => {
            parse_git_style_conflict_hunk(input, expected_marker_len)
        }
        // No other conflict markers are allowed at the start of a hunk
        Some(_) => Merge::resolved(BString::new(vec![])),
    }
}
fn parse_jj_style_conflict_hunk(input: &[u8], expected_marker_len: usize) -> Merge<BString> {
enum State {
Diff,
Remove,
Add,
Unknown,
}
let mut state = State::Unknown;
let mut removes = vec![];
let mut adds = vec![];
for line in input.lines_with_terminator() {
match parse_conflict_marker(line, expected_marker_len) {
Some(ConflictMarkerLineChar::Diff) => {
state = State::Diff;
removes.push(BString::new(vec![]));
adds.push(BString::new(vec![]));
continue;
}
Some(ConflictMarkerLineChar::Remove) => {
state = State::Remove;
removes.push(BString::new(vec![]));
continue;
}
Some(ConflictMarkerLineChar::Add) => {
state = State::Add;
adds.push(BString::new(vec![]));
continue;
}
Some(ConflictMarkerLineChar::Note) => {
continue;
}
_ => {}
}
match state {
State::Diff => {
if let Some(rest) = line.strip_prefix(b"-") {
removes.last_mut().unwrap().extend_from_slice(rest);
} else if let Some(rest) = line.strip_prefix(b"+") {
adds.last_mut().unwrap().extend_from_slice(rest);
} else if let Some(rest) = line.strip_prefix(b" ") {
removes.last_mut().unwrap().extend_from_slice(rest);
adds.last_mut().unwrap().extend_from_slice(rest);
} else if line == b"\n" || line == b"\r\n" {
// Some editors strip trailing whitespace, so " \n" might become "\n". It would
// be unfortunate if this prevented the conflict from being parsed, so we add
// the empty line to the "remove" and "add" as if there was a space in front
removes.last_mut().unwrap().extend_from_slice(line);
adds.last_mut().unwrap().extend_from_slice(line);
} else {
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/trailer.rs | lib/src/trailer.rs | // Copyright 2024 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Parsing trailers from commit messages.
use itertools::Itertools as _;
use thiserror::Error;
/// A key-value pair representing a trailer in a commit message, of the
/// form `Key: Value`.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct Trailer {
    /// trailer key
    pub key: String,
    /// trailer value
    ///
    /// It is trimmed at the start and the end, but for multi-line values it
    /// keeps the newline characters (\n) and the leading spaces of the
    /// continuation lines.
    pub value: String,
}
// Error cases from `parse_trailers()`; the `#[error]` display strings below
// describe each case, so per-variant doc comments are intentionally omitted.
#[expect(missing_docs)]
#[derive(Error, Debug)]
pub enum TrailerParseError {
    #[error("The trailer paragraph can't contain a blank line")]
    BlankLine,
    #[error("Invalid trailer line: {line}")]
    NonTrailerLine { line: String },
}
/// Parse the trailers from a commit message; these are simple key-value
/// pairs, separated by a colon, describing extra information in a commit
/// message; an example is the following:
///
/// ```text
/// chore: update itertools to version 0.14.0
///
/// Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
/// tempor incididunt ut labore et dolore magna aliqua.
///
/// Co-authored-by: Alice <alice@example.com>
/// Co-authored-by: Bob <bob@example.com>
/// Reviewed-by: Charlie <charlie@example.com>
/// Change-Id: I1234567890abcdef1234567890abcdef12345678
/// ```
///
/// In this case, there are four trailers: two `Co-authored-by` lines, one
/// `Reviewed-by` line, and one `Change-Id` line.
///
/// Returns an empty list when the message has no valid trailer paragraph.
pub fn parse_description_trailers(body: &str) -> Vec<Trailer> {
    let (trailers, blank, found_git_trailer, non_trailer) = parse_trailers_impl(body);
    if !blank {
        // no blank found, this means there was a single paragraph, so whatever
        // was found can't come from the trailer
        vec![]
    } else if non_trailer.is_some() && !found_git_trailer {
        // at least one non trailer line was found in the trailers paragraph
        // the trailers are considered as trailers only if there is a predefined
        // trailers from git
        vec![]
    } else {
        trailers
    }
}
/// Parse the trailers from a trailer paragraph. This function behaves like
/// `parse_description_trailer`, but will return an error if a blank or
/// non trailer line is found.
pub fn parse_trailers(body: &str) -> Result<Vec<Trailer>, TrailerParseError> {
    match parse_trailers_impl(body) {
        // A blank line is reported first, even if a non-trailer line was
        // also present.
        (_, true, _, _) => Err(TrailerParseError::BlankLine),
        (_, false, _, Some(line)) => Err(TrailerParseError::NonTrailerLine { line }),
        (trailers, false, _, None) => Ok(trailers),
    }
}
/// Shared parsing backend for [`parse_description_trailers()`] and
/// [`parse_trailers()`].
///
/// Returns `(trailers, found_blank_line, found_git_trailer,
/// last_non_trailer_line)`:
/// - `trailers`: the key-value pairs parsed from the final paragraph, in
///   file order.
/// - `found_blank_line`: whether a blank line terminated the paragraph.
/// - `found_git_trailer`: whether a trailer that Git itself generates
///   (`Signed-off-by`, `(cherry picked from commit …)`) was seen.
/// - `last_non_trailer_line`: the last line in the paragraph that is neither
///   a trailer nor a value continuation, if any.
fn parse_trailers_impl(body: &str) -> (Vec<Trailer>, bool, bool, Option<String>) {
    // a trailer always comes at the end of a message; we can split the message
    // by newline, but we need to immediately reverse the order of the lines
    // to ensure we parse the trailer in an unambiguous manner; this avoids cases
    // where a colon in the body of the message is mistaken for a trailer
    let lines = body.trim_ascii_end().lines().rev();
    // The pattern is constant, so compile the regex once instead of on every
    // call — regex compilation is comparatively expensive.
    static TRAILER_RE: std::sync::LazyLock<regex::Regex> = std::sync::LazyLock::new(|| {
        regex::Regex::new(r"^([a-zA-Z0-9-]+) *: *(.*)$").expect("Trailer regex should be valid")
    });
    let mut trailers: Vec<Trailer> = Vec::new();
    // Continuation lines of the value currently being accumulated, collected
    // in reverse order until the trailer's key line is reached.
    let mut multiline_value = vec![];
    let mut found_blank = false;
    let mut found_git_trailer = false;
    let mut non_trailer_line = None;
    for line in lines {
        if line.starts_with(' ') {
            // Indented line: continuation of a (not yet seen) trailer value.
            multiline_value.push(line);
        } else if let Some(groups) = TRAILER_RE.captures(line) {
            let key = groups[1].to_string();
            multiline_value.push(groups.get(2).unwrap().as_str());
            // trim the end of the multiline value
            // the start is already trimmed with the regex
            multiline_value[0] = multiline_value[0].trim_ascii_end();
            let value = multiline_value.iter().rev().join("\n");
            multiline_value.clear();
            if key == "Signed-off-by" {
                found_git_trailer = true;
            }
            trailers.push(Trailer { key, value });
        } else if line.starts_with("(cherry picked from commit ") {
            // Git's cherry-pick note isn't a key-value trailer, but its
            // presence marks the paragraph as a Git-generated trailer block.
            found_git_trailer = true;
            non_trailer_line = Some(line.to_owned());
            multiline_value.clear();
        } else if line.trim_ascii().is_empty() {
            // end of the trailer
            found_blank = true;
            break;
        } else {
            // a non trailer in the trailer paragraph
            // the line is ignored, as well as the multiline value that may
            // have previously been accumulated
            multiline_value.clear();
            non_trailer_line = Some(line.to_owned());
        }
    }
    // reverse the insert order, since we parsed the trailer in reverse
    trailers.reverse();
    (trailers, found_blank, found_git_trailer, non_trailer_line)
}
#[cfg(test)]
mod tests {
use indoc::indoc;
use pretty_assertions::assert_eq;
use super::*;
#[test]
fn test_simple_trailers() {
let descriptions = indoc! {r#"
chore: update itertools to version 0.14.0
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed
do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Co-authored-by: Alice <alice@example.com>
Co-authored-by: Bob <bob@example.com>
Reviewed-by: Charlie <charlie@example.com>
Change-Id: I1234567890abcdef1234567890abcdef12345678
"#};
let trailers = parse_description_trailers(descriptions);
assert_eq!(trailers.len(), 4);
assert_eq!(trailers[0].key, "Co-authored-by");
assert_eq!(trailers[0].value, "Alice <alice@example.com>");
assert_eq!(trailers[1].key, "Co-authored-by");
assert_eq!(trailers[1].value, "Bob <bob@example.com>");
assert_eq!(trailers[2].key, "Reviewed-by");
assert_eq!(trailers[2].value, "Charlie <charlie@example.com>");
assert_eq!(trailers[3].key, "Change-Id");
assert_eq!(
trailers[3].value,
"I1234567890abcdef1234567890abcdef12345678"
);
}
#[test]
fn test_trailers_with_colon_in_body() {
let descriptions = indoc! {r#"
chore: update itertools to version 0.14.0
Summary: Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua.
Change-Id: I1234567890abcdef1234567890abcdef12345678
"#};
let trailers = parse_description_trailers(descriptions);
// should only have Change-Id
assert_eq!(trailers.len(), 1);
assert_eq!(trailers[0].key, "Change-Id");
}
#[test]
fn test_multiline_trailer() {
let description = indoc! {r#"
chore: update itertools to version 0.14.0
key: This is a very long value, with spaces and
newlines in it.
"#};
let trailers = parse_description_trailers(description);
// should only have Change-Id
assert_eq!(trailers.len(), 1);
assert_eq!(trailers[0].key, "key");
assert_eq!(
trailers[0].value,
indoc! {r"
This is a very long value, with spaces and
newlines in it."}
);
}
#[test]
fn test_ignore_line_in_trailer() {
let description = indoc! {r#"
chore: update itertools to version 0.14.0
Signed-off-by: Random J Developer <random@developer.example.org>
[lucky@maintainer.example.org: struct foo moved from foo.c to foo.h]
Signed-off-by: Lucky K Maintainer <lucky@maintainer.example.org>
"#};
let trailers = parse_description_trailers(description);
assert_eq!(trailers.len(), 2);
}
#[test]
fn test_trailers_with_single_line_description() {
let description = r#"chore: update itertools to version 0.14.0"#;
let trailers = parse_description_trailers(description);
assert_eq!(trailers.len(), 0);
}
#[test]
fn test_parse_trailers() {
let trailers_txt = indoc! {r#"
foo: 1
bar: 2
"#};
let res = parse_trailers(trailers_txt);
let trailers = res.expect("trailers to be valid");
assert_eq!(trailers.len(), 2);
assert_eq!(trailers[0].key, "foo");
assert_eq!(trailers[0].value, "1");
assert_eq!(trailers[1].key, "bar");
assert_eq!(trailers[1].value, "2");
}
#[test]
fn test_blank_line_in_trailers() {
let trailers = indoc! {r#"
foo: 1
foo: 2
"#};
let res = parse_trailers(trailers);
assert!(matches!(res, Err(TrailerParseError::BlankLine)));
}
#[test]
fn test_non_trailer_line_in_trailers() {
let trailers = indoc! {r#"
bar
foo: 1
"#};
let res = parse_trailers(trailers);
assert!(matches!(
res,
Err(TrailerParseError::NonTrailerLine { line: _ })
));
}
#[test]
fn test_blank_line_after_trailer() {
let description = indoc! {r#"
subject
foo: 1
"#};
let trailers = parse_description_trailers(description);
assert_eq!(trailers.len(), 1);
}
#[test]
fn test_blank_line_inbetween() {
let description = indoc! {r#"
subject
foo: 1
bar: 2
"#};
let trailers = parse_description_trailers(description);
assert_eq!(trailers.len(), 1);
}
#[test]
fn test_no_blank_line() {
let description = indoc! {r#"
subject: whatever
foo: 1
"#};
let trailers = parse_description_trailers(description);
assert_eq!(trailers.len(), 0);
}
#[test]
fn test_whitespace_before_key() {
let description = indoc! {r#"
subject
foo: 1
"#};
let trailers = parse_description_trailers(description);
assert_eq!(trailers.len(), 0);
}
#[test]
fn test_whitespace_after_key() {
let description = indoc! {r#"
subject
foo : 1
"#};
let trailers = parse_description_trailers(description);
assert_eq!(trailers.len(), 1);
assert_eq!(trailers[0].key, "foo");
}
#[test]
fn test_whitespace_around_value() {
let description = indoc! {"
subject
foo: 1\x20
"};
let trailers = parse_description_trailers(description);
assert_eq!(trailers.len(), 1);
assert_eq!(trailers[0].value, "1");
}
#[test]
fn test_whitespace_around_multiline_value() {
let description = indoc! {"
subject
foo: 1\x20
2\x20
"};
let trailers = parse_description_trailers(description);
assert_eq!(trailers.len(), 1);
assert_eq!(trailers[0].value, "1 \n 2");
}
#[test]
fn test_whitespace_around_multiliple_trailers() {
let description = indoc! {"
subject
foo: 1\x20
bar: 2\x20
"};
let trailers = parse_description_trailers(description);
assert_eq!(trailers.len(), 2);
assert_eq!(trailers[0].value, "1");
assert_eq!(trailers[1].value, "2");
}
#[test]
fn test_no_whitespace_before_value() {
    // A colon directly followed by the value (no space) still parses.
    // NOTE(review): a blank separator line appears to be missing here.
    let description = indoc! {r#"
        subject
        foo:1
    "#};
    let trailers = parse_description_trailers(description);
    assert_eq!(trailers.len(), 1);
}
#[test]
fn test_empty_value() {
    // A trailer with an empty value still counts as a trailer.
    // NOTE(review): a blank separator line appears to be missing here.
    let description = indoc! {r#"
        subject
        foo:
    "#};
    let trailers = parse_description_trailers(description);
    assert_eq!(trailers.len(), 1);
}
#[test]
fn test_invalid_key() {
    // A key containing underscores is apparently not a valid trailer key, so
    // no trailer is parsed.
    let description = indoc! {r#"
        subject
        f_o_o: bar
    "#};
    let trailers = parse_description_trailers(description);
    assert_eq!(trailers.len(), 0);
}
#[test]
fn test_content_after_trailer() {
    // A non-trailer line after a trailer invalidates the whole block.
    let description = indoc! {r#"
        subject
        foo: bar
        baz
    "#};
    let trailers = parse_description_trailers(description);
    assert_eq!(trailers.len(), 0);
}
#[test]
fn test_invalid_content_after_trailer() {
    // Trailing non-trailer content voids the trailer block.
    // NOTE(review): in this view the literal is identical to the one in
    // test_content_after_trailer; the original presumably differed in the
    // indentation of "baz" — confirm against upstream.
    let description = indoc! {r#"
        subject
        foo: bar
        baz
    "#};
    let trailers = parse_description_trailers(description);
    assert_eq!(trailers.len(), 0);
}
#[test]
fn test_empty_description() {
    // An empty description yields no trailers (and must not panic).
    let description = "";
    let trailers = parse_description_trailers(description);
    assert_eq!(trailers.len(), 0);
}
#[test]
fn test_cherry_pick_trailer() {
    // A git-style "(cherry picked from commit ...)" line is tolerated inside
    // the trailer block without being reported as a trailer itself.
    // NOTE(review): blank separator lines appear elided from this literal;
    // confirm against upstream.
    let description = indoc! {r#"
        subject
        some non-trailer text
        foo: bar
        (cherry picked from commit 72bb9f9cf4bbb6bbb11da9cda4499c55c44e87b9)
    "#};
    let trailers = parse_description_trailers(description);
    assert_eq!(trailers.len(), 1);
    assert_eq!(trailers[0].key, "foo");
    assert_eq!(trailers[0].value, "bar");
}
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/tree_merge.rs | lib/src/tree_merge.rs | // Copyright 2023-2025 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Merge trees by recursing into entries (subtrees, files)
use std::borrow::Borrow;
use std::collections::BTreeMap;
use std::collections::HashSet;
use std::iter::zip;
use std::sync::Arc;
use std::vec;
use futures::FutureExt as _;
use futures::StreamExt as _;
use futures::future::BoxFuture;
use futures::future::try_join_all;
use futures::stream::FuturesUnordered;
use itertools::Itertools as _;
use tokio::io::AsyncReadExt as _;
use crate::backend;
use crate::backend::BackendError;
use crate::backend::BackendResult;
use crate::backend::TreeId;
use crate::backend::TreeValue;
use crate::config::ConfigGetError;
use crate::files;
use crate::files::FileMergeHunkLevel;
use crate::merge::Merge;
use crate::merge::MergedTreeVal;
use crate::merge::MergedTreeValue;
use crate::merge::SameChange;
use crate::merged_tree::all_merged_tree_entries;
use crate::object_id::ObjectId as _;
use crate::repo_path::RepoPath;
use crate::repo_path::RepoPathBuf;
use crate::repo_path::RepoPathComponentBuf;
use crate::settings::UserSettings;
use crate::store::Store;
use crate::tree::Tree;
/// Options for tree/file conflict resolution.
#[derive(Clone, Debug)]
pub struct MergeOptions {
    /// Granularity of hunks when merging files.
    pub hunk_level: FileMergeHunkLevel,
    /// Whether to resolve conflict that makes the same change at all sides.
    pub same_change: SameChange,
}
impl MergeOptions {
    /// Loads merge options from `settings`.
    ///
    /// Reads the `merge.hunk-level` and `merge.same-change` config keys.
    pub fn from_settings(settings: &UserSettings) -> Result<Self, ConfigGetError> {
        Ok(Self {
            // Maybe we can add hunk-level=file to disable content merging if
            // needed. It wouldn't be translated to FileMergeHunkLevel.
            hunk_level: settings.get("merge.hunk-level")?,
            same_change: settings.get("merge.same-change")?,
        })
    }
}
/// The returned conflict will either be resolved or have the same number of
/// sides as the input.
pub async fn merge_trees(store: &Arc<Store>, merge: Merge<TreeId>) -> BackendResult<Merge<TreeId>> {
let merge = match merge.into_resolved() {
Ok(tree) => return Ok(Merge::resolved(tree)),
Err(merge) => merge,
};
let mut merger = TreeMerger {
store: store.clone(),
trees_to_resolve: BTreeMap::new(),
work: FuturesUnordered::new(),
unstarted_work: BTreeMap::new(),
};
merger.enqueue_tree_read(
RepoPathBuf::root(),
merge.map(|tree_id| Some(TreeValue::Tree(tree_id.clone()))),
);
let trees = merger.merge().await?;
Ok(trees.map(|tree| tree.id().clone()))
}
/// In-progress merge state for a single directory.
struct MergedTreeInput {
    // Entries resolved so far, keyed by basename.
    resolved: BTreeMap<RepoPathComponentBuf, TreeValue>,
    /// Entries that we're currently waiting for data for in order to resolve
    /// them. When this set becomes empty, we're ready to write the tree(s).
    pending_lookup: HashSet<RepoPathComponentBuf>,
    // Entries that remain conflicted after attempting resolution.
    conflicts: BTreeMap<RepoPathComponentBuf, MergedTreeValue>,
}
impl MergedTreeInput {
    /// Creates state with the given already-resolved entries and no pending
    /// or conflicted entries.
    fn new(resolved: BTreeMap<RepoPathComponentBuf, TreeValue>) -> Self {
        Self {
            resolved,
            pending_lookup: HashSet::new(),
            conflicts: BTreeMap::new(),
        }
    }
    /// Records the merge outcome for `basename`, moving it from the pending
    /// set into either `resolved` or `conflicts`.
    fn mark_completed(
        &mut self,
        basename: RepoPathComponentBuf,
        value: MergedTreeValue,
        same_change: SameChange,
    ) {
        let was_pending = self.pending_lookup.remove(&basename);
        assert!(was_pending, "No pending lookup for {basename:?}");
        if let Some(resolved) = value.resolve_trivial(same_change) {
            // An absent (None) resolution simply drops the entry.
            if let Some(resolved) = resolved.as_ref() {
                self.resolved.insert(basename, resolved.clone());
            }
        } else {
            self.conflicts.insert(basename, value);
        }
    }
    /// Converts the fully processed state into one `backend::Tree` per
    /// conflict side, or a single resolved tree if there are no conflicts.
    fn into_backend_trees(self) -> Merge<backend::Tree> {
        assert!(self.pending_lookup.is_empty());
        // Ordering predicate for `merge_by()`: interleave entries by name.
        fn by_name(
            (name1, _): &(RepoPathComponentBuf, TreeValue),
            (name2, _): &(RepoPathComponentBuf, TreeValue),
        ) -> bool {
            name1 < name2
        }
        if self.conflicts.is_empty() {
            let all_entries = self.resolved.into_iter().collect();
            Merge::resolved(backend::Tree::from_sorted_entries(all_entries))
        } else {
            // Create a Merge with the conflict entries for each side.
            let mut conflict_entries = self.conflicts.first_key_value().unwrap().1.map(|_| vec![]);
            for (basename, value) in self.conflicts {
                assert_eq!(value.num_sides(), conflict_entries.num_sides());
                for (entries, value) in zip(&mut conflict_entries, value) {
                    if let Some(value) = value {
                        entries.push((basename.clone(), value));
                    }
                }
            }
            let mut backend_trees = vec![];
            for entries in conflict_entries {
                // Each side's tree holds the shared resolved entries plus that
                // side's conflict entries, merged in sorted order.
                let backend_tree = backend::Tree::from_sorted_entries(
                    self.resolved
                        .iter()
                        .map(|(name, value)| (name.clone(), value.clone()))
                        .merge_by(entries, by_name)
                        .collect(),
                );
                backend_trees.push(backend_tree);
            }
            Merge::from_vec(backend_trees)
        }
    }
}
/// The result from an asynchronously scheduled work item.
enum TreeMergerWorkOutput {
    /// Trees that have been read (i.e. `Read` is past tense)
    ReadTrees {
        dir: RepoPathBuf,
        result: BackendResult<Merge<Tree>>,
    },
    /// Trees that have been written to the backend.
    WrittenTrees {
        dir: RepoPathBuf,
        result: BackendResult<Merge<Tree>>,
    },
    /// A file-level merge attempt that has finished.
    MergedFiles {
        path: RepoPathBuf,
        result: BackendResult<MergedTreeValue>,
    },
}
/// Ordering key for queued (not yet polled) work items.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum TreeMergeWorkItemKey {
    // `MergeFiles` variant before `ReadTrees` so files are polled before trees because they
    // typically take longer to process.
    MergeFiles { path: RepoPathBuf },
    ReadTrees { dir: RepoPathBuf },
}
/// Scheduler state for a recursive tree merge.
struct TreeMerger {
    store: Arc<Store>,
    // Trees we're currently working on.
    trees_to_resolve: BTreeMap<RepoPathBuf, MergedTreeInput>,
    // Futures we're currently processing. In order to respect the backend's concurrency limit.
    work: FuturesUnordered<BoxFuture<'static, TreeMergerWorkOutput>>,
    // Futures we haven't started polling yet, in order to respect the backend's concurrency limit.
    unstarted_work: BTreeMap<TreeMergeWorkItemKey, BoxFuture<'static, TreeMergerWorkOutput>>,
}
impl TreeMerger {
    /// Drives all queued work to completion and returns the merged root trees.
    ///
    /// Processing one item may enqueue more work; the loop terminates when the
    /// write of the root tree completes, which implies all other work is done.
    async fn merge(mut self) -> BackendResult<Merge<Tree>> {
        while let Some(work_item) = self.work.next().await {
            match work_item {
                TreeMergerWorkOutput::ReadTrees { dir, result } => {
                    let tree = result?;
                    self.process_tree(dir, tree);
                }
                TreeMergerWorkOutput::WrittenTrees { dir, result } => {
                    let tree = result?;
                    if dir.is_root() {
                        assert!(self.trees_to_resolve.is_empty());
                        assert!(self.work.is_empty());
                        assert!(self.unstarted_work.is_empty());
                        return Ok(tree);
                    }
                    // Propagate the write to the parent tree, replacing empty trees by `None`.
                    let new_value = tree.map(|tree| {
                        (tree.id() != self.store.empty_tree_id())
                            .then(|| TreeValue::Tree(tree.id().clone()))
                    });
                    self.mark_completed(&dir, new_value);
                }
                TreeMergerWorkOutput::MergedFiles { path, result } => {
                    let value = result?;
                    self.mark_completed(&path, value);
                }
            }
            // Refill the in-flight set up to the backend's concurrency limit.
            while self.work.len() < self.store.concurrency() {
                if let Some((_key, work)) = self.unstarted_work.pop_first() {
                    self.work.push(work);
                } else {
                    break;
                }
            }
        }
        unreachable!("There was no work item for writing the root tree");
    }
    /// Handles freshly read trees for `dir`: resolves trivial entry merges,
    /// schedules reads/merges for the rest, or writes the tree right away if
    /// everything resolved trivially.
    fn process_tree(&mut self, dir: RepoPathBuf, tree: Merge<Tree>) {
        // First resolve trivial merges (those that we don't need to load any more data
        // for)
        let same_change = self.store.merge_options().same_change;
        let mut resolved = vec![];
        let mut non_trivial = vec![];
        for (basename, path_merge) in all_merged_tree_entries(&tree) {
            if let Some(value) = path_merge.resolve_trivial(same_change) {
                if let Some(value) = value.cloned() {
                    resolved.push((basename.to_owned(), value));
                }
            } else {
                non_trivial.push((basename.to_owned(), path_merge.cloned()));
            }
        }
        // If there are no non-trivial merges, we can write the tree now.
        if non_trivial.is_empty() {
            let backend_trees = Merge::resolved(backend::Tree::from_sorted_entries(resolved));
            self.enqueue_tree_write(dir, backend_trees);
            return;
        }
        let mut unmerged_tree = MergedTreeInput::new(resolved.into_iter().collect());
        for (basename, value) in non_trivial {
            let path = dir.join(&basename);
            unmerged_tree.pending_lookup.insert(basename);
            if value.is_tree() {
                self.enqueue_tree_read(path, value);
            } else {
                // TODO: If it's e.g. a dir/file conflict, there's no need to try to
                // resolve it as a file. We should mark them to
                // `unmerged_tree.conflicts` instead.
                self.enqueue_file_merge(path, value);
            }
        }
        self.trees_to_resolve.insert(dir, unmerged_tree);
    }
    /// Queues reading of the trees in `value` at `dir`, respecting the
    /// backend's concurrency limit.
    fn enqueue_tree_read(&mut self, dir: RepoPathBuf, value: MergedTreeValue) {
        let key = TreeMergeWorkItemKey::ReadTrees { dir: dir.clone() };
        let work_fut = read_trees(self.store.clone(), dir.clone(), value)
            .map(|result| TreeMergerWorkOutput::ReadTrees { dir, result });
        if self.work.len() < self.store.concurrency() {
            self.work.push(Box::pin(work_fut));
        } else {
            self.unstarted_work.insert(key, Box::pin(work_fut));
        }
    }
    /// Queues writing of `backend_trees` at `dir`.
    fn enqueue_tree_write(&mut self, dir: RepoPathBuf, backend_trees: Merge<backend::Tree>) {
        let work_fut = write_trees(self.store.clone(), dir.clone(), backend_trees)
            .map(|result| TreeMergerWorkOutput::WrittenTrees { dir, result });
        // Bypass the `unstarted_work` queue because writing trees usually results in
        // saving memory (each tree gets replaced by a `TreeValue::Tree`)
        self.work.push(Box::pin(work_fut));
    }
    /// Queues content merging of the file values at `path`, respecting the
    /// backend's concurrency limit.
    fn enqueue_file_merge(&mut self, path: RepoPathBuf, value: MergedTreeValue) {
        let key = TreeMergeWorkItemKey::MergeFiles { path: path.clone() };
        let work_fut = resolve_file_values_owned(self.store.clone(), path.clone(), value)
            .map(|result| TreeMergerWorkOutput::MergedFiles { path, result });
        if self.work.len() < self.store.concurrency() {
            self.work.push(Box::pin(work_fut));
        } else {
            self.unstarted_work.insert(key, Box::pin(work_fut));
        }
    }
    /// Records the merge result for `path` in its parent directory's state.
    fn mark_completed(&mut self, path: &RepoPath, value: MergedTreeValue) {
        let (dir, basename) = path.split().unwrap();
        let tree = self.trees_to_resolve.get_mut(dir).unwrap();
        let same_change = self.store.merge_options().same_change;
        tree.mark_completed(basename.to_owned(), value, same_change);
        // If all entries in this tree have been processed (either resolved or still a
        // conflict), schedule the writing of the tree(s) to the backend.
        if tree.pending_lookup.is_empty() {
            let tree = self.trees_to_resolve.remove(dir).unwrap();
            self.enqueue_tree_write(dir.to_owned(), tree.into_backend_trees());
        }
    }
}
/// Loads the `Tree` objects referenced by each side of `value`.
async fn read_trees(
    store: Arc<Store>,
    dir: RepoPathBuf,
    value: MergedTreeValue,
) -> BackendResult<Merge<Tree>> {
    let maybe_trees = value.to_tree_merge(&store, &dir).await?;
    Ok(maybe_trees.expect("Should be tree merge"))
}
/// Writes each side of `backend_trees` to the backend, concurrently.
async fn write_trees(
    store: Arc<Store>,
    dir: RepoPathBuf,
    backend_trees: Merge<backend::Tree>,
) -> BackendResult<Merge<Tree>> {
    // TODO: Could use `backend_trees.try_map_async()` here if it took `self` by
    // value or if `Backend::write_tree()` to an `Arc<backend::Tree>`.
    let write_futures: Vec<_> = backend_trees
        .into_iter()
        .map(|backend_tree| store.write_tree(&dir, backend_tree))
        .collect();
    let trees = try_join_all(write_futures).await?;
    Ok(Merge::from_vec(trees))
}
/// Owned-argument variant of file conflict resolution; falls back to the
/// input `values` when the conflict cannot be resolved.
async fn resolve_file_values_owned(
    store: Arc<Store>,
    path: RepoPathBuf,
    values: MergedTreeValue,
) -> BackendResult<MergedTreeValue> {
    match try_resolve_file_values(&store, &path, &values).await? {
        Some(resolved) => Ok(resolved),
        None => Ok(values),
    }
}
/// Tries to resolve file conflicts by merging the file contents. Treats missing
/// files as empty. If the file conflict cannot be resolved, returns the passed
/// `values` unmodified.
pub async fn resolve_file_values(
    store: &Arc<Store>,
    path: &RepoPath,
    values: MergedTreeValue,
) -> BackendResult<MergedTreeValue> {
    let same_change = store.merge_options().same_change;
    // Trivially resolvable values don't require reading any file contents.
    match values.resolve_trivial(same_change) {
        Some(resolved) => Ok(Merge::resolved(resolved.clone())),
        None => {
            let maybe_resolved = try_resolve_file_values(store, path, &values).await?;
            Ok(maybe_resolved.unwrap_or(values))
        }
    }
}
/// Attempts a content merge of `values`, returning `None` when that is not
/// possible (e.g. the values are not all files).
async fn try_resolve_file_values<T: Borrow<TreeValue>>(
    store: &Arc<Store>,
    path: &RepoPath,
    values: &Merge<Option<T>>,
) -> BackendResult<Option<MergedTreeValue>> {
    // The values may contain trees canceling each other (notably padded absent
    // trees), so we need to simplify them first.
    let simplified = values
        .map(|value| value.as_ref().map(Borrow::borrow))
        .simplify();
    // No fast path for simplified.is_resolved(). If it could be resolved, it
    // would have been caught by values.resolve_trivial() above.
    let maybe_resolved = try_resolve_file_conflict(store, path, &simplified).await?;
    // None means the files failed to merge, or the paths are not files.
    Ok(maybe_resolved.map(Merge::normal))
}
/// Resolves file-level conflict by merging content hunks.
///
/// The input `conflict` is supposed to be simplified. It shouldn't contain
/// non-file values that cancel each other.
///
/// Returns `Ok(None)` when the conflict cannot be resolved by content merging:
/// any side is a non-file or absent value, the executable bits or copy IDs
/// conflict, or the contents fail to merge cleanly.
async fn try_resolve_file_conflict(
    store: &Store,
    filename: &RepoPath,
    conflict: &MergedTreeVal<'_>,
) -> BackendResult<Option<TreeValue>> {
    let options = store.merge_options();
    // If there are any non-file or any missing parts in the conflict, we can't
    // merge it. We check early so we don't waste time reading file contents if
    // we can't merge them anyway. At the same time we determine whether the
    // resulting file should be executable.
    let Ok(file_id_conflict) = conflict.try_map(|term| match term {
        Some(TreeValue::File {
            id,
            executable: _,
            copy_id: _,
        }) => Ok(id),
        _ => Err(()),
    }) else {
        return Ok(None);
    };
    let Ok(executable_conflict) = conflict.try_map(|term| match term {
        Some(TreeValue::File {
            id: _,
            executable,
            copy_id: _,
        }) => Ok(executable),
        _ => Err(()),
    }) else {
        return Ok(None);
    };
    let Ok(copy_id_conflict) = conflict.try_map(|term| match term {
        Some(TreeValue::File {
            id: _,
            executable: _,
            copy_id,
        }) => Ok(copy_id),
        _ => Err(()),
    }) else {
        return Ok(None);
    };
    // TODO: Whether to respect options.same_change to merge executable and
    // copy_id? Should also update conflicts::resolve_file_executable().
    let Some(&&executable) = executable_conflict.resolve_trivial(SameChange::Accept) else {
        // We're unable to determine whether the result should be executable
        return Ok(None);
    };
    // BUG FIX: this binding was corrupted to `©_id` (a mangled `&copy;`
    // HTML entity) in transit; it must destructure a reference to the copy ID.
    let Some(&copy_id) = copy_id_conflict.resolve_trivial(SameChange::Accept) else {
        // We're unable to determine the file's copy ID
        return Ok(None);
    };
    if let Some(&resolved_file_id) = file_id_conflict.resolve_trivial(options.same_change) {
        // Don't bother reading the file contents if the conflict can be trivially
        // resolved.
        return Ok(Some(TreeValue::File {
            id: resolved_file_id.clone(),
            executable,
            copy_id: copy_id.clone(),
        }));
    }
    // While the input conflict should be simplified by caller, it might contain
    // terms which only differ in executable bits. Simplify the conflict further
    // for two reasons:
    // 1. Avoid reading unchanged file contents
    // 2. The simplified conflict can sometimes be resolved when the unsimplfied one
    //    cannot
    let file_id_conflict = file_id_conflict.simplify();
    // Read the contents of every remaining side, then try a hunk-level merge.
    let contents = file_id_conflict
        .try_map_async(async |file_id| {
            let mut content = vec![];
            let mut reader = store.read_file(filename, file_id).await?;
            reader
                .read_to_end(&mut content)
                .await
                .map_err(|err| BackendError::ReadObject {
                    object_type: file_id.object_type(),
                    hash: file_id.hex(),
                    source: err.into(),
                })?;
            BackendResult::Ok(content)
        })
        .await?;
    if let Some(merged_content) = files::try_merge(&contents, options) {
        let id = store
            .write_file(filename, &mut merged_content.as_slice())
            .await?;
        Ok(Some(TreeValue::File {
            id,
            executable,
            copy_id: copy_id.clone(),
        }))
    } else {
        Ok(None)
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/signing.rs | lib/src/signing.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Generic APIs to work with cryptographic signatures created and verified by
//! various backends.
use std::fmt::Debug;
use std::fmt::Display;
use std::sync::Mutex;
use clru::CLruCache;
use thiserror::Error;
use crate::backend::CommitId;
use crate::config::ConfigGetError;
use crate::gpg_signing::GpgBackend;
use crate::gpg_signing::GpgsmBackend;
use crate::settings::UserSettings;
use crate::ssh_signing::SshBackend;
use crate::store::COMMIT_CACHE_CAPACITY;
#[cfg(feature = "testing")]
use crate::test_signing_backend::TestSigningBackend;
/// A status of the signature, part of the [Verification] type.
///
/// Rendered in lowercase ("good", "unknown", "bad") by its `Display` impl.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SigStatus {
    /// Valid signature that matches the data.
    Good,
    /// Valid signature that could not be verified (e.g. due to an unknown key).
    Unknown,
    /// Valid signature that does not match the signed data.
    Bad,
}
impl Display for SigStatus {
    /// Formats the status as its lowercase keyword: "good", "unknown" or
    /// "bad".
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            Self::Good => "good",
            Self::Unknown => "unknown",
            Self::Bad => "bad",
        })
    }
}
/// The result of a signature verification.
///
/// Key and display are optional additional info that backends can or can not
/// provide to add additional information for the templater to potentially show.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Verification {
    /// The status of the signature.
    pub status: SigStatus,
    /// The key id representation, if available. For GPG, this will be the key
    /// fingerprint.
    pub key: Option<String>,
    /// A display string, if available. For GPG, this will be formatted primary
    /// user ID.
    pub display: Option<String>,
}
impl Verification {
    /// A shortcut to create an `Unknown` verification with no additional
    /// metadata.
    pub fn unknown() -> Self {
        Self::new(SigStatus::Unknown, None, None)
    }
    /// Create a new verification with the given status and optional metadata.
    pub fn new(status: SigStatus, key: Option<String>, display: Option<String>) -> Self {
        Self {
            status,
            key,
            display,
        }
    }
}
/// The backend for signing and verifying cryptographic signatures.
///
/// This allows using different signers, such as GPG or SSH, or different
/// versions of them.
///
/// Implementations must be `Send + Sync` since a single `Signer` holds them
/// behind shared references.
pub trait SigningBackend: Debug + Send + Sync {
    /// Name of the backend, used in the config and for display.
    fn name(&self) -> &str;
    /// Check if the signature can be read and verified by this backend.
    ///
    /// Should check the signature format, usually just looks at the prefix.
    fn can_read(&self, signature: &[u8]) -> bool;
    /// Create a signature for arbitrary data.
    ///
    /// The `key` parameter is what `jj sign` receives as key argument, or what
    /// is configured in the `signing.key` config.
    fn sign(&self, data: &[u8], key: Option<&str>) -> SignResult<Vec<u8>>;
    /// Verify a signature. Should be reflexive with `sign`:
    /// ```rust,ignore
    /// verify(data, sign(data)?)?.status == SigStatus::Good
    /// ```
    fn verify(&self, data: &[u8], signature: &[u8]) -> SignResult<Verification>;
}
/// An error type for the signing/verifying operations.
#[derive(Debug, Error)]
pub enum SignError {
    /// The verification failed because the signature *format* was invalid.
    #[error("Invalid signature")]
    InvalidSignatureFormat,
    /// A generic error from the backend impl.
    #[error("Signing error")]
    Backend(#[source] Box<dyn std::error::Error + Send + Sync>),
}
/// A result type for the signing/verifying operations.
pub type SignResult<T> = Result<T, SignError>;
/// An error type for the signing backend initialization.
#[derive(Debug, Error)]
pub enum SignInitError {
    /// If the backend name specified in the config is not known.
    #[error("Unknown signing backend configured: {0}")]
    UnknownBackend(String),
    /// Failed to load backend configuration.
    #[error("Failed to configure signing backend")]
    BackendConfig(#[source] ConfigGetError),
}
/// An enum that describes if a created/rewritten commit should be signed or
/// not.
#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum SignBehavior {
    /// Drop existing signatures.
    /// This is what jj did before signing support or does now when a signing
    /// backend is not configured.
    Drop,
    /// Only sign commits that were authored by self and already signed,
    /// "preserving" the signature across rewrites.
    /// This is what jj does when a signing backend is configured.
    Keep,
    /// Sign/re-sign commits that were authored by self and drop them for
    /// others. This is what jj does when configured to always sign.
    Own,
    /// Always sign commits, regardless of who authored or signed them before.
    /// This is what jj does on `jj sign -f`.
    Force,
}
/// Wraps low-level signing backends and adds caching, similar to `Store`.
#[derive(Debug)]
pub struct Signer {
    /// The backend that is used for signing commits.
    /// Optional because signing might not be configured.
    main_backend: Option<Box<dyn SigningBackend>>,
    /// All known backends without the main one - used for verification.
    /// Main backend is also used for verification, but it's not in this list
    /// for ownership reasons.
    backends: Vec<Box<dyn SigningBackend>>,
    /// LRU cache of verification results keyed by commit id.
    cache: Mutex<CLruCache<CommitId, Verification>>,
}
impl Signer {
    /// Creates a signer based on user settings. Uses all known backends, and
    /// chooses one of them to be used for signing depending on the config.
    pub fn from_settings(settings: &UserSettings) -> Result<Self, SignInitError> {
        let mut backends: Vec<Box<dyn SigningBackend>> = vec![
            Box::new(GpgBackend::from_settings(settings).map_err(SignInitError::BackendConfig)?),
            Box::new(GpgsmBackend::from_settings(settings).map_err(SignInitError::BackendConfig)?),
            Box::new(SshBackend::from_settings(settings).map_err(SignInitError::BackendConfig)?),
            #[cfg(feature = "testing")]
            Box::new(TestSigningBackend),
        ];
        // Move the configured backend (if any) out of `backends` so it becomes
        // the main one; the remainder are kept for verification only.
        let main_backend = settings
            .signing_backend()
            .map_err(SignInitError::BackendConfig)?
            .map(|backend| {
                backends
                    .iter()
                    .position(|b| b.name() == backend)
                    .map(|i| backends.remove(i))
                    .ok_or(SignInitError::UnknownBackend(backend))
            })
            .transpose()?;
        Ok(Self::new(main_backend, backends))
    }
    /// Creates a signer with the given backends.
    pub fn new(
        main_backend: Option<Box<dyn SigningBackend>>,
        other_backends: Vec<Box<dyn SigningBackend>>,
    ) -> Self {
        Self {
            main_backend,
            backends: other_backends,
            cache: Mutex::new(CLruCache::new(COMMIT_CACHE_CAPACITY.try_into().unwrap())),
        }
    }
    /// Checks if the signer can sign, i.e. if a main backend is configured.
    pub fn can_sign(&self) -> bool {
        self.main_backend.is_some()
    }
    /// This is just a pass-through to the main backend that unconditionally
    /// creates a signature.
    ///
    /// Panics if no main backend is configured; call `can_sign()` first.
    pub fn sign(&self, data: &[u8], key: Option<&str>) -> SignResult<Vec<u8>> {
        self.main_backend
            .as_ref()
            .expect("tried to sign without checking can_sign first")
            .sign(data, key)
    }
    /// Looks for backend that can verify the signature and returns the result
    /// of its verification.
    ///
    /// Results are cached per commit id; `Unknown` results are only cached
    /// when no backend could handle the signature at all.
    pub fn verify(
        &self,
        commit_id: &CommitId,
        data: &[u8],
        signature: &[u8],
    ) -> SignResult<Verification> {
        let cached = self.cache.lock().unwrap().get(commit_id).cloned();
        if let Some(check) = cached {
            return Ok(check);
        }
        // The main backend, if any, gets the first chance to verify.
        let verification = self
            .main_backend
            .iter()
            .chain(self.backends.iter())
            .filter(|b| b.can_read(signature))
            // skip unknown and invalid sigs to allow other backends that can read to try
            // for example, we might have gpg and sq, both of which could read a PGP signature
            .find_map(|backend| match backend.verify(data, signature) {
                Ok(check) if check.status == SigStatus::Unknown => None,
                Err(SignError::InvalidSignatureFormat) => None,
                e => Some(e),
            })
            .transpose()?;
        if let Some(verification) = verification {
            // a key might get imported before next call?.
            // realistically this is unlikely, but technically
            // it's correct to not cache unknowns here
            if verification.status != SigStatus::Unknown {
                self.cache
                    .lock()
                    .unwrap()
                    .put(commit_id.clone(), verification.clone());
            }
            Ok(verification)
        } else {
            // now here it's correct to cache unknowns, as we don't
            // have a backend that knows how to handle this signature
            //
            // not sure about how much of an optimization this is
            self.cache
                .lock()
                .unwrap()
                .put(commit_id.clone(), Verification::unknown());
            Ok(Verification::unknown())
        }
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/files.rs | lib/src/files.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::borrow::Borrow;
use std::collections::VecDeque;
use std::iter;
use std::mem;
use bstr::BStr;
use bstr::BString;
use either::Either;
use itertools::Itertools as _;
use crate::diff::ContentDiff;
use crate::diff::DiffHunk;
use crate::diff::DiffHunkKind;
use crate::merge::Merge;
use crate::merge::SameChange;
use crate::tree_merge::MergeOptions;
/// A diff line which may contain small hunks originating from both sides.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct DiffLine<'a> {
    /// Left/right line numbers at which this line starts.
    pub line_number: DiffLineNumber,
    /// The pieces making up the line, each tagged with the side it came from.
    pub hunks: Vec<(DiffLineHunkSide, &'a BStr)>,
}
impl DiffLine<'_> {
    /// Whether any hunk on this line is present on the left side.
    pub fn has_left_content(&self) -> bool {
        self.hunks
            .iter()
            .any(|&(side, _)| matches!(side, DiffLineHunkSide::Both | DiffLineHunkSide::Left))
    }
    /// Whether any hunk on this line is present on the right side.
    pub fn has_right_content(&self) -> bool {
        self.hunks
            .iter()
            .any(|&(side, _)| matches!(side, DiffLineHunkSide::Both | DiffLineHunkSide::Right))
    }
    /// Whether every hunk on this line is common to both sides.
    pub fn is_unmodified(&self) -> bool {
        !self
            .hunks
            .iter()
            .any(|&(side, _)| side != DiffLineHunkSide::Both)
    }
    /// Moves the accumulated hunks out of `self`, keeping the line number.
    fn take(&mut self) -> Self {
        let hunks = mem::take(&mut self.hunks);
        Self {
            line_number: self.line_number,
            hunks,
        }
    }
}
/// Pair of 1-based line numbers in the left (before) and right (after) inputs.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct DiffLineNumber {
    /// Line number in the left input.
    pub left: u32,
    /// Line number in the right input.
    pub right: u32,
}
/// Which side a `DiffLine` hunk belongs to?
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum DiffLineHunkSide {
    /// Content common to both sides.
    Both,
    /// Content only present on the left (before) side.
    Left,
    /// Content only present on the right (after) side.
    Right,
}
/// Iterator that assembles `DiffLine`s from two-sided diff hunks.
pub struct DiffLineIterator<'a, I> {
    // Fused so iteration stays stable after the hunks run out.
    diff_hunks: iter::Fuse<I>,
    // Line currently being assembled (possibly partial).
    current_line: DiffLine<'a>,
    // Completed lines not yet handed out by `next()`.
    queued_lines: VecDeque<DiffLine<'a>>,
}
impl<'a, I> DiffLineIterator<'a, I>
where
    I: Iterator,
    I::Item: Borrow<DiffHunk<'a>>,
{
    /// Iterates `diff_hunks` by line. Each hunk should have exactly two inputs.
    pub fn new(diff_hunks: I) -> Self {
        Self::with_line_number(diff_hunks, DiffLineNumber { left: 1, right: 1 })
    }
    /// Iterates `diff_hunks` by line. Each hunk should have exactly two inputs.
    /// Hunk's line numbers start from the given `line_number`.
    pub fn with_line_number(diff_hunks: I, line_number: DiffLineNumber) -> Self {
        Self {
            diff_hunks: diff_hunks.fuse(),
            current_line: DiffLine {
                line_number,
                hunks: vec![],
            },
            queued_lines: VecDeque::new(),
        }
    }
}
impl<I> DiffLineIterator<'_, I> {
    /// Returns line number of the next hunk. After all hunks are consumed, this
    /// returns the next line number if the last hunk ends with newline.
    pub fn next_line_number(&self) -> DiffLineNumber {
        self.queued_lines
            .front()
            .map_or(self.current_line.line_number, |line| line.line_number)
    }
}
impl<'a, I> Iterator for DiffLineIterator<'a, I>
where
    I: Iterator,
    I::Item: Borrow<DiffHunk<'a>>,
{
    type Item = DiffLine<'a>;
    fn next(&mut self) -> Option<Self::Item> {
        // TODO: Should we attempt to interpret as utf-8 and otherwise break only at
        // newlines?
        // Pull hunks until at least one complete line has been queued (or the
        // hunks run out).
        while self.queued_lines.is_empty() {
            let Some(hunk) = self.diff_hunks.next() else {
                break;
            };
            let hunk = hunk.borrow();
            match hunk.kind {
                DiffHunkKind::Matching => {
                    // TODO: add support for unmatched contexts?
                    debug_assert!(hunk.contents.iter().all_equal());
                    let text = hunk.contents[0];
                    let lines = text.split_inclusive(|b| *b == b'\n').map(BStr::new);
                    for line in lines {
                        self.current_line.hunks.push((DiffLineHunkSide::Both, line));
                        // A trailing "\n" completes the current line; both
                        // sides advance in lockstep for matching content.
                        if line.ends_with(b"\n") {
                            self.queued_lines.push_back(self.current_line.take());
                            self.current_line.line_number.left += 1;
                            self.current_line.line_number.right += 1;
                        }
                    }
                }
                DiffHunkKind::Different => {
                    let [left_text, right_text] = hunk.contents[..]
                        .try_into()
                        .expect("hunk should have exactly two inputs");
                    let left_lines = left_text.split_inclusive(|b| *b == b'\n').map(BStr::new);
                    for left_line in left_lines {
                        self.current_line
                            .hunks
                            .push((DiffLineHunkSide::Left, left_line));
                        if left_line.ends_with(b"\n") {
                            self.queued_lines.push_back(self.current_line.take());
                            self.current_line.line_number.left += 1;
                        }
                    }
                    let mut right_lines =
                        right_text.split_inclusive(|b| *b == b'\n').map(BStr::new);
                    // Omit blank right line if matching hunk of the same line
                    // number has already been queued. Here we only need to
                    // check the first queued line since the other lines should
                    // be created in the left_lines loop above.
                    if right_text.starts_with(b"\n")
                        && self.current_line.hunks.is_empty()
                        && self
                            .queued_lines
                            .front()
                            .is_some_and(|queued| queued.has_right_content())
                    {
                        let blank_line = right_lines.next().unwrap();
                        assert_eq!(blank_line, b"\n");
                        self.current_line.line_number.right += 1;
                    }
                    for right_line in right_lines {
                        self.current_line
                            .hunks
                            .push((DiffLineHunkSide::Right, right_line));
                        if right_line.ends_with(b"\n") {
                            self.queued_lines.push_back(self.current_line.take());
                            self.current_line.line_number.right += 1;
                        }
                    }
                }
            }
        }
        if let Some(line) = self.queued_lines.pop_front() {
            return Some(line);
        }
        // Flush the final (newline-less) line once the hunks are exhausted.
        if !self.current_line.hunks.is_empty() {
            return Some(self.current_line.take());
        }
        None
    }
}
/// Diff hunk that may be unresolved conflicts.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ConflictDiffHunk<'input> {
    /// Whether the left and right contents match.
    pub kind: DiffHunkKind,
    /// Contents of the left (before) side, possibly conflicted.
    pub lefts: Merge<&'input BStr>,
    /// Contents of the right (after) side, possibly conflicted.
    pub rights: Merge<&'input BStr>,
}
/// Iterator adaptor that translates non-conflict hunks to resolved `Merge`.
///
/// Trivial conflicts in the diff inputs should have been resolved by caller.
pub fn conflict_diff_hunks<'input, I>(
    diff_hunks: I,
    num_lefts: usize,
) -> impl Iterator<Item = ConflictDiffHunk<'input>>
where
    I: IntoIterator,
    I::Item: Borrow<DiffHunk<'input>>,
{
    // Wraps one side's contents in a Merge, collapsing identical terms to a
    // resolved value.
    fn to_merge<'input>(contents: &[&'input BStr]) -> Merge<&'input BStr> {
        // Not using trivial_merge() so that the original content can be
        // reproduced by concatenating hunks.
        if contents.iter().all_equal() {
            Merge::resolved(contents[0])
        } else {
            Merge::from_vec(contents)
        }
    }
    diff_hunks.into_iter().map(move |hunk| {
        let hunk = hunk.borrow();
        // The first `num_lefts` contents belong to the left side.
        let (lefts, rights) = hunk.contents.split_at(num_lefts);
        if let ([left], [right]) = (lefts, rights) {
            // Non-conflicting diff shouldn't have identical contents
            ConflictDiffHunk {
                kind: hunk.kind,
                lefts: Merge::resolved(left),
                rights: Merge::resolved(right),
            }
        } else {
            let lefts = to_merge(lefts);
            let rights = to_merge(rights);
            // Sides that become equal after wrapping count as matching.
            let kind = match hunk.kind {
                DiffHunkKind::Matching => DiffHunkKind::Matching,
                DiffHunkKind::Different if lefts == rights => DiffHunkKind::Matching,
                DiffHunkKind::Different => DiffHunkKind::Different,
            };
            ConflictDiffHunk {
                kind,
                lefts,
                rights,
            }
        }
    })
}
/// Granularity of hunks when merging files.
#[derive(Clone, Copy, Debug, Eq, PartialEq, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum FileMergeHunkLevel {
    /// Splits into line hunks.
    Line,
    /// Splits into word hunks. A line hunk that cannot be fully resolved at
    /// word granularity is kept as the original (conflicted) line hunk.
    Word,
}
/// Merge result in either fully-resolved or conflicts form, akin to
/// `Result<BString, Vec<Merge<BString>>>`.
#[derive(PartialEq, Eq, Clone, Debug)]
pub enum MergeResult {
    /// Resolved content if inputs can be merged successfully.
    Resolved(BString),
    /// List of partially-resolved hunks if some of them cannot be merged.
    /// Runs of resolved content appear as single-term `Merge` entries.
    Conflict(Vec<Merge<BString>>),
}
/// Splits `inputs` into hunks, resolves trivial merge conflicts for each.
///
/// Returns either fully-resolved content or list of partially-resolved hunks.
/// See [`MergeResult`] for the output representation.
pub fn merge_hunks<T: AsRef<[u8]>>(inputs: &Merge<T>, options: &MergeOptions) -> MergeResult {
    merge_inner(inputs, options)
}
/// Splits `inputs` into hunks, resolves trivial merge conflicts for each, then
/// concatenates the outcome back to single `Merge` object.
///
/// The returned merge object is either fully resolved or conflict having the
/// same number of terms as the `inputs`.
pub fn merge<T: AsRef<[u8]>>(inputs: &Merge<T>, options: &MergeOptions) -> Merge<BString> {
    // The `Merge<BString>` return type selects the collect_merged() strategy
    // through the FromMergeHunks impl.
    merge_inner(inputs, options)
}
/// Splits `inputs` into hunks, attempts to resolve trivial merge conflicts for
/// each.
///
/// If all input hunks can be merged successfully, returns the merged content.
/// Returns `None` if any hunk remains conflicted (short-circuits).
pub fn try_merge<T: AsRef<[u8]>>(inputs: &Merge<T>, options: &MergeOptions) -> Option<BString> {
    merge_inner(inputs, options)
}
/// Shared implementation of [`merge_hunks()`], [`merge()`] and
/// [`try_merge()`]; the output collector is chosen by `B`.
fn merge_inner<'input, T, B>(inputs: &'input Merge<T>, options: &MergeOptions) -> B
where
    T: AsRef<[u8]>,
    B: FromMergeHunks<'input>,
{
    // TODO: Using the first remove as base (first in the inputs) is how it's
    // usually done for 3-way conflicts. Are there better heuristics when there are
    // more than 3 parts?
    let same_change = options.same_change;
    let num_diffs = inputs.removes().len();
    let diff = ContentDiff::by_line(inputs.removes().chain(inputs.adds()));
    let line_hunks = resolve_diff_hunks(&diff, num_diffs, same_change);
    if options.hunk_level == FileMergeHunkLevel::Word {
        // Re-merge each still-conflicted line hunk at word granularity.
        B::from_hunks(line_hunks.map(|hunk| merge_hunk_by_word(hunk, same_change)))
    } else {
        B::from_hunks(line_hunks.map(MergeHunk::Borrowed))
    }
}
/// Attempts to resolve a single conflicted line hunk by re-merging it at word
/// granularity.
///
/// Returns the resolved content if the word-level merge fully succeeds, and
/// the original `inputs` hunk unchanged otherwise.
fn merge_hunk_by_word(inputs: Merge<&BStr>, same_change: SameChange) -> MergeHunk<'_> {
    if inputs.is_resolved() {
        return MergeHunk::Borrowed(inputs);
    }
    let num_diffs = inputs.removes().len();
    let diff = ContentDiff::by_word(inputs.removes().chain(inputs.adds()));
    let hunks = resolve_diff_hunks(&diff, num_diffs, same_change);
    // We could instead use collect_merged() to return partially-merged hunk.
    // This would be more consistent with the line-based merge function, but
    // might produce surprising results. Partially-merged conflicts would be
    // hard to review because they would have mixed contexts.
    if let Some(content) = collect_resolved(hunks.map(MergeHunk::Borrowed)) {
        MergeHunk::Owned(Merge::resolved(content))
    } else {
        // NOTE(review): presumably ends the borrow of `diff` explicitly so
        // `inputs` can be moved into the return value — confirm this is
        // required by the borrow checker before simplifying.
        drop(diff);
        MergeHunk::Borrowed(inputs)
    }
}
/// `Cow`-like type over `Merge<T>`.
#[derive(Clone, Debug)]
enum MergeHunk<'input> {
    /// Terms borrowed from the diff inputs.
    Borrowed(Merge<&'input BStr>),
    /// Terms owned by this hunk (e.g. produced by a word-level re-merge).
    Owned(Merge<BString>),
}
impl MergeHunk<'_> {
    /// Number of terms in the underlying merge.
    fn len(&self) -> usize {
        match self {
            Self::Borrowed(m) => m.as_slice().len(),
            Self::Owned(m) => m.as_slice().len(),
        }
    }

    /// Iterates over all terms as borrowed byte strings.
    fn iter(&self) -> impl Iterator<Item = &BStr> {
        match self {
            Self::Borrowed(m) => Either::Left(m.iter().copied()),
            Self::Owned(m) => Either::Right(m.iter().map(Borrow::borrow)),
        }
    }

    /// Returns the single content if the merge is resolved, `None` otherwise.
    fn as_resolved(&self) -> Option<&BStr> {
        match self {
            Self::Borrowed(m) => m.as_resolved().copied(),
            Self::Owned(m) => m.as_resolved().map(Borrow::borrow),
        }
    }

    /// Converts into an owned merge, copying borrowed terms if needed.
    fn into_owned(self) -> Merge<BString> {
        match self {
            Self::Borrowed(m) => m.map(|&s| s.to_owned()),
            Self::Owned(m) => m,
        }
    }
}
/// `FromIterator` for merge result.
///
/// Lets [`merge_inner()`] collect resolved/unresolved hunks into whichever
/// output representation the public entry point asks for.
trait FromMergeHunks<'input>: Sized {
    fn from_hunks<I: IntoIterator<Item = MergeHunk<'input>>>(hunks: I) -> Self;
}
// Output for merge_hunks(): resolved content or a list of conflict hunks.
impl<'input> FromMergeHunks<'input> for MergeResult {
    fn from_hunks<I: IntoIterator<Item = MergeHunk<'input>>>(hunks: I) -> Self {
        collect_hunks(hunks)
    }
}
// Output for merge(): single Merge concatenating all hunks.
impl<'input> FromMergeHunks<'input> for Merge<BString> {
    fn from_hunks<I: IntoIterator<Item = MergeHunk<'input>>>(hunks: I) -> Self {
        collect_merged(hunks)
    }
}
// Output for try_merge(): Some(content) only if every hunk is resolved.
impl<'input> FromMergeHunks<'input> for Option<BString> {
    fn from_hunks<I: IntoIterator<Item = MergeHunk<'input>>>(hunks: I) -> Self {
        collect_resolved(hunks)
    }
}
/// Collects merged hunks into either fully-resolved content or list of
/// partially-resolved hunks.
fn collect_hunks<'input>(hunks: impl IntoIterator<Item = MergeHunk<'input>>) -> MergeResult {
    let mut merge_hunks: Vec<Merge<BString>> = Vec::new();
    // Pending run of consecutive resolved content; flushed as a single-term
    // Merge whenever a conflicted hunk interrupts it.
    let mut resolved_run = BString::default();
    for hunk in hunks {
        if let Some(content) = hunk.as_resolved() {
            resolved_run.extend_from_slice(content);
        } else {
            if !resolved_run.is_empty() {
                merge_hunks.push(Merge::resolved(std::mem::take(&mut resolved_run)));
            }
            merge_hunks.push(hunk.into_owned());
        }
    }
    if merge_hunks.is_empty() {
        // No conflicts at all: the run is the whole merged file.
        return MergeResult::Resolved(resolved_run);
    }
    if !resolved_run.is_empty() {
        merge_hunks.push(Merge::resolved(resolved_run));
    }
    MergeResult::Conflict(merge_hunks)
}
/// Collects merged hunks back to single `Merge` object, duplicating resolved
/// hunks to all positive and negative terms.
fn collect_merged<'input>(hunks: impl IntoIterator<Item = MergeHunk<'input>>) -> Merge<BString> {
    // Stay in the single-term (resolved) representation as long as possible;
    // widen to an N-term conflict lazily on the first unresolved hunk.
    let mut maybe_resolved = Merge::resolved(BString::default());
    for hunk in hunks {
        if let Some(content) = hunk.as_resolved() {
            // Append resolved content to every term so each side of the final
            // conflict reads as a complete file.
            for buf in &mut maybe_resolved {
                buf.extend_from_slice(content);
            }
        } else {
            // Widen: replicate the accumulated resolved prefix across as many
            // terms as this conflict hunk has.
            maybe_resolved = match maybe_resolved.into_resolved() {
                Ok(content) => Merge::from_vec(vec![content; hunk.len()]),
                Err(conflict) => conflict,
            };
            // All conflict hunks are expected to have the same arity.
            assert_eq!(maybe_resolved.as_slice().len(), hunk.len());
            for (buf, s) in iter::zip(&mut maybe_resolved, hunk.iter()) {
                buf.extend_from_slice(s);
            }
        }
    }
    maybe_resolved
}
/// Collects resolved merge hunks. Short-circuits on unresolved hunk.
fn collect_resolved<'input>(hunks: impl IntoIterator<Item = MergeHunk<'input>>) -> Option<BString> {
    hunks
        .into_iter()
        .try_fold(BString::default(), |mut content, hunk| {
            // `?` bails out with None on the first conflicted hunk.
            content.extend_from_slice(hunk.as_resolved()?);
            Some(content)
        })
}
/// Iterator that attempts to resolve trivial merge conflict for each hunk.
fn resolve_diff_hunks<'input>(
    diff: &ContentDiff<'input>,
    num_diffs: usize,
    same_change: SameChange,
) -> impl Iterator<Item = Merge<&'input BStr>> {
    diff.hunks().map(move |diff_hunk| match diff_hunk.kind {
        DiffHunkKind::Matching => {
            // All inputs agree on this hunk; any of them will do.
            debug_assert!(diff_hunk.contents.iter().all_equal());
            Merge::resolved(diff_hunk.contents[0])
        }
        DiffHunkKind::Different => {
            // The first num_diffs contents are the negative (remove) terms.
            let (removes, adds) = diff_hunk.contents.split_at(num_diffs);
            let merge = Merge::from_removes_adds(removes.iter().copied(), adds.iter().copied());
            match merge.resolve_trivial(same_change) {
                Some(&content) => Merge::resolved(content),
                None => merge,
            }
        }
    })
}
#[cfg(test)]
mod tests {
use indoc::indoc;
use super::*;
    /// Builds an N-way conflict from raw byte-string terms.
    fn conflict<const N: usize>(values: [&[u8]; N]) -> Merge<BString> {
        Merge::from_vec(values.map(hunk).to_vec())
    }
    /// Builds a resolved (single-term) merge from raw bytes.
    fn resolved(value: &[u8]) -> Merge<BString> {
        Merge::resolved(hunk(value))
    }
    /// Converts raw bytes into an owned `BString` hunk.
    fn hunk(data: &[u8]) -> BString {
        data.into()
    }
#[test]
fn test_diff_line_iterator_line_numbers() {
let mut line_iter = DiffLineIterator::with_line_number(
[DiffHunk::different(["a\nb", "c\nd\n"])].into_iter(),
DiffLineNumber { left: 1, right: 10 },
);
// Nothing queued
assert_eq!(
line_iter.next_line_number(),
DiffLineNumber { left: 1, right: 10 }
);
assert_eq!(
line_iter.next().unwrap(),
DiffLine {
line_number: DiffLineNumber { left: 1, right: 10 },
hunks: vec![(DiffLineHunkSide::Left, "a\n".as_ref())],
}
);
// Multiple lines queued
assert_eq!(
line_iter.next_line_number(),
DiffLineNumber { left: 2, right: 10 }
);
assert_eq!(
line_iter.next().unwrap(),
DiffLine {
line_number: DiffLineNumber { left: 2, right: 10 },
hunks: vec![
(DiffLineHunkSide::Left, "b".as_ref()),
(DiffLineHunkSide::Right, "c\n".as_ref()),
],
}
);
// Single line queued
assert_eq!(
line_iter.next_line_number(),
DiffLineNumber { left: 2, right: 11 }
);
assert_eq!(
line_iter.next().unwrap(),
DiffLine {
line_number: DiffLineNumber { left: 2, right: 11 },
hunks: vec![(DiffLineHunkSide::Right, "d\n".as_ref())],
}
);
// No more lines: left remains 2 as it lacks newline
assert_eq!(
line_iter.next_line_number(),
DiffLineNumber { left: 2, right: 12 }
);
assert!(line_iter.next().is_none());
assert_eq!(
line_iter.next_line_number(),
DiffLineNumber { left: 2, right: 12 }
);
}
#[test]
fn test_diff_line_iterator_blank_right_line_single_left() {
let mut line_iter = DiffLineIterator::new(
[
DiffHunk::matching(["a"].repeat(2)),
DiffHunk::different(["x\n", "\ny\n"]),
]
.into_iter(),
);
assert_eq!(
line_iter.next().unwrap(),
DiffLine {
line_number: DiffLineNumber { left: 1, right: 1 },
hunks: vec![
(DiffLineHunkSide::Both, "a".as_ref()),
(DiffLineHunkSide::Left, "x\n".as_ref()),
],
}
);
// "\n" (line_number.right = 1) can be omitted because the previous diff
// line has a right content.
assert_eq!(
line_iter.next().unwrap(),
DiffLine {
line_number: DiffLineNumber { left: 2, right: 2 },
hunks: vec![(DiffLineHunkSide::Right, "y\n".as_ref())],
}
);
}
#[test]
fn test_diff_line_iterator_blank_right_line_multiple_lefts() {
let mut line_iter = DiffLineIterator::new(
[
DiffHunk::matching(["a"].repeat(2)),
DiffHunk::different(["x\n\n", "\ny\n"]),
]
.into_iter(),
);
assert_eq!(
line_iter.next().unwrap(),
DiffLine {
line_number: DiffLineNumber { left: 1, right: 1 },
hunks: vec![
(DiffLineHunkSide::Both, "a".as_ref()),
(DiffLineHunkSide::Left, "x\n".as_ref()),
],
}
);
assert_eq!(
line_iter.next().unwrap(),
DiffLine {
line_number: DiffLineNumber { left: 2, right: 1 },
hunks: vec![(DiffLineHunkSide::Left, "\n".as_ref())],
}
);
// "\n" (line_number.right = 1) can still be omitted because one of the
// preceding diff line has a right content.
assert_eq!(
line_iter.next().unwrap(),
DiffLine {
line_number: DiffLineNumber { left: 3, right: 2 },
hunks: vec![(DiffLineHunkSide::Right, "y\n".as_ref())],
}
);
}
#[test]
fn test_diff_line_iterator_blank_right_line_after_non_empty_left() {
let mut line_iter = DiffLineIterator::new(
[
DiffHunk::matching(["a"].repeat(2)),
DiffHunk::different(["x\nz", "\ny\n"]),
]
.into_iter(),
);
assert_eq!(
line_iter.next().unwrap(),
DiffLine {
line_number: DiffLineNumber { left: 1, right: 1 },
hunks: vec![
(DiffLineHunkSide::Both, "a".as_ref()),
(DiffLineHunkSide::Left, "x\n".as_ref()),
],
}
);
assert_eq!(
line_iter.next().unwrap(),
DiffLine {
line_number: DiffLineNumber { left: 2, right: 1 },
hunks: vec![
(DiffLineHunkSide::Left, "z".as_ref()),
(DiffLineHunkSide::Right, "\n".as_ref()),
],
}
);
assert_eq!(
line_iter.next().unwrap(),
DiffLine {
line_number: DiffLineNumber { left: 2, right: 2 },
hunks: vec![(DiffLineHunkSide::Right, "y\n".as_ref())],
}
);
}
#[test]
fn test_diff_line_iterator_blank_right_line_without_preceding_lines() {
let mut line_iter = DiffLineIterator::new([DiffHunk::different(["", "\ny\n"])].into_iter());
assert_eq!(
line_iter.next().unwrap(),
DiffLine {
line_number: DiffLineNumber { left: 1, right: 1 },
hunks: vec![(DiffLineHunkSide::Right, "\n".as_ref())],
}
);
assert_eq!(
line_iter.next().unwrap(),
DiffLine {
line_number: DiffLineNumber { left: 1, right: 2 },
hunks: vec![(DiffLineHunkSide::Right, "y\n".as_ref())],
}
);
}
#[test]
fn test_conflict_diff_hunks_no_conflicts() {
let diff_hunks = [
DiffHunk::matching(["a\n"].repeat(2)),
DiffHunk::different(["b\n", "c\n"]),
];
let num_lefts = 1;
insta::assert_debug_snapshot!(
conflict_diff_hunks(&diff_hunks, num_lefts).collect_vec(), @r#"
[
ConflictDiffHunk {
kind: Matching,
lefts: Resolved(
"a\n",
),
rights: Resolved(
"a\n",
),
},
ConflictDiffHunk {
kind: Different,
lefts: Resolved(
"b\n",
),
rights: Resolved(
"c\n",
),
},
]
"#);
}
#[test]
fn test_conflict_diff_hunks_simple_conflicts() {
let diff_hunks = [
// conflict hunk
DiffHunk::different(["a\n", "X\n", "b\n", "c\n"]),
DiffHunk::matching(["d\n"].repeat(4)),
// non-conflict hunk
DiffHunk::different(["e\n", "e\n", "e\n", "f\n"]),
];
let num_lefts = 3;
insta::assert_debug_snapshot!(
conflict_diff_hunks(&diff_hunks, num_lefts).collect_vec(), @r#"
[
ConflictDiffHunk {
kind: Different,
lefts: Conflicted(
[
"a\n",
"X\n",
"b\n",
],
),
rights: Resolved(
"c\n",
),
},
ConflictDiffHunk {
kind: Matching,
lefts: Resolved(
"d\n",
),
rights: Resolved(
"d\n",
),
},
ConflictDiffHunk {
kind: Different,
lefts: Resolved(
"e\n",
),
rights: Resolved(
"f\n",
),
},
]
"#);
}
#[test]
fn test_conflict_diff_hunks_matching_conflicts() {
let diff_hunks = [
// matching conflict hunk
DiffHunk::different(["a\n", "X\n", "b\n", "a\n", "X\n", "b\n"]),
DiffHunk::matching(["c\n"].repeat(6)),
];
let num_lefts = 3;
insta::assert_debug_snapshot!(
conflict_diff_hunks(&diff_hunks, num_lefts).collect_vec(), @r#"
[
ConflictDiffHunk {
kind: Matching,
lefts: Conflicted(
[
"a\n",
"X\n",
"b\n",
],
),
rights: Conflicted(
[
"a\n",
"X\n",
"b\n",
],
),
},
ConflictDiffHunk {
kind: Matching,
lefts: Resolved(
"c\n",
),
rights: Resolved(
"c\n",
),
},
]
"#);
}
#[test]
fn test_conflict_diff_hunks_no_trivial_resolution() {
let diff_hunks = [DiffHunk::different(["a", "b", "a", "a"])];
let num_lefts = 1;
insta::assert_debug_snapshot!(
conflict_diff_hunks(&diff_hunks, num_lefts).collect_vec(), @r#"
[
ConflictDiffHunk {
kind: Different,
lefts: Resolved(
"a",
),
rights: Conflicted(
[
"b",
"a",
"a",
],
),
},
]
"#);
let num_lefts = 3;
insta::assert_debug_snapshot!(
conflict_diff_hunks(&diff_hunks, num_lefts).collect_vec(), @r#"
[
ConflictDiffHunk {
kind: Different,
lefts: Conflicted(
[
"a",
"b",
"a",
],
),
rights: Resolved(
"a",
),
},
]
"#);
}
#[test]
fn test_merge_single_hunk() {
let options = MergeOptions {
hunk_level: FileMergeHunkLevel::Line,
same_change: SameChange::Accept,
};
let merge_hunks = |inputs: &_| merge_hunks(inputs, &options);
// Unchanged and empty on all sides
assert_eq!(
merge_hunks(&conflict([b"", b"", b""])),
MergeResult::Resolved(hunk(b""))
);
// Unchanged on all sides
assert_eq!(
merge_hunks(&conflict([b"a", b"a", b"a"])),
MergeResult::Resolved(hunk(b"a"))
);
// One side removed, one side unchanged
assert_eq!(
merge_hunks(&conflict([b"", b"a\n", b"a\n"])),
MergeResult::Resolved(hunk(b""))
);
// One side unchanged, one side removed
assert_eq!(
merge_hunks(&conflict([b"a\n", b"a\n", b""])),
MergeResult::Resolved(hunk(b""))
);
// Both sides removed same line
assert_eq!(
merge_hunks(&conflict([b"", b"a\n", b""])),
MergeResult::Resolved(hunk(b""))
);
// One side modified, one side unchanged
assert_eq!(
merge_hunks(&conflict([b"a b", b"a", b"a"])),
MergeResult::Resolved(hunk(b"a b"))
);
// One side unchanged, one side modified
assert_eq!(
merge_hunks(&conflict([b"a", b"a", b"a b"])),
MergeResult::Resolved(hunk(b"a b"))
);
// All sides added same content
assert_eq!(
merge_hunks(&conflict([b"a\n", b"", b"a\n", b"", b"a\n"])),
MergeResult::Resolved(hunk(b"a\n"))
);
// One side modified, two sides added
assert_eq!(
merge_hunks(&conflict([b"b", b"a", b"b", b"", b"b"])),
MergeResult::Conflict(vec![conflict([b"b", b"a", b"b", b"", b"b"])])
);
// All sides removed same content
assert_eq!(
merge_hunks(&conflict([b"", b"a\n", b"", b"a\n", b"", b"a\n", b""])),
MergeResult::Resolved(hunk(b""))
);
// One side modified, two sides removed
assert_eq!(
merge_hunks(&conflict([b"b\n", b"a\n", b"", b"a\n", b""])),
MergeResult::Conflict(vec![conflict([b"b\n", b"a\n", b"", b"a\n", b""])])
);
// Three sides made the same change
assert_eq!(
merge_hunks(&conflict([b"b", b"a", b"b", b"a", b"b"])),
MergeResult::Resolved(hunk(b"b"))
);
// One side removed, one side modified
assert_eq!(
merge_hunks(&conflict([b"", b"a\n", b"b\n"])),
MergeResult::Conflict(vec![conflict([b"", b"a\n", b"b\n"])])
);
// One side modified, one side removed
assert_eq!(
merge_hunks(&conflict([b"b\n", b"a\n", b""])),
MergeResult::Conflict(vec![conflict([b"b\n", b"a\n", b""])])
);
// Two sides modified in different ways
assert_eq!(
merge_hunks(&conflict([b"b", b"a", b"c"])),
MergeResult::Conflict(vec![conflict([b"b", b"a", b"c"])])
);
// Two of three sides don't change, third side changes
assert_eq!(
merge_hunks(&conflict([b"a", b"a", b"", b"a", b"a"])),
MergeResult::Resolved(hunk(b""))
);
// One side unchanged, two other sides make the same change
assert_eq!(
merge_hunks(&conflict([b"b", b"a", b"a", b"a", b"b"])),
MergeResult::Resolved(hunk(b"b"))
);
// One side unchanged, two other sides make the different change
assert_eq!(
merge_hunks(&conflict([b"b", b"a", b"a", b"a", b"c"])),
MergeResult::Conflict(vec![conflict([b"b", b"a", b"a", b"a", b"c"])])
);
// Merge of an unresolved conflict and another branch, where the other branch
// undid the change from one of the inputs to the unresolved conflict in the
// first.
assert_eq!(
merge_hunks(&conflict([b"b", b"a", b"a", b"b", b"c"])),
MergeResult::Resolved(hunk(b"c"))
);
// Merge of an unresolved conflict and another branch.
assert_eq!(
merge_hunks(&conflict([b"c", b"a", b"d", b"b", b"e"])),
MergeResult::Conflict(vec![conflict([b"c", b"a", b"d", b"b", b"e"])])
);
// Two sides made the same change, third side made a different change
assert_eq!(
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/annotate.rs | lib/src/annotate.rs | // Copyright 2024 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Methods that allow annotation (attribution and blame) for a file in a
//! repository.
//!
//! TODO: Add support for different blame layers with a trait in the future.
//! Like commit metadata and more.
use std::collections::HashMap;
use std::collections::hash_map;
use std::iter;
use std::ops::Range;
use std::sync::Arc;
use bstr::BStr;
use bstr::BString;
use itertools::Itertools as _;
use pollster::FutureExt as _;
use crate::backend::BackendError;
use crate::backend::BackendResult;
use crate::backend::CommitId;
use crate::commit::Commit;
use crate::conflicts::ConflictMarkerStyle;
use crate::conflicts::ConflictMaterializeOptions;
use crate::conflicts::MaterializedTreeValue;
use crate::conflicts::materialize_merge_result_to_bytes;
use crate::conflicts::materialize_tree_value;
use crate::diff::ContentDiff;
use crate::diff::DiffHunkKind;
use crate::files::FileMergeHunkLevel;
use crate::fileset::FilesetExpression;
use crate::graph::GraphEdge;
use crate::merge::SameChange;
use crate::merged_tree::MergedTree;
use crate::repo::Repo;
use crate::repo_path::RepoPath;
use crate::repo_path::RepoPathBuf;
use crate::revset::ResolvedRevsetExpression;
use crate::revset::RevsetEvaluationError;
use crate::revset::RevsetExpression;
use crate::revset::RevsetFilterPredicate;
use crate::store::Store;
use crate::tree_merge::MergeOptions;
/// Annotation results for a specific file
#[derive(Clone, Debug)]
pub struct FileAnnotation {
    /// Origin of each line of `text`, in order.
    line_map: OriginalLineMap,
    /// File content at the starting commit.
    text: BString,
}
impl FileAnnotation {
    /// Returns iterator over `(line_origin, line)`s.
    ///
    /// For each line, `Ok(line_origin)` returns information about the
    /// originator commit of the line. If no originator commit was found
    /// within the domain, `Err(line_origin)` should be set. It points to the
    /// commit outside of the domain where the search stopped.
    ///
    /// The `line` includes newline character.
    pub fn line_origins(&self) -> impl Iterator<Item = (Result<&LineOrigin, &LineOrigin>, &BStr)> {
        itertools::zip_eq(&self.line_map, self.text.split_inclusive(|b| *b == b'\n'))
            .map(|(line_origin, line)| (line_origin.as_ref(), line.as_ref()))
    }
    /// Returns iterator over `(commit_id, line)`s.
    ///
    /// For each line, `Ok(commit_id)` points to the originator commit of the
    /// line. If no originator commit was found within the domain,
    /// `Err(commit_id)` should be set. It points to the commit outside of the
    /// domain where the search stopped.
    ///
    /// The `line` includes newline character.
    pub fn lines(&self) -> impl Iterator<Item = (Result<&CommitId, &CommitId>, &BStr)> {
        itertools::zip_eq(
            self.commit_ids(),
            self.text
                .split_inclusive(|b| *b == b'\n')
                .map(AsRef::as_ref),
        )
    }
    /// Returns iterator over `(commit_id, line_range)`s.
    ///
    /// See [`Self::lines()`] for `commit_id`s.
    ///
    /// The `line_range` is a slice range in the file `text`. Consecutive ranges
    /// having the same `commit_id` are not compacted.
    pub fn line_ranges(
        &self,
    ) -> impl Iterator<Item = (Result<&CommitId, &CommitId>, Range<usize>)> {
        // Accumulate byte offsets so each line yields its start..end range.
        let ranges = self
            .text
            .split_inclusive(|b| *b == b'\n')
            .scan(0, |total, line| {
                let start = *total;
                *total += line.len();
                Some(start..*total)
            });
        itertools::zip_eq(self.commit_ids(), ranges)
    }
    /// Returns iterator over compacted `(commit_id, line_range)`s.
    ///
    /// Consecutive ranges having the same `commit_id` are merged into one.
    pub fn compact_line_ranges(
        &self,
    ) -> impl Iterator<Item = (Result<&CommitId, &CommitId>, Range<usize>)> {
        let mut ranges = self.line_ranges();
        let mut acc = ranges.next();
        // Grow `acc` while the commit id stays the same; when it changes,
        // emit `acc` and start a new run from the current range.
        iter::from_fn(move || {
            let (acc_commit_id, acc_range) = acc.as_mut()?;
            for (cur_commit_id, cur_range) in ranges.by_ref() {
                if *acc_commit_id == cur_commit_id {
                    acc_range.end = cur_range.end;
                } else {
                    return acc.replace((cur_commit_id, cur_range));
                }
            }
            // Input exhausted: emit the final run exactly once.
            acc.take()
        })
    }
    /// File content at the starting commit.
    pub fn text(&self) -> &BStr {
        self.text.as_ref()
    }
    // Per-line commit ids, preserving the Ok/Err (found/stopped-outside-domain)
    // distinction of the underlying LineOrigins.
    fn commit_ids(&self) -> impl Iterator<Item = Result<&CommitId, &CommitId>> {
        self.line_map.iter().map(|line_origin| {
            line_origin
                .as_ref()
                .map(|origin| &origin.commit_id)
                .map_err(|origin| &origin.commit_id)
        })
    }
}
/// Annotation process for a specific file.
#[derive(Clone, Debug)]
pub struct FileAnnotator {
    // If we add copy-tracing support, file_path might be tracked by state.
    file_path: RepoPathBuf,
    /// File content at the starting commit; annotation output refers to it.
    starting_text: BString,
    /// Propagation state, updated incrementally by `compute()`.
    state: AnnotationState,
}
impl FileAnnotator {
    /// Initializes annotator for a specific file in the `starting_commit`.
    ///
    /// If the file is not found, the result would be empty.
    pub fn from_commit(starting_commit: &Commit, file_path: &RepoPath) -> BackendResult<Self> {
        let source = Source::load(starting_commit, file_path)?;
        Ok(Self::with_source(starting_commit.id(), file_path, source))
    }
    /// Initializes annotator for a specific file path starting with the given
    /// content.
    ///
    /// The file content at the `starting_commit` is set to `starting_text`.
    /// This is typically one of the file contents in the conflict or
    /// merged-parent tree.
    pub fn with_file_content(
        starting_commit_id: &CommitId,
        file_path: &RepoPath,
        starting_text: impl Into<Vec<u8>>,
    ) -> Self {
        let source = Source::new(BString::new(starting_text.into()));
        Self::with_source(starting_commit_id, file_path, source)
    }
    // Shared constructor: seeds the identity line map and marks every line as
    // unresolved (Err at the starting commit) until ancestors are processed.
    fn with_source(
        starting_commit_id: &CommitId,
        file_path: &RepoPath,
        mut source: Source,
    ) -> Self {
        source.fill_line_map();
        let starting_text = source.text.clone();
        let state = AnnotationState {
            original_line_map: (0..source.line_map.len())
                .map(|line_number| {
                    Err(LineOrigin {
                        commit_id: starting_commit_id.clone(),
                        line_number,
                    })
                })
                .collect(),
            commit_source_map: HashMap::from([(starting_commit_id.clone(), source)]),
            num_unresolved_roots: 0,
        };
        Self {
            file_path: file_path.to_owned(),
            starting_text,
            state,
        }
    }
    /// Computes line-by-line annotation within the `domain`.
    ///
    /// The `domain` expression narrows the range of ancestors to search. It
    /// will be intersected as `domain & ::pending_commits & files(file_path)`.
    /// The `pending_commits` is assumed to be included in the `domain`.
    pub fn compute(
        &mut self,
        repo: &dyn Repo,
        domain: &Arc<ResolvedRevsetExpression>,
    ) -> Result<(), RevsetEvaluationError> {
        process_commits(repo, &mut self.state, domain, &self.file_path)
    }
    /// Remaining commit ids to visit from.
    pub fn pending_commits(&self) -> impl Iterator<Item = &CommitId> {
        self.state.commit_source_map.keys()
    }
    /// Returns the current state as line-oriented annotation.
    pub fn to_annotation(&self) -> FileAnnotation {
        // Just clone the line map. We might want to change the underlying data
        // model something akin to interleaved delta in order to get annotation
        // at a certain ancestor commit without recomputing.
        FileAnnotation {
            line_map: self.state.original_line_map.clone(),
            text: self.starting_text.clone(),
        }
    }
}
/// Intermediate state of file annotation.
#[derive(Clone, Debug)]
struct AnnotationState {
    /// Current attribution for each line of the starting file.
    original_line_map: OriginalLineMap,
    /// Commits to file line mappings and contents.
    commit_source_map: HashMap<CommitId, Source>,
    /// Number of unresolved root commits in `commit_source_map`.
    num_unresolved_roots: usize,
}
/// Line mapping and file content at a certain commit.
#[derive(Clone, Debug)]
struct Source {
    /// Mapping of line numbers in the file at the current commit to the
    /// starting file, sorted by the line numbers at the current commit.
    /// Each entry is `(current_line_number, starting_line_number)`.
    line_map: Vec<(usize, usize)>,
    /// File content at the current commit.
    text: BString,
}
impl Source {
    /// Wraps `text` with an initially-empty line mapping.
    fn new(text: BString) -> Self {
        Self {
            line_map: Vec::new(),
            text,
        }
    }

    /// Reads the content of `file_path` at `commit`.
    fn load(commit: &Commit, file_path: &RepoPath) -> Result<Self, BackendError> {
        let tree = commit.tree();
        let text = get_file_contents(commit.store(), file_path, &tree).block_on()?;
        Ok(Self::new(text))
    }

    /// Seeds the identity mapping: line `i` here maps to line `i` of the
    /// starting file.
    fn fill_line_map(&mut self) {
        let num_lines = self.text.split_inclusive(|b| *b == b'\n').count();
        self.line_map = (0..num_lines).map(|i| (i, i)).collect();
    }
}
/// List of origins for each line, indexed by line numbers in the
/// starting file.
///
/// `Ok` entries were attributed within the search domain; `Err` entries point
/// to the commit where the search stopped.
type OriginalLineMap = Vec<Result<LineOrigin, LineOrigin>>;
/// Information about the origin of an annotated line.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct LineOrigin {
    /// Commit ID where the line was introduced.
    pub commit_id: CommitId,
    /// 0-based line number of the line in the origin commit.
    pub line_number: usize,
}
/// Starting from the source commits, compute changes at that commit relative to
/// its direct parents, updating the mappings as we go.
fn process_commits(
    repo: &dyn Repo,
    state: &mut AnnotationState,
    domain: &Arc<ResolvedRevsetExpression>,
    file_name: &RepoPath,
) -> Result<(), RevsetEvaluationError> {
    let predicate = RevsetFilterPredicate::File(FilesetExpression::file_path(file_name.to_owned()));
    // TODO: If the domain isn't a contiguous range, changes masked out by it
    // might not be caught by the closest ancestor revision. For example,
    // domain=merges() would pick up almost nothing because merge revisions
    // are usually empty. Perhaps, we want to query `files(file_path,
    // within_sub_graph=domain)`, not `domain & files(file_path)`.
    let heads = RevsetExpression::commits(state.commit_source_map.keys().cloned().collect());
    // Visit the pending heads plus their in-domain ancestors that touch the
    // file. process_commit() pushes lines down to parents, so this relies on
    // iter_graph() yielding children before their parents.
    let revset = heads
        .union(&domain.intersection(&heads.ancestors()).filtered(predicate))
        .evaluate(repo)?;
    state.num_unresolved_roots = 0;
    for node in revset.iter_graph() {
        let (commit_id, edge_list) = node?;
        process_commit(repo, file_name, state, &commit_id, &edge_list)?;
        if state.commit_source_map.len() == state.num_unresolved_roots {
            // No more lines to propagate to ancestors.
            break;
        }
    }
    Ok(())
}
/// For a given commit, for each parent, we compare the version in the parent
/// tree with the current version, updating the mappings for any lines in
/// common. If the parent doesn't have the file, we skip it.
fn process_commit(
    repo: &dyn Repo,
    file_name: &RepoPath,
    state: &mut AnnotationState,
    current_commit_id: &CommitId,
    edges: &[GraphEdge<CommitId>],
) -> Result<(), BackendError> {
    // A commit without a pending source has no lines left to attribute.
    let Some(mut current_source) = state.commit_source_map.remove(current_commit_id) else {
        return Ok(());
    };
    for parent_edge in edges {
        let parent_commit_id = &parent_edge.target;
        let parent_source = match state.commit_source_map.entry(parent_commit_id.clone()) {
            hash_map::Entry::Occupied(entry) => entry.into_mut(),
            hash_map::Entry::Vacant(entry) => {
                let commit = repo.store().get_commit(entry.key())?;
                entry.insert(Source::load(&commit, file_name)?)
            }
        };
        // For two versions of the same file, for all the lines in common,
        // overwrite the new mapping in the results for the new commit. Let's
        // say I have a file in commit A and commit B. We know that according to
        // local line_map, in commit A, line 3 corresponds to line 7 of the
        // starting file. Now, line 3 in Commit A corresponds to line 6 in
        // commit B. Then, we update local line_map to say that "Commit B line 6
        // goes to line 7 of the starting file". We repeat this for all lines in
        // common in the two commits.
        let mut current_lines = current_source.line_map.iter().copied().peekable();
        let mut new_current_line_map = Vec::new();
        let mut new_parent_line_map = Vec::new();
        copy_same_lines_with(
            &current_source.text,
            &parent_source.text,
            |current_start, parent_start, count| {
                // Lines before this common range stay attributed to the
                // current commit (for now).
                new_current_line_map
                    .extend(current_lines.peeking_take_while(|&(cur, _)| cur < current_start));
                // Lines inside the common range move to the parent.
                while let Some((current, starting)) =
                    current_lines.next_if(|&(cur, _)| cur < current_start + count)
                {
                    let parent = parent_start + (current - current_start);
                    new_parent_line_map.push((parent, starting));
                }
            },
        );
        new_current_line_map.extend(current_lines);
        current_source.line_map = new_current_line_map;
        // Merge with any mapping accumulated from previously-visited children
        // of this parent, keeping it sorted by parent line number.
        parent_source.line_map = if parent_source.line_map.is_empty() {
            new_parent_line_map
        } else {
            itertools::merge(parent_source.line_map.iter().copied(), new_parent_line_map).collect()
        };
        if parent_source.line_map.is_empty() {
            state.commit_source_map.remove(parent_commit_id);
        } else if parent_edge.is_missing() {
            // If an omitted parent had the file, leave these lines unresolved.
            // The origin of the unresolved lines is represented as
            // Err(LineOrigin { parent_commit_id, parent_line_number }).
            for &(parent_line_number, starting_line_number) in &parent_source.line_map {
                state.original_line_map[starting_line_number] = Err(LineOrigin {
                    commit_id: parent_commit_id.clone(),
                    line_number: parent_line_number,
                });
            }
            state.num_unresolved_roots += 1;
        }
    }
    // Once we've looked at all parents of a commit, any leftover lines must be
    // original to the current commit, so we save this information in
    // original_line_map.
    for (current_line_number, starting_line_number) in current_source.line_map {
        state.original_line_map[starting_line_number] = Ok(LineOrigin {
            commit_id: current_commit_id.clone(),
            line_number: current_line_number,
        });
    }
    Ok(())
}
/// For two files, calls `copy(current_start, parent_start, count)` for each
/// range of contiguous lines in common (e.g. line 8-10 maps to line 9-11.)
fn copy_same_lines_with(
    current_contents: &[u8],
    parent_contents: &[u8],
    mut copy: impl FnMut(usize, usize, usize),
) {
    // Number of (possibly newline-terminated) lines in `text`.
    fn count_lines(text: &[u8]) -> usize {
        text.split_inclusive(|b| *b == b'\n').count()
    }
    let diff = ContentDiff::by_line([current_contents, parent_contents]);
    let mut current_line = 0;
    let mut parent_line = 0;
    for hunk in diff.hunks() {
        match hunk.kind {
            DiffHunkKind::Matching => {
                let num_lines = count_lines(hunk.contents[0]);
                copy(current_line, parent_line, num_lines);
                current_line += num_lines;
                parent_line += num_lines;
            }
            DiffHunkKind::Different => {
                // Differing hunks just advance the per-side line counters.
                current_line += count_lines(hunk.contents[0]);
                parent_line += count_lines(hunk.contents[1]);
            }
        }
    }
}
/// Reads the contents of the file at `path` in `tree` as a flat byte string.
///
/// Conflicted files are materialized with diff-style conflict markers so
/// there is still text to attribute. Any other kind of entry (absent,
/// symlink, tree, etc.) yields an empty string.
async fn get_file_contents(
    store: &Store,
    path: &RepoPath,
    tree: &MergedTree,
) -> Result<BString, BackendError> {
    let file_value = tree.path_value_async(path).await?;
    let effective_file_value =
        materialize_tree_value(store, path, file_value, tree.labels()).await?;
    match effective_file_value {
        // Plain file: return its raw bytes.
        MaterializedTreeValue::File(mut file) => Ok(file.read_all(path).await?.into()),
        MaterializedTreeValue::FileConflict(file) => {
            // TODO: track line origins without materializing
            let options = ConflictMaterializeOptions {
                marker_style: ConflictMarkerStyle::Diff,
                marker_len: None,
                merge: MergeOptions {
                    hunk_level: FileMergeHunkLevel::Line,
                    same_change: SameChange::Accept,
                },
            };
            Ok(materialize_merge_result_to_bytes(
                &file.contents,
                &file.labels,
                &options,
            ))
        }
        // Non-file entries have no annotatable text.
        _ => Ok(BString::default()),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Builds a `LineOrigin` for the given commit and 0-based line number.
    fn make_line_origin(commit_id: &CommitId, line_number: usize) -> LineOrigin {
        LineOrigin {
            commit_id: commit_id.clone(),
            line_number,
        }
    }
    // An empty annotation yields empty iterators from every accessor.
    #[test]
    fn test_lines_iterator_empty() {
        let annotation = FileAnnotation {
            line_map: vec![],
            text: "".into(),
        };
        assert_eq!(annotation.line_origins().collect_vec(), vec![]);
        assert_eq!(annotation.lines().collect_vec(), vec![]);
        assert_eq!(annotation.line_ranges().collect_vec(), vec![]);
        assert_eq!(annotation.compact_line_ranges().collect_vec(), vec![]);
    }
    // Three lines from three different commits: each accessor should report
    // per-line origins, commit ids, and byte ranges in file order.
    #[test]
    fn test_lines_iterator_with_content() {
        let commit_id1 = CommitId::from_hex("111111");
        let commit_id2 = CommitId::from_hex("222222");
        let commit_id3 = CommitId::from_hex("333333");
        let annotation = FileAnnotation {
            line_map: vec![
                Ok(make_line_origin(&commit_id1, 0)),
                Ok(make_line_origin(&commit_id2, 1)),
                Ok(make_line_origin(&commit_id3, 2)),
            ],
            text: "foo\n\nbar\n".into(),
        };
        assert_eq!(
            annotation.line_origins().collect_vec(),
            vec![
                (Ok(&make_line_origin(&commit_id1, 0)), "foo\n".as_ref()),
                (Ok(&make_line_origin(&commit_id2, 1)), "\n".as_ref()),
                (Ok(&make_line_origin(&commit_id3, 2)), "bar\n".as_ref()),
            ]
        );
        assert_eq!(
            annotation.lines().collect_vec(),
            vec![
                (Ok(&commit_id1), "foo\n".as_ref()),
                (Ok(&commit_id2), "\n".as_ref()),
                (Ok(&commit_id3), "bar\n".as_ref()),
            ]
        );
        assert_eq!(
            annotation.line_ranges().collect_vec(),
            vec![
                (Ok(&commit_id1), 0..4),
                (Ok(&commit_id2), 4..5),
                (Ok(&commit_id3), 5..9),
            ]
        );
        // No adjacent lines share a commit, so compaction changes nothing.
        assert_eq!(
            annotation.compact_line_ranges().collect_vec(),
            vec![
                (Ok(&commit_id1), 0..4),
                (Ok(&commit_id2), 4..5),
                (Ok(&commit_id3), 5..9),
            ]
        );
    }
    // Adjacent lines from the same commit should be merged into one range by
    // compact_line_ranges(); runs interrupted by another commit are kept
    // separate (commit 1 appears twice below).
    #[test]
    fn test_lines_iterator_compaction() {
        let commit_id1 = CommitId::from_hex("111111");
        let commit_id2 = CommitId::from_hex("222222");
        let commit_id3 = CommitId::from_hex("333333");
        let annotation = FileAnnotation {
            line_map: vec![
                Ok(make_line_origin(&commit_id1, 0)),
                Ok(make_line_origin(&commit_id1, 1)),
                Ok(make_line_origin(&commit_id2, 2)),
                Ok(make_line_origin(&commit_id1, 3)),
                Ok(make_line_origin(&commit_id3, 4)),
                Ok(make_line_origin(&commit_id3, 5)),
                Ok(make_line_origin(&commit_id3, 6)),
            ],
            text: "\n".repeat(7).into(),
        };
        assert_eq!(
            annotation.compact_line_ranges().collect_vec(),
            vec![
                (Ok(&commit_id1), 0..2),
                (Ok(&commit_id2), 2..3),
                (Ok(&commit_id1), 3..4),
                (Ok(&commit_id3), 4..7),
            ]
        );
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/fileset_parser.rs | lib/src/fileset_parser.rs | // Copyright 2024 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Parser for the fileset language.
use std::error;
use std::sync::LazyLock;
use itertools::Itertools as _;
use pest::Parser as _;
use pest::iterators::Pair;
use pest::pratt_parser::Assoc;
use pest::pratt_parser::Op;
use pest::pratt_parser::PrattParser;
use pest_derive::Parser;
use thiserror::Error;
use crate::dsl_util;
use crate::dsl_util::Diagnostics;
use crate::dsl_util::InvalidArguments;
use crate::dsl_util::StringLiteralParser;
// Pest parser generated from the fileset grammar file.
#[derive(Parser)]
#[grammar = "fileset.pest"]
struct FilesetParser;
// Shared helper that evaluates `\`-escapes inside "..." string literals.
const STRING_LITERAL_PARSER: StringLiteralParser<Rule> = StringLiteralParser {
    content_rule: Rule::string_content,
    escape_rule: Rule::string_escape,
};
impl Rule {
    /// Maps an operator rule to its concrete token (e.g. `union_op` => "|")
    /// for use in error messages. Returns `None` for non-operator rules.
    ///
    /// Note that prefix negation and infix difference share the `~` token.
    fn to_symbol(self) -> Option<&'static str> {
        match self {
            Self::EOI => None,
            Self::whitespace => None,
            Self::identifier => None,
            Self::strict_identifier_part => None,
            Self::strict_identifier => None,
            Self::bare_string => None,
            Self::string_escape => None,
            Self::string_content_char => None,
            Self::string_content => None,
            Self::string_literal => None,
            Self::raw_string_content => None,
            Self::raw_string_literal => None,
            Self::pattern_kind_op => Some(":"),
            Self::negate_op => Some("~"),
            Self::union_op => Some("|"),
            Self::intersection_op => Some("&"),
            Self::difference_op => Some("~"),
            Self::prefix_ops => None,
            Self::infix_ops => None,
            Self::function => None,
            Self::function_name => None,
            Self::function_arguments => None,
            Self::string_pattern => None,
            Self::bare_string_pattern => None,
            Self::primary => None,
            Self::expression => None,
            Self::program => None,
            Self::program_or_bare_string => None,
        }
    }
}
/// Manages diagnostic messages emitted during fileset parsing and name
/// resolution.
pub type FilesetDiagnostics = Diagnostics<FilesetParseError>;
/// Result of fileset parsing and name resolution.
pub type FilesetParseResult<T> = Result<T, FilesetParseError>;
/// Error occurred during fileset parsing and name resolution.
#[derive(Debug, Error)]
#[error("{pest_error}")]
pub struct FilesetParseError {
    // Machine-readable category of this error.
    kind: FilesetParseErrorKind,
    // Pest error carrying the rendered message and the source span; it is
    // what `Display` prints (see the `#[error]` attribute above).
    pest_error: Box<pest::error::Error<Rule>>,
    // Underlying cause, if any; thiserror exposes a field named `source` as
    // `Error::source()`.
    source: Option<Box<dyn error::Error + Send + Sync>>,
}
/// Categories of fileset parsing and name resolution error.
#[expect(missing_docs)]
#[derive(Clone, Debug, Eq, Error, PartialEq)]
pub enum FilesetParseErrorKind {
    #[error("Syntax error")]
    SyntaxError,
    #[error("Function `{name}` doesn't exist")]
    NoSuchFunction {
        name: String,
        candidates: Vec<String>,
    },
    #[error("Function `{name}`: {message}")]
    InvalidArguments { name: String, message: String },
    #[error("{0}")]
    Expression(String),
}
impl FilesetParseError {
    /// Creates an error of the given `kind`, rendering its message at `span`
    /// of the source text.
    pub(super) fn new(kind: FilesetParseErrorKind, span: pest::Span<'_>) -> Self {
        let message = kind.to_string();
        let pest_error = Box::new(pest::error::Error::new_from_span(
            pest::error::ErrorVariant::CustomError { message },
            span,
        ));
        Self {
            kind,
            pest_error,
            source: None,
        }
    }
    /// Attaches the lower-level cause to this error (builder style.)
    pub(super) fn with_source(
        mut self,
        source: impl Into<Box<dyn error::Error + Send + Sync>>,
    ) -> Self {
        self.source = Some(source.into());
        self
    }
    /// Some other expression error.
    pub(super) fn expression(message: impl Into<String>, span: pest::Span<'_>) -> Self {
        Self::new(FilesetParseErrorKind::Expression(message.into()), span)
    }
    /// Category of the underlying error.
    pub fn kind(&self) -> &FilesetParseErrorKind {
        &self.kind
    }
}
impl From<pest::error::Error<Rule>> for FilesetParseError {
    fn from(err: pest::error::Error<Rule>) -> Self {
        // A raw pest error is always a syntax error; rewrite the grammar rule
        // names to user-facing symbols before storing it.
        Self {
            kind: FilesetParseErrorKind::SyntaxError,
            pest_error: Box::new(rename_rules_in_pest_error(err)),
            source: None,
        }
    }
}
impl From<InvalidArguments<'_>> for FilesetParseError {
    fn from(err: InvalidArguments<'_>) -> Self {
        let kind = FilesetParseErrorKind::InvalidArguments {
            name: err.name.to_owned(),
            message: err.message,
        };
        Self::new(kind, err.span)
    }
}
/// Rewrites rule names in a pest error: operator rules are shown as their
/// backquoted token, all other rules as `<rule_name>`.
fn rename_rules_in_pest_error(err: pest::error::Error<Rule>) -> pest::error::Error<Rule> {
    err.renamed_rules(|rule| match rule.to_symbol() {
        Some(sym) => format!("`{sym}`"),
        None => format!("<{rule:?}>"),
    })
}
/// Parsed fileset expression. No name resolution is made at this stage.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum ExpressionKind<'i> {
    /// Bare word, e.g. `foo/bar.txt`.
    Identifier(&'i str),
    /// Quoted or raw string literal with escapes already processed.
    String(String),
    /// `kind:value` pattern.
    StringPattern {
        kind: &'i str,
        value: String,
    },
    /// Prefix operator applied to an expression.
    Unary(UnaryOp, Box<ExpressionNode<'i>>),
    /// Infix operator applied to two expressions (unions are represented
    /// separately as `UnionAll`.)
    Binary(BinaryOp, Box<ExpressionNode<'i>>, Box<ExpressionNode<'i>>),
    /// `x | y | ..`
    UnionAll(Vec<ExpressionNode<'i>>),
    /// `name(args)` call; resolution of `name` happens later.
    FunctionCall(Box<FunctionCallNode<'i>>),
}
/// Prefix operators in fileset expressions.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum UnaryOp {
    /// `~`
    Negate,
}
/// Infix operators in fileset expressions.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum BinaryOp {
    /// `&`
    Intersection,
    /// `~`
    Difference,
}
/// AST node of a fileset expression.
pub type ExpressionNode<'i> = dsl_util::ExpressionNode<'i, ExpressionKind<'i>>;
/// AST node of a fileset function call.
pub type FunctionCallNode<'i> = dsl_util::FunctionCallNode<'i, ExpressionKind<'i>>;
/// Combines two expressions into a union node spanning both operands.
fn union_nodes<'i>(lhs: ExpressionNode<'i>, rhs: ExpressionNode<'i>) -> ExpressionNode<'i> {
    // Span of the union covers lhs through rhs.
    let span = lhs.span.start_pos().span(&rhs.span.end_pos());
    // Flatten "x | y | z" to save recursion stack. Machine-generated query
    // might have long chain of unions.
    let kind = if let ExpressionKind::UnionAll(mut nodes) = lhs.kind {
        nodes.push(rhs);
        ExpressionKind::UnionAll(nodes)
    } else {
        ExpressionKind::UnionAll(vec![lhs, rhs])
    };
    ExpressionNode::new(kind, span)
}
/// Converts a `function` pair into a `FunctionCallNode`, parsing each
/// argument recursively.
fn parse_function_call_node(pair: Pair<Rule>) -> FilesetParseResult<FunctionCallNode> {
    assert_eq!(pair.as_rule(), Rule::function);
    // Grammar guarantees exactly one name followed by one argument list.
    let [name_pair, args_pair] = pair.into_inner().collect_array().unwrap();
    assert_eq!(name_pair.as_rule(), Rule::function_name);
    assert_eq!(args_pair.as_rule(), Rule::function_arguments);
    let name_span = name_pair.as_span();
    let args_span = args_pair.as_span();
    let name = name_pair.as_str();
    let args = args_pair
        .into_inner()
        .map(parse_expression_node)
        .try_collect()?;
    Ok(FunctionCallNode {
        name,
        name_span,
        args,
        keyword_args: vec![], // unsupported
        args_span,
    })
}
/// Extracts the string value from an identifier, quoted, or raw string pair.
///
/// Panics if the pair isn't one of the string-like rules.
fn parse_as_string_literal(pair: Pair<Rule>) -> String {
    match pair.as_rule() {
        // Bare identifier is its own value.
        Rule::identifier => pair.as_str().to_owned(),
        // "..." with backslash escapes processed.
        Rule::string_literal => STRING_LITERAL_PARSER.parse(pair.into_inner()),
        // '...' taken verbatim, no escape processing.
        Rule::raw_string_literal => {
            let [content] = pair.into_inner().collect_array().unwrap();
            assert_eq!(content.as_rule(), Rule::raw_string_content);
            content.as_str().to_owned()
        }
        r => panic!("unexpected string literal rule: {r:?}"),
    }
}
/// Converts a `primary` pair (parenthesized expression, function call,
/// string pattern, identifier, or string literal) into an expression node.
fn parse_primary_node(pair: Pair<Rule>) -> FilesetParseResult<ExpressionNode> {
    assert_eq!(pair.as_rule(), Rule::primary);
    let span = pair.as_span();
    // `primary` wraps exactly one inner rule.
    let first = pair.into_inner().next().unwrap();
    let expr = match first.as_rule() {
        // Ignore inner span to preserve parenthesized expression as such.
        Rule::expression => parse_expression_node(first)?.kind,
        Rule::function => {
            let function = Box::new(parse_function_call_node(first)?);
            ExpressionKind::FunctionCall(function)
        }
        // `kind:value` where value is an identifier or quoted/raw string.
        Rule::string_pattern => {
            let [lhs, op, rhs] = first.into_inner().collect_array().unwrap();
            assert_eq!(lhs.as_rule(), Rule::strict_identifier);
            assert_eq!(op.as_rule(), Rule::pattern_kind_op);
            let kind = lhs.as_str();
            let value = parse_as_string_literal(rhs);
            ExpressionKind::StringPattern { kind, value }
        }
        Rule::identifier => ExpressionKind::Identifier(first.as_str()),
        Rule::string_literal | Rule::raw_string_literal => {
            ExpressionKind::String(parse_as_string_literal(first))
        }
        r => panic!("unexpected primary rule: {r:?}"),
    };
    Ok(ExpressionNode::new(expr, span))
}
/// Builds an expression tree from an `expression` pair, resolving operator
/// precedence and associativity with a Pratt parser.
fn parse_expression_node(pair: Pair<Rule>) -> FilesetParseResult<ExpressionNode> {
    assert_eq!(pair.as_rule(), Rule::expression);
    // Operators registered later bind tighter: `|` is weakest, `&` and infix
    // `~` share the next level, and prefix `~` (negate) binds tightest.
    static PRATT: LazyLock<PrattParser<Rule>> = LazyLock::new(|| {
        PrattParser::new()
            .op(Op::infix(Rule::union_op, Assoc::Left))
            .op(Op::infix(Rule::intersection_op, Assoc::Left)
                | Op::infix(Rule::difference_op, Assoc::Left))
            .op(Op::prefix(Rule::negate_op))
    });
    PRATT
        .map_primary(parse_primary_node)
        .map_prefix(|op, rhs| {
            let op_kind = match op.as_rule() {
                Rule::negate_op => UnaryOp::Negate,
                r => panic!("unexpected prefix operator rule {r:?}"),
            };
            let rhs = Box::new(rhs?);
            // Span covers the operator through the end of its operand.
            let span = op.as_span().start_pos().span(&rhs.span.end_pos());
            let expr = ExpressionKind::Unary(op_kind, rhs);
            Ok(ExpressionNode::new(expr, span))
        })
        .map_infix(|lhs, op, rhs| {
            let op_kind = match op.as_rule() {
                // Unions are flattened into a single n-ary UnionAll node.
                Rule::union_op => return Ok(union_nodes(lhs?, rhs?)),
                Rule::intersection_op => BinaryOp::Intersection,
                Rule::difference_op => BinaryOp::Difference,
                r => panic!("unexpected infix operator rule {r:?}"),
            };
            let lhs = Box::new(lhs?);
            let rhs = Box::new(rhs?);
            let span = lhs.span.start_pos().span(&rhs.span.end_pos());
            let expr = ExpressionKind::Binary(op_kind, lhs, rhs);
            Ok(ExpressionNode::new(expr, span))
        })
        .parse(pair.into_inner())
}
/// Parses text into expression tree. No name resolution is made at this stage.
pub fn parse_program(text: &str) -> FilesetParseResult<ExpressionNode<'_>> {
    // The `program` rule wraps exactly one `expression`; unwrap it and build
    // the tree from that pair.
    let first = FilesetParser::parse(Rule::program, text)?.next().unwrap();
    parse_expression_node(first)
}
/// Parses text into expression tree with bare string fallback. No name
/// resolution is made at this stage.
///
/// If the text can't be parsed as a fileset expression, and if it doesn't
/// contain any operator-like characters, it will be parsed as a file path.
pub fn parse_program_or_bare_string(text: &str) -> FilesetParseResult<ExpressionNode<'_>> {
    let mut pairs = FilesetParser::parse(Rule::program_or_bare_string, text)?;
    let first = pairs.next().unwrap();
    let span = first.as_span();
    let expr = match first.as_rule() {
        // Parsed as a proper expression; no fallback needed.
        Rule::expression => return parse_expression_node(first),
        // `kind:` followed by arbitrary unquoted text.
        Rule::bare_string_pattern => {
            let [lhs, op, rhs] = first.into_inner().collect_array().unwrap();
            assert_eq!(lhs.as_rule(), Rule::strict_identifier);
            assert_eq!(op.as_rule(), Rule::pattern_kind_op);
            assert_eq!(rhs.as_rule(), Rule::bare_string);
            let kind = lhs.as_str();
            let value = rhs.as_str().to_owned();
            ExpressionKind::StringPattern { kind, value }
        }
        // Whole input taken verbatim as a path-like string.
        Rule::bare_string => ExpressionKind::String(first.as_str().to_owned()),
        r => panic!("unexpected program or bare string rule: {r:?}"),
    };
    Ok(ExpressionNode::new(expr, span))
}
#[cfg(test)]
mod tests {
use assert_matches::assert_matches;
use super::*;
use crate::dsl_util::KeywordArgument;
fn parse_into_kind(text: &str) -> Result<ExpressionKind<'_>, FilesetParseErrorKind> {
parse_program(text)
.map(|node| node.kind)
.map_err(|err| err.kind)
}
fn parse_maybe_bare_into_kind(text: &str) -> Result<ExpressionKind<'_>, FilesetParseErrorKind> {
parse_program_or_bare_string(text)
.map(|node| node.kind)
.map_err(|err| err.kind)
}
fn parse_normalized(text: &str) -> ExpressionNode<'_> {
normalize_tree(parse_program(text).unwrap())
}
fn parse_maybe_bare_normalized(text: &str) -> ExpressionNode<'_> {
normalize_tree(parse_program_or_bare_string(text).unwrap())
}
/// Drops auxiliary data from parsed tree so it can be compared with other.
fn normalize_tree(node: ExpressionNode) -> ExpressionNode {
fn empty_span() -> pest::Span<'static> {
pest::Span::new("", 0, 0).unwrap()
}
fn normalize_list(nodes: Vec<ExpressionNode>) -> Vec<ExpressionNode> {
nodes.into_iter().map(normalize_tree).collect()
}
fn normalize_function_call(function: FunctionCallNode) -> FunctionCallNode {
FunctionCallNode {
name: function.name,
name_span: empty_span(),
args: normalize_list(function.args),
keyword_args: function
.keyword_args
.into_iter()
.map(|arg| KeywordArgument {
name: arg.name,
name_span: empty_span(),
value: normalize_tree(arg.value),
})
.collect(),
args_span: empty_span(),
}
}
let normalized_kind = match node.kind {
ExpressionKind::Identifier(_)
| ExpressionKind::String(_)
| ExpressionKind::StringPattern { .. } => node.kind,
ExpressionKind::Unary(op, arg) => {
let arg = Box::new(normalize_tree(*arg));
ExpressionKind::Unary(op, arg)
}
ExpressionKind::Binary(op, lhs, rhs) => {
let lhs = Box::new(normalize_tree(*lhs));
let rhs = Box::new(normalize_tree(*rhs));
ExpressionKind::Binary(op, lhs, rhs)
}
ExpressionKind::UnionAll(nodes) => {
let nodes = normalize_list(nodes);
ExpressionKind::UnionAll(nodes)
}
ExpressionKind::FunctionCall(function) => {
let function = Box::new(normalize_function_call(*function));
ExpressionKind::FunctionCall(function)
}
};
ExpressionNode {
kind: normalized_kind,
span: empty_span(),
}
}
#[test]
fn test_parse_tree_eq() {
assert_eq!(
parse_normalized(r#" foo( x ) | ~bar:"baz" "#),
parse_normalized(r#"(foo(x))|(~(bar:"baz"))"#)
);
assert_ne!(parse_normalized(r#" foo "#), parse_normalized(r#" "foo" "#));
}
#[test]
fn test_parse_invalid_function_name() {
assert_eq!(
parse_into_kind("5foo(x)"),
Err(FilesetParseErrorKind::SyntaxError)
);
}
#[test]
fn test_parse_whitespace() {
let ascii_whitespaces: String = ('\x00'..='\x7f')
.filter(char::is_ascii_whitespace)
.collect();
assert_eq!(
parse_normalized(&format!("{ascii_whitespaces}f()")),
parse_normalized("f()")
);
}
#[test]
fn test_parse_identifier() {
assert_eq!(
parse_into_kind("dir/foo-bar_0.baz"),
Ok(ExpressionKind::Identifier("dir/foo-bar_0.baz"))
);
assert_eq!(
parse_into_kind("cli-reference@.md.snap"),
Ok(ExpressionKind::Identifier("cli-reference@.md.snap"))
);
assert_eq!(
parse_into_kind("柔術.jj"),
Ok(ExpressionKind::Identifier("柔術.jj"))
);
assert_eq!(
parse_into_kind(r#"Windows\Path"#),
Ok(ExpressionKind::Identifier(r#"Windows\Path"#))
);
assert_eq!(
parse_into_kind("glob*[chars]?"),
Ok(ExpressionKind::Identifier("glob*[chars]?"))
);
}
#[test]
fn test_parse_string_literal() {
// "\<char>" escapes
assert_eq!(
parse_into_kind(r#" "\t\r\n\"\\\0\e" "#),
Ok(ExpressionKind::String("\t\r\n\"\\\0\u{1b}".to_owned())),
);
// Invalid "\<char>" escape
assert_eq!(
parse_into_kind(r#" "\y" "#),
Err(FilesetParseErrorKind::SyntaxError),
);
// Single-quoted raw string
assert_eq!(
parse_into_kind(r#" '' "#),
Ok(ExpressionKind::String("".to_owned())),
);
assert_eq!(
parse_into_kind(r#" 'a\n' "#),
Ok(ExpressionKind::String(r"a\n".to_owned())),
);
assert_eq!(
parse_into_kind(r#" '\' "#),
Ok(ExpressionKind::String(r"\".to_owned())),
);
assert_eq!(
parse_into_kind(r#" '"' "#),
Ok(ExpressionKind::String(r#"""#.to_owned())),
);
// Hex bytes
assert_eq!(
parse_into_kind(r#""\x61\x65\x69\x6f\x75""#),
Ok(ExpressionKind::String("aeiou".to_owned())),
);
assert_eq!(
parse_into_kind(r#""\xe0\xe8\xec\xf0\xf9""#),
Ok(ExpressionKind::String("àèìðù".to_owned())),
);
assert_eq!(
parse_into_kind(r#""\x""#),
Err(FilesetParseErrorKind::SyntaxError),
);
assert_eq!(
parse_into_kind(r#""\xf""#),
Err(FilesetParseErrorKind::SyntaxError),
);
assert_eq!(
parse_into_kind(r#""\xgg""#),
Err(FilesetParseErrorKind::SyntaxError),
);
}
#[test]
fn test_parse_string_pattern() {
assert_eq!(
parse_into_kind(r#" foo:bar "#),
Ok(ExpressionKind::StringPattern {
kind: "foo",
value: "bar".to_owned()
})
);
assert_eq!(
parse_into_kind(" foo:glob*[chars]? "),
Ok(ExpressionKind::StringPattern {
kind: "foo",
value: "glob*[chars]?".to_owned()
})
);
assert_eq!(
parse_into_kind(r#" foo:"bar" "#),
Ok(ExpressionKind::StringPattern {
kind: "foo",
value: "bar".to_owned()
})
);
assert_eq!(
parse_into_kind(r#" foo:"" "#),
Ok(ExpressionKind::StringPattern {
kind: "foo",
value: "".to_owned()
})
);
assert_eq!(
parse_into_kind(r#" foo:'\' "#),
Ok(ExpressionKind::StringPattern {
kind: "foo",
value: r"\".to_owned()
})
);
assert_eq!(
parse_into_kind(r#" foo: "#),
Err(FilesetParseErrorKind::SyntaxError)
);
assert_eq!(
parse_into_kind(r#" foo: "" "#),
Err(FilesetParseErrorKind::SyntaxError)
);
assert_eq!(
parse_into_kind(r#" foo :"" "#),
Err(FilesetParseErrorKind::SyntaxError)
);
}
#[test]
fn test_parse_operator() {
assert_matches!(
parse_into_kind("~x"),
Ok(ExpressionKind::Unary(UnaryOp::Negate, _))
);
assert_matches!(
parse_into_kind("x|y"),
Ok(ExpressionKind::UnionAll(nodes)) if nodes.len() == 2
);
assert_matches!(
parse_into_kind("x|y|z"),
Ok(ExpressionKind::UnionAll(nodes)) if nodes.len() == 3
);
assert_matches!(
parse_into_kind("x&y"),
Ok(ExpressionKind::Binary(BinaryOp::Intersection, _, _))
);
assert_matches!(
parse_into_kind("x~y"),
Ok(ExpressionKind::Binary(BinaryOp::Difference, _, _))
);
// Set operator associativity/precedence
assert_eq!(parse_normalized("~x|y"), parse_normalized("(~x)|y"));
assert_eq!(parse_normalized("x&~y"), parse_normalized("x&(~y)"));
assert_eq!(parse_normalized("x~~y"), parse_normalized("x~(~y)"));
assert_eq!(parse_normalized("x~~~y"), parse_normalized("x~(~(~y))"));
assert_eq!(parse_normalized("x|y|z"), parse_normalized("(x|y)|z"));
assert_eq!(parse_normalized("x&y|z"), parse_normalized("(x&y)|z"));
assert_eq!(parse_normalized("x|y&z"), parse_normalized("x|(y&z)"));
assert_eq!(parse_normalized("x|y~z"), parse_normalized("x|(y~z)"));
assert_eq!(parse_normalized("~x:y"), parse_normalized("~(x:y)"));
assert_eq!(parse_normalized("x|y:z"), parse_normalized("x|(y:z)"));
// Expression span
assert_eq!(parse_program(" ~ x ").unwrap().span.as_str(), "~ x");
assert_eq!(parse_program(" x |y ").unwrap().span.as_str(), "x |y");
assert_eq!(parse_program(" (x) ").unwrap().span.as_str(), "(x)");
assert_eq!(parse_program("~( x|y) ").unwrap().span.as_str(), "~( x|y)");
}
#[test]
fn test_parse_function_call() {
fn unwrap_function_call(node: ExpressionNode<'_>) -> Box<FunctionCallNode<'_>> {
match node.kind {
ExpressionKind::FunctionCall(function) => function,
_ => panic!("unexpected expression: {node:?}"),
}
}
assert_matches!(
parse_into_kind("foo()"),
Ok(ExpressionKind::FunctionCall(_))
);
// Trailing comma isn't allowed for empty argument
assert!(parse_into_kind("foo(,)").is_err());
// Trailing comma is allowed for the last argument
assert_eq!(parse_normalized("foo(a,)"), parse_normalized("foo(a)"));
assert_eq!(parse_normalized("foo(a , )"), parse_normalized("foo(a)"));
assert!(parse_into_kind("foo(,a)").is_err());
assert!(parse_into_kind("foo(a,,)").is_err());
assert!(parse_into_kind("foo(a , , )").is_err());
assert_eq!(parse_normalized("foo(a,b,)"), parse_normalized("foo(a,b)"));
assert!(parse_into_kind("foo(a,,b)").is_err());
// Expression span
let function = unwrap_function_call(parse_program("foo( a, (b) , ~(c) )").unwrap());
assert_eq!(function.name_span.as_str(), "foo");
assert_eq!(function.args_span.as_str(), "a, (b) , ~(c)");
assert_eq!(function.args[0].span.as_str(), "a");
assert_eq!(function.args[1].span.as_str(), "(b)");
assert_eq!(function.args[2].span.as_str(), "~(c)");
}
#[test]
fn test_parse_bare_string() {
// Valid expression should be parsed as such
assert_eq!(
parse_maybe_bare_into_kind(" valid "),
Ok(ExpressionKind::Identifier("valid"))
);
assert_eq!(
parse_maybe_bare_normalized("f(x)&y"),
parse_normalized("f(x)&y")
);
// Bare string
assert_eq!(
parse_maybe_bare_into_kind("Foo Bar.txt"),
Ok(ExpressionKind::String("Foo Bar.txt".to_owned()))
);
assert_eq!(
parse_maybe_bare_into_kind(r#"Windows\Path with space"#),
Ok(ExpressionKind::String(
r#"Windows\Path with space"#.to_owned()
))
);
assert_eq!(
parse_maybe_bare_into_kind("柔 術 . j j"),
Ok(ExpressionKind::String("柔 術 . j j".to_owned()))
);
assert_eq!(
parse_maybe_bare_into_kind("Unicode emoji 💩"),
Ok(ExpressionKind::String("Unicode emoji 💩".to_owned()))
);
assert_eq!(
parse_maybe_bare_into_kind("looks like & expression"),
Err(FilesetParseErrorKind::SyntaxError)
);
assert_eq!(
parse_maybe_bare_into_kind("unbalanced_parens("),
Err(FilesetParseErrorKind::SyntaxError)
);
// Bare string pattern
assert_eq!(
parse_maybe_bare_into_kind("foo: bar baz"),
Ok(ExpressionKind::StringPattern {
kind: "foo",
value: " bar baz".to_owned()
})
);
assert_eq!(
parse_maybe_bare_into_kind("foo:glob * [chars]?"),
Ok(ExpressionKind::StringPattern {
kind: "foo",
value: "glob * [chars]?".to_owned()
})
);
assert_eq!(
parse_maybe_bare_into_kind("foo:bar:baz"),
Err(FilesetParseErrorKind::SyntaxError)
);
assert_eq!(
parse_maybe_bare_into_kind("foo:"),
Err(FilesetParseErrorKind::SyntaxError)
);
assert_eq!(
parse_maybe_bare_into_kind(r#"foo:"unclosed quote"#),
Err(FilesetParseErrorKind::SyntaxError)
);
// Surrounding spaces are simply preserved. They could be trimmed, but
// space is valid bare_string character.
assert_eq!(
parse_maybe_bare_into_kind(" No trim "),
Ok(ExpressionKind::String(" No trim ".to_owned()))
);
}
#[test]
fn test_parse_error() {
insta::assert_snapshot!(parse_program("foo|").unwrap_err().to_string(), @r"
--> 1:5
|
1 | foo|
| ^---
|
= expected `~` or <primary>
");
}
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/working_copy.rs | lib/src/working_copy.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Defines the interface for the working copy. See `LocalWorkingCopy` for the
//! default local-disk implementation.
use std::any::Any;
use std::collections::BTreeMap;
use std::ffi::OsString;
use std::path::PathBuf;
use std::sync::Arc;
use async_trait::async_trait;
use itertools::Itertools as _;
use pollster::FutureExt as _;
use thiserror::Error;
use tracing::instrument;
use crate::backend::BackendError;
use crate::commit::Commit;
use crate::dag_walk;
use crate::gitignore::GitIgnoreError;
use crate::gitignore::GitIgnoreFile;
use crate::matchers::Matcher;
use crate::merged_tree::MergedTree;
use crate::op_store::OpStoreError;
use crate::op_store::OperationId;
use crate::operation::Operation;
use crate::ref_name::WorkspaceName;
use crate::ref_name::WorkspaceNameBuf;
use crate::repo::ReadonlyRepo;
use crate::repo::Repo as _;
use crate::repo::RewriteRootCommit;
use crate::repo_path::InvalidRepoPathError;
use crate::repo_path::RepoPath;
use crate::repo_path::RepoPathBuf;
use crate::settings::UserSettings;
use crate::store::Store;
use crate::transaction::TransactionCommitError;
/// The trait all working-copy implementations must implement.
pub trait WorkingCopy: Any + Send {
/// The name/id of the implementation. Used for choosing the right
/// implementation when loading a working copy.
fn name(&self) -> &str;
/// The working copy's workspace name (or identifier.)
fn workspace_name(&self) -> &WorkspaceName;
/// The operation this working copy was most recently updated to.
fn operation_id(&self) -> &OperationId;
/// The tree this working copy was most recently updated to.
fn tree(&self) -> Result<&MergedTree, WorkingCopyStateError>;
/// Patterns that decide which paths from the current tree should be checked
/// out in the working copy. An empty list means that no paths should be
/// checked out in the working copy. A single `RepoPath::root()` entry means
/// that all files should be checked out.
fn sparse_patterns(&self) -> Result<&[RepoPathBuf], WorkingCopyStateError>;
/// Locks the working copy and returns an instance with methods for updating
/// the working copy files and state.
fn start_mutation(&self) -> Result<Box<dyn LockedWorkingCopy>, WorkingCopyStateError>;
}
impl dyn WorkingCopy {
    /// Returns reference of the implementation type.
    ///
    /// Goes through `dyn Any` because downcasting is provided by the `Any`
    /// trait; returns `None` if `self` is not a `T`.
    pub fn downcast_ref<T: WorkingCopy>(&self) -> Option<&T> {
        (self as &dyn Any).downcast_ref()
    }
}
/// The factory which creates and loads a specific type of working copy.
pub trait WorkingCopyFactory {
/// Create a new working copy from scratch.
fn init_working_copy(
&self,
store: Arc<Store>,
working_copy_path: PathBuf,
state_path: PathBuf,
operation_id: OperationId,
workspace_name: WorkspaceNameBuf,
settings: &UserSettings,
) -> Result<Box<dyn WorkingCopy>, WorkingCopyStateError>;
/// Load an existing working copy.
fn load_working_copy(
&self,
store: Arc<Store>,
working_copy_path: PathBuf,
state_path: PathBuf,
settings: &UserSettings,
) -> Result<Box<dyn WorkingCopy>, WorkingCopyStateError>;
}
/// A working copy that's being modified.
#[async_trait]
pub trait LockedWorkingCopy: Any + Send {
/// The operation at the time the lock was taken
fn old_operation_id(&self) -> &OperationId;
/// The tree at the time the lock was taken
fn old_tree(&self) -> &MergedTree;
/// Snapshot the working copy. Returns the tree and stats.
async fn snapshot(
&mut self,
options: &SnapshotOptions,
) -> Result<(MergedTree, SnapshotStats), SnapshotError>;
/// Check out the specified commit in the working copy.
async fn check_out(&mut self, commit: &Commit) -> Result<CheckoutStats, CheckoutError>;
/// Update the workspace name.
fn rename_workspace(&mut self, new_workspace_name: WorkspaceNameBuf);
/// Update to another commit without touching the files in the working copy.
async fn reset(&mut self, commit: &Commit) -> Result<(), ResetError>;
/// Update to another commit without touching the files in the working copy,
/// without assuming that the previous tree exists.
async fn recover(&mut self, commit: &Commit) -> Result<(), ResetError>;
/// See `WorkingCopy::sparse_patterns()`
fn sparse_patterns(&self) -> Result<&[RepoPathBuf], WorkingCopyStateError>;
/// Updates the patterns that decide which paths from the current tree
/// should be checked out in the working copy.
// TODO: Use a different error type here so we can include a
// `SparseNotSupported` variants for working copies that don't support sparse
// checkouts (e.g. because they use a virtual file system so there's no reason
// to use sparse).
async fn set_sparse_patterns(
&mut self,
new_sparse_patterns: Vec<RepoPathBuf>,
) -> Result<CheckoutStats, CheckoutError>;
/// Finish the modifications to the working copy by writing the updated
/// states to disk. Returns the new (unlocked) working copy.
async fn finish(
self: Box<Self>,
operation_id: OperationId,
) -> Result<Box<dyn WorkingCopy>, WorkingCopyStateError>;
}
impl dyn LockedWorkingCopy {
    /// Returns reference of the implementation type.
    ///
    /// Returns `None` if `self` is not a `T`.
    pub fn downcast_ref<T: LockedWorkingCopy>(&self) -> Option<&T> {
        (self as &dyn Any).downcast_ref()
    }
    /// Returns mutable reference of the implementation type.
    ///
    /// Returns `None` if `self` is not a `T`.
    pub fn downcast_mut<T: LockedWorkingCopy>(&mut self) -> Option<&mut T> {
        (self as &mut dyn Any).downcast_mut()
    }
}
/// An error while snapshotting the working copy.
#[derive(Debug, Error)]
pub enum SnapshotError {
    /// A tracked path contained invalid component such as `..`.
    // `transparent` forwards both the message and source() to the inner error.
    #[error(transparent)]
    InvalidRepoPath(#[from] InvalidRepoPathError),
    /// A path in the working copy was not valid UTF-8.
    #[error("Working copy path {} is not valid UTF-8", path.to_string_lossy())]
    InvalidUtf8Path {
        /// The path with invalid UTF-8.
        path: OsString,
    },
    /// A symlink target in the working copy was not valid UTF-8.
    #[error("Symlink {path} target is not valid UTF-8")]
    InvalidUtf8SymlinkTarget {
        /// The path of the symlink that has a target that's not valid UTF-8.
        /// This path itself is valid UTF-8.
        path: PathBuf,
    },
    /// Reading or writing from the commit backend failed.
    #[error(transparent)]
    BackendError(#[from] BackendError),
    /// Checking path with ignore patterns failed.
    #[error(transparent)]
    GitIgnoreError(#[from] GitIgnoreError),
    /// Failed to load the working copy state.
    #[error(transparent)]
    WorkingCopyStateError(#[from] WorkingCopyStateError),
    /// Some other error happened while snapshotting the working copy.
    // Catch-all for implementation-specific failures; keeps the enum closed
    // while still letting backends attach arbitrary error causes.
    #[error("{message}")]
    Other {
        /// Error message.
        message: String,
        /// The underlying error.
        #[source]
        err: Box<dyn std::error::Error + Send + Sync>,
    },
}
/// Options used when snapshotting the working copy. Some of them may be ignored
/// by some `WorkingCopy` implementations.
#[derive(Clone)]
pub struct SnapshotOptions<'a> {
    /// The `.gitignore`s to use while snapshotting. These typically come from
    /// the user's configured patterns combined with per-repo patterns.
    // The base_ignores are passed in here rather than being set on the TreeState
    // because the TreeState may be long-lived if the library is used in a
    // long-lived process.
    pub base_ignores: Arc<GitIgnoreFile>,
    /// A callback for the UI to display progress.
    pub progress: Option<&'a SnapshotProgress<'a>>,
    /// For new files that are not already tracked, start tracking them if they
    /// match this.
    pub start_tracking_matcher: &'a dyn Matcher,
    /// For files that match the ignore patterns or are too large, start
    /// tracking them anyway if they match this.
    // Takes precedence over both `base_ignores` and `max_new_file_size`.
    pub force_tracking_matcher: &'a dyn Matcher,
    /// The size of the largest file that should be allowed to become tracked
    /// (already tracked files are always snapshotted). If there are larger
    /// files in the working copy, then `LockedWorkingCopy::snapshot()` may
    /// (depending on implementation)
    /// return `SnapshotError::NewFileTooLarge`.
    pub max_new_file_size: u64,
}
/// A callback for getting progress updates.
// Called with each path as it is visited; must be `Sync` because snapshotting
// may report progress from multiple threads.
pub type SnapshotProgress<'a> = dyn Fn(&RepoPath) + 'a + Sync;
/// Stats about a snapshot operation on a working copy.
#[derive(Clone, Debug, Default)]
pub struct SnapshotStats {
    /// List of new (previously untracked) files which are still untracked.
    // Ordered map so reports are deterministic, keyed by repo-relative path.
    pub untracked_paths: BTreeMap<RepoPathBuf, UntrackedReason>,
}
/// Reason why the new path isn't tracked.
#[derive(Clone, Debug)]
pub enum UntrackedReason {
    /// File was larger than the specified maximum file size.
    // See `SnapshotOptions::max_new_file_size` for where the limit comes from.
    FileTooLarge {
        /// Actual size of the large file.
        size: u64,
        /// Maximum allowed size.
        max_size: u64,
    },
    /// File does not match the fileset specified in snapshot.auto-track.
    FileNotAutoTracked,
}
/// Stats about a checkout operation on a working copy. All "files" mentioned
/// below may also be symlinks or materialized conflicts.
#[derive(Debug, PartialEq, Eq, Clone, Default)]
pub struct CheckoutStats {
    /// The number of files that were updated in the working copy.
    /// These files existed before and after the checkout.
    pub updated_files: u32,
    /// The number of files added in the working copy.
    pub added_files: u32,
    /// The number of files removed in the working copy.
    pub removed_files: u32,
    /// The number of files that were supposed to be updated or added in the
    /// working copy but were skipped because there was an untracked (probably
    /// ignored) file in its place.
    pub skipped_files: u32,
}
/// The working-copy checkout failed.
#[derive(Debug, Error)]
pub enum CheckoutError {
    /// The current working-copy commit was deleted, maybe by an overly
    /// aggressive GC that happened while the current process was running.
    #[error("Current working-copy commit not found")]
    SourceNotFound {
        /// The underlying error.
        source: Box<dyn std::error::Error + Send + Sync>,
    },
    /// Another process checked out a commit while the current process was
    /// running (after the working copy was read by the current process).
    #[error("Concurrent checkout")]
    ConcurrentCheckout,
    /// Path in the commit contained invalid component such as `..`.
    #[error(transparent)]
    InvalidRepoPath(#[from] InvalidRepoPathError),
    /// Path contained reserved name which cannot be checked out to disk.
    // e.g. names that the filesystem or VCS internals reserve for themselves.
    #[error("Reserved path component {name} in {path}")]
    ReservedPathComponent {
        /// The file or directory path.
        path: PathBuf,
        /// The reserved path component.
        name: &'static str,
    },
    /// Reading or writing from the commit backend failed.
    #[error("Internal backend error")]
    InternalBackendError(#[from] BackendError),
    /// Failed to load the working copy state.
    #[error(transparent)]
    WorkingCopyStateError(#[from] WorkingCopyStateError),
    /// Some other error happened while checking out the working copy.
    #[error("{message}")]
    Other {
        /// Error message.
        message: String,
        /// The underlying error.
        #[source]
        err: Box<dyn std::error::Error + Send + Sync>,
    },
}
/// An error while resetting the working copy.
#[derive(Debug, Error)]
pub enum ResetError {
    /// The current working-copy commit was deleted, maybe by an overly
    /// aggressive GC that happened while the current process was running.
    #[error("Current working-copy commit not found")]
    SourceNotFound {
        /// The underlying error.
        source: Box<dyn std::error::Error + Send + Sync>,
    },
    /// Reading or writing from the commit backend failed.
    // NOTE(review): message is "Internal error" here but "Internal backend
    // error" in the corresponding CheckoutError variant — consider aligning.
    #[error("Internal error")]
    InternalBackendError(#[from] BackendError),
    /// Failed to load the working copy state.
    #[error(transparent)]
    WorkingCopyStateError(#[from] WorkingCopyStateError),
    /// Some other error happened while resetting the working copy.
    #[error("{message}")]
    Other {
        /// Error message.
        message: String,
        /// The underlying error.
        #[source]
        err: Box<dyn std::error::Error + Send + Sync>,
    },
}
/// Whether the working copy is stale or not.
// Produced by `WorkingCopyFreshness::check_stale()` below.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum WorkingCopyFreshness {
    /// The working copy isn't stale, and no need to reload the repo.
    Fresh,
    /// The working copy was updated since we loaded the repo. The repo must be
    /// reloaded at the working copy's operation.
    Updated(Box<Operation>),
    /// The working copy is behind the latest operation.
    WorkingCopyStale,
    /// The working copy is a sibling of the latest operation.
    // i.e. neither operation is an ancestor of the other.
    SiblingOperation,
}
impl WorkingCopyFreshness {
    /// Determines the freshness of the provided working copy relative to the
    /// target commit.
    ///
    /// Compares the operation the working copy was last written at against
    /// the operation the repo is loaded at, walking the operation DAG to find
    /// their closest common ancestor when they differ.
    #[instrument(skip_all)]
    pub fn check_stale(
        locked_wc: &dyn LockedWorkingCopy,
        wc_commit: &Commit,
        repo: &ReadonlyRepo,
    ) -> Result<Self, OpStoreError> {
        // Fast path: working copy and repo agree on the operation.
        if locked_wc.old_operation_id() == repo.op_id() {
            return Ok(Self::Fresh);
        }
        let wc_operation = repo.loader().load_operation(locked_wc.old_operation_id())?;
        let repo_operation = repo.operation();
        let ancestor_op = dag_walk::closest_common_node_ok(
            [Ok(wc_operation.clone())],
            [Ok(repo_operation.clone())],
            |op: &Operation| op.id().clone(),
            |op: &Operation| op.parents().collect_vec(),
        )?
        .expect("unrelated operations");
        if ancestor_op.id() == repo_operation.id() {
            // The working copy was updated since we loaded the repo. The repo
            // must be reloaded at the working copy's operation.
            return Ok(Self::Updated(Box::new(wc_operation)));
        }
        if ancestor_op.id() != wc_operation.id() {
            // Neither operation is an ancestor of the other.
            return Ok(Self::SiblingOperation);
        }
        // The working copy was not updated when some repo operation committed,
        // meaning that it's stale compared to the repo view — unless the trees
        // happen to be identical, in which case no update is required.
        if locked_wc.old_tree().tree_ids_and_labels() == wc_commit.tree().tree_ids_and_labels() {
            Ok(Self::Fresh)
        } else {
            Ok(Self::WorkingCopyStale)
        }
    }
}
/// An error while recovering a stale working copy.
#[derive(Debug, Error)]
pub enum RecoverWorkspaceError {
    /// Backend error.
    #[error(transparent)]
    Backend(#[from] BackendError),
    /// Error during checkout.
    #[error(transparent)]
    Reset(#[from] ResetError),
    /// Checkout attempted to modify the root commit.
    #[error(transparent)]
    RewriteRootCommit(#[from] RewriteRootCommit),
    /// Error during transaction.
    #[error(transparent)]
    TransactionCommit(#[from] TransactionCommitError),
    /// Working copy commit is missing.
    // `.0` is the workspace name whose working-copy commit wasn't found.
    #[error(r#""{}" doesn't have a working-copy commit"#, .0.as_symbol())]
    WorkspaceMissingWorkingCopy(WorkspaceNameBuf),
}
/// Recover this workspace to its last known checkout.
///
/// Creates a new commit on top of the workspace's current working-copy commit
/// (reusing that commit's tree), points the workspace at the new commit in a
/// transaction, and resets the locked working copy onto it. Returns the
/// reloaded repo and the recovery commit.
pub fn create_and_check_out_recovery_commit(
    locked_wc: &mut dyn LockedWorkingCopy,
    repo: &Arc<ReadonlyRepo>,
    workspace_name: WorkspaceNameBuf,
    description: &str,
) -> Result<(Arc<ReadonlyRepo>, Commit), RecoverWorkspaceError> {
    let mut tx = repo.start_transaction();
    let repo_mut = tx.repo_mut();
    // Fails if this workspace has no working-copy commit recorded in the view.
    let commit_id = repo
        .view()
        .get_wc_commit_id(&workspace_name)
        .ok_or_else(|| {
            RecoverWorkspaceError::WorkspaceMissingWorkingCopy(workspace_name.clone())
        })?;
    let commit = repo.store().get_commit(commit_id)?;
    // New commit is a child of the old working-copy commit with the same tree.
    let new_commit = repo_mut
        .new_commit(vec![commit_id.clone()], commit.tree())
        .set_description(description)
        .write()?;
    repo_mut.set_wc_commit(workspace_name, new_commit.id().clone())?;
    let repo = tx.commit("recovery commit")?;
    // Only reset the on-disk working copy after the transaction committed.
    locked_wc.recover(&new_commit).block_on()?;
    Ok((repo, new_commit))
}
/// An error while reading the working copy state.
// The Display impl (via thiserror) shows only `message`; `err` is exposed
// through `Error::source()`.
#[derive(Debug, Error)]
#[error("{message}")]
pub struct WorkingCopyStateError {
    /// Error message.
    pub message: String,
    /// The underlying error.
    #[source]
    pub err: Box<dyn std::error::Error + Send + Sync>,
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/dsl_util.rs | lib/src/dsl_util.rs | // Copyright 2020-2024 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Domain-specific language helpers.
use std::ascii;
use std::collections::HashMap;
use std::fmt;
use std::slice;
use itertools::Itertools as _;
use pest::RuleType;
use pest::iterators::Pair;
use pest::iterators::Pairs;
/// Manages diagnostic messages emitted during parsing.
///
/// `T` is usually a parse error type of the language, which contains a message
/// and source span of 'static lifetime.
#[derive(Debug)]
pub struct Diagnostics<T> {
    // This might be extended to [{ kind: Warning|Error, message: T }, ..].
    // Messages are kept in insertion order.
    diagnostics: Vec<T>,
}
impl<T> Diagnostics<T> {
    /// Creates a new, empty diagnostics collector.
    pub fn new() -> Self {
        Self {
            diagnostics: vec![],
        }
    }
    /// Returns `true` if no diagnostic messages have been collected.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Returns how many diagnostic messages have been collected.
    pub fn len(&self) -> usize {
        self.diagnostics.len()
    }
    /// Returns an iterator over the collected diagnostic messages, in
    /// insertion order.
    pub fn iter(&self) -> slice::Iter<'_, T> {
        self.diagnostics.iter()
    }
    /// Records a diagnostic message of warning level.
    pub fn add_warning(&mut self, diag: T) {
        self.diagnostics.push(diag);
    }
    /// Moves diagnostic messages of different type (such as fileset warnings
    /// emitted within `file()` revset), converting each with `f`.
    pub fn extend_with<U>(&mut self, diagnostics: Diagnostics<U>, mut f: impl FnMut(U) -> T) {
        for diag in diagnostics.diagnostics {
            self.diagnostics.push(f(diag));
        }
    }
}
impl<T> Default for Diagnostics<T> {
    /// Creates an empty collector, equivalent to [`Diagnostics::new`].
    fn default() -> Self {
        Self::new()
    }
}
/// Allows iterating `&Diagnostics<T>` directly (e.g. in a `for` loop) without
/// consuming the collector.
impl<'a, T> IntoIterator for &'a Diagnostics<T> {
    type Item = &'a T;
    type IntoIter = slice::Iter<'a, T>;
    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
/// AST node without type or name checking.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ExpressionNode<'i, T> {
    /// Expression item such as identifier, literal, function call, etc.
    pub kind: T,
    /// Span of the node.
    // Borrows from the input text, hence the 'i lifetime.
    pub span: pest::Span<'i>,
}
impl<'i, T> ExpressionNode<'i, T> {
    /// Wraps the given expression and span.
    pub fn new(kind: T, span: pest::Span<'i>) -> Self {
        Self { kind, span }
    }
}
/// Function call in AST.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FunctionCallNode<'i, T> {
    /// Function name.
    pub name: &'i str,
    /// Span of the function name.
    pub name_span: pest::Span<'i>,
    /// List of positional arguments.
    pub args: Vec<ExpressionNode<'i, T>>,
    /// List of keyword arguments.
    // Prefer the expect_*() accessors below over reading this directly, so
    // keyword arguments aren't silently ignored.
    pub keyword_args: Vec<KeywordArgument<'i, T>>,
    /// Span of the arguments list.
    pub args_span: pest::Span<'i>,
}
/// Keyword argument pair in AST.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct KeywordArgument<'i, T> {
    /// Parameter name.
    pub name: &'i str,
    /// Span of the parameter name.
    pub name_span: pest::Span<'i>,
    /// Value expression.
    pub value: ExpressionNode<'i, T>,
}
impl<'i, T> FunctionCallNode<'i, T> {
    /// Number of arguments assuming named arguments are all unique.
    pub fn arity(&self) -> usize {
        self.args.len() + self.keyword_args.len()
    }
    /// Ensures that no arguments passed.
    pub fn expect_no_arguments(&self) -> Result<(), InvalidArguments<'i>> {
        // Destructuring against empty arrays fixes N = 0 and M = 0.
        let ([], []) = self.expect_arguments()?;
        Ok(())
    }
    /// Extracts exactly N required arguments.
    pub fn expect_exact_arguments<const N: usize>(
        &self,
    ) -> Result<&[ExpressionNode<'i, T>; N], InvalidArguments<'i>> {
        // The empty second array fixes M = 0 (no optional arguments).
        let (args, []) = self.expect_arguments()?;
        Ok(args)
    }
    /// Extracts N required arguments and remainders.
    ///
    /// This can be used to get all the positional arguments without requiring
    /// any (N = 0):
    /// ```ignore
    /// let ([], content_nodes) = function.expect_some_arguments()?;
    /// ```
    /// Avoid accessing `function.args` directly, as that may allow keyword
    /// arguments to be silently ignored.
    #[expect(clippy::type_complexity)]
    pub fn expect_some_arguments<const N: usize>(
        &self,
    ) -> Result<(&[ExpressionNode<'i, T>; N], &[ExpressionNode<'i, T>]), InvalidArguments<'i>> {
        self.ensure_no_keyword_arguments()?;
        if self.args.len() >= N {
            let (required, rest) = self.args.split_at(N);
            Ok((required.try_into().unwrap(), rest))
        } else {
            Err(self.invalid_arguments_count(N, None))
        }
    }
    /// Extracts N required arguments and M optional arguments.
    #[expect(clippy::type_complexity)]
    pub fn expect_arguments<const N: usize, const M: usize>(
        &self,
    ) -> Result<
        (
            &[ExpressionNode<'i, T>; N],
            [Option<&ExpressionNode<'i, T>>; M],
        ),
        InvalidArguments<'i>,
    > {
        self.ensure_no_keyword_arguments()?;
        let count_range = N..=(N + M);
        if count_range.contains(&self.args.len()) {
            let (required, rest) = self.args.split_at(N);
            // Pad absent optional arguments with None up to M entries.
            let mut optional = rest.iter().map(Some).collect_vec();
            optional.resize(M, None);
            Ok((
                required.try_into().unwrap(),
                optional.try_into().ok().unwrap(),
            ))
        } else {
            let (min, max) = count_range.into_inner();
            Err(self.invalid_arguments_count(min, Some(max)))
        }
    }
    /// Extracts N required arguments and M optional arguments. Some of them can
    /// be specified as keyword arguments.
    ///
    /// `names` is a list of parameter names. Unnamed positional arguments
    /// should be padded with `""`.
    #[expect(clippy::type_complexity)]
    pub fn expect_named_arguments<const N: usize, const M: usize>(
        &self,
        names: &[&str],
    ) -> Result<
        (
            [&ExpressionNode<'i, T>; N],
            [Option<&ExpressionNode<'i, T>>; M],
        ),
        InvalidArguments<'i>,
    > {
        if self.keyword_args.is_empty() {
            // Fast path: purely positional call.
            let (required, optional) = self.expect_arguments::<N, M>()?;
            Ok((required.each_ref(), optional))
        } else {
            let (required, optional) = self.expect_named_arguments_vec(names, N, N + M)?;
            Ok((
                required.try_into().ok().unwrap(),
                optional.try_into().ok().unwrap(),
            ))
        }
    }
    // Dynamically-sized worker for expect_named_arguments(): merges positional
    // and keyword arguments into one slot-per-parameter table.
    #[expect(clippy::type_complexity)]
    fn expect_named_arguments_vec(
        &self,
        names: &[&str],
        min: usize,
        max: usize,
    ) -> Result<
        (
            Vec<&ExpressionNode<'i, T>>,
            Vec<Option<&ExpressionNode<'i, T>>>,
        ),
        InvalidArguments<'i>,
    > {
        assert!(names.len() <= max);
        if self.args.len() > max {
            return Err(self.invalid_arguments_count(min, Some(max)));
        }
        // Positional arguments fill the leading slots; the rest start empty.
        let mut extracted = Vec::with_capacity(max);
        extracted.extend(self.args.iter().map(Some));
        extracted.resize(max, None);
        for arg in &self.keyword_args {
            let name = arg.name;
            let span = arg.name_span.start_pos().span(&arg.value.span.end_pos());
            let pos = names.iter().position(|&n| n == name).ok_or_else(|| {
                self.invalid_arguments(format!(r#"Unexpected keyword argument "{name}""#), span)
            })?;
            // Slot already filled positionally or by an earlier keyword.
            if extracted[pos].is_some() {
                return Err(self.invalid_arguments(
                    format!(r#"Got multiple values for keyword "{name}""#),
                    span,
                ));
            }
            extracted[pos] = Some(&arg.value);
        }
        let optional = extracted.split_off(min);
        // Any None left in the required prefix means a parameter is missing.
        let required = extracted.into_iter().flatten().collect_vec();
        if required.len() != min {
            return Err(self.invalid_arguments_count(min, Some(max)));
        }
        Ok((required, optional))
    }
    fn ensure_no_keyword_arguments(&self) -> Result<(), InvalidArguments<'i>> {
        if let (Some(first), Some(last)) = (self.keyword_args.first(), self.keyword_args.last()) {
            // Span covering all keyword arguments, for the error report.
            let span = first.name_span.start_pos().span(&last.value.span.end_pos());
            Err(self.invalid_arguments("Unexpected keyword arguments".to_owned(), span))
        } else {
            Ok(())
        }
    }
    fn invalid_arguments(&self, message: String, span: pest::Span<'i>) -> InvalidArguments<'i> {
        InvalidArguments {
            name: self.name,
            message,
            span,
        }
    }
    fn invalid_arguments_count(&self, min: usize, max: Option<usize>) -> InvalidArguments<'i> {
        let message = match (min, max) {
            (min, Some(max)) if min == max => format!("Expected {min} arguments"),
            (min, Some(max)) => format!("Expected {min} to {max} arguments"),
            (min, None) => format!("Expected at least {min} arguments"),
        };
        self.invalid_arguments(message, self.args_span)
    }
    // Like invalid_arguments_count(), but for a non-contiguous set of arities.
    fn invalid_arguments_count_with_arities(
        &self,
        arities: impl IntoIterator<Item = usize>,
    ) -> InvalidArguments<'i> {
        let message = format!("Expected {} arguments", arities.into_iter().join(", "));
        self.invalid_arguments(message, self.args_span)
    }
}
/// Unexpected number of arguments, or invalid combination of arguments.
///
/// This error is supposed to be converted to language-specific parse error
/// type, where lifetime `'i` will be eliminated.
#[derive(Clone, Debug)]
pub struct InvalidArguments<'i> {
    /// Function name.
    pub name: &'i str,
    /// Error message.
    pub message: String,
    /// Span of the bad arguments.
    pub span: pest::Span<'i>,
}
/// Expression item that can be transformed recursively by using `folder: F`.
pub trait FoldableExpression<'i>: Sized {
    /// Transforms `self` by applying the `folder` to inner items.
    // `span` is the span of the node being folded, forwarded to the folder.
    fn fold<F>(self, folder: &mut F, span: pest::Span<'i>) -> Result<Self, F::Error>
    where
        F: ExpressionFolder<'i, Self> + ?Sized;
}
/// Visitor-like interface to transform AST nodes recursively.
pub trait ExpressionFolder<'i, T: FoldableExpression<'i>> {
    /// Transform error.
    type Error;
    /// Transforms the expression `node`. By default, inner items are
    /// transformed recursively.
    fn fold_expression(
        &mut self,
        node: ExpressionNode<'i, T>,
    ) -> Result<ExpressionNode<'i, T>, Self::Error> {
        let ExpressionNode { kind, span } = node;
        // Delegate to the item's own fold(), which calls back into this folder
        // for each inner node; the span is preserved unchanged.
        let kind = kind.fold(self, span)?;
        Ok(ExpressionNode { kind, span })
    }
    /// Transforms identifier.
    fn fold_identifier(&mut self, name: &'i str, span: pest::Span<'i>) -> Result<T, Self::Error>;
    /// Transforms function call.
    fn fold_function_call(
        &mut self,
        function: Box<FunctionCallNode<'i, T>>,
        span: pest::Span<'i>,
    ) -> Result<T, Self::Error>;
}
/// Transforms list of `nodes` by using `folder`.
///
/// Stops at the first error; nodes are folded in order.
pub fn fold_expression_nodes<'i, F, T>(
    folder: &mut F,
    nodes: Vec<ExpressionNode<'i, T>>,
) -> Result<Vec<ExpressionNode<'i, T>>, F::Error>
where
    F: ExpressionFolder<'i, T> + ?Sized,
    T: FoldableExpression<'i>,
{
    let mut folded = Vec::with_capacity(nodes.len());
    for node in nodes {
        folded.push(folder.fold_expression(node)?);
    }
    Ok(folded)
}
/// Transforms function call arguments by using `folder`.
///
/// The function name and spans are kept as-is; only positional and keyword
/// argument values are folded, in their original order.
pub fn fold_function_call_args<'i, F, T>(
    folder: &mut F,
    function: FunctionCallNode<'i, T>,
) -> Result<FunctionCallNode<'i, T>, F::Error>
where
    F: ExpressionFolder<'i, T> + ?Sized,
    T: FoldableExpression<'i>,
{
    let FunctionCallNode {
        name,
        name_span,
        args,
        keyword_args,
        args_span,
    } = function;
    // Positional arguments are folded first, then keyword argument values.
    let args = fold_expression_nodes(folder, args)?;
    let mut folded_keyword_args = Vec::with_capacity(keyword_args.len());
    for arg in keyword_args {
        folded_keyword_args.push(KeywordArgument {
            name: arg.name,
            name_span: arg.name_span,
            value: folder.fold_expression(arg.value)?,
        });
    }
    Ok(FunctionCallNode {
        name,
        name_span,
        args,
        keyword_args: folded_keyword_args,
        args_span,
    })
}
/// Helper to parse string literal.
// Bundles the grammar rules that make up a quoted string so parse() can be
// shared across languages with different pest grammars.
#[derive(Debug)]
pub struct StringLiteralParser<R> {
    /// String content part.
    pub content_rule: R,
    /// Escape sequence part including backslash character.
    pub escape_rule: R,
}
impl<R: RuleType> StringLiteralParser<R> {
    /// Parses the given string literal `pairs` into string.
    ///
    /// Content parts are copied verbatim; escape sequences (starting with a
    /// backslash) are decoded. Panics on rules or escapes the grammar should
    /// never produce.
    pub fn parse(&self, pairs: Pairs<R>) -> String {
        let mut result = String::new();
        for part in pairs {
            let rule = part.as_rule();
            if rule == self.content_rule {
                result.push_str(part.as_str());
            } else if rule == self.escape_rule {
                // Skip the leading backslash and decode the rest.
                let decoded = match &part.as_str()[1..] {
                    "\"" => '"',
                    "\\" => '\\',
                    "t" => '\t',
                    "r" => '\r',
                    "n" => '\n',
                    "0" => '\0',
                    "e" => '\x1b',
                    hex if hex.starts_with('x') => char::from(
                        u8::from_str_radix(&hex[1..], 16).expect("hex characters"),
                    ),
                    char => panic!("invalid escape: \\{char:?}"),
                };
                result.push(decoded);
            } else {
                panic!("unexpected part of string: {part:?}");
            }
        }
        result
    }
}
/// Escape special characters in the input.
///
/// Produces the inverse of the escapes understood by
/// `StringLiteralParser::parse()`: quotes, backslashes, and common control
/// characters get short escapes, other ASCII control characters get `\xNN`
/// hex escapes, and everything else passes through unchanged.
pub fn escape_string(unescaped: &str) -> String {
    let mut escaped = String::with_capacity(unescaped.len());
    for c in unescaped.chars() {
        match c {
            // Characters with a dedicated short escape sequence.
            '"' | '\\' | '\t' | '\r' | '\n' | '\0' => {
                escaped.push('\\');
                escaped.push(match c {
                    '"' => '"',
                    '\\' => '\\',
                    '\t' => 't',
                    '\r' => 'r',
                    '\n' => 'n',
                    '\0' => '0',
                    _ => unreachable!(),
                });
            }
            // Remaining ASCII control characters: emit `\xNN`-style escapes.
            // The cast is safe because is_ascii_control() implies c <= 0x7f.
            c if c.is_ascii_control() => {
                escaped.extend(ascii::escape_default(c as u8).map(char::from));
            }
            c => escaped.push(c),
        }
    }
    escaped
}
/// Helper to parse function call.
// Bundles the grammar rules for a call expression so parse() can be shared
// across languages with different pest grammars.
#[derive(Debug)]
pub struct FunctionCallParser<R> {
    /// Function name.
    pub function_name_rule: R,
    /// List of positional and keyword arguments.
    pub function_arguments_rule: R,
    /// Pair of parameter name and value.
    pub keyword_argument_rule: R,
    /// Parameter name.
    pub argument_name_rule: R,
    /// Value expression.
    pub argument_value_rule: R,
}
impl<R: RuleType> FunctionCallParser<R> {
    /// Parses the given `pair` as function call.
    ///
    /// Rejects a positional argument that appears after any keyword argument.
    /// Panics (asserts) if the grammar yields unexpected rules — that's a bug
    /// in the grammar, not a user error.
    pub fn parse<'i, T, E: From<InvalidArguments<'i>>>(
        &self,
        pair: Pair<'i, R>,
        // parse_name can be defined for any Pair<'_, R>, but parse_value should
        // be allowed to construct T by capturing Pair<'i, R>.
        parse_name: impl Fn(Pair<'i, R>) -> Result<&'i str, E>,
        parse_value: impl Fn(Pair<'i, R>) -> Result<ExpressionNode<'i, T>, E>,
    ) -> Result<FunctionCallNode<'i, T>, E> {
        // A call pair is expected to contain exactly [name, arguments].
        let [name_pair, args_pair] = pair.into_inner().collect_array().unwrap();
        assert_eq!(name_pair.as_rule(), self.function_name_rule);
        assert_eq!(args_pair.as_rule(), self.function_arguments_rule);
        let name_span = name_pair.as_span();
        let args_span = args_pair.as_span();
        let function_name = parse_name(name_pair)?;
        let mut args = Vec::new();
        let mut keyword_args = Vec::new();
        for pair in args_pair.into_inner() {
            let span = pair.as_span();
            if pair.as_rule() == self.argument_value_rule {
                // A bare value is a positional argument; disallow it once any
                // keyword argument has been seen.
                if !keyword_args.is_empty() {
                    return Err(InvalidArguments {
                        name: function_name,
                        message: "Positional argument follows keyword argument".to_owned(),
                        span,
                    }
                    .into());
                }
                args.push(parse_value(pair)?);
            } else if pair.as_rule() == self.keyword_argument_rule {
                // A keyword argument pair contains exactly [name, value].
                let [name_pair, value_pair] = pair.into_inner().collect_array().unwrap();
                assert_eq!(name_pair.as_rule(), self.argument_name_rule);
                assert_eq!(value_pair.as_rule(), self.argument_value_rule);
                let name_span = name_pair.as_span();
                let arg = KeywordArgument {
                    name: parse_name(name_pair)?,
                    name_span,
                    value: parse_value(value_pair)?,
                };
                keyword_args.push(arg);
            } else {
                panic!("unexpected argument rule {pair:?}");
            }
        }
        Ok(FunctionCallNode {
            name: function_name,
            name_span,
            args,
            keyword_args,
            args_span,
        })
    }
}
/// Map of symbol and function aliases.
#[derive(Clone, Debug, Default)]
pub struct AliasesMap<P, V> {
    symbol_aliases: HashMap<String, V>,
    // name: [(params, defn)] (sorted by arity)
    function_aliases: HashMap<String, Vec<(Vec<String>, V)>>,
    // Parser type P helps prevent misuse of AliasesMap of different language.
    parser: P,
}
impl<P, V> AliasesMap<P, V> {
    /// Creates an empty aliases map with default-constructed parser.
    pub fn new() -> Self
    where
        P: Default,
    {
        Self {
            symbol_aliases: Default::default(),
            function_aliases: Default::default(),
            parser: Default::default(),
        }
    }
    /// Adds new substitution rule `decl = defn`.
    ///
    /// Returns error if `decl` is invalid. The `defn` part isn't checked. A bad
    /// `defn` will be reported when the alias is substituted.
    pub fn insert(&mut self, decl: impl AsRef<str>, defn: impl Into<V>) -> Result<(), P::Error>
    where
        P: AliasDeclarationParser,
    {
        match self.parser.parse_declaration(decl.as_ref())? {
            AliasDeclaration::Symbol(name) => {
                self.symbol_aliases.insert(name, defn.into());
            }
            AliasDeclaration::Function(name, params) => {
                let overloads = self.function_aliases.entry(name).or_default();
                // Overloads stay sorted by arity so lookup can binary-search.
                // An existing overload of the same arity is replaced.
                match overloads.binary_search_by_key(&params.len(), |(params, _)| params.len()) {
                    Ok(i) => overloads[i] = (params, defn.into()),
                    Err(i) => overloads.insert(i, (params, defn.into())),
                }
            }
        }
        Ok(())
    }
    /// Iterates symbol names in arbitrary order.
    pub fn symbol_names(&self) -> impl Iterator<Item = &str> {
        self.symbol_aliases.keys().map(|n| n.as_ref())
    }
    /// Iterates function names in arbitrary order.
    pub fn function_names(&self) -> impl Iterator<Item = &str> {
        self.function_aliases.keys().map(|n| n.as_ref())
    }
    /// Looks up symbol alias by name. Returns identifier and definition text.
    pub fn get_symbol(&self, name: &str) -> Option<(AliasId<'_>, &V)> {
        self.symbol_aliases
            .get_key_value(name)
            .map(|(name, defn)| (AliasId::Symbol(name), defn))
    }
    /// Looks up function alias by name and arity. Returns identifier, list of
    /// parameter names, and definition text.
    pub fn get_function(&self, name: &str, arity: usize) -> Option<(AliasId<'_>, &[String], &V)> {
        let overloads = self.get_function_overloads(name)?;
        overloads.find_by_arity(arity)
    }
    /// Looks up function aliases by name.
    fn get_function_overloads(&self, name: &str) -> Option<AliasFunctionOverloads<'_, V>> {
        let (name, overloads) = self.function_aliases.get_key_value(name)?;
        Some(AliasFunctionOverloads { name, overloads })
    }
}
// All overloads of one function alias name; `overloads` is sorted by arity
// (maintained by AliasesMap::insert) and is never empty when constructed.
#[derive(Clone, Debug)]
struct AliasFunctionOverloads<'a, V> {
    name: &'a String,
    overloads: &'a Vec<(Vec<String>, V)>,
}
impl<'a, V> AliasFunctionOverloads<'a, V> {
    /// Iterates the arities of all overloads, in ascending order.
    fn arities(&self) -> impl DoubleEndedIterator<Item = usize> + ExactSizeIterator {
        self.overloads.iter().map(|(params, _)| params.len())
    }
    /// Smallest accepted arity. The overload list is sorted and non-empty.
    fn min_arity(&self) -> usize {
        self.overloads.first().map(|(params, _)| params.len()).unwrap()
    }
    /// Largest accepted arity.
    fn max_arity(&self) -> usize {
        self.overloads.last().map(|(params, _)| params.len()).unwrap()
    }
    /// Finds the overload accepting exactly `arity` arguments, if any.
    fn find_by_arity(&self, arity: usize) -> Option<(AliasId<'a>, &'a [String], &'a V)> {
        match self
            .overloads
            .binary_search_by_key(&arity, |(params, _)| params.len())
        {
            Ok(index) => {
                let (params, defn) = &self.overloads[index];
                // Exact parameter names aren't needed to identify a function,
                // but they provide a better error indication. (e.g. "foo(x, y)"
                // is easier to follow than "foo/2".)
                Some((AliasId::Function(self.name, params), params, defn))
            }
            Err(_) => None,
        }
    }
}
/// Borrowed reference to identify alias expression.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum AliasId<'a> {
    /// Symbol name.
    Symbol(&'a str),
    /// Function name and parameter names.
    Function(&'a str, &'a [String]),
    /// Function parameter name.
    // Used while expanding a function alias body, where parameters act like
    // locally-scoped symbol aliases.
    Parameter(&'a str),
}
impl fmt::Display for AliasId<'_> {
    /// Formats the alias for error messages: bare name for symbols and
    /// parameters, `name(param1, param2)` for functions.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            // Symbols and parameters are both displayed as the bare name.
            Self::Symbol(name) | Self::Parameter(name) => write!(f, "{name}"),
            Self::Function(name, params) => {
                write!(f, "{}({})", name, params.join(", "))
            }
        }
    }
}
/// Parsed declaration part of alias rule.
// i.e. the left-hand side of `decl = defn` in AliasesMap::insert().
#[derive(Clone, Debug)]
pub enum AliasDeclaration {
    /// Symbol name.
    Symbol(String),
    /// Function name and parameters.
    Function(String, Vec<String>),
}
// AliasDeclarationParser and AliasDefinitionParser can be merged into a single
// trait, but it's unclear whether doing that would simplify the abstraction.
/// Parser for symbol and function alias declaration.
pub trait AliasDeclarationParser {
    /// Parse error type.
    type Error;
    /// Parses symbol or function name and parameters.
    fn parse_declaration(&self, source: &str) -> Result<AliasDeclaration, Self::Error>;
}
/// Parser for symbol and function alias definition.
pub trait AliasDefinitionParser {
    /// Expression item type.
    // GAT: the output borrows from the definition source text.
    type Output<'i>;
    /// Parse error type.
    type Error;
    /// Parses alias body.
    fn parse_definition<'i>(
        &self,
        source: &'i str,
    ) -> Result<ExpressionNode<'i, Self::Output<'i>>, Self::Error>;
}
/// Expression item that supports alias substitution.
pub trait AliasExpandableExpression<'i>: FoldableExpression<'i> {
    /// Wraps identifier.
    fn identifier(name: &'i str) -> Self;
    /// Wraps function call.
    fn function_call(function: Box<FunctionCallNode<'i, Self>>) -> Self;
    /// Wraps substituted expression.
    // `id` records which alias produced `subst`, for error reporting.
    fn alias_expanded(id: AliasId<'i>, subst: Box<ExpressionNode<'i, Self>>) -> Self;
}
/// Error that may occur during alias substitution.
pub trait AliasExpandError: Sized {
    /// Unexpected number of arguments, or invalid combination of arguments.
    fn invalid_arguments(err: InvalidArguments<'_>) -> Self;
    /// Recursion detected during alias substitution.
    fn recursive_expansion(id: AliasId<'_>, span: pest::Span<'_>) -> Self;
    /// Attaches alias trace to the current error.
    // Called as errors propagate out of nested expansions, building a chain.
    fn within_alias_expansion(self, id: AliasId<'_>, span: pest::Span<'_>) -> Self;
}
/// Expands aliases recursively in tree of `T`.
#[derive(Debug)]
struct AliasExpander<'i, 'a, T, P> {
    /// Alias symbols and functions that are globally available.
    aliases_map: &'i AliasesMap<P, String>,
    /// Local variables set in the outermost scope.
    locals: &'a HashMap<&'i str, ExpressionNode<'i, T>>,
    /// Stack of aliases and local parameters currently expanding.
    // Also serves as the recursion guard: see expand_defn().
    states: Vec<AliasExpandingState<'i, T>>,
}
// One frame of the alias-expansion stack: the alias being expanded plus the
// parameter bindings visible inside its definition.
#[derive(Debug)]
struct AliasExpandingState<'i, T> {
    id: AliasId<'i>,
    locals: HashMap<&'i str, ExpressionNode<'i, T>>,
}
impl<'i, T, P, E> AliasExpander<'i, '_, T, P>
where
    T: AliasExpandableExpression<'i> + Clone,
    P: AliasDefinitionParser<Output<'i> = T, Error = E>,
    E: AliasExpandError,
{
    /// Local variables available to the current scope.
    // Inside an alias expansion, only that alias's parameters are visible;
    // at the top level, the caller-supplied locals are.
    fn current_locals(&self) -> &HashMap<&'i str, ExpressionNode<'i, T>> {
        self.states.last().map_or(self.locals, |s| &s.locals)
    }
    /// Parses and recursively expands the alias definition `defn`, with
    /// `locals` as the parameter bindings for the new scope.
    fn expand_defn(
        &mut self,
        id: AliasId<'i>,
        defn: &'i str,
        locals: HashMap<&'i str, ExpressionNode<'i, T>>,
        span: pest::Span<'i>,
    ) -> Result<T, E> {
        // The stack should be short, so let's simply do linear search.
        if self.states.iter().any(|s| s.id == id) {
            return Err(E::recursive_expansion(id, span));
        }
        self.states.push(AliasExpandingState { id, locals });
        // Parsed defn could be cached if needed.
        let result = self
            .aliases_map
            .parser
            .parse_definition(defn)
            .and_then(|node| self.fold_expression(node))
            .map(|node| T::alias_expanded(id, Box::new(node)))
            .map_err(|e| e.within_alias_expansion(id, span));
        // Pop unconditionally so the stack stays balanced on error.
        self.states.pop();
        result
    }
}
impl<'i, T, P, E> ExpressionFolder<'i, T> for AliasExpander<'i, '_, T, P>
where
    T: AliasExpandableExpression<'i> + Clone,
    P: AliasDefinitionParser<Output<'i> = T, Error = E>,
    E: AliasExpandError,
{
    type Error = E;
    // Identifier resolution order: scope-local parameter, then global symbol
    // alias, then a plain identifier.
    fn fold_identifier(&mut self, name: &'i str, span: pest::Span<'i>) -> Result<T, Self::Error> {
        if let Some(subst) = self.current_locals().get(name) {
            let id = AliasId::Parameter(name);
            Ok(T::alias_expanded(id, Box::new(subst.clone())))
        } else if let Some((id, defn)) = self.aliases_map.get_symbol(name) {
            let locals = HashMap::new(); // Don't spill out the current scope
            self.expand_defn(id, defn, locals, span)
        } else {
            Ok(T::identifier(name))
        }
    }
    fn fold_function_call(
        &mut self,
        function: Box<FunctionCallNode<'i, T>>,
        span: pest::Span<'i>,
    ) -> Result<T, Self::Error> {
        // For better error indication, builtin functions are shadowed by name,
        // not by (name, arity).
        if let Some(overloads) = self.aliases_map.get_function_overloads(function.name) {
            // TODO: add support for keyword arguments
            function
                .ensure_no_keyword_arguments()
                .map_err(E::invalid_arguments)?;
            let Some((id, params, defn)) = overloads.find_by_arity(function.arity()) else {
                // No overload of this arity: report a range when the arities
                // are contiguous, otherwise list them individually.
                let min = overloads.min_arity();
                let max = overloads.max_arity();
                let err = if max - min + 1 == overloads.arities().len() {
                    function.invalid_arguments_count(min, Some(max))
                } else {
                    function.invalid_arguments_count_with_arities(overloads.arities())
                };
                return Err(E::invalid_arguments(err));
            };
            // Resolve arguments in the current scope, and pass them in to the alias
            // expansion scope.
            let args = fold_expression_nodes(self, function.args)?;
            let locals = params.iter().map(|s| s.as_str()).zip(args).collect();
            self.expand_defn(id, defn, locals, span)
        } else {
            let function = Box::new(fold_function_call_args(self, *function)?);
            Ok(T::function_call(function))
        }
    }
}
/// Expands aliases recursively.
pub fn expand_aliases<'i, T, P>(
node: ExpressionNode<'i, T>,
aliases_map: &'i AliasesMap<P, String>,
) -> Result<ExpressionNode<'i, T>, P::Error>
where
T: AliasExpandableExpression<'i> + Clone,
P: AliasDefinitionParser<Output<'i> = T>,
P::Error: AliasExpandError,
{
expand_aliases_with_locals(node, aliases_map, &HashMap::new())
}
/// Expands aliases recursively with the outermost local variables.
///
/// Local variables are similar to alias symbols, but are scoped. Alias symbols
/// are globally accessible from alias expressions, but local variables aren't.
pub fn expand_aliases_with_locals<'i, T, P>(
node: ExpressionNode<'i, T>,
aliases_map: &'i AliasesMap<P, String>,
locals: &HashMap<&'i str, ExpressionNode<'i, T>>,
) -> Result<ExpressionNode<'i, T>, P::Error>
where
T: AliasExpandableExpression<'i> + Clone,
P: AliasDefinitionParser<Output<'i> = T>,
P::Error: AliasExpandError,
{
let mut expander = AliasExpander {
aliases_map,
locals,
states: Vec::new(),
};
expander.fold_expression(node)
}
/// Collects similar names from the `candidates` list.
pub fn collect_similar<I>(name: &str, candidates: I) -> Vec<String>
where
I: IntoIterator,
I::Item: AsRef<str>,
{
candidates
.into_iter()
.filter(|cand| {
// The parameter is borrowed from clap f5540d26
strsim::jaro(name, cand.as_ref()) > 0.7
})
.map(|s| s.as_ref().to_owned())
.sorted_unstable()
.collect()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_expect_arguments() {
fn empty_span() -> pest::Span<'static> {
pest::Span::new("", 0, 0).unwrap()
}
fn function(
name: &'static str,
args: impl Into<Vec<ExpressionNode<'static, u32>>>,
keyword_args: impl Into<Vec<KeywordArgument<'static, u32>>>,
) -> FunctionCallNode<'static, u32> {
FunctionCallNode {
name,
name_span: empty_span(),
args: args.into(),
keyword_args: keyword_args.into(),
args_span: empty_span(),
}
}
fn value(v: u32) -> ExpressionNode<'static, u32> {
ExpressionNode::new(v, empty_span())
}
fn keyword(name: &'static str, v: u32) -> KeywordArgument<'static, u32> {
KeywordArgument {
name,
name_span: empty_span(),
value: value(v),
}
}
let f = function("foo", [], []);
assert!(f.expect_no_arguments().is_ok());
assert!(f.expect_some_arguments::<0>().is_ok());
assert!(f.expect_arguments::<0, 0>().is_ok());
assert!(f.expect_named_arguments::<0, 0>(&[]).is_ok());
let f = function("foo", [value(0)], []);
assert!(f.expect_no_arguments().is_err());
assert_eq!(
f.expect_some_arguments::<0>().unwrap(),
(&[], [value(0)].as_slice())
);
assert_eq!(
f.expect_some_arguments::<1>().unwrap(),
(&[value(0)], [].as_slice())
);
assert!(f.expect_arguments::<0, 0>().is_err());
assert_eq!(
f.expect_arguments::<0, 1>().unwrap(),
(&[], [Some(&value(0))])
);
assert_eq!(f.expect_arguments::<1, 1>().unwrap(), (&[value(0)], [None]));
assert!(f.expect_named_arguments::<0, 0>(&[]).is_err());
assert_eq!(
f.expect_named_arguments::<0, 1>(&["a"]).unwrap(),
([], [Some(&value(0))])
);
assert_eq!(
f.expect_named_arguments::<1, 0>(&["a"]).unwrap(),
([&value(0)], [])
);
let f = function("foo", [], [keyword("a", 0)]);
assert!(f.expect_no_arguments().is_err());
assert!(f.expect_some_arguments::<1>().is_err());
assert!(f.expect_arguments::<0, 1>().is_err());
assert!(f.expect_arguments::<1, 0>().is_err());
assert!(f.expect_named_arguments::<0, 0>(&[]).is_err());
assert!(f.expect_named_arguments::<0, 1>(&[]).is_err());
assert!(f.expect_named_arguments::<1, 0>(&[]).is_err());
assert_eq!(
f.expect_named_arguments::<1, 0>(&["a"]).unwrap(),
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/git_backend.rs | lib/src/git_backend.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::collections::HashSet;
use std::ffi::OsStr;
use std::fmt::Debug;
use std::fmt::Error;
use std::fmt::Formatter;
use std::fs;
use std::io;
use std::io::Cursor;
use std::path::Path;
use std::path::PathBuf;
use std::pin::Pin;
use std::process::Command;
use std::process::ExitStatus;
use std::str::Utf8Error;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::MutexGuard;
use std::time::SystemTime;
use async_trait::async_trait;
use futures::stream::BoxStream;
use gix::bstr::BString;
use gix::objs::CommitRefIter;
use gix::objs::WriteTo as _;
use itertools::Itertools as _;
use once_cell::sync::OnceCell as OnceLock;
use pollster::FutureExt as _;
use prost::Message as _;
use smallvec::SmallVec;
use thiserror::Error;
use tokio::io::AsyncRead;
use tokio::io::AsyncReadExt as _;
use crate::backend::Backend;
use crate::backend::BackendError;
use crate::backend::BackendInitError;
use crate::backend::BackendLoadError;
use crate::backend::BackendResult;
use crate::backend::ChangeId;
use crate::backend::Commit;
use crate::backend::CommitId;
use crate::backend::CopyHistory;
use crate::backend::CopyId;
use crate::backend::CopyRecord;
use crate::backend::FileId;
use crate::backend::MillisSinceEpoch;
use crate::backend::SecureSig;
use crate::backend::Signature;
use crate::backend::SigningFn;
use crate::backend::SymlinkId;
use crate::backend::Timestamp;
use crate::backend::Tree;
use crate::backend::TreeId;
use crate::backend::TreeValue;
use crate::backend::make_root_commit;
use crate::config::ConfigGetError;
use crate::file_util;
use crate::file_util::BadPathEncoding;
use crate::file_util::IoResultExt as _;
use crate::file_util::PathError;
use crate::git::GitSettings;
use crate::index::Index;
use crate::lock::FileLock;
use crate::merge::Merge;
use crate::merge::MergeBuilder;
use crate::object_id::ObjectId;
use crate::repo_path::RepoPath;
use crate::repo_path::RepoPathBuf;
use crate::repo_path::RepoPathComponentBuf;
use crate::settings::UserSettings;
use crate::stacked_table::MutableTable;
use crate::stacked_table::ReadonlyTable;
use crate::stacked_table::TableSegment as _;
use crate::stacked_table::TableStore;
use crate::stacked_table::TableStoreError;
const HASH_LENGTH: usize = 20;
const CHANGE_ID_LENGTH: usize = 16;
/// Ref namespace used only for preventing GC.
const NO_GC_REF_NAMESPACE: &str = "refs/jj/keep/";
pub const JJ_TREES_COMMIT_HEADER: &str = "jj:trees";
pub const JJ_CONFLICT_LABELS_COMMIT_HEADER: &str = "jj:conflict-labels";
pub const CHANGE_ID_COMMIT_HEADER: &str = "change-id";
#[derive(Debug, Error)]
pub enum GitBackendInitError {
#[error("Failed to initialize git repository")]
InitRepository(#[source] gix::init::Error),
#[error("Failed to open git repository")]
OpenRepository(#[source] gix::open::Error),
#[error("Failed to encode git repository path")]
EncodeRepositoryPath(#[source] BadPathEncoding),
#[error(transparent)]
Config(ConfigGetError),
#[error(transparent)]
Path(PathError),
}
impl From<Box<GitBackendInitError>> for BackendInitError {
fn from(err: Box<GitBackendInitError>) -> Self {
Self(err)
}
}
#[derive(Debug, Error)]
pub enum GitBackendLoadError {
#[error("Failed to open git repository")]
OpenRepository(#[source] gix::open::Error),
#[error("Failed to decode git repository path")]
DecodeRepositoryPath(#[source] BadPathEncoding),
#[error(transparent)]
Config(ConfigGetError),
#[error(transparent)]
Path(PathError),
}
impl From<Box<GitBackendLoadError>> for BackendLoadError {
fn from(err: Box<GitBackendLoadError>) -> Self {
Self(err)
}
}
/// `GitBackend`-specific error that may occur after the backend is loaded.
#[derive(Debug, Error)]
pub enum GitBackendError {
#[error("Failed to read non-git metadata")]
ReadMetadata(#[source] TableStoreError),
#[error("Failed to write non-git metadata")]
WriteMetadata(#[source] TableStoreError),
}
impl From<GitBackendError> for BackendError {
fn from(err: GitBackendError) -> Self {
Self::Other(err.into())
}
}
#[derive(Debug, Error)]
pub enum GitGcError {
#[error("Failed to run git gc command")]
GcCommand(#[source] std::io::Error),
#[error("git gc command exited with an error: {0}")]
GcCommandErrorStatus(ExitStatus),
}
pub struct GitBackend {
// While gix::Repository can be created from gix::ThreadSafeRepository, it's
// cheaper to cache the thread-local instance behind a mutex than creating
// one for each backend method call. Our GitBackend is most likely to be
// used in a single-threaded context.
base_repo: gix::ThreadSafeRepository,
repo: Mutex<gix::Repository>,
root_commit_id: CommitId,
root_change_id: ChangeId,
empty_tree_id: TreeId,
shallow_root_ids: OnceLock<Vec<CommitId>>,
extra_metadata_store: TableStore,
cached_extra_metadata: Mutex<Option<Arc<ReadonlyTable>>>,
git_executable: PathBuf,
write_change_id_header: bool,
}
impl GitBackend {
pub fn name() -> &'static str {
"git"
}
fn new(
base_repo: gix::ThreadSafeRepository,
extra_metadata_store: TableStore,
git_settings: GitSettings,
) -> Self {
let repo = Mutex::new(base_repo.to_thread_local());
let root_commit_id = CommitId::from_bytes(&[0; HASH_LENGTH]);
let root_change_id = ChangeId::from_bytes(&[0; CHANGE_ID_LENGTH]);
let empty_tree_id = TreeId::from_hex("4b825dc642cb6eb9a060e54bf8d69288fbee4904");
Self {
base_repo,
repo,
root_commit_id,
root_change_id,
empty_tree_id,
shallow_root_ids: OnceLock::new(),
extra_metadata_store,
cached_extra_metadata: Mutex::new(None),
git_executable: git_settings.executable_path,
write_change_id_header: git_settings.write_change_id_header,
}
}
pub fn init_internal(
settings: &UserSettings,
store_path: &Path,
) -> Result<Self, Box<GitBackendInitError>> {
let git_repo_path = Path::new("git");
let git_repo = gix::ThreadSafeRepository::init_opts(
store_path.join(git_repo_path),
gix::create::Kind::Bare,
gix::create::Options::default(),
gix_open_opts_from_settings(settings),
)
.map_err(GitBackendInitError::InitRepository)?;
let git_settings =
GitSettings::from_settings(settings).map_err(GitBackendInitError::Config)?;
Self::init_with_repo(store_path, git_repo_path, git_repo, git_settings)
}
/// Initializes backend by creating a new Git repo at the specified
/// workspace path. The workspace directory must exist.
pub fn init_colocated(
settings: &UserSettings,
store_path: &Path,
workspace_root: &Path,
) -> Result<Self, Box<GitBackendInitError>> {
let canonical_workspace_root = {
let path = store_path.join(workspace_root);
dunce::canonicalize(&path)
.context(&path)
.map_err(GitBackendInitError::Path)?
};
let git_repo = gix::ThreadSafeRepository::init_opts(
canonical_workspace_root,
gix::create::Kind::WithWorktree,
gix::create::Options::default(),
gix_open_opts_from_settings(settings),
)
.map_err(GitBackendInitError::InitRepository)?;
let git_repo_path = workspace_root.join(".git");
let git_settings =
GitSettings::from_settings(settings).map_err(GitBackendInitError::Config)?;
Self::init_with_repo(store_path, &git_repo_path, git_repo, git_settings)
}
/// Initializes backend with an existing Git repo at the specified path.
pub fn init_external(
settings: &UserSettings,
store_path: &Path,
git_repo_path: &Path,
) -> Result<Self, Box<GitBackendInitError>> {
let canonical_git_repo_path = {
let path = store_path.join(git_repo_path);
canonicalize_git_repo_path(&path)
.context(&path)
.map_err(GitBackendInitError::Path)?
};
let git_repo = gix::ThreadSafeRepository::open_opts(
canonical_git_repo_path,
gix_open_opts_from_settings(settings),
)
.map_err(GitBackendInitError::OpenRepository)?;
let git_settings =
GitSettings::from_settings(settings).map_err(GitBackendInitError::Config)?;
Self::init_with_repo(store_path, git_repo_path, git_repo, git_settings)
}
fn init_with_repo(
store_path: &Path,
git_repo_path: &Path,
repo: gix::ThreadSafeRepository,
git_settings: GitSettings,
) -> Result<Self, Box<GitBackendInitError>> {
let extra_path = store_path.join("extra");
fs::create_dir(&extra_path)
.context(&extra_path)
.map_err(GitBackendInitError::Path)?;
let target_path = store_path.join("git_target");
let git_repo_path = if cfg!(windows) && git_repo_path.is_relative() {
// When a repository is created in Windows, format the path with *forward
// slashes* and not backwards slashes. This makes it possible to use the same
// repository under Windows Subsystem for Linux.
//
// This only works for relative paths. If the path is absolute, there's not much
// we can do, and it simply won't work inside and outside WSL at the same time.
file_util::slash_path(git_repo_path)
} else {
git_repo_path.into()
};
let git_repo_path_bytes = file_util::path_to_bytes(&git_repo_path)
.map_err(GitBackendInitError::EncodeRepositoryPath)?;
fs::write(&target_path, git_repo_path_bytes)
.context(&target_path)
.map_err(GitBackendInitError::Path)?;
let extra_metadata_store = TableStore::init(extra_path, HASH_LENGTH);
Ok(Self::new(repo, extra_metadata_store, git_settings))
}
pub fn load(
settings: &UserSettings,
store_path: &Path,
) -> Result<Self, Box<GitBackendLoadError>> {
let git_repo_path = {
let target_path = store_path.join("git_target");
let git_repo_path_bytes = fs::read(&target_path)
.context(&target_path)
.map_err(GitBackendLoadError::Path)?;
let git_repo_path = file_util::path_from_bytes(&git_repo_path_bytes)
.map_err(GitBackendLoadError::DecodeRepositoryPath)?;
let git_repo_path = store_path.join(git_repo_path);
canonicalize_git_repo_path(&git_repo_path)
.context(&git_repo_path)
.map_err(GitBackendLoadError::Path)?
};
let repo = gix::ThreadSafeRepository::open_opts(
git_repo_path,
gix_open_opts_from_settings(settings),
)
.map_err(GitBackendLoadError::OpenRepository)?;
let extra_metadata_store = TableStore::load(store_path.join("extra"), HASH_LENGTH);
let git_settings =
GitSettings::from_settings(settings).map_err(GitBackendLoadError::Config)?;
Ok(Self::new(repo, extra_metadata_store, git_settings))
}
fn lock_git_repo(&self) -> MutexGuard<'_, gix::Repository> {
self.repo.lock().unwrap()
}
/// Returns new thread-local instance to access to the underlying Git repo.
pub fn git_repo(&self) -> gix::Repository {
self.base_repo.to_thread_local()
}
/// Path to the `.git` directory or the repository itself if it's bare.
pub fn git_repo_path(&self) -> &Path {
self.base_repo.path()
}
/// Path to the working directory if the repository isn't bare.
pub fn git_workdir(&self) -> Option<&Path> {
self.base_repo.work_dir()
}
fn shallow_root_ids(&self, git_repo: &gix::Repository) -> BackendResult<&[CommitId]> {
// The list of shallow roots is cached by gix, but it's still expensive
// to stat file on every read_object() call. Refreshing shallow roots is
// also bad for consistency reasons.
self.shallow_root_ids
.get_or_try_init(|| {
let maybe_oids = git_repo
.shallow_commits()
.map_err(|err| BackendError::Other(err.into()))?;
let commit_ids = maybe_oids.map_or(vec![], |oids| {
oids.iter()
.map(|oid| CommitId::from_bytes(oid.as_bytes()))
.collect()
});
Ok(commit_ids)
})
.map(AsRef::as_ref)
}
fn cached_extra_metadata_table(&self) -> BackendResult<Arc<ReadonlyTable>> {
let mut locked_head = self.cached_extra_metadata.lock().unwrap();
match locked_head.as_ref() {
Some(head) => Ok(head.clone()),
None => {
let table = self
.extra_metadata_store
.get_head()
.map_err(GitBackendError::ReadMetadata)?;
*locked_head = Some(table.clone());
Ok(table)
}
}
}
fn read_extra_metadata_table_locked(&self) -> BackendResult<(Arc<ReadonlyTable>, FileLock)> {
let table = self
.extra_metadata_store
.get_head_locked()
.map_err(GitBackendError::ReadMetadata)?;
Ok(table)
}
fn save_extra_metadata_table(
&self,
mut_table: MutableTable,
_table_lock: &FileLock,
) -> BackendResult<()> {
let table = self
.extra_metadata_store
.save_table(mut_table)
.map_err(GitBackendError::WriteMetadata)?;
// Since the parent table was the head, saved table are likely to be new head.
// If it's not, cache will be reloaded when entry can't be found.
*self.cached_extra_metadata.lock().unwrap() = Some(table);
Ok(())
}
/// Imports the given commits and ancestors from the backing Git repo.
///
/// The `head_ids` may contain commits that have already been imported, but
/// the caller should filter them out to eliminate redundant I/O processing.
#[tracing::instrument(skip(self, head_ids))]
pub fn import_head_commits<'a>(
&self,
head_ids: impl IntoIterator<Item = &'a CommitId>,
) -> BackendResult<()> {
let head_ids: HashSet<&CommitId> = head_ids
.into_iter()
.filter(|&id| *id != self.root_commit_id)
.collect();
if head_ids.is_empty() {
return Ok(());
}
// Create no-gc ref even if known to the extras table. Concurrent GC
// process might have deleted the no-gc ref.
let locked_repo = self.lock_git_repo();
locked_repo
.edit_references(head_ids.iter().copied().map(to_no_gc_ref_update))
.map_err(|err| BackendError::Other(Box::new(err)))?;
// These commits are imported from Git. Make our change ids persist (otherwise
// future write_commit() could reassign new change id.)
tracing::debug!(
heads_count = head_ids.len(),
"import extra metadata entries"
);
let (table, table_lock) = self.read_extra_metadata_table_locked()?;
let mut mut_table = table.start_mutation();
import_extra_metadata_entries_from_heads(
&locked_repo,
&mut mut_table,
&table_lock,
&head_ids,
self.shallow_root_ids(&locked_repo)?,
)?;
self.save_extra_metadata_table(mut_table, &table_lock)
}
fn read_file_sync(&self, id: &FileId) -> BackendResult<Vec<u8>> {
let git_blob_id = validate_git_object_id(id)?;
let locked_repo = self.lock_git_repo();
let mut blob = locked_repo
.find_object(git_blob_id)
.map_err(|err| map_not_found_err(err, id))?
.try_into_blob()
.map_err(|err| to_read_object_err(err, id))?;
Ok(blob.take_data())
}
fn new_diff_platform(&self) -> BackendResult<gix::diff::blob::Platform> {
let attributes = gix::worktree::Stack::new(
Path::new(""),
gix::worktree::stack::State::AttributesStack(Default::default()),
gix::worktree::glob::pattern::Case::Sensitive,
Vec::new(),
Vec::new(),
);
let filter = gix::diff::blob::Pipeline::new(
Default::default(),
gix::filter::plumbing::Pipeline::new(
self.git_repo()
.command_context()
.map_err(|err| BackendError::Other(Box::new(err)))?,
Default::default(),
),
Vec::new(),
Default::default(),
);
Ok(gix::diff::blob::Platform::new(
Default::default(),
filter,
gix::diff::blob::pipeline::Mode::ToGit,
attributes,
))
}
fn read_tree_for_commit<'repo>(
&self,
repo: &'repo gix::Repository,
id: &CommitId,
) -> BackendResult<gix::Tree<'repo>> {
let tree = self.read_commit(id).block_on()?.root_tree;
// TODO(kfm): probably want to do something here if it is a merge
let tree_id = tree.first().clone();
let gix_id = validate_git_object_id(&tree_id)?;
repo.find_object(gix_id)
.map_err(|err| map_not_found_err(err, &tree_id))?
.try_into_tree()
.map_err(|err| to_read_object_err(err, &tree_id))
}
}
/// Canonicalizes the given `path` except for the last `".git"` component.
///
/// The last path component matters when opening a Git repo without `core.bare`
/// config. This config is usually set, but the "repo" tool will set up such
/// repositories and symlinks. Opening such repo with fully-canonicalized path
/// would turn a colocated Git repo into a bare repo.
pub fn canonicalize_git_repo_path(path: &Path) -> io::Result<PathBuf> {
if path.ends_with(".git") {
let workdir = path.parent().unwrap();
dunce::canonicalize(workdir).map(|dir| dir.join(".git"))
} else {
dunce::canonicalize(path)
}
}
fn gix_open_opts_from_settings(settings: &UserSettings) -> gix::open::Options {
let user_name = settings.user_name();
let user_email = settings.user_email();
gix::open::Options::default()
.config_overrides([
// Committer has to be configured to record reflog. Author isn't
// needed, but let's copy the same values.
format!("author.name={user_name}"),
format!("author.email={user_email}"),
format!("committer.name={user_name}"),
format!("committer.email={user_email}"),
])
// The git_target path should point the repository, not the working directory.
.open_path_as_is(true)
// Gitoxide recommends this when correctness is preferred
.strict_config(true)
}
/// Parses the `jj:conflict-labels` header value if present.
fn extract_conflict_labels_from_commit(commit: &gix::objs::CommitRef) -> Merge<String> {
let Some(value) = commit
.extra_headers()
.find(JJ_CONFLICT_LABELS_COMMIT_HEADER)
else {
return Merge::resolved(String::new());
};
str::from_utf8(value)
.expect("labels should be valid utf8")
.split_terminator('\n')
.map(str::to_owned)
.collect::<MergeBuilder<_>>()
.build()
}
/// Parses the `jj:trees` header value if present, otherwise returns the
/// resolved tree ID from Git.
fn extract_root_tree_from_commit(commit: &gix::objs::CommitRef) -> Result<Merge<TreeId>, ()> {
let Some(value) = commit.extra_headers().find(JJ_TREES_COMMIT_HEADER) else {
let tree_id = TreeId::from_bytes(commit.tree().as_bytes());
return Ok(Merge::resolved(tree_id));
};
let mut tree_ids = SmallVec::new();
for hex in value.split(|b| *b == b' ') {
let tree_id = TreeId::try_from_hex(hex).ok_or(())?;
if tree_id.as_bytes().len() != HASH_LENGTH {
return Err(());
}
tree_ids.push(tree_id);
}
// It is invalid to use `jj:trees` with a non-conflicted tree. If this were
// allowed, it would be possible to construct a commit which appears to have
// different contents depending on whether it is viewed using `jj` or `git`.
if tree_ids.len() == 1 || tree_ids.len() % 2 == 0 {
return Err(());
}
Ok(Merge::from_vec(tree_ids))
}
fn commit_from_git_without_root_parent(
id: &CommitId,
git_object: &gix::Object,
is_shallow: bool,
) -> BackendResult<Commit> {
let decode_err = |err: gix::objs::decode::Error| to_read_object_err(err, id);
let commit = git_object
.try_to_commit_ref()
.map_err(|err| to_read_object_err(err, id))?;
// If the git header has a change-id field, we attempt to convert that to a
// valid JJ Change Id
let change_id = extract_change_id_from_commit(&commit)
.unwrap_or_else(|| synthetic_change_id_from_git_commit_id(id));
// shallow commits don't have parents their parents actually fetched, so we
// discard them here
// TODO: This causes issues when a shallow repository is deepened/unshallowed
let parents = if is_shallow {
vec![]
} else {
commit
.parents()
.map(|oid| CommitId::from_bytes(oid.as_bytes()))
.collect_vec()
};
// If the commit is a conflict, the conflict labels are stored in a commit
// header separately from the trees.
let conflict_labels = extract_conflict_labels_from_commit(&commit);
// Conflicted commits written before we started using the `jj:trees` header
// (~March 2024) may have the root trees stored in the extra metadata table
// instead. For such commits, we'll update the root tree later when we read the
// extra metadata.
let root_tree = extract_root_tree_from_commit(&commit)
.map_err(|()| to_read_object_err("Invalid jj:trees header", id))?;
// Use lossy conversion as commit message with "mojibake" is still better than
// nothing.
// TODO: what should we do with commit.encoding?
let description = String::from_utf8_lossy(commit.message).into_owned();
let author = signature_from_git(commit.author().map_err(decode_err)?);
let committer = signature_from_git(commit.committer().map_err(decode_err)?);
// If the commit is signed, extract both the signature and the signed data
// (which is the commit buffer with the gpgsig header omitted).
// We have to re-parse the raw commit data because gix CommitRef does not give
// us the sogned data, only the signature.
// Ideally, we could use try_to_commit_ref_iter at the beginning of this
// function and extract everything from that. For now, this works
let secure_sig = commit
.extra_headers
.iter()
// gix does not recognize gpgsig-sha256, but prevent future footguns by checking for it too
.any(|(k, _)| *k == "gpgsig" || *k == "gpgsig-sha256")
.then(|| CommitRefIter::signature(&git_object.data))
.transpose()
.map_err(decode_err)?
.flatten()
.map(|(sig, data)| SecureSig {
data: data.to_bstring().into(),
sig: sig.into_owned().into(),
});
Ok(Commit {
parents,
predecessors: vec![],
// If this commit has associated extra metadata, we may reset this later.
root_tree,
conflict_labels,
change_id,
description,
author,
committer,
secure_sig,
})
}
/// Extracts change id from commit headers.
pub fn extract_change_id_from_commit(commit: &gix::objs::CommitRef) -> Option<ChangeId> {
commit
.extra_headers()
.find(CHANGE_ID_COMMIT_HEADER)
.and_then(ChangeId::try_from_reverse_hex)
.filter(|val| val.as_bytes().len() == CHANGE_ID_LENGTH)
}
/// Deterministically creates a change id based on the commit id
///
/// Used when we get a commit without a change id. The exact algorithm for the
/// computation should not be relied upon.
pub fn synthetic_change_id_from_git_commit_id(id: &CommitId) -> ChangeId {
// We reverse the bits of the commit id to create the change id. We don't
// want to use the first bytes unmodified because then it would be ambiguous
// if a given hash prefix refers to the commit id or the change id. It would
// have been enough to pick the last 16 bytes instead of the leading 16
// bytes to address that. We also reverse the bits to make it less likely
// that users depend on any relationship between the two ids.
let bytes = id.as_bytes()[4..HASH_LENGTH]
.iter()
.rev()
.map(|b| b.reverse_bits())
.collect();
ChangeId::new(bytes)
}
const EMPTY_STRING_PLACEHOLDER: &str = "JJ_EMPTY_STRING";
fn signature_from_git(signature: gix::actor::SignatureRef) -> Signature {
let name = signature.name;
let name = if name != EMPTY_STRING_PLACEHOLDER {
String::from_utf8_lossy(name).into_owned()
} else {
"".to_string()
};
let email = signature.email;
let email = if email != EMPTY_STRING_PLACEHOLDER {
String::from_utf8_lossy(email).into_owned()
} else {
"".to_string()
};
let time = signature.time().unwrap_or_default();
let timestamp = MillisSinceEpoch(time.seconds * 1000);
let tz_offset = time.offset.div_euclid(60); // in minutes
Signature {
name,
email,
timestamp: Timestamp {
timestamp,
tz_offset,
},
}
}
fn signature_to_git(signature: &Signature) -> gix::actor::Signature {
// git does not support empty names or emails
let name = if !signature.name.is_empty() {
&signature.name
} else {
EMPTY_STRING_PLACEHOLDER
};
let email = if !signature.email.is_empty() {
&signature.email
} else {
EMPTY_STRING_PLACEHOLDER
};
let time = gix::date::Time::new(
signature.timestamp.timestamp.0.div_euclid(1000),
signature.timestamp.tz_offset * 60, // in seconds
);
gix::actor::Signature {
name: name.into(),
email: email.into(),
time,
}
}
fn serialize_extras(commit: &Commit) -> Vec<u8> {
let mut proto = crate::protos::git_store::Commit {
change_id: commit.change_id.to_bytes(),
..Default::default()
};
proto.uses_tree_conflict_format = true;
if !commit.root_tree.is_resolved() {
// This is done for the sake of jj versions <0.28 (before commit
// f7b14be) being able to read the repo. At some point in the
// future, we can stop doing it.
proto.root_tree = commit.root_tree.iter().map(|r| r.to_bytes()).collect();
}
for predecessor in &commit.predecessors {
proto.predecessors.push(predecessor.to_bytes());
}
proto.encode_to_vec()
}
fn deserialize_extras(commit: &mut Commit, bytes: &[u8]) {
let proto = crate::protos::git_store::Commit::decode(bytes).unwrap();
if !proto.change_id.is_empty() {
commit.change_id = ChangeId::new(proto.change_id);
}
if commit.root_tree.is_resolved()
&& proto.uses_tree_conflict_format
&& !proto.root_tree.is_empty()
{
let merge_builder: MergeBuilder<_> = proto
.root_tree
.iter()
.map(|id_bytes| TreeId::from_bytes(id_bytes))
.collect();
commit.root_tree = merge_builder.build();
}
for predecessor in &proto.predecessors {
commit.predecessors.push(CommitId::from_bytes(predecessor));
}
}
/// Returns `RefEdit` that will create a ref in `refs/jj/keep` if not exist.
/// Used for preventing GC of commits we create.
fn to_no_gc_ref_update(id: &CommitId) -> gix::refs::transaction::RefEdit {
let name = format!("{NO_GC_REF_NAMESPACE}{id}");
let new = gix::refs::Target::Object(gix::ObjectId::from_bytes_or_panic(id.as_bytes()));
let expected = gix::refs::transaction::PreviousValue::ExistingMustMatch(new.clone());
gix::refs::transaction::RefEdit {
change: gix::refs::transaction::Change::Update {
log: gix::refs::transaction::LogChange {
message: "used by jj".into(),
..Default::default()
},
expected,
new,
},
name: name.try_into().unwrap(),
deref: false,
}
}
fn to_ref_deletion(git_ref: gix::refs::Reference) -> gix::refs::transaction::RefEdit {
let expected = gix::refs::transaction::PreviousValue::ExistingMustMatch(git_ref.target);
gix::refs::transaction::RefEdit {
change: gix::refs::transaction::Change::Delete {
expected,
log: gix::refs::transaction::RefLog::AndReference,
},
name: git_ref.name,
deref: false,
}
}
/// Recreates `refs/jj/keep` refs for the `new_heads`, and removes the other
/// unreachable and non-head refs.
fn recreate_no_gc_refs(
git_repo: &gix::Repository,
new_heads: impl IntoIterator<Item = CommitId>,
keep_newer: SystemTime,
) -> BackendResult<()> {
// Calculate diff between existing no-gc refs and new heads.
let new_heads: HashSet<CommitId> = new_heads.into_iter().collect();
let mut no_gc_refs_to_keep_count: usize = 0;
let mut no_gc_refs_to_delete: Vec<gix::refs::Reference> = Vec::new();
let git_references = git_repo
.references()
.map_err(|err| BackendError::Other(err.into()))?;
let no_gc_refs_iter = git_references
.prefixed(NO_GC_REF_NAMESPACE)
.map_err(|err| BackendError::Other(err.into()))?;
for git_ref in no_gc_refs_iter {
let git_ref = git_ref.map_err(BackendError::Other)?.detach();
let oid = git_ref.target.try_id().ok_or_else(|| {
let name = git_ref.name.as_bstr();
BackendError::Other(format!("Symbolic no-gc ref found: {name}").into())
})?;
let id = CommitId::from_bytes(oid.as_bytes());
let name_good = git_ref.name.as_bstr()[NO_GC_REF_NAMESPACE.len()..] == id.hex();
if new_heads.contains(&id) && name_good {
no_gc_refs_to_keep_count += 1;
continue;
}
// Check timestamp of loose ref, but this is still racy on re-import
// because:
// - existing packed ref won't be demoted to loose ref
// - existing loose ref won't be touched
//
// TODO: might be better to switch to a dummy merge, where new no-gc ref
// will always have a unique name. Doing that with the current
// ref-per-head strategy would increase the number of the no-gc refs.
// https://github.com/jj-vcs/jj/pull/2659#issuecomment-1837057782
let loose_ref_path = git_repo.path().join(git_ref.name.to_path());
if let Ok(metadata) = loose_ref_path.metadata() {
let mtime = metadata.modified().expect("unsupported platform?");
if mtime > keep_newer {
tracing::trace!(?git_ref, "not deleting new");
no_gc_refs_to_keep_count += 1;
continue;
}
}
// Also deletes no-gc ref of random name created by old jj.
tracing::trace!(?git_ref, ?name_good, "will delete");
no_gc_refs_to_delete.push(git_ref);
}
tracing::info!(
new_heads_count = new_heads.len(),
no_gc_refs_to_keep_count,
no_gc_refs_to_delete_count = no_gc_refs_to_delete.len(),
"collected reachable refs"
);
// It's slow to delete packed refs one by one, so update refs all at once.
let ref_edits = itertools::chain(
no_gc_refs_to_delete.into_iter().map(to_ref_deletion),
new_heads.iter().map(to_no_gc_ref_update),
);
git_repo
.edit_references(ref_edits)
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/graph.rs | lib/src/graph.rs | // Copyright 2021-2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::hash::Hash;
/// Node and edges pair of type `N` and `ID` respectively.
///
/// `ID` uniquely identifies a node within the graph. It's usually cheap to
/// clone. There should be a pure `(&N) -> &ID` function.
pub type GraphNode<N, ID = N> = (N, Vec<GraphEdge<ID>>);
/// An outgoing edge of a graph node, pointing at `target`.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub struct GraphEdge<N> {
    pub target: N,
    pub edge_type: GraphEdgeType,
}
impl<N> GraphEdge<N> {
    /// Internal helper shared by the public constructors.
    fn with_type(target: N, edge_type: GraphEdgeType) -> Self {
        Self { target, edge_type }
    }
    /// Edge to a target that is not present in the graph.
    pub fn missing(target: N) -> Self {
        Self::with_type(target, GraphEdgeType::Missing)
    }
    /// Edge to an immediate parent.
    pub fn direct(target: N) -> Self {
        Self::with_type(target, GraphEdgeType::Direct)
    }
    /// Edge to a non-immediate ancestor.
    pub fn indirect(target: N) -> Self {
        Self::with_type(target, GraphEdgeType::Indirect)
    }
    /// Transforms the edge target, keeping the edge type.
    pub fn map<M>(self, f: impl FnOnce(N) -> M) -> GraphEdge<M> {
        GraphEdge::with_type(f(self.target), self.edge_type)
    }
    pub fn is_missing(&self) -> bool {
        matches!(self.edge_type, GraphEdgeType::Missing)
    }
    pub fn is_direct(&self) -> bool {
        matches!(self.edge_type, GraphEdgeType::Direct)
    }
    pub fn is_indirect(&self) -> bool {
        matches!(self.edge_type, GraphEdgeType::Indirect)
    }
}
/// Classification of a [`GraphEdge`].
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub enum GraphEdgeType {
    Missing,
    Direct,
    Indirect,
}
/// Iterates the targets of `edges` that are actually present in the graph,
/// i.e. skips `Missing` edges.
fn reachable_targets<N>(edges: &[GraphEdge<N>]) -> impl DoubleEndedIterator<Item = &N> {
    edges.iter().filter_map(|edge| {
        if edge.is_missing() {
            None
        } else {
            Some(&edge.target)
        }
    })
}
/// Creates new graph in which nodes and edges are reversed.
pub fn reverse_graph<N, ID: Clone + Eq + Hash, E>(
    input: impl Iterator<Item = Result<GraphNode<N, ID>, E>>,
    as_id: impl Fn(&N) -> &ID,
) -> Result<Vec<GraphNode<N, ID>>, E> {
    // Collect every node, and index each forward edge by its (old) target so
    // the reversed edge can later be attached to that target node.
    let mut nodes = vec![];
    let mut edges_by_old_target: HashMap<ID, Vec<GraphEdge<ID>>> = HashMap::new();
    for result in input {
        let (node, edges) = result?;
        for edge in edges {
            let reversed = GraphEdge {
                target: as_id(&node).clone(),
                edge_type: edge.edge_type,
            };
            edges_by_old_target.entry(edge.target).or_default().push(reversed);
        }
        nodes.push(node);
    }
    // Emit nodes in reverse input order, attaching their reversed edges.
    let reversed = nodes
        .into_iter()
        .rev()
        .map(|node| {
            let edges = edges_by_old_target.remove(as_id(&node)).unwrap_or_default();
            (node, edges)
        })
        .collect();
    Ok(reversed)
}
/// Graph iterator adapter to group topological branches.
///
/// Basic idea is DFS from the heads. At fork point, the other descendant
/// branches will be visited. At merge point, the second (or the last) ancestor
/// branch will be visited first. This is practically [the same as Git][Git].
///
/// If no branches are prioritized, the branch containing the first commit in
/// the input iterator will be emitted first. It is often the working-copy
/// ancestor branch. The other head branches won't be enqueued eagerly, and will
/// be emitted as late as possible.
///
/// [Git]: https://github.blog/2022-08-30-gits-database-internals-ii-commit-history-queries/#topological-sorting
#[derive(Clone, Debug)]
pub struct TopoGroupedGraphIterator<N, ID, I, F> {
    /// Source iterator; must yield nodes in topological order.
    input_iter: I,
    /// Projects a node to the id that uniquely identifies it in the graph.
    as_id: F,
    /// Graph nodes read from the input iterator but not yet emitted.
    nodes: HashMap<ID, TopoGroupedGraphNode<N, ID>>,
    /// Stack of graph nodes to be emitted.
    emittable_ids: Vec<ID>,
    /// List of new head nodes found while processing unpopulated nodes, or
    /// prioritized branch nodes added by caller.
    new_head_ids: VecDeque<ID>,
    /// Set of nodes which may be ancestors of `new_head_ids`.
    blocked_ids: HashSet<ID>,
}
/// Per-node bookkeeping record used by [`TopoGroupedGraphIterator`].
#[derive(Clone, Debug)]
struct TopoGroupedGraphNode<N, ID> {
    /// Graph nodes which must be emitted before.
    child_ids: HashSet<ID>,
    /// Graph node data and edges to parent nodes. `None` until this node is
    /// populated.
    item: Option<GraphNode<N, ID>>,
}
impl<N, ID> Default for TopoGroupedGraphNode<N, ID> {
    // Implemented by hand because deriving `Default` would add unwanted
    // `N: Default` and `ID: Default` bounds.
    fn default() -> Self {
        Self {
            child_ids: HashSet::new(),
            item: None,
        }
    }
}
impl<N, ID, E, I, F> TopoGroupedGraphIterator<N, ID, I, F>
where
    ID: Clone + Hash + Eq,
    I: Iterator<Item = Result<GraphNode<N, ID>, E>>,
    F: Fn(&N) -> &ID,
{
    /// Wraps the given iterator to group topological branches. The input
    /// iterator must be topologically ordered.
    pub fn new(input_iter: I, as_id: F) -> Self {
        Self {
            input_iter,
            as_id,
            nodes: HashMap::new(),
            emittable_ids: Vec::new(),
            new_head_ids: VecDeque::new(),
            blocked_ids: HashSet::new(),
        }
    }
    /// Makes the branch containing the specified node be emitted earlier than
    /// the others.
    ///
    /// The `id` usually points to a head node, but this isn't a requirement.
    /// If the specified node isn't a head, all preceding nodes will be queued.
    ///
    /// The specified node must exist in the input iterator. If it didn't, the
    /// iterator would panic.
    pub fn prioritize_branch(&mut self, id: ID) {
        // Mark existence of unpopulated node
        self.nodes.entry(id.clone()).or_default();
        // Push to non-emitting list so the prioritized heads wouldn't be
        // interleaved
        self.new_head_ids.push_back(id);
    }
    /// Reads one node from the input iterator into `self.nodes`.
    ///
    /// Returns `Ok(None)` when the input iterator is exhausted.
    fn populate_one(&mut self) -> Result<Option<()>, E> {
        let item = match self.input_iter.next() {
            Some(Ok(item)) => item,
            Some(Err(err)) => {
                return Err(err);
            }
            None => {
                return Ok(None);
            }
        };
        let (data, edges) = &item;
        let current_id = (self.as_id)(data);
        // Set up reverse reference
        for parent_id in reachable_targets(edges) {
            let parent_node = self.nodes.entry(parent_id.clone()).or_default();
            parent_node.child_ids.insert(current_id.clone());
        }
        // Populate the current node
        if let Some(current_node) = self.nodes.get_mut(current_id) {
            assert!(current_node.item.is_none());
            current_node.item = Some(item);
        } else {
            let current_id = current_id.clone();
            let current_node = TopoGroupedGraphNode {
                item: Some(item),
                ..Default::default()
            };
            self.nodes.insert(current_id.clone(), current_node);
            // Push to non-emitting list so the new head wouldn't be interleaved
            self.new_head_ids.push_back(current_id);
        }
        Ok(Some(()))
    }
    /// Enqueues the first new head which will unblock the waiting ancestors.
    ///
    /// This does not move multiple head nodes to the queue at once because
    /// heads may be connected to the fork points in arbitrary order.
    fn flush_new_head(&mut self) {
        assert!(!self.new_head_ids.is_empty());
        if self.blocked_ids.is_empty() || self.new_head_ids.len() <= 1 {
            // Fast path: orphaned or no choice
            let new_head_id = self.new_head_ids.pop_front().unwrap();
            self.emittable_ids.push(new_head_id);
            self.blocked_ids.clear();
            return;
        }
        // Mark descendant nodes reachable from the blocking nodes
        let mut to_visit: Vec<&ID> = self
            .blocked_ids
            .iter()
            .map(|id| {
                // Borrow from self.nodes so self.blocked_ids can be mutated later
                let (id, _) = self.nodes.get_key_value(id).unwrap();
                id
            })
            .collect();
        let mut visited: HashSet<&ID> = to_visit.iter().copied().collect();
        while let Some(id) = to_visit.pop() {
            let node = self.nodes.get(id).unwrap();
            to_visit.extend(node.child_ids.iter().filter(|id| visited.insert(id)));
        }
        // Pick the first reachable head
        let index = self
            .new_head_ids
            .iter()
            .position(|id| visited.contains(id))
            .expect("blocking head should exist");
        let new_head_id = self.new_head_ids.remove(index).unwrap();
        // Unmark ancestors of the selected head so they won't contribute to future
        // new-head resolution within the newly-unblocked sub graph. The sub graph
        // can have many fork points, and the corresponding heads should be picked in
        // the fork-point order, not in the head appearance order.
        to_visit.push(&new_head_id);
        visited.remove(&new_head_id);
        while let Some(id) = to_visit.pop() {
            let node = self.nodes.get(id).unwrap();
            if let Some((_, edges)) = &node.item {
                to_visit.extend(reachable_targets(edges).filter(|id| visited.remove(id)));
            }
        }
        self.blocked_ids.retain(|id| visited.contains(id));
        self.emittable_ids.push(new_head_id);
    }
    /// Emits the next node in topo-grouped order, pulling more input nodes as
    /// needed. Returns `Ok(None)` when everything has been emitted.
    fn next_node(&mut self) -> Result<Option<GraphNode<N, ID>>, E> {
        // Based on Kahn's algorithm
        loop {
            if let Some(current_id) = self.emittable_ids.last() {
                let Some(current_node) = self.nodes.get_mut(current_id) else {
                    // Queued twice because new children populated and emitted
                    self.emittable_ids.pop().unwrap();
                    continue;
                };
                if !current_node.child_ids.is_empty() {
                    // New children populated after emitting the other
                    let current_id = self.emittable_ids.pop().unwrap();
                    self.blocked_ids.insert(current_id);
                    continue;
                }
                let Some(item) = current_node.item.take() else {
                    // Not yet populated
                    self.populate_one()?
                        .expect("parent or prioritized node should exist");
                    continue;
                };
                // The second (or the last) parent will be visited first
                let current_id = self.emittable_ids.pop().unwrap();
                // Fixed mojibake: "&current_id" had been corrupted to "¤t_id".
                self.nodes.remove(&current_id).unwrap();
                let (_, edges) = &item;
                for parent_id in reachable_targets(edges) {
                    let parent_node = self.nodes.get_mut(parent_id).unwrap();
                    parent_node.child_ids.remove(&current_id);
                    if parent_node.child_ids.is_empty() {
                        let reusable_id = self.blocked_ids.take(parent_id);
                        let parent_id = reusable_id.unwrap_or_else(|| parent_id.clone());
                        self.emittable_ids.push(parent_id);
                    } else {
                        self.blocked_ids.insert(parent_id.clone());
                    }
                }
                return Ok(Some(item));
            } else if !self.new_head_ids.is_empty() {
                self.flush_new_head();
            } else {
                // Populate the first or orphan head
                if self.populate_one()?.is_none() {
                    return Ok(None);
                }
            }
        }
    }
}
impl<N, ID, E, I, F> Iterator for TopoGroupedGraphIterator<N, ID, I, F>
where
    ID: Clone + Hash + Eq,
    I: Iterator<Item = Result<GraphNode<N, ID>, E>>,
    F: Fn(&N) -> &ID,
{
    type Item = Result<GraphNode<N, ID>, E>;
    fn next(&mut self) -> Option<Self::Item> {
        let result = self.next_node();
        if let Ok(None) = &result {
            // Exhausted: every buffered node must have been handed out.
            assert!(self.nodes.is_empty(), "all nodes should have been emitted");
        }
        // Ok(Some(x)) -> Some(Ok(x)), Ok(None) -> None, Err(e) -> Some(Err(e))
        result.transpose()
    }
}
#[cfg(test)]
mod tests {
use std::convert::Infallible;
use itertools::Itertools as _;
use renderdag::Ancestor;
use renderdag::GraphRowRenderer;
use renderdag::Renderer as _;
use super::*;
fn missing(c: char) -> GraphEdge<char> {
GraphEdge::missing(c)
}
fn direct(c: char) -> GraphEdge<char> {
GraphEdge::direct(c)
}
fn indirect(c: char) -> GraphEdge<char> {
GraphEdge::indirect(c)
}
fn format_edge(edge: &GraphEdge<char>) -> String {
let c = edge.target;
match edge.edge_type {
GraphEdgeType::Missing => format!("missing({c})"),
GraphEdgeType::Direct => format!("direct({c})"),
GraphEdgeType::Indirect => format!("indirect({c})"),
}
}
fn format_graph(
graph_iter: impl IntoIterator<Item = Result<GraphNode<char>, Infallible>>,
) -> String {
let mut renderer = GraphRowRenderer::new()
.output()
.with_min_row_height(2)
.build_box_drawing();
graph_iter
.into_iter()
.map(|item| match item {
Ok(node) => node,
Err(err) => match err {},
})
.map(|(id, edges)| {
let glyph = id.to_string();
let message = edges.iter().map(format_edge).join(", ");
let parents = edges
.into_iter()
.map(|edge| match edge.edge_type {
GraphEdgeType::Missing => Ancestor::Anonymous,
GraphEdgeType::Direct => Ancestor::Parent(edge.target),
GraphEdgeType::Indirect => Ancestor::Ancestor(edge.target),
})
.collect();
renderer.next_row(id, parents, glyph, message)
})
.collect()
}
#[test]
fn test_format_graph() {
let graph = [
('D', vec![direct('C'), indirect('B')]),
('C', vec![direct('A')]),
('B', vec![missing('X')]),
('A', vec![]),
]
.map(Ok);
insta::assert_snapshot!(format_graph(graph), @r"
D direct(C), indirect(B)
├─╮
C ╷ direct(A)
│ ╷
│ B missing(X)
│ │
│ ~
│
A
");
}
type TopoGrouped<N, I> = TopoGroupedGraphIterator<N, N, I, fn(&N) -> &N>;
fn topo_grouped<I, E>(graph_iter: I) -> TopoGrouped<char, I::IntoIter>
where
I: IntoIterator<Item = Result<GraphNode<char>, E>>,
{
TopoGroupedGraphIterator::new(graph_iter.into_iter(), |c| c)
}
#[test]
fn test_topo_grouped_multiple_roots() {
let graph = [
('C', vec![missing('Y')]),
('B', vec![missing('X')]),
('A', vec![]),
]
.map(Ok);
insta::assert_snapshot!(format_graph(graph.iter().cloned()), @r"
C missing(Y)
│
~
B missing(X)
│
~
A
");
insta::assert_snapshot!(format_graph(topo_grouped(graph.iter().cloned())), @r"
C missing(Y)
│
~
B missing(X)
│
~
A
");
// All nodes can be lazily emitted.
let mut iter = topo_grouped(graph.iter().cloned().peekable());
assert_eq!(iter.next().unwrap().unwrap().0, 'C');
assert_eq!(iter.input_iter.peek().unwrap().as_ref().unwrap().0, 'B');
assert_eq!(iter.next().unwrap().unwrap().0, 'B');
assert_eq!(iter.input_iter.peek().unwrap().as_ref().unwrap().0, 'A');
}
#[test]
fn test_topo_grouped_trivial_fork() {
let graph = [
('E', vec![direct('B')]),
('D', vec![direct('A')]),
('C', vec![direct('B')]),
('B', vec![direct('A')]),
('A', vec![]),
]
.map(Ok);
insta::assert_snapshot!(format_graph(graph.iter().cloned()), @r"
E direct(B)
│
│ D direct(A)
│ │
│ │ C direct(B)
├───╯
B │ direct(A)
├─╯
A
");
// D-A is found earlier than B-A, but B is emitted first because it belongs to
// the emitting branch.
insta::assert_snapshot!(format_graph(topo_grouped(graph.iter().cloned())), @r"
E direct(B)
│
│ C direct(B)
├─╯
B direct(A)
│
│ D direct(A)
├─╯
A
");
// E can be lazy, then D and C will be queued.
let mut iter = topo_grouped(graph.iter().cloned().peekable());
assert_eq!(iter.next().unwrap().unwrap().0, 'E');
assert_eq!(iter.input_iter.peek().unwrap().as_ref().unwrap().0, 'D');
assert_eq!(iter.next().unwrap().unwrap().0, 'C');
assert_eq!(iter.input_iter.peek().unwrap().as_ref().unwrap().0, 'B');
assert_eq!(iter.next().unwrap().unwrap().0, 'B');
assert_eq!(iter.input_iter.peek().unwrap().as_ref().unwrap().0, 'A');
}
#[test]
fn test_topo_grouped_fork_interleaved() {
let graph = [
('F', vec![direct('D')]),
('E', vec![direct('C')]),
('D', vec![direct('B')]),
('C', vec![direct('B')]),
('B', vec![direct('A')]),
('A', vec![]),
]
.map(Ok);
insta::assert_snapshot!(format_graph(graph.iter().cloned()), @r"
F direct(D)
│
│ E direct(C)
│ │
D │ direct(B)
│ │
│ C direct(B)
├─╯
B direct(A)
│
A
");
insta::assert_snapshot!(format_graph(topo_grouped(graph.iter().cloned())), @r"
F direct(D)
│
D direct(B)
│
│ E direct(C)
│ │
│ C direct(B)
├─╯
B direct(A)
│
A
");
// F can be lazy, then E will be queued, then C.
let mut iter = topo_grouped(graph.iter().cloned().peekable());
assert_eq!(iter.next().unwrap().unwrap().0, 'F');
assert_eq!(iter.input_iter.peek().unwrap().as_ref().unwrap().0, 'E');
assert_eq!(iter.next().unwrap().unwrap().0, 'D');
assert_eq!(iter.input_iter.peek().unwrap().as_ref().unwrap().0, 'C');
assert_eq!(iter.next().unwrap().unwrap().0, 'E');
assert_eq!(iter.input_iter.peek().unwrap().as_ref().unwrap().0, 'B');
}
#[test]
fn test_topo_grouped_fork_multiple_heads() {
let graph = [
('I', vec![direct('E')]),
('H', vec![direct('C')]),
('G', vec![direct('A')]),
('F', vec![direct('E')]),
('E', vec![direct('C')]),
('D', vec![direct('C')]),
('C', vec![direct('A')]),
('B', vec![direct('A')]),
('A', vec![]),
]
.map(Ok);
insta::assert_snapshot!(format_graph(graph.iter().cloned()), @r"
I direct(E)
│
│ H direct(C)
│ │
│ │ G direct(A)
│ │ │
│ │ │ F direct(E)
├─────╯
E │ │ direct(C)
├─╯ │
│ D │ direct(C)
├─╯ │
C │ direct(A)
├───╯
│ B direct(A)
├─╯
A
");
insta::assert_snapshot!(format_graph(topo_grouped(graph.iter().cloned())), @r"
I direct(E)
│
│ F direct(E)
├─╯
E direct(C)
│
│ H direct(C)
├─╯
│ D direct(C)
├─╯
C direct(A)
│
│ G direct(A)
├─╯
│ B direct(A)
├─╯
A
");
// I can be lazy, then H, G, and F will be queued.
let mut iter = topo_grouped(graph.iter().cloned().peekable());
assert_eq!(iter.next().unwrap().unwrap().0, 'I');
assert_eq!(iter.input_iter.peek().unwrap().as_ref().unwrap().0, 'H');
assert_eq!(iter.next().unwrap().unwrap().0, 'F');
assert_eq!(iter.input_iter.peek().unwrap().as_ref().unwrap().0, 'E');
}
#[test]
fn test_topo_grouped_fork_parallel() {
let graph = [
// Pull all sub graphs in reverse order:
('I', vec![direct('A')]),
('H', vec![direct('C')]),
('G', vec![direct('E')]),
// Orphan sub graph G,F-E:
('F', vec![direct('E')]),
('E', vec![missing('Y')]),
// Orphan sub graph H,D-C:
('D', vec![direct('C')]),
('C', vec![missing('X')]),
// Orphan sub graph I,B-A:
('B', vec![direct('A')]),
('A', vec![]),
]
.map(Ok);
insta::assert_snapshot!(format_graph(graph.iter().cloned()), @r"
I direct(A)
│
│ H direct(C)
│ │
│ │ G direct(E)
│ │ │
│ │ │ F direct(E)
│ │ ├─╯
│ │ E missing(Y)
│ │ │
│ │ ~
│ │
│ │ D direct(C)
│ ├─╯
│ C missing(X)
│ │
│ ~
│
│ B direct(A)
├─╯
A
");
insta::assert_snapshot!(format_graph(topo_grouped(graph.iter().cloned())), @r"
I direct(A)
│
│ B direct(A)
├─╯
A
H direct(C)
│
│ D direct(C)
├─╯
C missing(X)
│
~
G direct(E)
│
│ F direct(E)
├─╯
E missing(Y)
│
~
");
}
#[test]
fn test_topo_grouped_fork_nested() {
fn sub_graph(
chars: impl IntoIterator<Item = char>,
root_edges: Vec<GraphEdge<char>>,
) -> Vec<GraphNode<char>> {
let [b, c, d, e, f]: [char; 5] = chars.into_iter().collect_vec().try_into().unwrap();
vec![
(f, vec![direct(c)]),
(e, vec![direct(b)]),
(d, vec![direct(c)]),
(c, vec![direct(b)]),
(b, root_edges),
]
}
// One nested fork sub graph from A
let graph = itertools::chain!(
vec![('G', vec![direct('A')])],
sub_graph('B'..='F', vec![direct('A')]),
vec![('A', vec![])],
)
.map(Ok)
.collect_vec();
insta::assert_snapshot!(format_graph(graph.iter().cloned()), @r"
G direct(A)
│
│ F direct(C)
│ │
│ │ E direct(B)
│ │ │
│ │ │ D direct(C)
│ ├───╯
│ C │ direct(B)
│ ├─╯
│ B direct(A)
├─╯
A
");
// A::F is picked at A, and A will be unblocked. Then, C::D at C, ...
insta::assert_snapshot!(format_graph(topo_grouped(graph.iter().cloned())), @r"
G direct(A)
│
│ F direct(C)
│ │
│ │ D direct(C)
│ ├─╯
│ C direct(B)
│ │
│ │ E direct(B)
│ ├─╯
│ B direct(A)
├─╯
A
");
// Two nested fork sub graphs from A
let graph = itertools::chain!(
vec![('L', vec![direct('A')])],
sub_graph('G'..='K', vec![direct('A')]),
sub_graph('B'..='F', vec![direct('A')]),
vec![('A', vec![])],
)
.map(Ok)
.collect_vec();
insta::assert_snapshot!(format_graph(graph.iter().cloned()), @r"
L direct(A)
│
│ K direct(H)
│ │
│ │ J direct(G)
│ │ │
│ │ │ I direct(H)
│ ├───╯
│ H │ direct(G)
│ ├─╯
│ G direct(A)
├─╯
│ F direct(C)
│ │
│ │ E direct(B)
│ │ │
│ │ │ D direct(C)
│ ├───╯
│ C │ direct(B)
│ ├─╯
│ B direct(A)
├─╯
A
");
// A::K is picked at A, and A will be unblocked. Then, H::I at H, ...
insta::assert_snapshot!(format_graph(topo_grouped(graph.iter().cloned())), @r"
L direct(A)
│
│ K direct(H)
│ │
│ │ I direct(H)
│ ├─╯
│ H direct(G)
│ │
│ │ J direct(G)
│ ├─╯
│ G direct(A)
├─╯
│ F direct(C)
│ │
│ │ D direct(C)
│ ├─╯
│ C direct(B)
│ │
│ │ E direct(B)
│ ├─╯
│ B direct(A)
├─╯
A
");
// Two nested fork sub graphs from A, interleaved
let graph = itertools::chain!(
vec![('L', vec![direct('A')])],
sub_graph(['C', 'E', 'G', 'I', 'K'], vec![direct('A')]),
sub_graph(['B', 'D', 'F', 'H', 'J'], vec![direct('A')]),
vec![('A', vec![])],
)
.sorted_by(|(id1, _), (id2, _)| id2.cmp(id1))
.map(Ok)
.collect_vec();
insta::assert_snapshot!(format_graph(graph.iter().cloned()), @r"
L direct(A)
│
│ K direct(E)
│ │
│ │ J direct(D)
│ │ │
│ │ │ I direct(C)
│ │ │ │
│ │ │ │ H direct(B)
│ │ │ │ │
│ │ │ │ │ G direct(E)
│ ├───────╯
│ │ │ │ │ F direct(D)
│ │ ├─────╯
│ E │ │ │ direct(C)
│ ├───╯ │
│ │ D │ direct(B)
│ │ ├───╯
│ C │ direct(A)
├─╯ │
│ B direct(A)
├───╯
A
");
// A::K is picked at A, and A will be unblocked. Then, E::G at E, ...
insta::assert_snapshot!(format_graph(topo_grouped(graph.iter().cloned())), @r"
L direct(A)
│
│ K direct(E)
│ │
│ │ G direct(E)
│ ├─╯
│ E direct(C)
│ │
│ │ I direct(C)
│ ├─╯
│ C direct(A)
├─╯
│ J direct(D)
│ │
│ │ F direct(D)
│ ├─╯
│ D direct(B)
│ │
│ │ H direct(B)
│ ├─╯
│ B direct(A)
├─╯
A
");
// Merged fork sub graphs at K
let graph = itertools::chain!(
vec![('K', vec![direct('E'), direct('J')])],
sub_graph('F'..='J', vec![missing('Y')]),
sub_graph('A'..='E', vec![missing('X')]),
)
.map(Ok)
.collect_vec();
insta::assert_snapshot!(format_graph(graph.iter().cloned()), @r"
K direct(E), direct(J)
├─╮
│ J direct(G)
│ │
│ │ I direct(F)
│ │ │
│ │ │ H direct(G)
│ ├───╯
│ G │ direct(F)
│ ├─╯
│ F missing(Y)
│ │
│ ~
│
E direct(B)
│
│ D direct(A)
│ │
│ │ C direct(B)
├───╯
B │ direct(A)
├─╯
A missing(X)
│
~
");
// K-E,J is resolved without queuing new heads. Then, G::H, F::I, B::C, and
// A::D.
insta::assert_snapshot!(format_graph(topo_grouped(graph.iter().cloned())), @r"
K direct(E), direct(J)
├─╮
│ J direct(G)
│ │
E │ direct(B)
│ │
│ │ H direct(G)
│ ├─╯
│ G direct(F)
│ │
│ │ I direct(F)
│ ├─╯
│ F missing(Y)
│ │
│ ~
│
│ C direct(B)
├─╯
B direct(A)
│
│ D direct(A)
├─╯
A missing(X)
│
~
");
// Merged fork sub graphs at K, interleaved
let graph = itertools::chain!(
vec![('K', vec![direct('I'), direct('J')])],
sub_graph(['B', 'D', 'F', 'H', 'J'], vec![missing('Y')]),
sub_graph(['A', 'C', 'E', 'G', 'I'], vec![missing('X')]),
)
.sorted_by(|(id1, _), (id2, _)| id2.cmp(id1))
.map(Ok)
.collect_vec();
insta::assert_snapshot!(format_graph(graph.iter().cloned()), @r"
K direct(I), direct(J)
├─╮
│ J direct(D)
│ │
I │ direct(C)
│ │
│ │ H direct(B)
│ │ │
│ │ │ G direct(A)
│ │ │ │
│ │ │ │ F direct(D)
│ ├─────╯
│ │ │ │ E direct(C)
├───────╯
│ D │ │ direct(B)
│ ├─╯ │
C │ │ direct(A)
├─────╯
│ B missing(Y)
│ │
│ ~
│
A missing(X)
│
~
");
// K-I,J is resolved without queuing new heads. Then, D::F, B::H, C::E, and
// A::G.
insta::assert_snapshot!(format_graph(topo_grouped(graph.iter().cloned())), @r"
K direct(I), direct(J)
├─╮
│ J direct(D)
│ │
I │ direct(C)
│ │
│ │ F direct(D)
│ ├─╯
│ D direct(B)
│ │
│ │ H direct(B)
│ ├─╯
│ B missing(Y)
│ │
│ ~
│
│ E direct(C)
├─╯
C direct(A)
│
│ G direct(A)
├─╯
A missing(X)
│
~
");
}
#[test]
fn test_topo_grouped_merge_interleaved() {
let graph = [
('F', vec![direct('E')]),
('E', vec![direct('C'), direct('D')]),
('D', vec![direct('B')]),
('C', vec![direct('A')]),
('B', vec![direct('A')]),
('A', vec![]),
]
.map(Ok);
insta::assert_snapshot!(format_graph(graph.iter().cloned()), @r"
F direct(E)
│
E direct(C), direct(D)
├─╮
│ D direct(B)
│ │
C │ direct(A)
│ │
│ B direct(A)
├─╯
A
");
insta::assert_snapshot!(format_graph(topo_grouped(graph.iter().cloned())), @r"
F direct(E)
│
E direct(C), direct(D)
├─╮
│ D direct(B)
│ │
│ B direct(A)
│ │
C │ direct(A)
├─╯
A
");
// F, E, and D can be lazy, then C will be queued, then B.
let mut iter = topo_grouped(graph.iter().cloned().peekable());
assert_eq!(iter.next().unwrap().unwrap().0, 'F');
assert_eq!(iter.input_iter.peek().unwrap().as_ref().unwrap().0, 'E');
assert_eq!(iter.next().unwrap().unwrap().0, 'E');
assert_eq!(iter.input_iter.peek().unwrap().as_ref().unwrap().0, 'D');
assert_eq!(iter.next().unwrap().unwrap().0, 'D');
assert_eq!(iter.input_iter.peek().unwrap().as_ref().unwrap().0, 'C');
assert_eq!(iter.next().unwrap().unwrap().0, 'B');
assert_eq!(iter.input_iter.peek().unwrap().as_ref().unwrap().0, 'A');
}
#[test]
fn test_topo_grouped_merge_but_missing() {
let graph = [
('E', vec![direct('D')]),
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/ssh_signing.rs | lib/src/ssh_signing.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::ffi::OsString;
use std::fmt::Debug;
use std::io::Write as _;
use std::path::Path;
use std::path::PathBuf;
use std::process::Command;
use std::process::ExitStatus;
use std::process::Stdio;
use either::Either;
use thiserror::Error;
use crate::config::ConfigGetError;
use crate::config::ConfigGetResultExt as _;
use crate::settings::UserSettings;
use crate::signing::SigStatus;
use crate::signing::SignError;
use crate::signing::SigningBackend;
use crate::signing::Verification;
/// Commit signing backend that shells out to an external SSH program
/// (invoked with `ssh-keygen`-style `-Y sign` / `-Y verify` subcommands).
#[derive(Debug)]
pub struct SshBackend {
    /// Name or path of the external program to invoke.
    program: OsString,
    /// Optional path to an "allowed signers" file, passed via `-f` during
    /// verification; without it only `check-novalidate` is possible.
    allowed_signers: Option<OsString>,
    /// Optional path to a key revocation list, passed via `-r`.
    revocation_list: Option<OsString>,
}
/// Errors produced while invoking or interpreting the external SSH program.
#[derive(Debug, Error)]
pub enum SshError {
    /// The subprocess ran but exited unsuccessfully.
    #[error("SSH sign failed with {exit_status}:\n{stderr}")]
    Command {
        exit_status: ExitStatus,
        stderr: String,
    },
    /// The subprocess output could not be parsed (non-UTF-8 or unexpected
    /// shape).
    #[error("Failed to parse ssh program response")]
    BadResult,
    /// Spawning or talking to the subprocess (or temp-file handling) failed.
    #[error("Failed to run ssh-keygen")]
    Io(#[from] std::io::Error),
    /// `sign()` was called without a key configured.
    #[error("Signing key required")]
    MissingKey,
}
impl From<SshError> for SignError {
fn from(e: SshError) -> Self {
Self::Backend(Box::new(e))
}
}
/// Convenience alias for fallible SSH-backend operations.
type SshResult<T> = Result<T, SshError>;
/// Decodes raw subprocess output as UTF-8, mapping failure to `BadResult`.
fn parse_utf8_string(data: &[u8]) -> SshResult<&str> {
    match str::from_utf8(data) {
        Ok(text) => Ok(text),
        Err(_) => Err(SshError::BadResult),
    }
}
/// Runs `command`, feeding `stdin` to the child process, and returns its
/// stdout on success.
///
/// On a non-zero exit status, returns `SshError::Command` carrying the
/// child's stderr. A failure to write stdin (e.g. broken pipe because the
/// child exited early) is only surfaced if the command otherwise succeeded,
/// since the child's own error message is usually more informative.
fn run_command(command: &mut Command, stdin: &[u8]) -> SshResult<Vec<u8>> {
    tracing::info!(?command, "running SSH signing command");
    let process = command.spawn()?;
    // Deliberately not checked yet; see above.
    let write_result = process.stdin.as_ref().unwrap().write_all(stdin);
    let output = process.wait_with_output()?;
    tracing::info!(?command, ?output.status, "SSH signing command exited");
    if output.status.success() {
        write_result?;
        Ok(output.stdout)
    } else {
        Err(SshError::Command {
            exit_status: output.status,
            stderr: String::from_utf8_lossy(&output.stderr).trim_end().into(),
        })
    }
}
// Makes the given key available as a file on disk and returns its path.
//
// An absolute path (after `~` expansion) is assumed to already name a key
// file and is returned as-is; any other input is treated as inline key data
// and written to a temporary file.
fn ensure_key_as_file(key: &str) -> SshResult<Either<PathBuf, tempfile::TempPath>> {
    let expanded = crate::file_util::expand_home_path(key);
    if expanded.is_absolute() {
        return Ok(either::Left(expanded));
    }
    let mut temp_file = tempfile::Builder::new()
        .prefix("jj-signing-key-")
        .tempfile()?;
    temp_file.write_all(key.as_bytes())?;
    temp_file.flush()?;
    // Converting into a TempPath closes the underlying file handle. On
    // Windows systems this is required for other programs to be able to open
    // the file for reading.
    Ok(either::Right(temp_file.into_temp_path()))
}
/// Extracts the key fingerprint — the text after the last space — from the
/// program's output line.
fn parse_fingerprint(output: &[u8]) -> SshResult<String> {
    let text = parse_utf8_string(output)?;
    let (_, fingerprint) = text.rsplit_once(' ').ok_or(SshError::BadResult)?;
    Ok(fingerprint.trim().to_owned())
}
impl SshBackend {
    /// Creates a backend invoking `program` with the given optional
    /// allowed-signers file and revocation list.
    pub fn new(
        program: OsString,
        allowed_signers: Option<OsString>,
        revocation_list: Option<OsString>,
    ) -> Self {
        Self {
            program,
            allowed_signers,
            revocation_list,
        }
    }
    /// Builds the backend from `signing.backends.ssh.*` settings, expanding a
    /// leading `~` in the configured file paths.
    pub fn from_settings(settings: &UserSettings) -> Result<Self, ConfigGetError> {
        let program = settings.get_string("signing.backends.ssh.program")?;
        let get_expanded_path = |name| {
            Ok(settings
                .get_string(name)
                .optional()?
                .map(|v| crate::file_util::expand_home_path(v.as_str())))
        };
        let allowed_signers = get_expanded_path("signing.backends.ssh.allowed-signers")?;
        let revocation_list = get_expanded_path("signing.backends.ssh.revocation-list")?;
        Ok(Self::new(
            program.into(),
            allowed_signers.map(Into::into),
            revocation_list.map(Into::into),
        ))
    }
    /// Builds a base `Command` for `self.program` with all stdio piped.
    fn create_command(&self) -> Command {
        let mut command = Command::new(&self.program);
        // Hide console window on Windows (https://stackoverflow.com/a/60958956)
        #[cfg(windows)]
        {
            use std::os::windows::process::CommandExt as _;
            const CREATE_NO_WINDOW: u32 = 0x08000000;
            command.creation_flags(CREATE_NO_WINDOW);
        }
        command
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .stderr(Stdio::piped());
        command
    }
    /// Looks up the principal matching the signature file via
    /// `-Y find-principals`. Returns `Ok(None)` if no allowed-signers file is
    /// configured or no principal matches.
    fn find_principal(&self, signature_file_path: &Path) -> Result<Option<String>, SshError> {
        let Some(allowed_signers) = &self.allowed_signers else {
            return Ok(None);
        };
        let mut command = self.create_command();
        command
            .arg("-Y")
            .arg("find-principals")
            .arg("-f")
            .arg(allowed_signers)
            .arg("-s")
            .arg(signature_file_path);
        // We can't use the existing run_command helper here as `-Y find-principals`
        // will return a non-0 exit code if no principals are found.
        //
        // In this case we don't want to error out, just return None.
        tracing::info!(?command, "running SSH signing command");
        let process = command.spawn()?;
        let output = process.wait_with_output()?;
        tracing::info!(?command, ?output.status, "SSH signing command exited");
        // Only the first line is used; `-Y find-principals` may print several.
        let principal = parse_utf8_string(&output.stdout)?
            .split('\n')
            .next()
            .unwrap()
            .trim()
            .to_string();
        if principal.is_empty() {
            return Ok(None);
        }
        Ok(Some(principal))
    }
}
impl SigningBackend for SshBackend {
    /// Backend identifier used in configuration.
    fn name(&self) -> &'static str {
        "ssh"
    }
    /// SSH signatures are armored blocks; detect them by their PEM-like
    /// header.
    fn can_read(&self, signature: &[u8]) -> bool {
        signature.starts_with(b"-----BEGIN SSH SIGNATURE-----")
    }
    /// Signs `data` with `key` via `-Y sign -n git`, returning the armored
    /// signature. Errors with `MissingKey` if no key is given.
    fn sign(&self, data: &[u8], key: Option<&str>) -> Result<Vec<u8>, SignError> {
        let Some(key) = key else {
            return Err(SshError::MissingKey.into());
        };
        // The ssh-keygen `-f` flag expects to be given a file which contains either a
        // private or public key.
        //
        // As it expects a file and we might have an inlined public key instead, we need
        // to ensure it is written to a file first.
        let pub_key_path = ensure_key_as_file(key)?;
        let mut command = self.create_command();
        let path = match &pub_key_path {
            either::Left(path) => path.as_os_str(),
            either::Right(path) => path.as_os_str(),
        };
        command
            .arg("-Y")
            .arg("sign")
            .arg("-f")
            .arg(path)
            .arg("-n")
            .arg("git");
        Ok(run_command(&mut command, data)?)
    }
    /// Verifies `signature` over `data`.
    ///
    /// If a principal is found in the allowed-signers file, performs a full
    /// `-Y verify` (honoring the revocation list if configured); otherwise
    /// falls back to `-Y check-novalidate`, which can only report the
    /// signature as structurally valid with an unknown principal.
    fn verify(&self, data: &[u8], signature: &[u8]) -> Result<Verification, SignError> {
        // `-s` expects the signature in a file, so stage it in a temp file.
        let mut signature_file = tempfile::Builder::new()
            .prefix(".jj-ssh-sig-")
            .tempfile()
            .map_err(SshError::Io)?;
        signature_file.write_all(signature).map_err(SshError::Io)?;
        signature_file.flush().map_err(SshError::Io)?;
        // TempPath closes the handle so the child process can open the file.
        let signature_file_path = signature_file.into_temp_path();
        let principal = self.find_principal(&signature_file_path)?;
        let mut command = self.create_command();
        match (principal, self.allowed_signers.as_ref()) {
            (Some(principal), Some(allowed_signers)) => {
                command
                    .arg("-Y")
                    .arg("verify")
                    .arg("-s")
                    .arg(&signature_file_path)
                    .arg("-I")
                    .arg(&principal)
                    .arg("-f")
                    .arg(allowed_signers)
                    .arg("-n")
                    .arg("git");
                if let Some(revocation_list) = self.revocation_list.as_ref() {
                    command.arg("-r").arg(revocation_list);
                }
                let result = run_command(&mut command, data);
                // Any command failure is reported as a bad signature rather
                // than propagated as an error.
                let (status, key) = match &result {
                    Ok(output) => (SigStatus::Good, Some(parse_fingerprint(output)?)),
                    Err(_) => (SigStatus::Bad, None),
                };
                Ok(Verification::new(status, key, Some(principal)))
            }
            _ => {
                command
                    .arg("-Y")
                    .arg("check-novalidate")
                    .arg("-s")
                    .arg(&signature_file_path)
                    .arg("-n")
                    .arg("git");
                let result = run_command(&mut command, data);
                match &result {
                    Ok(output) => Ok(Verification::new(
                        SigStatus::Unknown,
                        Some(parse_fingerprint(output)?),
                        Some("Signature OK. Unknown principal".into()),
                    )),
                    Err(_) => Ok(Verification::new(SigStatus::Bad, None, None)),
                }
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use std::fs::File;
    use std::io::Read as _;
    use super::*;
    // Inline key data must be copied verbatim into a temporary file.
    #[test]
    fn test_ssh_key_to_file_conversion_raw_key_data() {
        let keydata = "ssh-ed25519 some-key-data";
        let path = ensure_key_as_file(keydata).unwrap();
        let mut buf = vec![];
        let mut file = File::open(path.right().unwrap()).unwrap();
        file.read_to_end(&mut buf).unwrap();
        assert_eq!("ssh-ed25519 some-key-data", String::from_utf8(buf).unwrap());
    }
    // The conversion is prefix-agnostic: any non-path key type works.
    #[test]
    fn test_ssh_key_to_file_conversion_non_ssh_prefix() {
        let keydata = "ecdsa-sha2-nistp256 some-key-data";
        let path = ensure_key_as_file(keydata).unwrap();
        let mut buf = vec![];
        let mut file = File::open(path.right().unwrap()).unwrap();
        file.read_to_end(&mut buf).unwrap();
        assert_eq!(
            "ecdsa-sha2-nistp256 some-key-data",
            String::from_utf8(buf).unwrap()
        );
    }
    // An absolute path to an existing key file is returned unchanged
    // (Either::Left), without creating a new temp file.
    #[test]
    fn test_ssh_key_to_file_conversion_existing_file() {
        let mut file = tempfile::Builder::new()
            .prefix("jj-signing-key-")
            .tempfile()
            .map_err(SshError::Io)
            .unwrap();
        file.write_all(b"some-data").map_err(SshError::Io).unwrap();
        file.flush().map_err(SshError::Io).unwrap();
        let file_path = file.into_temp_path();
        let path = ensure_key_as_file(file_path.to_str().unwrap()).unwrap();
        assert_eq!(
            file_path.to_str().unwrap(),
            path.left().unwrap().to_str().unwrap()
        );
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/secret_backend.rs | lib/src/secret_backend.rs | // Copyright 2024 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Provides a backend for testing ACLs
use std::path::Path;
use std::pin::Pin;
use std::time::SystemTime;
use async_trait::async_trait;
use futures::stream::BoxStream;
use tokio::io::AsyncRead;
use crate::backend::Backend;
use crate::backend::BackendError;
use crate::backend::BackendLoadError;
use crate::backend::BackendResult;
use crate::backend::ChangeId;
use crate::backend::Commit;
use crate::backend::CommitId;
use crate::backend::CopyHistory;
use crate::backend::CopyId;
use crate::backend::CopyRecord;
use crate::backend::FileId;
use crate::backend::SigningFn;
use crate::backend::SymlinkId;
use crate::backend::Tree;
use crate::backend::TreeId;
use crate::git_backend::GitBackend;
use crate::index::Index;
use crate::object_id::ObjectId as _;
use crate::repo_path::RepoPath;
use crate::repo_path::RepoPathBuf;
use crate::settings::UserSettings;
// Hex object ids whose contents are the literal word "secret"; reads of files
// or symlinks with one of these ids are denied by `SecretBackend`.
const SECRET_CONTENTS_HEX: [&str; 2] = [
    "d97c5eada5d8c52079031eef0107a4430a9617c5", // "secret\n"
    "536aca34dbae6b2b8af26bebdcba83543c9546f0", // "secret"
];
/// A commit backend that's completely compatible with the Git backend, except
/// that it refuses to read files and symlinks with the word "secret" in the
/// path, or "secret" or "secret\n" in the content.
#[derive(Debug)]
pub struct SecretBackend {
    // The real Git backend that all non-denied operations delegate to.
    inner: GitBackend,
}
impl SecretBackend {
    /// The name identifying this backend: "secret".
    pub fn name() -> &'static str {
        "secret"
    }
    /// Loads the underlying Git backend from `store_path` and wraps it in a
    /// `SecretBackend`.
    pub fn load(settings: &UserSettings, store_path: &Path) -> Result<Self, BackendLoadError> {
        Ok(Self {
            inner: GitBackend::load(settings, store_path)?,
        })
    }
    /// Convert a git repo to using `SecretBackend`
    // TODO: Avoid this hack
    pub fn adopt_git_repo(workspace_path: &Path) {
        let type_file = workspace_path
            .join(".jj")
            .join("repo")
            .join("store")
            .join("type");
        // Overwrite the store-type marker so the repo loads with this backend.
        std::fs::write(type_file, Self::name()).unwrap();
    }
}
// Every operation delegates to the inner `GitBackend`; only `read_file` and
// `read_symlink` add an access check, and the copy-tracking APIs are rejected
// outright.
#[async_trait]
impl Backend for SecretBackend {
    fn name(&self) -> &str {
        Self::name()
    }
    fn commit_id_length(&self) -> usize {
        self.inner.commit_id_length()
    }
    fn change_id_length(&self) -> usize {
        self.inner.change_id_length()
    }
    fn root_commit_id(&self) -> &CommitId {
        self.inner.root_commit_id()
    }
    fn root_change_id(&self) -> &ChangeId {
        self.inner.root_change_id()
    }
    fn empty_tree_id(&self) -> &TreeId {
        self.inner.empty_tree_id()
    }
    fn concurrency(&self) -> usize {
        // Force sequential access; does not consult the inner backend's value.
        1
    }
    async fn read_file(
        &self,
        path: &RepoPath,
        id: &FileId,
    ) -> BackendResult<Pin<Box<dyn AsyncRead + Send>>> {
        // Deny the read if either the path mentions "secret" or the object id
        // is one of the known "secret" content ids.
        if path.as_internal_file_string().contains("secret")
            || SECRET_CONTENTS_HEX.contains(&id.hex().as_ref())
        {
            return Err(BackendError::ReadAccessDenied {
                object_type: "file".to_string(),
                hash: id.hex(),
                source: "No access".into(),
            });
        }
        self.inner.read_file(path, id).await
    }
    async fn write_file(
        &self,
        path: &RepoPath,
        contents: &mut (dyn AsyncRead + Send + Unpin),
    ) -> BackendResult<FileId> {
        // Writes are not access-checked; only reads are restricted.
        self.inner.write_file(path, contents).await
    }
    async fn read_symlink(&self, path: &RepoPath, id: &SymlinkId) -> BackendResult<String> {
        // Same denial rule as read_file, but reported as a "symlink" object.
        if path.as_internal_file_string().contains("secret")
            || SECRET_CONTENTS_HEX.contains(&id.hex().as_ref())
        {
            return Err(BackendError::ReadAccessDenied {
                object_type: "symlink".to_string(),
                hash: id.hex(),
                source: "No access".into(),
            });
        }
        self.inner.read_symlink(path, id).await
    }
    async fn write_symlink(&self, path: &RepoPath, target: &str) -> BackendResult<SymlinkId> {
        self.inner.write_symlink(path, target).await
    }
    async fn read_copy(&self, _id: &CopyId) -> BackendResult<CopyHistory> {
        Err(BackendError::Unsupported(
            "The secret backend doesn't support copies".to_string(),
        ))
    }
    async fn write_copy(&self, _contents: &CopyHistory) -> BackendResult<CopyId> {
        Err(BackendError::Unsupported(
            "The secret backend doesn't support copies".to_string(),
        ))
    }
    async fn get_related_copies(&self, _copy_id: &CopyId) -> BackendResult<Vec<CopyHistory>> {
        Err(BackendError::Unsupported(
            "The secret backend doesn't support copies".to_string(),
        ))
    }
    async fn read_tree(&self, path: &RepoPath, id: &TreeId) -> BackendResult<Tree> {
        self.inner.read_tree(path, id).await
    }
    async fn write_tree(&self, path: &RepoPath, contents: &Tree) -> BackendResult<TreeId> {
        self.inner.write_tree(path, contents).await
    }
    async fn read_commit(&self, id: &CommitId) -> BackendResult<Commit> {
        self.inner.read_commit(id).await
    }
    async fn write_commit(
        &self,
        contents: Commit,
        sign_with: Option<&mut SigningFn>,
    ) -> BackendResult<(CommitId, Commit)> {
        self.inner.write_commit(contents, sign_with).await
    }
    fn get_copy_records(
        &self,
        paths: Option<&[RepoPathBuf]>,
        root: &CommitId,
        head: &CommitId,
    ) -> BackendResult<BoxStream<'_, BackendResult<CopyRecord>>> {
        self.inner.get_copy_records(paths, root, head)
    }
    fn gc(&self, index: &dyn Index, keep_newer: SystemTime) -> BackendResult<()> {
        self.inner.gc(index, keep_newer)
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/absorb.rs | lib/src/absorb.rs | // Copyright 2024 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Algorithm to split changes in a single source commit into its most relevant
//! ancestors, 'absorbing' them away.
use std::cmp;
use std::collections::HashMap;
use std::ops::Range;
use std::sync::Arc;
use bstr::BString;
use futures::StreamExt as _;
use itertools::Itertools as _;
use pollster::FutureExt as _;
use thiserror::Error;
use crate::annotate::FileAnnotator;
use crate::backend::BackendError;
use crate::backend::BackendResult;
use crate::backend::CommitId;
use crate::backend::TreeValue;
use crate::commit::Commit;
use crate::commit::conflict_label_for_commits;
use crate::conflicts::MaterializedFileValue;
use crate::conflicts::MaterializedTreeValue;
use crate::conflicts::materialized_diff_stream;
use crate::copies::CopyRecords;
use crate::diff::ContentDiff;
use crate::diff::DiffHunkKind;
use crate::matchers::Matcher;
use crate::merge::Diff;
use crate::merge::Merge;
use crate::merged_tree::MergedTree;
use crate::merged_tree::MergedTreeBuilder;
use crate::repo::MutableRepo;
use crate::repo::Repo;
use crate::repo_path::RepoPathBuf;
use crate::revset::ResolvedRevsetExpression;
use crate::revset::RevsetEvaluationError;
/// The source commit to absorb into its ancestry.
#[derive(Clone, Debug)]
pub struct AbsorbSource {
    // The commit whose changes will be split into its ancestors.
    commit: Commit,
    // Parents of `commit`, captured at construction time.
    parents: Vec<Commit>,
    // Merged tree of the parents; diffing it against `commit`'s tree yields
    // the hunks to be absorbed.
    parent_tree: MergedTree,
}
impl AbsorbSource {
/// Create an absorb source from a single commit.
pub fn from_commit(repo: &dyn Repo, commit: Commit) -> BackendResult<Self> {
let parents = commit.parents().try_collect()?;
let parent_tree = commit.parent_tree(repo)?;
Ok(Self {
commit,
parents,
parent_tree,
})
}
}
/// Error splitting an absorb source into modified ancestry trees.
#[derive(Debug, Error)]
pub enum AbsorbError {
    /// Error while contacting the Backend.
    #[error(transparent)]
    Backend(#[from] BackendError),
    /// Error resolving commit ancestry.
    /// (Raised while annotating file content against the destination revset.)
    #[error(transparent)]
    RevsetEvaluation(#[from] RevsetEvaluationError),
}
/// An absorb 'plan' indicating which commits should be modified and what they
/// should be modified to.
#[derive(Default)]
pub struct SelectedTrees {
    /// Commits to be modified, to be passed to `absorb_hunks`.
    pub target_commits: HashMap<CommitId, MergedTreeBuilder>,
    /// Paths that were not absorbed for various error reasons, as
    /// `(path, human-readable reason)` pairs.
    pub skipped_paths: Vec<(RepoPathBuf, String)>,
}
/// Builds trees to be merged into destination commits by splitting source
/// changes based on file annotation.
///
/// Each file changed in `source` (and accepted by `matcher`) is annotated
/// against `destinations`; diff hunks that map unambiguously to a single
/// destination commit are queued for that commit via a tree builder in
/// [`SelectedTrees::target_commits`]. Newly added files are skipped silently
/// (they have no destination); paths that cannot be handled (conflicts,
/// symlinks, access denied, ...) are recorded in
/// [`SelectedTrees::skipped_paths`] with a reason.
pub async fn split_hunks_to_trees(
    repo: &dyn Repo,
    source: &AbsorbSource,
    destinations: &Arc<ResolvedRevsetExpression>,
    matcher: &dyn Matcher,
) -> Result<SelectedTrees, AbsorbError> {
    let mut selected_trees = SelectedTrees::default();

    let left_tree = &source.parent_tree;
    let right_tree = source.commit.tree();
    // TODO: enable copy tracking if we add support for annotate and merge
    let copy_records = CopyRecords::default();
    let tree_diff = left_tree.diff_stream_with_copies(&right_tree, matcher, &copy_records);
    let mut diff_stream = materialized_diff_stream(
        repo.store(),
        tree_diff,
        Diff::new(left_tree.labels(), right_tree.labels()),
    );
    while let Some(entry) = diff_stream.next().await {
        let left_path = entry.path.source();
        let right_path = entry.path.target();
        let values = entry.values?;
        // Read the parent-side ("left") content; its executable bit and copy
        // id are reused for the rewritten file below.
        let (left_text, executable, copy_id) = match to_file_value(values.before) {
            Ok(Some(mut value)) => (
                value.read_all(left_path).await?,
                value.executable,
                value.copy_id,
            ),
            // New file should have no destinations
            Ok(None) => continue,
            Err(reason) => {
                selected_trees
                    .skipped_paths
                    .push((left_path.to_owned(), reason));
                continue;
            }
        };
        // Read the source-side ("right") content; absence means deletion.
        let (right_text, deleted) = match to_file_value(values.after) {
            Ok(Some(mut value)) => (value.read_all(right_path).await?, false),
            Ok(None) => (vec![], true),
            Err(reason) => {
                selected_trees
                    .skipped_paths
                    .push((right_path.to_owned(), reason));
                continue;
            }
        };

        // Compute annotation of parent (= left) content to map right hunks
        let mut annotator =
            FileAnnotator::with_file_content(source.commit.id(), left_path, left_text.clone());
        annotator.compute(repo, destinations)?;
        let annotation = annotator.to_annotation();
        // Drop lines whose originating commit could not be resolved.
        let annotation_ranges = annotation
            .compact_line_ranges()
            .filter_map(|(commit_id, range)| Some((commit_id.ok()?, range)))
            .collect_vec();
        let diff = ContentDiff::by_line([&left_text, &right_text]);
        let selected_ranges = split_file_hunks(&annotation_ranges, &diff);

        // Build trees containing parent (= left) contents + selected hunks
        for (&commit_id, ranges) in &selected_ranges {
            let tree_builder = selected_trees
                .target_commits
                .entry(commit_id.clone())
                .or_insert_with(|| MergedTreeBuilder::new(left_tree.clone()));
            let new_text = combine_texts(&left_text, &right_text, ranges);
            // Since changes to be absorbed are represented as diffs relative to
            // the source parent, we can propagate file deletion only if the
            // whole file content is deleted at a single destination commit.
            let new_tree_value = if new_text.is_empty() && deleted {
                Merge::absent()
            } else {
                let id = repo
                    .store()
                    .write_file(left_path, &mut new_text.as_slice())
                    .await?;
                Merge::normal(TreeValue::File {
                    id,
                    executable,
                    copy_id: copy_id.clone(),
                })
            };
            tree_builder.set_or_remove(left_path.to_owned(), new_tree_value);
        }
    }

    Ok(selected_trees)
}
// A pair of byte ranges: (range in the left/parent text, range in the
// right/source text) selected for one destination commit.
type SelectedRange = (Range<usize>, Range<usize>);

/// Maps `diff` hunks to commits based on the left `annotation_ranges`. The
/// `annotation_ranges` should be compacted.
///
/// A pure-deletion hunk may span several contiguous annotation ranges and is
/// split among them. Any other hunk must fall entirely inside a single
/// annotation range; hunks that straddle ranges, or pure insertions sitting
/// exactly on a range boundary, are ambiguous and dropped.
fn split_file_hunks<'a>(
    mut annotation_ranges: &[(&'a CommitId, Range<usize>)],
    diff: &ContentDiff,
) -> HashMap<&'a CommitId, Vec<SelectedRange>> {
    // Compacted annotation ranges are never empty.
    debug_assert!(annotation_ranges.iter().all(|(_, range)| !range.is_empty()));
    let mut selected_ranges: HashMap<&CommitId, Vec<_>> = HashMap::new();
    // Only changed hunks matter; matching hunks are skipped.
    let mut diff_hunk_ranges = diff
        .hunk_ranges()
        .filter(|hunk| hunk.kind == DiffHunkKind::Different);
    // Both hunks and annotation ranges are sorted by position, so each can be
    // consumed front to back in a single pass.
    while !annotation_ranges.is_empty() {
        let Some(hunk) = diff_hunk_ranges.next() else {
            break;
        };
        let [left_range, right_range]: &[_; 2] = hunk.ranges[..].try_into().unwrap();
        assert!(!left_range.is_empty() || !right_range.is_empty());
        if right_range.is_empty() {
            // If the hunk is pure deletion, it can be mapped to multiple
            // overlapped annotation ranges unambiguously.
            let skip = annotation_ranges
                .iter()
                .take_while(|(_, range)| range.end <= left_range.start)
                .count();
            annotation_ranges = &annotation_ranges[skip..];
            // Count ranges ending strictly before the hunk end; the next one
            // (if any) is the last range the hunk can reach into.
            let pre_overlap = annotation_ranges
                .iter()
                .take_while(|(_, range)| range.end < left_range.end)
                .count();
            let maybe_overlapped_ranges = annotation_ranges.get(..pre_overlap + 1);
            annotation_ranges = &annotation_ranges[pre_overlap..];
            let Some(overlapped_ranges) = maybe_overlapped_ranges else {
                continue;
            };
            // Ensure that the ranges are contiguous and include the start.
            let all_covered = overlapped_ranges
                .iter()
                .try_fold(left_range.start, |prev_end, (_, cur)| {
                    (cur.start <= prev_end).then_some(cur.end)
                })
                .inspect(|&last_end| assert!(left_range.end <= last_end))
                .is_some();
            if all_covered {
                // Attribute each annotated slice of the deleted span to its
                // own commit; the (empty) right range marks the deletion.
                for (commit_id, cur_range) in overlapped_ranges {
                    let start = cmp::max(cur_range.start, left_range.start);
                    let end = cmp::min(cur_range.end, left_range.end);
                    assert!(start < end);
                    let selected = selected_ranges.entry(commit_id).or_default();
                    selected.push((start..end, right_range.clone()));
                }
            }
        } else {
            // In other cases, the hunk should be included in an annotation
            // range to map it unambiguously. Skip any pre-overlapped ranges.
            let skip = annotation_ranges
                .iter()
                .take_while(|(_, range)| range.end < left_range.end)
                .count();
            annotation_ranges = &annotation_ranges[skip..];
            let Some((commit_id, cur_range)) = annotation_ranges.first() else {
                continue;
            };
            let contained = cur_range.start <= left_range.start && left_range.end <= cur_range.end;
            // If the hunk is pure insertion, it can be mapped to two distinct
            // annotation ranges, which is ambiguous.
            let ambiguous = cur_range.end == left_range.start
                && annotation_ranges
                    .get(1)
                    .is_some_and(|(_, next_range)| next_range.start == left_range.end);
            if contained && !ambiguous {
                let selected = selected_ranges.entry(commit_id).or_default();
                selected.push((left_range.clone(), right_range.clone()));
            }
        }
    }
    selected_ranges
}
/// Constructs new text by replacing `text1` range with `text2` range for each
/// selected `(range1, range2)` pairs.
fn combine_texts(text1: &[u8], text2: &[u8], selected_ranges: &[SelectedRange]) -> BString {
itertools::chain!(
[(0..0, 0..0)],
selected_ranges.iter().cloned(),
[(text1.len()..text1.len(), text2.len()..text2.len())],
)
.tuple_windows()
// Copy unchanged hunk from text1 and current hunk from text2
.map(|((prev1, _), (cur1, cur2))| (prev1.end..cur1.start, cur2))
.flat_map(|(range1, range2)| [&text1[range1], &text2[range2]])
.collect()
}
/// Describes changes made by [`absorb_hunks()`].
#[derive(Clone, Debug)]
pub struct AbsorbStats {
    /// Rewritten source commit which the absorbed hunks were removed, or `None`
    /// if the source commit was abandoned or no hunks were moved.
    pub rewritten_source: Option<Commit>,
    /// Rewritten commits which the source hunks were absorbed into, in forward
    /// topological order.
    pub rewritten_destinations: Vec<Commit>,
    /// Number of descendant commits which were rebased. The number of rewritten
    /// destination commits are not included.
    /// (A rewritten, non-abandoned source commit is counted here.)
    pub num_rebased: usize,
}
/// Merges selected trees into the specified commits. Abandons the source commit
/// if it becomes discardable.
///
/// Descendants of the rewritten destinations (including the source commit
/// itself) are rebased as part of the same traversal. The returned stats
/// report the rewritten source (if kept), the rewritten destinations, and the
/// number of other rebased commits.
pub fn absorb_hunks(
    repo: &mut MutableRepo,
    source: &AbsorbSource,
    mut selected_trees: HashMap<CommitId, MergedTreeBuilder>,
) -> BackendResult<AbsorbStats> {
    let mut rewritten_source = None;
    let mut rewritten_destinations = Vec::new();
    let mut num_rebased = 0;
    // Labels used to name the sides of the 3-way merge below.
    let parents_label = conflict_label_for_commits(&source.parents);
    let source_commit_label = source.commit.conflict_label();
    // Rewrite commits in topological order so that descendant commits wouldn't
    // be rewritten multiple times.
    repo.transform_descendants(selected_trees.keys().cloned().collect(), async |rewriter| {
        // Remove selected hunks from the source commit by reparent()
        if rewriter.old_commit().id() == source.commit.id() {
            let commit_builder = rewriter.reparent();
            if commit_builder.is_discardable()? {
                // Nothing left in the source commit: drop it entirely.
                commit_builder.abandon();
            } else {
                rewritten_source = Some(commit_builder.write()?);
                num_rebased += 1;
            }
            return Ok(());
        }
        // Plain descendant with no absorbed hunks: just rebase it.
        let Some(tree_builder) = selected_trees.remove(rewriter.old_commit().id()) else {
            rewriter.rebase().await?.write()?;
            num_rebased += 1;
            return Ok(());
        };
        // Merge hunks between source parent tree and selected tree
        let selected_tree = tree_builder.write_tree()?;
        let destination_label = rewriter.old_commit().conflict_label();
        let commit_builder = rewriter.rebase().await?;
        let destination_tree = commit_builder.tree();
        // 3-way merge: destination + (selected - parents) applies exactly the
        // absorbed diff on top of the destination's own content.
        let new_tree = MergedTree::merge(Merge::from_vec(vec![
            (
                destination_tree,
                format!("{destination_label} (absorb destination)"),
            ),
            (
                source.parent_tree.clone(),
                format!("{parents_label} (parents of absorbed revision)"),
            ),
            (
                selected_tree,
                format!("absorbed changes (from {source_commit_label})"),
            ),
        ]))
        .block_on()?;
        // Record the source commit as a predecessor of the rewritten
        // destination, since part of its content moved here.
        let mut predecessors = commit_builder.predecessors().to_vec();
        predecessors.push(source.commit.id().clone());
        let new_commit = commit_builder
            .set_tree(new_tree)
            .set_predecessors(predecessors)
            .write()?;
        rewritten_destinations.push(new_commit);
        Ok(())
    })?;
    Ok(AbsorbStats {
        rewritten_source,
        rewritten_destinations,
        num_rebased,
    })
}
/// Classifies a materialized tree value for absorbing: `Ok(Some(file))` for a
/// regular file, `Ok(None)` when the entry is absent (added or deleted file),
/// and `Err(reason)` for anything that cannot be absorbed.
fn to_file_value(value: MaterializedTreeValue) -> Result<Option<MaterializedFileValue>, String> {
    match value {
        // Regular file contents: usable as is.
        MaterializedTreeValue::File(file) => Ok(Some(file)),
        // No entry on this side: the file is newly added or deleted.
        MaterializedTreeValue::Absent => Ok(None),
        // Everything below is reported back to the caller as a skip reason.
        MaterializedTreeValue::AccessDenied(err) => Err(format!("Access is denied: {err}")),
        MaterializedTreeValue::Symlink { .. } => Err("Is a symlink".into()),
        MaterializedTreeValue::GitSubmodule(_) => Err("Is a Git submodule".into()),
        MaterializedTreeValue::FileConflict(_) | MaterializedTreeValue::OtherConflict { .. } => {
            Err("Is a conflict".into())
        }
        // Diff entries are always leaves, so a tree here is a programming error.
        MaterializedTreeValue::Tree(_) => panic!("diff should not contain trees"),
    }
}
#[cfg(test)]
mod tests {
use maplit::hashmap;
use super::*;
#[test]
fn test_split_file_hunks_empty_or_single_line() {
let commit_id1 = &CommitId::from_hex("111111");
// unchanged
assert_eq!(
split_file_hunks(&[], &ContentDiff::by_line(["", ""])),
hashmap! {}
);
// insert single line
assert_eq!(
split_file_hunks(&[], &ContentDiff::by_line(["", "2X\n"])),
hashmap! {}
);
// delete single line
assert_eq!(
split_file_hunks(&[(commit_id1, 0..3)], &ContentDiff::by_line(["1a\n", ""])),
hashmap! { commit_id1 => vec![(0..3, 0..0)] }
);
// modify single line
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..3)],
&ContentDiff::by_line(["1a\n", "1AA\n"])
),
hashmap! { commit_id1 => vec![(0..3, 0..4)] }
);
}
#[test]
fn test_split_file_hunks_single_range() {
let commit_id1 = &CommitId::from_hex("111111");
// insert first, middle, and last lines
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6)],
&ContentDiff::by_line(["1a\n1b\n", "1X\n1a\n1Y\n1b\n1Z\n"])
),
hashmap! {
commit_id1 => vec![(0..0, 0..3), (3..3, 6..9), (6..6, 12..15)],
}
);
// delete first, middle, and last lines
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..15)],
&ContentDiff::by_line(["1a\n1b\n1c\n1d\n1e\n1f\n", "1b\n1d\n1f\n"])
),
hashmap! {
commit_id1 => vec![(0..3, 0..0), (6..9, 3..3), (12..15, 6..6)],
}
);
// modify non-contiguous lines
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..12)],
&ContentDiff::by_line(["1a\n1b\n1c\n1d\n", "1A\n1b\n1C\n1d\n"])
),
hashmap! { commit_id1 => vec![(0..3, 0..3), (6..9, 6..9)] }
);
}
#[test]
fn test_split_file_hunks_contiguous_ranges_insert() {
let commit_id1 = &CommitId::from_hex("111111");
let commit_id2 = &CommitId::from_hex("222222");
// insert first line
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1X\n1a\n1b\n2a\n2b\n"])
),
hashmap! { commit_id1 => vec![(0..0, 0..3)] }
);
// insert middle line to first range
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1a\n1X\n1b\n2a\n2b\n"])
),
hashmap! { commit_id1 => vec![(3..3, 3..6)] }
);
// insert middle line between ranges (ambiguous)
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1a\n1b\n3X\n2a\n2b\n"])
),
hashmap! {}
);
// insert middle line to second range
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1a\n1b\n2a\n2X\n2b\n"])
),
hashmap! { commit_id2 => vec![(9..9, 9..12)] }
);
// insert last line
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1a\n1b\n2a\n2b\n2X\n"])
),
hashmap! { commit_id2 => vec![(12..12, 12..15)] }
);
}
#[test]
fn test_split_file_hunks_contiguous_ranges_delete() {
let commit_id1 = &CommitId::from_hex("111111");
let commit_id2 = &CommitId::from_hex("222222");
// delete first line
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1b\n2a\n2b\n"])
),
hashmap! { commit_id1 => vec![(0..3, 0..0)] }
);
// delete middle line from first range
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1a\n2a\n2b\n"])
),
hashmap! { commit_id1 => vec![(3..6, 3..3)] }
);
// delete middle line from second range
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1a\n1b\n2b\n"])
),
hashmap! { commit_id2 => vec![(6..9, 6..6)] }
);
// delete last line
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1a\n1b\n2a\n"])
),
hashmap! { commit_id2 => vec![(9..12, 9..9)] }
);
// delete first and last lines
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1b\n2a\n"])
),
hashmap! {
commit_id1 => vec![(0..3, 0..0)],
commit_id2 => vec![(9..12, 6..6)],
}
);
// delete across ranges (split first annotation range)
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1a\n"])
),
hashmap! {
commit_id1 => vec![(3..6, 3..3)],
commit_id2 => vec![(6..12, 3..3)],
}
);
// delete middle lines across ranges (split both annotation ranges)
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1a\n2b\n"])
),
hashmap! {
commit_id1 => vec![(3..6, 3..3)],
commit_id2 => vec![(6..9, 3..3)],
}
);
// delete across ranges (split second annotation range)
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "2b\n"])
),
hashmap! {
commit_id1 => vec![(0..6, 0..0)],
commit_id2 => vec![(6..9, 0..0)],
}
);
// delete all
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", ""])
),
hashmap! {
commit_id1 => vec![(0..6, 0..0)],
commit_id2 => vec![(6..12, 0..0)],
}
);
}
#[test]
fn test_split_file_hunks_contiguous_ranges_modify() {
let commit_id1 = &CommitId::from_hex("111111");
let commit_id2 = &CommitId::from_hex("222222");
// modify first line
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1A\n1b\n2a\n2b\n"])
),
hashmap! { commit_id1 => vec![(0..3, 0..3)] }
);
// modify middle line of first range
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1a\n1B\n2a\n2b\n"])
),
hashmap! { commit_id1 => vec![(3..6, 3..6)] }
);
// modify middle lines of both ranges (ambiguous)
// ('hg absorb' accepts this)
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1a\n1B\n2A\n2b\n"])
),
hashmap! {}
);
// modify middle line of second range
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1a\n1b\n2A\n2b\n"])
),
hashmap! { commit_id2 => vec![(6..9, 6..9)] }
);
// modify last line
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1a\n1b\n2a\n2B\n"])
),
hashmap! { commit_id2 => vec![(9..12, 9..12)] }
);
// modify first and last lines
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1A\n1b\n2a\n2B\n"])
),
hashmap! {
commit_id1 => vec![(0..3, 0..3)],
commit_id2 => vec![(9..12, 9..12)],
}
);
}
#[test]
fn test_split_file_hunks_contiguous_ranges_modify_insert() {
let commit_id1 = &CommitId::from_hex("111111");
let commit_id2 = &CommitId::from_hex("222222");
// modify first range, insert adjacent middle line
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1A\n1B\n1X\n2a\n2b\n"])
),
hashmap! { commit_id1 => vec![(0..6, 0..9)] }
);
// modify second range, insert adjacent middle line
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1a\n1b\n2X\n2A\n2B\n"])
),
hashmap! { commit_id2 => vec![(6..12, 6..15)] }
);
// modify second range, insert last line
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1a\n1b\n2A\n2B\n2X\n"])
),
hashmap! { commit_id2 => vec![(6..12, 6..15)] }
);
// modify first and last lines (unambiguous), insert middle line between
// ranges (ambiguous)
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1A\n1b\n3X\n2a\n2B\n"])
),
hashmap! {
commit_id1 => vec![(0..3, 0..3)],
commit_id2 => vec![(9..12, 12..15)],
}
);
}
#[test]
fn test_split_file_hunks_contiguous_ranges_modify_delete() {
let commit_id1 = &CommitId::from_hex("111111");
let commit_id2 = &CommitId::from_hex("222222");
// modify first line, delete adjacent middle line
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1A\n2a\n2b\n"])
),
hashmap! { commit_id1 => vec![(0..6, 0..3)] }
);
// modify last line, delete adjacent middle line
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1a\n1b\n2B\n"])
),
hashmap! { commit_id2 => vec![(6..12, 6..9)] }
);
// modify first and last lines, delete middle line from first range
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1A\n2a\n2B\n"])
),
hashmap! {
commit_id1 => vec![(0..6, 0..3)],
commit_id2 => vec![(9..12, 6..9)],
}
);
// modify first and last lines, delete middle line from second range
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1A\n1b\n2B\n"])
),
hashmap! {
commit_id1 => vec![(0..3, 0..3)],
commit_id2 => vec![(6..12, 6..9)],
}
);
// modify middle line, delete adjacent middle line (ambiguous)
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), (commit_id2, 6..12)],
&ContentDiff::by_line(["1a\n1b\n2a\n2b\n", "1a\n1B\n2b\n"])
),
hashmap! {}
);
}
#[test]
fn test_split_file_hunks_non_contiguous_ranges_insert() {
let commit_id1 = &CommitId::from_hex("111111");
let commit_id2 = &CommitId::from_hex("222222");
// insert middle line to first range
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), /* 6..9, */ (commit_id2, 9..15)],
&ContentDiff::by_line(["1a\n1b\n0a\n2a\n2b\n", "1a\n1b\n1X\n0a\n2a\n2b\n"])
),
hashmap! { commit_id1 => vec![(6..6, 6..9)] }
);
// insert middle line to second range
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), /* 6..9, */ (commit_id2, 9..15)],
&ContentDiff::by_line(["1a\n1b\n0a\n2a\n2b\n", "1a\n1b\n0a\n2X\n2a\n2b\n"])
),
hashmap! { commit_id2 => vec![(9..9, 9..12)] }
);
// insert middle lines to both ranges
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), /* 6..9, */ (commit_id2, 9..15)],
&ContentDiff::by_line(["1a\n1b\n0a\n2a\n2b\n", "1a\n1b\n1X\n0a\n2X\n2a\n2b\n"])
),
hashmap! {
commit_id1 => vec![(6..6, 6..9)],
commit_id2 => vec![(9..9, 12..15)],
}
);
}
#[test]
fn test_split_file_hunks_non_contiguous_ranges_insert_modify_masked() {
let commit_id1 = &CommitId::from_hex("111111");
let commit_id2 = &CommitId::from_hex("222222");
// insert middle line to first range, modify masked line (ambiguous)
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), /* 6..9, */ (commit_id2, 9..15)],
&ContentDiff::by_line(["1a\n1b\n0a\n2a\n2b\n", "1a\n1b\n1X\n0A\n2a\n2b\n"])
),
hashmap! {}
);
// insert middle line to second range, modify masked line (ambiguous)
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), /* 6..9, */ (commit_id2, 9..15)],
&ContentDiff::by_line(["1a\n1b\n0a\n2a\n2b\n", "1a\n1b\n0A\n2X\n2a\n2b\n"])
),
hashmap! {}
);
// insert middle lines to both ranges, modify masked line (ambiguous)
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), /* 6..9, */ (commit_id2, 9..15)],
&ContentDiff::by_line(["1a\n1b\n0a\n2a\n2b\n", "1a\n1b\n1X\n0A\n2X\n2a\n2b\n"])
),
hashmap! {}
);
}
#[test]
fn test_split_file_hunks_non_contiguous_ranges_delete() {
let commit_id1 = &CommitId::from_hex("111111");
let commit_id2 = &CommitId::from_hex("222222");
// delete middle line from first range
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), /* 6..9, */ (commit_id2, 9..15)],
&ContentDiff::by_line(["1a\n1b\n0a\n2a\n2b\n", "1a\n0a\n2a\n2b\n"])
),
hashmap! { commit_id1 => vec![(3..6, 3..3)] }
);
// delete middle line from second range
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), /* 6..9, */ (commit_id2, 9..15)],
&ContentDiff::by_line(["1a\n1b\n0a\n2a\n2b\n", "1a\n1b\n0a\n2b\n"])
),
hashmap! { commit_id2 => vec![(9..12, 9..9)] }
);
// delete middle lines from both ranges
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), /* 6..9, */ (commit_id2, 9..15)],
&ContentDiff::by_line(["1a\n1b\n0a\n2a\n2b\n", "1a\n0a\n2b\n"])
),
hashmap! {
commit_id1 => vec![(3..6, 3..3)],
commit_id2 => vec![(9..12, 6..6)],
}
);
}
#[test]
fn test_split_file_hunks_non_contiguous_ranges_delete_modify_masked() {
let commit_id1 = &CommitId::from_hex("111111");
let commit_id2 = &CommitId::from_hex("222222");
// delete middle line from first range, modify masked line (ambiguous)
assert_eq!(
split_file_hunks(
&[(commit_id1, 0..6), /* 6..9, */ (commit_id2, 9..15)],
&ContentDiff::by_line(["1a\n1b\n0a\n2a\n2b\n", "1a\n0A\n2a\n2b\n"])
),
hashmap! {}
);
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/test_signing_backend.rs | lib/src/test_signing_backend.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Generic APIs to work with cryptographic signatures created and verified by
//! various backends.
use crate::content_hash::blake2b_hash;
use crate::hex_util;
use crate::signing::SigStatus;
use crate::signing::SignError;
use crate::signing::SignResult;
use crate::signing::SigningBackend;
use crate::signing::Verification;
/// A test signing backend that uses a simple hash-based signature format.
#[derive(Debug)]
pub struct TestSigningBackend;

// Header starting every signature produced by `TestSigningBackend`; also used
// by `can_read` to recognize such signatures.
const PREFIX: &str = "--- JJ-TEST-SIGNATURE ---\nKEY: ";
impl SigningBackend for TestSigningBackend {
    fn name(&self) -> &'static str {
        "test"
    }

    fn can_read(&self, signature: &[u8]) -> bool {
        // Our signatures always begin with the fixed test prefix.
        signature.starts_with(PREFIX.as_bytes())
    }

    /// Produces `PREFIX + key + "\n" + hex(blake2b(key || data)) + "\n"`.
    fn sign(&self, data: &[u8], key: Option<&str>) -> SignResult<Vec<u8>> {
        let key = key.unwrap_or_default();
        // Hash the key together with the payload so a signature made with one
        // key never verifies under another.
        let mut body = Vec::with_capacity(key.len() + data.len());
        body.extend_from_slice(key.as_bytes());
        body.extend_from_slice(data);
        let hash = hex_util::encode_hex(&blake2b_hash(&body));
        Ok(format!("{PREFIX}{key}\n{hash}\n").into_bytes())
    }

    fn verify(&self, data: &[u8], signature: &[u8]) -> SignResult<Verification> {
        // The key is the first line following the fixed prefix.
        let Some(key) = signature
            .strip_prefix(PREFIX.as_bytes())
            .and_then(|s| s.splitn(2, |&b| b == b'\n').next())
        else {
            return Err(SignError::InvalidSignatureFormat);
        };
        let key = (!key.is_empty()).then_some(str::from_utf8(key).unwrap().to_owned());
        // Re-sign the data with the extracted key; the signature is good
        // exactly when the bytes match.
        let expected = self.sign(data, key.as_deref())?;
        let status = if expected == signature {
            SigStatus::Good
        } else {
            SigStatus::Bad
        };
        Ok(Verification {
            status,
            key,
            display: Some("test-display".into()),
        })
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/submodule_store.rs | lib/src/submodule_store.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::fmt::Debug;
/// Backend trait for submodule storage. Currently a stub: implementations
/// only identify themselves by name.
pub trait SubmoduleStore: Send + Sync + Debug {
    /// Name identifying this submodule store implementation.
    fn name(&self) -> &str;
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/operation.rs | lib/src/operation.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::cmp::Ordering;
use std::fmt::Debug;
use std::fmt::Error;
use std::fmt::Formatter;
use std::hash::Hash;
use std::hash::Hasher;
use std::iter;
use std::sync::Arc;
use pollster::FutureExt as _;
use crate::backend::CommitId;
use crate::op_store;
use crate::op_store::OpStore;
use crate::op_store::OpStoreResult;
use crate::op_store::OperationId;
use crate::op_store::OperationMetadata;
use crate::op_store::ViewId;
use crate::view::View;
/// A wrapper around [`op_store::Operation`] that defines additional methods and
/// stores a pointer to the `OpStore` the operation belongs to.
#[derive(Clone, serde::Serialize)]
pub struct Operation {
    // Not serialized: store handle is not part of the operation's data.
    #[serde(skip)]
    op_store: Arc<dyn OpStore>,
    id: OperationId,
    // Flattened so the underlying operation's fields serialize at top level.
    #[serde(flatten)]
    data: Arc<op_store::Operation>, // allow cheap clone
}
impl Debug for Operation {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        // Only show the id; the store handle and full data would be noisy.
        f.debug_struct("Operation").field("id", &self.id).finish()
    }
}
// Equality, ordering, and hashing are all based solely on the operation id;
// operations with equal ids are treated as identical.
impl PartialEq for Operation {
    fn eq(&self, other: &Self) -> bool {
        self.id == other.id
    }
}
impl Eq for Operation {}
impl Ord for Operation {
    fn cmp(&self, other: &Self) -> Ordering {
        self.id.cmp(&other.id)
    }
}
impl PartialOrd for Operation {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Delegate to `Ord` so the two orderings cannot disagree.
        Some(self.cmp(other))
    }
}
impl Hash for Operation {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.id.hash(state);
    }
}
impl Operation {
    /// Wraps already-stored operation `data` with its `id` and a handle to
    /// the `op_store` it belongs to.
    pub fn new(
        op_store: Arc<dyn OpStore>,
        id: OperationId,
        data: impl Into<Arc<op_store::Operation>>,
    ) -> Self {
        Self {
            op_store,
            id,
            data: data.into(),
        }
    }
    /// The operation store this operation belongs to.
    pub fn op_store(&self) -> Arc<dyn OpStore> {
        self.op_store.clone()
    }
    pub fn id(&self) -> &OperationId {
        &self.id
    }
    /// Id of the view describing the repo state after this operation.
    pub fn view_id(&self) -> &ViewId {
        &self.data.view_id
    }
    pub fn parent_ids(&self) -> &[OperationId] {
        &self.data.parents
    }
    /// Loads the parent operations from the store. Items are results since
    /// each parent is read (synchronously, via `block_on`) on demand.
    pub fn parents(&self) -> impl ExactSizeIterator<Item = OpStoreResult<Self>> {
        let op_store = &self.op_store;
        self.data.parents.iter().map(|parent_id| {
            let data = op_store.read_operation(parent_id).block_on()?;
            Ok(Self::new(op_store.clone(), parent_id.clone(), data))
        })
    }
    /// Loads the view associated with this operation from the store.
    pub fn view(&self) -> OpStoreResult<View> {
        let data = self.op_store.read_view(&self.data.view_id).block_on()?;
        Ok(View::new(data))
    }
    pub fn metadata(&self) -> &OperationMetadata {
        &self.data.metadata
    }
    /// Returns true if predecessors are recorded in this operation.
    ///
    /// This returns false only if the operation was written by jj < 0.30.
    pub fn stores_commit_predecessors(&self) -> bool {
        self.data.commit_predecessors.is_some()
    }
    /// Returns predecessors of the specified commit if recorded.
    pub fn predecessors_for_commit(&self, commit_id: &CommitId) -> Option<&[CommitId]> {
        let map = self.data.commit_predecessors.as_ref()?;
        Some(map.get(commit_id)?)
    }
    /// Iterates all commit ids referenced by this operation ignoring the view.
    ///
    /// Use this in addition to [`View::all_referenced_commit_ids()`] to build
    /// commit index from scratch. The predecessor commit ids are also included,
    /// which ensures that the old commits to be returned by
    /// [`Self::predecessors_for_commit()`] are still reachable.
    ///
    /// The iteration order is unspecified.
    pub fn all_referenced_commit_ids(&self) -> impl Iterator<Item = &CommitId> {
        // Both the new (rewritten) ids and their old predecessor ids count
        // as referenced.
        self.data.commit_predecessors.iter().flat_map(|map| {
            map.iter()
                .flat_map(|(new_id, old_ids)| iter::once(new_id).chain(old_ids))
        })
    }
    /// The raw, store-level operation data.
    pub fn store_operation(&self) -> &op_store::Operation {
        &self.data
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/gpg_signing.rs | lib/src/gpg_signing.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::ffi::OsString;
use std::fmt::Debug;
use std::io;
use std::io::Write as _;
use std::process::Command;
use std::process::ExitStatus;
use std::process::Stdio;
use thiserror::Error;
use crate::config::ConfigGetError;
use crate::settings::UserSettings;
use crate::signing::SigStatus;
use crate::signing::SignError;
use crate::signing::SigningBackend;
use crate::signing::Verification;
// Search for one of the:
// [GNUPG:] GOODSIG <long keyid> <primary uid..>
// [GNUPG:] EXPKEYSIG <long keyid> <primary uid..>
// [GNUPG:] NO_PUBKEY <long keyid>
// [GNUPG:] BADSIG <long keyid> <primary uid..>
// in the output from --status-fd=1
// Assume signature is invalid if none of the above was found
/// Parses `[GNUPG:]` status lines from gpg's `--status-fd=1` output into a
/// [`Verification`]. The first recognized status line wins.
fn parse_gpg_verify_output(
    output: &[u8],
    allow_expired_keys: bool,
) -> Result<Verification, SignError> {
    output
        .split(|&b| b == b'\n')
        .filter_map(|line| line.strip_prefix(b"[GNUPG:] "))
        .find_map(|line| {
            // Status lines look like "<KEYWORD> <long keyid> <primary uid..>";
            // the uid may itself contain spaces, hence splitn(3).
            let mut parts = line.splitn(3, |&b| b == b' ').fuse();
            let status = match parts.next()? {
                b"GOODSIG" => SigStatus::Good,
                b"EXPKEYSIG" => {
                    if allow_expired_keys {
                        SigStatus::Good
                    } else {
                        SigStatus::Bad
                    }
                }
                b"NO_PUBKEY" => SigStatus::Unknown,
                b"BADSIG" => SigStatus::Bad,
                // gpgsm reports a missing key as "ERROR verify.findkey <code>"
                // rather than NO_PUBKEY (see gpgsm tests below).
                b"ERROR" => match parts.next()? {
                    b"verify.findkey" => return Some(Verification::unknown()),
                    _ => return None,
                },
                _ => return None,
            };
            let key = parts
                .next()
                .and_then(|bs| str::from_utf8(bs).ok())
                .map(|value| value.trim().to_owned());
            let display = parts
                .next()
                .and_then(|bs| str::from_utf8(bs).ok())
                .map(|value| value.trim().to_owned());
            Some(Verification::new(status, key, display))
        })
        // No recognized status line at all: treat as an invalid signature.
        .ok_or(SignError::InvalidSignatureFormat)
}
/// Runs a signing command, feeding `input` to its stdin and returning its
/// captured stdout.
///
/// A non-zero exit status is reported as [`GpgError::Command`] together with
/// the captured stderr; stdin write errors are only surfaced if the command
/// itself succeeded.
fn run_sign_command(command: &mut Command, input: &[u8]) -> Result<Vec<u8>, GpgError> {
    tracing::info!(?command, "running GPG signing command");
    let process = command.stderr(Stdio::piped()).spawn()?;
    // `wait_with_output()` below closes stdin before waiting, so we can write
    // first and defer checking the write result.
    let write_result = process.stdin.as_ref().unwrap().write_all(input);
    let output = process.wait_with_output()?;
    tracing::info!(?command, ?output.status, "GPG signing command exited");
    if output.status.success() {
        write_result?;
        Ok(output.stdout)
    } else {
        Err(GpgError::Command {
            exit_status: output.status,
            stderr: String::from_utf8_lossy(&output.stderr).trim_end().into(),
        })
    }
}
/// Runs a verify command, feeding `input` to its stdin and returning its
/// captured stdout.
///
/// Unlike [`run_sign_command`], stderr is discarded, the exit status is
/// intentionally ignored (callers parse the `--status-fd=1` output instead),
/// and a broken pipe while writing the input is tolerated: gpg exits early on
/// malformed signatures, so the write side may fail even though that outcome
/// is meaningful.
fn run_verify_command(command: &mut Command, input: &[u8]) -> Result<Vec<u8>, GpgError> {
    // Fixed copy-paste from run_sign_command(): log this as a verify command,
    // not a signing command.
    tracing::info!(?command, "running GPG verify command");
    let process = command.stderr(Stdio::null()).spawn()?;
    let write_result = process.stdin.as_ref().unwrap().write_all(input);
    let output = process.wait_with_output()?;
    tracing::info!(?command, ?output.status, "GPG verify command exited");
    match write_result {
        Ok(()) => Ok(output.stdout),
        // If the signature format is invalid, gpg will terminate early. Writing
        // more input data will fail in that case.
        Err(err) if err.kind() == io::ErrorKind::BrokenPipe => Ok(vec![]),
        Err(err) => Err(err.into()),
    }
}
/// OpenPGP signing backend that shells out to an external `gpg` program.
#[derive(Debug)]
pub struct GpgBackend {
    // Path or name of the gpg executable.
    program: OsString,
    // When verifying, treat EXPKEYSIG results as good.
    allow_expired_keys: bool,
    // Extra command-line arguments; primarily for tests.
    extra_args: Vec<OsString>,
    // Key used when the caller doesn't specify one (the configured user email).
    default_key: String,
}
#[derive(Debug, Error)]
pub enum GpgError {
    /// The gpg process ran but exited unsuccessfully.
    #[error("GPG failed with {exit_status}:\n{stderr}")]
    Command {
        exit_status: ExitStatus,
        stderr: String,
    },
    /// The gpg process could not be spawned or written to.
    #[error("Failed to run GPG")]
    Io(#[from] std::io::Error),
}
impl From<GpgError> for SignError {
    fn from(e: GpgError) -> Self {
        Self::Backend(Box::new(e))
    }
}
impl GpgBackend {
    pub fn new(program: OsString, allow_expired_keys: bool, default_key: String) -> Self {
        Self {
            program,
            allow_expired_keys,
            extra_args: vec![],
            default_key,
        }
    }
    /// Primarily intended for testing
    pub fn with_extra_args(mut self, args: &[OsString]) -> Self {
        self.extra_args.extend_from_slice(args);
        self
    }
    /// Builds a backend from the `signing.backends.gpg.*` settings; the
    /// default signing key is the configured user email.
    pub fn from_settings(settings: &UserSettings) -> Result<Self, ConfigGetError> {
        let program = settings.get_string("signing.backends.gpg.program")?;
        let allow_expired_keys = settings.get_bool("signing.backends.gpg.allow-expired-keys")?;
        let default_key = settings.user_email().to_owned();
        Ok(Self::new(program.into(), allow_expired_keys, default_key))
    }
    /// Creates a `Command` for the configured program with piped
    /// stdin/stdout and any extra arguments applied.
    fn create_command(&self) -> Command {
        let mut command = Command::new(&self.program);
        // Hide console window on Windows (https://stackoverflow.com/a/60958956)
        #[cfg(windows)]
        {
            use std::os::windows::process::CommandExt as _;
            const CREATE_NO_WINDOW: u32 = 0x08000000;
            command.creation_flags(CREATE_NO_WINDOW);
        }
        command
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .args(&self.extra_args);
        command
    }
}
impl SigningBackend for GpgBackend {
    fn name(&self) -> &'static str {
        "gpg"
    }
    fn can_read(&self, signature: &[u8]) -> bool {
        // ASCII-armored OpenPGP signatures start with this marker.
        signature.starts_with(b"-----BEGIN PGP SIGNATURE-----")
    }
    fn sign(&self, data: &[u8], key: Option<&str>) -> Result<Vec<u8>, SignError> {
        let key = key.unwrap_or(&self.default_key);
        // -a: ASCII armor, -b: detached signature, -u: user id/key to sign with.
        Ok(run_sign_command(
            self.create_command().args(["-abu", key]),
            data,
        )?)
    }
    fn verify(&self, data: &[u8], signature: &[u8]) -> Result<Verification, SignError> {
        // gpg --verify takes the detached signature as a file argument, so
        // write it to a temp file and stream the signed data on stdin ("-").
        let mut signature_file = tempfile::Builder::new()
            .prefix(".jj-gpg-sig-tmp-")
            .tempfile()
            .map_err(GpgError::Io)?;
        signature_file.write_all(signature).map_err(GpgError::Io)?;
        signature_file.flush().map_err(GpgError::Io)?;
        let sig_path = signature_file.into_temp_path();
        let output = run_verify_command(
            self.create_command()
                .args(["--keyid-format=long", "--status-fd=1", "--verify"])
                .arg(&sig_path)
                .arg("-"),
            data,
        )?;
        // Machine-readable result lines arrive on stdout via --status-fd=1.
        parse_gpg_verify_output(&output, self.allow_expired_keys)
    }
}
/// S/MIME signing backend that shells out to an external `gpgsm` program.
///
/// NOTE(review): nearly identical to `GpgBackend` except for config keys, the
/// signature prefix, and verify arguments; the duplication could be factored
/// into a shared helper.
#[derive(Debug)]
pub struct GpgsmBackend {
    program: OsString,
    allow_expired_keys: bool,
    extra_args: Vec<OsString>,
    default_key: String,
}
impl GpgsmBackend {
    pub fn new(program: OsString, allow_expired_keys: bool, default_key: String) -> Self {
        Self {
            program,
            allow_expired_keys,
            extra_args: vec![],
            default_key,
        }
    }
    /// Primarily intended for testing
    pub fn with_extra_args(mut self, args: &[OsString]) -> Self {
        self.extra_args.extend_from_slice(args);
        self
    }
    /// Builds a backend from the `signing.backends.gpgsm.*` settings; the
    /// default signing key is the configured user email.
    pub fn from_settings(settings: &UserSettings) -> Result<Self, ConfigGetError> {
        let program = settings.get_string("signing.backends.gpgsm.program")?;
        let allow_expired_keys = settings.get_bool("signing.backends.gpgsm.allow-expired-keys")?;
        let default_key = settings.user_email().to_owned();
        Ok(Self::new(program.into(), allow_expired_keys, default_key))
    }
    /// Creates a `Command` for the configured program with piped
    /// stdin/stdout and any extra arguments applied.
    fn create_command(&self) -> Command {
        let mut command = Command::new(&self.program);
        // Hide console window on Windows (https://stackoverflow.com/a/60958956)
        #[cfg(windows)]
        {
            use std::os::windows::process::CommandExt as _;
            const CREATE_NO_WINDOW: u32 = 0x08000000;
            command.creation_flags(CREATE_NO_WINDOW);
        }
        command
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .args(&self.extra_args);
        command
    }
}
impl SigningBackend for GpgsmBackend {
    fn name(&self) -> &'static str {
        "gpgsm"
    }
    fn can_read(&self, signature: &[u8]) -> bool {
        signature.starts_with(b"-----BEGIN SIGNED MESSAGE-----")
    }
    fn sign(&self, data: &[u8], key: Option<&str>) -> Result<Vec<u8>, SignError> {
        let key = key.unwrap_or(&self.default_key);
        // -a: ASCII armor, -b: detached signature, -u: user id/key to sign with.
        Ok(run_sign_command(
            self.create_command().args(["-abu", key]),
            data,
        )?)
    }
    fn verify(&self, data: &[u8], signature: &[u8]) -> Result<Verification, SignError> {
        // Like GpgBackend::verify, but without --keyid-format (gpgsm-specific
        // invocation): signature goes into a temp file, data on stdin ("-").
        let mut signature_file = tempfile::Builder::new()
            .prefix(".jj-gpgsm-sig-tmp-")
            .tempfile()
            .map_err(GpgError::Io)?;
        signature_file.write_all(signature).map_err(GpgError::Io)?;
        signature_file.flush().map_err(GpgError::Io)?;
        let sig_path = signature_file.into_temp_path();
        let output = run_verify_command(
            self.create_command()
                .args(["--status-fd=1", "--verify"])
                .arg(&sig_path)
                .arg("-"),
            data,
        )?;
        parse_gpg_verify_output(&output, self.allow_expired_keys)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // These tests exercise only the status-line parser; no gpg binary is run.
    #[test]
    fn gpg_verify_invalid_signature_format() {
        use assert_matches::assert_matches;
        assert_matches!(
            parse_gpg_verify_output(b"", true),
            Err(SignError::InvalidSignatureFormat)
        );
    }
    #[test]
    fn gpg_verify_bad_signature() {
        assert_eq!(
            parse_gpg_verify_output(b"[GNUPG:] BADSIG 123 456", true).unwrap(),
            Verification::new(SigStatus::Bad, Some("123".into()), Some("456".into()))
        );
    }
    #[test]
    fn gpg_verify_unknown_signature() {
        assert_eq!(
            parse_gpg_verify_output(b"[GNUPG:] NO_PUBKEY 123", true).unwrap(),
            Verification::new(SigStatus::Unknown, Some("123".into()), None)
        );
    }
    #[test]
    fn gpg_verify_good_signature() {
        assert_eq!(
            parse_gpg_verify_output(b"[GNUPG:] GOODSIG 123 456", true).unwrap(),
            Verification::new(SigStatus::Good, Some("123".into()), Some("456".into()))
        );
    }
    // EXPKEYSIG maps to Good or Bad depending on allow_expired_keys.
    #[test]
    fn gpg_verify_expired_signature() {
        assert_eq!(
            parse_gpg_verify_output(b"[GNUPG:] EXPKEYSIG 123 456", true).unwrap(),
            Verification::new(SigStatus::Good, Some("123".into()), Some("456".into()))
        );
        assert_eq!(
            parse_gpg_verify_output(b"[GNUPG:] EXPKEYSIG 123 456", false).unwrap(),
            Verification::new(SigStatus::Bad, Some("123".into()), Some("456".into()))
        );
    }
    #[test]
    fn gpgsm_verify_unknown_signature() {
        assert_eq!(
            parse_gpg_verify_output(b"[GNUPG:] ERROR verify.findkey 50331657", true).unwrap(),
            Verification::unknown(),
        );
    }
    // Other ERROR subcodes are not recognized and fall through to
    // InvalidSignatureFormat.
    #[test]
    fn gpgsm_verify_invalid_signature_format() {
        use assert_matches::assert_matches;
        assert_matches!(
            parse_gpg_verify_output(b"[GNUPG:] ERROR verify.leave 150995087", true),
            Err(SignError::InvalidSignatureFormat)
        );
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/transaction.rs | lib/src/transaction.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::sync::Arc;
use itertools::Itertools as _;
use pollster::FutureExt as _;
use thiserror::Error;
use crate::backend::Timestamp;
use crate::dag_walk;
use crate::index::IndexStoreError;
use crate::index::ReadonlyIndex;
use crate::op_heads_store::OpHeadsStore;
use crate::op_heads_store::OpHeadsStoreError;
use crate::op_store;
use crate::op_store::OpStoreError;
use crate::op_store::OperationMetadata;
use crate::op_store::TimestampRange;
use crate::operation::Operation;
use crate::repo::MutableRepo;
use crate::repo::ReadonlyRepo;
use crate::repo::Repo as _;
use crate::repo::RepoLoader;
use crate::repo::RepoLoaderError;
use crate::settings::UserSettings;
use crate::view::View;
/// Error from attempts to write and publish transaction.
// All variants share the same user-facing message; the wrapped source error
// provides the detail.
#[derive(Debug, Error)]
#[error("Failed to commit new operation")]
pub enum TransactionCommitError {
    IndexStore(#[from] IndexStoreError),
    OpHeadsStore(#[from] OpHeadsStoreError),
    OpStore(#[from] OpStoreError),
}
/// An in-memory representation of a repo and any changes being made to it.
///
/// Within the scope of a transaction, changes to the repository are made
/// in-memory to `mut_repo` and published to the repo backend when
/// [`Transaction::commit`] is called. When a transaction is committed, it
/// becomes atomically visible as an Operation in the op log that represents the
/// transaction itself, and as a View that represents the state of the repo
/// after the transaction. This is similar to how a Commit represents a change
/// to the contents of the repository and a Tree represents the repository's
/// contents after the change. See the documentation for [`op_store::Operation`]
/// and [`op_store::View`] for more information.
pub struct Transaction {
    mut_repo: MutableRepo,
    // Operations this transaction is based on; starts as the base repo's
    // operation and grows via merge_operation().
    parent_ops: Vec<Operation>,
    op_metadata: OperationMetadata,
    // Overrides the operation end time when set (from user settings).
    end_time: Option<Timestamp>,
}
impl Transaction {
    /// Starts a transaction on top of `mut_repo`'s base operation.
    pub fn new(mut_repo: MutableRepo, user_settings: &UserSettings) -> Self {
        let parent_ops = vec![mut_repo.base_repo().operation().clone()];
        let op_metadata = create_op_metadata(user_settings, "".to_string(), false);
        let end_time = user_settings.operation_timestamp();
        Self {
            mut_repo,
            parent_ops,
            op_metadata,
            end_time,
        }
    }
    pub fn base_repo(&self) -> &Arc<ReadonlyRepo> {
        self.mut_repo.base_repo()
    }
    /// Attaches a key-value tag to the operation's metadata.
    pub fn set_tag(&mut self, key: String, value: String) {
        self.op_metadata.tags.insert(key, value);
    }
    pub fn repo(&self) -> &MutableRepo {
        &self.mut_repo
    }
    pub fn repo_mut(&mut self) -> &mut MutableRepo {
        &mut self.mut_repo
    }
    /// Merges another operation into this transaction: loads the repos at the
    /// closest common ancestor operation and at `other_op`, three-way merges
    /// their views, and records `other_op` as an additional parent.
    pub fn merge_operation(&mut self, other_op: Operation) -> Result<(), RepoLoaderError> {
        let ancestor_op = dag_walk::closest_common_node_ok(
            self.parent_ops.iter().cloned().map(Ok),
            [Ok(other_op.clone())],
            |op: &Operation| op.id().clone(),
            |op: &Operation| op.parents().collect_vec(),
        )?
        .unwrap();
        let repo_loader = self.base_repo().loader();
        let base_repo = repo_loader.load_at(&ancestor_op)?;
        let other_repo = repo_loader.load_at(&other_op)?;
        self.parent_ops.push(other_op);
        let merged_repo = self.repo_mut();
        merged_repo.merge(&base_repo, &other_repo)?;
        Ok(())
    }
    /// Marks the resulting operation as a working-copy snapshot.
    pub fn set_is_snapshot(&mut self, is_snapshot: bool) {
        self.op_metadata.is_snapshot = is_snapshot;
    }
    /// Writes the transaction to the operation store and publishes it.
    pub fn commit(
        self,
        description: impl Into<String>,
    ) -> Result<Arc<ReadonlyRepo>, TransactionCommitError> {
        self.write(description)?.publish()
    }
    /// Writes the transaction to the operation store, but does not publish it.
    /// That means that a repo can be loaded at the operation, but the
    /// operation will not be seen when loading the repo at head.
    pub fn write(
        mut self,
        description: impl Into<String>,
    ) -> Result<UnpublishedOperation, TransactionCommitError> {
        let mut_repo = self.mut_repo;
        // TODO: Should we instead just do the rebasing here if necessary?
        assert!(
            !mut_repo.has_rewrites(),
            "BUG: Descendants have not been rebased after the last rewrites."
        );
        let base_repo = mut_repo.base_repo().clone();
        let (mut_index, view, predecessors) = mut_repo.consume();
        let operation = {
            // The view must be written before the operation that references it.
            let view_id = base_repo
                .op_store()
                .write_view(view.store_view())
                .block_on()?;
            self.op_metadata.description = description.into();
            self.op_metadata.time.end = self.end_time.unwrap_or_else(Timestamp::now);
            let parents = self.parent_ops.iter().map(|op| op.id().clone()).collect();
            let store_operation = op_store::Operation {
                view_id,
                parents,
                metadata: self.op_metadata,
                commit_predecessors: Some(predecessors),
            };
            let new_op_id = base_repo
                .op_store()
                .write_operation(&store_operation)
                .block_on()?;
            Operation::new(base_repo.op_store().clone(), new_op_id, store_operation)
        };
        let index = base_repo.index_store().write_index(mut_index, &operation)?;
        let unpublished = UnpublishedOperation::new(base_repo.loader(), operation, view, index);
        Ok(unpublished)
    }
}
/// Builds [`OperationMetadata`] for a new operation: start and end time are
/// both set to the configured operation timestamp (or "now"), and host/user
/// names come from the user settings. Tags start out empty.
pub fn create_op_metadata(
    user_settings: &UserSettings,
    description: String,
    is_snapshot: bool,
) -> OperationMetadata {
    let timestamp = user_settings
        .operation_timestamp()
        .unwrap_or_else(Timestamp::now);
    OperationMetadata {
        // A zero-length time range until the operation actually finishes.
        time: TimestampRange {
            start: timestamp,
            end: timestamp,
        },
        description,
        hostname: user_settings.operation_hostname().to_owned(),
        username: user_settings.operation_username().to_owned(),
        is_snapshot,
        tags: Default::default(),
    }
}
/// An unpublished operation in the store.
///
/// An Operation which has been written to the operation store but not
/// published. The repo can be loaded at an unpublished Operation, but the
/// Operation will not be visible in the op log if the repo is loaded at head.
///
/// Either [`Self::publish`] or [`Self::leave_unpublished`] must be called to
/// finish the operation.
#[must_use = "Either publish() or leave_unpublished() must be called to finish the operation."]
pub struct UnpublishedOperation {
    // Needed to advance the op heads when the operation is published.
    op_heads_store: Arc<dyn OpHeadsStore>,
    // Repo loaded at the (not yet published) operation.
    repo: Arc<ReadonlyRepo>,
}
impl UnpublishedOperation {
    fn new(
        repo_loader: &RepoLoader,
        operation: Operation,
        view: View,
        index: Box<dyn ReadonlyIndex>,
    ) -> Self {
        Self {
            op_heads_store: repo_loader.op_heads_store().clone(),
            repo: repo_loader.create_from(operation, view, index),
        }
    }
    pub fn operation(&self) -> &Operation {
        self.repo.operation()
    }
    /// Makes the operation visible: replaces its parents with this operation
    /// in the op heads, under the op-heads lock.
    pub fn publish(self) -> Result<Arc<ReadonlyRepo>, TransactionCommitError> {
        let _lock = self.op_heads_store.lock().block_on()?;
        self.op_heads_store
            .update_op_heads(self.operation().parent_ids(), self.operation().id())
            .block_on()?;
        Ok(self.repo)
    }
    /// Finishes without publishing; the operation stays invisible at head.
    pub fn leave_unpublished(self) -> Arc<ReadonlyRepo> {
        self.repo
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/bisect.rs | lib/src/bisect.rs | // Copyright 2025 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Bisect a range of commits.
use std::collections::HashSet;
use std::sync::Arc;
use itertools::Itertools as _;
use thiserror::Error;
use crate::backend::CommitId;
use crate::commit::Commit;
use crate::repo::Repo;
use crate::revset::ResolvedRevsetExpression;
use crate::revset::RevsetEvaluationError;
use crate::revset::RevsetExpression;
use crate::revset::RevsetIteratorExt as _;
/// An error that occurred while bisecting
#[derive(Error, Debug)]
pub enum BisectionError {
    /// Failed to evaluate a revset
    #[error("Failed to evaluate a revset involved in bisection")]
    RevsetEvaluationError(#[from] RevsetEvaluationError),
}
/// Indicates whether a given commit was good, bad, or if it could not be
/// determined.
// Consumed by `Bisector::mark` to record the outcome of testing a commit.
#[derive(Debug)]
pub enum Evaluation {
    /// The commit was good
    Good,
    /// The commit was bad
    Bad,
    /// It could not be determined whether the commit was good or bad
    Skip,
}
impl Evaluation {
/// Maps the current evaluation to its inverse.
///
/// Maps `Good`->`Bad`, `Bad`->`Good`, and keeps `Skip` as is.
pub fn invert(self) -> Self {
use Evaluation::*;
match self {
Good => Bad,
Bad => Good,
Skip => Skip,
}
}
}
/// Performs bisection to find the first bad commit in a range.
pub struct Bisector<'repo> {
    repo: &'repo dyn Repo,
    // The full range being bisected; every candidate comes from this set.
    input_range: Arc<ResolvedRevsetExpression>,
    // Commits the user has classified so far; the three sets are disjoint.
    good_commits: HashSet<CommitId>,
    bad_commits: HashSet<CommitId>,
    skipped_commits: HashSet<CommitId>,
}
/// The result of bisection.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum BisectionResult {
    /// Found the first bad commit(s). It should be exactly one unless the input
    /// range had multiple disjoint heads.
    Found(Vec<Commit>),
    /// Could not determine the first bad commit because it was in a
    /// skipped range.
    Indeterminate,
}
/// The next bisection step.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum NextStep {
    /// The commit must be evaluated.
    Evaluate(Commit),
    /// Bisection is complete.
    Done(BisectionResult),
}
impl<'repo> Bisector<'repo> {
    /// Create a new bisector. The range's heads are assumed to be bad.
    /// Parents of the range's roots are assumed to be good.
    pub fn new(
        repo: &'repo dyn Repo,
        input_range: Arc<ResolvedRevsetExpression>,
    ) -> Result<Self, BisectionError> {
        let bad_commits = input_range.heads().evaluate(repo)?.iter().try_collect()?;
        Ok(Self {
            repo,
            input_range,
            bad_commits,
            good_commits: HashSet::new(),
            skipped_commits: HashSet::new(),
        })
    }
    /// Mark a commit good.
    ///
    /// Panics if the commit was already marked bad or skipped (the sets must
    /// stay disjoint).
    pub fn mark_good(&mut self, id: CommitId) {
        assert!(!self.bad_commits.contains(&id));
        assert!(!self.skipped_commits.contains(&id));
        self.good_commits.insert(id);
    }
    /// Mark a commit bad.
    ///
    /// Panics if the commit was already marked good or skipped.
    pub fn mark_bad(&mut self, id: CommitId) {
        assert!(!self.good_commits.contains(&id));
        assert!(!self.skipped_commits.contains(&id));
        self.bad_commits.insert(id);
    }
    /// Mark a commit as skipped (cannot be determined if it's good or bad).
    ///
    /// Panics if the commit was already marked good or bad.
    pub fn mark_skipped(&mut self, id: CommitId) {
        assert!(!self.good_commits.contains(&id));
        assert!(!self.bad_commits.contains(&id));
        self.skipped_commits.insert(id);
    }
    /// Mark a commit as good, bad, or skipped, according to the outcome in
    /// `evaluation`.
    pub fn mark(&mut self, id: CommitId, evaluation: Evaluation) {
        match evaluation {
            Evaluation::Good => self.mark_good(id),
            Evaluation::Bad => self.mark_bad(id),
            Evaluation::Skip => self.mark_skipped(id),
        }
    }
    /// The commits that were marked good.
    pub fn good_commits(&self) -> &HashSet<CommitId> {
        &self.good_commits
    }
    /// The commits that were marked bad.
    pub fn bad_commits(&self) -> &HashSet<CommitId> {
        &self.bad_commits
    }
    /// The commits that were skipped.
    pub fn skipped_commits(&self) -> &HashSet<CommitId> {
        &self.skipped_commits
    }
    /// Find the next commit to evaluate, or determine that there are no more
    /// steps.
    pub fn next_step(&mut self) -> Result<NextStep, BisectionError> {
        let good_expr = RevsetExpression::commits(self.good_commits.iter().cloned().collect());
        let bad_expr = RevsetExpression::commits(self.bad_commits.iter().cloned().collect());
        let skipped_expr =
            RevsetExpression::commits(self.skipped_commits.iter().cloned().collect());
        // Intersect the input range with the current bad range and then bisect it to
        // find the next commit to evaluate.
        // Skipped revisions are simply subtracted from the set.
        // TODO: Handle long ranges of skipped revisions better
        let to_evaluate_expr = self
            .input_range
            .intersection(&good_expr.heads().range(&bad_expr.roots()))
            .minus(&bad_expr)
            .minus(&skipped_expr)
            .bisect()
            .latest(1);
        let to_evaluate_set = to_evaluate_expr.evaluate(self.repo)?;
        if let Some(commit) = to_evaluate_set
            .iter()
            .commits(self.repo.store())
            .next()
            .transpose()?
        {
            Ok(NextStep::Evaluate(commit))
        } else {
            // No candidates left: the roots of the bad set are the first bad
            // commits, unless the bad set is empty (all ambiguity skipped).
            let bad_roots = bad_expr.roots().evaluate(self.repo)?;
            let bad_commits: Vec<_> = bad_roots.iter().commits(self.repo.store()).try_collect()?;
            if bad_commits.is_empty() {
                Ok(NextStep::Done(BisectionResult::Indeterminate))
            } else {
                Ok(NextStep::Done(BisectionResult::Found(bad_commits)))
            }
        }
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/default_index/revset_engine.rs | lib/src/default_index/revset_engine.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cell::RefCell;
use std::cmp::Ordering;
use std::cmp::Reverse;
use std::collections::BinaryHeap;
use std::collections::HashSet;
use std::convert::Infallible;
use std::fmt;
use std::iter;
use std::ops::Range;
use std::rc::Rc;
use std::sync::Arc;
use bstr::BString;
use futures::StreamExt as _;
use itertools::Itertools as _;
use pollster::FutureExt as _;
use super::composite::AsCompositeIndex;
use super::composite::CompositeIndex;
use super::entry::GlobalCommitPosition;
use super::rev_walk::EagerRevWalk;
use super::rev_walk::PeekableRevWalk;
use super::rev_walk::RevWalk;
use super::rev_walk::RevWalkBuilder;
use super::revset_graph_iterator::RevsetGraphWalk;
use crate::backend::BackendResult;
use crate::backend::ChangeId;
use crate::backend::CommitId;
use crate::backend::MillisSinceEpoch;
use crate::commit::Commit;
use crate::conflict_labels::ConflictLabels;
use crate::conflicts::MaterializedTreeValue;
use crate::conflicts::materialize_tree_value;
use crate::diff::ContentDiff;
use crate::diff::DiffHunkKind;
use crate::files;
use crate::graph::GraphNode;
use crate::matchers::FilesMatcher;
use crate::matchers::Matcher;
use crate::matchers::Visit;
use crate::merge::Merge;
use crate::object_id::ObjectId as _;
use crate::repo_path::RepoPath;
use crate::revset::GENERATION_RANGE_FULL;
use crate::revset::ResolvedExpression;
use crate::revset::ResolvedPredicateExpression;
use crate::revset::Revset;
use crate::revset::RevsetContainingFn;
use crate::revset::RevsetEvaluationError;
use crate::revset::RevsetFilterPredicate;
use crate::rewrite;
use crate::store::Store;
use crate::str_util::StringMatcher;
use crate::tree_merge::MergeOptions;
use crate::tree_merge::resolve_file_values;
use crate::union_find;
// Fallible membership predicate evaluated per commit position.
type BoxedPredicateFn<'a> = Box<
    dyn FnMut(&CompositeIndex, GlobalCommitPosition) -> Result<bool, RevsetEvaluationError> + 'a,
>;
// Boxed walk over global commit positions within a composite index.
pub(super) type BoxedRevWalk<'a> = Box<
    dyn RevWalk<CompositeIndex, Item = Result<GlobalCommitPosition, RevsetEvaluationError>> + 'a,
>;
trait ToPredicateFn: fmt::Debug {
    /// Creates function that tests if the given entry is included in the set.
    ///
    /// The predicate function is evaluated in order of `RevsetIterator`.
    fn to_predicate_fn<'a>(&self) -> BoxedPredicateFn<'a>
    where
        Self: 'a;
}
impl<T: ToPredicateFn + ?Sized> ToPredicateFn for Box<T> {
fn to_predicate_fn<'a>(&self) -> BoxedPredicateFn<'a>
where
Self: 'a,
{
<T as ToPredicateFn>::to_predicate_fn(self)
}
}
trait InternalRevset: fmt::Debug + ToPredicateFn {
// All revsets currently iterate in order of descending index position
fn positions<'a>(&self) -> BoxedRevWalk<'a>
where
Self: 'a;
}
impl<T: InternalRevset + ?Sized> InternalRevset for Box<T> {
fn positions<'a>(&self) -> BoxedRevWalk<'a>
where
Self: 'a,
{
<T as InternalRevset>::positions(self)
}
}
/// Evaluated revset: a boxed internal revset paired with the index it was
/// evaluated against.
pub(super) struct RevsetImpl<I> {
    inner: Box<dyn InternalRevset>,
    index: I,
}
impl<I: AsCompositeIndex + Clone> RevsetImpl<I> {
    /// Wraps an evaluated internal revset together with its backing index.
    fn new(inner: Box<dyn InternalRevset>, index: I) -> Self {
        Self { inner, index }
    }
    /// Iterates global commit positions by attaching the composite index to
    /// the detached inner walk.
    fn positions(
        &self,
    ) -> impl Iterator<Item = Result<GlobalCommitPosition, RevsetEvaluationError>> {
        self.inner.positions().attach(self.index.as_composite())
    }
    /// Iterates graph nodes for the revset's commits. `skip_transitive_edges`
    /// is forwarded to `RevsetGraphWalk`.
    pub fn iter_graph_impl(
        &self,
        skip_transitive_edges: bool,
    ) -> impl Iterator<Item = Result<GraphNode<CommitId>, RevsetEvaluationError>> + use<I> {
        let index = self.index.clone();
        let walk = self.inner.positions();
        let mut graph_walk = RevsetGraphWalk::new(walk, skip_transitive_edges);
        iter::from_fn(move || graph_walk.next(index.as_composite()))
    }
}
impl<I> fmt::Debug for RevsetImpl<I> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `index` is omitted: `I` has no `Debug` bound here.
        f.debug_struct("RevsetImpl")
            .field("inner", &self.inner)
            .finish_non_exhaustive()
    }
}
impl<I: AsCompositeIndex + Clone> Revset for RevsetImpl<I> {
    /// Iterates commit ids by resolving each walked position to its index
    /// entry.
    fn iter<'a>(&self) -> Box<dyn Iterator<Item = Result<CommitId, RevsetEvaluationError>> + 'a>
    where
        Self: 'a,
    {
        let index = self.index.clone();
        let mut walk = self
            .inner
            .positions()
            .map(|index, pos| Ok(index.commits().entry_by_pos(pos?).commit_id()));
        Box::new(iter::from_fn(move || walk.next(index.as_composite())))
    }
    /// Iterates `(commit_id, change_id)` pairs read from each walked entry.
    fn commit_change_ids<'a>(
        &self,
    ) -> Box<dyn Iterator<Item = Result<(CommitId, ChangeId), RevsetEvaluationError>> + 'a>
    where
        Self: 'a,
    {
        let index = self.index.clone();
        let mut walk = self.inner.positions().map(|index, pos| {
            let entry = index.commits().entry_by_pos(pos?);
            Ok((entry.commit_id(), entry.change_id()))
        });
        Box::new(iter::from_fn(move || walk.next(index.as_composite())))
    }
    fn iter_graph<'a>(
        &self,
    ) -> Box<dyn Iterator<Item = Result<GraphNode<CommitId>, RevsetEvaluationError>> + 'a>
    where
        Self: 'a,
    {
        let skip_transitive_edges = true;
        Box::new(self.iter_graph_impl(skip_transitive_edges))
    }
    /// Empty iff the walk yields nothing at all; a leading error item counts
    /// as non-empty.
    fn is_empty(&self) -> bool {
        self.positions().next().is_none()
    }
    fn count_estimate(&self) -> Result<(usize, Option<usize>), RevsetEvaluationError> {
        if cfg!(feature = "testing") {
            // Exercise the estimation feature in tests. (If we ever have a Revset
            // implementation in production code that returns estimates, we can probably
            // remove this and rewrite the associated tests.)
            let count = self
                .positions()
                .take(10)
                .process_results(|iter| iter.count())?;
            if count < 10 {
                Ok((count, Some(count)))
            } else {
                // At least 10 commits exist; the exact count is left unknown.
                Ok((10, None))
            }
        } else {
            // Exact count: consume the whole walk, propagating the first error.
            let count = self.positions().process_results(|iter| iter.count())?;
            Ok((count, Some(count)))
        }
    }
    /// Returns a containment test backed by an incrementally-consumed
    /// position walk (see `PositionsAccumulator`.)
    fn containing_fn<'a>(&self) -> Box<RevsetContainingFn<'a>>
    where
        Self: 'a,
    {
        let positions = PositionsAccumulator::new(self.index.clone(), self.inner.positions());
        Box::new(move |commit_id| positions.contains(commit_id))
    }
}
/// Incrementally consumes `RevWalk` of the revset collecting positions.
struct PositionsAccumulator<'a, I> {
    index: I,
    // RefCell gives interior mutability so `contains()` can take `&self`.
    inner: RefCell<PositionsAccumulatorInner<'a>>,
}
impl<'a, I: AsCompositeIndex> PositionsAccumulator<'a, I> {
    fn new(index: I, walk: BoxedRevWalk<'a>) -> Self {
        let inner = RefCell::new(PositionsAccumulatorInner {
            walk,
            consumed_positions: Vec::new(),
        });
        Self { index, inner }
    }
    /// Checks whether the commit is in the revset.
    fn contains(&self, commit_id: &CommitId) -> Result<bool, RevsetEvaluationError> {
        let index = self.index.as_composite();
        // A commit not in the index at all can't be in the revset.
        let Some(position) = index.commits().commit_id_to_pos(commit_id) else {
            return Ok(false);
        };
        let mut inner = self.inner.borrow_mut();
        inner.consume_to(index, position)?;
        // `consumed_positions` is in descending order, hence the reversed
        // comparator for the binary search.
        let found = inner
            .consumed_positions
            .binary_search_by(|p| p.cmp(&position).reverse())
            .is_ok();
        Ok(found)
    }
    #[cfg(test)]
    fn consumed_len(&self) -> usize {
        self.inner.borrow().consumed_positions.len()
    }
}
/// Helper struct for [`PositionsAccumulator`] to simplify interior mutability.
struct PositionsAccumulatorInner<'a> {
    walk: BoxedRevWalk<'a>,
    // Positions consumed so far, in descending order (the walk order.)
    consumed_positions: Vec<GlobalCommitPosition>,
}
impl PositionsAccumulatorInner<'_> {
    /// Consumes `RevWalk` to a desired position but not deeper.
    fn consume_to(
        &mut self,
        index: &CompositeIndex,
        desired_position: GlobalCommitPosition,
    ) -> Result<(), RevsetEvaluationError> {
        // Already consumed at or past the desired position: nothing to do.
        let last_position = self.consumed_positions.last();
        if last_position.is_some_and(|&pos| pos <= desired_position) {
            return Ok(());
        }
        while let Some(position) = self.walk.next(index).transpose()? {
            self.consumed_positions.push(position);
            if position <= desired_position {
                return Ok(());
            }
        }
        Ok(())
    }
}
/// Revset backed by a precomputed list of `GlobalCommitPosition`s, stored in
/// descending index order.
#[derive(Debug)]
struct EagerRevset {
    positions: Vec<GlobalCommitPosition>,
}

impl EagerRevset {
    /// Creates a revset containing no commits.
    pub const fn empty() -> Self {
        Self {
            positions: Vec::new(),
        }
    }
}

impl InternalRevset for EagerRevset {
    fn positions<'a>(&self) -> BoxedRevWalk<'a>
    where
        Self: 'a,
    {
        // Clone the positions so the returned walk owns its data.
        let owned = self.positions.clone();
        Box::new(EagerRevWalk::new(owned.into_iter()).map(|_index, pos| Ok(pos)))
    }
}

impl ToPredicateFn for EagerRevset {
    fn to_predicate_fn<'a>(&self) -> BoxedPredicateFn<'a>
    where
        Self: 'a,
    {
        let owned = self.positions.clone();
        predicate_fn_from_rev_walk(EagerRevWalk::new(owned.into_iter()))
    }
}
/// Revset wrapping an infallible, cloneable `RevWalk` of
/// `GlobalCommitPosition`s.
struct RevWalkRevset<W> {
    walk: W,
}

impl<W> fmt::Debug for RevWalkRevset<W> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The walk itself has no Debug bound, so only the type name is shown.
        let mut builder = f.debug_struct("RevWalkRevset");
        builder.finish_non_exhaustive()
    }
}

impl<W> InternalRevset for RevWalkRevset<W>
where
    W: RevWalk<CompositeIndex, Item = GlobalCommitPosition> + Clone,
{
    fn positions<'a>(&self) -> BoxedRevWalk<'a>
    where
        Self: 'a,
    {
        // Wrap each infallible item in Ok to fit the fallible walk type.
        let walk = self.walk.clone();
        Box::new(walk.map(|_index, pos| Ok(pos)))
    }
}

impl<W> ToPredicateFn for RevWalkRevset<W>
where
    W: RevWalk<CompositeIndex, Item = GlobalCommitPosition> + Clone,
{
    fn to_predicate_fn<'a>(&self) -> BoxedPredicateFn<'a>
    where
        Self: 'a,
    {
        let walk = self.walk.clone();
        predicate_fn_from_rev_walk(walk)
    }
}
/// Builds a membership predicate from a walk of descending positions.
///
/// The returned predicate skips (and never revisits) positions greater than
/// the probed one, so callers must probe positions in descending order — the
/// documented evaluation order of `ToPredicateFn`.
fn predicate_fn_from_rev_walk<'a, W>(walk: W) -> BoxedPredicateFn<'a>
where
    W: RevWalk<CompositeIndex, Item = GlobalCommitPosition> + 'a,
{
    let mut walk = walk.peekable();
    Box::new(move |index, entry_pos| {
        // Discard walk items greater than the probed position.
        while walk.next_if(index, |&pos| pos > entry_pos).is_some() {
            continue;
        }
        // The entry is in the set iff it's now the next walk item.
        Ok(walk.next_if(index, |&pos| pos == entry_pos).is_some())
    })
}
/// Candidate set restricted by a predicate.
#[derive(Debug)]
struct FilterRevset<S, P> {
    candidates: S,
    predicate: P,
}

impl<S, P> InternalRevset for FilterRevset<S, P>
where
    S: InternalRevset,
    P: ToPredicateFn,
{
    fn positions<'a>(&self) -> BoxedRevWalk<'a>
    where
        Self: 'a,
    {
        let mut predicate = self.predicate.to_predicate_fn();
        Box::new(self.candidates.positions().filter_map(move |index, pos| {
            // Propagate errors from either side; drop rejected positions.
            match pos {
                Ok(pos) => match predicate(index, pos) {
                    Ok(true) => Some(Ok(pos)),
                    Ok(false) => None,
                    Err(err) => Some(Err(err)),
                },
                Err(err) => Some(Err(err)),
            }
        }))
    }
}

impl<S, P> ToPredicateFn for FilterRevset<S, P>
where
    S: ToPredicateFn,
    P: ToPredicateFn,
{
    fn to_predicate_fn<'a>(&self) -> BoxedPredicateFn<'a>
    where
        Self: 'a,
    {
        let mut in_candidates = self.candidates.to_predicate_fn();
        let mut accepted = self.predicate.to_predicate_fn();
        // Short-circuit: the predicate runs only for candidate members.
        Box::new(move |index, pos| {
            if !in_candidates(index, pos)? {
                return Ok(false);
            }
            accepted(index, pos)
        })
    }
}
/// Logical complement of the wrapped predicate set.
#[derive(Debug)]
struct NotInPredicate<S>(S);

impl<S: ToPredicateFn> ToPredicateFn for NotInPredicate<S> {
    fn to_predicate_fn<'a>(&self) -> BoxedPredicateFn<'a>
    where
        Self: 'a,
    {
        let mut inner = self.0.to_predicate_fn();
        // Invert the inner result; errors pass through unchanged.
        Box::new(move |index, pos| inner(index, pos).map(|contained| !contained))
    }
}
/// Set union of two internal revsets.
#[derive(Debug)]
struct UnionRevset<S1, S2> {
    set1: S1,
    set2: S2,
}

impl<S1, S2> InternalRevset for UnionRevset<S1, S2>
where
    S1: InternalRevset,
    S2: InternalRevset,
{
    fn positions<'a>(&self) -> BoxedRevWalk<'a>
    where
        Self: 'a,
    {
        // Positions are walked in descending order, so compare reversed.
        let walk = union_by(
            self.set1.positions(),
            self.set2.positions(),
            |pos1, pos2| pos2.cmp(pos1),
        );
        Box::new(walk)
    }
}

impl<S1, S2> ToPredicateFn for UnionRevset<S1, S2>
where
    S1: ToPredicateFn,
    S2: ToPredicateFn,
{
    fn to_predicate_fn<'a>(&self) -> BoxedPredicateFn<'a>
    where
        Self: 'a,
    {
        let mut p1 = self.set1.to_predicate_fn();
        let mut p2 = self.set2.to_predicate_fn();
        // Short-circuit: set2 is consulted only when set1 doesn't match.
        Box::new(move |index, pos| {
            if p1(index, pos)? {
                Ok(true)
            } else {
                p2(index, pos)
            }
        })
    }
}
/// `RevWalk` node that merges two sorted walk nodes.
///
/// The input items should be sorted in ascending order by the `cmp` function.
struct UnionRevWalk<I: ?Sized, W1: RevWalk<I>, W2: RevWalk<I>, C> {
    walk1: PeekableRevWalk<I, W1>,
    walk2: PeekableRevWalk<I, W2>,
    cmp: C,
}
impl<I, T, E, W1, W2, C> RevWalk<I> for UnionRevWalk<I, W1, W2, C>
where
    I: ?Sized,
    W1: RevWalk<I, Item = Result<T, E>>,
    W2: RevWalk<I, Item = Result<T, E>>,
    C: FnMut(&T, &T) -> Ordering,
{
    type Item = W1::Item;
    fn next(&mut self, index: &I) -> Option<Self::Item> {
        match (self.walk1.peek(index), self.walk2.peek(index)) {
            // One side exhausted: drain the other.
            (None, _) => self.walk2.next(index),
            (_, None) => self.walk1.next(index),
            (Some(Ok(item1)), Some(Ok(item2))) => match (self.cmp)(item1, item2) {
                Ordering::Less => self.walk1.next(index),
                Ordering::Equal => {
                    // Deduplicate: drop walk2's copy, yield walk1's.
                    self.walk2.next(index);
                    self.walk1.next(index)
                }
                Ordering::Greater => self.walk2.next(index),
            },
            // Errors are yielded immediately from whichever side produced one.
            (Some(Err(_)), _) => self.walk1.next(index),
            (_, Some(Err(_))) => self.walk2.next(index),
        }
    }
}
/// Merges two walks (ascending per `cmp`) into a deduplicating union walk.
fn union_by<I, T, E, W1, W2, C>(walk1: W1, walk2: W2, cmp: C) -> UnionRevWalk<I, W1, W2, C>
where
    I: ?Sized,
    W1: RevWalk<I, Item = Result<T, E>>,
    W2: RevWalk<I, Item = Result<T, E>>,
    C: FnMut(&T, &T) -> Ordering,
{
    // Both sides need peeking for the merge, so wrap them up front.
    let walk1 = walk1.peekable();
    let walk2 = walk2.peekable();
    UnionRevWalk { walk1, walk2, cmp }
}
/// Set intersection of two internal revsets.
#[derive(Debug)]
struct IntersectionRevset<S1, S2> {
    set1: S1,
    set2: S2,
}

impl<S1, S2> InternalRevset for IntersectionRevset<S1, S2>
where
    S1: InternalRevset,
    S2: InternalRevset,
{
    fn positions<'a>(&self) -> BoxedRevWalk<'a>
    where
        Self: 'a,
    {
        // Positions are walked in descending order, so compare reversed.
        let walk = intersection_by(
            self.set1.positions(),
            self.set2.positions(),
            |pos1, pos2| pos2.cmp(pos1),
        );
        Box::new(walk)
    }
}

impl<S1, S2> ToPredicateFn for IntersectionRevset<S1, S2>
where
    S1: ToPredicateFn,
    S2: ToPredicateFn,
{
    fn to_predicate_fn<'a>(&self) -> BoxedPredicateFn<'a>
    where
        Self: 'a,
    {
        let mut p1 = self.set1.to_predicate_fn();
        let mut p2 = self.set2.to_predicate_fn();
        // Short-circuit: set2 is consulted only when set1 matches.
        Box::new(move |index, pos| {
            if p1(index, pos)? {
                p2(index, pos)
            } else {
                Ok(false)
            }
        })
    }
}
/// `RevWalk` node that intersects two sorted walk nodes.
///
/// The input items should be sorted in ascending order by the `cmp` function.
struct IntersectionRevWalk<I: ?Sized, W1: RevWalk<I>, W2: RevWalk<I>, C> {
    walk1: PeekableRevWalk<I, W1>,
    walk2: PeekableRevWalk<I, W2>,
    cmp: C,
}
impl<I, T, E, W1, W2, C> RevWalk<I> for IntersectionRevWalk<I, W1, W2, C>
where
    I: ?Sized,
    W1: RevWalk<I, Item = Result<T, E>>,
    W2: RevWalk<I, Item = Result<T, E>>,
    C: FnMut(&T, &T) -> Ordering,
{
    type Item = W1::Item;
    fn next(&mut self, index: &I) -> Option<Self::Item> {
        loop {
            match (self.walk1.peek(index), self.walk2.peek(index)) {
                // Either side exhausted means no more common items.
                (None, _) => {
                    return None;
                }
                (_, None) => {
                    return None;
                }
                (Some(Ok(item1)), Some(Ok(item2))) => match (self.cmp)(item1, item2) {
                    // Advance the side that's behind until items align.
                    Ordering::Less => {
                        self.walk1.next(index);
                    }
                    Ordering::Equal => {
                        // Common item: consume both, yield walk1's copy.
                        self.walk2.next(index);
                        return self.walk1.next(index);
                    }
                    Ordering::Greater => {
                        self.walk2.next(index);
                    }
                },
                // Errors are yielded immediately from whichever side
                // produced one.
                (Some(Err(_)), _) => {
                    return self.walk1.next(index);
                }
                (_, Some(Err(_))) => {
                    return self.walk2.next(index);
                }
            }
        }
    }
}
/// Intersects two walks (ascending per `cmp`), yielding items common to both.
fn intersection_by<I, T, E, W1, W2, C>(
    walk1: W1,
    walk2: W2,
    cmp: C,
) -> IntersectionRevWalk<I, W1, W2, C>
where
    I: ?Sized,
    W1: RevWalk<I, Item = Result<T, E>>,
    W2: RevWalk<I, Item = Result<T, E>>,
    C: FnMut(&T, &T) -> Ordering,
{
    // Both sides need peeking for the alignment loop, so wrap them up front.
    let walk1 = walk1.peekable();
    let walk2 = walk2.peekable();
    IntersectionRevWalk { walk1, walk2, cmp }
}
/// Set difference: items of `set1` that are not in `set2`.
#[derive(Debug)]
struct DifferenceRevset<S1, S2> {
    // The minuend (what to subtract from)
    set1: S1,
    // The subtrahend (what to subtract)
    set2: S2,
}

impl<S1, S2> InternalRevset for DifferenceRevset<S1, S2>
where
    S1: InternalRevset,
    S2: InternalRevset,
{
    fn positions<'a>(&self) -> BoxedRevWalk<'a>
    where
        Self: 'a,
    {
        // Positions are walked in descending order, so compare reversed.
        let walk = difference_by(
            self.set1.positions(),
            self.set2.positions(),
            |pos1, pos2| pos2.cmp(pos1),
        );
        Box::new(walk)
    }
}

impl<S1, S2> ToPredicateFn for DifferenceRevset<S1, S2>
where
    S1: ToPredicateFn,
    S2: ToPredicateFn,
{
    fn to_predicate_fn<'a>(&self) -> BoxedPredicateFn<'a>
    where
        Self: 'a,
    {
        let mut in_minuend = self.set1.to_predicate_fn();
        let mut in_subtrahend = self.set2.to_predicate_fn();
        // Included iff present in set1 and absent from set2; set2 is only
        // consulted when set1 matched.
        Box::new(move |index, pos| {
            if in_minuend(index, pos)? {
                in_subtrahend(index, pos).map(|hit| !hit)
            } else {
                Ok(false)
            }
        })
    }
}
/// `RevWalk` node that subtracts `walk2` items from `walk1`.
///
/// The input items should be sorted in ascending order by the `cmp` function.
struct DifferenceRevWalk<I: ?Sized, W1: RevWalk<I>, W2: RevWalk<I>, C> {
    walk1: PeekableRevWalk<I, W1>,
    walk2: PeekableRevWalk<I, W2>,
    cmp: C,
}
impl<I, T, E, W1, W2, C> RevWalk<I> for DifferenceRevWalk<I, W1, W2, C>
where
    I: ?Sized,
    W1: RevWalk<I, Item = Result<T, E>>,
    W2: RevWalk<I, Item = Result<T, E>>,
    C: FnMut(&T, &T) -> Ordering,
{
    type Item = W1::Item;
    fn next(&mut self, index: &I) -> Option<Self::Item> {
        loop {
            match (self.walk1.peek(index), self.walk2.peek(index)) {
                // Minuend exhausted: nothing left to yield.
                (None, _) => {
                    return None;
                }
                // Subtrahend exhausted: remaining minuend items pass through.
                (_, None) => {
                    return self.walk1.next(index);
                }
                (Some(Ok(item1)), Some(Ok(item2))) => match (self.cmp)(item1, item2) {
                    // walk1's item is smaller than anything left to subtract.
                    Ordering::Less => {
                        return self.walk1.next(index);
                    }
                    Ordering::Equal => {
                        // Matched: drop the item from both sides.
                        self.walk2.next(index);
                        self.walk1.next(index);
                    }
                    Ordering::Greater => {
                        self.walk2.next(index);
                    }
                },
                // Errors are yielded immediately from whichever side
                // produced one.
                (Some(Err(_)), _) => {
                    return self.walk1.next(index);
                }
                (_, Some(Err(_))) => {
                    return self.walk2.next(index);
                }
            }
        }
    }
}
/// Subtracts `walk2` items from `walk1`; both must be ascending per `cmp`.
fn difference_by<I, T, E, W1, W2, C>(
    walk1: W1,
    walk2: W2,
    cmp: C,
) -> DifferenceRevWalk<I, W1, W2, C>
where
    I: ?Sized,
    W1: RevWalk<I, Item = Result<T, E>>,
    W2: RevWalk<I, Item = Result<T, E>>,
    C: FnMut(&T, &T) -> Ordering,
{
    // Both sides need peeking for the alignment loop, so wrap them up front.
    let walk1 = walk1.peekable();
    let walk2 = walk2.peekable();
    DifferenceRevWalk { walk1, walk2, cmp }
}
/// Evaluates a resolved revset expression against the given index.
pub(super) fn evaluate<I: AsCompositeIndex + Clone>(
    expression: &ResolvedExpression,
    store: &Arc<Store>,
    index: I,
) -> Result<RevsetImpl<I>, RevsetEvaluationError> {
    let context = EvaluationContext {
        store: Arc::clone(store),
        index: index.as_composite(),
    };
    let inner = context.evaluate(expression)?;
    Ok(RevsetImpl::new(inner, index))
}
/// State shared across expression evaluation: the commit store and the
/// composite index the expression is evaluated against.
struct EvaluationContext<'index> {
    store: Arc<Store>,
    index: &'index CompositeIndex,
}
/// Narrows a `u64` generation range to `u32`.
///
/// An out-of-range lower bound is an error; an out-of-range upper bound is
/// clamped to `u32::MAX`.
fn to_u32_generation_range(range: &Range<u64>) -> Result<Range<u32>, RevsetEvaluationError> {
    let start = u32::try_from(range.start).map_err(|_| {
        RevsetEvaluationError::Other(
            format!("Lower bound of generation ({}) is too large", range.start).into(),
        )
    })?;
    // Clamping the end is fine: no commit can exceed u32::MAX generations.
    let end = u32::try_from(range.end).unwrap_or(u32::MAX);
    Ok(start..end)
}
impl EvaluationContext<'_> {
fn evaluate(
&self,
expression: &ResolvedExpression,
) -> Result<Box<dyn InternalRevset>, RevsetEvaluationError> {
let index = self.index;
match expression {
ResolvedExpression::Commits(commit_ids) => {
Ok(Box::new(self.revset_for_commit_ids(commit_ids)?))
}
ResolvedExpression::Ancestors {
heads,
generation,
parents_range,
} => {
let head_set = self.evaluate(heads)?;
let head_positions = head_set.positions().attach(index);
let builder = RevWalkBuilder::new(index)
.wanted_heads(head_positions.try_collect()?)
.wanted_parents_range(parents_range.clone());
if generation == &GENERATION_RANGE_FULL {
let walk = builder.ancestors().detach();
Ok(Box::new(RevWalkRevset { walk }))
} else {
let generation = to_u32_generation_range(generation)?;
let walk = builder
.ancestors_filtered_by_generation(generation)
.detach();
Ok(Box::new(RevWalkRevset { walk }))
}
}
ResolvedExpression::Range {
roots,
heads,
generation,
parents_range,
} => {
let root_set = self.evaluate(roots)?;
let root_positions: Vec<_> = root_set.positions().attach(index).try_collect()?;
// Pre-filter heads so queries like 'immutable_heads()..' can
// terminate early. immutable_heads() usually includes some
// visible heads, which can be trivially rejected.
let head_set = self.evaluate(heads)?;
let head_positions = difference_by(
head_set.positions(),
EagerRevWalk::new(root_positions.iter().copied().map(Ok)),
|pos1, pos2| pos1.cmp(pos2).reverse(),
)
.attach(index);
let builder = RevWalkBuilder::new(index)
.wanted_heads(head_positions.try_collect()?)
.wanted_parents_range(parents_range.clone())
.unwanted_roots(root_positions);
if generation == &GENERATION_RANGE_FULL {
let walk = builder.ancestors().detach();
Ok(Box::new(RevWalkRevset { walk }))
} else {
let generation = to_u32_generation_range(generation)?;
let walk = builder
.ancestors_filtered_by_generation(generation)
.detach();
Ok(Box::new(RevWalkRevset { walk }))
}
}
ResolvedExpression::DagRange {
roots,
heads,
generation_from_roots,
} => {
let root_set = self.evaluate(roots)?;
let root_positions = root_set.positions().attach(index);
let head_set = self.evaluate(heads)?;
let head_positions = head_set.positions().attach(index);
let builder =
RevWalkBuilder::new(index).wanted_heads(head_positions.try_collect()?);
if generation_from_roots == &(1..2) {
let root_positions: HashSet<_> = root_positions.try_collect()?;
let walk = builder
.ancestors_until_roots(root_positions.iter().copied())
.detach();
let candidates = RevWalkRevset { walk };
let predicate = as_pure_predicate_fn(move |index, pos| {
Ok(index
.commits()
.entry_by_pos(pos)
.parent_positions()
.iter()
.any(|parent_pos| root_positions.contains(parent_pos)))
});
// TODO: Suppose heads include all visible heads, ToPredicateFn version can be
// optimized to only test the predicate()
Ok(Box::new(FilterRevset {
candidates,
predicate,
}))
} else if generation_from_roots == &GENERATION_RANGE_FULL {
let mut positions = builder
.descendants(root_positions.try_collect()?)
.collect_vec();
positions.reverse();
Ok(Box::new(EagerRevset { positions }))
} else {
// For small generation range, it might be better to build a reachable map
// with generation bit set, which can be calculated incrementally from roots:
// reachable[pos] = (reachable[parent_pos] | ...) << 1
let mut positions = builder
.descendants_filtered_by_generation(
root_positions.try_collect()?,
to_u32_generation_range(generation_from_roots)?,
)
.map(|Reverse(pos)| pos)
.collect_vec();
positions.reverse();
Ok(Box::new(EagerRevset { positions }))
}
}
ResolvedExpression::Reachable { sources, domain } => {
let mut sets = union_find::UnionFind::<GlobalCommitPosition>::new();
// Compute all reachable subgraphs.
let domain_revset = self.evaluate(domain)?;
let domain_vec: Vec<_> = domain_revset.positions().attach(index).try_collect()?;
let domain_set: HashSet<_> = domain_vec.iter().copied().collect();
for pos in &domain_set {
for parent_pos in index.commits().entry_by_pos(*pos).parent_positions() {
if domain_set.contains(&parent_pos) {
sets.union(*pos, parent_pos);
}
}
}
// `UnionFind::find` is somewhat slow, so it's faster to only do this once and
// then cache the result.
let domain_reps = domain_vec.iter().map(|&pos| sets.find(pos)).collect_vec();
// Identify disjoint sets reachable from sources. Using a predicate here can be
// significantly faster for cases like `reachable(filter, X)`, since the filter
// can be checked for only commits in `X` instead of for all visible commits,
// and the difference is usually negligible for non-filter revsets.
let sources_revset = self.evaluate(sources)?;
let mut sources_predicate = sources_revset.to_predicate_fn();
let mut set_reps = HashSet::new();
for (&pos, &rep) in domain_vec.iter().zip(&domain_reps) {
// Skip evaluating predicate if `rep` has already been added.
if set_reps.contains(&rep) {
continue;
}
if sources_predicate(index, pos)? {
set_reps.insert(rep);
}
}
let positions = domain_vec
.into_iter()
.zip(domain_reps)
.filter_map(|(pos, rep)| set_reps.contains(&rep).then_some(pos))
.collect_vec();
Ok(Box::new(EagerRevset { positions }))
}
ResolvedExpression::Heads(candidates) => {
let candidate_set = self.evaluate(candidates)?;
let positions = index
.commits()
.heads_pos(candidate_set.positions().attach(index).try_collect()?);
Ok(Box::new(EagerRevset { positions }))
}
ResolvedExpression::HeadsRange {
roots,
heads,
parents_range,
filter,
} => {
let root_set = self.evaluate(roots)?;
let root_positions: Vec<_> = root_set.positions().attach(index).try_collect()?;
// Pre-filter heads so queries like 'immutable_heads()..' can
// terminate early. immutable_heads() usually includes some
// visible heads, which can be trivially rejected.
let head_set = self.evaluate(heads)?;
let head_positions = difference_by(
head_set.positions(),
EagerRevWalk::new(root_positions.iter().copied().map(Ok)),
|pos1, pos2| pos1.cmp(pos2).reverse(),
)
.attach(index)
.try_collect()?;
let positions = if let Some(filter) = filter {
let mut filter = self.evaluate_predicate(filter)?.to_predicate_fn();
index.commits().heads_from_range_and_filter(
root_positions,
head_positions,
parents_range,
|pos| filter(index, pos),
)?
} else {
let Ok(positions) = index.commits().heads_from_range_and_filter::<Infallible>(
root_positions,
head_positions,
parents_range,
|_| Ok(true),
);
positions
};
Ok(Box::new(EagerRevset { positions }))
}
ResolvedExpression::Roots(candidates) => {
let mut positions: Vec<_> = self
.evaluate(candidates)?
.positions()
.attach(index)
.try_collect()?;
let filled = RevWalkBuilder::new(index)
.wanted_heads(positions.clone())
.descendants(positions.iter().copied().collect())
.collect_positions_set();
positions.retain(|&pos| {
!index
.commits()
.entry_by_pos(pos)
.parent_positions()
.iter()
.any(|parent| filled.contains(parent))
});
Ok(Box::new(EagerRevset { positions }))
}
ResolvedExpression::ForkPoint(expression) => {
let expression_set = self.evaluate(expression)?;
let mut expression_positions_iter = expression_set.positions().attach(index);
let Some(position) = expression_positions_iter.next() else {
return Ok(Box::new(EagerRevset::empty()));
};
let mut positions = vec![position?];
for position in expression_positions_iter {
positions = index
.commits()
.common_ancestors_pos(positions, vec![position?]);
}
Ok(Box::new(EagerRevset { positions }))
}
ResolvedExpression::Bisect(candidates) => {
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/default_index/rev_walk_queue.rs | lib/src/default_index/rev_walk_queue.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::BinaryHeap;
use std::mem;
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub(super) struct RevWalkWorkItem<P, T> {
    pub pos: P,
    pub value: T,
}
/// Max-priority queue of work items, discarding positions below `min_pos`.
#[derive(Clone)]
pub(super) struct RevWalkQueue<P, T> {
    items: BinaryHeap<RevWalkWorkItem<P, T>>,
    // Optionally keep the greatest item out of the heap, so pop() + push() of
    // the greatest item won't have to rebalance the heap.
    scratch_item: Option<RevWalkWorkItem<P, T>>,
    min_pos: P,
}
impl<P: Ord, T: Ord> RevWalkQueue<P, T> {
    /// Creates an empty queue that silently drops items below `min_pos`.
    pub fn with_min_pos(min_pos: P) -> Self {
        Self {
            items: BinaryHeap::new(),
            scratch_item: None,
            min_pos,
        }
    }
    #[cfg_attr(not(test), expect(dead_code))]
    pub fn len(&self) -> usize {
        let scratch = usize::from(self.scratch_item.is_some());
        self.items.len() + scratch
    }
    /// Enqueues an item unless its position is below the pruning threshold.
    pub fn push(&mut self, pos: P, value: T) {
        if pos >= self.min_pos {
            self.push_item(RevWalkWorkItem { pos, value });
        }
    }
    fn push_item(&mut self, new: RevWalkWorkItem<P, T>) {
        if let Some(greatest) = self.scratch_item.as_mut() {
            if new < *greatest {
                self.items.push(new);
            } else {
                // Demote the previous greatest item into the heap.
                let demoted = mem::replace(greatest, new);
                self.items.push(demoted);
            }
        } else if self.items.peek().is_some_and(|top| new < *top) {
            // items[0] could be promoted to the scratch slot instead, but
            // simply leave the slot empty.
            self.items.push(new);
        } else {
            self.scratch_item = Some(new);
        }
    }
    /// Enqueues the same value at each of the given positions.
    pub fn extend(&mut self, positions: impl IntoIterator<Item = P>, value: T)
    where
        T: Clone,
    {
        positions
            .into_iter()
            .for_each(|pos| self.push(pos, value.clone()));
    }
    /// Returns the greatest item without removing it.
    pub fn peek(&self) -> Option<&RevWalkWorkItem<P, T>> {
        match &self.scratch_item {
            Some(item) => Some(item),
            None => self.items.peek(),
        }
    }
    /// Removes and returns the greatest item.
    pub fn pop(&mut self) -> Option<RevWalkWorkItem<P, T>> {
        if let Some(item) = self.scratch_item.take() {
            Some(item)
        } else {
            self.items.pop()
        }
    }
    /// Pops the greatest item only if `predicate` accepts it.
    pub fn pop_if(
        &mut self,
        predicate: impl FnOnce(&RevWalkWorkItem<P, T>) -> bool,
    ) -> Option<RevWalkWorkItem<P, T>> {
        if predicate(self.peek()?) {
            Some(self.pop().unwrap())
        } else {
            None
        }
    }
    /// Pops the greatest item only if it sits at exactly `pos`.
    pub fn pop_eq(&mut self, pos: &P) -> Option<RevWalkWorkItem<P, T>> {
        self.pop_if(|item| item.pos == *pos)
    }
    /// Discards all leading items at exactly `pos`.
    pub fn skip_while_eq(&mut self, pos: &P) {
        while self.pop_eq(pos).is_some() {}
    }
}
#[cfg(test)]
mod tests {
    use assert_matches::assert_matches;
    use super::*;
    // Pushing in ascending order keeps each newly-pushed (greatest) item in
    // the scratch slot; pops in this pattern barely touch the heap.
    #[test]
    fn test_push_pop_in_forward_order() {
        let mut queue: RevWalkQueue<u32, ()> = RevWalkQueue::with_min_pos(0);
        queue.push(0, ());
        assert!(queue.scratch_item.is_some());
        assert_eq!(queue.items.len(), 0);
        queue.push(1, ());
        assert!(queue.scratch_item.is_some());
        assert_eq!(queue.items.len(), 1);
        assert_matches!(queue.pop(), Some(RevWalkWorkItem { pos: 1, .. }));
        assert!(queue.scratch_item.is_none());
        assert_eq!(queue.items.len(), 1);
        queue.push(2, ());
        assert!(queue.scratch_item.is_some());
        assert_eq!(queue.items.len(), 1);
        assert_matches!(queue.pop(), Some(RevWalkWorkItem { pos: 2, .. }));
        assert!(queue.scratch_item.is_none());
        assert_eq!(queue.items.len(), 1);
        assert_matches!(queue.pop(), Some(RevWalkWorkItem { pos: 0, .. }));
        assert!(queue.scratch_item.is_none());
        assert_eq!(queue.items.len(), 0);
        assert_matches!(queue.pop(), None);
    }
    // Pushing a smaller item while the scratch slot is empty goes straight
    // into the heap and leaves the slot empty.
    #[test]
    fn test_push_pop_in_reverse_order() {
        let mut queue: RevWalkQueue<u32, ()> = RevWalkQueue::with_min_pos(0);
        queue.push(2, ());
        assert!(queue.scratch_item.is_some());
        assert_eq!(queue.items.len(), 0);
        queue.push(1, ());
        assert!(queue.scratch_item.is_some());
        assert_eq!(queue.items.len(), 1);
        assert_matches!(queue.pop(), Some(RevWalkWorkItem { pos: 2, .. }));
        assert!(queue.scratch_item.is_none());
        assert_eq!(queue.items.len(), 1);
        queue.push(0, ());
        assert!(queue.scratch_item.is_none());
        assert_eq!(queue.items.len(), 2);
        assert_matches!(queue.pop(), Some(RevWalkWorkItem { pos: 1, .. }));
        assert!(queue.scratch_item.is_none());
        assert_eq!(queue.items.len(), 1);
        assert_matches!(queue.pop(), Some(RevWalkWorkItem { pos: 0, .. }));
        assert!(queue.scratch_item.is_none());
        assert_eq!(queue.items.len(), 0);
        assert_matches!(queue.pop(), None);
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/default_index/readonly.rs | lib/src/default_index/readonly.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::cmp::Ordering;
use std::collections::HashSet;
use std::fmt;
use std::fmt::Debug;
use std::fs::File;
use std::io;
use std::io::Read;
use std::iter;
use std::ops::Range;
use std::path::Path;
use std::sync::Arc;
use itertools::Itertools as _;
use smallvec::smallvec;
use thiserror::Error;
use super::changed_path::CompositeChangedPathIndex;
use super::composite::AsCompositeIndex;
use super::composite::ChangeIdIndexImpl;
use super::composite::CommitIndexSegment;
use super::composite::CommitIndexSegmentId;
use super::composite::CompositeCommitIndex;
use super::composite::CompositeIndex;
use super::entry::GlobalCommitPosition;
use super::entry::LocalCommitPosition;
use super::entry::SmallGlobalCommitPositionsVec;
use super::entry::SmallLocalCommitPositionsVec;
use super::mutable::DefaultMutableIndex;
use super::revset_engine;
use super::revset_engine::RevsetImpl;
use crate::backend::ChangeId;
use crate::backend::CommitId;
use crate::graph::GraphNode;
use crate::index::ChangeIdIndex;
use crate::index::Index;
use crate::index::IndexResult;
use crate::index::MutableIndex;
use crate::index::ReadonlyIndex;
use crate::object_id::HexPrefix;
use crate::object_id::ObjectId;
use crate::object_id::PrefixResolution;
use crate::repo_path::RepoPathBuf;
use crate::revset::ResolvedExpression;
use crate::revset::Revset;
use crate::revset::RevsetEvaluationError;
use crate::store::Store;
/// Error while loading index segment file.
#[derive(Debug, Error)]
pub enum ReadonlyIndexLoadError {
    /// The segment file declares a format version other than the one this
    /// build supports.
    #[error("Unexpected {kind} index version")]
    UnexpectedVersion {
        /// Index type.
        kind: &'static str,
        /// Version number read from the file.
        found_version: u32,
        /// Version number this build expects.
        expected_version: u32,
    },
    /// Any other I/O-level failure while reading the segment file.
    #[error("Failed to load {kind} index file '{name}'")]
    Other {
        /// Index type.
        kind: &'static str,
        /// Index file name.
        name: String,
        /// Underlying error.
        #[source]
        error: io::Error,
    },
}
impl ReadonlyIndexLoadError {
    /// Wraps `error` as an invalid-data I/O failure for the named index file.
    pub(super) fn invalid_data(
        kind: &'static str,
        name: impl Into<String>,
        error: impl Into<Box<dyn std::error::Error + Send + Sync>>,
    ) -> Self {
        let io_err = io::Error::new(io::ErrorKind::InvalidData, error);
        Self::from_io_err(kind, name, io_err)
    }
    /// Wraps a raw I/O error for the named index file.
    pub(super) fn from_io_err(
        kind: &'static str,
        name: impl Into<String>,
        error: io::Error,
    ) -> Self {
        let name = name.into();
        Self::Other { kind, name, error }
    }
    /// Returns true if the underlying error suggests data corruption.
    pub(super) fn is_corrupt_or_not_found(&self) -> bool {
        let Self::Other { error, .. } = self else {
            // UnexpectedVersion: the file's format doesn't match ours.
            return true;
        };
        // If the parent file name field is corrupt, the file wouldn't be found.
        // And there's no need to distinguish it from an empty file.
        matches!(
            error.kind(),
            io::ErrorKind::NotFound | io::ErrorKind::InvalidData | io::ErrorKind::UnexpectedEof
        )
    }
}
/// Current format version of the commit index segment file.
pub(super) const COMMIT_INDEX_SEGMENT_FILE_FORMAT_VERSION: u32 = 6;
/// If set, the value is stored in the overflow table.
///
/// This is the high bit of the serialized u32 word; the remaining bits are
/// interpreted per the field's `as_inlined()`/`as_overflow()` accessors.
pub(super) const OVERFLOW_FLAG: u32 = 0x8000_0000;
/// Global index position of parent entry, or overflow pointer.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
struct ParentIndexPosition(u32);

impl ParentIndexPosition {
    /// Returns the inlined parent position unless the overflow bit is set.
    fn as_inlined(self) -> Option<GlobalCommitPosition> {
        if self.0 & OVERFLOW_FLAG == 0 {
            Some(GlobalCommitPosition(self.0))
        } else {
            None
        }
    }

    /// Returns the bit-negated overflow-table index if the overflow bit is
    /// set.
    fn as_overflow(self) -> Option<u32> {
        if self.0 & OVERFLOW_FLAG != 0 {
            Some(!self.0)
        } else {
            None
        }
    }
}
/// Local position of entry pointed by change id, or overflow pointer.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
struct ChangeLocalPosition(u32);

impl ChangeLocalPosition {
    /// Returns the inlined local position unless the overflow bit is set.
    fn as_inlined(self) -> Option<LocalCommitPosition> {
        if self.0 & OVERFLOW_FLAG == 0 {
            Some(LocalCommitPosition(self.0))
        } else {
            None
        }
    }

    /// Returns the bit-negated overflow-table index if the overflow bit is
    /// set.
    fn as_overflow(self) -> Option<u32> {
        if self.0 & OVERFLOW_FLAG == 0 {
            None
        } else {
            Some(!self.0)
        }
    }
}
/// Lengths of fields to be serialized.
#[derive(Clone, Copy, Debug)]
pub(super) struct FieldLengths {
    /// Commit id length in bytes.
    pub commit_id: usize,
    /// Change id length in bytes.
    pub change_id: usize,
}
/// Zero-copy view over one serialized commit graph entry.
struct CommitGraphEntry<'a> {
    data: &'a [u8],
}
// TODO: Add pointers to ancestors further back, like a skip list. Clear the
// lowest set bit to determine which generation number the pointers point to.
impl CommitGraphEntry<'_> {
    /// Serialized size of one entry: four u32 fields plus the commit id.
    fn size(commit_id_length: usize) -> usize {
        16 + commit_id_length
    }

    /// Generation number, stored little-endian at offset 0.
    fn generation_number(&self) -> u32 {
        let raw: [u8; 4] = self.data[0..4].try_into().unwrap();
        u32::from_le_bytes(raw)
    }

    /// First parent's global position, or an overflow pointer (offset 4.)
    fn parent1_pos_or_overflow_pos(&self) -> ParentIndexPosition {
        let raw: [u8; 4] = self.data[4..8].try_into().unwrap();
        ParentIndexPosition(u32::from_le_bytes(raw))
    }

    /// Second parent's global position, or the overflow length (offset 8.)
    fn parent2_pos_or_overflow_len(&self) -> ParentIndexPosition {
        let raw: [u8; 4] = self.data[8..12].try_into().unwrap();
        ParentIndexPosition(u32::from_le_bytes(raw))
    }

    /// Position into the sorted change-id lookup table (offset 12.)
    fn change_id_lookup_pos(&self) -> u32 {
        let raw: [u8; 4] = self.data[12..16].try_into().unwrap();
        u32::from_le_bytes(raw)
    }

    /// Owned commit id parsed from the trailing bytes.
    fn commit_id(&self) -> CommitId {
        CommitId::from_bytes(self.commit_id_bytes())
    }

    // might be better to add borrowed version of CommitId
    fn commit_id_bytes(&self) -> &[u8] {
        &self.data[16..]
    }
}
/// Commit index segment backed by immutable file.
///
/// File format:
/// ```text
/// u32: file format version
/// u32: parent segment file name length (0 means root)
/// <length number of bytes>: parent segment file name
///
/// u32: number of local commit entries
/// u32: number of local change ids
/// u32: number of overflow parent entries
/// u32: number of overflow change id positions
/// for each entry, in some topological order with parents first:
///   u32: generation number
///   if number of parents <= 2:
///     u32: (< 0x8000_0000) global index position for parent 1
///          (==0xffff_ffff) no parent 1
///     u32: (< 0x8000_0000) global index position for parent 2
///          (==0xffff_ffff) no parent 2
///   else:
///     u32: (>=0x8000_0000) position in the overflow table, bit-negated
///     u32: (>=0x8000_0000) number of parents (in the overflow table), bit-negated
///   u32: change id position in the sorted change ids table
///   <commit id length number of bytes>: commit id
/// for each entry, sorted by commit id:
///   u32: local position in the graph entries table
/// for each entry, sorted by change id:
///   <change id length number of bytes>: change id
/// for each entry, sorted by change id:
///   if number of associated commits == 1:
///     u32: (< 0x8000_0000) local position in the graph entries table
///   else:
///     u32: (>=0x8000_0000) position in the overflow table, bit-negated
/// for each overflow parent:
///   u32: global index position
/// for each overflow change id entry:
///   u32: local position in the graph entries table
/// ```
///
/// Note that u32 fields are 4-byte aligned so long as the parent file name
/// (which is hexadecimal hash) and commit/change ids aren't of exotic length.
// TODO: replace the table by a trie so we don't have to repeat the full commit
// ids
// TODO: add a fanout table like git's commit graph has?
pub(super) struct ReadonlyCommitIndexSegment {
    // Older segment this one is layered on top of, or None for the root.
    parent_file: Option<Arc<Self>>,
    // Total number of commits in all ancestor segments combined.
    num_parent_commits: u32,
    id: CommitIndexSegmentId,
    field_lengths: FieldLengths,
    // Number of commits not counting the parent file
    num_local_commits: u32,
    num_local_change_ids: u32,
    num_change_overflow_entries: u32,
    // Base data offsets in bytes:
    commit_lookup_base: usize,
    change_id_table_base: usize,
    change_pos_table_base: usize,
    parent_overflow_base: usize,
    change_overflow_base: usize,
    // Raw table bytes (everything after the file header), validated to be
    // exactly the expected length at load time.
    data: Vec<u8>,
}
impl Debug for ReadonlyCommitIndexSegment {
    /// Prints only the id and the parent chain; the bulk `data` tables are
    /// omitted.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        let mut debug = f.debug_struct("ReadonlyCommitIndexSegment");
        debug.field("id", &self.id);
        debug.field("parent_file", &self.parent_file);
        debug.finish_non_exhaustive()
    }
}
impl ReadonlyCommitIndexSegment {
    /// Loads the segment identified by `id` (and, recursively, its parent
    /// segments) from files under `dir`.
    pub(super) fn load(
        dir: &Path,
        id: CommitIndexSegmentId,
        lengths: FieldLengths,
    ) -> Result<Arc<Self>, ReadonlyIndexLoadError> {
        // Segment files are named by the hex form of their id.
        let mut file = File::open(dir.join(id.hex()))
            .map_err(|err| ReadonlyIndexLoadError::from_io_err("commit", id.hex(), err))?;
        Self::load_from(&mut file, dir, id, lengths)
    }

    /// Loads both parent segments and local entries from the given `file`.
    pub(super) fn load_from(
        file: &mut dyn Read,
        dir: &Path,
        id: CommitIndexSegmentId,
        lengths: FieldLengths,
    ) -> Result<Arc<Self>, ReadonlyIndexLoadError> {
        let from_io_err = |err| ReadonlyIndexLoadError::from_io_err("commit", id.hex(), err);
        // Reads one little-endian u32 header field.
        let read_u32 = |file: &mut dyn Read| {
            let mut buf = [0; 4];
            file.read_exact(&mut buf).map_err(from_io_err)?;
            Ok(u32::from_le_bytes(buf))
        };
        // Refuse files written in any other format version.
        let format_version = read_u32(file)?;
        if format_version != COMMIT_INDEX_SEGMENT_FILE_FORMAT_VERSION {
            return Err(ReadonlyIndexLoadError::UnexpectedVersion {
                kind: "commit",
                found_version: format_version,
                expected_version: COMMIT_INDEX_SEGMENT_FILE_FORMAT_VERSION,
            });
        }
        // A zero-length parent file name marks the root segment.
        let parent_filename_len = read_u32(file)?;
        let maybe_parent_file = if parent_filename_len > 0 {
            let mut parent_filename_bytes = vec![0; parent_filename_len as usize];
            file.read_exact(&mut parent_filename_bytes)
                .map_err(from_io_err)?;
            let parent_file_id = CommitIndexSegmentId::try_from_hex(parent_filename_bytes)
                .ok_or_else(|| {
                    ReadonlyIndexLoadError::invalid_data(
                        "commit",
                        id.hex(),
                        "parent file name is not valid hex",
                    )
                })?;
            // Recursively load the whole ancestor chain of segments.
            let parent_file = Self::load(dir, parent_file_id, lengths)?;
            Some(parent_file)
        } else {
            None
        };
        Self::load_with_parent_file(file, id, maybe_parent_file, lengths)
    }

    /// Loads local entries from the given `file`, returns new segment linked to
    /// the given `parent_file`.
    pub(super) fn load_with_parent_file(
        file: &mut dyn Read,
        id: CommitIndexSegmentId,
        parent_file: Option<Arc<Self>>,
        lengths: FieldLengths,
    ) -> Result<Arc<Self>, ReadonlyIndexLoadError> {
        let from_io_err = |err| ReadonlyIndexLoadError::from_io_err("commit", id.hex(), err);
        let read_u32 = |file: &mut dyn Read| {
            let mut buf = [0; 4];
            file.read_exact(&mut buf).map_err(from_io_err)?;
            Ok(u32::from_le_bytes(buf))
        };
        let num_parent_commits = parent_file
            .as_ref()
            .map_or(0, |segment| segment.as_composite().num_commits());
        // Table sizes, in the order they appear in the file header.
        let num_local_commits = read_u32(file)?;
        let num_local_change_ids = read_u32(file)?;
        let num_parent_overflow_entries = read_u32(file)?;
        let num_change_overflow_entries = read_u32(file)?;
        let mut data = vec![];
        file.read_to_end(&mut data).map_err(from_io_err)?;
        // Compute the byte offset of each table within `data`; see the
        // file-format description on the struct for the layout.
        let commit_graph_entry_size = CommitGraphEntry::size(lengths.commit_id);
        let graph_size = (num_local_commits as usize) * commit_graph_entry_size;
        let commit_lookup_size = (num_local_commits as usize) * 4;
        let change_id_table_size = (num_local_change_ids as usize) * lengths.change_id;
        let change_pos_table_size = (num_local_change_ids as usize) * 4;
        let parent_overflow_size = (num_parent_overflow_entries as usize) * 4;
        let change_overflow_size = (num_change_overflow_entries as usize) * 4;
        let graph_base = 0;
        let commit_lookup_base = graph_base + graph_size;
        let change_id_table_base = commit_lookup_base + commit_lookup_size;
        let change_pos_table_base = change_id_table_base + change_id_table_size;
        let parent_overflow_base = change_pos_table_base + change_pos_table_size;
        let change_overflow_base = parent_overflow_base + parent_overflow_size;
        let expected_size = change_overflow_base + change_overflow_size;
        // Reject truncated or oversized files here so the table accessors
        // below can slice without further validation.
        if data.len() != expected_size {
            return Err(ReadonlyIndexLoadError::invalid_data(
                "commit",
                id.hex(),
                "unexpected data length",
            ));
        }
        Ok(Arc::new(Self {
            parent_file,
            num_parent_commits,
            id,
            field_lengths: lengths,
            num_local_commits,
            num_local_change_ids,
            num_change_overflow_entries,
            commit_lookup_base,
            change_id_table_base,
            change_pos_table_base,
            parent_overflow_base,
            change_overflow_base,
            data,
        }))
    }

    pub(super) fn as_composite(&self) -> &CompositeCommitIndex {
        CompositeCommitIndex::new(self)
    }

    pub(super) fn id(&self) -> &CommitIndexSegmentId {
        &self.id
    }

    pub(super) fn field_lengths(&self) -> FieldLengths {
        self.field_lengths
    }

    /// Returns a view over the graph entry at the given local position.
    fn graph_entry(&self, local_pos: LocalCommitPosition) -> CommitGraphEntry<'_> {
        let table = &self.data[..self.commit_lookup_base];
        let entry_size = CommitGraphEntry::size(self.field_lengths.commit_id);
        let offset = (local_pos.0 as usize) * entry_size;
        CommitGraphEntry {
            data: &table[offset..][..entry_size],
        }
    }

    /// Maps a position in the sorted commit-id lookup table to a position in
    /// the graph entries table.
    fn commit_lookup_pos(&self, lookup_pos: u32) -> LocalCommitPosition {
        let table = &self.data[self.commit_lookup_base..self.change_id_table_base];
        let offset = (lookup_pos as usize) * 4;
        LocalCommitPosition(u32::from_le_bytes(table[offset..][..4].try_into().unwrap()))
    }

    fn change_lookup_id(&self, lookup_pos: u32) -> ChangeId {
        ChangeId::from_bytes(self.change_lookup_id_bytes(lookup_pos))
    }

    // might be better to add borrowed version of ChangeId
    fn change_lookup_id_bytes(&self, lookup_pos: u32) -> &[u8] {
        let table = &self.data[self.change_id_table_base..self.change_pos_table_base];
        let offset = (lookup_pos as usize) * self.field_lengths.change_id;
        &table[offset..][..self.field_lengths.change_id]
    }

    /// Maps a position in the sorted change-id table to an inlined local
    /// commit position or an overflow pointer.
    fn change_lookup_pos(&self, lookup_pos: u32) -> ChangeLocalPosition {
        let table = &self.data[self.change_pos_table_base..self.parent_overflow_base];
        let offset = (lookup_pos as usize) * 4;
        ChangeLocalPosition(u32::from_le_bytes(table[offset..][..4].try_into().unwrap()))
    }

    /// Reads `num_parents` parent positions starting at `overflow_pos` in
    /// the overflow parents table.
    fn overflow_parents(
        &self,
        overflow_pos: u32,
        num_parents: u32,
    ) -> SmallGlobalCommitPositionsVec {
        let table = &self.data[self.parent_overflow_base..self.change_overflow_base];
        let offset = (overflow_pos as usize) * 4;
        let size = (num_parents as usize) * 4;
        let (chunks, _remainder) = table[offset..][..size].as_chunks();
        chunks
            .iter()
            .map(|&chunk: &[u8; 4]| GlobalCommitPosition(u32::from_le_bytes(chunk)))
            .collect()
    }

    /// Scans graph entry positions stored in the overflow change ids table.
    fn overflow_changes_from(
        &self,
        overflow_pos: u32,
    ) -> impl Iterator<Item = LocalCommitPosition> {
        // Iterates from `overflow_pos` to the end of the table; the caller
        // is expected to stop at the boundary of the requested change id.
        let table = &self.data[self.change_overflow_base..];
        let offset = (overflow_pos as usize) * 4;
        let (chunks, _remainder) = table[offset..].as_chunks();
        chunks
            .iter()
            .map(|&chunk: &[u8; 4]| LocalCommitPosition(u32::from_le_bytes(chunk)))
    }

    /// Binary searches commit id by `prefix`. Returns the lookup position.
    fn commit_id_byte_prefix_to_lookup_pos(&self, prefix: &[u8]) -> PositionLookupResult {
        binary_search_pos_by(self.num_local_commits, |pos| {
            let local_pos = self.commit_lookup_pos(pos);
            let entry = self.graph_entry(local_pos);
            entry.commit_id_bytes().cmp(prefix)
        })
    }

    /// Binary searches change id by `prefix`. Returns the lookup position.
    fn change_id_byte_prefix_to_lookup_pos(&self, prefix: &[u8]) -> PositionLookupResult {
        binary_search_pos_by(self.num_local_change_ids, |pos| {
            let change_id_bytes = self.change_lookup_id_bytes(pos);
            change_id_bytes.cmp(prefix)
        })
    }
}
impl CommitIndexSegment for ReadonlyCommitIndexSegment {
    fn num_parent_commits(&self) -> u32 {
        self.num_parent_commits
    }

    fn num_local_commits(&self) -> u32 {
        self.num_local_commits
    }

    fn parent_file(&self) -> Option<&Arc<ReadonlyCommitIndexSegment>> {
        self.parent_file.as_ref()
    }

    fn commit_id_to_pos(&self, commit_id: &CommitId) -> Option<LocalCommitPosition> {
        // Exact match only; `ok()` discards the insertion position produced
        // for non-matching ids.
        self.commit_id_byte_prefix_to_lookup_pos(commit_id.as_bytes())
            .ok()
            .map(|pos| self.commit_lookup_pos(pos))
    }

    fn resolve_neighbor_commit_ids(
        &self,
        commit_id: &CommitId,
    ) -> (Option<CommitId>, Option<CommitId>) {
        self.commit_id_byte_prefix_to_lookup_pos(commit_id.as_bytes())
            .map_neighbors(|pos| {
                let local_pos = self.commit_lookup_pos(pos);
                let entry = self.graph_entry(local_pos);
                entry.commit_id()
            })
    }

    fn resolve_commit_id_prefix(&self, prefix: &HexPrefix) -> PrefixResolution<CommitId> {
        self.commit_id_byte_prefix_to_lookup_pos(prefix.min_prefix_bytes())
            .prefix_matches(prefix, |pos| {
                let local_pos = self.commit_lookup_pos(pos);
                let entry = self.graph_entry(local_pos);
                entry.commit_id()
            })
            .map(|(id, _)| id)
    }

    fn resolve_neighbor_change_ids(
        &self,
        change_id: &ChangeId,
    ) -> (Option<ChangeId>, Option<ChangeId>) {
        self.change_id_byte_prefix_to_lookup_pos(change_id.as_bytes())
            .map_neighbors(|pos| self.change_lookup_id(pos))
    }

    fn resolve_change_id_prefix(
        &self,
        prefix: &HexPrefix,
    ) -> PrefixResolution<(ChangeId, SmallLocalCommitPositionsVec)> {
        self.change_id_byte_prefix_to_lookup_pos(prefix.min_prefix_bytes())
            .prefix_matches(prefix, |pos| self.change_lookup_id(pos))
            .map(|(id, lookup_pos)| {
                let change_pos = self.change_lookup_pos(lookup_pos);
                if let Some(local_pos) = change_pos.as_inlined() {
                    // Common case: the change id maps to a single commit.
                    (id, smallvec![local_pos])
                } else {
                    let overflow_pos = change_pos.as_overflow().unwrap();
                    // Collect commits having the same change id. For cache
                    // locality, it might be better to look for the next few
                    // change id positions to determine the size.
                    let positions: SmallLocalCommitPositionsVec = self
                        .overflow_changes_from(overflow_pos)
                        .take_while(|&local_pos| {
                            let entry = self.graph_entry(local_pos);
                            entry.change_id_lookup_pos() == lookup_pos
                        })
                        .collect();
                    debug_assert_eq!(
                        overflow_pos + u32::try_from(positions.len()).unwrap(),
                        (lookup_pos + 1..self.num_local_change_ids)
                            .find_map(|lookup_pos| self.change_lookup_pos(lookup_pos).as_overflow())
                            .unwrap_or(self.num_change_overflow_entries),
                        "all overflow positions to the next change id should be collected"
                    );
                    (id, positions)
                }
            })
    }

    fn generation_number(&self, local_pos: LocalCommitPosition) -> u32 {
        self.graph_entry(local_pos).generation_number()
    }

    fn commit_id(&self, local_pos: LocalCommitPosition) -> CommitId {
        self.graph_entry(local_pos).commit_id()
    }

    fn change_id(&self, local_pos: LocalCommitPosition) -> ChangeId {
        let entry = self.graph_entry(local_pos);
        self.change_lookup_id(entry.change_id_lookup_pos())
    }

    fn num_parents(&self, local_pos: LocalCommitPosition) -> u32 {
        let graph_entry = self.graph_entry(local_pos);
        let pos1_or_overflow_pos = graph_entry.parent1_pos_or_overflow_pos();
        let pos2_or_overflow_len = graph_entry.parent2_pos_or_overflow_len();
        // Each set inlined slot contributes one parent; an unset slot
        // (sentinel 0xffff_ffff) decodes as an overflow length of 0.
        let inlined_len1 = u32::from(pos1_or_overflow_pos.as_inlined().is_some());
        let inlined_len2 = u32::from(pos2_or_overflow_len.as_inlined().is_some());
        let overflow_len = pos2_or_overflow_len.as_overflow().unwrap_or(0);
        inlined_len1 + inlined_len2 + overflow_len
    }

    fn parent_positions(&self, local_pos: LocalCommitPosition) -> SmallGlobalCommitPositionsVec {
        let graph_entry = self.graph_entry(local_pos);
        let pos1_or_overflow_pos = graph_entry.parent1_pos_or_overflow_pos();
        let pos2_or_overflow_len = graph_entry.parent2_pos_or_overflow_len();
        if let Some(pos1) = pos1_or_overflow_pos.as_inlined() {
            if let Some(pos2) = pos2_or_overflow_len.as_inlined() {
                smallvec![pos1, pos2]
            } else {
                smallvec![pos1]
            }
        } else {
            // No inlined parent 1: either the commit has >2 parents stored
            // in the overflow table, or it has none (the 0xffff_ffff
            // sentinels decode to an empty overflow range).
            let overflow_pos = pos1_or_overflow_pos.as_overflow().unwrap();
            let num_parents = pos2_or_overflow_len.as_overflow().unwrap();
            self.overflow_parents(overflow_pos, num_parents)
        }
    }
}
/// Commit index backend which stores data on local disk.
// Invariant: the wrapped CompositeIndex is backed by a readonly commit
// segment; see from_segment() and the expect() in readonly_commits().
#[derive(Clone, Debug)]
pub struct DefaultReadonlyIndex(CompositeIndex);
impl DefaultReadonlyIndex {
    /// Wraps readonly commit and changed-path segments into an index.
    pub(super) fn from_segment(
        commits: Arc<ReadonlyCommitIndexSegment>,
        changed_paths: CompositeChangedPathIndex,
    ) -> Self {
        Self(CompositeIndex::from_readonly(commits, changed_paths))
    }

    pub(super) fn readonly_commits(&self) -> &Arc<ReadonlyCommitIndexSegment> {
        // Cannot fail: this type is only constructed via from_segment(),
        // which always supplies a readonly segment.
        self.0.readonly_commits().expect("must have readonly")
    }

    pub(super) fn changed_paths(&self) -> &CompositeChangedPathIndex {
        self.0.changed_paths()
    }

    pub(super) fn has_id_impl(&self, commit_id: &CommitId) -> bool {
        self.0.commits().has_id(commit_id)
    }

    /// Returns the number of all indexed commits.
    pub fn num_commits(&self) -> u32 {
        self.0.commits().num_commits()
    }

    /// Collects statistics of indexed commits and segments.
    pub fn stats(&self) -> IndexStats {
        let commits = self.readonly_commits();
        let num_commits = commits.as_composite().num_commits();
        let mut num_merges = 0;
        let mut max_generation_number = 0;
        let mut change_ids = HashSet::new();
        // Single pass over all commits to gather per-commit statistics.
        for pos in (0..num_commits).map(GlobalCommitPosition) {
            let entry = commits.as_composite().entry_by_pos(pos);
            max_generation_number = max_generation_number.max(entry.generation_number());
            if entry.num_parents() > 1 {
                num_merges += 1;
            }
            change_ids.insert(entry.change_id());
        }
        let num_heads = u32::try_from(commits.as_composite().all_heads_pos().count()).unwrap();
        // Walk the segment chain newest-first, then reverse so the root
        // segment is reported first.
        let mut commit_levels = iter::successors(Some(commits), |segment| segment.parent_file())
            .map(|segment| CommitIndexLevelStats {
                num_commits: segment.num_local_commits(),
                name: segment.id().hex(),
            })
            .collect_vec();
        commit_levels.reverse();
        let changed_paths = self.changed_paths();
        // Range of global commit positions covered by the changed-path
        // index, if one exists.
        let changed_path_commits_range = changed_paths
            .start_commit_pos()
            .map(|GlobalCommitPosition(start)| start..(start + changed_paths.num_commits()));
        let changed_path_levels = changed_paths
            .readonly_segments()
            .iter()
            .map(|segment| ChangedPathIndexLevelStats {
                num_commits: segment.num_local_commits(),
                num_changed_paths: segment.num_changed_paths(),
                num_paths: segment.num_paths(),
                name: segment.id().hex(),
            })
            .collect_vec();
        IndexStats {
            num_commits,
            num_merges,
            max_generation_number,
            num_heads,
            num_changes: change_ids.len().try_into().unwrap(),
            commit_levels,
            changed_path_commits_range,
            changed_path_levels,
        }
    }

    /// Looks up generation of the specified commit.
    pub fn generation_number(&self, commit_id: &CommitId) -> Option<u32> {
        let entry = self.0.commits().entry_by_id(commit_id)?;
        Some(entry.generation_number())
    }

    #[doc(hidden)] // for tests
    pub fn evaluate_revset_impl(
        &self,
        expression: &ResolvedExpression,
        store: &Arc<Store>,
    ) -> Result<DefaultReadonlyIndexRevset, RevsetEvaluationError> {
        let inner = revset_engine::evaluate(expression, store, self.clone())?;
        Ok(DefaultReadonlyIndexRevset { inner })
    }

    /// Starts building a mutable index layered on top of this one.
    pub(super) fn start_modification(&self) -> DefaultMutableIndex {
        DefaultMutableIndex::incremental(self)
    }
}
impl AsCompositeIndex for DefaultReadonlyIndex {
    /// Borrows the wrapped composite index.
    fn as_composite(&self) -> &CompositeIndex {
        &self.0
    }
}
impl Index for DefaultReadonlyIndex {
    // All methods below simply delegate to the wrapped CompositeIndex.

    fn shortest_unique_commit_id_prefix_len(&self, commit_id: &CommitId) -> IndexResult<usize> {
        self.0.shortest_unique_commit_id_prefix_len(commit_id)
    }

    fn resolve_commit_id_prefix(
        &self,
        prefix: &HexPrefix,
    ) -> IndexResult<PrefixResolution<CommitId>> {
        self.0.resolve_commit_id_prefix(prefix)
    }

    fn has_id(&self, commit_id: &CommitId) -> IndexResult<bool> {
        Ok(self.has_id_impl(commit_id))
    }

    fn is_ancestor(&self, ancestor_id: &CommitId, descendant_id: &CommitId) -> IndexResult<bool> {
        self.0.is_ancestor(ancestor_id, descendant_id)
    }

    fn common_ancestors(&self, set1: &[CommitId], set2: &[CommitId]) -> IndexResult<Vec<CommitId>> {
        self.0.common_ancestors(set1, set2)
    }

    fn all_heads_for_gc(&self) -> IndexResult<Box<dyn Iterator<Item = CommitId> + '_>> {
        self.0.all_heads_for_gc()
    }

    fn heads(&self, candidates: &mut dyn Iterator<Item = &CommitId>) -> IndexResult<Vec<CommitId>> {
        self.0.heads(candidates)
    }

    fn changed_paths_in_commit(
        &self,
        commit_id: &CommitId,
    ) -> IndexResult<Option<Box<dyn Iterator<Item = RepoPathBuf> + '_>>> {
        self.0.changed_paths_in_commit(commit_id)
    }

    fn evaluate_revset(
        &self,
        expression: &ResolvedExpression,
        store: &Arc<Store>,
    ) -> Result<Box<dyn Revset + '_>, RevsetEvaluationError> {
        self.0.evaluate_revset(expression, store)
    }
}
impl ReadonlyIndex for DefaultReadonlyIndex {
    fn as_index(&self) -> &dyn Index {
        self
    }

    fn change_id_index(
        &self,
        heads: &mut dyn Iterator<Item = &CommitId>,
    ) -> Box<dyn ChangeIdIndex> {
        // The change-id index captures a clone of this index so the returned
        // box can outlive the borrow of `self`.
        Box::new(ChangeIdIndexImpl::new(self.clone(), heads))
    }

    fn start_modification(&self) -> Box<dyn MutableIndex> {
        // `Self::` disambiguates to the inherent method of the same name.
        Box::new(Self::start_modification(self))
    }
}
/// Revset evaluated against a [`DefaultReadonlyIndex`].
#[derive(Debug)]
#[doc(hidden)] // for tests
pub struct DefaultReadonlyIndexRevset {
    // Concrete revset implementation backed by this index.
    inner: RevsetImpl<DefaultReadonlyIndex>,
}

impl DefaultReadonlyIndexRevset {
    /// Iterates the revset as a graph, optionally skipping transitive edges.
    pub fn iter_graph_impl(
        &self,
        skip_transitive_edges: bool,
    ) -> impl Iterator<Item = Result<GraphNode<CommitId>, RevsetEvaluationError>> {
        self.inner.iter_graph_impl(skip_transitive_edges)
    }

    /// Unwraps into a boxed, type-erased revset.
    pub fn into_inner(self) -> Box<dyn Revset> {
        Box::new(self.inner)
    }
}
/// Statistics of indexed commits and index segments, as collected by
/// `DefaultReadonlyIndex::stats()`.
#[derive(Clone, Debug)]
pub struct IndexStats {
    /// Total number of indexed commits.
    pub num_commits: u32,
    /// Number of commits with more than one parent.
    pub num_merges: u32,
    /// Highest generation number among all indexed commits.
    pub max_generation_number: u32,
    /// Number of head commits in the index.
    pub num_heads: u32,
    /// Number of distinct change ids.
    pub num_changes: u32,
    /// Per-segment commit index stats, root (oldest) segment first.
    pub commit_levels: Vec<CommitIndexLevelStats>,
    /// Range of global commit positions covered by the changed-path index,
    /// if that index exists.
    pub changed_path_commits_range: Option<Range<u32>>,
    /// Per-segment changed-path index stats.
    pub changed_path_levels: Vec<ChangedPathIndexLevelStats>,
}
/// Statistics of a single commit index segment.
#[derive(Clone, Debug)]
pub struct CommitIndexLevelStats {
    /// Number of commits local to this segment.
    pub num_commits: u32,
    /// Index file name (hex segment id).
    pub name: String,
}
/// Statistics of a single changed-path index segment.
#[derive(Clone, Debug)]
pub struct ChangedPathIndexLevelStats {
    /// Number of commits.
    pub num_commits: u32,
    /// Sum of number of per-commit changed paths.
    pub num_changed_paths: u32,
    /// Number of unique paths.
    pub num_paths: u32,
    /// Index file name.
    pub name: String,
}
/// Binary search result in a sorted lookup table.
#[derive(Clone, Copy, Debug)]
struct PositionLookupResult {
    /// `Ok` means the element is found at the position. `Err` contains the
    /// position where the element could be inserted.
    result: Result<u32, u32>,
    /// Total number of elements in the searched table.
    size: u32,
}
impl PositionLookupResult {
    /// Returns position of the element if exactly matched.
    fn ok(self) -> Option<u32> {
        self.result.ok()
    }

    /// Returns `(previous, next)` positions of the matching element or
    /// boundary.
    fn neighbors(self) -> (Option<u32>, Option<u32>) {
        match self.result {
            // Exact match: neighbors exclude the matching position itself.
            Ok(pos) => (pos.checked_sub(1), (pos + 1..self.size).next()),
            // No match: the insertion point itself is the "next" neighbor,
            // if it is in bounds.
            Err(pos) => (pos.checked_sub(1), (pos..self.size).next()),
        }
    }

    /// Looks up `(previous, next)` elements by the given function.
    fn map_neighbors<T>(self, mut lookup: impl FnMut(u32) -> T) -> (Option<T>, Option<T>) {
        let (prev_pos, next_pos) = self.neighbors();
        (prev_pos.map(&mut lookup), next_pos.map(&mut lookup))
    }

    /// Looks up matching elements from the current position, returns one if
    /// the given `prefix` unambiguously matches.
    fn prefix_matches<T: ObjectId>(
        self,
        prefix: &HexPrefix,
        lookup: impl FnMut(u32) -> T,
    ) -> PrefixResolution<(T, u32)> {
        // Whether or not there was an exact match, scanning starts at the
        // insertion point: the first table entry that can match the prefix.
        let lookup_pos = self.result.unwrap_or_else(|pos| pos);
        // Taking at most two matches is enough to detect ambiguity.
        let mut matches = (lookup_pos..self.size)
            .map(lookup)
            .take_while(|id| prefix.matches(id))
            .fuse();
        match (matches.next(), matches.next()) {
            (Some(id), None) => PrefixResolution::SingleMatch((id, lookup_pos)),
            (Some(_), Some(_)) => PrefixResolution::AmbiguousMatch,
            (None, _) => PrefixResolution::NoMatch,
        }
    }
}
/// Binary searches u32 position with the given comparison function.
///
/// Returns `Ok` with the position of an element for which `f` returned
/// `Ordering::Equal` (not necessarily the first such element), or `Err`
/// with the insertion position if no element matched.
fn binary_search_pos_by(size: u32, mut f: impl FnMut(u32) -> Ordering) -> PositionLookupResult {
    let mut low = 0;
    let mut high = size;
    while low < high {
        // Overflow-safe midpoint: `(low + high) / 2` could wrap if
        // `low + high` exceeded `u32::MAX` for very large tables.
        let mid = low + (high - low) / 2;
        let cmp = f(mid);
        // According to Rust std lib, this produces cmov instructions.
        // https://github.com/rust-lang/rust/blob/1.76.0/library/core/src/slice/mod.rs#L2845-L2855
        low = if cmp == Ordering::Less { mid + 1 } else { low };
        high = if cmp == Ordering::Greater { mid } else { high };
        if cmp == Ordering::Equal {
            let result = Ok(mid);
            return PositionLookupResult { result, size };
        }
    }
    let result = Err(low);
    PositionLookupResult { result, size }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/default_index/store.rs | lib/src/default_index/store.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::collections::HashMap;
use std::collections::HashSet;
use std::fs;
use std::io;
use std::io::Write as _;
use std::path::Path;
use std::path::PathBuf;
use std::slice;
use std::sync::Arc;
use itertools::Itertools as _;
use pollster::FutureExt as _;
use prost::Message as _;
use tempfile::NamedTempFile;
use thiserror::Error;
use super::changed_path::ChangedPathIndexSegmentId;
use super::changed_path::CompositeChangedPathIndex;
use super::changed_path::collect_changed_paths;
use super::composite::AsCompositeIndex as _;
use super::composite::CommitIndexSegmentId;
use super::entry::GlobalCommitPosition;
use super::mutable::DefaultMutableIndex;
use super::readonly::DefaultReadonlyIndex;
use super::readonly::FieldLengths;
use super::readonly::ReadonlyCommitIndexSegment;
use super::readonly::ReadonlyIndexLoadError;
use crate::backend::BackendError;
use crate::backend::BackendInitError;
use crate::backend::CommitId;
use crate::commit::CommitByCommitterTimestamp;
use crate::dag_walk;
use crate::file_util;
use crate::file_util::IoResultExt as _;
use crate::file_util::PathError;
use crate::file_util::persist_temp_file;
use crate::index::IndexStore;
use crate::index::IndexStoreError;
use crate::index::IndexStoreResult;
use crate::index::MutableIndex;
use crate::index::ReadonlyIndex;
use crate::object_id::ObjectId as _;
use crate::op_store::OpStoreError;
use crate::op_store::OperationId;
use crate::op_walk;
use crate::operation::Operation;
use crate::store::Store;
// BLAKE2b-512 hash length in hex string
// (64 bytes -> 128 hex digits); used to distinguish segment files from
// other directory entries during cleanup.
const SEGMENT_FILE_NAME_LENGTH: usize = 64 * 2;
/// Error that may occur during `DefaultIndexStore` initialization.
///
/// Wraps the underlying filesystem [`PathError`].
#[derive(Debug, Error)]
#[error("Failed to initialize index store")]
pub struct DefaultIndexStoreInitError(#[from] pub PathError);
impl From<DefaultIndexStoreInitError> for BackendInitError {
    fn from(err: DefaultIndexStoreInitError) -> Self {
        // Index-store init failure is reported as a generic backend init
        // error, keeping the full error (not just the inner PathError) as
        // the source.
        Self(err.into())
    }
}
/// Error that may occur when reading, writing, or rebuilding index data in
/// a [`DefaultIndexStore`].
#[derive(Debug, Error)]
pub enum DefaultIndexStoreError {
    #[error("Failed to associate index files with an operation {op_id}")]
    AssociateIndex {
        op_id: OperationId,
        source: PathError,
    },
    #[error("Failed to load associated index file names")]
    LoadAssociation(#[source] PathError),
    #[error(transparent)]
    LoadIndex(ReadonlyIndexLoadError),
    #[error("Failed to write index file")]
    SaveIndex(#[source] PathError),
    #[error("Failed to index commits at operation {op_id}")]
    IndexCommits {
        op_id: OperationId,
        source: BackendError,
    },
    #[error(transparent)]
    OpStore(#[from] OpStoreError),
}
/// Index store which keeps commit and changed-path index files on local
/// disk.
#[derive(Debug)]
pub struct DefaultIndexStore {
    // Root directory under which all index files and subdirectories live.
    dir: PathBuf,
}
impl DefaultIndexStore {
/// Returns the identifier of this index store implementation.
pub fn name() -> &'static str {
    const NAME: &str = "default";
    NAME
}
/// Creates a store rooted at `dir`, setting up the base directory layout.
pub fn init(dir: &Path) -> Result<Self, DefaultIndexStoreInitError> {
    let store = Self {
        dir: dir.to_path_buf(),
    };
    store.ensure_base_dirs()?;
    Ok(store)
}
/// Opens an existing store rooted at `dir` without touching the file
/// system.
pub fn load(dir: &Path) -> Self {
    let dir = dir.to_path_buf();
    Self { dir }
}
/// Discards all index data so that it will be rebuilt from scratch the
/// next time an index is requested.
pub fn reinit(&self) -> Result<(), DefaultIndexStoreInitError> {
    // Create base directories in case the store was initialized by old jj.
    self.ensure_base_dirs()?;
    // Remove all operation links to trigger rebuilding.
    file_util::remove_dir_contents(&self.op_links_dir())?;
    file_util::remove_dir_contents(&self.legacy_operations_dir())?;
    // Remove index segments to save disk space. If raced, new segment file
    // will be created by the other process.
    file_util::remove_dir_contents(&self.commit_segments_dir())?;
    file_util::remove_dir_contents(&self.changed_path_segments_dir())?;
    // jj <= 0.14 created segment files in the top directory
    for entry in self.dir.read_dir().context(&self.dir)? {
        let entry = entry.context(&self.dir)?;
        let path = entry.path();
        if path.file_name().unwrap().len() != SEGMENT_FILE_NAME_LENGTH {
            // Skip "type" file, "operations" directory, etc.
            continue;
        }
        fs::remove_file(&path).context(&path)?;
    }
    Ok(())
}
/// Creates (or reuses) every base subdirectory this store writes into.
fn ensure_base_dirs(&self) -> Result<(), PathError> {
    let dirs = [
        self.op_links_dir(),
        self.legacy_operations_dir(),
        self.commit_segments_dir(),
        self.changed_path_segments_dir(),
    ];
    for dir in &dirs {
        file_util::create_or_reuse_dir(dir).context(dir)?;
    }
    Ok(())
}
/// Directory for mapping from operations to segments. (jj >= 0.33)
fn op_links_dir(&self) -> PathBuf {
    self.dir.join("op_links")
}

/// Directory for mapping from operations to commit segments. (jj < 0.33)
fn legacy_operations_dir(&self) -> PathBuf {
    self.dir.join("operations")
}

/// Directory for commit segment files.
fn commit_segments_dir(&self) -> PathBuf {
    self.dir.join("segments")
}

/// Directory for changed-path segment files.
fn changed_path_segments_dir(&self) -> PathBuf {
    self.dir.join("changed_paths")
}
/// Loads the index that was previously associated with the operation
/// `op_id`.
fn load_index_at_operation(
    &self,
    op_id: &OperationId,
    lengths: FieldLengths,
) -> Result<DefaultReadonlyIndex, DefaultIndexStoreError> {
    let commit_segment_id;
    let changed_path_start_commit_pos;
    let changed_path_segment_ids;
    // Prefer the new (jj >= 0.33) protobuf op-link file, which records both
    // the commit segment and the changed-path segments.
    let op_link_file = self.op_links_dir().join(op_id.hex());
    match fs::read(&op_link_file).context(&op_link_file) {
        Ok(data) => {
            let proto = crate::protos::default_index::SegmentControl::decode(&*data)
                .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))
                .context(&op_link_file)
                .map_err(DefaultIndexStoreError::LoadAssociation)?;
            commit_segment_id = CommitIndexSegmentId::new(proto.commit_segment_id);
            changed_path_start_commit_pos = proto
                .changed_path_start_commit_pos
                .map(GlobalCommitPosition);
            changed_path_segment_ids = proto
                .changed_path_segment_ids
                .into_iter()
                .map(ChangedPathIndexSegmentId::new)
                .collect_vec();
        }
        // TODO: drop support for legacy operation link file in jj 0.39 or so
        Err(PathError { source: error, .. }) if error.kind() == io::ErrorKind::NotFound => {
            // Fall back to the legacy file, which contains only the hex id
            // of the commit segment.
            let op_id_file = self.legacy_operations_dir().join(op_id.hex());
            let data = fs::read(&op_id_file)
                .context(&op_id_file)
                .map_err(DefaultIndexStoreError::LoadAssociation)?;
            commit_segment_id = CommitIndexSegmentId::try_from_hex(&data)
                .ok_or_else(|| {
                    io::Error::new(io::ErrorKind::InvalidData, "file name is not valid hex")
                })
                .context(&op_id_file)
                .map_err(DefaultIndexStoreError::LoadAssociation)?;
            changed_path_start_commit_pos = None;
            changed_path_segment_ids = vec![];
        }
        Err(err) => return Err(DefaultIndexStoreError::LoadAssociation(err)),
    };
    let commits = ReadonlyCommitIndexSegment::load(
        &self.commit_segments_dir(),
        commit_segment_id,
        lengths,
    )
    .map_err(DefaultIndexStoreError::LoadIndex)?;
    // TODO: lazy load or mmap?
    let changed_paths = if let Some(start_commit_pos) = changed_path_start_commit_pos {
        CompositeChangedPathIndex::load(
            &self.changed_path_segments_dir(),
            start_commit_pos,
            &changed_path_segment_ids,
        )
        .map_err(DefaultIndexStoreError::LoadIndex)?
    } else {
        // Legacy links carry no changed-path index.
        CompositeChangedPathIndex::null()
    };
    Ok(DefaultReadonlyIndex::from_segment(commits, changed_paths))
}
/// Rebuilds index for the given `operation`.
///
/// The index to be built will be calculated from one of the ancestor
/// operations if exists. Use `reinit()` to rebuild index from scratch.
    #[tracing::instrument(skip(self, store))]
    pub async fn build_index_at_operation(
        &self,
        operation: &Operation,
        store: &Arc<Store>,
    ) -> Result<DefaultReadonlyIndex, DefaultIndexStoreError> {
        tracing::info!("scanning operations to index");
        let op_links_dir = self.op_links_dir();
        let legacy_operations_dir = self.legacy_operations_dir();
        let field_lengths = FieldLengths {
            commit_id: store.commit_id_length(),
            change_id: store.change_id_length(),
        };
        // Pick the latest existing ancestor operation as the parent segment.
        // An operation counts as indexed if either a new-style or a legacy
        // link file exists for it.
        let mut unindexed_ops = Vec::new();
        let mut parent_op = None;
        for op in op_walk::walk_ancestors(slice::from_ref(operation)) {
            let op = op?;
            if op_links_dir.join(op.id().hex()).is_file()
                || legacy_operations_dir.join(op.id().hex()).is_file()
            {
                parent_op = Some(op);
                break;
            } else {
                unindexed_ops.push(op);
            }
        }
        let ops_to_visit = if let Some(op) = &parent_op {
            // There may be concurrent ops, so revisit from the head. The parent
            // op is usually shallow if it exists.
            op_walk::walk_ancestors_range(slice::from_ref(operation), slice::from_ref(op))
                .try_collect()?
        } else {
            unindexed_ops
        };
        tracing::info!(
            ops_count = ops_to_visit.len(),
            "collecting head commits to index"
        );
        // Map each referenced commit to the first operation that referenced
        // it, so indexing errors can be attributed to an operation.
        let mut historical_heads: HashMap<CommitId, OperationId> = HashMap::new();
        for op in &ops_to_visit {
            for commit_id in itertools::chain(
                op.all_referenced_commit_ids(),
                op.view()?.all_referenced_commit_ids(),
            ) {
                if !historical_heads.contains_key(commit_id) {
                    historical_heads.insert(commit_id.clone(), op.id().clone());
                }
            }
        }
        // Start from the parent operation's index if one exists, else build a
        // full index from scratch.
        let mut mutable_index;
        let maybe_parent_index;
        match &parent_op {
            None => {
                mutable_index = DefaultMutableIndex::full(field_lengths);
                maybe_parent_index = None;
            }
            Some(op) => {
                let parent_index = self.load_index_at_operation(op.id(), field_lengths)?;
                mutable_index = parent_index.start_modification();
                maybe_parent_index = Some(parent_index);
            }
        }
        tracing::info!(
            ?maybe_parent_index,
            heads_count = historical_heads.len(),
            "indexing commits reachable from historical heads"
        );
        // Build a list of ancestors of heads where parents come after the
        // commit itself.
        let parent_index_has_id = |id: &CommitId| {
            maybe_parent_index
                .as_ref()
                .is_some_and(|index| index.has_id_impl(id))
        };
        let get_commit_with_op = |commit_id: &CommitId, op_id: &OperationId| {
            let op_id = op_id.clone();
            match store.get_commit(commit_id) {
                // Propagate head's op_id to report possible source of an error.
                // The op_id doesn't have to be included in the sort key, but
                // that wouldn't matter since the commit should be unique.
                Ok(commit) => Ok((CommitByCommitterTimestamp(commit), op_id)),
                Err(source) => Err(DefaultIndexStoreError::IndexCommits { op_id, source }),
            }
        };
        // Retain immediate predecessors if legacy operation exists. Some
        // commands (e.g. squash into grandparent) may leave transitive
        // predecessors, which aren't visible to any views.
        // TODO: delete this workaround with commit.predecessors.
        let commits_to_keep_immediate_predecessors = if ops_to_visit
            .iter()
            .any(|op| !op.stores_commit_predecessors())
        {
            // Collect every not-yet-indexed ancestor of the heads.
            let mut ancestors = HashSet::new();
            let mut work = historical_heads.keys().cloned().collect_vec();
            while let Some(commit_id) = work.pop() {
                if ancestors.contains(&commit_id) || parent_index_has_id(&commit_id) {
                    continue;
                }
                if let Ok(commit) = store.get_commit(&commit_id) {
                    work.extend(commit.parent_ids().iter().cloned());
                }
                ancestors.insert(commit_id);
            }
            ancestors
        } else {
            HashSet::new()
        };
        let commits = dag_walk::topo_order_reverse_ord_ok(
            historical_heads
                .iter()
                .filter(|&(commit_id, _)| !parent_index_has_id(commit_id))
                .map(|(commit_id, op_id)| get_commit_with_op(commit_id, op_id)),
            |(CommitByCommitterTimestamp(commit), _)| commit.id().clone(),
            |(CommitByCommitterTimestamp(commit), op_id)| {
                let keep_predecessors =
                    commits_to_keep_immediate_predecessors.contains(commit.id());
                itertools::chain(
                    commit.parent_ids(),
                    keep_predecessors
                        .then_some(&commit.store_commit().predecessors)
                        .into_iter()
                        .flatten(),
                )
                .filter(|&id| !parent_index_has_id(id))
                .map(|commit_id| get_commit_with_op(commit_id, op_id))
                .collect_vec()
            },
            |_| panic!("graph has cycle"),
        )?;
        // `commits` is children-first, so iterate in reverse to add parents
        // before their children.
        for (CommitByCommitterTimestamp(commit), op_id) in commits.iter().rev() {
            mutable_index.add_commit(commit).await.map_err(|source| {
                DefaultIndexStoreError::IndexCommits {
                    op_id: op_id.clone(),
                    source,
                }
            })?;
        }
        let index = self.save_mutable_index(mutable_index, operation.id())?;
        tracing::info!(?index, commits_count = commits.len(), "saved new index");
        Ok(index)
    }
    /// Builds changed-path index for the specified operation.
    ///
    /// At most `max_commits` number of commits will be scanned from the latest
    /// unindexed commit.
    #[tracing::instrument(skip(self, store))]
    pub async fn build_changed_path_index_at_operation(
        &self,
        op_id: &OperationId,
        store: &Arc<Store>,
        max_commits: u32,
        // TODO: add progress callback?
    ) -> Result<DefaultReadonlyIndex, DefaultIndexStoreError> {
        // Create directories in case the store was initialized by jj < 0.33.
        self.ensure_base_dirs()
            .map_err(DefaultIndexStoreError::SaveIndex)?;
        let field_lengths = FieldLengths {
            commit_id: store.commit_id_length(),
            change_id: store.change_id_length(),
        };
        let index = self.load_index_at_operation(op_id, field_lengths)?;
        let old_changed_paths = index.changed_paths();
        // Distribute max_commits to contiguous pre/post ranges:
        // ..|pre|old_changed_paths|post|
        // (where pre.len() + post.len() <= max_commits)
        let pre_start;
        let pre_end;
        let post_start;
        let post_end;
        if let Some(GlobalCommitPosition(pos)) = old_changed_paths.start_commit_pos() {
            // Extend forward (post) first; any leftover budget extends
            // backward (pre).
            post_start = pos + old_changed_paths.num_commits();
            assert!(post_start <= index.num_commits());
            post_end = u32::saturating_add(post_start, max_commits).min(index.num_commits());
            pre_start = u32::saturating_sub(pos, max_commits - (post_end - post_start));
            pre_end = pos;
        } else {
            // Nothing indexed yet: spend the whole budget on the newest
            // commits.
            pre_start = u32::saturating_sub(index.num_commits(), max_commits);
            pre_end = index.num_commits();
            post_start = pre_end;
            post_end = pre_end;
        }
        let to_index_err = |source| DefaultIndexStoreError::IndexCommits {
            op_id: op_id.clone(),
            source,
        };
        // Computes and appends the changed paths of the commit at `pos`.
        let index_commit = async |changed_paths: &mut CompositeChangedPathIndex,
                                  pos: GlobalCommitPosition| {
            assert_eq!(changed_paths.next_mutable_commit_pos(), Some(pos));
            let commit_id = index.as_composite().commits().entry_by_pos(pos).commit_id();
            let commit = store.get_commit_async(&commit_id).await?;
            let paths = collect_changed_paths(&index, &commit).await?;
            changed_paths.add_changed_paths(paths);
            Ok(())
        };
        // Index pre range
        let mut new_changed_paths =
            CompositeChangedPathIndex::empty(GlobalCommitPosition(pre_start));
        new_changed_paths.make_mutable();
        tracing::info!(?pre_start, ?pre_end, "indexing changed paths in commits");
        for pos in (pre_start..pre_end).map(GlobalCommitPosition) {
            index_commit(&mut new_changed_paths, pos)
                .await
                .map_err(to_index_err)?;
        }
        new_changed_paths
            .save_in(&self.changed_path_segments_dir())
            .map_err(DefaultIndexStoreError::SaveIndex)?;
        // Copy previously-indexed segments
        new_changed_paths.append_segments(old_changed_paths);
        // Index post range, which is usually empty
        new_changed_paths.make_mutable();
        tracing::info!(?post_start, ?post_end, "indexing changed paths in commits");
        for pos in (post_start..post_end).map(GlobalCommitPosition) {
            index_commit(&mut new_changed_paths, pos)
                .await
                .map_err(to_index_err)?;
        }
        new_changed_paths.maybe_squash_with_ancestors();
        new_changed_paths
            .save_in(&self.changed_path_segments_dir())
            .map_err(DefaultIndexStoreError::SaveIndex)?;
        // Update the operation link to point to the new segments
        let commits = index.readonly_commits().clone();
        let index = DefaultReadonlyIndex::from_segment(commits, new_changed_paths);
        self.associate_index_with_operation(&index, op_id)
            .map_err(|source| DefaultIndexStoreError::AssociateIndex {
                op_id: op_id.to_owned(),
                source,
            })?;
        Ok(index)
    }
    /// Persists the mutable index as on-disk segment files and links the
    /// result to `op_id`, returning the reloaded readonly index.
    fn save_mutable_index(
        &self,
        index: DefaultMutableIndex,
        op_id: &OperationId,
    ) -> Result<DefaultReadonlyIndex, DefaultIndexStoreError> {
        // Create directories in case the store was initialized by jj < 0.33.
        self.ensure_base_dirs()
            .map_err(DefaultIndexStoreError::SaveIndex)?;
        let (commits, mut changed_paths) = index.into_segment();
        // Squash small segments into their ancestors before writing so the
        // on-disk segment stack stays shallow.
        let commits = commits
            .maybe_squash_with_ancestors()
            .save_in(&self.commit_segments_dir())
            .map_err(DefaultIndexStoreError::SaveIndex)?;
        changed_paths.maybe_squash_with_ancestors();
        changed_paths
            .save_in(&self.changed_path_segments_dir())
            .map_err(DefaultIndexStoreError::SaveIndex)?;
        let index = DefaultReadonlyIndex::from_segment(commits, changed_paths);
        self.associate_index_with_operation(&index, op_id)
            .map_err(|source| DefaultIndexStoreError::AssociateIndex {
                op_id: op_id.to_owned(),
                source,
            })?;
        Ok(index)
    }
    /// Records a link from the given operation to this index version.
    fn associate_index_with_operation(
        &self,
        index: &DefaultReadonlyIndex,
        op_id: &OperationId,
    ) -> Result<(), PathError> {
        // New-style link: a protobuf message naming the commit segment and
        // the changed-path segments.
        let proto = crate::protos::default_index::SegmentControl {
            commit_segment_id: index.readonly_commits().id().to_bytes(),
            changed_path_start_commit_pos: index
                .changed_paths()
                .start_commit_pos()
                .map(|GlobalCommitPosition(start)| start),
            changed_path_segment_ids: index
                .changed_paths()
                .readonly_segments()
                .iter()
                .map(|segment| segment.id().to_bytes())
                .collect(),
        };
        // Write via a temp file so the link file appears atomically.
        let dir = self.op_links_dir();
        let mut temp_file = NamedTempFile::new_in(&dir).context(&dir)?;
        let file = temp_file.as_file_mut();
        file.write_all(&proto.encode_to_vec())
            .context(temp_file.path())?;
        let path = dir.join(op_id.hex());
        persist_temp_file(temp_file, &path).context(&path)?;
        // Also write the legacy link (hex id of the commit segment) for older
        // jj versions reading this store.
        // TODO: drop support for legacy operation link file in jj 0.39 or so
        let dir = self.legacy_operations_dir();
        let mut temp_file = NamedTempFile::new_in(&dir).context(&dir)?;
        let file = temp_file.as_file_mut();
        file.write_all(index.readonly_commits().id().hex().as_bytes())
            .context(temp_file.path())?;
        let path = dir.join(op_id.hex());
        persist_temp_file(temp_file, &path).context(&path)?;
        Ok(())
    }
}
impl IndexStore for DefaultIndexStore {
    fn name(&self) -> &str {
        Self::name()
    }

    fn get_index_at_op(
        &self,
        op: &Operation,
        store: &Arc<Store>,
    ) -> IndexStoreResult<Box<dyn ReadonlyIndex>> {
        let field_lengths = FieldLengths {
            commit_id: store.commit_id_length(),
            change_id: store.change_id_length(),
        };
        let index = match self.load_index_at_operation(op.id(), field_lengths) {
            // No index is associated with this operation yet: build one.
            Err(DefaultIndexStoreError::LoadAssociation(PathError { source: error, .. }))
                if error.kind() == io::ErrorKind::NotFound =>
            {
                self.build_index_at_operation(op, store).block_on()
            }
            Err(DefaultIndexStoreError::LoadIndex(err)) if err.is_corrupt_or_not_found() => {
                // If the index was corrupt (maybe it was written in a different format),
                // we just reindex.
                match &err {
                    ReadonlyIndexLoadError::UnexpectedVersion {
                        kind,
                        found_version,
                        expected_version,
                    } => {
                        eprintln!(
                            "Found {kind} index format version {found_version}, expected version \
                             {expected_version}. Reindexing..."
                        );
                    }
                    ReadonlyIndexLoadError::Other { error, .. } => {
                        eprintln!("{err} (maybe the format has changed): {error}. Reindexing...");
                    }
                }
                self.reinit()
                    .map_err(|err| IndexStoreError::Read(err.into()))?;
                self.build_index_at_operation(op, store).block_on()
            }
            result => result,
        }
        .map_err(|err| IndexStoreError::Read(err.into()))?;
        Ok(Box::new(index))
    }

    fn write_index(
        &self,
        index: Box<dyn MutableIndex>,
        op: &Operation,
    ) -> IndexStoreResult<Box<dyn ReadonlyIndex>> {
        // The caller must hand us our own mutable index implementation.
        let index: Box<DefaultMutableIndex> = index
            .downcast()
            .expect("index to merge in must be a DefaultMutableIndex");
        let index = self
            .save_mutable_index(*index, op.id())
            .map_err(|err| IndexStoreError::Write(err.into()))?;
        Ok(Box::new(index))
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/default_index/mutable.rs | lib/src/default_index/mutable.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cmp::max;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::fmt;
use std::fmt::Debug;
use std::io::Write as _;
use std::iter;
use std::ops::Bound;
use std::path::Path;
use std::sync::Arc;
use blake2::Blake2b512;
use digest::Digest as _;
use itertools::Itertools as _;
use pollster::FutureExt as _;
use smallvec::SmallVec;
use smallvec::smallvec;
use tempfile::NamedTempFile;
use super::changed_path::CompositeChangedPathIndex;
use super::changed_path::collect_changed_paths;
use super::composite::AsCompositeIndex;
use super::composite::ChangeIdIndexImpl;
use super::composite::CommitIndexSegment;
use super::composite::CommitIndexSegmentId;
use super::composite::CompositeCommitIndex;
use super::composite::CompositeIndex;
use super::composite::DynCommitIndexSegment;
use super::entry::GlobalCommitPosition;
use super::entry::LocalCommitPosition;
use super::entry::SmallGlobalCommitPositionsVec;
use super::entry::SmallLocalCommitPositionsVec;
use super::readonly::COMMIT_INDEX_SEGMENT_FILE_FORMAT_VERSION;
use super::readonly::DefaultReadonlyIndex;
use super::readonly::FieldLengths;
use super::readonly::OVERFLOW_FLAG;
use super::readonly::ReadonlyCommitIndexSegment;
use crate::backend::BackendResult;
use crate::backend::ChangeId;
use crate::backend::CommitId;
use crate::commit::Commit;
use crate::file_util::IoResultExt as _;
use crate::file_util::PathError;
use crate::file_util::persist_content_addressed_temp_file;
use crate::index::ChangeIdIndex;
use crate::index::Index;
use crate::index::IndexError;
use crate::index::IndexResult;
use crate::index::MutableIndex;
use crate::index::ReadonlyIndex;
use crate::object_id::HexPrefix;
use crate::object_id::ObjectId;
use crate::object_id::PrefixResolution;
use crate::repo_path::RepoPathBuf;
use crate::revset::ResolvedExpression;
use crate::revset::Revset;
use crate::revset::RevsetEvaluationError;
use crate::store::Store;
/// In-memory graph record for one commit in a mutable index segment.
#[derive(Clone, Debug)]
struct MutableGraphEntry {
    commit_id: CommitId,
    change_id: ChangeId,
    /// 1 + the maximum generation number among the parents (0 for a commit
    /// with no parents.)
    generation_number: u32,
    /// Positions of the parent commits in the composite (global) index.
    parent_positions: SmallGlobalCommitPositionsVec,
}
/// Append-only in-memory commit index segment, optionally stacked on a
/// readonly parent segment.
#[derive(Clone)]
pub(super) struct MutableCommitIndexSegment {
    /// Readonly segment this one extends (`None` for a full index.)
    parent_file: Option<Arc<ReadonlyCommitIndexSegment>>,
    /// Total number of commits in all ancestor segments.
    num_parent_commits: u32,
    field_lengths: FieldLengths,
    /// Local entries in insertion order; parents are always inserted before
    /// their children.
    graph: Vec<MutableGraphEntry>,
    /// Sorted lookup from commit id to local position.
    commit_lookup: BTreeMap<CommitId, LocalCommitPosition>,
    /// Sorted lookup from change id to local positions (a change id may map
    /// to multiple commits.)
    change_lookup: BTreeMap<ChangeId, SmallLocalCommitPositionsVec>,
}
impl Debug for MutableCommitIndexSegment {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        // The graph and lookup tables can be huge; only show the parent file.
        f.debug_struct("MutableCommitIndexSegment")
            .field("parent_file", &self.parent_file)
            .finish_non_exhaustive()
    }
}
impl MutableCommitIndexSegment {
    /// Creates an empty root segment with no parent file.
    pub(super) fn full(field_lengths: FieldLengths) -> Self {
        Self {
            parent_file: None,
            num_parent_commits: 0,
            field_lengths,
            graph: vec![],
            commit_lookup: BTreeMap::new(),
            change_lookup: BTreeMap::new(),
        }
    }

    /// Creates an empty segment stacked on top of `parent_file`.
    pub(super) fn incremental(parent_file: Arc<ReadonlyCommitIndexSegment>) -> Self {
        let num_parent_commits = parent_file.as_composite().num_commits();
        let field_lengths = parent_file.field_lengths();
        Self {
            parent_file: Some(parent_file),
            num_parent_commits,
            field_lengths,
            graph: vec![],
            commit_lookup: BTreeMap::new(),
            change_lookup: BTreeMap::new(),
        }
    }

    pub(super) fn as_composite(&self) -> &CompositeCommitIndex {
        CompositeCommitIndex::new(self)
    }

    /// Appends one commit entry. All parents must already be indexed; an
    /// already-indexed commit id is silently ignored.
    pub(super) fn add_commit_data(
        &mut self,
        commit_id: CommitId,
        change_id: ChangeId,
        parent_ids: &[CommitId],
    ) {
        if self.as_composite().has_id(&commit_id) {
            return;
        }
        let mut entry = MutableGraphEntry {
            commit_id,
            change_id,
            generation_number: 0,
            parent_positions: SmallVec::new(),
        };
        for parent_id in parent_ids {
            let parent_entry = self
                .as_composite()
                .entry_by_id(parent_id)
                .expect("parent commit is not indexed");
            // Generation number is 1 + the max of the parents' numbers.
            entry.generation_number = max(
                entry.generation_number,
                parent_entry.generation_number() + 1,
            );
            entry.parent_positions.push(parent_entry.position());
        }
        let local_pos = LocalCommitPosition(u32::try_from(self.graph.len()).unwrap());
        self.commit_lookup
            .insert(entry.commit_id.clone(), local_pos);
        self.change_lookup
            .entry(entry.change_id.clone())
            // positions are inherently sorted
            .and_modify(|positions| positions.push(local_pos))
            .or_insert(smallvec![local_pos]);
        self.graph.push(entry);
    }

    /// Copies the local (non-inherited) entries of `other_segment` into this
    /// segment.
    pub(super) fn add_commits_from(&mut self, other_segment: &DynCommitIndexSegment) {
        let other = CompositeCommitIndex::new(other_segment);
        for pos in other_segment.num_parent_commits()..other.num_commits() {
            let entry = other.entry_by_pos(GlobalCommitPosition(pos));
            let parent_ids = entry.parents().map(|entry| entry.commit_id()).collect_vec();
            self.add_commit_data(entry.commit_id(), entry.change_id(), &parent_ids);
        }
    }

    pub(super) fn merge_in(&mut self, other: &Arc<ReadonlyCommitIndexSegment>) {
        // Collect other segments down to the common ancestor segment. The two
        // segment stacks are aligned by total commit count (descending), and
        // the walk stops at the first segment both stacks share.
        let files_to_add = itertools::merge_join_by(
            self.as_composite().ancestor_files_without_local(),
            iter::once(other).chain(other.as_composite().ancestor_files_without_local()),
            |own, other| {
                let own_num_commits = own.as_composite().num_commits();
                let other_num_commits = other.as_composite().num_commits();
                own_num_commits.cmp(&other_num_commits).reverse()
            },
        )
        .take_while(|own_other| {
            own_other
                .as_ref()
                .both()
                .is_none_or(|(own, other)| own.id() != other.id())
        })
        .filter_map(|own_other| own_other.right())
        .collect_vec();
        // Copy entries oldest segment first so parents are indexed before
        // their children.
        for &file in files_to_add.iter().rev() {
            self.add_commits_from(file.as_ref());
        }
    }

    // Writes the parent file name as a length-prefixed hex string (length 0
    // if there is no parent file.)
    fn serialize_parent_filename(&self, buf: &mut Vec<u8>) {
        if let Some(parent_file) = &self.parent_file {
            let hex = parent_file.id().hex();
            buf.extend(u32::try_from(hex.len()).unwrap().to_le_bytes());
            buf.extend_from_slice(hex.as_bytes());
        } else {
            buf.extend(0_u32.to_le_bytes());
        }
    }

    // Serializes the local graph entries and lookup tables as little-endian
    // binary data. Positions that don't fit the fixed-width slots spill into
    // overflow tables and are referenced by bit-inverted (!) values.
    fn serialize_local_entries(&self, buf: &mut Vec<u8>) {
        assert_eq!(self.graph.len(), self.commit_lookup.len());
        debug_assert_eq!(
            self.graph.len(),
            self.change_lookup.values().flatten().count()
        );
        let num_commits = u32::try_from(self.graph.len()).unwrap();
        buf.extend(num_commits.to_le_bytes());
        let num_change_ids = u32::try_from(self.change_lookup.len()).unwrap();
        buf.extend(num_change_ids.to_le_bytes());
        // We'll write the actual values later
        let parent_overflow_offset = buf.len();
        buf.extend(0_u32.to_le_bytes());
        let change_overflow_offset = buf.len();
        buf.extend(0_u32.to_le_bytes());
        // Positions of change ids in the sorted table
        let change_id_pos_map: HashMap<&ChangeId, u32> = self
            .change_lookup
            .keys()
            .enumerate()
            .map(|(i, change_id)| (change_id, u32::try_from(i).unwrap()))
            .collect();
        let mut parent_overflow = vec![];
        for entry in &self.graph {
            buf.extend(entry.generation_number.to_le_bytes());
            // Up to two parent positions are stored inline; !0 marks an
            // unused slot. Three or more parents go to the overflow table,
            // referenced by inverted offset/count.
            match entry.parent_positions.as_slice() {
                [] => {
                    buf.extend((!0_u32).to_le_bytes());
                    buf.extend((!0_u32).to_le_bytes());
                }
                [GlobalCommitPosition(pos1)] => {
                    assert!(*pos1 < OVERFLOW_FLAG);
                    buf.extend(pos1.to_le_bytes());
                    buf.extend((!0_u32).to_le_bytes());
                }
                [GlobalCommitPosition(pos1), GlobalCommitPosition(pos2)] => {
                    assert!(*pos1 < OVERFLOW_FLAG);
                    assert!(*pos2 < OVERFLOW_FLAG);
                    buf.extend(pos1.to_le_bytes());
                    buf.extend(pos2.to_le_bytes());
                }
                positions => {
                    let overflow_pos = u32::try_from(parent_overflow.len()).unwrap();
                    let num_parents = u32::try_from(positions.len()).unwrap();
                    assert!(overflow_pos < OVERFLOW_FLAG);
                    assert!(num_parents < OVERFLOW_FLAG);
                    buf.extend((!overflow_pos).to_le_bytes());
                    buf.extend((!num_parents).to_le_bytes());
                    parent_overflow.extend_from_slice(positions);
                }
            }
            buf.extend(change_id_pos_map[&entry.change_id].to_le_bytes());
            assert_eq!(
                entry.commit_id.as_bytes().len(),
                self.field_lengths.commit_id
            );
            buf.extend_from_slice(entry.commit_id.as_bytes());
        }
        // Commit-id lookup table: local positions in commit-id order.
        for LocalCommitPosition(pos) in self.commit_lookup.values() {
            buf.extend(pos.to_le_bytes());
        }
        // Change-id lookup table: sorted keys, then one position (or an
        // overflow reference) per key.
        for change_id in self.change_lookup.keys() {
            assert_eq!(change_id.as_bytes().len(), self.field_lengths.change_id);
            buf.extend_from_slice(change_id.as_bytes());
        }
        let mut change_overflow = vec![];
        for positions in self.change_lookup.values() {
            match positions.as_slice() {
                [] => panic!("change id lookup entry must not be empty"),
                // Optimize for imported commits
                [LocalCommitPosition(pos1)] => {
                    assert!(*pos1 < OVERFLOW_FLAG);
                    buf.extend(pos1.to_le_bytes());
                }
                positions => {
                    let overflow_pos = u32::try_from(change_overflow.len()).unwrap();
                    assert!(overflow_pos < OVERFLOW_FLAG);
                    buf.extend((!overflow_pos).to_le_bytes());
                    change_overflow.extend_from_slice(positions);
                }
            }
        }
        // Backfill the overflow lengths reserved above, then append the
        // overflow tables themselves.
        let num_parent_overflow = u32::try_from(parent_overflow.len()).unwrap();
        buf[parent_overflow_offset..][..4].copy_from_slice(&num_parent_overflow.to_le_bytes());
        for GlobalCommitPosition(pos) in parent_overflow {
            buf.extend(pos.to_le_bytes());
        }
        let num_change_overflow = u32::try_from(change_overflow.len()).unwrap();
        buf[change_overflow_offset..][..4].copy_from_slice(&num_change_overflow.to_le_bytes());
        for LocalCommitPosition(pos) in change_overflow {
            buf.extend(pos.to_le_bytes());
        }
    }

    /// If the mutable segment has more than half the commits of its parent
    /// segment, return mutable segment with the commits from both. This is done
    /// recursively, so the stack of index segments has O(log n) files.
    pub(super) fn maybe_squash_with_ancestors(self) -> Self {
        let mut num_new_commits = self.num_local_commits();
        let mut files_to_squash = vec![];
        let mut base_parent_file = None;
        for parent_file in self.as_composite().ancestor_files_without_local() {
            // TODO: We should probably also squash if the parent file has less than N
            // commits, regardless of how many (few) are in `self`.
            if 2 * num_new_commits < parent_file.num_local_commits() {
                base_parent_file = Some(parent_file.clone());
                break;
            }
            num_new_commits += parent_file.num_local_commits();
            files_to_squash.push(parent_file.clone());
        }
        if files_to_squash.is_empty() {
            return self;
        }
        let mut squashed = if let Some(parent_file) = base_parent_file {
            Self::incremental(parent_file)
        } else {
            Self::full(self.field_lengths)
        };
        // Replay entries oldest segment first, then our own local entries.
        for parent_file in files_to_squash.iter().rev() {
            squashed.add_commits_from(parent_file.as_ref());
        }
        squashed.add_commits_from(&self);
        squashed
    }

    /// Writes this segment as a content-addressed file in `dir` and reloads
    /// it as a readonly segment. An empty segment isn't written; the parent
    /// file is returned as-is instead.
    pub(super) fn save_in(
        mut self,
        dir: &Path,
    ) -> Result<Arc<ReadonlyCommitIndexSegment>, PathError> {
        if self.num_local_commits() == 0
            && let Some(parent_file) = self.parent_file.take()
        {
            return Ok(parent_file);
        }
        let mut buf = Vec::new();
        buf.extend(COMMIT_INDEX_SEGMENT_FILE_FORMAT_VERSION.to_le_bytes());
        self.serialize_parent_filename(&mut buf);
        let local_entries_offset = buf.len();
        self.serialize_local_entries(&mut buf);
        // The file name is the hash of the file contents.
        let mut hasher = Blake2b512::new();
        hasher.update(&buf);
        let index_file_id = CommitIndexSegmentId::from_bytes(&hasher.finalize());
        let index_file_path = dir.join(index_file_id.hex());
        let mut temp_file = NamedTempFile::new_in(dir).context(dir)?;
        let file = temp_file.as_file_mut();
        file.write_all(&buf).context(temp_file.path())?;
        persist_content_addressed_temp_file(temp_file, &index_file_path)
            .context(&index_file_path)?;
        Ok(ReadonlyCommitIndexSegment::load_with_parent_file(
            &mut &buf[local_entries_offset..],
            index_file_id,
            self.parent_file,
            self.field_lengths,
        )
        .expect("in-memory index data should be valid and readable"))
    }
}
// Read access to the in-memory entries, mirroring the readonly segment
// implementation of the same trait.
impl CommitIndexSegment for MutableCommitIndexSegment {
    fn num_parent_commits(&self) -> u32 {
        self.num_parent_commits
    }

    fn num_local_commits(&self) -> u32 {
        self.graph.len().try_into().unwrap()
    }

    fn parent_file(&self) -> Option<&Arc<ReadonlyCommitIndexSegment>> {
        self.parent_file.as_ref()
    }

    fn commit_id_to_pos(&self, commit_id: &CommitId) -> Option<LocalCommitPosition> {
        self.commit_lookup.get(commit_id).copied()
    }

    fn resolve_neighbor_commit_ids(
        &self,
        commit_id: &CommitId,
    ) -> (Option<CommitId>, Option<CommitId>) {
        let (prev_id, next_id) = resolve_neighbor_ids(&self.commit_lookup, commit_id);
        (prev_id.cloned(), next_id.cloned())
    }

    fn resolve_commit_id_prefix(&self, prefix: &HexPrefix) -> PrefixResolution<CommitId> {
        let min_bytes_prefix = CommitId::from_bytes(prefix.min_prefix_bytes());
        resolve_id_prefix(&self.commit_lookup, prefix, &min_bytes_prefix).map(|(id, _)| id.clone())
    }

    fn resolve_neighbor_change_ids(
        &self,
        change_id: &ChangeId,
    ) -> (Option<ChangeId>, Option<ChangeId>) {
        let (prev_id, next_id) = resolve_neighbor_ids(&self.change_lookup, change_id);
        (prev_id.cloned(), next_id.cloned())
    }

    fn resolve_change_id_prefix(
        &self,
        prefix: &HexPrefix,
    ) -> PrefixResolution<(ChangeId, SmallLocalCommitPositionsVec)> {
        let min_bytes_prefix = ChangeId::from_bytes(prefix.min_prefix_bytes());
        resolve_id_prefix(&self.change_lookup, prefix, &min_bytes_prefix)
            .map(|(id, positions)| (id.clone(), positions.clone()))
    }

    fn generation_number(&self, local_pos: LocalCommitPosition) -> u32 {
        self.graph[local_pos.0 as usize].generation_number
    }

    fn commit_id(&self, local_pos: LocalCommitPosition) -> CommitId {
        self.graph[local_pos.0 as usize].commit_id.clone()
    }

    fn change_id(&self, local_pos: LocalCommitPosition) -> ChangeId {
        self.graph[local_pos.0 as usize].change_id.clone()
    }

    fn num_parents(&self, local_pos: LocalCommitPosition) -> u32 {
        self.graph[local_pos.0 as usize]
            .parent_positions
            .len()
            .try_into()
            .unwrap()
    }

    fn parent_positions(&self, local_pos: LocalCommitPosition) -> SmallGlobalCommitPositionsVec {
        self.graph[local_pos.0 as usize].parent_positions.clone()
    }
}
/// In-memory mutable records for the on-disk commit index backend.
///
/// Wraps a [`CompositeIndex`] whose topmost segments are mutable.
pub struct DefaultMutableIndex(CompositeIndex);
impl DefaultMutableIndex {
    /// Creates an index with no parent segments.
    pub(super) fn full(lengths: FieldLengths) -> Self {
        let commits = Box::new(MutableCommitIndexSegment::full(lengths));
        // Changed-path index isn't enabled by default.
        let mut changed_paths = CompositeChangedPathIndex::null();
        changed_paths.make_mutable();
        Self(CompositeIndex::from_mutable(commits, changed_paths))
    }

    /// Creates an index stacked on top of `parent_index`'s segments.
    pub(super) fn incremental(parent_index: &DefaultReadonlyIndex) -> Self {
        let commits = Box::new(MutableCommitIndexSegment::incremental(
            parent_index.readonly_commits().clone(),
        ));
        let mut changed_paths = parent_index.changed_paths().clone();
        changed_paths.make_mutable();
        Self(CompositeIndex::from_mutable(commits, changed_paths))
    }

    pub(super) fn into_segment(
        self,
    ) -> (Box<MutableCommitIndexSegment>, CompositeChangedPathIndex) {
        self.0.into_mutable().expect("must have mutable")
    }

    fn mutable_commits(&mut self) -> &mut MutableCommitIndexSegment {
        self.0.mutable_commits().expect("must have mutable")
    }

    /// Returns the number of all indexed commits.
    pub fn num_commits(&self) -> u32 {
        self.0.commits().num_commits()
    }

    /// Adds `commit` (and its changed paths, if the changed-path index is
    /// contiguous up to this commit.) No-op if already indexed.
    #[tracing::instrument(skip(self))]
    pub(super) async fn add_commit(&mut self, commit: &Commit) -> BackendResult<()> {
        let new_commit_pos = GlobalCommitPosition(self.num_commits());
        self.add_commit_data(
            commit.id().clone(),
            commit.change_id().clone(),
            commit.parent_ids(),
        );
        if new_commit_pos == GlobalCommitPosition(self.num_commits()) {
            return Ok(()); // commit already indexed
        }
        // Only extend the changed-path index if it reaches exactly up to the
        // position of the newly-added commit.
        if self.0.changed_paths().next_mutable_commit_pos() == Some(new_commit_pos) {
            self.add_commit_changed_paths(commit).await?;
        }
        Ok(())
    }

    pub(super) fn add_commit_data(
        &mut self,
        commit_id: CommitId,
        change_id: ChangeId,
        parent_ids: &[CommitId],
    ) {
        self.mutable_commits()
            .add_commit_data(commit_id, change_id, parent_ids);
    }

    // CompositeChangedPathIndex::add_commit() isn't implemented because we need
    // a commit index to merge parent trees, which means we need to borrow self.
    async fn add_commit_changed_paths(&mut self, commit: &Commit) -> BackendResult<()> {
        let paths = collect_changed_paths(self, commit).await?;
        self.0.changed_paths_mut().add_changed_paths(paths);
        Ok(())
    }

    /// Merges commits (and, where available, their changed paths) from
    /// `other` into this index.
    pub(super) fn merge_in(&mut self, other: &DefaultReadonlyIndex) {
        let start_commit_pos = GlobalCommitPosition(self.num_commits());
        self.mutable_commits().merge_in(other.readonly_commits());
        // Copy changed paths for the newly-merged commits, but only if our
        // changed-path index is contiguous up to the first of them.
        if self.0.changed_paths().next_mutable_commit_pos() == Some(start_commit_pos) {
            let other_commits = other.as_composite().commits();
            for self_pos in (start_commit_pos.0..self.num_commits()).map(GlobalCommitPosition) {
                let entry = self.0.commits().entry_by_pos(self_pos);
                // Positions differ between the two indexes; translate by id.
                let other_pos = other_commits.commit_id_to_pos(&entry.commit_id()).unwrap();
                let Some(paths) = other.changed_paths().changed_paths(other_pos) else {
                    break; // no more indexed paths in other index
                };
                let paths = paths.map(|path| path.to_owned()).collect();
                self.0.changed_paths_mut().add_changed_paths(paths);
            }
        }
    }
}
impl AsCompositeIndex for DefaultMutableIndex {
    fn as_composite(&self) -> &CompositeIndex {
        // Borrow the wrapped composite index directly.
        &self.0
    }
}
// All queries delegate to the underlying composite index.
impl Index for DefaultMutableIndex {
    fn shortest_unique_commit_id_prefix_len(&self, commit_id: &CommitId) -> IndexResult<usize> {
        self.0.shortest_unique_commit_id_prefix_len(commit_id)
    }

    fn resolve_commit_id_prefix(
        &self,
        prefix: &HexPrefix,
    ) -> IndexResult<PrefixResolution<CommitId>> {
        self.0.resolve_commit_id_prefix(prefix)
    }

    fn has_id(&self, commit_id: &CommitId) -> IndexResult<bool> {
        self.0.has_id(commit_id)
    }

    fn is_ancestor(&self, ancestor_id: &CommitId, descendant_id: &CommitId) -> IndexResult<bool> {
        self.0.is_ancestor(ancestor_id, descendant_id)
    }

    fn common_ancestors(&self, set1: &[CommitId], set2: &[CommitId]) -> IndexResult<Vec<CommitId>> {
        self.0.common_ancestors(set1, set2)
    }

    fn all_heads_for_gc(&self) -> IndexResult<Box<dyn Iterator<Item = CommitId> + '_>> {
        self.0.all_heads_for_gc()
    }

    fn heads(&self, candidates: &mut dyn Iterator<Item = &CommitId>) -> IndexResult<Vec<CommitId>> {
        self.0.heads(candidates)
    }

    fn changed_paths_in_commit(
        &self,
        commit_id: &CommitId,
    ) -> IndexResult<Option<Box<dyn Iterator<Item = RepoPathBuf> + '_>>> {
        self.0.changed_paths_in_commit(commit_id)
    }

    fn evaluate_revset(
        &self,
        expression: &ResolvedExpression,
        store: &Arc<Store>,
    ) -> Result<Box<dyn Revset + '_>, RevsetEvaluationError> {
        self.0.evaluate_revset(expression, store)
    }
}
impl MutableIndex for DefaultMutableIndex {
    fn as_index(&self) -> &dyn Index {
        self
    }

    fn change_id_index(
        &self,
        heads: &mut dyn Iterator<Item = &CommitId>,
    ) -> Box<dyn ChangeIdIndex + '_> {
        Box::new(ChangeIdIndexImpl::new(self, heads))
    }

    fn add_commit(&mut self, commit: &Commit) -> IndexResult<()> {
        // Drive the async implementation to completion on this thread.
        Self::add_commit(self, commit)
            .block_on()
            .map_err(|err| IndexError::Other(err.into()))
    }

    fn merge_in(&mut self, other: &dyn ReadonlyIndex) -> IndexResult<()> {
        // Only our own readonly index implementation can be merged in.
        let other: &DefaultReadonlyIndex = other
            .downcast_ref()
            .expect("index to merge in must be a DefaultReadonlyIndex");
        Self::merge_in(self, other);
        Ok(())
    }
}
/// Finds the keys immediately before and after `id` in `lookup_table`.
///
/// `id` itself is excluded from both results, whether or not it is present
/// in the table.
fn resolve_neighbor_ids<'a, K: Ord, V>(
    lookup_table: &'a BTreeMap<K, V>,
    id: &K,
) -> (Option<&'a K>, Option<&'a K>) {
    // Closest key strictly less than `id`.
    let before = lookup_table.range(..id).next_back().map(|(key, _)| key);
    // Closest key strictly greater than `id`; an exclusive lower bound has no
    // range-literal syntax, so spell out the bounds.
    let after = lookup_table
        .range((Bound::Excluded(id), Bound::Unbounded))
        .map(|(key, _)| key)
        .next();
    (before, after)
}
/// Resolves `prefix` against the sorted `lookup_table`.
///
/// `min_bytes_prefix` must be the smallest key that could match `prefix`;
/// the scan starts there and stops at the first non-matching key.
fn resolve_id_prefix<'a, K: ObjectId + Ord, V>(
    lookup_table: &'a BTreeMap<K, V>,
    prefix: &HexPrefix,
    min_bytes_prefix: &K,
) -> PrefixResolution<(&'a K, &'a V)> {
    let mut candidates = lookup_table
        .range(min_bytes_prefix..)
        .take_while(|&(id, _)| prefix.matches(id))
        .fuse();
    // Only the first two candidates matter: zero means no match, exactly one
    // is unique, two or more is ambiguous.
    let first = candidates.next();
    let second = candidates.next();
    match first {
        None => PrefixResolution::NoMatch,
        Some(_) if second.is_some() => PrefixResolution::AmbiguousMatch,
        Some(entry) => PrefixResolution::SingleMatch(entry),
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/default_index/revset_graph_iterator.rs | lib/src/default_index/revset_graph_iterator.rs | // Copyright 2021 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cmp::Ordering;
use std::cmp::min;
use std::collections::BTreeMap;
use std::collections::VecDeque;
use std::rc::Rc;
use itertools::Itertools as _;
use super::bit_set::PositionsBitSet;
use super::composite::CompositeCommitIndex;
use super::composite::CompositeIndex;
use super::entry::CommitIndexEntry;
use super::entry::GlobalCommitPosition;
use super::rev_walk::RevWalk;
use super::revset_engine::BoxedRevWalk;
use crate::backend::CommitId;
use crate::graph::GraphEdge;
use crate::graph::GraphNode;
use crate::revset::RevsetEvaluationError;
// This can be cheaply allocated and hashed compared to `CommitId`-based type.
type CommitGraphEdge = GraphEdge<GlobalCommitPosition>;
/// Given a `RevWalk` over some set of revisions, yields the same revisions with
/// associated edge types.
///
/// If a revision's parent is in the input set, then the edge will be "direct".
/// Otherwise, there will be one "indirect" edge for each closest ancestor in
/// the set, and one "missing" edge for each edge leading outside the set.
///
/// Example (uppercase characters are in the input set):
///
/// A A
/// |\ |\
/// B c B :
/// |\| => |\:
/// d E ~ E
/// |/ ~
/// root
///
/// The implementation works by walking the input set one commit at a time. It
/// then considers all parents of the commit. It looks ahead in the input set
/// far enough that all the parents will have been consumed if they are in the
/// input (and puts them away so we can emit them later). If a parent of the
/// current commit is not in the input set (i.e. it was not in the look-ahead),
/// we walk these external commits until we end up back in the input set.
/// That walk may result in consuming more elements from the input `RevWalk`.
/// In the example above, when we consider "A", we will initially look ahead to
/// "B" and "c". When we consider edges from the external commit "c", we will
/// further consume the input `RevWalk` to "E".
///
/// Missing edges are those that don't lead back into the input set. If all
/// edges from an external commit are missing, we consider the edge to that
/// commit to also be missing. In the example above, that means that "B" will
/// have a missing edge to "d" rather than to the root.
///
/// `RevsetGraphWalk` can be configured to skip transitive edges that it would
/// otherwise return. In this mode (which is the default), the edge from "A" to
/// "E" in the example above would be excluded because there's also a transitive
/// path from "A" to "E" via "B". The implementation of that mode
/// adds a filtering step just before yielding the edges for a commit. The
/// filtering works by doing a DFS in the simplified graph. That may require
/// even more look-ahead. Consider this example (uppercase characters are in the
/// input set):
///
/// J
/// /|
/// | i
/// | |\
/// | | H
/// G | |
/// | e f
/// | \|\
/// | D |
/// \ / c
/// b /
/// |/
/// A
/// |
/// root
///
/// When walking from "J", we'll find indirect edges to "H", "G", and "D". This
/// is our unfiltered set of edges, before removing transitive edges. In order
/// to know that "D" is an ancestor of "H", we need to also walk from "H". We
/// use the same search for finding edges from "H" as we used from "J". That
/// results in looking ahead all the way to "A". We could reduce the amount of
/// look-ahead by stopping at "c" since we're only interested in edges that
/// could lead to "D", but that would require extra book-keeping to remember for
/// later that the edges from "f" and "H" are only partially computed.
pub(super) struct RevsetGraphWalk<'a> {
input_set_walk: BoxedRevWalk<'a>,
/// Commits in the input set we had to take out of the `RevWalk` while
/// walking external edges. Does not necessarily include the commit
/// we're currently about to emit.
look_ahead: VecDeque<GlobalCommitPosition>,
/// The last consumed position. This is always the smallest key in the
/// look_ahead set, but it's faster to keep a separate field for it.
min_position: GlobalCommitPosition,
/// Edges for commits not in the input set.
edges: BTreeMap<GlobalCommitPosition, Rc<[CommitGraphEdge]>>,
skip_transitive_edges: bool,
}
impl<'a> RevsetGraphWalk<'a> {
    /// Creates a graph walk over the given input set.
    pub fn new(input_set_walk: BoxedRevWalk<'a>, skip_transitive_edges: bool) -> Self {
        Self {
            input_set_walk,
            look_ahead: VecDeque::new(),
            // Nothing consumed yet, so every position compares below this.
            min_position: GlobalCommitPosition::MAX,
            edges: Default::default(),
            skip_transitive_edges,
        }
    }
    /// Returns the next position in the input set, preferring positions
    /// already pulled into the look-ahead buffer over the underlying walk.
    fn next_index_position(
        &mut self,
        index: &CompositeIndex,
    ) -> Result<Option<GlobalCommitPosition>, RevsetEvaluationError> {
        match self.look_ahead.pop_back() {
            Some(position) => Ok(Some(position)),
            None => self.input_set_walk.next(index).transpose(),
        }
    }
    /// Returns (and removes) the edges for the given input-set commit,
    /// computing them if they weren't cached by an earlier look-ahead.
    fn pop_edges_from_internal_commit(
        &mut self,
        index: &CompositeIndex,
        index_entry: &CommitIndexEntry,
    ) -> Result<Rc<[CommitGraphEdge]>, RevsetEvaluationError> {
        let position = index_entry.position();
        // Scan the cache from its greatest key down to `position`, discarding
        // stale entries at higher (already-emitted) positions on the way.
        while let Some(entry) = self.edges.last_entry() {
            match entry.key().cmp(&position) {
                Ordering::Less => break, // no cached edges found
                Ordering::Equal => return Ok(entry.remove()),
                Ordering::Greater => entry.remove(),
            };
        }
        self.new_edges_from_internal_commit(index, index_entry)
    }
    /// Computes edges for a commit that is in the input set.
    fn new_edges_from_internal_commit(
        &mut self,
        index: &CompositeIndex,
        index_entry: &CommitIndexEntry,
    ) -> Result<Rc<[CommitGraphEdge]>, RevsetEvaluationError> {
        let mut parent_entries = index_entry.parents();
        // Fast path for the single-parent case: no Vec or bit set is needed,
        // and transitive-edge filtering cannot apply to a single edge set.
        if parent_entries.len() == 1 {
            let parent = parent_entries.next().unwrap();
            let parent_position = parent.position();
            self.consume_to(index, parent_position)?;
            if self.look_ahead.binary_search(&parent_position).is_ok() {
                Ok([CommitGraphEdge::direct(parent_position)].into())
            } else {
                let parent_edges = self.edges_from_external_commit(index, parent)?;
                if parent_edges.iter().all(|edge| edge.is_missing()) {
                    Ok([CommitGraphEdge::missing(parent_position)].into())
                } else {
                    Ok(parent_edges.clone())
                }
            }
        } else {
            let mut edges = Vec::new();
            // Tracks edge targets seen so far, to deduplicate edges inherited
            // through multiple external parents.
            let mut known_ancestors = PositionsBitSet::with_max_pos(index_entry.position());
            for parent in parent_entries {
                let parent_position = parent.position();
                self.consume_to(index, parent_position)?;
                if self.look_ahead.binary_search(&parent_position).is_ok() {
                    edges.push(CommitGraphEdge::direct(parent_position));
                } else {
                    let parent_edges = self.edges_from_external_commit(index, parent)?;
                    if parent_edges.iter().all(|edge| edge.is_missing()) {
                        edges.push(CommitGraphEdge::missing(parent_position));
                    } else {
                        edges.extend(
                            parent_edges
                                .iter()
                                .filter(|edge| !known_ancestors.get_set(edge.target)),
                        );
                    }
                }
            }
            if self.skip_transitive_edges {
                self.remove_transitive_edges(index.commits(), &mut edges);
            }
            Ok(edges.into())
        }
    }
    /// Computes (and caches) edges for a commit outside the input set.
    ///
    /// Works iteratively with an explicit stack: a commit's edges can only be
    /// finalized once all of its own external parents have known edges.
    fn edges_from_external_commit(
        &mut self,
        index: &CompositeIndex,
        index_entry: CommitIndexEntry<'_>,
    ) -> Result<&Rc<[CommitGraphEdge]>, RevsetEvaluationError> {
        let position = index_entry.position();
        let mut stack = vec![index_entry];
        while let Some(entry) = stack.last() {
            let position = entry.position();
            if self.edges.contains_key(&position) {
                // Already computed (possibly while processing another branch).
                stack.pop().unwrap();
                continue;
            }
            let mut parent_entries = entry.parents();
            // `Some(edges)` once all of this entry's parents are resolved;
            // `None` means parents were pushed and we must revisit the entry.
            let complete_parent_edges = if parent_entries.len() == 1 {
                let parent = parent_entries.next().unwrap();
                let parent_position = parent.position();
                self.consume_to(index, parent_position)?;
                if self.look_ahead.binary_search(&parent_position).is_ok() {
                    // We have found a path back into the input set
                    Some([CommitGraphEdge::indirect(parent_position)].into())
                } else if let Some(parent_edges) = self.edges.get(&parent_position) {
                    if parent_edges.iter().all(|edge| edge.is_missing()) {
                        Some([CommitGraphEdge::missing(parent_position)].into())
                    } else {
                        Some(parent_edges.clone())
                    }
                } else if parent_position < self.min_position {
                    // The parent is not in the input set
                    Some([CommitGraphEdge::missing(parent_position)].into())
                } else {
                    // The parent is not in the input set but it's somewhere in the range
                    // where we have commits in the input set, so continue searching.
                    stack.push(parent);
                    None
                }
            } else {
                let mut edges = Vec::new();
                // Deduplicates inherited edge targets, as in
                // new_edges_from_internal_commit().
                let mut known_ancestors = PositionsBitSet::with_max_pos(position);
                let mut parents_complete = true;
                for parent in parent_entries {
                    let parent_position = parent.position();
                    self.consume_to(index, parent_position)?;
                    if self.look_ahead.binary_search(&parent_position).is_ok() {
                        // We have found a path back into the input set
                        edges.push(CommitGraphEdge::indirect(parent_position));
                    } else if let Some(parent_edges) = self.edges.get(&parent_position) {
                        if parent_edges.iter().all(|edge| edge.is_missing()) {
                            edges.push(CommitGraphEdge::missing(parent_position));
                        } else {
                            edges.extend(
                                parent_edges
                                    .iter()
                                    .filter(|edge| !known_ancestors.get_set(edge.target)),
                            );
                        }
                    } else if parent_position < self.min_position {
                        // The parent is not in the input set
                        edges.push(CommitGraphEdge::missing(parent_position));
                    } else {
                        // The parent is not in the input set but it's somewhere in the range
                        // where we have commits in the input set, so continue searching.
                        stack.push(parent);
                        parents_complete = false;
                    }
                }
                parents_complete.then(|| {
                    if self.skip_transitive_edges {
                        self.remove_transitive_edges(index.commits(), &mut edges);
                    }
                    edges.into()
                })
            };
            if let Some(edges) = complete_parent_edges {
                stack.pop().unwrap();
                self.edges.insert(position, edges);
            }
        }
        Ok(self.edges.get(&position).unwrap())
    }
    /// Drops edges whose targets are also reachable through other (non-missing)
    /// edges in `edges`, by a bounded ancestor search from the edge targets.
    fn remove_transitive_edges(
        &self,
        index: &CompositeCommitIndex,
        edges: &mut Vec<CommitGraphEdge>,
    ) {
        // Only indirect edges can be transitively implied; direct edges go to
        // immediate parents and missing edges are retained unconditionally.
        if !edges.iter().any(|edge| edge.is_indirect()) {
            return;
        }
        let Some((min_pos, max_pos)) = reachable_positions(edges).minmax().into_option() else {
            return;
        };
        let enqueue_parents = |work: &mut Vec<GlobalCommitPosition>, entry: &CommitIndexEntry| {
            if let Some(edges) = self.edges.get(&entry.position()) {
                // Edges to internal commits are known. Skip external commits
                // which should never be in the input edges.
                work.extend(reachable_positions(edges).filter(|&pos| pos >= min_pos));
            } else {
                // The commit isn't visited yet. Cannot skip external commits.
                let positions = entry.parent_positions();
                work.extend(positions.into_iter().filter(|&pos| pos >= min_pos));
            }
        };
        let mut min_generation = u32::MAX;
        let mut initial_targets = PositionsBitSet::with_max_pos(max_pos);
        let mut work = vec![];
        // To start with, add the edges one step after the input edges.
        for pos in reachable_positions(edges) {
            initial_targets.set(pos);
            let entry = index.entry_by_pos(pos);
            min_generation = min(min_generation, entry.generation_number());
            enqueue_parents(&mut work, &entry);
        }
        // Find commits reachable transitively and add them to the `unwanted` set.
        let mut unwanted = PositionsBitSet::with_max_pos(max_pos);
        while let Some(pos) = work.pop() {
            if unwanted.get_set(pos) {
                // Already visited
                continue;
            }
            if initial_targets.get(pos) {
                // Already visited
                continue;
            }
            let entry = index.entry_by_pos(pos);
            // Prune: a commit below the smallest target generation cannot have
            // any edge target among its ancestors (assumes generation numbers
            // strictly decrease toward ancestors).
            if entry.generation_number() < min_generation {
                continue;
            }
            enqueue_parents(&mut work, &entry);
        }
        // Keep missing edges as-is; drop edges to transitively-reached targets.
        edges.retain(|edge| edge.is_missing() || !unwanted.get(edge.target));
    }
    /// Consumes the input walk into `look_ahead` until the walk has advanced
    /// to or past `pos`, or is exhausted.
    fn consume_to(
        &mut self,
        index: &CompositeIndex,
        pos: GlobalCommitPosition,
    ) -> Result<(), RevsetEvaluationError> {
        while pos < self.min_position {
            if let Some(next_position) = self.input_set_walk.next(index).transpose()? {
                self.look_ahead.push_front(next_position);
                self.min_position = next_position;
            } else {
                break;
            }
        }
        Ok(())
    }
    /// Emits the next input-set commit together with its graph edges.
    fn try_next(
        &mut self,
        index: &CompositeIndex,
    ) -> Result<Option<GraphNode<CommitId>>, RevsetEvaluationError> {
        let Some(position) = self.next_index_position(index)? else {
            return Ok(None);
        };
        let entry = index.commits().entry_by_pos(position);
        let edges = self.pop_edges_from_internal_commit(index, &entry)?;
        // Translate position-based edges back to commit IDs for the caller.
        let edges = edges
            .iter()
            .map(|edge| edge.map(|pos| index.commits().entry_by_pos(pos).commit_id()))
            .collect();
        Ok(Some((entry.commit_id(), edges)))
    }
}
impl RevWalk<CompositeIndex> for RevsetGraphWalk<'_> {
    type Item = Result<GraphNode<CommitId>, RevsetEvaluationError>;
    fn next(&mut self, index: &CompositeIndex) -> Option<Self::Item> {
        // try_next() yields Result<Option<_>>; RevWalk wants Option<Result<_>>.
        self.try_next(index).transpose()
    }
}
/// Yields the target positions of all edges except the "missing" ones.
fn reachable_positions(
    edges: &[CommitGraphEdge],
) -> impl DoubleEndedIterator<Item = GlobalCommitPosition> {
    edges
        .iter()
        .filter_map(|edge| (!edge.is_missing()).then_some(edge.target))
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/default_index/rev_walk.rs | lib/src/default_index/rev_walk.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cmp::Reverse;
use std::cmp::max;
use std::collections::HashMap;
use std::collections::HashSet;
use std::iter::Fuse;
use std::iter::FusedIterator;
use std::ops::Range;
use smallvec::SmallVec;
use super::composite::CompositeCommitIndex;
use super::composite::CompositeIndex;
use super::entry::GlobalCommitPosition;
use super::entry::SmallGlobalCommitPositionsVec;
use super::rev_walk_queue::RevWalkQueue;
use super::rev_walk_queue::RevWalkWorkItem;
use crate::revset::PARENTS_RANGE_FULL;
/// Like `Iterator`, but doesn't borrow the `index` internally.
pub(super) trait RevWalk<I: ?Sized> {
    /// Type of the yielded elements.
    type Item;
    /// Advances the iteration and returns the next item.
    ///
    /// The caller must provide the same `index` instance.
    ///
    /// Returns `None` when the iteration is finished. Once `None` is returned,
    /// this will never resume. In other words, a `RevWalk` is fused.
    fn next(&mut self, index: &I) -> Option<Self::Item>;
    // The following methods are provided for convenience. They are not supposed
    // to be reimplemented.
    /// Wraps in adapter that will filter and transform items by the given
    /// function. Analogous to `Iterator::filter_map()`.
    fn filter_map<B, F>(self, f: F) -> FilterMapRevWalk<Self, F>
    where
        Self: Sized,
        F: FnMut(&I, Self::Item) -> Option<B>,
    {
        FilterMapRevWalk { walk: self, f }
    }
    /// Wraps in adapter that will transform items by the given function.
    /// Analogous to `Iterator::map()`.
    fn map<B, F>(self, f: F) -> MapRevWalk<Self, F>
    where
        Self: Sized,
        F: FnMut(&I, Self::Item) -> B,
    {
        MapRevWalk { walk: self, f }
    }
    /// Wraps in adapter that can peek one more item without consuming.
    /// Analogous to `Iterator::peekable()`.
    fn peekable(self) -> PeekableRevWalk<I, Self>
    where
        Self: Sized,
    {
        PeekableRevWalk {
            walk: self,
            peeked: None,
        }
    }
    /// Reattaches the underlying `index`.
    fn attach(self, index: &I) -> RevWalkBorrowedIndexIter<'_, I, Self>
    where
        Self: Sized,
    {
        RevWalkBorrowedIndexIter { index, walk: self }
    }
}
/// Forwarding impl so boxed walks (e.g. `Box<dyn RevWalk<..>>`) are walks too.
impl<I: ?Sized, W: RevWalk<I> + ?Sized> RevWalk<I> for Box<W> {
    type Item = W::Item;
    fn next(&mut self, index: &I) -> Option<Self::Item> {
        <W as RevWalk<I>>::next(self, index)
    }
}
/// Adapter that turns `Iterator` into `RevWalk` by dropping index argument.
///
/// As the name suggests, the source object is usually a slice or `Vec`.
#[derive(Clone, Debug)]
pub(super) struct EagerRevWalk<T> {
    // Fused so the "never resumes after None" contract of RevWalk holds
    // regardless of the source iterator's behavior.
    iter: Fuse<T>,
}
impl<T: Iterator> EagerRevWalk<T> {
    /// Wraps the given iterator; the walk yields exactly its items.
    pub fn new(iter: T) -> Self {
        Self { iter: iter.fuse() }
    }
}
impl<I: ?Sized, T: Iterator> RevWalk<I> for EagerRevWalk<T> {
    type Item = T::Item;
    fn next(&mut self, _index: &I) -> Option<Self::Item> {
        self.iter.next()
    }
}
/// Adapter returned by `RevWalk::filter_map()`.
#[derive(Clone, Debug)]
#[must_use]
pub(super) struct FilterMapRevWalk<W, F> {
    walk: W,
    f: F,
}
/// Yields `f(index, item)` for each source item where `f` returns `Some`;
/// items mapped to `None` are silently dropped.
impl<B, I, W, F> RevWalk<I> for FilterMapRevWalk<W, F>
where
    I: ?Sized,
    W: RevWalk<I>,
    F: FnMut(&I, W::Item) -> Option<B>,
{
    type Item = B;
    fn next(&mut self, index: &I) -> Option<Self::Item> {
        // Keep pulling from the underlying walk until the function produces a
        // value; `?` stops the loop when the source is exhausted.
        loop {
            let item = self.walk.next(index)?;
            if let Some(new_item) = (self.f)(index, item) {
                return Some(new_item);
            }
        }
    }
}
/// Adapter returned by `RevWalk::map()`.
#[derive(Clone, Debug)]
#[must_use]
pub(super) struct MapRevWalk<W, F> {
    walk: W,
    f: F,
}
/// Yields `f(index, item)` for every item of the underlying walk.
impl<B, I, W, F> RevWalk<I> for MapRevWalk<W, F>
where
    I: ?Sized,
    W: RevWalk<I>,
    F: FnMut(&I, W::Item) -> B,
{
    type Item = B;
    fn next(&mut self, index: &I) -> Option<Self::Item> {
        // Pull one item and transform it; `?` propagates exhaustion.
        let item = self.walk.next(index)?;
        Some((self.f)(index, item))
    }
}
/// Adapter returned by `RevWalk::peekable()`.
#[derive(Clone, Debug)]
#[must_use]
pub(super) struct PeekableRevWalk<I: ?Sized, W: RevWalk<I>> {
    walk: W,
    // Since RevWalk is fused, we don't need a nested Option<Option<_>>.
    peeked: Option<W::Item>,
}
impl<I: ?Sized, W: RevWalk<I>> PeekableRevWalk<I, W> {
    /// Returns a reference to the next item without consuming it.
    pub fn peek(&mut self, index: &I) -> Option<&W::Item> {
        match self.peeked {
            Some(ref item) => Some(item),
            None => {
                // Fill the peek slot from the underlying walk.
                self.peeked = self.walk.next(index);
                self.peeked.as_ref()
            }
        }
    }
    /// Consumes and returns the next item only if it satisfies `predicate`;
    /// a rejected item is left in place for the next call.
    pub fn next_if(
        &mut self,
        index: &I,
        predicate: impl FnOnce(&W::Item) -> bool,
    ) -> Option<W::Item> {
        let item = self.next(index)?;
        if predicate(&item) {
            Some(item)
        } else {
            // next() drained the peek slot, so we can stash the item back.
            assert!(self.peeked.is_none());
            self.peeked = Some(item);
            None
        }
    }
}
impl<I: ?Sized, W: RevWalk<I>> RevWalk<I> for PeekableRevWalk<I, W> {
    type Item = W::Item;
    fn next(&mut self, index: &I) -> Option<Self::Item> {
        // Drain the peeked slot first, then fall back to the underlying walk.
        match self.peeked.take() {
            Some(item) => Some(item),
            None => self.walk.next(index),
        }
    }
}
/// Adapter that turns `RevWalk` into `Iterator` by attaching borrowed `index`.
#[derive(Clone, Debug)]
#[must_use]
pub(super) struct RevWalkBorrowedIndexIter<'a, I: ?Sized, W> {
    index: &'a I,
    walk: W,
}
impl<I: ?Sized, W> RevWalkBorrowedIndexIter<'_, I, W> {
    /// Turns into `'static`-lifetime walk object by detaching the index.
    pub fn detach(self) -> W {
        self.walk
    }
}
impl<I: ?Sized, W: RevWalk<I>> Iterator for RevWalkBorrowedIndexIter<'_, I, W> {
    type Item = W::Item;
    fn next(&mut self) -> Option<Self::Item> {
        self.walk.next(self.index)
    }
}
// Valid because RevWalk is fused by contract.
impl<I: ?Sized, W: RevWalk<I>> FusedIterator for RevWalkBorrowedIndexIter<'_, I, W> {}
/// Adapter that turns `RevWalk` into `Iterator` by attaching owned `index`.
#[derive(Clone, Debug)]
#[must_use]
pub(super) struct RevWalkOwnedIndexIter<I, W> {
    index: I,
    walk: W,
}
impl<I, W: RevWalk<I>> Iterator for RevWalkOwnedIndexIter<I, W> {
    type Item = W::Item;
    fn next(&mut self) -> Option<Self::Item> {
        self.walk.next(&self.index)
    }
}
// Valid because RevWalk is fused by contract.
impl<I, W: RevWalk<I>> FusedIterator for RevWalkOwnedIndexIter<I, W> {}
/// Abstraction over the walk direction: yields the positions adjacent to a
/// given position (parents for ancestor walks, children for descendant walks).
pub(super) trait RevWalkIndex {
    /// Copyable, totally-ordered position type.
    type Position: Copy + Ord;
    /// Collection of adjacent positions, viewable as a slice.
    type AdjacentPositions: IntoIterator<Item = Self::Position> + AsRef<[Self::Position]>;
    fn adjacent_positions(&self, pos: Self::Position) -> Self::AdjacentPositions;
}
// Ancestor walks over the commit index: adjacency is the parents.
impl RevWalkIndex for CompositeIndex {
    type Position = GlobalCommitPosition;
    type AdjacentPositions = SmallGlobalCommitPositionsVec;
    fn adjacent_positions(&self, pos: Self::Position) -> Self::AdjacentPositions {
        self.commits().entry_by_pos(pos).parent_positions()
    }
}
/// Reverse (commit -> children) lookup table for walking descendants.
#[derive(Clone)]
pub(super) struct RevWalkDescendantsIndex {
    // Maps each position to the (Reverse-wrapped) positions of its children.
    children_map: HashMap<GlobalCommitPosition, DescendantIndexPositionsVec>,
}
// See SmallGlobalCommitPositionsVec for the array size.
type DescendantIndexPositionsVec = SmallVec<[Reverse<GlobalCommitPosition>; 4]>;
impl RevWalkDescendantsIndex {
    /// Builds the child map from the given `positions` (entries also exist for
    /// the parents of those positions).
    fn build(
        index: &CompositeCommitIndex,
        positions: impl IntoIterator<Item = GlobalCommitPosition>,
    ) -> Self {
        // For dense set, it's probably cheaper to use `Vec` instead of `HashMap`.
        let mut children_map: HashMap<GlobalCommitPosition, DescendantIndexPositionsVec> =
            HashMap::new();
        for pos in positions {
            children_map.entry(pos).or_default(); // mark head node
            for parent_pos in index.entry_by_pos(pos).parent_positions() {
                let parent = children_map.entry(parent_pos).or_default();
                parent.push(Reverse(pos));
            }
        }
        Self { children_map }
    }
    /// Whether `pos` has an entry (i.e. it was one of the built positions, or
    /// is a parent of one).
    fn contains_pos(&self, pos: GlobalCommitPosition) -> bool {
        self.children_map.contains_key(&pos)
    }
}
// Descendant walks: positions are Reverse-wrapped so the walk's "descending"
// order corresponds to ascending GlobalCommitPosition, and adjacency is the
// children.
impl RevWalkIndex for RevWalkDescendantsIndex {
    type Position = Reverse<GlobalCommitPosition>;
    type AdjacentPositions = DescendantIndexPositionsVec;
    fn adjacent_positions(&self, pos: Self::Position) -> Self::AdjacentPositions {
        self.children_map[&pos.0].clone()
    }
}
/// Builder for the various ancestor/descendant walks over a commit index.
#[derive(Clone)]
#[must_use]
pub(super) struct RevWalkBuilder<'a> {
    index: &'a CompositeIndex,
    /// Head positions to be included (with their ancestors).
    wanted: Vec<GlobalCommitPosition>,
    /// Root positions to be excluded (with their ancestors).
    unwanted: Vec<GlobalCommitPosition>,
    /// Range of parents to follow when visiting wanted commits.
    wanted_parents_range: Range<u32>,
}
impl<'a> RevWalkBuilder<'a> {
    /// Creates a builder with no wanted/unwanted positions configured.
    pub fn new(index: &'a CompositeIndex) -> Self {
        Self {
            index,
            wanted: Vec::new(),
            unwanted: Vec::new(),
            wanted_parents_range: PARENTS_RANGE_FULL,
        }
    }
    /// Sets head positions to be included.
    pub fn wanted_heads(mut self, positions: Vec<GlobalCommitPosition>) -> Self {
        self.wanted = positions;
        self
    }
    /// Sets range of parents to iterate over for wanted heads.
    pub fn wanted_parents_range(mut self, wanted_parents_range: Range<u32>) -> Self {
        assert!(wanted_parents_range.start <= wanted_parents_range.end);
        self.wanted_parents_range = wanted_parents_range;
        self
    }
    /// Sets root positions to be excluded. The roots precede the heads.
    pub fn unwanted_roots(mut self, positions: Vec<GlobalCommitPosition>) -> Self {
        self.unwanted = positions;
        self
    }
    /// Walks ancestors.
    pub fn ancestors(self) -> RevWalkAncestors<'a> {
        self.ancestors_with_min_pos(GlobalCommitPosition::MIN)
    }
    /// Walks ancestors, bounded below by `min_pos` (presumably the queues drop
    /// positions below it — see `RevWalkQueue::with_min_pos`).
    fn ancestors_with_min_pos(self, min_pos: GlobalCommitPosition) -> RevWalkAncestors<'a> {
        let index = self.index;
        let wanted_parents_range = self.wanted_parents_range;
        let mut wanted_queue = RevWalkQueue::with_min_pos(min_pos);
        let mut unwanted_queue = RevWalkQueue::with_min_pos(min_pos);
        wanted_queue.extend(self.wanted, ());
        unwanted_queue.extend(self.unwanted, ());
        RevWalkBorrowedIndexIter {
            index,
            walk: RevWalkImpl {
                wanted_queue,
                unwanted_queue,
                wanted_parents_range,
            },
        }
    }
    /// Walks ancestors within the `generation_range`.
    ///
    /// A generation number counts from the heads.
    pub fn ancestors_filtered_by_generation(
        self,
        generation_range: Range<u32>,
    ) -> RevWalkAncestorsGenerationRange<'a> {
        let index = self.index;
        let wanted_parents_range = self.wanted_parents_range;
        let mut wanted_queue = RevWalkQueue::with_min_pos(GlobalCommitPosition::MIN);
        let mut unwanted_queue = RevWalkQueue::with_min_pos(GlobalCommitPosition::MIN);
        let item_range = RevWalkItemGenerationRange::from_filter_range(generation_range.clone());
        wanted_queue.extend(self.wanted, Reverse(item_range));
        unwanted_queue.extend(self.unwanted, ());
        RevWalkBorrowedIndexIter {
            index,
            walk: RevWalkGenerationRangeImpl {
                wanted_queue,
                unwanted_queue,
                wanted_parents_range,
                generation_end: generation_range.end,
            },
        }
    }
    /// Walks ancestors until all of the reachable roots in `root_positions` get
    /// visited.
    ///
    /// Use this if you are only interested in descendants of the given roots.
    /// The caller still needs to filter out unwanted entries.
    pub fn ancestors_until_roots(
        self,
        root_positions: impl IntoIterator<Item = GlobalCommitPosition>,
    ) -> RevWalkAncestors<'a> {
        // We can also make it stop visiting based on the generation number. Maybe
        // it will perform better for unbalanced branchy history.
        // https://github.com/jj-vcs/jj/pull/1492#discussion_r1160678325
        let min_pos = root_positions
            .into_iter()
            .min()
            .unwrap_or(GlobalCommitPosition::MAX);
        self.ancestors_with_min_pos(min_pos)
    }
    /// Fully consumes ancestors and walks back from the `root_positions`.
    ///
    /// The returned iterator yields entries in order of ascending index
    /// position.
    pub fn descendants(
        self,
        root_positions: HashSet<GlobalCommitPosition>,
    ) -> RevWalkDescendants<'a> {
        let index = self.index;
        // Collect all potential descendants up front; the descendants walk
        // then replays them in ascending position order.
        let candidate_positions = self
            .ancestors_until_roots(root_positions.iter().copied())
            .collect();
        RevWalkBorrowedIndexIter {
            index,
            walk: RevWalkDescendantsImpl {
                candidate_positions,
                root_positions,
                reachable_positions: HashSet::new(),
            },
        }
    }
    /// Fully consumes ancestors and walks back from the `root_positions` within
    /// the `generation_range`.
    ///
    /// A generation number counts from the roots.
    ///
    /// The returned iterator yields entries in order of ascending index
    /// position.
    pub fn descendants_filtered_by_generation(
        self,
        root_positions: Vec<GlobalCommitPosition>,
        generation_range: Range<u32>,
    ) -> RevWalkDescendantsGenerationRange {
        let index = self.index;
        let positions = self.ancestors_until_roots(root_positions.iter().copied());
        let descendants_index = RevWalkDescendantsIndex::build(index.commits(), positions);
        let mut wanted_queue = RevWalkQueue::with_min_pos(Reverse(GlobalCommitPosition::MAX));
        let unwanted_queue = RevWalkQueue::with_min_pos(Reverse(GlobalCommitPosition::MAX));
        let item_range = RevWalkItemGenerationRange::from_filter_range(generation_range.clone());
        for pos in root_positions {
            // Do not add unreachable roots which shouldn't be visited
            if descendants_index.contains_pos(pos) {
                wanted_queue.push(Reverse(pos), Reverse(item_range));
            }
        }
        RevWalkOwnedIndexIter {
            index: descendants_index,
            walk: RevWalkGenerationRangeImpl {
                wanted_queue,
                unwanted_queue,
                // Since we're using the descendants index, this actually is a range for the
                // children instead of the parents.
                wanted_parents_range: PARENTS_RANGE_FULL,
                generation_end: generation_range.end,
            },
        }
    }
}
/// Iterator over positions of ancestors of the wanted heads.
pub(super) type RevWalkAncestors<'a> =
    RevWalkBorrowedIndexIter<'a, CompositeIndex, RevWalkImpl<GlobalCommitPosition>>;
/// Core walk state: yields positions reachable from the wanted queue that the
/// unwanted queue doesn't also reach.
#[derive(Clone)]
#[must_use]
pub(super) struct RevWalkImpl<P> {
    wanted_queue: RevWalkQueue<P, ()>,
    unwanted_queue: RevWalkQueue<P, ()>,
    /// Range of adjacent positions to enqueue for each visited commit.
    wanted_parents_range: Range<u32>,
}
impl<I: RevWalkIndex + ?Sized> RevWalk<I> for RevWalkImpl<I::Position> {
    type Item = I::Position;
    fn next(&mut self, index: &I) -> Option<Self::Item> {
        while let Some(item) = self.wanted_queue.pop() {
            // Drop duplicate queue entries for the same position.
            self.wanted_queue.skip_while_eq(&item.pos);
            // Skip positions also reached by the unwanted walk; their parents
            // stay covered because the unwanted queue keeps advancing.
            if flush_queue_until(&mut self.unwanted_queue, index, item.pos).is_some() {
                continue;
            }
            // Enqueue this commit's adjacent positions, restricted to the
            // configured parents range.
            let parents = index.adjacent_positions(item.pos);
            self.wanted_queue.extend(
                filter_slice_by_range(parents.as_ref(), &self.wanted_parents_range)
                    .iter()
                    .copied(),
                (),
            );
            return Some(item.pos);
        }
        None
    }
}
/// Generation-filtered ancestor walk over a borrowed commit index.
pub(super) type RevWalkAncestorsGenerationRange<'a> =
    RevWalkBorrowedIndexIter<'a, CompositeIndex, RevWalkGenerationRangeImpl<GlobalCommitPosition>>;
/// Generation-filtered descendant walk over an owned descendants index.
pub(super) type RevWalkDescendantsGenerationRange = RevWalkOwnedIndexIter<
    RevWalkDescendantsIndex,
    RevWalkGenerationRangeImpl<Reverse<GlobalCommitPosition>>,
>;
/// Walk restricted to a range of generations (distance from the starting
/// positions).
#[derive(Clone)]
#[must_use]
pub(super) struct RevWalkGenerationRangeImpl<P> {
    // Sort item generations in ascending order
    wanted_queue: RevWalkQueue<P, Reverse<RevWalkItemGenerationRange>>,
    unwanted_queue: RevWalkQueue<P, ()>,
    wanted_parents_range: Range<u32>,
    /// Exclusive upper bound of the filter's generation range.
    generation_end: u32,
}
impl<P: Copy + Ord> RevWalkGenerationRangeImpl<P> {
    /// Enqueues the positions adjacent to `pos` with the successor generation
    /// range, unless that range would start at or past `generation_end`.
    fn enqueue_wanted_adjacents<I>(
        &mut self,
        index: &I,
        pos: P,
        generation: RevWalkItemGenerationRange,
    ) where
        I: RevWalkIndex<Position = P> + ?Sized,
    {
        // `gen.start` is incremented from 0, which should never overflow
        if generation.start + 1 >= self.generation_end {
            return;
        }
        // One step farther from the start: shift the range, saturating the end.
        let succ_generation = RevWalkItemGenerationRange {
            start: generation.start + 1,
            end: generation.end.saturating_add(1),
        };
        let parents = index.adjacent_positions(pos);
        self.wanted_queue.extend(
            filter_slice_by_range(parents.as_ref(), &self.wanted_parents_range)
                .iter()
                .copied(),
            Reverse(succ_generation),
        );
    }
}
impl<I: RevWalkIndex + ?Sized> RevWalk<I> for RevWalkGenerationRangeImpl<I::Position> {
    type Item = I::Position;
    fn next(&mut self, index: &I) -> Option<Self::Item> {
        while let Some(item) = self.wanted_queue.pop() {
            if flush_queue_until(&mut self.unwanted_queue, index, item.pos).is_some() {
                self.wanted_queue.skip_while_eq(&item.pos);
                continue;
            }
            let Reverse(mut pending_gen) = item.value;
            let mut some_in_range = pending_gen.contains_end(self.generation_end);
            // The same position can be queued once per path from a starting
            // commit, each with its own generation range.
            while let Some(x) = self.wanted_queue.pop_eq(&item.pos) {
                // Merge overlapped ranges to reduce number of the queued items.
                // For queries like `:(heads-)`, `gen.end` is close to `u32::MAX`, so
                // ranges can be merged into one. If this is still slow, maybe we can add
                // special case for upper/lower bounded ranges.
                let Reverse(generation) = x.value;
                some_in_range |= generation.contains_end(self.generation_end);
                pending_gen = if let Some(merged) = pending_gen.try_merge_end(generation) {
                    merged
                } else {
                    // Disjoint ranges: propagate the current one to the
                    // adjacent positions now and continue with the next.
                    self.enqueue_wanted_adjacents(index, item.pos, pending_gen);
                    generation
                };
            }
            self.enqueue_wanted_adjacents(index, item.pos, pending_gen);
            // Emit the position only if some path's generation was in range.
            if some_in_range {
                return Some(item.pos);
            }
        }
        None
    }
}
/// Per-item generation range, tracked while walking so that ranges arriving
/// via different paths can be merged.
#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
struct RevWalkItemGenerationRange {
    start: u32,
    end: u32,
}
impl RevWalkItemGenerationRange {
    /// Translates filter range to item range so that overlapped ranges can be
    /// merged later: the item range begins at generation 0 and is as wide as
    /// the filter range, and membership is then tested against the filter's
    /// (exclusive) end via `contains_end()`.
    fn from_filter_range(range: Range<u32>) -> Self {
        let width = range.end.saturating_sub(range.start);
        Self { start: 0, end: width }
    }
    /// Suppose sorted ranges `self, other`, merges them if overlapped.
    #[must_use]
    fn try_merge_end(self, other: Self) -> Option<Self> {
        if other.start > self.end {
            // Disjoint: there's a gap between self.end and other.start.
            return None;
        }
        Some(Self {
            start: self.start,
            end: self.end.max(other.end),
        })
    }
    /// Whether the translated filter bound `end` falls within this range
    /// (i.e. `start < end <= self.end`).
    #[must_use]
    fn contains_end(self, end: u32) -> bool {
        end > self.start && end <= self.end
    }
}
/// Walks queue items until `bottom_pos`. Returns item if found at `bottom_pos`.
///
/// Every visited item's adjacent positions are pushed back onto the queue, so
/// the queue keeps tracking the frontier of this sub-walk across calls.
fn flush_queue_until<I: RevWalkIndex + ?Sized>(
    queue: &mut RevWalkQueue<I::Position, ()>,
    index: &I,
    bottom_pos: I::Position,
) -> Option<RevWalkWorkItem<I::Position, ()>> {
    while let Some(item) = queue.pop_if(|x| x.pos >= bottom_pos) {
        // Drop duplicate entries for the same position.
        queue.skip_while_eq(&item.pos);
        queue.extend(index.adjacent_positions(item.pos), ());
        if item.pos == bottom_pos {
            return Some(item);
        }
    }
    None
}
/// Walks descendants from the roots, in order of ascending index position.
pub(super) type RevWalkDescendants<'a> =
    RevWalkBorrowedIndexIter<'a, CompositeIndex, RevWalkDescendantsImpl>;
#[derive(Clone)]
#[must_use]
pub(super) struct RevWalkDescendantsImpl {
    /// Candidates produced by the ancestors walk; consumed from the back.
    candidate_positions: Vec<GlobalCommitPosition>,
    /// The walk's starting points, always considered reachable.
    root_positions: HashSet<GlobalCommitPosition>,
    /// Positions confirmed to be a root or a descendant of one.
    reachable_positions: HashSet<GlobalCommitPosition>,
}
impl RevWalkDescendants<'_> {
    /// Builds a set of index positions reachable from the roots.
    ///
    /// This is equivalent to `.collect()` on the new iterator, but returns the
    /// internal buffer instead.
    pub fn collect_positions_set(mut self) -> HashSet<GlobalCommitPosition> {
        // Drain the iterator so the walk populates reachable_positions.
        self.by_ref().for_each(drop);
        self.walk.reachable_positions
    }
}
impl RevWalk<CompositeIndex> for RevWalkDescendantsImpl {
    type Item = GlobalCommitPosition;
    fn next(&mut self, index: &CompositeIndex) -> Option<Self::Item> {
        let index = index.commits();
        while let Some(candidate_pos) = self.candidate_positions.pop() {
            // A candidate is emitted if it is a root itself or any parent was
            // already proven reachable. Candidates come out in ascending
            // position order (see the RevWalkDescendants doc), so parents are
            // decided before their children.
            if self.root_positions.contains(&candidate_pos)
                || index
                    .entry_by_pos(candidate_pos)
                    .parent_positions()
                    .iter()
                    .any(|parent_pos| self.reachable_positions.contains(parent_pos))
            {
                self.reachable_positions.insert(candidate_pos);
                return Some(candidate_pos);
            }
        }
        None
    }
}
/// Filter a slice by a range, clamping the range to the length of the slice.
pub(super) fn filter_slice_by_range<'a, T: Copy>(slice: &'a [T], range: &Range<u32>) -> &'a [T] {
    // Clamp both bounds to the slice length so out-of-range inputs yield an
    // empty (or shortened) slice instead of panicking.
    let clamp = |pos: u32| usize::min(pos as usize, slice.len());
    &slice[clamp(range.start)..clamp(range.end)]
}
#[cfg(test)]
#[rustversion::attr(
since(1.89),
expect(clippy::cloned_ref_to_slice_refs, reason = "makes tests more readable")
)]
mod tests {
use itertools::Itertools as _;
use super::super::composite::AsCompositeIndex as _;
use super::super::mutable::DefaultMutableIndex;
use super::*;
use crate::backend::ChangeId;
use crate::backend::CommitId;
use crate::default_index::readonly::FieldLengths;
const TEST_FIELD_LENGTHS: FieldLengths = FieldLengths {
// TODO: align with commit_id_generator()?
commit_id: 3,
change_id: 16,
};
/// Generator of unique 16-byte ChangeId excluding root id
fn change_id_generator() -> impl FnMut() -> ChangeId {
let mut iter = (1_u128..).map(|n| ChangeId::new(n.to_le_bytes().into()));
move || iter.next().unwrap()
}
fn to_positions_vec(
index: &CompositeIndex,
commit_ids: &[CommitId],
) -> Vec<GlobalCommitPosition> {
commit_ids
.iter()
.map(|id| index.commits().commit_id_to_pos(id).unwrap())
.collect()
}
#[test]
fn test_filter_map_rev_walk() {
let source = EagerRevWalk::new(vec![0, 1, 2, 3, 4].into_iter());
let mut filtered = source.filter_map(|_, v| (v & 1 == 0).then_some(v + 5));
assert_eq!(filtered.next(&()), Some(5));
assert_eq!(filtered.next(&()), Some(7));
assert_eq!(filtered.next(&()), Some(9));
assert_eq!(filtered.next(&()), None);
assert_eq!(filtered.next(&()), None);
}
#[test]
fn test_map_rev_walk() {
let source = EagerRevWalk::new(vec![0, 1, 2].into_iter());
let mut mapped = source.map(|_, v| v + 5);
assert_eq!(mapped.next(&()), Some(5));
assert_eq!(mapped.next(&()), Some(6));
assert_eq!(mapped.next(&()), Some(7));
assert_eq!(mapped.next(&()), None);
assert_eq!(mapped.next(&()), None);
}
#[test]
fn test_peekable_rev_walk() {
let source = EagerRevWalk::new(vec![0, 1, 2, 3].into_iter());
let mut peekable = source.peekable();
assert_eq!(peekable.peek(&()), Some(&0));
assert_eq!(peekable.peek(&()), Some(&0));
assert_eq!(peekable.next(&()), Some(0));
assert_eq!(peekable.peeked, None);
assert_eq!(peekable.next_if(&(), |&v| v == 2), None);
assert_eq!(peekable.next(&()), Some(1));
assert_eq!(peekable.next_if(&(), |&v| v == 2), Some(2));
assert_eq!(peekable.peeked, None);
assert_eq!(peekable.peek(&()), Some(&3));
assert_eq!(peekable.next_if(&(), |&v| v == 3), Some(3));
assert_eq!(peekable.peeked, None);
assert_eq!(peekable.next(&()), None);
assert_eq!(peekable.next(&()), None);
let source = EagerRevWalk::new((vec![] as Vec<i32>).into_iter());
let mut peekable = source.peekable();
assert_eq!(peekable.peek(&()), None);
assert_eq!(peekable.next(&()), None);
}
#[test]
fn test_walk_ancestors() {
let mut new_change_id = change_id_generator();
let mut index = DefaultMutableIndex::full(TEST_FIELD_LENGTHS);
// 5
// |\
// 4 | 3
// | |/
// 1 2
// |/
// 0
let id_0 = CommitId::from_hex("000000");
let id_1 = CommitId::from_hex("111111");
let id_2 = CommitId::from_hex("222222");
let id_3 = CommitId::from_hex("333333");
let id_4 = CommitId::from_hex("444444");
let id_5 = CommitId::from_hex("555555");
index.add_commit_data(id_0.clone(), new_change_id(), &[]);
index.add_commit_data(id_1.clone(), new_change_id(), &[id_0.clone()]);
index.add_commit_data(id_2.clone(), new_change_id(), &[id_0.clone()]);
index.add_commit_data(id_3.clone(), new_change_id(), &[id_2.clone()]);
index.add_commit_data(id_4.clone(), new_change_id(), &[id_1.clone()]);
index.add_commit_data(id_5.clone(), new_change_id(), &[id_4.clone(), id_2.clone()]);
let walk_commit_ids = |wanted: &[CommitId], unwanted: &[CommitId]| {
let index = index.as_composite();
RevWalkBuilder::new(index)
.wanted_heads(to_positions_vec(index, wanted))
.unwanted_roots(to_positions_vec(index, unwanted))
.ancestors()
.map(|pos| index.commits().entry_by_pos(pos).commit_id())
.collect_vec()
};
// No wanted commits
assert!(walk_commit_ids(&[], &[]).is_empty());
// Simple linear walk to roo
assert_eq!(
walk_commit_ids(&[id_4.clone()], &[]),
vec![id_4.clone(), id_1.clone(), id_0.clone()]
);
// Commits that are both wanted and unwanted are not walked
assert_eq!(walk_commit_ids(&[id_0.clone()], &[id_0.clone()]), vec![]);
// Commits that are listed twice are only walked once
assert_eq!(
walk_commit_ids(&[id_0.clone(), id_0.clone()], &[]),
vec![id_0.clone()]
);
// If a commit and its ancestor are both wanted, the ancestor still gets walked
// only once
assert_eq!(
walk_commit_ids(&[id_0.clone(), id_1.clone()], &[]),
vec![id_1.clone(), id_0.clone()]
);
// Ancestors of both wanted and unwanted commits are not walked
assert_eq!(
walk_commit_ids(&[id_2.clone()], &[id_1.clone()]),
vec![id_2.clone()]
);
// Same as above, but the opposite order, to make sure that order in index
// doesn't matter
assert_eq!(
walk_commit_ids(&[id_1.clone()], &[id_2.clone()]),
vec![id_1.clone()]
);
// Two wanted nodes
assert_eq!(
walk_commit_ids(&[id_1.clone(), id_2.clone()], &[]),
vec![id_2.clone(), id_1.clone(), id_0.clone()]
);
// Order of output doesn't depend on order of input
assert_eq!(
walk_commit_ids(&[id_2.clone(), id_1.clone()], &[]),
vec![id_2.clone(), id_1.clone(), id_0]
);
// Two wanted nodes that share an unwanted ancestor
assert_eq!(
walk_commit_ids(&[id_5.clone(), id_3.clone()], &[id_2]),
vec![id_5, id_4, id_3, id_1]
);
}
#[test]
fn test_walk_ancestors_until_roots() {
let mut new_change_id = change_id_generator();
let mut index = DefaultMutableIndex::full(TEST_FIELD_LENGTHS);
// 7
// 6 |
// 5 |
// 4 |
// | 3
// | 2
// |/
// 1
// 0
let id_0 = CommitId::from_hex("000000");
let id_1 = CommitId::from_hex("111111");
let id_2 = CommitId::from_hex("222222");
let id_3 = CommitId::from_hex("333333");
let id_4 = CommitId::from_hex("444444");
let id_5 = CommitId::from_hex("555555");
let id_6 = CommitId::from_hex("666666");
let id_7 = CommitId::from_hex("777777");
index.add_commit_data(id_0.clone(), new_change_id(), &[]);
index.add_commit_data(id_1.clone(), new_change_id(), &[id_0.clone()]);
index.add_commit_data(id_2.clone(), new_change_id(), &[id_1.clone()]);
index.add_commit_data(id_3.clone(), new_change_id(), &[id_2.clone()]);
index.add_commit_data(id_4.clone(), new_change_id(), &[id_1.clone()]);
index.add_commit_data(id_5.clone(), new_change_id(), &[id_4.clone()]);
index.add_commit_data(id_6.clone(), new_change_id(), &[id_5.clone()]);
index.add_commit_data(id_7.clone(), new_change_id(), &[id_3.clone()]);
let index = index.as_composite();
let make_iter = |heads: &[CommitId], roots: &[CommitId]| {
RevWalkBuilder::new(index)
.wanted_heads(to_positions_vec(index, heads))
.ancestors_until_roots(to_positions_vec(index, roots))
};
let to_commit_id = |pos| index.commits().entry_by_pos(pos).commit_id();
let mut iter = make_iter(&[id_6.clone(), id_7.clone()], &[id_3.clone()]);
assert_eq!(iter.walk.wanted_queue.len(), 2);
assert_eq!(iter.next().map(to_commit_id), Some(id_7.clone()));
assert_eq!(iter.next().map(to_commit_id), Some(id_6.clone()));
assert_eq!(iter.next().map(to_commit_id), Some(id_5.clone()));
assert_eq!(iter.walk.wanted_queue.len(), 2);
assert_eq!(iter.next().map(to_commit_id), Some(id_4.clone()));
assert_eq!(iter.walk.wanted_queue.len(), 1); // id_1 shouldn't be queued
assert_eq!(iter.next().map(to_commit_id), Some(id_3.clone()));
assert_eq!(iter.walk.wanted_queue.len(), 0); // id_2 shouldn't be queued
assert!(iter.next().is_none());
let iter = make_iter(&[id_6.clone(), id_7.clone(), id_2.clone()], &[id_3.clone()]);
assert_eq!(iter.walk.wanted_queue.len(), 2); // id_2 shouldn't be queued
let iter = make_iter(&[id_6.clone(), id_7.clone()], &[]);
assert_eq!(iter.walk.wanted_queue.len(), 0); // no ids should be queued
}
#[test]
fn test_walk_ancestors_filtered_by_generation() {
let mut new_change_id = change_id_generator();
let mut index = DefaultMutableIndex::full(TEST_FIELD_LENGTHS);
// 8 6
// | |
// 7 5
// |/|
// 4 |
// | 3
// 2 |
// |/
// 1
// |
// 0
let id_0 = CommitId::from_hex("000000");
let id_1 = CommitId::from_hex("111111");
let id_2 = CommitId::from_hex("222222");
let id_3 = CommitId::from_hex("333333");
let id_4 = CommitId::from_hex("444444");
let id_5 = CommitId::from_hex("555555");
let id_6 = CommitId::from_hex("666666");
let id_7 = CommitId::from_hex("777777");
let id_8 = CommitId::from_hex("888888");
index.add_commit_data(id_0.clone(), new_change_id(), &[]);
index.add_commit_data(id_1.clone(), new_change_id(), &[id_0.clone()]);
index.add_commit_data(id_2.clone(), new_change_id(), &[id_1.clone()]);
index.add_commit_data(id_3.clone(), new_change_id(), &[id_1.clone()]);
index.add_commit_data(id_4.clone(), new_change_id(), &[id_2.clone()]);
index.add_commit_data(id_5.clone(), new_change_id(), &[id_4.clone(), id_3.clone()]);
index.add_commit_data(id_6.clone(), new_change_id(), &[id_5.clone()]);
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/default_index/mod.rs | lib/src/default_index/mod.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! An on-disk index of the commits in a repository.
//!
//! Implements an index of the commits in a repository that conforms to the
//! trains in the [index module](crate::index). The index is stored on local
//! disk and contains an entry for every commit in the repository. See
//! [`DefaultReadonlyIndex`] and [`DefaultMutableIndex`].
mod bit_set;
mod changed_path;
mod composite;
mod entry;
mod mutable;
mod readonly;
mod rev_walk;
mod rev_walk_queue;
mod revset_engine;
mod revset_graph_iterator;
mod store;
pub use self::mutable::DefaultMutableIndex;
pub use self::readonly::ChangedPathIndexLevelStats;
pub use self::readonly::CommitIndexLevelStats;
pub use self::readonly::DefaultReadonlyIndex;
pub use self::readonly::DefaultReadonlyIndexRevset;
pub use self::readonly::IndexStats;
pub use self::readonly::ReadonlyIndexLoadError;
pub use self::store::DefaultIndexStore;
pub use self::store::DefaultIndexStoreError;
pub use self::store::DefaultIndexStoreInitError;
#[cfg(test)]
#[rustversion::attr(
since(1.89),
expect(clippy::cloned_ref_to_slice_refs, reason = "makes tests more readable")
)]
mod tests {
use std::cmp::Reverse;
use std::convert::Infallible;
use std::ops::Range;
use std::sync::Arc;
use itertools::Itertools as _;
use smallvec::smallvec_inline;
use test_case::test_case;
use super::changed_path::CompositeChangedPathIndex;
use super::composite::AsCompositeIndex as _;
use super::composite::CommitIndexSegment as _;
use super::composite::CompositeCommitIndex;
use super::composite::DynCommitIndexSegment;
use super::entry::GlobalCommitPosition;
use super::entry::SmallGlobalCommitPositionsVec;
use super::mutable::MutableCommitIndexSegment;
use super::readonly::ReadonlyCommitIndexSegment;
use super::*;
use crate::backend::ChangeId;
use crate::backend::CommitId;
use crate::default_index::entry::LocalCommitPosition;
use crate::default_index::entry::SmallLocalCommitPositionsVec;
use crate::default_index::readonly::FieldLengths;
use crate::index::Index as _;
use crate::object_id::HexPrefix;
use crate::object_id::PrefixResolution;
use crate::revset::PARENTS_RANGE_FULL;
use crate::tests::new_temp_dir;
const TEST_FIELD_LENGTHS: FieldLengths = FieldLengths {
// TODO: align with commit_id_generator()?
commit_id: 3,
change_id: 16,
};
/// Generator of unique 16-byte CommitId excluding root id
fn commit_id_generator() -> impl FnMut() -> CommitId {
let mut iter = (1_u128..).map(|n| CommitId::new(n.to_le_bytes().into()));
move || iter.next().unwrap()
}
/// Generator of unique 16-byte ChangeId excluding root id
fn change_id_generator() -> impl FnMut() -> ChangeId {
let mut iter = (1_u128..).map(|n| ChangeId::new(n.to_le_bytes().into()));
move || iter.next().unwrap()
}
fn get_commit_index_stats(commits: &Arc<ReadonlyCommitIndexSegment>) -> IndexStats {
let changed_paths = CompositeChangedPathIndex::null();
let index = DefaultReadonlyIndex::from_segment(commits.clone(), changed_paths);
index.stats()
}
fn common_ancestors(
index: &DefaultMutableIndex,
set1: &[CommitId],
set2: &[CommitId],
) -> Vec<CommitId> {
index.common_ancestors(set1, set2).unwrap()
}
fn is_ancestor(
index: &DefaultMutableIndex,
ancestor_id: &CommitId,
descendant_id: &CommitId,
) -> bool {
index.is_ancestor(ancestor_id, descendant_id).unwrap()
}
#[test_case(false; "memory")]
#[test_case(true; "file")]
fn index_empty(on_disk: bool) {
let temp_dir = new_temp_dir();
let mutable_segment = MutableCommitIndexSegment::full(TEST_FIELD_LENGTHS);
let index_segment: Box<DynCommitIndexSegment> = if on_disk {
let saved_index = mutable_segment.save_in(temp_dir.path()).unwrap();
// Stats are as expected
let stats = get_commit_index_stats(&saved_index);
assert_eq!(stats.num_commits, 0);
assert_eq!(stats.num_heads, 0);
assert_eq!(stats.max_generation_number, 0);
assert_eq!(stats.num_merges, 0);
assert_eq!(stats.num_changes, 0);
Box::new(Arc::try_unwrap(saved_index).unwrap())
} else {
Box::new(mutable_segment)
};
let index = CompositeCommitIndex::new(index_segment.as_ref());
assert_eq!(index.num_commits(), 0);
// Cannot find any commits
assert!(index.entry_by_id(&CommitId::from_hex("000000")).is_none());
assert!(index.entry_by_id(&CommitId::from_hex("aaa111")).is_none());
assert!(index.entry_by_id(&CommitId::from_hex("ffffff")).is_none());
}
#[test_case(false; "memory")]
#[test_case(true; "file")]
fn index_root_commit(on_disk: bool) {
let temp_dir = new_temp_dir();
let mut new_change_id = change_id_generator();
let mut mutable_segment = MutableCommitIndexSegment::full(TEST_FIELD_LENGTHS);
let id_0 = CommitId::from_hex("000000");
let change_id0 = new_change_id();
mutable_segment.add_commit_data(id_0.clone(), change_id0.clone(), &[]);
let index_segment: Box<DynCommitIndexSegment> = if on_disk {
let saved_index = mutable_segment.save_in(temp_dir.path()).unwrap();
// Stats are as expected
let stats = get_commit_index_stats(&saved_index);
assert_eq!(stats.num_commits, 1);
assert_eq!(stats.num_heads, 1);
assert_eq!(stats.max_generation_number, 0);
assert_eq!(stats.num_merges, 0);
assert_eq!(stats.num_changes, 1);
Box::new(Arc::try_unwrap(saved_index).unwrap())
} else {
Box::new(mutable_segment)
};
let index = CompositeCommitIndex::new(index_segment.as_ref());
assert_eq!(index.num_commits(), 1);
// Can find only the root commit
assert_eq!(index.commit_id_to_pos(&id_0), Some(GlobalCommitPosition(0)));
assert_eq!(index.commit_id_to_pos(&CommitId::from_hex("aaaaaa")), None);
assert_eq!(index.commit_id_to_pos(&CommitId::from_hex("ffffff")), None);
// Check properties of root entry
let entry = index.entry_by_id(&id_0).unwrap();
assert_eq!(entry.position(), GlobalCommitPosition(0));
assert_eq!(entry.commit_id(), id_0);
assert_eq!(entry.change_id(), change_id0);
assert_eq!(entry.generation_number(), 0);
assert_eq!(entry.num_parents(), 0);
assert_eq!(
entry.parent_positions(),
SmallGlobalCommitPositionsVec::new()
);
assert_eq!(entry.parents().len(), 0);
}
#[test]
#[should_panic(expected = "parent commit is not indexed")]
fn index_missing_parent_commit() {
let mut new_change_id = change_id_generator();
let mut index = DefaultMutableIndex::full(TEST_FIELD_LENGTHS);
let id_0 = CommitId::from_hex("000000");
let id_1 = CommitId::from_hex("111111");
index.add_commit_data(id_1, new_change_id(), &[id_0]);
}
#[test_case(false, false; "full in memory")]
#[test_case(false, true; "full on disk")]
#[test_case(true, false; "incremental in memory")]
#[test_case(true, true; "incremental on disk")]
fn index_multiple_commits(incremental: bool, on_disk: bool) {
let temp_dir = new_temp_dir();
let mut new_change_id = change_id_generator();
let mut mutable_segment = MutableCommitIndexSegment::full(TEST_FIELD_LENGTHS);
// 5
// |\
// 4 | 3
// | |/
// 1 2
// |/
// 0
let id_0 = CommitId::from_hex("000000");
let change_id0 = new_change_id();
let id_1 = CommitId::from_hex("111111");
let change_id1 = new_change_id();
let id_2 = CommitId::from_hex("222222");
let change_id2 = change_id1.clone();
mutable_segment.add_commit_data(id_0.clone(), change_id0, &[]);
mutable_segment.add_commit_data(id_1.clone(), change_id1.clone(), &[id_0.clone()]);
mutable_segment.add_commit_data(id_2.clone(), change_id2.clone(), &[id_0.clone()]);
// If testing incremental indexing, write the first three commits to one file
// now and build the remainder as another segment on top.
if incremental {
let initial_file = mutable_segment.save_in(temp_dir.path()).unwrap();
mutable_segment = MutableCommitIndexSegment::incremental(initial_file);
}
let id_3 = CommitId::from_hex("333333");
let change_id3 = new_change_id();
let id_4 = CommitId::from_hex("444444");
let change_id4 = new_change_id();
let id_5 = CommitId::from_hex("555555");
let change_id5 = change_id3.clone();
mutable_segment.add_commit_data(id_3.clone(), change_id3.clone(), &[id_2.clone()]);
mutable_segment.add_commit_data(id_4.clone(), change_id4, &[id_1.clone()]);
mutable_segment.add_commit_data(id_5.clone(), change_id5, &[id_4.clone(), id_2.clone()]);
let index_segment: Box<DynCommitIndexSegment> = if on_disk {
let saved_index = mutable_segment.save_in(temp_dir.path()).unwrap();
// Stats are as expected
let stats = get_commit_index_stats(&saved_index);
assert_eq!(stats.num_commits, 6);
assert_eq!(stats.num_heads, 2);
assert_eq!(stats.max_generation_number, 3);
assert_eq!(stats.num_merges, 1);
assert_eq!(stats.num_changes, 4);
Box::new(Arc::try_unwrap(saved_index).unwrap())
} else {
Box::new(mutable_segment)
};
let index = CompositeCommitIndex::new(index_segment.as_ref());
assert_eq!(index.num_commits(), 6);
// Can find all the commits
let entry_0 = index.entry_by_id(&id_0).unwrap();
let entry_1 = index.entry_by_id(&id_1).unwrap();
let entry_2 = index.entry_by_id(&id_2).unwrap();
let entry_3 = index.entry_by_id(&id_3).unwrap();
let entry_4 = index.entry_by_id(&id_4).unwrap();
let entry_5 = index.entry_by_id(&id_5).unwrap();
// Check properties of some entries
assert_eq!(entry_0.position(), GlobalCommitPosition(0));
assert_eq!(entry_0.commit_id(), id_0);
assert_eq!(entry_1.position(), GlobalCommitPosition(1));
assert_eq!(entry_1.commit_id(), id_1);
assert_eq!(entry_1.change_id(), change_id1);
assert_eq!(entry_1.generation_number(), 1);
assert_eq!(entry_1.num_parents(), 1);
assert_eq!(
entry_1.parent_positions(),
smallvec_inline![GlobalCommitPosition(0)]
);
assert_eq!(entry_1.parents().len(), 1);
assert_eq!(
entry_1.parents().next().unwrap().position(),
GlobalCommitPosition(0)
);
assert_eq!(entry_2.position(), GlobalCommitPosition(2));
assert_eq!(entry_2.commit_id(), id_2);
assert_eq!(entry_2.change_id(), change_id2);
assert_eq!(entry_2.generation_number(), 1);
assert_eq!(entry_2.num_parents(), 1);
assert_eq!(
entry_2.parent_positions(),
smallvec_inline![GlobalCommitPosition(0)]
);
assert_eq!(entry_3.change_id(), change_id3);
assert_eq!(entry_3.generation_number(), 2);
assert_eq!(
entry_3.parent_positions(),
smallvec_inline![GlobalCommitPosition(2)]
);
assert_eq!(entry_4.position(), GlobalCommitPosition(4));
assert_eq!(entry_4.generation_number(), 2);
assert_eq!(entry_4.num_parents(), 1);
assert_eq!(
entry_4.parent_positions(),
smallvec_inline![GlobalCommitPosition(1)]
);
assert_eq!(entry_5.generation_number(), 3);
assert_eq!(entry_5.num_parents(), 2);
assert_eq!(
entry_5.parent_positions(),
smallvec_inline![GlobalCommitPosition(4), GlobalCommitPosition(2)]
);
assert_eq!(entry_5.parents().len(), 2);
assert_eq!(
entry_5.parents().next().unwrap().position(),
GlobalCommitPosition(4)
);
assert_eq!(
entry_5.parents().nth(1).unwrap().position(),
GlobalCommitPosition(2)
);
}
#[test_case(false; "in memory")]
#[test_case(true; "on disk")]
fn index_many_parents(on_disk: bool) {
let temp_dir = new_temp_dir();
let mut new_change_id = change_id_generator();
let mut mutable_segment = MutableCommitIndexSegment::full(TEST_FIELD_LENGTHS);
// 6
// /|\
// / | \
// / /|\ \
// 1 2 3 4 5
// \ \|/ /
// \ | /
// \|/
// 0
let id_0 = CommitId::from_hex("000000");
let id_1 = CommitId::from_hex("111111");
let id_2 = CommitId::from_hex("222222");
let id_3 = CommitId::from_hex("333333");
let id_4 = CommitId::from_hex("444444");
let id_5 = CommitId::from_hex("555555");
let id_6 = CommitId::from_hex("666666");
mutable_segment.add_commit_data(id_0.clone(), new_change_id(), &[]);
mutable_segment.add_commit_data(id_1.clone(), new_change_id(), &[id_0.clone()]);
mutable_segment.add_commit_data(id_2.clone(), new_change_id(), &[id_0.clone()]);
mutable_segment.add_commit_data(id_3.clone(), new_change_id(), &[id_0.clone()]);
mutable_segment.add_commit_data(id_4.clone(), new_change_id(), &[id_0.clone()]);
mutable_segment.add_commit_data(id_5.clone(), new_change_id(), &[id_0]);
mutable_segment.add_commit_data(
id_6.clone(),
new_change_id(),
&[id_1, id_2, id_3, id_4, id_5],
);
let index_segment: Box<DynCommitIndexSegment> = if on_disk {
let saved_index = mutable_segment.save_in(temp_dir.path()).unwrap();
// Stats are as expected
let stats = get_commit_index_stats(&saved_index);
assert_eq!(stats.num_commits, 7);
assert_eq!(stats.num_heads, 1);
assert_eq!(stats.max_generation_number, 2);
assert_eq!(stats.num_merges, 1);
Box::new(Arc::try_unwrap(saved_index).unwrap())
} else {
Box::new(mutable_segment)
};
let index = CompositeCommitIndex::new(index_segment.as_ref());
assert_eq!(index.num_commits(), 7);
// The octopus merge has the right parents
let entry_6 = index.entry_by_id(&id_6).unwrap();
assert_eq!(entry_6.commit_id(), id_6.clone());
assert_eq!(entry_6.num_parents(), 5);
assert_eq!(
entry_6.parent_positions(),
smallvec_inline![
GlobalCommitPosition(1),
GlobalCommitPosition(2),
GlobalCommitPosition(3),
GlobalCommitPosition(4),
GlobalCommitPosition(5),
]
);
assert_eq!(entry_6.generation_number(), 2);
}
#[test]
fn resolve_commit_id_prefix() {
let temp_dir = new_temp_dir();
let mut new_change_id = change_id_generator();
let mut mutable_segment = MutableCommitIndexSegment::full(TEST_FIELD_LENGTHS);
// Create some commits with different various common prefixes.
let id_0 = CommitId::from_hex("000000");
let id_1 = CommitId::from_hex("009999");
let id_2 = CommitId::from_hex("055488");
mutable_segment.add_commit_data(id_0.clone(), new_change_id(), &[]);
mutable_segment.add_commit_data(id_1.clone(), new_change_id(), &[]);
mutable_segment.add_commit_data(id_2.clone(), new_change_id(), &[]);
// Write the first three commits to one file and build the remainder on top.
let initial_file = mutable_segment.save_in(temp_dir.path()).unwrap();
mutable_segment = MutableCommitIndexSegment::incremental(initial_file);
let id_3 = CommitId::from_hex("055444");
let id_4 = CommitId::from_hex("055555");
let id_5 = CommitId::from_hex("033333");
mutable_segment.add_commit_data(id_3, new_change_id(), &[]);
mutable_segment.add_commit_data(id_4, new_change_id(), &[]);
mutable_segment.add_commit_data(id_5, new_change_id(), &[]);
let index = mutable_segment.as_composite();
// Can find commits given the full hex number
assert_eq!(
index.resolve_commit_id_prefix(&HexPrefix::from_id(&id_0)),
PrefixResolution::SingleMatch(id_0)
);
assert_eq!(
index.resolve_commit_id_prefix(&HexPrefix::from_id(&id_1)),
PrefixResolution::SingleMatch(id_1)
);
assert_eq!(
index.resolve_commit_id_prefix(&HexPrefix::from_id(&id_2)),
PrefixResolution::SingleMatch(id_2)
);
// Test nonexistent commits
assert_eq!(
index.resolve_commit_id_prefix(&HexPrefix::try_from_hex("ffffff").unwrap()),
PrefixResolution::NoMatch
);
assert_eq!(
index.resolve_commit_id_prefix(&HexPrefix::try_from_hex("000001").unwrap()),
PrefixResolution::NoMatch
);
// Test ambiguous prefix
assert_eq!(
index.resolve_commit_id_prefix(&HexPrefix::try_from_hex("0").unwrap()),
PrefixResolution::AmbiguousMatch
);
// Test a globally unique prefix in initial part
assert_eq!(
index.resolve_commit_id_prefix(&HexPrefix::try_from_hex("009").unwrap()),
PrefixResolution::SingleMatch(CommitId::from_hex("009999"))
);
// Test a globally unique prefix in incremental part
assert_eq!(
index.resolve_commit_id_prefix(&HexPrefix::try_from_hex("03").unwrap()),
PrefixResolution::SingleMatch(CommitId::from_hex("033333"))
);
// Test a locally unique but globally ambiguous prefix
assert_eq!(
index.resolve_commit_id_prefix(&HexPrefix::try_from_hex("0554").unwrap()),
PrefixResolution::AmbiguousMatch
);
}
#[test]
#[expect(clippy::redundant_clone)] // allow id_n.clone()
fn neighbor_commit_ids() {
let temp_dir = new_temp_dir();
let mut new_change_id = change_id_generator();
let mut mutable_segment = MutableCommitIndexSegment::full(TEST_FIELD_LENGTHS);
// Create some commits with different various common prefixes.
let id_0 = CommitId::from_hex("000001");
let id_1 = CommitId::from_hex("009999");
let id_2 = CommitId::from_hex("055488");
mutable_segment.add_commit_data(id_0.clone(), new_change_id(), &[]);
mutable_segment.add_commit_data(id_1.clone(), new_change_id(), &[]);
mutable_segment.add_commit_data(id_2.clone(), new_change_id(), &[]);
// Write the first three commits to one file and build the remainder on top.
let initial_file = mutable_segment.save_in(temp_dir.path()).unwrap();
mutable_segment = MutableCommitIndexSegment::incremental(initial_file.clone());
let id_3 = CommitId::from_hex("055444");
let id_4 = CommitId::from_hex("055555");
let id_5 = CommitId::from_hex("033333");
mutable_segment.add_commit_data(id_3.clone(), new_change_id(), &[]);
mutable_segment.add_commit_data(id_4.clone(), new_change_id(), &[]);
mutable_segment.add_commit_data(id_5.clone(), new_change_id(), &[]);
// Local lookup in readonly index, commit_id exists.
assert_eq!(
initial_file.resolve_neighbor_commit_ids(&id_0),
(None, Some(id_1.clone())),
);
assert_eq!(
initial_file.resolve_neighbor_commit_ids(&id_1),
(Some(id_0.clone()), Some(id_2.clone())),
);
assert_eq!(
initial_file.resolve_neighbor_commit_ids(&id_2),
(Some(id_1.clone()), None),
);
// Local lookup in readonly index, commit_id does not exist.
assert_eq!(
initial_file.resolve_neighbor_commit_ids(&CommitId::from_hex("000000")),
(None, Some(id_0.clone())),
);
assert_eq!(
initial_file.resolve_neighbor_commit_ids(&CommitId::from_hex("000002")),
(Some(id_0.clone()), Some(id_1.clone())),
);
assert_eq!(
initial_file.resolve_neighbor_commit_ids(&CommitId::from_hex("ffffff")),
(Some(id_2.clone()), None),
);
// Local lookup in mutable index, commit_id exists. id_5 < id_3 < id_4
assert_eq!(
mutable_segment.resolve_neighbor_commit_ids(&id_5),
(None, Some(id_3.clone())),
);
assert_eq!(
mutable_segment.resolve_neighbor_commit_ids(&id_3),
(Some(id_5.clone()), Some(id_4.clone())),
);
assert_eq!(
mutable_segment.resolve_neighbor_commit_ids(&id_4),
(Some(id_3.clone()), None),
);
// Local lookup in mutable index, commit_id does not exist. id_5 < id_3 < id_4
assert_eq!(
mutable_segment.resolve_neighbor_commit_ids(&CommitId::from_hex("033332")),
(None, Some(id_5.clone())),
);
assert_eq!(
mutable_segment.resolve_neighbor_commit_ids(&CommitId::from_hex("033334")),
(Some(id_5.clone()), Some(id_3.clone())),
);
assert_eq!(
mutable_segment.resolve_neighbor_commit_ids(&CommitId::from_hex("ffffff")),
(Some(id_4.clone()), None),
);
// Global lookup, commit_id exists. id_0 < id_1 < id_5 < id_3 < id_2 < id_4
let composite_index = CompositeCommitIndex::new(&mutable_segment);
assert_eq!(
composite_index.resolve_neighbor_commit_ids(&id_0),
(None, Some(id_1.clone())),
);
assert_eq!(
composite_index.resolve_neighbor_commit_ids(&id_1),
(Some(id_0.clone()), Some(id_5.clone())),
);
assert_eq!(
composite_index.resolve_neighbor_commit_ids(&id_5),
(Some(id_1.clone()), Some(id_3.clone())),
);
assert_eq!(
composite_index.resolve_neighbor_commit_ids(&id_3),
(Some(id_5.clone()), Some(id_2.clone())),
);
assert_eq!(
composite_index.resolve_neighbor_commit_ids(&id_2),
(Some(id_3.clone()), Some(id_4.clone())),
);
assert_eq!(
composite_index.resolve_neighbor_commit_ids(&id_4),
(Some(id_2.clone()), None),
);
// Global lookup, commit_id doesn't exist. id_0 < id_1 < id_5 < id_3 < id_2 <
// id_4
assert_eq!(
composite_index.resolve_neighbor_commit_ids(&CommitId::from_hex("000000")),
(None, Some(id_0.clone())),
);
assert_eq!(
composite_index.resolve_neighbor_commit_ids(&CommitId::from_hex("010000")),
(Some(id_1.clone()), Some(id_5.clone())),
);
assert_eq!(
composite_index.resolve_neighbor_commit_ids(&CommitId::from_hex("033334")),
(Some(id_5.clone()), Some(id_3.clone())),
);
assert_eq!(
composite_index.resolve_neighbor_commit_ids(&CommitId::from_hex("ffffff")),
(Some(id_4.clone()), None),
);
}
#[test]
fn shortest_unique_commit_id_prefix() {
let temp_dir = new_temp_dir();
let mut new_change_id = change_id_generator();
let mut mutable_segment = MutableCommitIndexSegment::full(TEST_FIELD_LENGTHS);
// Create some commits with different various common prefixes.
let id_0 = CommitId::from_hex("000001");
let id_1 = CommitId::from_hex("009999");
let id_2 = CommitId::from_hex("055488");
mutable_segment.add_commit_data(id_0.clone(), new_change_id(), &[]);
mutable_segment.add_commit_data(id_1.clone(), new_change_id(), &[]);
mutable_segment.add_commit_data(id_2.clone(), new_change_id(), &[]);
// Write the first three commits to one file and build the remainder on top.
let initial_file = mutable_segment.save_in(temp_dir.path()).unwrap();
mutable_segment = MutableCommitIndexSegment::incremental(initial_file);
let id_3 = CommitId::from_hex("055444");
let id_4 = CommitId::from_hex("055555");
let id_5 = CommitId::from_hex("033333");
mutable_segment.add_commit_data(id_3.clone(), new_change_id(), &[]);
mutable_segment.add_commit_data(id_4.clone(), new_change_id(), &[]);
mutable_segment.add_commit_data(id_5.clone(), new_change_id(), &[]);
let index = mutable_segment.as_composite();
// Public API: calculate shortest unique prefix len with known commit_id
assert_eq!(index.shortest_unique_commit_id_prefix_len(&id_0), 3);
assert_eq!(index.shortest_unique_commit_id_prefix_len(&id_1), 3);
assert_eq!(index.shortest_unique_commit_id_prefix_len(&id_2), 5);
assert_eq!(index.shortest_unique_commit_id_prefix_len(&id_3), 5);
assert_eq!(index.shortest_unique_commit_id_prefix_len(&id_4), 4);
assert_eq!(index.shortest_unique_commit_id_prefix_len(&id_5), 2);
// Public API: calculate shortest unique prefix len with unknown commit_id
assert_eq!(
index.shortest_unique_commit_id_prefix_len(&CommitId::from_hex("000002")),
6
);
assert_eq!(
index.shortest_unique_commit_id_prefix_len(&CommitId::from_hex("010000")),
2
);
assert_eq!(
index.shortest_unique_commit_id_prefix_len(&CommitId::from_hex("033334")),
6
);
assert_eq!(
index.shortest_unique_commit_id_prefix_len(&CommitId::from_hex("ffffff")),
1
);
}
#[test]
fn resolve_change_id_prefix() {
let temp_dir = new_temp_dir();
let mut new_commit_id = commit_id_generator();
let local_positions_vec = |positions: &[u32]| -> SmallLocalCommitPositionsVec {
positions.iter().copied().map(LocalCommitPosition).collect()
};
let index_positions_vec = |positions: &[u32]| -> SmallGlobalCommitPositionsVec {
positions
.iter()
.copied()
.map(GlobalCommitPosition)
.collect()
};
let id_0 = ChangeId::from_hex("00000001");
let id_1 = ChangeId::from_hex("00999999");
let id_2 = ChangeId::from_hex("05548888");
let id_3 = ChangeId::from_hex("05544444");
let id_4 = ChangeId::from_hex("05555555");
let id_5 = ChangeId::from_hex("05555333");
// Create some commits with different various common prefixes.
let mut mutable_segment = MutableCommitIndexSegment::full(FieldLengths {
commit_id: 16,
change_id: 4,
});
mutable_segment.add_commit_data(new_commit_id(), id_0.clone(), &[]);
mutable_segment.add_commit_data(new_commit_id(), id_1.clone(), &[]);
mutable_segment.add_commit_data(new_commit_id(), id_2.clone(), &[]);
mutable_segment.add_commit_data(new_commit_id(), id_1.clone(), &[]);
mutable_segment.add_commit_data(new_commit_id(), id_2.clone(), &[]);
mutable_segment.add_commit_data(new_commit_id(), id_2.clone(), &[]);
// Write these commits to one file and build the remainder on top.
let initial_file = mutable_segment.save_in(temp_dir.path()).unwrap();
mutable_segment = MutableCommitIndexSegment::incremental(initial_file.clone());
mutable_segment.add_commit_data(new_commit_id(), id_3.clone(), &[]);
mutable_segment.add_commit_data(new_commit_id(), id_3.clone(), &[]);
mutable_segment.add_commit_data(new_commit_id(), id_4.clone(), &[]);
mutable_segment.add_commit_data(new_commit_id(), id_1.clone(), &[]);
mutable_segment.add_commit_data(new_commit_id(), id_5.clone(), &[]);
// Local lookup in readonly index with the full hex digits
assert_eq!(
initial_file.resolve_change_id_prefix(&HexPrefix::from_id(&id_0)),
PrefixResolution::SingleMatch((id_0.clone(), local_positions_vec(&[0])))
);
assert_eq!(
initial_file.resolve_change_id_prefix(&HexPrefix::from_id(&id_1)),
PrefixResolution::SingleMatch((id_1.clone(), local_positions_vec(&[1, 3])))
);
assert_eq!(
initial_file.resolve_change_id_prefix(&HexPrefix::from_id(&id_2)),
PrefixResolution::SingleMatch((id_2.clone(), local_positions_vec(&[2, 4, 5])))
);
// Local lookup in mutable index with the full hex digits
assert_eq!(
mutable_segment.resolve_change_id_prefix(&HexPrefix::from_id(&id_1)),
PrefixResolution::SingleMatch((id_1.clone(), local_positions_vec(&[3])))
);
assert_eq!(
mutable_segment.resolve_change_id_prefix(&HexPrefix::from_id(&id_3)),
PrefixResolution::SingleMatch((id_3.clone(), local_positions_vec(&[0, 1])))
);
assert_eq!(
mutable_segment.resolve_change_id_prefix(&HexPrefix::from_id(&id_4)),
PrefixResolution::SingleMatch((id_4.clone(), local_positions_vec(&[2])))
);
assert_eq!(
mutable_segment.resolve_change_id_prefix(&HexPrefix::from_id(&id_5)),
PrefixResolution::SingleMatch((id_5.clone(), local_positions_vec(&[4])))
);
// Local lookup with locally unknown prefix
assert_eq!(
initial_file.resolve_change_id_prefix(&HexPrefix::try_from_hex("0555").unwrap()),
PrefixResolution::NoMatch
);
assert_eq!(
mutable_segment.resolve_change_id_prefix(&HexPrefix::try_from_hex("000").unwrap()),
PrefixResolution::NoMatch
);
// Local lookup with locally unique prefix
assert_eq!(
initial_file.resolve_change_id_prefix(&HexPrefix::try_from_hex("0554").unwrap()),
PrefixResolution::SingleMatch((id_2.clone(), local_positions_vec(&[2, 4, 5])))
);
assert_eq!(
mutable_segment.resolve_change_id_prefix(&HexPrefix::try_from_hex("0554").unwrap()),
PrefixResolution::SingleMatch((id_3.clone(), local_positions_vec(&[0, 1])))
);
// Local lookup with locally ambiguous prefix
assert_eq!(
initial_file.resolve_change_id_prefix(&HexPrefix::try_from_hex("00").unwrap()),
PrefixResolution::AmbiguousMatch
);
assert_eq!(
mutable_segment.resolve_change_id_prefix(&HexPrefix::try_from_hex("05555").unwrap()),
PrefixResolution::AmbiguousMatch
);
let index = mutable_segment.as_composite();
// Global lookup with the full hex digits
assert_eq!(
index.resolve_change_id_prefix(&HexPrefix::from_id(&id_0)),
PrefixResolution::SingleMatch((id_0.clone(), index_positions_vec(&[0])))
);
assert_eq!(
index.resolve_change_id_prefix(&HexPrefix::from_id(&id_1)),
PrefixResolution::SingleMatch((id_1.clone(), index_positions_vec(&[9, 3, 1])))
);
assert_eq!(
index.resolve_change_id_prefix(&HexPrefix::from_id(&id_2)),
PrefixResolution::SingleMatch((id_2.clone(), index_positions_vec(&[5, 4, 2])))
);
assert_eq!(
index.resolve_change_id_prefix(&HexPrefix::from_id(&id_3)),
PrefixResolution::SingleMatch((id_3.clone(), index_positions_vec(&[7, 6])))
);
assert_eq!(
index.resolve_change_id_prefix(&HexPrefix::from_id(&id_4)),
PrefixResolution::SingleMatch((id_4.clone(), index_positions_vec(&[8])))
);
assert_eq!(
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/default_index/changed_path.rs | lib/src/default_index/changed_path.rs | // Copyright 2025 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Index of per-commit changed paths.
use std::collections::HashMap;
use std::fmt;
use std::fmt::Debug;
use std::fs::File;
use std::io::Read;
use std::io::Write as _;
use std::path::Path;
use std::sync::Arc;
use blake2::Blake2b512;
use digest::Digest as _;
use either::Either;
use futures::StreamExt as _;
use futures::TryStreamExt as _;
use itertools::Itertools as _;
use tempfile::NamedTempFile;
use super::entry::GlobalCommitPosition;
use super::readonly::ReadonlyIndexLoadError;
use crate::backend::BackendResult;
use crate::commit::Commit;
use crate::file_util::IoResultExt as _;
use crate::file_util::PathError;
use crate::file_util::persist_content_addressed_temp_file;
use crate::index::Index;
use crate::matchers::EverythingMatcher;
use crate::object_id::ObjectId as _;
use crate::object_id::id_type;
use crate::repo_path::RepoPath;
use crate::repo_path::RepoPathBuf;
use crate::rewrite::merge_commit_trees_no_resolve_without_repo;
use crate::tree_merge::resolve_file_values;
/// Current format version of the changed-path index segment file.
const FILE_FORMAT_VERSION: u32 = 0;
id_type!(pub(super) ChangedPathIndexSegmentId { hex() });
/// Commit position within a changed-path index segment.
///
/// This may be different from `LocalCommitPosition`, which is a position
/// relative to the start of the commit index segment.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
struct CommitPosition(u32);
/// Path position within a changed-path index segment.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
struct PathPosition(u32);
/// Changed-path index segment backed by immutable file.
///
/// File format:
/// ```text
/// u32: file format version
///
/// u32: number of (local) commit entries
/// u32: number of changed path entries
/// u32: number of path entries
/// u32: number of bytes of path entries
///
/// for each commit, in commit-index order
/// u32: position in changed-path table
/// u32: number of changed-path entries (as sentinel)
/// for each commit, in commit-index order
/// for each changed path, sorted by path
/// u32: lookup position of path
///
/// for each path, sorted by path
/// u32: byte offset in sorted paths table
/// u32: number of bytes of path entries (as sentinel)
/// for each path, sorted by path
/// <arbitrary length of bytes>: path
/// ```
///
/// * The parent segment id isn't stored in a segment file. This allows us to
/// insert parents without rewriting the descendant segments.
/// * Paths table isn't compacted across segments to keep the implementation
/// simple. There isn't a strong reason to map paths to globally-unique
/// integers. This also means changed-path positions are sorted by both path
/// texts and integers.
/// * Changed-path positions are sorted by paths so that we can binary-search
/// entries by exact path or path prefix if needed.
/// * Path components aren't split nor compressed so we can borrow `&RepoPath`
/// from the index data.
///
/// Ideas for future improvements:
/// * Multi-level index based on the paths? Since indexing is slow, it might
/// make sense to split index files based on path depths.
/// * Shared paths table to save disk space?
pub(super) struct ReadonlyChangedPathIndexSegment {
id: ChangedPathIndexSegmentId,
num_local_commits: u32,
num_changed_paths: u32,
num_paths: u32,
// Base data offsets in bytes:
commit_lookup_base: usize,
changed_path_lookup_base: usize,
path_lookup_base: usize,
path_bytes_base: usize,
data: Vec<u8>,
}
impl Debug for ReadonlyChangedPathIndexSegment {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
f.debug_struct("ReadonlyChangedPathIndexSegment")
.field("id", &self.id)
.finish_non_exhaustive()
}
}
impl ReadonlyChangedPathIndexSegment {
pub(super) fn load(
dir: &Path,
id: ChangedPathIndexSegmentId,
) -> Result<Arc<Self>, ReadonlyIndexLoadError> {
let mut file = File::open(dir.join(id.hex()))
.map_err(|err| ReadonlyIndexLoadError::from_io_err("changed-path", id.hex(), err))?;
Self::load_from(&mut file, id)
}
pub(super) fn load_from(
file: &mut dyn Read,
id: ChangedPathIndexSegmentId,
) -> Result<Arc<Self>, ReadonlyIndexLoadError> {
let from_io_err = |err| ReadonlyIndexLoadError::from_io_err("changed-path", id.hex(), err);
let read_u32 = |file: &mut dyn Read| {
let mut buf = [0; 4];
file.read_exact(&mut buf).map_err(from_io_err)?;
Ok(u32::from_le_bytes(buf))
};
let format_version = read_u32(file)?;
if format_version != FILE_FORMAT_VERSION {
return Err(ReadonlyIndexLoadError::UnexpectedVersion {
kind: "changed-path",
found_version: format_version,
expected_version: FILE_FORMAT_VERSION,
});
}
let num_local_commits = read_u32(file)?;
let num_changed_paths = read_u32(file)?;
let num_paths = read_u32(file)?;
let num_path_bytes = read_u32(file)?;
let mut data = vec![];
file.read_to_end(&mut data).map_err(from_io_err)?;
let commit_lookup_size = (num_local_commits as usize + 1) * 4;
let changed_path_lookup_size = (num_changed_paths as usize) * 4;
let path_lookup_size = (num_paths as usize + 1) * 4;
let commit_lookup_base = 0;
let changed_path_lookup_base = commit_lookup_base + commit_lookup_size;
let path_lookup_base = changed_path_lookup_base + changed_path_lookup_size;
let path_bytes_base = path_lookup_base + path_lookup_size;
let expected_size = path_bytes_base + (num_path_bytes as usize);
if data.len() != expected_size {
return Err(ReadonlyIndexLoadError::invalid_data(
"changed-path",
id.hex(),
"unexpected data length",
));
}
Ok(Arc::new(Self {
id,
num_local_commits,
num_changed_paths,
num_paths,
commit_lookup_base,
changed_path_lookup_base,
path_lookup_base,
path_bytes_base,
data,
}))
}
pub(super) fn id(&self) -> &ChangedPathIndexSegmentId {
&self.id
}
pub(super) fn num_local_commits(&self) -> u32 {
self.num_local_commits
}
pub(super) fn num_changed_paths(&self) -> u32 {
self.num_changed_paths
}
pub(super) fn num_paths(&self) -> u32 {
self.num_paths
}
fn changed_paths(&self, pos: CommitPosition) -> impl ExactSizeIterator<Item = &RepoPath> {
let table = self.changed_paths_table(pos);
let (chunks, _remainder) = table.as_chunks();
chunks
.iter()
.map(|&chunk: &[u8; 4]| PathPosition(u32::from_le_bytes(chunk)))
.map(|pos| self.path(pos))
}
fn changed_paths_table(&self, pos: CommitPosition) -> &[u8] {
let table = &self.data[self.commit_lookup_base..self.changed_path_lookup_base];
let offset = pos.0 as usize * 4;
let start = u32::from_le_bytes(table[offset..][0..4].try_into().unwrap());
let end = u32::from_le_bytes(table[offset..][4..8].try_into().unwrap());
let table = &self.data[self.changed_path_lookup_base..self.path_lookup_base];
&table[(start as usize) * 4..(end as usize) * 4]
}
fn path(&self, pos: PathPosition) -> &RepoPath {
let bytes = self.path_bytes(pos);
RepoPath::from_internal_string(
str::from_utf8(bytes).expect("indexed path should be valid utf-8"),
)
.expect("indexed path should be valid")
}
fn path_bytes(&self, pos: PathPosition) -> &[u8] {
let table = &self.data[self.path_lookup_base..self.path_bytes_base];
let offset = pos.0 as usize * 4;
let start = u32::from_le_bytes(table[offset..][0..4].try_into().unwrap());
let end = u32::from_le_bytes(table[offset..][4..8].try_into().unwrap());
let bytes = &self.data[self.path_bytes_base..];
&bytes[start as usize..end as usize]
}
#[cfg(test)]
fn paths(&self) -> impl ExactSizeIterator<Item = &RepoPath> {
(0..self.num_paths).map(|pos| self.path(PathPosition(pos)))
}
}
/// Changed-path index segment which is not serialized to file.
#[derive(Clone)]
pub(super) struct MutableChangedPathIndexSegment {
entries: Vec<Vec<RepoPathBuf>>,
}
impl Debug for MutableChangedPathIndexSegment {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
f.debug_struct("MutableChangedPathIndexSegment")
.finish_non_exhaustive()
}
}
impl MutableChangedPathIndexSegment {
pub(super) fn empty() -> Self {
Self { entries: vec![] }
}
pub(super) fn is_empty(&self) -> bool {
self.entries.is_empty()
}
pub(super) fn num_local_commits(&self) -> u32 {
self.entries.len().try_into().unwrap()
}
fn changed_paths(&self, pos: CommitPosition) -> impl ExactSizeIterator<Item = &RepoPath> {
self.entries[pos.0 as usize].iter().map(AsRef::as_ref)
}
fn add_changed_paths(&mut self, paths: Vec<RepoPathBuf>) {
debug_assert!(paths.is_sorted_by(|a, b| a < b));
self.entries.push(paths);
}
fn extend_from_readonly_segment(&mut self, other_segment: &ReadonlyChangedPathIndexSegment) {
self.entries
.reserve(usize::try_from(other_segment.num_local_commits()).unwrap());
for pos in (0..other_segment.num_local_commits()).map(CommitPosition) {
let paths = other_segment
.changed_paths(pos)
.map(|path| path.to_owned())
.collect();
self.add_changed_paths(paths);
}
}
fn extend_from_mutable_segment(&mut self, other_segment: Self) {
self.entries.extend(other_segment.entries);
}
fn serialize_into(&self, buf: &mut Vec<u8>) {
let mut paths = self.entries.iter().flatten().unique().collect_vec();
paths.sort_unstable();
let path_pos_map: HashMap<_, _> = paths
.iter()
.enumerate()
.map(|(i, &path)| (path, PathPosition(u32::try_from(i).unwrap())))
.collect();
buf.extend(FILE_FORMAT_VERSION.to_le_bytes());
let num_commits = u32::try_from(self.entries.len()).unwrap();
let num_paths = u32::try_from(paths.len()).unwrap();
buf.extend(num_commits.to_le_bytes());
let num_changed_paths_offset = buf.len();
buf.extend(0_u32.to_le_bytes());
buf.extend(num_paths.to_le_bytes());
let num_path_bytes_offset = buf.len();
buf.extend(0_u32.to_le_bytes());
let mut num_changed_paths: u32 = 0;
for paths in &self.entries {
buf.extend(num_changed_paths.to_le_bytes());
num_changed_paths += u32::try_from(paths.len()).unwrap();
}
buf.extend(num_changed_paths.to_le_bytes()); // sentinel
buf[num_changed_paths_offset..][..4].copy_from_slice(&num_changed_paths.to_le_bytes());
for path in self.entries.iter().flatten() {
let PathPosition(pos) = path_pos_map[path];
buf.extend(pos.to_le_bytes());
}
let mut num_path_bytes: u32 = 0;
for &path in &paths {
buf.extend(num_path_bytes.to_le_bytes());
num_path_bytes += u32::try_from(path.as_internal_file_string().len()).unwrap();
}
buf.extend(num_path_bytes.to_le_bytes()); // sentinel
buf[num_path_bytes_offset..][..4].copy_from_slice(&num_path_bytes.to_le_bytes());
for &path in &paths {
buf.extend(path.as_internal_file_string().as_bytes());
}
}
pub(super) fn save_in(
&self,
dir: &Path,
) -> Result<Arc<ReadonlyChangedPathIndexSegment>, PathError> {
let mut buf = Vec::new();
self.serialize_into(&mut buf);
let mut hasher = Blake2b512::new();
hasher.update(&buf);
let file_id = ChangedPathIndexSegmentId::from_bytes(&hasher.finalize());
let file_path = dir.join(file_id.hex());
let mut file = NamedTempFile::new_in(dir).context(dir)?;
file.as_file_mut().write_all(&buf).context(file.path())?;
persist_content_addressed_temp_file(file, &file_path).context(&file_path)?;
let segment = ReadonlyChangedPathIndexSegment::load_from(&mut &buf[..], file_id)
.expect("in-memory index data should be valid and readable");
Ok(segment)
}
}
/// Index of per-commit changed paths.
#[derive(Clone, Debug)]
pub(super) struct CompositeChangedPathIndex {
start_commit_pos: Option<GlobalCommitPosition>,
num_commits: u32, // cache
readonly_segments: Vec<Arc<ReadonlyChangedPathIndexSegment>>,
mutable_segment: Option<Box<MutableChangedPathIndexSegment>>,
}
impl CompositeChangedPathIndex {
/// Creates empty changed-path index which cannot store entries. In other
/// words, the changed-path index is disabled.
pub(super) fn null() -> Self {
Self {
start_commit_pos: None,
num_commits: 0,
readonly_segments: vec![],
mutable_segment: None,
}
}
/// Creates empty changed-path index which will store entries from
/// `start_commit_pos`.
pub(super) fn empty(start_commit_pos: GlobalCommitPosition) -> Self {
Self {
start_commit_pos: Some(start_commit_pos),
num_commits: 0,
readonly_segments: vec![],
mutable_segment: None,
}
}
pub(super) fn load(
dir: &Path,
start_commit_pos: GlobalCommitPosition,
ids: &[ChangedPathIndexSegmentId],
) -> Result<Self, ReadonlyIndexLoadError> {
let readonly_segments: Vec<_> = ids
.iter()
.map(|id| ReadonlyChangedPathIndexSegment::load(dir, id.clone()))
.try_collect()?;
let num_commits = readonly_segments
.iter()
.map(|segment| segment.num_local_commits())
.sum();
Ok(Self {
start_commit_pos: Some(start_commit_pos),
num_commits,
readonly_segments,
mutable_segment: None,
})
}
/// Adds mutable segment if needed.
pub(super) fn make_mutable(&mut self) {
if self.start_commit_pos.is_none() || self.mutable_segment.is_some() {
return;
}
self.mutable_segment = Some(Box::new(MutableChangedPathIndexSegment::empty()));
}
/// Position of the first indexed (or to-be-indexed) commit.
pub(super) fn start_commit_pos(&self) -> Option<GlobalCommitPosition> {
self.start_commit_pos
}
/// New commit index position which can be added to this index.
pub(super) fn next_mutable_commit_pos(&self) -> Option<GlobalCommitPosition> {
if self.mutable_segment.is_some() {
self.start_commit_pos
.map(|GlobalCommitPosition(start)| GlobalCommitPosition(start + self.num_commits))
} else {
None
}
}
pub(super) fn num_commits(&self) -> u32 {
self.num_commits
}
pub(super) fn readonly_segments(&self) -> &[Arc<ReadonlyChangedPathIndexSegment>] {
&self.readonly_segments
}
/// Appends segments from the `other` index. This and the other index should
/// be contiguous.
pub(super) fn append_segments(&mut self, other: &Self) {
assert!(self.mutable_segment.is_none());
let GlobalCommitPosition(self_start_pos) =
self.start_commit_pos.expect("should have start pos");
let Some(GlobalCommitPosition(other_start_pos)) = other.start_commit_pos else {
return;
};
assert_eq!(self_start_pos + self.num_commits, other_start_pos);
self.readonly_segments
.extend_from_slice(&other.readonly_segments);
self.mutable_segment = other.mutable_segment.clone();
self.num_commits += other.num_commits;
}
/// Maps `global_pos` to segment and segment-local position.
fn find_segment(
&self,
global_pos: GlobalCommitPosition,
) -> Option<(
CommitPosition,
Either<&ReadonlyChangedPathIndexSegment, &MutableChangedPathIndexSegment>,
)> {
let mut local_pos = u32::checked_sub(global_pos.0, self.start_commit_pos?.0)?;
for segment in &self.readonly_segments {
local_pos = match u32::checked_sub(local_pos, segment.num_local_commits()) {
Some(next_local_pos) => next_local_pos,
None => return Some((CommitPosition(local_pos), Either::Left(segment))),
};
}
let segment = self.mutable_segment.as_deref()?;
(local_pos < segment.num_local_commits())
.then_some((CommitPosition(local_pos), Either::Right(segment)))
}
/// Returns iterator over paths changed at the specified commit. The paths
/// are sorted. Returns `None` if the commit wasn't indexed.
pub(super) fn changed_paths(
&self,
global_pos: GlobalCommitPosition,
) -> Option<impl ExactSizeIterator<Item = &RepoPath>> {
let (local_pos, segment) = self.find_segment(global_pos)?;
Some(segment.map_either(
|x| x.changed_paths(local_pos),
|x| x.changed_paths(local_pos),
))
}
/// Adds changed paths of the next commit.
///
/// The input `paths` must be sorted.
///
/// Caller must ensure that the commit matches `next_mutable_commit_pos()`.
/// Panics if this index isn't mutable (i.e. `next_mutable_commit_pos()` is
/// `None`.)
pub(super) fn add_changed_paths(&mut self, paths: Vec<RepoPathBuf>) {
let segment = self
.mutable_segment
.as_deref_mut()
.expect("should have mutable");
segment.add_changed_paths(paths);
self.num_commits += 1;
}
/// Squashes parent segments if the mutable segment has more than half the
/// commits of its parent segment. This is done recursively, so the stack of
/// index segments has O(log n) files.
pub(super) fn maybe_squash_with_ancestors(&mut self) {
let Some(mutable_segment) = self.mutable_segment.as_deref() else {
return;
};
let mut num_new_commits = mutable_segment.num_local_commits();
let mut squash_start = self.readonly_segments.len();
for segment in self.readonly_segments.iter().rev() {
// TODO: We should probably also squash if the parent segment has
// less than N commits, regardless of how many (few) are in
// `mutable_segment`.
if 2 * num_new_commits < segment.num_local_commits() {
break;
}
num_new_commits += segment.num_local_commits();
squash_start -= 1;
}
if squash_start == self.readonly_segments.len() {
return;
}
let mut squashed_segment = Box::new(MutableChangedPathIndexSegment::empty());
for segment in self.readonly_segments.drain(squash_start..) {
squashed_segment.extend_from_readonly_segment(&segment);
}
squashed_segment.extend_from_mutable_segment(*self.mutable_segment.take().unwrap());
self.mutable_segment = Some(squashed_segment);
}
/// Writes mutable segment if exists, turns it into readonly segment.
pub(super) fn save_in(&mut self, dir: &Path) -> Result<(), PathError> {
let Some(segment) = self.mutable_segment.take() else {
return Ok(());
};
if segment.is_empty() {
return Ok(());
};
let segment = segment.save_in(dir)?;
self.readonly_segments.push(segment);
Ok(())
}
}
/// Calculates the parent tree of the given `commit`, and builds a sorted list
/// of changed paths compared to the parent tree.
pub(super) async fn collect_changed_paths(
index: &dyn Index,
commit: &Commit,
) -> BackendResult<Vec<RepoPathBuf>> {
let parents: Vec<_> = commit.parents_async().await?;
if let [p] = parents.as_slice()
&& commit.tree_ids() == p.tree_ids()
{
return Ok(vec![]);
}
// Don't resolve the entire tree. It's cheaper to resolve each conflict file
// even if we have to visit all files.
tracing::trace!(?commit, parents_count = parents.len(), "calculating diffs");
let store = commit.store();
let from_tree = merge_commit_trees_no_resolve_without_repo(store, index, &parents).await?;
let to_tree = commit.tree();
let tree_diff = from_tree.diff_stream(&to_tree, &EverythingMatcher);
let paths = tree_diff
.map(|entry| entry.values.map(|values| (entry.path, values)))
.try_filter_map(async |(path, mut diff)| {
diff.before = resolve_file_values(store, &path, diff.before).await?;
Ok(diff.is_changed().then_some(path))
})
.try_collect()
.await?;
Ok(paths)
}
#[cfg(test)]
mod tests {
use test_case::test_case;
use super::*;
use crate::tests::new_temp_dir;
fn repo_path(value: &str) -> &RepoPath {
RepoPath::from_internal_string(value).unwrap()
}
fn repo_path_buf(value: impl Into<String>) -> RepoPathBuf {
RepoPathBuf::from_internal_string(value).unwrap()
}
fn collect_changed_paths(
index: &CompositeChangedPathIndex,
pos: GlobalCommitPosition,
) -> Option<Vec<&RepoPath>> {
Some(index.changed_paths(pos)?.collect())
}
#[test]
fn test_composite_null() {
let mut index = CompositeChangedPathIndex::null();
assert_eq!(index.start_commit_pos(), None);
assert_eq!(index.next_mutable_commit_pos(), None);
assert_eq!(collect_changed_paths(&index, GlobalCommitPosition(0)), None);
// No entries can be added to "null" index
index.make_mutable();
assert!(index.mutable_segment.is_none());
assert_eq!(index.num_commits(), 0);
}
#[test]
fn test_composite_empty() {
let temp_dir = new_temp_dir();
let mut index = CompositeChangedPathIndex::empty(GlobalCommitPosition(0));
assert_eq!(index.start_commit_pos(), Some(GlobalCommitPosition(0)));
assert_eq!(index.next_mutable_commit_pos(), None);
assert_eq!(collect_changed_paths(&index, GlobalCommitPosition(0)), None);
index.make_mutable();
assert!(index.mutable_segment.is_some());
assert_eq!(
index.next_mutable_commit_pos(),
Some(GlobalCommitPosition(0))
);
assert_eq!(collect_changed_paths(&index, GlobalCommitPosition(0)), None);
// Empty segment shouldn't be saved on disk
index.save_in(temp_dir.path()).unwrap();
assert!(index.mutable_segment.is_none());
assert!(index.readonly_segments.is_empty());
assert_eq!(index.start_commit_pos(), Some(GlobalCommitPosition(0)));
assert_eq!(index.next_mutable_commit_pos(), None);
assert_eq!(index.num_commits(), 0);
}
#[test_case(false, false; "mutable")]
#[test_case(true, false; "readonly")]
#[test_case(true, true; "readonly, reloaded")]
fn test_composite_some_commits(on_disk: bool, reload: bool) {
let temp_dir = new_temp_dir();
let start_commit_pos = GlobalCommitPosition(1);
let mut index = CompositeChangedPathIndex::empty(start_commit_pos);
index.make_mutable();
index.add_changed_paths(vec![repo_path_buf("foo")]);
index.add_changed_paths(vec![]);
index.add_changed_paths(vec![repo_path_buf("bar/baz"), repo_path_buf("foo")]);
index.add_changed_paths(vec![]);
assert_eq!(
index.next_mutable_commit_pos(),
Some(GlobalCommitPosition(5))
);
assert_eq!(index.num_commits(), 4);
if on_disk {
index.save_in(temp_dir.path()).unwrap();
assert!(index.mutable_segment.is_none());
assert_eq!(index.readonly_segments.len(), 1);
assert_eq!(index.next_mutable_commit_pos(), None);
assert_eq!(index.num_commits(), 4);
}
if reload {
let ids = index
.readonly_segments()
.iter()
.map(|segment| segment.id().clone())
.collect_vec();
index =
CompositeChangedPathIndex::load(temp_dir.path(), start_commit_pos, &ids).unwrap();
}
if let [segment] = &*index.readonly_segments {
assert_eq!(segment.num_local_commits(), 4);
assert_eq!(segment.num_changed_paths(), 3);
assert_eq!(segment.num_paths(), 2);
assert_eq!(
segment.paths().collect_vec(),
[repo_path("bar/baz"), repo_path("foo")]
);
}
assert_eq!(collect_changed_paths(&index, GlobalCommitPosition(0)), None);
assert_eq!(
collect_changed_paths(&index, GlobalCommitPosition(1)),
Some(vec![repo_path("foo")])
);
assert_eq!(
collect_changed_paths(&index, GlobalCommitPosition(2)),
Some(vec![])
);
assert_eq!(
collect_changed_paths(&index, GlobalCommitPosition(3)),
Some(vec![repo_path("bar/baz"), repo_path("foo")])
);
assert_eq!(
collect_changed_paths(&index, GlobalCommitPosition(4)),
Some(vec![])
);
assert_eq!(collect_changed_paths(&index, GlobalCommitPosition(5)), None);
}
#[test]
fn test_composite_empty_commits() {
let temp_dir = new_temp_dir();
let mut index = CompositeChangedPathIndex::empty(GlobalCommitPosition(0));
index.make_mutable();
// An empty commits table can be serialized/deserialized if forced
let segment = index.mutable_segment.take().unwrap();
let segment = segment.save_in(temp_dir.path()).unwrap();
index.readonly_segments.push(segment);
assert_eq!(collect_changed_paths(&index, GlobalCommitPosition(0)), None);
}
#[test]
fn test_composite_empty_changed_paths() {
let temp_dir = new_temp_dir();
let mut index = CompositeChangedPathIndex::empty(GlobalCommitPosition(0));
index.make_mutable();
index.add_changed_paths(vec![]);
// An empty paths table can be serialized/deserialized
assert_eq!(index.num_commits(), 1);
index.save_in(temp_dir.path()).unwrap();
assert_eq!(
collect_changed_paths(&index, GlobalCommitPosition(0)),
Some(vec![])
);
}
#[test_case(false; "with mutable")]
#[test_case(true; "fully readonly")]
fn test_composite_segmented(on_disk: bool) {
let temp_dir = new_temp_dir();
let mut index = CompositeChangedPathIndex::empty(GlobalCommitPosition(1));
index.make_mutable();
index.add_changed_paths(vec![repo_path_buf("b")]);
index.save_in(temp_dir.path()).unwrap();
index.make_mutable();
index.add_changed_paths(vec![repo_path_buf("c")]);
index.add_changed_paths(vec![repo_path_buf("a/b"), repo_path_buf("b")]);
index.save_in(temp_dir.path()).unwrap();
index.make_mutable();
index.add_changed_paths(vec![repo_path_buf("d")]);
index.add_changed_paths(vec![repo_path_buf("a/c"), repo_path_buf("c")]);
if on_disk {
index.save_in(temp_dir.path()).unwrap();
assert!(index.mutable_segment.is_none());
assert_eq!(index.readonly_segments.len(), 3);
assert_eq!(index.next_mutable_commit_pos(), None);
} else {
assert_eq!(index.readonly_segments.len(), 2);
assert!(index.mutable_segment.is_some());
}
assert_eq!(index.num_commits(), 5);
assert_eq!(
index.readonly_segments[0].paths().collect_vec(),
[repo_path("b")]
);
assert_eq!(
index.readonly_segments[1].paths().collect_vec(),
[repo_path("a/b"), repo_path("b"), repo_path("c")]
);
if on_disk {
assert_eq!(
index.readonly_segments[2].paths().collect_vec(),
[repo_path("a/c"), repo_path("c"), repo_path("d")]
);
}
assert_eq!(collect_changed_paths(&index, GlobalCommitPosition(0)), None);
assert_eq!(
collect_changed_paths(&index, GlobalCommitPosition(1)),
Some(vec![repo_path("b")])
);
assert_eq!(
collect_changed_paths(&index, GlobalCommitPosition(2)),
Some(vec![repo_path("c")])
);
assert_eq!(
collect_changed_paths(&index, GlobalCommitPosition(3)),
Some(vec![repo_path("a/b"), repo_path("b")])
);
assert_eq!(
collect_changed_paths(&index, GlobalCommitPosition(4)),
Some(vec![repo_path("d")])
);
assert_eq!(
collect_changed_paths(&index, GlobalCommitPosition(5)),
Some(vec![repo_path("a/c"), repo_path("c")])
);
assert_eq!(collect_changed_paths(&index, GlobalCommitPosition(6)), None);
}
#[test]
fn test_composite_squash_segments() {
let temp_dir = new_temp_dir();
let mut index = CompositeChangedPathIndex::empty(GlobalCommitPosition(0));
index.make_mutable();
index.add_changed_paths(vec![repo_path_buf("0")]);
index.maybe_squash_with_ancestors();
index.save_in(temp_dir.path()).unwrap();
assert_eq!(index.readonly_segments.len(), 1);
assert_eq!(index.readonly_segments[0].num_local_commits(), 1);
index.make_mutable();
index.add_changed_paths(vec![repo_path_buf("1")]);
index.maybe_squash_with_ancestors();
index.save_in(temp_dir.path()).unwrap();
assert_eq!(index.readonly_segments.len(), 1);
assert_eq!(index.readonly_segments[0].num_local_commits(), 2);
index.make_mutable();
index.add_changed_paths(vec![repo_path_buf("2")]);
index.maybe_squash_with_ancestors();
index.save_in(temp_dir.path()).unwrap();
assert_eq!(index.readonly_segments.len(), 1);
assert_eq!(index.readonly_segments[0].num_local_commits(), 3);
index.make_mutable();
index.add_changed_paths(vec![repo_path_buf("3")]);
index.maybe_squash_with_ancestors();
index.save_in(temp_dir.path()).unwrap();
assert_eq!(index.readonly_segments.len(), 2);
assert_eq!(index.readonly_segments[0].num_local_commits(), 3);
assert_eq!(index.readonly_segments[1].num_local_commits(), 1);
index.make_mutable();
index.add_changed_paths(vec![repo_path_buf("4")]);
index.add_changed_paths(vec![repo_path_buf("5")]);
index.maybe_squash_with_ancestors();
index.save_in(temp_dir.path()).unwrap();
assert_eq!(index.readonly_segments.len(), 1);
assert_eq!(index.readonly_segments[0].num_local_commits(), 6);
// Squashed segments should preserve the original entries.
assert_eq!(
collect_changed_paths(&index, GlobalCommitPosition(0)),
Some(vec![repo_path("0")])
);
assert_eq!(
collect_changed_paths(&index, GlobalCommitPosition(1)),
Some(vec![repo_path("1")])
);
assert_eq!(
collect_changed_paths(&index, GlobalCommitPosition(2)),
Some(vec![repo_path("2")])
);
assert_eq!(
collect_changed_paths(&index, GlobalCommitPosition(3)),
Some(vec![repo_path("3")])
);
assert_eq!(
collect_changed_paths(&index, GlobalCommitPosition(4)),
Some(vec![repo_path("4")])
);
assert_eq!(
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/default_index/bit_set.rs | lib/src/default_index/bit_set.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cmp::min;
use super::composite::CompositeCommitIndex;
use super::entry::GlobalCommitPosition;
/// Unit of buffer allocation, which is 4kB.
const PAGE_SIZE_IN_BITS: u32 = 4096 * 8;
const PAGE_SIZE_IN_WORDS: u32 = PAGE_SIZE_IN_BITS / u64::BITS;
/// Bit set of [`GlobalCommitPosition`]s.
#[derive(Clone, Debug)]
pub(super) struct PositionsBitSet {
data: Vec<u64>,
bitset_len: u32,
}
impl PositionsBitSet {
/// Creates bit set of the specified capacity.
pub fn with_capacity(len: u32) -> Self {
Self {
data: vec![],
bitset_len: u32::div_ceil(len, u64::BITS),
}
}
/// Creates bit set with the maximum position.
pub fn with_max_pos(max_pos: GlobalCommitPosition) -> Self {
assert_ne!(max_pos, GlobalCommitPosition::MAX);
Self::with_capacity(max_pos.0 + 1)
}
fn ensure_data(&mut self, bitset_pos: u32) {
if usize::try_from(bitset_pos).unwrap() < self.data.len() {
return;
}
// Do not pre-allocate large buffer which might not be used. We could
// manage buffer chunks as Vec<Vec<u64>>, but the extra indirection
// appeared to make get() slow.
let new_len = usize::try_from(min(
u32::div_ceil(bitset_pos + 1, PAGE_SIZE_IN_WORDS) * PAGE_SIZE_IN_WORDS,
self.bitset_len,
))
.unwrap();
if self.data.is_empty() {
self.data = vec![0; new_len]; // request zeroed page
} else {
self.data.resize(new_len, 0); // realloc + copy
}
}
fn to_global_pos(&self, (bitset_pos, bit_pos): (u32, u32)) -> GlobalCommitPosition {
let bitset_rev_pos = u32::checked_sub(self.bitset_len, bitset_pos + 1)
.expect("bitset_pos < self.bitset_len");
GlobalCommitPosition(bitset_rev_pos * u64::BITS + bit_pos)
}
fn to_bitset_pos(&self, pos: GlobalCommitPosition) -> (u32, u32) {
let bitset_rev_pos = pos.0 / u64::BITS;
let bit_pos = pos.0 % u64::BITS;
let bitset_pos = u32::checked_sub(self.bitset_len, bitset_rev_pos + 1)
.expect("bitset_rev_pos < self.bitset_len");
(bitset_pos, bit_pos)
}
/// Returns `true` if the given `pos` is set.
///
/// Panics if the `pos` exceeds the capacity.
pub fn get(&self, pos: GlobalCommitPosition) -> bool {
self.get_bit(self.to_bitset_pos(pos))
}
fn get_bit(&self, (bitset_pos, bit_pos): (u32, u32)) -> bool {
let bit = 1_u64 << bit_pos;
match self.data.get(usize::try_from(bitset_pos).unwrap()) {
Some(word) => *word & bit != 0,
None => false,
}
}
/// Sets `pos` to true.
///
/// Panics if the `pos` exceeds the capacity.
pub fn set(&mut self, pos: GlobalCommitPosition) {
self.set_bit(self.to_bitset_pos(pos));
}
/// Writes one bit, allocating the containing word on demand.
fn set_bit(&mut self, (bitset_pos, bit_pos): (u32, u32)) {
    self.ensure_data(bitset_pos);
    let index = usize::try_from(bitset_pos).unwrap();
    self.data[index] |= 1_u64 << bit_pos;
}
/// Sets `pos` to true. Returns `true` if the old value was set.
///
/// Panics if the `pos` exceeds the capacity.
pub fn get_set(&mut self, pos: GlobalCommitPosition) -> bool {
    let bit_addr = self.to_bitset_pos(pos);
    self.get_set_bit(bit_addr)
}
/// Test-and-set of one bit, allocating the containing word on demand.
fn get_set_bit(&mut self, (bitset_pos, bit_pos): (u32, u32)) -> bool {
    self.ensure_data(bitset_pos);
    let index = usize::try_from(bitset_pos).unwrap();
    let mask = 1_u64 << bit_pos;
    let was_set = self.data[index] & mask != 0;
    self.data[index] |= mask;
    was_set
}
}
/// Computes ancestors set lazily.
///
/// This is similar to `RevWalk` functionality-wise, but implemented with the
/// different design goals:
///
/// * optimized for dense ancestors set
/// * optimized for testing set membership
/// * no iterator API (which could be implemented on top)
#[derive(Clone, Debug)]
pub(super) struct AncestorsBitSet {
    // Reachability bits; word order is descending by commit position.
    bitset: PositionsBitSet,
    // First word whose bits have not yet been propagated to parents.
    // Words before this index hold final membership answers.
    next_bitset_pos_to_visit: u32,
}
impl AncestorsBitSet {
    /// Creates bit set of the specified capacity.
    pub fn with_capacity(len: u32) -> Self {
        let bitset = PositionsBitSet::with_capacity(len);
        // No heads yet, so there is nothing to visit: start past the end.
        let next_bitset_pos_to_visit = bitset.bitset_len;
        Self {
            bitset,
            next_bitset_pos_to_visit,
        }
    }

    /// Adds head `pos` to the set.
    ///
    /// Panics if the `pos` exceeds the capacity.
    pub fn add_head(&mut self, pos: GlobalCommitPosition) {
        let (bitset_pos, bit_pos) = self.bitset.to_bitset_pos(pos);
        self.bitset.set_bit((bitset_pos, bit_pos));
        // The new head's word (and everything after it) must be (re)visited.
        self.next_bitset_pos_to_visit = min(self.next_bitset_pos_to_visit, bitset_pos);
    }

    /// Returns `true` if the given `pos` is ancestors of the heads.
    ///
    /// Panics if the `pos` exceeds the capacity or has not been visited yet.
    pub fn contains(&self, pos: GlobalCommitPosition) -> bool {
        let (bitset_pos, bit_pos) = self.bitset.to_bitset_pos(pos);
        // Membership is only final for words that have already been visited.
        assert!(bitset_pos < self.next_bitset_pos_to_visit);
        self.bitset.get_bit((bitset_pos, bit_pos))
    }

    /// Updates set by visiting ancestors until the given `to_visit_pos`.
    pub fn visit_until(
        &mut self,
        index: &CompositeCommitIndex,
        to_visit_pos: GlobalCommitPosition,
    ) {
        let (last_bitset_pos_to_visit, _) = self.bitset.to_bitset_pos(to_visit_pos);
        if last_bitset_pos_to_visit < self.next_bitset_pos_to_visit {
            return; // already visited up to (and past) the requested word
        }
        self.bitset.ensure_data(last_bitset_pos_to_visit);
        for visiting_bitset_pos in self.next_bitset_pos_to_visit..=last_bitset_pos_to_visit {
            // Words are ordered by descending commit position, so scanning
            // bits from the MSB walks commits from newest to oldest.
            let mut unvisited_bits =
                self.bitset.data[usize::try_from(visiting_bitset_pos).unwrap()];
            while unvisited_bits != 0 {
                let bit_pos = u64::BITS - unvisited_bits.leading_zeros() - 1; // from MSB
                unvisited_bits ^= 1_u64 << bit_pos;
                let current_pos = self.bitset.to_global_pos((visiting_bitset_pos, bit_pos));
                for parent_pos in index.entry_by_pos(current_pos).parent_positions() {
                    assert!(parent_pos < current_pos);
                    let (parent_bitset_pos, parent_bit_pos) = self.bitset.to_bitset_pos(parent_pos);
                    self.bitset.ensure_data(parent_bitset_pos);
                    let bit = 1_u64 << parent_bit_pos;
                    self.bitset.data[usize::try_from(parent_bitset_pos).unwrap()] |= bit;
                    // A parent landing in the word currently being scanned
                    // must be picked up within this same pass.
                    if visiting_bitset_pos == parent_bitset_pos {
                        unvisited_bits |= bit;
                    }
                }
            }
        }
        self.next_bitset_pos_to_visit = last_bitset_pos_to_visit + 1;
    }
}
#[cfg(test)]
mod tests {
use super::super::composite::AsCompositeIndex as _;
use super::super::mutable::DefaultMutableIndex;
use super::super::readonly::FieldLengths;
use super::*;
use crate::backend::ChangeId;
use crate::backend::CommitId;
/// Generator of unique 16-byte CommitId excluding root id
fn commit_id_generator() -> impl FnMut() -> CommitId {
    // Counter starts at 1 so the all-zeros (root) id is never produced.
    let mut next = 1_u128;
    move || {
        let id = CommitId::new(next.to_le_bytes().into());
        next += 1;
        id
    }
}
/// Generator of unique 16-byte ChangeId excluding root id
fn change_id_generator() -> impl FnMut() -> ChangeId {
    // Counter starts at 1 so the all-zeros (root) id is never produced.
    let mut next = 1_u128;
    move || {
        let id = ChangeId::new(next.to_le_bytes().into());
        next += 1;
        id
    }
}
#[test]
fn test_positions_bit_set() {
    // Create with empty capacity, which is useless, but shouldn't panic
    let _set = PositionsBitSet::with_capacity(0);
    let mut set = PositionsBitSet::with_capacity(128);
    // Fresh set: everything reads as unset.
    assert!(!set.get(GlobalCommitPosition(0)));
    assert!(!set.get(GlobalCommitPosition(127)));
    set.set(GlobalCommitPosition(0));
    assert!(set.get(GlobalCommitPosition(0)));
    assert!(!set.get(GlobalCommitPosition(1)));
    assert!(!set.get(GlobalCommitPosition(127)));
    // get_set() returns the previous value, then sets the bit.
    let old = set.get_set(GlobalCommitPosition(127));
    assert!(!old);
    // Neighboring word boundary positions stay untouched.
    assert!(!set.get(GlobalCommitPosition(63)));
    assert!(!set.get(GlobalCommitPosition(64)));
    assert!(set.get(GlobalCommitPosition(127)));
    let old = set.get_set(GlobalCommitPosition(127));
    assert!(old);
}
#[test]
fn test_positions_bit_set_allocation() {
    // Exactly one page
    let mut set = PositionsBitSet::with_capacity(PAGE_SIZE_IN_BITS);
    // No allocation happens until the first write; reads alone stay empty.
    assert!(set.data.is_empty());
    assert!(!set.get(GlobalCommitPosition(0)));
    assert!(!set.get(GlobalCommitPosition(PAGE_SIZE_IN_BITS - 1)));
    assert!(set.data.is_empty());
    set.set(GlobalCommitPosition(0));
    assert_eq!(set.data.len(), usize::try_from(PAGE_SIZE_IN_WORDS).unwrap());
    set.set(GlobalCommitPosition(PAGE_SIZE_IN_BITS - 1));
    assert_eq!(set.data.len(), usize::try_from(PAGE_SIZE_IN_WORDS).unwrap());
    assert!(set.get(GlobalCommitPosition(0)));
    assert!(set.get(GlobalCommitPosition(PAGE_SIZE_IN_BITS - 1)));

    // Two pages
    let mut set = PositionsBitSet::with_capacity(PAGE_SIZE_IN_BITS + 1);
    assert!(set.data.is_empty());
    // High positions map to low word indexes, so these fit in one page.
    set.set(GlobalCommitPosition(u64::BITS));
    set.set(GlobalCommitPosition(PAGE_SIZE_IN_BITS));
    assert_eq!(set.data.len(), usize::try_from(PAGE_SIZE_IN_WORDS).unwrap());
    // Touching a lower position forces the second (partial) page.
    let old = set.get_set(GlobalCommitPosition(u64::BITS - 1));
    assert!(!old);
    assert_eq!(
        set.data.len(),
        usize::try_from(PAGE_SIZE_IN_WORDS + 1).unwrap()
    );
    assert!(set.get(GlobalCommitPosition(u64::BITS - 1)));
    assert!(set.get(GlobalCommitPosition(u64::BITS)));
    assert!(set.get(GlobalCommitPosition(PAGE_SIZE_IN_BITS)));

    // Exactly three pages
    let mut set = PositionsBitSet::with_capacity(PAGE_SIZE_IN_BITS * 3);
    assert!(set.data.is_empty());
    // Each write to a lower position range grows the buffer by one page.
    set.set(GlobalCommitPosition(PAGE_SIZE_IN_BITS * 2));
    assert_eq!(set.data.len(), usize::try_from(PAGE_SIZE_IN_WORDS).unwrap());
    set.set(GlobalCommitPosition(PAGE_SIZE_IN_BITS));
    assert_eq!(
        set.data.len(),
        usize::try_from(PAGE_SIZE_IN_WORDS * 2).unwrap()
    );
    set.set(GlobalCommitPosition(0));
    assert_eq!(
        set.data.len(),
        usize::try_from(PAGE_SIZE_IN_WORDS * 3).unwrap()
    );
    assert!(set.get(GlobalCommitPosition(0)));
    assert!(set.get(GlobalCommitPosition(PAGE_SIZE_IN_BITS)));
    assert!(set.get(GlobalCommitPosition(PAGE_SIZE_IN_BITS * 2)));
}
#[test]
fn test_ancestors_bit_set() {
    let mut new_commit_id = commit_id_generator();
    let mut new_change_id = change_id_generator();
    let mut mutable_index = DefaultMutableIndex::full(FieldLengths {
        commit_id: 16,
        change_id: 16,
    });
    // Build a 257-commit graph spanning 5 bitset words (at 64 bits/word):
    //   F         F = 256
    //   |\        E = 193,194,195,..,254
    //   E |   D   D = 192,255
    //   | |  /    C = 66,68,70,..,190
    //   B C       B = 65,67,69,..,189,191
    //   |/        A = 0,1,2,..,64
    //   A
    let id_a0 = new_commit_id();
    mutable_index.add_commit_data(id_a0.clone(), new_change_id(), &[]);
    let id_a64 = (1..=64).fold(id_a0.clone(), |parent_id, i| {
        assert_eq!(mutable_index.num_commits(), i);
        let id = new_commit_id();
        mutable_index.add_commit_data(id.clone(), new_change_id(), &[parent_id]);
        id
    });
    // B and C grow as interleaved linear chains off A.
    let (id_b189, id_c190) = (65..=190).step_by(2).fold(
        (id_a64.clone(), id_a64.clone()),
        |(parent_id_b, parent_id_c), i| {
            assert_eq!(mutable_index.num_commits(), i);
            let id_b = new_commit_id();
            let id_c = new_commit_id();
            mutable_index.add_commit_data(id_b.clone(), new_change_id(), &[parent_id_b]);
            mutable_index.add_commit_data(id_c.clone(), new_change_id(), &[parent_id_c]);
            (id_b, id_c)
        },
    );
    let id_b191 = new_commit_id();
    mutable_index.add_commit_data(id_b191.clone(), new_change_id(), &[id_b189]);
    let id_d192 = new_commit_id();
    mutable_index.add_commit_data(
        id_d192.clone(),
        new_change_id(),
        std::slice::from_ref(&id_c190),
    );
    let id_e254 = (193..=254).fold(id_b191.clone(), |parent_id, i| {
        assert_eq!(mutable_index.num_commits(), i);
        let id = new_commit_id();
        mutable_index.add_commit_data(id.clone(), new_change_id(), &[parent_id]);
        id
    });
    let id_d255 = new_commit_id();
    mutable_index.add_commit_data(
        id_d255.clone(),
        new_change_id(),
        std::slice::from_ref(&id_d192),
    );
    // F is the merge of C and E.
    let id_f256 = new_commit_id();
    mutable_index.add_commit_data(
        id_f256.clone(),
        new_change_id(),
        &[id_c190.clone(), id_e254.clone()],
    );
    assert_eq!(mutable_index.num_commits(), 257);
    let index = mutable_index.as_composite().commits();
    let to_pos = |id: &CommitId| index.commit_id_to_pos(id).unwrap();
    let new_ancestors_set = |heads: &[&CommitId]| {
        let mut set = AncestorsBitSet::with_capacity(index.num_commits());
        for &id in heads {
            set.add_head(to_pos(id));
        }
        set
    };

    // Nothing reachable
    let set = new_ancestors_set(&[]);
    // With no heads, all 5 words count as (trivially) visited.
    assert_eq!(set.next_bitset_pos_to_visit, 5);
    for pos in (0..=256).map(GlobalCommitPosition) {
        assert!(!set.contains(pos), "{pos:?} should be unreachable");
    }

    // All reachable
    let mut set = new_ancestors_set(&[&id_f256, &id_d255]);
    assert_eq!(set.next_bitset_pos_to_visit, 0);
    set.visit_until(index, to_pos(&id_f256));
    assert_eq!(set.next_bitset_pos_to_visit, 1);
    assert!(set.contains(to_pos(&id_f256)));
    set.visit_until(index, to_pos(&id_d192));
    assert_eq!(set.next_bitset_pos_to_visit, 2);
    assert!(set.contains(to_pos(&id_e254)));
    assert!(set.contains(to_pos(&id_d255)));
    assert!(set.contains(to_pos(&id_d192)));
    set.visit_until(index, to_pos(&id_a0));
    assert_eq!(set.next_bitset_pos_to_visit, 5);
    set.visit_until(index, to_pos(&id_f256)); // should be noop
    assert_eq!(set.next_bitset_pos_to_visit, 5);
    for pos in (0..=256).map(GlobalCommitPosition) {
        assert!(set.contains(pos), "{pos:?} should be reachable");
    }

    // A, B, C, E, F are reachable
    let mut set = new_ancestors_set(&[&id_f256]);
    assert_eq!(set.next_bitset_pos_to_visit, 0);
    set.visit_until(index, to_pos(&id_f256));
    assert_eq!(set.next_bitset_pos_to_visit, 1);
    assert!(set.contains(to_pos(&id_f256)));
    set.visit_until(index, to_pos(&id_d192));
    assert_eq!(set.next_bitset_pos_to_visit, 2);
    // The D chain is not an ancestor of F.
    assert!(!set.contains(to_pos(&id_d255)));
    assert!(!set.contains(to_pos(&id_d192)));
    set.visit_until(index, to_pos(&id_c190));
    assert_eq!(set.next_bitset_pos_to_visit, 3);
    assert!(set.contains(to_pos(&id_c190)));
    set.visit_until(index, to_pos(&id_a64));
    assert_eq!(set.next_bitset_pos_to_visit, 4);
    assert!(set.contains(to_pos(&id_b191)));
    assert!(set.contains(to_pos(&id_a64)));
    set.visit_until(index, to_pos(&id_a0));
    assert_eq!(set.next_bitset_pos_to_visit, 5);
    assert!(set.contains(to_pos(&id_a0)));

    // A, C, D are reachable
    let mut set = new_ancestors_set(&[&id_d255]);
    // D255's word is the second word, so word 0 is already "visited".
    assert_eq!(set.next_bitset_pos_to_visit, 1);
    assert!(!set.contains(to_pos(&id_f256)));
    set.visit_until(index, to_pos(&id_e254));
    assert_eq!(set.next_bitset_pos_to_visit, 2);
    assert!(!set.contains(to_pos(&id_e254)));
    set.visit_until(index, to_pos(&id_d255));
    assert_eq!(set.next_bitset_pos_to_visit, 2);
    assert!(set.contains(to_pos(&id_d255)));
    set.visit_until(index, to_pos(&id_b191));
    assert_eq!(set.next_bitset_pos_to_visit, 3);
    assert!(!set.contains(to_pos(&id_b191)));
    set.visit_until(index, to_pos(&id_c190));
    assert_eq!(set.next_bitset_pos_to_visit, 3);
    assert!(set.contains(to_pos(&id_c190)));
    set.visit_until(index, to_pos(&id_a0));
    assert_eq!(set.next_bitset_pos_to_visit, 5);
    assert!(set.contains(to_pos(&id_a64)));
    assert!(set.contains(to_pos(&id_a0)));
}
#[test]
fn test_ancestors_bit_set_allocation() {
    let mut new_commit_id = commit_id_generator();
    let mut new_change_id = change_id_generator();
    let mut mutable_index = DefaultMutableIndex::full(FieldLengths {
        commit_id: 16,
        change_id: 16,
    });
    // Linear history of two-page size
    let id_0 = new_commit_id();
    mutable_index.add_commit_data(id_0.clone(), new_change_id(), &[]);
    (1..=PAGE_SIZE_IN_BITS).fold(id_0.clone(), |parent_id, i| {
        assert_eq!(mutable_index.num_commits(), i);
        let id = new_commit_id();
        mutable_index.add_commit_data(id.clone(), new_change_id(), &[parent_id]);
        id
    });
    let index = mutable_index.as_composite().commits();
    let mut set = AncestorsBitSet::with_capacity(index.num_commits());
    // No heads yet: nothing needs visiting.
    assert_eq!(set.next_bitset_pos_to_visit, PAGE_SIZE_IN_WORDS + 1);
    // Mark the head commit
    set.add_head(GlobalCommitPosition(PAGE_SIZE_IN_BITS));
    assert_eq!(set.next_bitset_pos_to_visit, 0);
    // add_head() only allocates the first page.
    assert_eq!(
        set.bitset.data.len(),
        usize::try_from(PAGE_SIZE_IN_WORDS).unwrap()
    );
    set.visit_until(index, GlobalCommitPosition(PAGE_SIZE_IN_BITS));
    assert_eq!(set.next_bitset_pos_to_visit, 1);
    assert_eq!(
        set.bitset.data.len(),
        usize::try_from(PAGE_SIZE_IN_WORDS).unwrap()
    );
    assert!(set.contains(GlobalCommitPosition(PAGE_SIZE_IN_BITS)));
    set.visit_until(index, GlobalCommitPosition(PAGE_SIZE_IN_BITS - 1));
    assert_eq!(set.next_bitset_pos_to_visit, 2);
    assert_eq!(
        set.bitset.data.len(),
        usize::try_from(PAGE_SIZE_IN_WORDS).unwrap()
    );
    assert!(set.contains(GlobalCommitPosition(PAGE_SIZE_IN_BITS - 1)));
    // Parent link across page boundary
    set.visit_until(index, GlobalCommitPosition(u64::BITS));
    assert_eq!(set.next_bitset_pos_to_visit, PAGE_SIZE_IN_WORDS);
    // Crossing into the last word forces the second (partial) page.
    assert_eq!(
        set.bitset.data.len(),
        usize::try_from(PAGE_SIZE_IN_WORDS + 1).unwrap()
    );
    assert!(set.contains(GlobalCommitPosition(u64::BITS)));
    set.visit_until(index, GlobalCommitPosition(0));
    assert_eq!(set.next_bitset_pos_to_visit, PAGE_SIZE_IN_WORDS + 1);
    assert_eq!(
        set.bitset.data.len(),
        usize::try_from(PAGE_SIZE_IN_WORDS + 1).unwrap()
    );
    assert!(set.contains(GlobalCommitPosition(0)));
}
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/default_index/composite.rs | lib/src/default_index/composite.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cmp::Ordering;
use std::cmp::Reverse;
use std::collections::BinaryHeap;
use std::collections::binary_heap;
use std::iter;
use std::mem;
use std::ops::Range;
use std::sync::Arc;
use std::sync::Mutex;
use itertools::Itertools as _;
use ref_cast::RefCastCustom;
use ref_cast::ref_cast_custom;
use super::bit_set::AncestorsBitSet;
use super::bit_set::PositionsBitSet;
use super::changed_path::CompositeChangedPathIndex;
use super::entry::CommitIndexEntry;
use super::entry::GlobalCommitPosition;
use super::entry::LocalCommitPosition;
use super::entry::SmallGlobalCommitPositionsVec;
use super::entry::SmallLocalCommitPositionsVec;
use super::mutable::MutableCommitIndexSegment;
use super::readonly::ReadonlyCommitIndexSegment;
use super::rev_walk::filter_slice_by_range;
use super::revset_engine;
use crate::backend::ChangeId;
use crate::backend::CommitId;
use crate::hex_util;
use crate::index::ChangeIdIndex;
use crate::index::Index;
use crate::index::IndexResult;
use crate::index::ResolvedChangeState;
use crate::index::ResolvedChangeTargets;
use crate::object_id::HexPrefix;
use crate::object_id::ObjectId as _;
use crate::object_id::PrefixResolution;
use crate::object_id::id_type;
use crate::repo_path::RepoPathBuf;
use crate::revset::ResolvedExpression;
use crate::revset::Revset;
use crate::revset::RevsetEvaluationError;
use crate::store::Store;
// Identifier type for a commit index segment, generated by the `id_type!`
// macro with hex rendering.
id_type!(pub(super) CommitIndexSegmentId { hex() });
/// One layer of the layered commit index: lookups by commit/change id and
/// per-commit metadata, local to this segment.
pub(super) trait CommitIndexSegment: Send + Sync {
    /// Number of commits indexed by ancestor segments. Local positions in
    /// this segment are offset by this amount to form global positions.
    fn num_parent_commits(&self) -> u32;

    /// Number of commits indexed by this segment alone.
    fn num_local_commits(&self) -> u32;

    /// The next-older readonly segment, if any.
    fn parent_file(&self) -> Option<&Arc<ReadonlyCommitIndexSegment>>;

    /// Looks up a commit id in this segment only.
    fn commit_id_to_pos(&self, commit_id: &CommitId) -> Option<LocalCommitPosition>;

    /// Suppose the given `commit_id` exists, returns the previous and next
    /// commit ids in lexicographical order.
    fn resolve_neighbor_commit_ids(
        &self,
        commit_id: &CommitId,
    ) -> (Option<CommitId>, Option<CommitId>);

    /// Resolves a hex prefix against this segment's commit ids.
    fn resolve_commit_id_prefix(&self, prefix: &HexPrefix) -> PrefixResolution<CommitId>;

    /// Like `resolve_neighbor_commit_ids`, but over change ids.
    fn resolve_neighbor_change_ids(
        &self,
        change_id: &ChangeId,
    ) -> (Option<ChangeId>, Option<ChangeId>);

    // Returns positions in ascending order.
    fn resolve_change_id_prefix(
        &self,
        prefix: &HexPrefix,
    ) -> PrefixResolution<(ChangeId, SmallLocalCommitPositionsVec)>;

    /// Generation number (max parent generation + 1) of a local commit.
    fn generation_number(&self, local_pos: LocalCommitPosition) -> u32;

    /// Commit id of a local commit.
    fn commit_id(&self, local_pos: LocalCommitPosition) -> CommitId;

    /// Change id of a local commit.
    fn change_id(&self, local_pos: LocalCommitPosition) -> ChangeId;

    /// Number of parents of a local commit.
    fn num_parents(&self, local_pos: LocalCommitPosition) -> u32;

    /// Global positions of a local commit's parents.
    fn parent_positions(&self, local_pos: LocalCommitPosition) -> SmallGlobalCommitPositionsVec;
}
/// Type-erased commit index segment (readonly or mutable).
pub(super) type DynCommitIndexSegment = dyn CommitIndexSegment;
/// Abstraction over owned and borrowed types that can be cheaply converted to
/// a `CompositeIndex` reference.
pub(super) trait AsCompositeIndex {
    /// Returns reference wrapper that provides global access to this index.
    fn as_composite(&self) -> &CompositeIndex;
}
// Forward through shared references so callers can pass either owned
// indexes or borrows interchangeably.
impl<T: AsCompositeIndex + ?Sized> AsCompositeIndex for &T {
    fn as_composite(&self) -> &CompositeIndex {
        <T as AsCompositeIndex>::as_composite(self)
    }
}
// Same forwarding for mutable references.
impl<T: AsCompositeIndex + ?Sized> AsCompositeIndex for &mut T {
    fn as_composite(&self) -> &CompositeIndex {
        <T as AsCompositeIndex>::as_composite(self)
    }
}
/// Provides an index of both commit IDs and change IDs.
///
/// We refer to this as a composite index because it's a composite of multiple
/// nested index segments where each parent segment is roughly twice as large
/// as its child segment. This provides a good balance between read and write
/// performance.
#[derive(RefCastCustom)]
#[repr(transparent)]
pub(super) struct CompositeCommitIndex(DynCommitIndexSegment);
impl CompositeCommitIndex {
    /// Wraps a segment reference as a composite index reference
    /// (zero-cost cast generated by `ref_cast`).
    #[ref_cast_custom]
    pub(super) const fn new(segment: &DynCommitIndexSegment) -> &Self;

    /// Iterates parent and its ancestor readonly index segments.
    pub(super) fn ancestor_files_without_local(
        &self,
    ) -> impl Iterator<Item = &Arc<ReadonlyCommitIndexSegment>> {
        let parent_file = self.0.parent_file();
        iter::successors(parent_file, |file| file.parent_file())
    }

    /// Iterates self and its ancestor index segments.
    pub(super) fn ancestor_index_segments(&self) -> impl Iterator<Item = &DynCommitIndexSegment> {
        iter::once(&self.0).chain(
            self.ancestor_files_without_local()
                .map(|file| file.as_ref() as &DynCommitIndexSegment),
        )
    }

    /// Total number of indexed commits across all segments.
    pub fn num_commits(&self) -> u32 {
        self.0.num_parent_commits() + self.0.num_local_commits()
    }

    /// Returns `true` if the given commit id is indexed.
    pub fn has_id(&self, commit_id: &CommitId) -> bool {
        self.commit_id_to_pos(commit_id).is_some()
    }

    /// Looks up an entry by global position. Panics if out of range.
    pub fn entry_by_pos(&self, pos: GlobalCommitPosition) -> CommitIndexEntry<'_> {
        // The first segment whose parent-commit count does not exceed `pos`
        // is the segment that locally stores this position.
        self.ancestor_index_segments()
            .find_map(|segment| {
                u32::checked_sub(pos.0, segment.num_parent_commits())
                    .map(LocalCommitPosition)
                    .map(|local_pos| CommitIndexEntry::new(segment, pos, local_pos))
            })
            .unwrap()
    }

    /// Looks up an entry by commit id, or `None` if not indexed.
    pub fn entry_by_id(&self, commit_id: &CommitId) -> Option<CommitIndexEntry<'_>> {
        self.ancestor_index_segments().find_map(|segment| {
            let local_pos = segment.commit_id_to_pos(commit_id)?;
            let pos = GlobalCommitPosition(local_pos.0 + segment.num_parent_commits());
            Some(CommitIndexEntry::new(segment, pos, local_pos))
        })
    }

    /// Maps a commit id to its global position, or `None` if not indexed.
    pub fn commit_id_to_pos(&self, commit_id: &CommitId) -> Option<GlobalCommitPosition> {
        self.ancestor_index_segments().find_map(|segment| {
            let LocalCommitPosition(local_pos) = segment.commit_id_to_pos(commit_id)?;
            let pos = GlobalCommitPosition(local_pos + segment.num_parent_commits());
            Some(pos)
        })
    }

    /// Resolves a hex prefix against commit ids across all segments.
    pub fn resolve_commit_id_prefix(&self, prefix: &HexPrefix) -> PrefixResolution<CommitId> {
        self.ancestor_index_segments()
            .fold(PrefixResolution::NoMatch, |acc_match, segment| {
                if acc_match == PrefixResolution::AmbiguousMatch {
                    acc_match // avoid checking the parent file(s)
                } else {
                    let local_match = segment.resolve_commit_id_prefix(prefix);
                    acc_match.plus(&local_match)
                }
            })
    }

    /// Suppose the given `commit_id` exists, returns the minimum prefix length
    /// to disambiguate it. The length to be returned is a number of hexadecimal
    /// digits.
    ///
    /// If the given `commit_id` doesn't exist, this will return the prefix
    /// length that never matches with any commit ids.
    pub(super) fn shortest_unique_commit_id_prefix_len(&self, commit_id: &CommitId) -> usize {
        let (prev_id, next_id) = self.resolve_neighbor_commit_ids(commit_id);
        // One digit more than the longest prefix shared with a neighbor.
        itertools::chain(prev_id, next_id)
            .map(|id| hex_util::common_hex_len(commit_id.as_bytes(), id.as_bytes()) + 1)
            .max()
            .unwrap_or(0)
    }

    /// Suppose the given `commit_id` exists, returns the previous and next
    /// commit ids in lexicographical order.
    pub(super) fn resolve_neighbor_commit_ids(
        &self,
        commit_id: &CommitId,
    ) -> (Option<CommitId>, Option<CommitId>) {
        // Combine per-segment neighbors: the closest one on either side wins.
        self.ancestor_index_segments()
            .map(|segment| segment.resolve_neighbor_commit_ids(commit_id))
            .reduce(|(acc_prev_id, acc_next_id), (prev_id, next_id)| {
                (
                    acc_prev_id.into_iter().chain(prev_id).max(),
                    acc_next_id.into_iter().chain(next_id).min(),
                )
            })
            .unwrap()
    }

    /// Suppose the given `change_id` exists, returns the minimum prefix length
    /// to disambiguate it within all the indexed ids including hidden ones.
    pub(super) fn shortest_unique_change_id_prefix_len(&self, change_id: &ChangeId) -> usize {
        let (prev_id, next_id) = self.resolve_neighbor_change_ids(change_id);
        itertools::chain(prev_id, next_id)
            .map(|id| hex_util::common_hex_len(change_id.as_bytes(), id.as_bytes()) + 1)
            .max()
            .unwrap_or(0)
    }

    /// Suppose the given `change_id` exists, returns the previous and next
    /// change ids in lexicographical order. The returned change ids may be
    /// hidden.
    pub(super) fn resolve_neighbor_change_ids(
        &self,
        change_id: &ChangeId,
    ) -> (Option<ChangeId>, Option<ChangeId>) {
        self.ancestor_index_segments()
            .map(|segment| segment.resolve_neighbor_change_ids(change_id))
            .reduce(|(acc_prev_id, acc_next_id), (prev_id, next_id)| {
                (
                    acc_prev_id.into_iter().chain(prev_id).max(),
                    acc_next_id.into_iter().chain(next_id).min(),
                )
            })
            .unwrap()
    }

    /// Resolves the given change id `prefix` to the associated entries. The
    /// returned entries may be hidden.
    ///
    /// The returned index positions are sorted in descending order.
    pub(super) fn resolve_change_id_prefix(
        &self,
        prefix: &HexPrefix,
    ) -> PrefixResolution<(ChangeId, SmallGlobalCommitPositionsVec)> {
        use PrefixResolution::*;
        self.ancestor_index_segments()
            .fold(NoMatch, |acc_match, segment| {
                if acc_match == AmbiguousMatch {
                    return acc_match; // avoid checking the parent file(s)
                }
                let to_global_pos = {
                    let num_parent_commits = segment.num_parent_commits();
                    move |LocalCommitPosition(pos)| GlobalCommitPosition(pos + num_parent_commits)
                };
                // Similar to PrefixResolution::plus(), but merges matches of the same id.
                match (acc_match, segment.resolve_change_id_prefix(prefix)) {
                    (NoMatch, local_match) => local_match.map(|(id, positions)| {
                        // Segment positions are ascending; reverse for the
                        // descending output order.
                        (id, positions.into_iter().rev().map(to_global_pos).collect())
                    }),
                    (acc_match, NoMatch) => acc_match,
                    (AmbiguousMatch, _) => AmbiguousMatch,
                    (_, AmbiguousMatch) => AmbiguousMatch,
                    (SingleMatch((id1, _)), SingleMatch((id2, _))) if id1 != id2 => AmbiguousMatch,
                    (SingleMatch((id, mut acc_positions)), SingleMatch((_, local_positions))) => {
                        acc_positions.extend(local_positions.into_iter().rev().map(to_global_pos));
                        SingleMatch((id, acc_positions))
                    }
                }
            })
    }

    /// Returns `true` if `ancestor_id` is an ancestor of `descendant_id`.
    /// Panics if either id is not indexed.
    pub fn is_ancestor(&self, ancestor_id: &CommitId, descendant_id: &CommitId) -> bool {
        let ancestor_pos = self.commit_id_to_pos(ancestor_id).unwrap();
        let descendant_pos = self.commit_id_to_pos(descendant_id).unwrap();
        self.is_ancestor_pos(ancestor_pos, descendant_pos)
    }

    /// Position-based ancestry test: DFS from the descendant towards roots,
    /// pruned by position order and generation number.
    pub(super) fn is_ancestor_pos(
        &self,
        ancestor_pos: GlobalCommitPosition,
        descendant_pos: GlobalCommitPosition,
    ) -> bool {
        let ancestor_generation = self.entry_by_pos(ancestor_pos).generation_number();
        let mut work = vec![descendant_pos];
        let mut visited = PositionsBitSet::with_max_pos(descendant_pos);
        while let Some(descendant_pos) = work.pop() {
            match descendant_pos.cmp(&ancestor_pos) {
                // Positions below the ancestor can never reach it.
                Ordering::Less => continue,
                Ordering::Equal => return true,
                Ordering::Greater => {}
            }
            if visited.get_set(descendant_pos) {
                continue;
            }
            let descendant_entry = self.entry_by_pos(descendant_pos);
            // Generations at or below the ancestor's cannot contain it.
            if descendant_entry.generation_number() <= ancestor_generation {
                continue;
            }
            work.extend(descendant_entry.parent_positions());
        }
        false
    }

    /// Greatest common ancestors of two sets of commit ids.
    /// Panics if any id is not indexed.
    pub fn common_ancestors(&self, set1: &[CommitId], set2: &[CommitId]) -> Vec<CommitId> {
        let pos1 = set1
            .iter()
            .map(|id| self.commit_id_to_pos(id).unwrap())
            .collect_vec();
        let pos2 = set2
            .iter()
            .map(|id| self.commit_id_to_pos(id).unwrap())
            .collect_vec();
        self.common_ancestors_pos(pos1, pos2)
            .iter()
            .map(|pos| self.entry_by_pos(*pos).commit_id())
            .collect()
    }

    /// Computes the greatest common ancestors.
    ///
    /// The returned index positions are sorted in descending order.
    pub(super) fn common_ancestors_pos(
        &self,
        set1: Vec<GlobalCommitPosition>,
        set2: Vec<GlobalCommitPosition>,
    ) -> Vec<GlobalCommitPosition> {
        // Walk both frontiers down in position order; whenever the maxima
        // coincide, that position is a common ancestor.
        let mut items1 = BinaryHeap::from(set1);
        let mut items2 = BinaryHeap::from(set2);
        let mut result = Vec::new();
        while let (Some(&pos1), Some(&pos2)) = (items1.peek(), items2.peek()) {
            match pos1.cmp(&pos2) {
                Ordering::Greater => shift_to_parents(
                    &mut items1,
                    pos1,
                    &self.entry_by_pos(pos1).parent_positions(),
                ),
                Ordering::Less => shift_to_parents(
                    &mut items2,
                    pos2,
                    &self.entry_by_pos(pos2).parent_positions(),
                ),
                Ordering::Equal => {
                    result.push(pos1);
                    dedup_pop(&mut items1).unwrap();
                    dedup_pop(&mut items2).unwrap();
                }
            }
        }
        // Drop common ancestors that are themselves ancestors of others.
        self.heads_pos(result)
    }

    /// Iterates the commit ids of all head (childless) commits.
    pub(super) fn all_heads(&self) -> impl Iterator<Item = CommitId> {
        self.all_heads_pos()
            .map(move |pos| self.entry_by_pos(pos).commit_id())
    }

    /// Iterates positions of all head commits in ascending order.
    pub(super) fn all_heads_pos(&self) -> impl Iterator<Item = GlobalCommitPosition> + use<> {
        let num_commits = self.num_commits();
        // A head is any position that never appears as somebody's parent.
        let mut not_head = PositionsBitSet::with_capacity(num_commits);
        for pos in (0..num_commits).map(GlobalCommitPosition) {
            let entry = self.entry_by_pos(pos);
            for parent_pos in entry.parent_positions() {
                not_head.set(parent_pos);
            }
        }
        (0..num_commits)
            .map(GlobalCommitPosition)
            // TODO: can be optimized to use leading/trailing_ones()
            .filter(move |&pos| !not_head.get(pos))
    }

    /// Filters `candidate_ids` down to those that are heads among themselves.
    /// Panics if any id is not indexed.
    pub fn heads<'a>(
        &self,
        candidate_ids: impl IntoIterator<Item = &'a CommitId>,
    ) -> Vec<CommitId> {
        let mut candidate_positions = candidate_ids
            .into_iter()
            .map(|id| self.commit_id_to_pos(id).unwrap())
            .collect_vec();
        // heads_pos() requires descending order without duplicates.
        candidate_positions.sort_unstable_by_key(|&pos| Reverse(pos));
        candidate_positions.dedup();
        self.heads_pos(candidate_positions)
            .iter()
            .map(|pos| self.entry_by_pos(*pos).commit_id())
            .collect()
    }

    /// Returns the subset of positions in `candidate_positions` which refer to
    /// entries that are heads in the repository.
    ///
    /// The `candidate_positions` must be sorted in descending order, and have
    /// no duplicates. The returned head positions are also sorted in descending
    /// order.
    pub fn heads_pos(
        &self,
        candidate_positions: Vec<GlobalCommitPosition>,
    ) -> Vec<GlobalCommitPosition> {
        debug_assert!(candidate_positions.is_sorted_by(|a, b| a > b));
        // Ancestor walks can stop at the shallowest candidate's generation;
        // nothing at or below it can have a candidate as a strict ancestor.
        let Some(min_generation) = candidate_positions
            .iter()
            .map(|&pos| self.entry_by_pos(pos).generation_number())
            .min()
        else {
            return candidate_positions;
        };
        // Iterate though the candidates by reverse index position, keeping track of the
        // ancestors of already-found heads. If a candidate is an ancestor of an
        // already-found head, then it can be removed.
        let mut parents = BinaryHeap::new();
        let mut heads = Vec::new();
        'outer: for candidate in candidate_positions {
            while let Some(&parent) = parents.peek().filter(|&&parent| parent >= candidate) {
                let entry = self.entry_by_pos(parent);
                if entry.generation_number() <= min_generation {
                    dedup_pop(&mut parents).unwrap();
                } else {
                    shift_to_parents(&mut parents, parent, &entry.parent_positions());
                }
                if parent == candidate {
                    // The candidate is an ancestor of an existing head, so we can skip it.
                    continue 'outer;
                }
            }
            // No parents matched, so this commit is a head.
            let entry = self.entry_by_pos(candidate);
            parents.extend(entry.parent_positions());
            heads.push(candidate);
        }
        heads
    }

    /// Find the heads of a range of positions `roots..heads`, applying a filter
    /// to the commits in the range. The heads are sorted in descending order.
    /// The filter will also be called in descending index position order.
    pub fn heads_from_range_and_filter<E>(
        &self,
        roots: Vec<GlobalCommitPosition>,
        heads: Vec<GlobalCommitPosition>,
        parents_range: &Range<u32>,
        mut filter: impl FnMut(GlobalCommitPosition) -> Result<bool, E>,
    ) -> Result<Vec<GlobalCommitPosition>, E> {
        if heads.is_empty() {
            return Ok(heads);
        }
        let mut wanted_queue = BinaryHeap::from(heads);
        let mut unwanted_queue = BinaryHeap::from(roots);
        let mut found_heads = Vec::new();
        while let Some(&pos) = wanted_queue.peek() {
            // Skip positions reachable from the unwanted (roots) side.
            if shift_to_parents_until(&mut unwanted_queue, self, pos) {
                dedup_pop(&mut wanted_queue);
                continue;
            }
            let entry = self.entry_by_pos(pos);
            if filter(pos)? {
                dedup_pop(&mut wanted_queue);
                // Ancestors of a found head can't be heads themselves.
                unwanted_queue.extend(entry.parent_positions());
                found_heads.push(pos);
            } else {
                // Filtered out: keep searching through the selected parents.
                let parent_positions = entry.parent_positions();
                shift_to_parents(
                    &mut wanted_queue,
                    pos,
                    filter_slice_by_range(&parent_positions, parents_range),
                );
            }
        }
        Ok(found_heads)
    }
}
// Commit half of a CompositeIndex: either an on-disk readonly segment chain
// or an in-memory mutable segment.
#[derive(Clone, Debug)]
enum CompositeCommitIndexSegment {
    Readonly(Arc<ReadonlyCommitIndexSegment>),
    Mutable(Box<MutableCommitIndexSegment>),
}
/// Pairs the commit index with the changed-path index.
#[derive(Clone, Debug)]
pub(super) struct CompositeIndex {
    commits: CompositeCommitIndexSegment,
    changed_paths: CompositeChangedPathIndex,
}
impl CompositeIndex {
    /// Wraps a readonly commit segment together with the changed-path index.
    pub(super) fn from_readonly(
        commits: Arc<ReadonlyCommitIndexSegment>,
        changed_paths: CompositeChangedPathIndex,
    ) -> Self {
        let commits = CompositeCommitIndexSegment::Readonly(commits);
        Self {
            commits,
            changed_paths,
        }
    }

    /// Wraps a mutable commit segment together with the changed-path index.
    pub(super) fn from_mutable(
        commits: Box<MutableCommitIndexSegment>,
        changed_paths: CompositeChangedPathIndex,
    ) -> Self {
        let commits = CompositeCommitIndexSegment::Mutable(commits);
        Self {
            commits,
            changed_paths,
        }
    }

    /// Splits into the mutable commit segment and the changed-path index, or
    /// `None` if the commit segment is readonly.
    pub(super) fn into_mutable(
        self,
    ) -> Option<(Box<MutableCommitIndexSegment>, CompositeChangedPathIndex)> {
        if let CompositeCommitIndexSegment::Mutable(segment) = self.commits {
            Some((segment, self.changed_paths))
        } else {
            None
        }
    }

    /// Returns the commit half of this index.
    pub(super) fn commits(&self) -> &CompositeCommitIndex {
        match &self.commits {
            CompositeCommitIndexSegment::Readonly(segment) => segment.as_composite(),
            CompositeCommitIndexSegment::Mutable(segment) => segment.as_composite(),
        }
    }

    /// Returns the readonly commit segment, or `None` if mutable.
    pub(super) fn readonly_commits(&self) -> Option<&Arc<ReadonlyCommitIndexSegment>> {
        if let CompositeCommitIndexSegment::Readonly(segment) = &self.commits {
            Some(segment)
        } else {
            None
        }
    }

    /// Returns the mutable commit segment, or `None` if readonly.
    pub(super) fn mutable_commits(&mut self) -> Option<&mut MutableCommitIndexSegment> {
        if let CompositeCommitIndexSegment::Mutable(segment) = &mut self.commits {
            Some(segment)
        } else {
            None
        }
    }

    /// Returns the changed-path half of this index.
    pub(super) fn changed_paths(&self) -> &CompositeChangedPathIndex {
        &self.changed_paths
    }

    /// Returns the changed-path half of this index, mutably.
    pub(super) fn changed_paths_mut(&mut self) -> &mut CompositeChangedPathIndex {
        &mut self.changed_paths
    }
}
// A CompositeIndex is trivially its own composite view.
impl AsCompositeIndex for CompositeIndex {
    fn as_composite(&self) -> &CompositeIndex {
        self
    }
}
// In revset engine, we need to convert &CompositeIndex to &dyn Index.
// All methods below delegate to the commit/changed-path halves; this
// index never fails, so results are wrapped in Ok().
impl Index for CompositeIndex {
    fn shortest_unique_commit_id_prefix_len(&self, commit_id: &CommitId) -> IndexResult<usize> {
        Ok(self
            .commits()
            .shortest_unique_commit_id_prefix_len(commit_id))
    }

    fn resolve_commit_id_prefix(
        &self,
        prefix: &HexPrefix,
    ) -> IndexResult<PrefixResolution<CommitId>> {
        Ok(self.commits().resolve_commit_id_prefix(prefix))
    }

    fn has_id(&self, commit_id: &CommitId) -> IndexResult<bool> {
        Ok(self.commits().has_id(commit_id))
    }

    fn is_ancestor(&self, ancestor_id: &CommitId, descendant_id: &CommitId) -> IndexResult<bool> {
        Ok(self.commits().is_ancestor(ancestor_id, descendant_id))
    }

    fn common_ancestors(&self, set1: &[CommitId], set2: &[CommitId]) -> IndexResult<Vec<CommitId>> {
        Ok(self.commits().common_ancestors(set1, set2))
    }

    fn all_heads_for_gc(&self) -> IndexResult<Box<dyn Iterator<Item = CommitId> + '_>> {
        Ok(Box::new(self.commits().all_heads()))
    }

    fn heads(
        &self,
        candidate_ids: &mut dyn Iterator<Item = &CommitId>,
    ) -> IndexResult<Vec<CommitId>> {
        Ok(self.commits().heads(candidate_ids))
    }

    fn changed_paths_in_commit(
        &self,
        commit_id: &CommitId,
    ) -> IndexResult<Option<Box<dyn Iterator<Item = RepoPathBuf> + '_>>> {
        // None when the commit is unknown or has no changed-path data.
        let Some(paths) = self
            .commits()
            .commit_id_to_pos(commit_id)
            .and_then(|pos| self.changed_paths().changed_paths(pos))
        else {
            return Ok(None);
        };
        Ok(Some(Box::new(paths.map(|path| path.to_owned()))))
    }

    fn evaluate_revset(
        &self,
        expression: &ResolvedExpression,
        store: &Arc<Store>,
    ) -> Result<Box<dyn Revset + '_>, RevsetEvaluationError> {
        let revset_impl = revset_engine::evaluate(expression, store, self)?;
        Ok(Box::new(revset_impl))
    }
}
/// `ChangeIdIndex` implementation backed by a composite commit index.
pub(super) struct ChangeIdIndexImpl<I> {
    // Underlying index, convertible via `AsCompositeIndex`.
    index: I,
    // Set of commits reachable from the heads given at construction time,
    // populated incrementally by `resolve_prefix`. Behind a mutex because
    // `resolve_prefix` takes `&self` but must advance the traversal.
    reachable_set: Mutex<AncestorsBitSet>,
}
impl<I: AsCompositeIndex> ChangeIdIndexImpl<I> {
    /// Creates a change-id index over `index`, treating commits reachable
    /// from `heads` as visible.
    ///
    /// # Panics
    ///
    /// Panics if any of `heads` does not exist in the commit index. The
    /// caller is expected to pass only indexed head commits.
    pub fn new(index: I, heads: &mut dyn Iterator<Item = &CommitId>) -> Self {
        let composite = index.as_composite().commits();
        let mut reachable_set = AncestorsBitSet::with_capacity(composite.num_commits());
        for id in heads {
            // An `expect` with the invariant spelled out beats a bare
            // `unwrap`: if a caller ever passes an unindexed head, the panic
            // message points at the broken precondition.
            let pos = composite
                .commit_id_to_pos(id)
                .expect("head commit should exist in the commit index");
            reachable_set.add_head(pos);
        }
        Self {
            index,
            reachable_set: Mutex::new(reachable_set),
        }
    }
}
impl<I: AsCompositeIndex + Send + Sync> ChangeIdIndex for ChangeIdIndexImpl<I> {
    // Resolves change ID prefix among all IDs.
    //
    // If `SingleMatch` is returned, there is at least one commit with the given
    // change ID (either visible or hidden). `AmbiguousMatch` may be returned even
    // if the prefix is unique within the visible entries.
    fn resolve_prefix(
        &self,
        prefix: &HexPrefix,
    ) -> IndexResult<PrefixResolution<ResolvedChangeTargets>> {
        let index = self.index.as_composite().commits();
        let prefix = match index.resolve_change_id_prefix(prefix) {
            PrefixResolution::NoMatch => PrefixResolution::NoMatch,
            PrefixResolution::SingleMatch((_change_id, positions)) => {
                // Positions are expected to be in strictly descending order.
                debug_assert!(positions.is_sorted_by(|a, b| a > b));
                let mut reachable_set = self.reachable_set.lock().unwrap();
                // Advance the lazy ancestor traversal down to the smallest
                // (last) position so that `contains()` is meaningful for
                // every matched position below.
                reachable_set.visit_until(index, *positions.last().unwrap());
                let targets = positions
                    .iter()
                    .map(|&pos| {
                        let commit_id = index.entry_by_pos(pos).commit_id();
                        // Visible iff reachable from the heads recorded at
                        // construction time.
                        let state = if reachable_set.contains(pos) {
                            ResolvedChangeState::Visible
                        } else {
                            ResolvedChangeState::Hidden
                        };
                        (commit_id, state)
                    })
                    .collect_vec();
                if targets.is_empty() {
                    // NOTE(review): `positions` appears to be non-empty here
                    // (the `last().unwrap()` above would have panicked
                    // otherwise), so this looks like a defensive branch —
                    // confirm.
                    PrefixResolution::NoMatch
                } else {
                    PrefixResolution::SingleMatch(ResolvedChangeTargets { targets })
                }
            }
            PrefixResolution::AmbiguousMatch => PrefixResolution::AmbiguousMatch,
        };
        Ok(prefix)
    }
    // Calculates the shortest prefix length of the given `change_id` among all
    // IDs, including hidden entries.
    //
    // The returned length is usually a few digits longer than the minimum
    // length necessary to disambiguate within the visible entries since hidden
    // entries are also considered when determining the prefix length.
    fn shortest_unique_prefix_len(&self, change_id: &ChangeId) -> IndexResult<usize> {
        let index = self.index.as_composite().commits();
        Ok(index.shortest_unique_change_id_prefix_len(change_id))
    }
}
/// Repeatedly applies `shift_to_parents` while the queue's maximum is at or
/// above `target_pos`. Returns true if `target_pos` itself was present in
/// the queue.
fn shift_to_parents_until(
    queue: &mut BinaryHeap<GlobalCommitPosition>,
    index: &CompositeCommitIndex,
    target_pos: GlobalCommitPosition,
) -> bool {
    loop {
        // Stop as soon as the queue is exhausted or everything left is
        // below the target.
        let pos = match queue.peek() {
            Some(&pos) if pos >= target_pos => pos,
            _ => return false,
        };
        shift_to_parents(queue, pos, &index.entry_by_pos(pos).parent_positions());
        if pos == target_pos {
            return true;
        }
    }
}
/// Removes `pos` (including duplicates) from the queue and enqueues its
/// parents in its place.
fn shift_to_parents(
    items: &mut BinaryHeap<GlobalCommitPosition>,
    pos: GlobalCommitPosition,
    parent_positions: &[GlobalCommitPosition],
) {
    let Some((&first_parent, other_parents)) = parent_positions.split_first() else {
        // Root commit: nothing replaces the removed entry.
        dedup_pop(items).unwrap();
        return;
    };
    // Parents always precede their children, so replacing the heap top with
    // the first parent preserves the heap invariant cheaply.
    assert!(first_parent < pos);
    dedup_replace(items, first_parent).unwrap();
    for &parent_pos in other_parents {
        assert!(parent_pos < pos);
        items.push(parent_pos);
    }
}
/// Removes the greatest item from the heap along with any duplicates of it,
/// returning one of the removed items.
fn dedup_pop<T: Ord>(heap: &mut BinaryHeap<T>) -> Option<T> {
    heap.pop().map(|item| {
        remove_dup(heap, &item);
        item
    })
}
/// Removes the greatest item (and its duplicates) from the heap, inserting
/// the lesser `new_item` in its place; returns the removed item.
///
/// This is faster than `dedup_pop(heap)` followed by `heap.push(new_item)`,
/// especially when `new_item` is the next greatest item.
fn dedup_replace<T: Ord>(heap: &mut BinaryHeap<T>, new_item: T) -> Option<T> {
    let mut top = heap.peek_mut()?;
    let old_item = mem::replace(&mut *top, new_item);
    // Dropping the `PeekMut` guard sifts `new_item` into its proper place.
    drop(top);
    remove_dup(heap, &old_item);
    Some(old_item)
}
/// Pops entries equal to `item` off the top of the heap until the top
/// differs (or the heap is empty).
fn remove_dup<T: Ord>(heap: &mut BinaryHeap<T>, item: &T) {
    loop {
        match heap.peek_mut() {
            Some(top) if *top == *item => {
                binary_heap::PeekMut::pop(top);
            }
            _ => break,
        }
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/default_index/entry.rs | lib/src/default_index/entry.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Debug;
use std::fmt::Formatter;
use std::hash::Hash;
use std::hash::Hasher;
use smallvec::SmallVec;
use super::composite::CompositeCommitIndex;
use super::composite::DynCommitIndexSegment;
use crate::backend::ChangeId;
use crate::backend::CommitId;
use crate::object_id::ObjectId as _;
/// Global commit index position.
///
/// Identifies a commit across the whole composite index, as opposed to
/// [`LocalCommitPosition`] which is relative to one segment.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Hash)]
pub(super) struct GlobalCommitPosition(pub(super) u32);
impl GlobalCommitPosition {
    /// Smallest representable position.
    pub const MIN: Self = Self(u32::MIN);
    /// Largest representable position.
    pub const MAX: Self = Self(u32::MAX);
}
/// Local commit position within an index segment.
///
/// Offset relative to a single segment; see [`CommitIndexEntry::local_pos`].
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Hash)]
pub(super) struct LocalCommitPosition(pub(super) u32);
// SmallVec reuses two pointer-size fields as inline area, which means we can
// inline up to 16 bytes (on 64-bit platform) for free.
/// Position list that stays inline for up to 4 entries (enough for typical
/// parent lists) without heap allocation.
pub(super) type SmallGlobalCommitPositionsVec = SmallVec<[GlobalCommitPosition; 4]>;
/// Local-position counterpart of `SmallGlobalCommitPositionsVec`.
pub(super) type SmallLocalCommitPositionsVec = SmallVec<[LocalCommitPosition; 4]>;
/// Commit entry resolved against the index segment that stores it.
#[derive(Clone)]
pub(super) struct CommitIndexEntry<'a> {
    // Segment that physically stores this commit's data.
    source: &'a DynCommitIndexSegment,
    // Position in the composite (global) index.
    pos: GlobalCommitPosition,
    /// Position within the source segment
    local_pos: LocalCommitPosition,
}
impl Debug for CommitIndexEntry<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // `source` is intentionally omitted; the positions plus the hex
        // commit id identify the entry for debugging purposes.
        f.debug_struct("CommitIndexEntry")
            .field("pos", &self.pos)
            .field("local_pos", &self.local_pos)
            .field("commit_id", &self.commit_id().hex())
            .finish()
    }
}
// Equality and hashing use only the global position. NOTE(review): this
// presumes entries are compared/hashed only within the same composite index
// (otherwise equal positions could denote different commits) — confirm.
impl PartialEq for CommitIndexEntry<'_> {
    fn eq(&self, other: &Self) -> bool {
        self.pos == other.pos
    }
}
impl Eq for CommitIndexEntry<'_> {}
impl Hash for CommitIndexEntry<'_> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.pos.hash(state);
    }
}
impl<'a> CommitIndexEntry<'a> {
    /// Wraps the given segment/position triple; does not validate that
    /// `local_pos` corresponds to `pos` within `source`.
    pub(super) fn new(
        source: &'a DynCommitIndexSegment,
        pos: GlobalCommitPosition,
        local_pos: LocalCommitPosition,
    ) -> Self {
        Self {
            source,
            pos,
            local_pos,
        }
    }
    /// Global position of this commit in the composite index.
    pub fn position(&self) -> GlobalCommitPosition {
        self.pos
    }
    /// Generation number recorded for this commit in the segment.
    pub fn generation_number(&self) -> u32 {
        self.source.generation_number(self.local_pos)
    }
    /// Commit id of this entry.
    pub fn commit_id(&self) -> CommitId {
        self.source.commit_id(self.local_pos)
    }
    /// Change id of this entry.
    pub fn change_id(&self) -> ChangeId {
        self.source.change_id(self.local_pos)
    }
    /// Number of parent commits.
    pub fn num_parents(&self) -> u32 {
        self.source.num_parents(self.local_pos)
    }
    /// Global positions of the parent commits.
    pub fn parent_positions(&self) -> SmallGlobalCommitPositionsVec {
        self.source.parent_positions(self.local_pos)
    }
    /// Iterates over the parent entries, resolved through a composite view
    /// over the source segment.
    pub fn parents(&self) -> impl ExactSizeIterator<Item = CommitIndexEntry<'a>> + use<'a> {
        let composite = CompositeCommitIndex::new(self.source);
        self.parent_positions()
            .into_iter()
            .map(move |pos| composite.entry_by_pos(pos))
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/diff_presentation/unified.rs | lib/src/diff_presentation/unified.rs | // Copyright 2025 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Utilities to compute unified (Git-style) diffs of 2 sides
use std::ops::Range;
use bstr::BStr;
use bstr::BString;
use thiserror::Error;
use super::DiffTokenType;
use super::DiffTokenVec;
use super::FileContent;
use super::LineCompareMode;
use super::diff_by_line;
use super::file_content_for_diff;
use super::unzip_diff_hunks_to_lines;
use crate::backend::BackendError;
use crate::conflicts::ConflictMaterializeOptions;
use crate::conflicts::MaterializedTreeValue;
use crate::conflicts::materialize_merge_result_to_bytes;
use crate::diff::ContentDiff;
use crate::diff::DiffHunkKind;
use crate::merge::Diff;
use crate::object_id::ObjectId as _;
use crate::repo_path::RepoPath;
/// One side ("a" or "b") of a Git-style diff header.
#[derive(Clone, Debug)]
pub struct GitDiffPart {
    /// Octal mode string or `None` if the file is absent.
    pub mode: Option<&'static str>,
    /// Abbreviated (10-char) object hash; an all-zero dummy hash for
    /// conflicts and absent files.
    pub hash: String,
    /// File contents together with a binary-ness flag.
    pub content: FileContent<BString>,
}
/// Error produced while building a unified diff part.
#[derive(Debug, Error)]
pub enum UnifiedDiffError {
    /// Failure reading content from the backend.
    #[error(transparent)]
    Backend(#[from] BackendError),
    /// The materialized tree value reported denied access to `path`.
    #[error("Access denied to {path}")]
    AccessDenied {
        path: String,
        source: Box<dyn std::error::Error + Send + Sync>,
    },
}
/// Builds one side of a Git-style diff (mode string, abbreviated hash, and
/// content) from a materialized tree value at `path`.
///
/// # Errors
/// Returns an error if reading the file fails or if access to it was denied.
///
/// # Panics
/// Panics if `value` is a tree, which is not expected in a file diff.
pub fn git_diff_part(
    path: &RepoPath,
    value: MaterializedTreeValue,
    materialize_options: &ConflictMaterializeOptions,
) -> Result<GitDiffPart, UnifiedDiffError> {
    // Placeholder hash for cases without a single underlying object id
    // (absent files, conflicts).
    const DUMMY_HASH: &str = "0000000000";
    let mode;
    let mut hash;
    let content;
    match value {
        MaterializedTreeValue::Absent => {
            return Ok(GitDiffPart {
                mode: None,
                hash: DUMMY_HASH.to_owned(),
                content: FileContent {
                    is_binary: false,
                    contents: BString::default(),
                },
            });
        }
        MaterializedTreeValue::AccessDenied(err) => {
            return Err(UnifiedDiffError::AccessDenied {
                path: path.as_internal_file_string().to_owned(),
                source: err,
            });
        }
        MaterializedTreeValue::File(mut file) => {
            mode = if file.executable { "100755" } else { "100644" };
            hash = file.id.hex();
            content = file_content_for_diff(path, &mut file, |content| content)?;
        }
        MaterializedTreeValue::Symlink { id, target } => {
            mode = "120000";
            hash = id.hex();
            content = FileContent {
                // Unix file paths can't contain null bytes.
                is_binary: false,
                contents: target.into(),
            };
        }
        MaterializedTreeValue::GitSubmodule(id) => {
            // TODO: What should we actually do here?
            mode = "040000";
            hash = id.hex();
            content = FileContent {
                is_binary: false,
                contents: BString::default(),
            };
        }
        MaterializedTreeValue::FileConflict(file) => {
            mode = match file.executable {
                Some(true) => "100755",
                Some(false) | None => "100644",
            };
            hash = DUMMY_HASH.to_owned();
            content = FileContent {
                is_binary: false, // TODO: are we sure this is never binary?
                contents: materialize_merge_result_to_bytes(
                    &file.contents,
                    &file.labels,
                    materialize_options,
                ),
            };
        }
        MaterializedTreeValue::OtherConflict { id, labels } => {
            mode = "100644";
            hash = DUMMY_HASH.to_owned();
            content = FileContent {
                is_binary: false,
                contents: id.describe(&labels).into(),
            };
        }
        MaterializedTreeValue::Tree(_) => {
            panic!("Unexpected tree in diff at path {path:?}");
        }
    }
    // Abbreviate to the 10-char short hash shown in Git diff headers.
    hash.truncate(10);
    Ok(GitDiffPart {
        mode: Some(mode),
        hash,
        content,
    })
}
/// Classification of a single line within a unified diff hunk.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum DiffLineType {
    Context,
    Removed,
    Added,
}
/// One hunk of a unified diff: the line ranges covered on each side plus the
/// rendered lines, each tagged with its type and token highlighting.
pub struct UnifiedDiffHunk<'content> {
    /// 0-based line range covered on the left (old) side.
    pub left_line_range: Range<usize>,
    /// 0-based line range covered on the right (new) side.
    pub right_line_range: Range<usize>,
    pub lines: Vec<(DiffLineType, DiffTokenVec<'content>)>,
}
impl<'content> UnifiedDiffHunk<'content> {
    /// Appends unchanged lines, extending both sides' line ranges.
    fn extend_context_lines(&mut self, lines: impl IntoIterator<Item = &'content [u8]>) {
        let before = self.lines.len();
        for line in lines {
            self.lines
                .push((DiffLineType::Context, vec![(DiffTokenType::Matching, line)]));
        }
        let added = self.lines.len() - before;
        self.left_line_range.end += added;
        self.right_line_range.end += added;
    }
    /// Appends removed lines, extending only the left side's line range.
    fn extend_removed_lines(&mut self, lines: impl IntoIterator<Item = DiffTokenVec<'content>>) {
        let before = self.lines.len();
        for tokens in lines {
            self.lines.push((DiffLineType::Removed, tokens));
        }
        self.left_line_range.end += self.lines.len() - before;
    }
    /// Appends added lines, extending only the right side's line range.
    fn extend_added_lines(&mut self, lines: impl IntoIterator<Item = DiffTokenVec<'content>>) {
        let before = self.lines.len();
        for tokens in lines {
            self.lines.push((DiffLineType::Added, tokens));
        }
        self.right_line_range.end += self.lines.len() - before;
    }
}
/// Computes unified diff hunks between two contents, keeping up to `context`
/// matching lines around each changed region. Changes whose context regions
/// touch end up merged into a single hunk.
pub fn unified_diff_hunks<'content>(
    contents: Diff<&'content BStr>,
    context: usize,
    options: LineCompareMode,
) -> Vec<UnifiedDiffHunk<'content>> {
    let mut hunks = vec![];
    let mut current_hunk = UnifiedDiffHunk {
        left_line_range: 0..0,
        right_line_range: 0..0,
        lines: vec![],
    };
    let diff = diff_by_line(contents.into_array(), &options);
    let mut diff_hunks = diff.hunks().peekable();
    while let Some(hunk) = diff_hunks.next() {
        match hunk.kind {
            DiffHunkKind::Matching => {
                // Just use the right (i.e. new) content. We could count the
                // number of skipped lines separately, but the number of the
                // context lines should match the displayed content.
                let [_, right] = hunk.contents[..].try_into().unwrap();
                let mut lines = right.split_inclusive(|b| *b == b'\n').fuse();
                if !current_hunk.lines.is_empty() {
                    // The previous hunk line should be either removed/added.
                    current_hunk.extend_context_lines(lines.by_ref().take(context));
                }
                // Hold back up to `context` lines from the tail of the
                // matching region to emit before the next change (if any).
                let before_lines = if diff_hunks.peek().is_some() {
                    lines.by_ref().rev().take(context).collect()
                } else {
                    vec![] // No more hunks
                };
                // Whatever remains in the middle is skipped entirely; a
                // nonzero skip closes the current hunk and starts a new one
                // at the shifted line offsets.
                let num_skip_lines = lines.count();
                if num_skip_lines > 0 {
                    let left_start = current_hunk.left_line_range.end + num_skip_lines;
                    let right_start = current_hunk.right_line_range.end + num_skip_lines;
                    if !current_hunk.lines.is_empty() {
                        hunks.push(current_hunk);
                    }
                    current_hunk = UnifiedDiffHunk {
                        left_line_range: left_start..left_start,
                        right_line_range: right_start..right_start,
                        lines: vec![],
                    };
                }
                // The next hunk should be of DiffHunk::Different type if any.
                current_hunk.extend_context_lines(before_lines.into_iter().rev());
            }
            DiffHunkKind::Different => {
                // Refine the changed region with a word-level diff for
                // per-token highlighting.
                let lines = unzip_diff_hunks_to_lines(ContentDiff::by_word(hunk.contents).hunks());
                current_hunk.extend_removed_lines(lines.before);
                current_hunk.extend_added_lines(lines.after);
            }
        }
    }
    if !current_hunk.lines.is_empty() {
        hunks.push(current_hunk);
    }
    hunks
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/diff_presentation/mod.rs | lib/src/diff_presentation/mod.rs | // Copyright 2025 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Utilities to present file diffs to the user
#![expect(missing_docs)]
use std::borrow::Borrow;
use std::mem;
use bstr::BString;
use itertools::Itertools as _;
use pollster::FutureExt as _;
use crate::backend::BackendResult;
use crate::conflicts::MaterializedFileValue;
use crate::diff::CompareBytesExactly;
use crate::diff::CompareBytesIgnoreAllWhitespace;
use crate::diff::CompareBytesIgnoreWhitespaceAmount;
use crate::diff::ContentDiff;
use crate::diff::DiffHunk;
use crate::diff::DiffHunkKind;
use crate::diff::find_line_ranges;
use crate::merge::Diff;
use crate::repo_path::RepoPath;
pub mod unified;
// TODO: colored_diffs utils should also be moved from `jj_cli::diff_utils` to
// here.
/// Whether a token (a byte span within a line) is identical on both sides of
/// the diff or differs.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum DiffTokenType {
    Matching,
    Different,
}
/// A line rendered as a sequence of (token type, token bytes) pairs.
type DiffTokenVec<'content> = Vec<(DiffTokenType, &'content [u8])>;
/// File contents tagged with a guess of whether they are binary.
#[derive(Clone, Debug)]
pub struct FileContent<T> {
    /// false if this file is likely text; true if it is likely binary.
    pub is_binary: bool,
    pub contents: T,
}
/// Reads the file's full contents for diffing and flags likely-binary files
/// using a null-byte heuristic. `map_resolved` converts the raw bytes into
/// the caller's content type.
///
/// # Errors
/// Propagates backend errors from reading the file.
pub fn file_content_for_diff<T>(
    path: &RepoPath,
    file: &mut MaterializedFileValue,
    map_resolved: impl FnOnce(BString) -> T,
) -> BackendResult<FileContent<T>> {
    // If this is a binary file, don't show the full contents.
    // Determine whether it's binary by whether the first 8k bytes contain a null
    // character; this is the same heuristic used by git as of writing: https://github.com/git/git/blob/eea0e59ffbed6e33d171ace5be13cde9faa41639/xdiff-interface.c#L192-L198
    const PEEK_SIZE: usize = 8000;
    // TODO: currently we look at the whole file, even though for binary files we
    // only need to know the file size. To change that we'd have to extend all
    // the data backends to support getting the length.
    let contents = BString::new(file.read_all(path).block_on()?);
    // Guard against files shorter than PEEK_SIZE.
    let start = &contents[..PEEK_SIZE.min(contents.len())];
    Ok(FileContent {
        is_binary: start.contains(&b'\0'),
        contents: map_resolved(contents),
    })
}
/// How lines are compared when diffing by line; see `diff_by_line`.
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub enum LineCompareMode {
    /// Compares lines literally.
    #[default]
    Exact,
    /// Compares lines ignoring any whitespace occurrences.
    IgnoreAllSpace,
    /// Compares lines ignoring changes in whitespace amount.
    IgnoreSpaceChange,
}
/// Tokenizes `inputs` into lines and diffs them, comparing line contents
/// according to `options`.
pub fn diff_by_line<'input, T: AsRef<[u8]> + ?Sized + 'input>(
    inputs: impl IntoIterator<Item = &'input T>,
    options: &LineCompareMode,
) -> ContentDiff<'input> {
    // TODO: If we add --ignore-blank-lines, its tokenizer will have to attach
    // blank lines to the preceding range. Maybe it can also be implemented as a
    // post-process (similar to refine_changed_regions()) that expands unchanged
    // regions across blank lines.
    match options {
        LineCompareMode::Exact => {
            ContentDiff::for_tokenizer(inputs, find_line_ranges, CompareBytesExactly)
        }
        LineCompareMode::IgnoreAllSpace => {
            ContentDiff::for_tokenizer(inputs, find_line_ranges, CompareBytesIgnoreAllWhitespace)
        }
        LineCompareMode::IgnoreSpaceChange => {
            ContentDiff::for_tokenizer(inputs, find_line_ranges, CompareBytesIgnoreWhitespaceAmount)
        }
    }
}
/// Splits `[left, right]` hunk pairs into `[left_lines, right_lines]`.
///
/// Tokens from consecutive hunks are stitched together into complete lines;
/// a line ends at a `\n`-terminated token, and a trailing line without a
/// final newline is flushed at the end.
pub fn unzip_diff_hunks_to_lines<'content, I>(diff_hunks: I) -> Diff<Vec<DiffTokenVec<'content>>>
where
    I: IntoIterator,
    I::Item: Borrow<DiffHunk<'content>>,
{
    let mut left_lines: Vec<DiffTokenVec<'content>> = vec![];
    let mut right_lines: Vec<DiffTokenVec<'content>> = vec![];
    // Tokens accumulated for the not-yet-terminated line on each side.
    let mut left_tokens: DiffTokenVec<'content> = vec![];
    let mut right_tokens: DiffTokenVec<'content> = vec![];
    for hunk in diff_hunks {
        let hunk = hunk.borrow();
        match hunk.kind {
            DiffHunkKind::Matching => {
                // TODO: add support for unmatched contexts
                debug_assert!(hunk.contents.iter().all_equal());
                // Matching content is identical on both sides, so the same
                // tokens feed both accumulators.
                for token in hunk.contents[0].split_inclusive(|b| *b == b'\n') {
                    left_tokens.push((DiffTokenType::Matching, token));
                    right_tokens.push((DiffTokenType::Matching, token));
                    if token.ends_with(b"\n") {
                        left_lines.push(mem::take(&mut left_tokens));
                        right_lines.push(mem::take(&mut right_tokens));
                    }
                }
            }
            DiffHunkKind::Different => {
                let [left, right] = hunk.contents[..]
                    .try_into()
                    .expect("hunk should have exactly two inputs");
                for token in left.split_inclusive(|b| *b == b'\n') {
                    left_tokens.push((DiffTokenType::Different, token));
                    if token.ends_with(b"\n") {
                        left_lines.push(mem::take(&mut left_tokens));
                    }
                }
                for token in right.split_inclusive(|b| *b == b'\n') {
                    right_tokens.push((DiffTokenType::Different, token));
                    if token.ends_with(b"\n") {
                        right_lines.push(mem::take(&mut right_tokens));
                    }
                }
            }
        }
    }
    // Flush trailing content that wasn't newline-terminated.
    if !left_tokens.is_empty() {
        left_lines.push(left_tokens);
    }
    if !right_tokens.is_empty() {
        right_lines.push(right_tokens);
    }
    Diff::new(left_lines, right_lines)
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/protos/default_index.rs | lib/src/protos/default_index.rs | // This file is @generated by prost-build.
/// Control file to link operation to index segments.
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
pub struct SegmentControl {
/// Hash (or file name) of the commit index segment.
#[prost(bytes = "vec", tag = "1")]
pub commit_segment_id: ::prost::alloc::vec::Vec<u8>,
/// First commit stored in the changed-path index segments. Unset if
/// changed-path index is disabled.
#[prost(uint32, optional, tag = "2")]
pub changed_path_start_commit_pos: ::core::option::Option<u32>,
/// Hashes (or file names) of the changed-path index segments.
#[prost(bytes = "vec", repeated, tag = "3")]
pub changed_path_segment_ids: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/protos/simple_op_store.rs | lib/src/protos/simple_op_store.rs | // This file is @generated by prost-build.
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
pub struct RefConflictLegacy {
#[deprecated]
#[prost(bytes = "vec", repeated, tag = "1")]
pub removes: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
#[deprecated]
#[prost(bytes = "vec", repeated, tag = "2")]
pub adds: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
}
/// This is redundant with `RefTargetTerm`. It exists for historical reasons.
/// TODO: Consider deprecating it.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RefConflict {
#[prost(message, repeated, tag = "1")]
pub removes: ::prost::alloc::vec::Vec<ref_conflict::Term>,
#[prost(message, repeated, tag = "2")]
pub adds: ::prost::alloc::vec::Vec<ref_conflict::Term>,
}
/// Nested message and enum types in `RefConflict`.
pub mod ref_conflict {
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
pub struct Term {
#[prost(bytes = "vec", optional, tag = "1")]
pub value: ::core::option::Option<::prost::alloc::vec::Vec<u8>>,
}
}
/// RefTarget that may be serialized in legacy form.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RefTarget {
/// New `RefConflict` type represents both `commit_id` and `conflict_legacy`.
#[prost(oneof = "ref_target::Value", tags = "1, 2, 3")]
pub value: ::core::option::Option<ref_target::Value>,
}
/// Nested message and enum types in `RefTarget`.
pub mod ref_target {
/// New `RefConflict` type represents both `commit_id` and `conflict_legacy`.
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Value {
#[prost(bytes, tag = "1")]
CommitId(::prost::alloc::vec::Vec<u8>),
#[prost(message, tag = "2")]
ConflictLegacy(super::RefConflictLegacy),
#[prost(message, tag = "3")]
Conflict(super::RefConflict),
}
}
/// RefTarget term that should be serialized in native form, in which positive
/// and negative terms alternate.
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
pub struct RefTargetTerm {
#[prost(bytes = "vec", optional, tag = "1")]
pub value: ::core::option::Option<::prost::alloc::vec::Vec<u8>>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RemoteBookmark {
#[prost(string, tag = "1")]
pub remote_name: ::prost::alloc::string::String,
#[prost(message, optional, tag = "2")]
pub target: ::core::option::Option<RefTarget>,
/// Introduced in jj 0.11.
#[prost(enumeration = "RemoteRefState", optional, tag = "3")]
pub state: ::core::option::Option<i32>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Bookmark {
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
/// Unset if the bookmark has been deleted locally.
#[prost(message, optional, tag = "2")]
pub local_target: ::core::option::Option<RefTarget>,
/// Deprecated since jj 0.34.
#[deprecated]
#[prost(message, repeated, tag = "3")]
pub remote_bookmarks: ::prost::alloc::vec::Vec<RemoteBookmark>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GitRef {
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
/// This field is just for historical reasons (before we had the RefTarget
/// type). New GitRefs have (only) the target field.
/// TODO: Delete support for the old format.
#[deprecated]
#[prost(bytes = "vec", tag = "2")]
pub commit_id: ::prost::alloc::vec::Vec<u8>,
#[prost(message, optional, tag = "3")]
pub target: ::core::option::Option<RefTarget>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RemoteRef {
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
#[prost(message, repeated, tag = "2")]
pub target_terms: ::prost::alloc::vec::Vec<RefTargetTerm>,
#[prost(enumeration = "RemoteRefState", tag = "3")]
pub state: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Tag {
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
#[prost(message, optional, tag = "2")]
pub target: ::core::option::Option<RefTarget>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct View {
#[prost(bytes = "vec", repeated, tag = "1")]
pub head_ids: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
#[deprecated]
#[prost(bytes = "vec", tag = "2")]
pub wc_commit_id: ::prost::alloc::vec::Vec<u8>,
#[prost(map = "string, bytes", tag = "8")]
pub wc_commit_ids: ::std::collections::HashMap<
::prost::alloc::string::String,
::prost::alloc::vec::Vec<u8>,
>,
/// Local bookmarks and remote bookmarks in legacy form.
#[prost(message, repeated, tag = "5")]
pub bookmarks: ::prost::alloc::vec::Vec<Bookmark>,
#[prost(message, repeated, tag = "6")]
pub local_tags: ::prost::alloc::vec::Vec<Tag>,
/// Introduced in jj 0.34.
#[prost(message, repeated, tag = "11")]
pub remote_views: ::prost::alloc::vec::Vec<RemoteView>,
/// Only a subset of the refs. For example, does not include refs/notes/.
#[prost(message, repeated, tag = "3")]
pub git_refs: ::prost::alloc::vec::Vec<GitRef>,
/// This field is just for historical reasons (before we had the RefTarget
/// type). New Views have (only) the target field.
/// TODO: Delete support for the old format.
#[deprecated]
#[prost(bytes = "vec", tag = "7")]
pub git_head_legacy: ::prost::alloc::vec::Vec<u8>,
#[prost(message, optional, tag = "9")]
pub git_head: ::core::option::Option<RefTarget>,
/// Whether "@git" tags have been migrated to remote_views.
#[prost(bool, tag = "12")]
pub has_git_refs_migrated_to_remote_tags: bool,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RemoteView {
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
#[prost(message, repeated, tag = "2")]
pub bookmarks: ::prost::alloc::vec::Vec<RemoteRef>,
#[prost(message, repeated, tag = "3")]
pub tags: ::prost::alloc::vec::Vec<RemoteRef>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Operation {
#[prost(bytes = "vec", tag = "1")]
pub view_id: ::prost::alloc::vec::Vec<u8>,
#[prost(bytes = "vec", repeated, tag = "2")]
pub parents: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
#[prost(message, optional, tag = "3")]
pub metadata: ::core::option::Option<OperationMetadata>,
/// Introduced in jj 0.30.
#[prost(message, repeated, tag = "4")]
pub commit_predecessors: ::prost::alloc::vec::Vec<CommitPredecessors>,
/// Whether or not `commit_predecessors` is recorded.
#[prost(bool, tag = "5")]
pub stores_commit_predecessors: bool,
}
/// TODO: Share with store.proto? Do we even need the timezone here?
#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)]
pub struct Timestamp {
#[prost(int64, tag = "1")]
pub millis_since_epoch: i64,
#[prost(int32, tag = "2")]
pub tz_offset: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct OperationMetadata {
#[prost(message, optional, tag = "1")]
pub start_time: ::core::option::Option<Timestamp>,
#[prost(message, optional, tag = "2")]
pub end_time: ::core::option::Option<Timestamp>,
#[prost(string, tag = "3")]
pub description: ::prost::alloc::string::String,
#[prost(string, tag = "4")]
pub hostname: ::prost::alloc::string::String,
#[prost(string, tag = "5")]
pub username: ::prost::alloc::string::String,
#[prost(bool, tag = "7")]
pub is_snapshot: bool,
#[prost(map = "string, string", tag = "6")]
pub tags: ::std::collections::HashMap<
::prost::alloc::string::String,
::prost::alloc::string::String,
>,
}
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
pub struct CommitPredecessors {
#[prost(bytes = "vec", tag = "1")]
pub commit_id: ::prost::alloc::vec::Vec<u8>,
#[prost(bytes = "vec", repeated, tag = "2")]
pub predecessor_ids: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum RemoteRefState {
New = 0,
Tracked = 1,
}
impl RemoteRefState {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
Self::New => "New",
Self::Tracked => "Tracked",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"New" => Some(Self::New),
"Tracked" => Some(Self::Tracked),
_ => None,
}
}
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/protos/simple_store.rs | lib/src/protos/simple_store.rs | // This file is @generated by prost-build.
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
pub struct TreeValue {
#[prost(oneof = "tree_value::Value", tags = "2, 3, 4")]
pub value: ::core::option::Option<tree_value::Value>,
}
/// Nested message and enum types in `TreeValue`.
pub mod tree_value {
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
pub struct File {
#[prost(bytes = "vec", tag = "1")]
pub id: ::prost::alloc::vec::Vec<u8>,
#[prost(bool, tag = "2")]
pub executable: bool,
#[prost(bytes = "vec", tag = "3")]
pub copy_id: ::prost::alloc::vec::Vec<u8>,
}
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Oneof)]
pub enum Value {
#[prost(message, tag = "2")]
File(File),
#[prost(bytes, tag = "3")]
SymlinkId(::prost::alloc::vec::Vec<u8>),
#[prost(bytes, tag = "4")]
TreeId(::prost::alloc::vec::Vec<u8>),
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Tree {
#[prost(message, repeated, tag = "1")]
pub entries: ::prost::alloc::vec::Vec<tree::Entry>,
}
/// Nested message and enum types in `Tree`.
pub mod tree {
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
pub struct Entry {
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
#[prost(message, optional, tag = "2")]
pub value: ::core::option::Option<super::TreeValue>,
}
}
/// A commit record as serialized by the simple (local) store backend.
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
pub struct Commit {
    #[prost(bytes = "vec", repeated, tag = "1")]
    pub parents: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
    #[prost(bytes = "vec", repeated, tag = "2")]
    pub predecessors: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
    /// Alternating positive and negative terms
    #[prost(bytes = "vec", repeated, tag = "3")]
    pub root_tree: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
    /// Labels for the terms of a conflict.
    #[prost(string, repeated, tag = "10")]
    pub conflict_labels: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    #[prost(bytes = "vec", tag = "4")]
    pub change_id: ::prost::alloc::vec::Vec<u8>,
    #[prost(string, tag = "5")]
    pub description: ::prost::alloc::string::String,
    #[prost(message, optional, tag = "6")]
    pub author: ::core::option::Option<commit::Signature>,
    #[prost(message, optional, tag = "7")]
    pub committer: ::core::option::Option<commit::Signature>,
    /// Raw signature bytes if the commit is signed — NOTE(review): exact
    /// payload format is not visible in this file.
    #[prost(bytes = "vec", optional, tag = "9")]
    pub secure_sig: ::core::option::Option<::prost::alloc::vec::Vec<u8>>,
}
/// Nested message and enum types in `Commit`.
pub mod commit {
    /// A point in time plus its local timezone offset.
    #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)]
    pub struct Timestamp {
        #[prost(int64, tag = "1")]
        pub millis_since_epoch: i64,
        /// Timezone offset — NOTE(review): units (minutes vs. seconds) are
        /// not visible in this file; confirm against the consumer.
        #[prost(int32, tag = "2")]
        pub tz_offset: i32,
    }
    /// Author/committer identity and time.
    #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
    pub struct Signature {
        #[prost(string, tag = "1")]
        pub name: ::prost::alloc::string::String,
        #[prost(string, tag = "2")]
        pub email: ::prost::alloc::string::String,
        #[prost(message, optional, tag = "3")]
        pub timestamp: ::core::option::Option<Timestamp>,
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/protos/local_working_copy.rs | lib/src/protos/local_working_copy.rs | // This file is @generated by prost-build.
/// Extra data recorded when a conflicted file has been materialized (written
/// out with conflict markers) in the working copy.
#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)]
pub struct MaterializedConflictData {
    /// TODO: maybe we should store num_sides here as well
    /// Length of the conflict markers used when the file was written.
    #[prost(uint32, tag = "1")]
    pub conflict_marker_len: u32,
}
/// Recorded filesystem metadata (mtime, size, type) for one file.
#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)]
pub struct FileState {
    #[prost(int64, tag = "1")]
    pub mtime_millis_since_epoch: i64,
    #[prost(uint64, tag = "2")]
    pub size: u64,
    /// Encoded [`FileType`] value.
    #[prost(enumeration = "FileType", tag = "3")]
    pub file_type: i32,
    /// Present when the file was written with materialized conflict markers.
    /// (Tag 4 is unused — presumably a removed field.)
    #[prost(message, optional, tag = "5")]
    pub materialized_conflict_data: ::core::option::Option<MaterializedConflictData>,
}
/// Pairs a path with its recorded [`FileState`].
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
pub struct FileStateEntry {
    #[prost(string, tag = "1")]
    pub path: ::prost::alloc::string::String,
    #[prost(message, optional, tag = "2")]
    pub state: ::core::option::Option<FileState>,
}
/// Path prefixes forming the working copy's sparse patterns.
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
pub struct SparsePatterns {
    #[prost(string, repeated, tag = "1")]
    pub prefixes: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// Persisted working-copy tree state: current tree id(s) plus cached
/// per-file metadata and sparse/watchman bookkeeping.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TreeState {
    #[deprecated]
    #[prost(bytes = "vec", tag = "1")]
    pub legacy_tree_id: ::prost::alloc::vec::Vec<u8>,
    /// Alternating positive and negative terms if there's a conflict, otherwise a
    /// single (positive) value
    #[prost(bytes = "vec", repeated, tag = "5")]
    pub tree_ids: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
    /// Labels for the terms of a conflict.
    #[prost(string, repeated, tag = "7")]
    pub conflict_labels: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    #[prost(message, repeated, tag = "2")]
    pub file_states: ::prost::alloc::vec::Vec<FileStateEntry>,
    /// Whether `file_states` is stored pre-sorted — NOTE(review): presumed
    /// sorted by `path`; confirm in the reader code.
    #[prost(bool, tag = "6")]
    pub is_file_states_sorted: bool,
    #[prost(message, optional, tag = "3")]
    pub sparse_patterns: ::core::option::Option<SparsePatterns>,
    /// Last observed Watchman clock, if the Watchman FS monitor is in use.
    #[prost(message, optional, tag = "4")]
    pub watchman_clock: ::core::option::Option<WatchmanClock>,
}
/// Clock token from the Watchman file watcher (either a string clock or a
/// unix timestamp; see [`watchman_clock::WatchmanClock`]).
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
pub struct WatchmanClock {
    #[prost(oneof = "watchman_clock::WatchmanClock", tags = "1, 2")]
    pub watchman_clock: ::core::option::Option<watchman_clock::WatchmanClock>,
}
/// Nested message and enum types in `WatchmanClock`.
pub mod watchman_clock {
    /// The two representations Watchman uses for a clock value.
    #[derive(Clone, PartialEq, Eq, Hash, ::prost::Oneof)]
    pub enum WatchmanClock {
        #[prost(string, tag = "1")]
        StringClock(::prost::alloc::string::String),
        #[prost(int64, tag = "2")]
        UnixTimestamp(i64),
    }
}
/// Records which operation and workspace the working copy was last updated
/// to. (Tag 1 is unused — presumably a removed field.)
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
pub struct Checkout {
    /// The operation at which the working copy was updated.
    #[prost(bytes = "vec", tag = "2")]
    pub operation_id: ::prost::alloc::vec::Vec<u8>,
    /// An identifier for this workspace. It is used for looking up the current
    /// working-copy commit in the repo view. Currently a human-readable name.
    /// TODO: Is it better to make this a UUID and a have map that to a name in
    /// config? That way users can rename a workspace.
    #[prost(string, tag = "3")]
    pub workspace_name: ::prost::alloc::string::String,
}
/// Kind of a working-copy file, as recorded in [`FileState::file_type`].
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum FileType {
    Normal = 0,
    Symlink = 1,
    Executable = 2,
    Conflict = 3,
    GitSubmodule = 4,
}
impl FileType {
    /// All variants, in declaration order; used to look variants up by name.
    const ALL: [Self; 5] = [
        Self::Normal,
        Self::Symlink,
        Self::Executable,
        Self::Conflict,
        Self::GitSubmodule,
    ];

    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            Self::Normal => "Normal",
            Self::Symlink => "Symlink",
            Self::Executable => "Executable",
            Self::Conflict => "Conflict",
            Self::GitSubmodule => "GitSubmodule",
        }
    }

    /// Creates an enum from field names used in the ProtoBuf definition.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        // Search for the variant whose name matches, so the forward and
        // reverse mappings can't drift apart.
        Self::ALL.into_iter().find(|v| v.as_str_name() == value)
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/protos/mod.rs | lib/src/protos/mod.rs | // This file is @generated by prost-build.
// Each submodule below textually includes a prost-generated file that lives
// alongside this one (see the "@generated by prost-build" headers).
pub mod default_index {
    include!("default_index.rs");
}
pub mod git_store {
    include!("git_store.rs");
}
pub mod local_working_copy {
    include!("local_working_copy.rs");
}
pub mod simple_op_store {
    include!("simple_op_store.rs");
}
pub mod simple_store {
    include!("simple_store.rs");
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/protos/git_store.rs | lib/src/protos/git_store.rs | // This file is @generated by prost-build.
/// Commit metadata stored alongside a Git commit — the parts that Git itself
/// cannot represent (change id, predecessors, conflicted trees).
#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
pub struct Commit {
    #[prost(bytes = "vec", repeated, tag = "2")]
    pub predecessors: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
    #[prost(bytes = "vec", tag = "4")]
    pub change_id: ::prost::alloc::vec::Vec<u8>,
    /// Alternating positive and negative terms. Set only for conflicts.
    /// Resolved trees are stored in the git commit
    #[prost(bytes = "vec", repeated, tag = "1")]
    pub root_tree: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
    /// TODO(#1624): delete when we assume that all commits use this format
    #[prost(bool, tag = "10")]
    pub uses_tree_conflict_format: bool,
    #[deprecated]
    #[prost(bool, tag = "8")]
    pub is_open: bool,
    #[deprecated]
    #[prost(bool, tag = "9")]
    pub is_pruned: bool,
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/lock/unix.rs | lib/src/lock/unix.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::fs::File;
use std::path::PathBuf;
use rustix::fs::FlockOperation;
use tracing::instrument;
use super::FileLockError;
/// An exclusive advisory file lock based on Unix `flock`.
pub struct FileLock {
    /// Path of the lock file; removed again when the lock is dropped.
    path: PathBuf,
    /// Open handle the `flock` is held through (see `lock`/`Drop`).
    file: File,
}
impl FileLock {
    /// Acquires an exclusive advisory lock on `path`, blocking until it is
    /// available.
    ///
    /// The lock file is created if it doesn't exist. Because `Drop` deletes
    /// the lock file, we may end up holding a lock on an already-unlinked
    /// file; in that case we retry so that our ownership is visible on disk.
    ///
    /// # Errors
    ///
    /// Returns [`FileLockError`] if the lock file can't be created, locked,
    /// or stat'ed.
    pub fn lock(path: PathBuf) -> Result<Self, FileLockError> {
        tracing::info!("Attempting to lock {path:?}");
        loop {
            // Create lockfile, or open pre-existing one
            let file = File::create(&path).map_err(|err| FileLockError {
                message: "Failed to open lock file",
                path: path.clone(),
                err,
            })?;
            // If the lock was already held, wait for it to be released
            rustix::fs::flock(&file, FlockOperation::LockExclusive).map_err(|errno| {
                FileLockError {
                    message: "Failed to lock lock file",
                    path: path.clone(),
                    err: errno.into(),
                }
            })?;
            match rustix::fs::fstat(&file) {
                Ok(stat) => {
                    if stat.st_nlink == 0 {
                        // Lockfile was deleted, probably by the previous holder's `Drop` impl;
                        // create a new one so our ownership is visible,
                        // rather than hidden in an unlinked file. Not
                        // always necessary, since the previous holder might
                        // have exited abruptly.
                        continue;
                    }
                }
                Err(rustix::io::Errno::STALE) => {
                    // The file handle is stale.
                    // This can happen when using NFS,
                    // likely caused by a remote deletion of the lockfile.
                    // Treat this like a normal lockfile deletion and retry.
                    continue;
                }
                Err(errno) => {
                    return Err(FileLockError {
                        // Capitalized to match the sibling messages above.
                        message: "Failed to stat lock file",
                        path: path.clone(),
                        err: errno.into(),
                    });
                }
            }
            tracing::info!("Locked {path:?}");
            return Ok(Self { path, file });
        }
    }
}
impl Drop for FileLock {
    /// Releases the lock: deletes the lock file first, then unlocks, in that
    /// deliberate order (see comments); all errors are ignored.
    #[instrument(skip_all)]
    fn drop(&mut self) {
        // Removing the file isn't strictly necessary, but reduces confusion.
        std::fs::remove_file(&self.path).ok();
        // Unblock any processes that tried to acquire the lock while we held it.
        // They're responsible for creating and locking a new lockfile, since we
        // just deleted this one.
        rustix::fs::flock(&self.file, FlockOperation::Unlock).ok();
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/lock/mod.rs | lib/src/lock/mod.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
mod fallback;
#[cfg(unix)]
mod unix;
use std::io;
use std::path::PathBuf;
use thiserror::Error;
#[cfg(not(unix))]
pub use self::fallback::FileLock;
#[cfg(unix)]
pub use self::unix::FileLock;
/// Error acquiring a file lock; carries the lock file path and the
/// underlying OS error.
#[derive(Debug, Error)]
#[error("{message}: {path}")]
pub struct FileLockError {
    /// Static description of the step that failed (open/lock/stat/create).
    pub message: &'static str,
    pub path: PathBuf,
    #[source]
    pub err: io::Error,
}
#[cfg(test)]
mod tests {
    use std::cmp::max;
    use std::fs;
    use std::thread;
    use std::time::Duration;
    use test_case::test_case;
    use super::*;
    use crate::tests::new_temp_dir;
    // The lock file exists exactly while the lock is held, for both the
    // platform-specific and the fallback implementation.
    #[test_case(FileLock::lock)]
    #[cfg_attr(unix, test_case(fallback::FileLock::lock))]
    fn lock_basic<T>(lock_fn: fn(PathBuf) -> Result<T, FileLockError>) {
        let temp_dir = new_temp_dir();
        let lock_path = temp_dir.path().join("test.lock");
        assert!(!lock_path.exists());
        {
            let _lock = lock_fn(lock_path.clone()).unwrap();
            assert!(lock_path.exists());
        }
        assert!(!lock_path.exists());
    }
    // N threads each do a read-increment-write cycle under the lock; the
    // final counter equals N only if the lock provided mutual exclusion.
    #[test_case(FileLock::lock)]
    #[cfg_attr(unix, test_case(fallback::FileLock::lock))]
    fn lock_concurrent<T>(lock_fn: fn(PathBuf) -> Result<T, FileLockError>) {
        let temp_dir = new_temp_dir();
        let data_path = temp_dir.path().join("test");
        let lock_path = temp_dir.path().join("test.lock");
        fs::write(&data_path, 0_u32.to_le_bytes()).unwrap();
        let num_threads = max(num_cpus::get(), 4);
        thread::scope(|s| {
            for _ in 0..num_threads {
                s.spawn(|| {
                    let _lock = lock_fn(lock_path.clone()).unwrap();
                    let data = fs::read(&data_path).unwrap();
                    let value = u32::from_le_bytes(data.try_into().unwrap());
                    // Widen the race window so a broken lock actually fails.
                    thread::sleep(Duration::from_millis(1));
                    fs::write(&data_path, (value + 1).to_le_bytes()).unwrap();
                });
            }
        });
        let data = fs::read(&data_path).unwrap();
        let value = u32::from_le_bytes(data.try_into().unwrap());
        assert_eq!(value, num_threads as u32);
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/lock/fallback.rs | lib/src/lock/fallback.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fs::File;
use std::fs::OpenOptions;
use std::path::PathBuf;
use std::time::Duration;
use tracing::instrument;
use super::FileLockError;
/// Portable file lock based on atomic creation of the lock file itself;
/// used on platforms without a specialized implementation.
pub struct FileLock {
    /// Lock file path; deleted on drop to release the lock.
    path: PathBuf,
    /// Keeps the created file handle alive for the lock's lifetime.
    _file: File,
}
/// Produces sleep durations for lock-acquisition retries: exponentially
/// growing with random jitter, capped by a total elapsed budget (see the
/// `Iterator` impl).
struct BackoffIterator {
    /// Base duration (seconds) of the next sleep, before jitter.
    next_sleep_secs: f32,
    /// Total jittered sleep time handed out so far, in seconds.
    elapsed_secs: f32,
}
impl BackoffIterator {
    /// Starts the backoff at 1 ms with no elapsed time.
    fn new() -> Self {
        Self {
            next_sleep_secs: 0.001,
            elapsed_secs: 0.0,
        }
    }
}
impl Iterator for BackoffIterator {
    type Item = Duration;

    /// Yields the next sleep interval, or `None` once roughly 10 seconds of
    /// total (jittered) sleep time have been handed out.
    fn next(&mut self) -> Option<Self::Item> {
        // Stop once the total sleep budget is exhausted.
        if self.elapsed_secs >= 10.0 {
            return None;
        }
        // Jitter in [0.5, 1.5) keeps competing processes out of lock-step.
        let jitter = rand::random::<f32>() + 0.5;
        let sleep_secs = self.next_sleep_secs * jitter;
        self.next_sleep_secs *= 1.5;
        self.elapsed_secs += sleep_secs;
        Some(Duration::from_secs_f32(sleep_secs))
    }
}
// Suppress warning on platforms where specialized lock impl is available
#[cfg_attr(all(unix, not(test)), expect(dead_code))]
impl FileLock {
    /// Acquires the lock by atomically creating the lock file
    /// (`create_new`), retrying with jittered exponential backoff while the
    /// file already exists; fails once the backoff budget is spent.
    ///
    /// # Errors
    ///
    /// Returns [`FileLockError`] on timeout or on any unexpected I/O error.
    pub fn lock(path: PathBuf) -> Result<Self, FileLockError> {
        tracing::info!("Attempting to lock {path:?}");
        let mut options = OpenOptions::new();
        options.create_new(true);
        options.write(true);
        let mut backoff_iterator = BackoffIterator::new();
        loop {
            match options.open(&path) {
                Ok(file) => {
                    tracing::info!("Locked {path:?}");
                    return Ok(Self { path, _file: file });
                }
                // On Windows, PermissionDenied is also treated as contention
                // — NOTE(review): presumably it can surface when racing with
                // deletion of the lock file; confirm.
                Err(err)
                    if err.kind() == std::io::ErrorKind::AlreadyExists
                        || (cfg!(windows)
                            && err.kind() == std::io::ErrorKind::PermissionDenied) =>
                {
                    if let Some(duration) = backoff_iterator.next() {
                        std::thread::sleep(duration);
                    } else {
                        return Err(FileLockError {
                            message: "Timed out while trying to create lock file",
                            path,
                            err,
                        });
                    }
                }
                Err(err) => {
                    return Err(FileLockError {
                        message: "Failed to create lock file",
                        path,
                        err,
                    });
                }
            }
        }
    }
}
impl Drop for FileLock {
    /// Releases the lock by deleting the lock file; a failure is only
    /// logged, since nothing useful can be done about it during drop.
    #[instrument(skip_all)]
    fn drop(&mut self) {
        std::fs::remove_file(&self.path)
            .inspect_err(|err| tracing::warn!(?err, ?self.path, "Failed to delete lock file"))
            .ok();
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_refs.rs | lib/tests/test_refs.rs | // Copyright 2021 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use jj_lib::merge::Merge;
use jj_lib::op_store::RefTarget;
use jj_lib::refs::merge_ref_targets;
use jj_lib::repo::Repo as _;
use testutils::TestWorkspace;
use testutils::write_random_commit;
use testutils::write_random_commit_with_parents;
// Exercises `merge_ref_targets` (three-way merge of ref targets) across
// forward/backward/sideways moves, additions/removals, and pre-existing
// conflicts, over the commit graph drawn below.
#[test]
fn test_merge_ref_targets() {
    let test_workspace = TestWorkspace::init();
    let repo = &test_workspace.repo;
    // 6 7
    // |/
    // 5
    // | 3 4
    // | |/
    // | 2
    // |/
    // 1
    let mut tx = repo.start_transaction();
    let commit1 = write_random_commit(tx.repo_mut());
    let commit2 = write_random_commit_with_parents(tx.repo_mut(), &[&commit1]);
    let commit3 = write_random_commit_with_parents(tx.repo_mut(), &[&commit2]);
    let commit4 = write_random_commit_with_parents(tx.repo_mut(), &[&commit2]);
    let commit5 = write_random_commit_with_parents(tx.repo_mut(), &[&commit1]);
    let commit6 = write_random_commit_with_parents(tx.repo_mut(), &[&commit5]);
    let commit7 = write_random_commit_with_parents(tx.repo_mut(), &[&commit5]);
    let repo = tx.commit("test").unwrap();
    let target1 = RefTarget::normal(commit1.id().clone());
    let target2 = RefTarget::normal(commit2.id().clone());
    let target3 = RefTarget::normal(commit3.id().clone());
    let target4 = RefTarget::normal(commit4.id().clone());
    let target5 = RefTarget::normal(commit5.id().clone());
    let target6 = RefTarget::normal(commit6.id().clone());
    let _target7 = RefTarget::normal(commit7.id().clone());
    let index = repo.index();
    // Shorthand: three-way merge of (left, base, right) ref targets.
    let merge = |left: &RefTarget, base: &RefTarget, right: &RefTarget| {
        merge_ref_targets(index, left, base, right).unwrap()
    };
    // Left moved forward
    assert_eq!(merge(&target3, &target1, &target1), target3);
    // Right moved forward
    assert_eq!(merge(&target1, &target1, &target3), target3);
    // Left moved backward
    assert_eq!(merge(&target1, &target3, &target3), target1);
    // Right moved backward
    assert_eq!(merge(&target3, &target3, &target1), target1);
    // Left moved sideways
    assert_eq!(merge(&target4, &target3, &target3), target4);
    // Right moved sideways
    assert_eq!(merge(&target3, &target3, &target4), target4);
    // Both moved sideways ("A - B + A" - type conflict)
    assert_eq!(merge(&target4, &target3, &target4), target4);
    // Both added same target ("A - B + A" - type conflict)
    assert_eq!(merge(&target3, RefTarget::absent_ref(), &target3), target3);
    // Both removed ("A - B + A" - type conflict)
    assert_eq!(
        merge(RefTarget::absent_ref(), &target3, RefTarget::absent_ref()),
        RefTarget::absent()
    );
    // Left added target, right added descendant target
    assert_eq!(merge(&target2, RefTarget::absent_ref(), &target3), target3);
    // Right added target, left added descendant target
    assert_eq!(merge(&target3, RefTarget::absent_ref(), &target2), target3);
    // Both moved forward to same target
    assert_eq!(merge(&target3, &target1, &target3), target3);
    // Both moved forward, left moved further
    assert_eq!(merge(&target3, &target1, &target2), target3);
    // Both moved forward, right moved further
    assert_eq!(merge(&target2, &target1, &target3), target3);
    // Left and right moved forward to divergent targets
    assert_eq!(
        merge(&target3, &target1, &target4),
        RefTarget::from_legacy_form(
            [commit1.id().clone()],
            [commit3.id().clone(), commit4.id().clone()]
        )
    );
    // Left moved back, right moved forward
    assert_eq!(
        merge(&target1, &target2, &target3),
        RefTarget::from_legacy_form(
            [commit2.id().clone()],
            [commit1.id().clone(), commit3.id().clone()]
        )
    );
    // Right moved back, left moved forward
    assert_eq!(
        merge(&target3, &target2, &target1),
        RefTarget::from_legacy_form(
            [commit2.id().clone()],
            [commit3.id().clone(), commit1.id().clone()]
        )
    );
    // Left removed
    assert_eq!(
        merge(RefTarget::absent_ref(), &target3, &target3),
        RefTarget::absent()
    );
    // Right removed
    assert_eq!(
        merge(&target3, &target3, RefTarget::absent_ref()),
        RefTarget::absent()
    );
    // Left removed, right moved forward
    assert_eq!(
        merge(RefTarget::absent_ref(), &target1, &target3),
        RefTarget::from_merge(Merge::from_vec(vec![
            None,
            Some(commit1.id().clone()),
            Some(commit3.id().clone()),
        ]))
    );
    // Right removed, left moved forward
    assert_eq!(
        merge(&target3, &target1, RefTarget::absent_ref()),
        RefTarget::from_merge(Merge::from_vec(vec![
            Some(commit3.id().clone()),
            Some(commit1.id().clone()),
            None,
        ]))
    );
    // Left became conflicted, right moved forward
    assert_eq!(
        merge(
            &RefTarget::from_legacy_form(
                [commit2.id().clone()],
                [commit3.id().clone(), commit4.id().clone()]
            ),
            &target1,
            &target3
        ),
        // TODO: "removes" should have commit 2, just like it does in the next test case
        RefTarget::from_legacy_form(
            [commit1.id().clone()],
            [commit3.id().clone(), commit4.id().clone()]
        )
    );
    // Right became conflicted, left moved forward
    assert_eq!(
        merge(
            &target3,
            &target1,
            &RefTarget::from_legacy_form(
                [commit2.id().clone()],
                [commit3.id().clone(), commit4.id().clone()]
            ),
        ),
        RefTarget::from_legacy_form(
            [commit2.id().clone()],
            [commit4.id().clone(), commit3.id().clone()]
        )
    );
    // Existing conflict on left, right moves an "add" sideways
    //
    // Under the hood, the conflict is simplified as below:
    // ```
    // 3 4 5 3 4 5 5 4
    // 2 / => 2 3 => 2
    // 3
    // ```
    assert_eq!(
        merge(
            &RefTarget::from_legacy_form(
                [commit2.id().clone()],
                [commit3.id().clone(), commit4.id().clone()]
            ),
            &target3,
            &target5
        ),
        RefTarget::from_legacy_form(
            [commit2.id().clone()],
            [commit5.id().clone(), commit4.id().clone()]
        )
    );
    // Existing conflict on right, left moves an "add" sideways
    //
    // Under the hood, the conflict is simplified as below:
    // ```
    // 5 3 4 5 3 4 5 4
    // \ 2 => 3 2 => 2
    // 3
    // ```
    assert_eq!(
        merge(
            &target5,
            &target3,
            &RefTarget::from_legacy_form(
                [commit2.id().clone()],
                [commit3.id().clone(), commit4.id().clone()]
            ),
        ),
        RefTarget::from_legacy_form(
            [commit2.id().clone()],
            [commit5.id().clone(), commit4.id().clone()]
        )
    );
    // Existing conflict on left, right moves an "add" backwards, past point of
    // divergence
    //
    // Under the hood, the conflict is simplified as below:
    // ```
    // 3 4 1 3 4 1 1 4
    // 2 / => 2 3 => 2
    // 3
    // ```
    assert_eq!(
        merge(
            &RefTarget::from_legacy_form(
                [commit2.id().clone()],
                [commit3.id().clone(), commit4.id().clone()]
            ),
            &target3,
            &target1
        ),
        RefTarget::from_legacy_form(
            [commit2.id().clone()],
            [commit1.id().clone(), commit4.id().clone()]
        )
    );
    // Existing conflict on right, left moves an "add" backwards, past point of
    // divergence
    //
    // Under the hood, the conflict is simplified as below:
    // ```
    // 1 3 4 1 3 4 1 4
    // \ 2 => 3 2 => 2
    // 3
    // ```
    assert_eq!(
        merge(
            &target1,
            &target3,
            &RefTarget::from_legacy_form(
                [commit2.id().clone()],
                [commit3.id().clone(), commit4.id().clone()]
            ),
        ),
        RefTarget::from_legacy_form(
            [commit2.id().clone()],
            [commit1.id().clone(), commit4.id().clone()]
        )
    );
    // Existing conflict on left, right undoes one side of conflict
    assert_eq!(
        merge(
            &RefTarget::from_legacy_form(
                [commit2.id().clone()],
                [commit3.id().clone(), commit4.id().clone()]
            ),
            &target3,
            &target2
        ),
        target4
    );
    // Existing conflict on right, left undoes one side of conflict
    assert_eq!(
        merge(
            &target2,
            &target3,
            &RefTarget::from_legacy_form(
                [commit2.id().clone()],
                [commit3.id().clone(), commit4.id().clone()]
            ),
        ),
        target4
    );
    // Existing conflict on left, right moves one side of conflict to the other
    // side ("A - B + A" - type conflict)
    assert_eq!(
        merge(
            &RefTarget::from_legacy_form(
                [commit5.id().clone()], // not an ancestor of commit3, 4
                [commit3.id().clone(), commit4.id().clone()],
            ),
            &target4,
            &target3,
        ),
        target3
    );
    // Existing conflict on right, left moves one side of conflict to the other
    // side ("A - B + A" - type conflict)
    assert_eq!(
        merge(
            &target4,
            &target3,
            &RefTarget::from_legacy_form(
                [commit5.id().clone()], // not an ancestor of commit3, 4
                [commit3.id().clone(), commit4.id().clone()],
            ),
        ),
        target4
    );
    // Existing conflict on left, right makes unrelated update
    assert_eq!(
        merge(
            &RefTarget::from_legacy_form(
                [commit2.id().clone()],
                [commit3.id().clone(), commit4.id().clone()]
            ),
            &target5,
            &target6
        ),
        RefTarget::from_legacy_form(
            [commit2.id().clone(), commit5.id().clone()],
            [
                commit3.id().clone(),
                commit4.id().clone(),
                commit6.id().clone()
            ]
        )
    );
    // Existing conflict on right, left makes unrelated update
    assert_eq!(
        merge(
            &target6,
            &target5,
            &RefTarget::from_legacy_form(
                [commit2.id().clone()],
                [commit3.id().clone(), commit4.id().clone()]
            ),
        ),
        RefTarget::from_legacy_form(
            [commit5.id().clone(), commit2.id().clone()],
            [
                commit6.id().clone(),
                commit3.id().clone(),
                commit4.id().clone()
            ]
        )
    );
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_commit_concurrent.rs | lib/tests/test_commit_concurrent.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cmp::max;
use std::sync::Arc;
use std::thread;
use jj_lib::dag_walk;
use jj_lib::repo::ReadonlyRepo;
use jj_lib::repo::Repo as _;
use pollster::FutureExt as _;
use test_case::test_case;
use testutils::TestRepoBackend;
use testutils::TestWorkspace;
use testutils::write_random_commit;
/// Counts operations reachable from `repo`'s current operation that have at
/// most one parent, i.e. excluding the merge operations created when
/// concurrent transactions are reconciled.
fn count_non_merge_operations(repo: &Arc<ReadonlyRepo>) -> usize {
    let op_store = repo.op_store();
    let op_id = repo.op_id().clone();
    let mut num_ops = 0;
    // Walk the entire operation DAG starting from the head operation.
    for op_id in dag_walk::dfs(
        vec![op_id],
        |op_id| op_id.clone(),
        |op_id| op_store.read_operation(op_id).block_on().unwrap().parents,
    ) {
        let op = op_store.read_operation(&op_id).block_on().unwrap();
        if op.parents.len() <= 1 {
            num_ops += 1;
        }
    }
    num_ops
}
#[test_case(TestRepoBackend::Simple ; "simple backend")]
#[test_case(TestRepoBackend::Git ; "git backend")]
fn test_commit_parallel(backend: TestRepoBackend) {
    // This loads a Repo instance and creates and commits many concurrent
    // transactions from it. It then reloads the repo. That should merge all the
    // operations and all commits should be visible.
    let test_workspace = TestWorkspace::init_with_backend(backend);
    let repo = &test_workspace.repo;
    // At least 4 threads so contention is exercised even on small machines.
    let num_threads = max(num_cpus::get(), 4);
    thread::scope(|s| {
        for _ in 0..num_threads {
            let repo = repo.clone();
            s.spawn(move || {
                let mut tx = repo.start_transaction();
                write_random_commit(tx.repo_mut());
                tx.commit("test").unwrap();
            });
        }
    });
    let repo = repo.reload_at_head().unwrap();
    // One commit per thread plus the commit from the initial working-copy on top of
    // the root commit
    assert_eq!(repo.view().heads().len(), num_threads + 1);
    // One additional operation for the root operation, one for checking out the
    // initial commit.
    assert_eq!(count_non_merge_operations(&repo), num_threads + 2);
}
#[test_case(TestRepoBackend::Simple ; "simple backend")]
#[test_case(TestRepoBackend::Git ; "git backend")]
fn test_commit_parallel_instances(backend: TestRepoBackend) {
    // Like the test above but creates a new repo instance for every thread, which
    // makes it behave very similar to separate processes.
    let settings = testutils::user_settings();
    let test_workspace = TestWorkspace::init_with_backend_and_settings(backend, &settings);
    let test_env = &test_workspace.env;
    // At least 4 threads so contention is exercised even on small machines.
    let num_threads = max(num_cpus::get(), 4);
    thread::scope(|s| {
        for _ in 0..num_threads {
            let settings = settings.clone();
            // Each thread gets its own Repo loaded from disk, like a
            // separate process would.
            let repo = test_env.load_repo_at_head(&settings, test_workspace.repo_path());
            s.spawn(move || {
                let mut tx = repo.start_transaction();
                write_random_commit(tx.repo_mut());
                tx.commit("test").unwrap();
            });
        }
    });
    // One commit per thread plus the commit from the initial working-copy commit on
    // top of the root commit
    let repo = test_env.load_repo_at_head(&settings, test_workspace.repo_path());
    assert_eq!(repo.view().heads().len(), num_threads + 1);
    // One additional operation for the root operation, one for checking out the
    // initial commit.
    assert_eq!(count_non_merge_operations(&repo), num_threads + 2);
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_gpg.rs | lib/tests/test_gpg.rs | #[cfg(unix)]
use std::fs::Permissions;
use std::io::Write as _;
#[cfg(unix)]
use std::os::unix::prelude::PermissionsExt as _;
use std::process::Stdio;
use assert_matches::assert_matches;
use insta::assert_debug_snapshot;
use jj_lib::gpg_signing::GpgBackend;
use jj_lib::gpg_signing::GpgsmBackend;
use jj_lib::signing::SigStatus;
use jj_lib::signing::SignError;
use jj_lib::signing::SigningBackend as _;
use testutils::ensure_running_outside_ci;
use testutils::is_external_tool_installed;
/// Test-only ASCII-armored OpenPGP private key material, imported into a
/// scratch GPG home by `GpgEnvironment::new`. Contains two user ids
/// ("Someone" and "Someone Else", per the uid packets).
static GPG_PRIVATE_KEY: &str = r#"-----BEGIN PGP PRIVATE KEY BLOCK-----
lFgEZWI3pBYJKwYBBAHaRw8BAQdAaPLTNADvDWapjAPlxaUnx3HXQNIlwSz4EZrW
3Z7hxSwAAP9liwHZWJCGI2xW+XNqMT36qpIvoRcd5YPaKYwvnlkG1w+UtDNTb21l
b25lIChqaiB0ZXN0IHNpZ25pbmcga2V5KSA8c29tZW9uZUBleGFtcGxlLmNvbT6I
kwQTFgoAOxYhBKWOXukGcVPI9eXp6WOHhcsW/qBhBQJlYjekAhsDBQsJCAcCAiIC
BhUKCQgLAgQWAgMBAh4HAheAAAoJEGOHhcsW/qBhyBgBAMph1HkBkKlrZmsun+3i
kTEaOsWmaW/D6NEdMFiw0S/jAP9G3jOYGiZbUN3dWWB2246Oi7SaMTX8Xb2BrLP2
axCbC5RYBGVjxv8WCSsGAQQB2kcPAQEHQE8Oa4ahtVG29gIRssPxjqF4utn8iHPz
m5z/8lX/nl3eAAD5AZ6H2pNhiy2gnGkbPLHw3ZyY4d0NXzCa7qc9EXqOj+sRrLQ9
U29tZW9uZSBFbHNlIChqaiB0ZXN0IHNpZ25pbmcga2V5KSA8c29tZW9uZS1lbHNl
QGV4YW1wbGUuY29tPoiTBBMWCgA7FiEER1BAaEpU3TKUiUvFTtVW6XKeAA8FAmVj
xv8CGwMFCwkIBwICIgIGFQoJCAsCBBYCAwECHgcCF4AACgkQTtVW6XKeAA/6TQEA
2DkPm3LmH8uG6qLirtf62kbG7T+qljIsarQKFw3CGakA/AveCtrL7wVSpINiu1Rz
lBqJFFP2PqzT0CRfh94HSIMM
=6JC8
-----END PGP PRIVATE KEY BLOCK-----
"#;
/// Fingerprint of the test X.509 certificate in `GPGSM_PRIVATE_KEY`; written
/// to gpgsm's `trustlist.txt` by `GpgsmEnvironment::new`.
static GPGSM_FINGERPRINT: &str = "4C625C10FF7180164F19C6571D513E4E0BEA555C";
/// Test-only PKCS#12 bundle (X.509 certificate + private key) imported into
/// a scratch gpgsm home by `GpgsmEnvironment::new`.
static GPGSM_PRIVATE_KEY: &str = r#"-----BEGIN PKCS12-----
MIIEjAIBAzCCBEIGCSqGSIb3DQEHAaCCBDMEggQvMIIEKzCCAuIGCSqGSIb3DQEHBqCCAtMwggLP
AgEAMIICyAYJKoZIhvcNAQcBMFcGCSqGSIb3DQEFDTBKMCkGCSqGSIb3DQEFDDAcBAhW4TA5N5aE
qAICCAAwDAYIKoZIhvcNAgkFADAdBglghkgBZQMEASoEEDyELdhdBjhSJgPcPmmdJQWAggJgR3zZ
ZHJQj2aoCDuPQrxBkklgnDmTF91bDStMX9J6B7ucFS2V7YEO1YcwfdphRRYRCkTO0L4/qLO5l/xg
R0CwchpOUbo9Xl6MHiRZW7nTEU2bO1oq45lTzIQfJtWK9R/Nujvx3KyTIm+2ZGBrVHZ301rmCepU
YtSBmtoo+9rlp+lkkvGh+E9+gWjvDhXUkaxkUjRvx/cdOeEKDM8SmfhX6nZ7lzbnI9xQ4d7g4Sn2
9Y3F0HHe5+qBwd97i4xL1fFQs9vKVe2Iqr46B6T++GuClR+66yjGHxeQ6qjMSAEk4kPP8/LPI5i0
xC15U38J8dOyXX1jNP9W44nu1CpiX7MEuEyeEel4mDq5HzbQp2AOeS6Zg4VSf8nz8uSES48DrPMw
lDFH/YCAWHEPgcTBqMKO0+EnVL4297WNKA8aJiD/tKZZEyS1SGqoXX5eHazZQHD9PReZBv0gTFSz
Aq/K+Gcrsh7I5/lhyuQ6gwbi2uluCdwJirRzc85RrO5GsBxDHdcngy9ez0duLsOf7UVgIku21PmD
d4ureqfT1rQZkE+hGXUc+NNF7ZTvCDHETCJwVgqqZttZ43ILT2yBAG7dV+X7AUNLn/LpZmZ6adIH
gyviuhleTMGoSnPJXCMkEnU00QoROo7yceSikjuaLV33HXEpcepOBRXW91r7DLQWLHT+mX2W8/oA
UX0UKQ2al0R9JrWsQOdGwNcbNHfRldAmRBW7ktOUyXlN71BE90TPjqA2Xu5Ta1yIs+XuU5BUAWzb
v9agzbfU4ZOa9FgSxExE6iQ+NkCuJ+05bHeVVqtbBgqurwswggFBBgkqhkiG9w0BBwGgggEyBIIB
LjCCASowggEmBgsqhkiG9w0BDAoBAqCB7zCB7DBXBgkqhkiG9w0BBQ0wSjApBgkqhkiG9w0BBQww
HAQIjo1upovnkrcCAggAMAwGCCqGSIb3DQIJBQAwHQYJYIZIAWUDBAEqBBBF0GsMP3O/uZs3/OHS
Fdl/BIGQmrK7oxltgZa0TihDJ7OVmCnbLawSB5E38Wjo7gSwPa2/1ofg8yU9ZBjdlYQRFevZcj1I
rU307BQIPmjqxIMSV8K/F1OfvWWrfRDXwvvn1CHNM4VuqfoJzwfYsD2jEedXAHN7a90sjtZeDqMs
ibOEeIIN2hOh6FBnaO2f4QVXTUoe4k0BJ2WTMtjoIJod0LKiMSUwIwYJKoZIhvcNAQkVMRYEFExi
XBD/cYAWTxnGVx1RPk4L6lVcMEEwMTANBglghkgBZQMEAgEFAAQgj7Jjd7XJ3icDiNTp080RDoUw
J+57G8w4qtRQPRTuOvcECGz+PguPT+pLAgIIAA==
-----END PKCS12-----
"#;
/// Scratch GPG home directory with the test key imported.
struct GpgEnvironment {
    /// Held so the temp dir isn't deleted while the environment is in use.
    homedir: tempfile::TempDir,
}
impl GpgEnvironment {
    /// Creates a temp GPG home and imports `GPG_PRIVATE_KEY` into it via the
    /// `gpg` CLI. Returns the failed process output if the import fails.
    fn new() -> Result<Self, std::process::Output> {
        let dir = tempfile::Builder::new()
            .prefix("gpg-test-")
            .tempdir()
            .unwrap();
        let path = dir.path();
        // Restrict the homedir to the owner — NOTE(review): presumably to
        // satisfy gpg's homedir permission checks; confirm.
        #[cfg(unix)]
        std::fs::set_permissions(path, Permissions::from_mode(0o700)).unwrap();
        let mut gpg = std::process::Command::new("gpg")
            .arg("--homedir")
            .arg(path)
            .arg("--import")
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()
            .unwrap();
        // Feed the armored key on stdin.
        gpg.stdin
            .as_mut()
            .unwrap()
            .write_all(GPG_PRIVATE_KEY.as_bytes())
            .unwrap();
        gpg.stdin.as_mut().unwrap().flush().unwrap();
        let res = gpg.wait_with_output().unwrap();
        if !res.status.success() {
            eprintln!("Failed to add private key to gpg-agent. Make sure it is running!");
            eprintln!("{}", String::from_utf8_lossy(&res.stderr));
            return Err(res);
        }
        Ok(Self { homedir: dir })
    }
}
/// Throwaway gpgsm (X.509/S-MIME) home directory for a single test; deleted
/// on drop.
struct GpgsmEnvironment {
    // Owns the temp dir so it lives as long as the environment.
    homedir: tempfile::TempDir,
}
impl GpgsmEnvironment {
    /// Creates a fresh gpgsm home directory, pre-trusts the test certificate,
    /// and imports `GPGSM_PRIVATE_KEY` into it.
    ///
    /// On failure, returns the captured `gpgsm --import` output.
    fn new() -> Result<Self, std::process::Output> {
        let dir = tempfile::Builder::new()
            .prefix("gpgsm-test-")
            .tempdir()
            .unwrap();
        let path = dir.path();
        // gpgsm warns about a home directory accessible to other users, so
        // tighten permissions on unix.
        #[cfg(unix)]
        std::fs::set_permissions(path, Permissions::from_mode(0o700)).unwrap();
        // Pre-trust the test certificate by fingerprint so verification does
        // not report an untrusted chain.
        std::fs::write(
            path.join("trustlist.txt"),
            format!("{GPGSM_FINGERPRINT} S\n"),
        )
        .unwrap();
        let mut gpgsm = std::process::Command::new("gpgsm")
            .arg("--homedir")
            .arg(path)
            .arg("--batch")
            .arg("--pinentry-mode")
            .arg("loopback")
            .arg("--import")
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()
            .unwrap();
        // Feed the PKCS#12 bundle through the child's stdin.
        gpgsm
            .stdin
            .as_mut()
            .unwrap()
            .write_all(GPGSM_PRIVATE_KEY.as_bytes())
            .unwrap();
        gpgsm.stdin.as_mut().unwrap().flush().unwrap();
        let res = gpgsm.wait_with_output().unwrap();
        // Exit status 2 is tolerated here. NOTE(review): presumably gpgsm's
        // partial-success / already-imported code — confirm against gpgsm
        // documentation.
        if !res.status.success() && res.status.code() != Some(2) {
            eprintln!("Failed to add certificate.");
            eprintln!("{}", String::from_utf8_lossy(&res.stderr));
            return Err(res);
        }
        Ok(Self { homedir: dir })
    }
}
/// Returns early from the enclosing test on macOS when the temp directory's
/// path would push unix-domain-socket paths (presumably the agent sockets
/// gpg creates under the temporary home directory) past the OS limit.
///
/// Expands to a block in statement position; the `return` exits the *test
/// function* that invokes the macro.
macro_rules! socket_path_length_guard {
    () => {{
        // ref: https://developer.apple.com/forums/thread/756756?answerId=790507022#790507022
        let max_socket_len = 104;
        // space needed to account for socket path suffixes
        let socket_length = 36;
        if cfg!(target_os = "macos")
            && (std::env::temp_dir().as_os_str().len() + socket_length) > max_socket_len
        {
            eprintln!(
                "Skipping test because the temporary directory's path is too long for unix domain \
sockets on macOS"
            );
            return;
        }
    }};
}
/// Skips the enclosing test when `gpg` cannot be used in this environment.
///
/// `ensure_running_outside_ci` prevents the test from being silently
/// skipped in CI, where the tool is expected to be present.
macro_rules! gpg_guard {
    () => {
        socket_path_length_guard!();
        if !is_external_tool_installed("gpg") {
            ensure_running_outside_ci("`gpg` must be in the PATH");
            eprintln!("Skipping test because gpg is not installed on the system");
            return;
        }
    };
}
/// Skips the enclosing test when `gpgsm` cannot be used in this environment.
///
/// `ensure_running_outside_ci` prevents the test from being silently
/// skipped in CI, where the tool is expected to be present.
macro_rules! gpgsm_guard {
    () => {
        socket_path_length_guard!();
        if !is_external_tool_installed("gpgsm") {
            ensure_running_outside_ci("`gpgsm` must be in the PATH");
            eprintln!("Skipping test because gpgsm is not installed on the system");
            return;
        }
    };
}
/// Builds a `GpgBackend` that signs as `someone@example.com` inside the
/// isolated home directory of `env`.
///
/// The faked system time is not strictly needed by the current tests, but
/// keeps produced signatures deterministic (useful for end-to-end CLI
/// tests).
fn gpg_backend(env: &GpgEnvironment) -> GpgBackend {
    let backend = GpgBackend::new("gpg".into(), false, "someone@example.com".to_owned());
    backend.with_extra_args(&[
        "--homedir".into(),
        env.homedir.path().as_os_str().into(),
        "--faked-system-time=1701042000!".into(),
    ])
}
/// Builds a `GpgsmBackend` that signs as `someone@example.com` inside the
/// isolated home directory of `env`.
///
/// The faked system time is not strictly needed by the current tests, but
/// keeps produced signatures deterministic (useful for end-to-end CLI
/// tests).
fn gpgsm_backend(env: &GpgsmEnvironment) -> GpgsmBackend {
    let backend = GpgsmBackend::new("gpgsm".into(), false, "someone@example.com".to_owned());
    backend.with_extra_args(&[
        "--homedir".into(),
        env.homedir.path().as_os_str().into(),
        "--faked-system-time=1742477110!".into(),
    ])
}
#[test]
#[cfg_attr(windows, ignore = "stuck randomly on Windows CI #3140")] // FIXME
fn gpg_signing_roundtrip() {
    gpg_guard!();
    let env = GpgEnvironment::new().unwrap();
    let backend = gpg_backend(&env);
    let data = b"hello world";
    // Sign with the backend's default key (no explicit key name).
    let signature = backend.sign(data, None).unwrap();
    let check = backend.verify(data, &signature).unwrap();
    assert_eq!(check.status, SigStatus::Good);
    assert_eq!(check.key.unwrap(), "638785CB16FEA061");
    assert_eq!(
        check.display.unwrap(),
        "Someone (jj test signing key) <someone@example.com>"
    );
    // The same signature over different data must verify as Bad, while still
    // identifying the signing key.
    let check = backend.verify(b"so so bad", &signature).unwrap();
    assert_eq!(check.status, SigStatus::Bad);
    assert_eq!(check.key.unwrap(), "638785CB16FEA061");
    assert_eq!(
        check.display.unwrap(),
        "Someone (jj test signing key) <someone@example.com>"
    );
}
#[test]
#[cfg_attr(windows, ignore = "stuck randomly on Windows CI #3140")] // FIXME
fn gpg_signing_roundtrip_explicit_key() {
    gpg_guard!();
    let env = GpgEnvironment::new().unwrap();
    let backend = gpg_backend(&env);
    let data = b"hello world";
    // Sign with an explicitly named key instead of the backend default.
    let signature = backend.sign(data, Some("Someone Else")).unwrap();
    assert_debug_snapshot!(backend.verify(data, &signature).unwrap(), @r#"
    Verification {
        status: Good,
        key: Some(
            "4ED556E9729E000F",
        ),
        display: Some(
            "Someone Else (jj test signing key) <someone-else@example.com>",
        ),
    }
    "#);
    // Tampered data: status flips to Bad but key/display stay resolvable.
    assert_debug_snapshot!(backend.verify(b"so so bad", &signature).unwrap(), @r#"
    Verification {
        status: Bad,
        key: Some(
            "4ED556E9729E000F",
        ),
        display: Some(
            "Someone Else (jj test signing key) <someone-else@example.com>",
        ),
    }
    "#);
}
#[test]
#[cfg_attr(windows, ignore = "stuck randomly on Windows CI #3140")] // FIXME
fn gpg_unknown_key() {
    gpg_guard!();
    let env = GpgEnvironment::new().unwrap();
    let backend = gpg_backend(&env);
    // A syntactically valid signature made by a key that is not present in
    // this gpg home directory.
    let signature = br"-----BEGIN PGP SIGNATURE-----
iHUEABYKAB0WIQQs238pU7eC/ROoPJ0HH+PjJN1zMwUCZWPa5AAKCRAHH+PjJN1z
MyylAP9WQ3sZdbC4b1C+/nxs+Wl+rfwzeQWGbdcsBMyDABcpmgD/U+4KdO7eZj/I
e+U6bvqw3pOBoI53Th35drQ0qPI+jAE=
=kwsk
-----END PGP SIGNATURE-----";
    // With the key unavailable, the status is Unknown regardless of whether
    // the data matches; the key id is still extracted from the signature.
    assert_debug_snapshot!(backend.verify(b"hello world", signature).unwrap(), @r#"
    Verification {
        status: Unknown,
        key: Some(
            "071FE3E324DD7333",
        ),
        display: None,
    }
    "#);
    assert_debug_snapshot!(backend.verify(b"so bad", signature).unwrap(), @r#"
    Verification {
        status: Unknown,
        key: Some(
            "071FE3E324DD7333",
        ),
        display: None,
    }
    "#);
}
#[test]
#[cfg_attr(windows, ignore = "stuck randomly on Windows CI #3140")] // FIXME
fn gpg_invalid_signature() {
    gpg_guard!();
    let env = GpgEnvironment::new().unwrap();
    let backend = gpg_backend(&env);
    // Well-delimited but garbage signature body.
    let signature = br"-----BEGIN PGP SIGNATURE-----
super duper invalid
-----END PGP SIGNATURE-----";
    // Small data: gpg command will exit late.
    assert_matches!(
        backend.verify(b"a", signature),
        Err(SignError::InvalidSignatureFormat)
    );
    // Large data: gpg command will exit early because the signature is invalid.
    assert_matches!(
        backend.verify(&b"a".repeat(100 * 1024), signature),
        Err(SignError::InvalidSignatureFormat)
    );
}
#[test]
#[cfg_attr(windows, ignore = "stuck randomly on Windows CI #3140")] // FIXME
fn gpgsm_signing_roundtrip() {
    gpgsm_guard!();
    let env = GpgsmEnvironment::new().unwrap();
    let backend = gpgsm_backend(&env);
    let data = b"hello world";
    // Sign with the backend's default key (no explicit key name).
    let signature = backend.sign(data, None);
    let signature = signature.unwrap();
    let check = backend.verify(data, &signature).unwrap();
    assert_eq!(check.status, SigStatus::Good);
    assert_eq!(check.key.unwrap(), GPGSM_FINGERPRINT);
    assert_eq!(
        check.display.unwrap(),
        "/CN=JJ Cert/O=GPGSM Signing Test/EMail=someone@example.com"
    );
    // The same signature over different data must verify as Bad, while still
    // identifying the certificate.
    let check = backend.verify(b"so so bad", &signature).unwrap();
    assert_eq!(check.status, SigStatus::Bad);
    assert_eq!(check.key.unwrap(), GPGSM_FINGERPRINT);
    assert_eq!(
        check.display.unwrap(),
        "/CN=JJ Cert/O=GPGSM Signing Test/EMail=someone@example.com"
    );
}
#[test]
#[cfg_attr(windows, ignore = "stuck randomly on Windows CI #3140")] // FIXME
fn gpgsm_signing_roundtrip_explicit_key() {
    gpgsm_guard!();
    let env = GpgsmEnvironment::new().unwrap();
    let backend = gpgsm_backend(&env);
    let data = b"hello world";
    // Sign with an explicitly named key (here, the cert's email address).
    let signature = backend.sign(data, Some("someone@example.com")).unwrap();
    assert_debug_snapshot!(backend.verify(data, &signature).unwrap(), @r#"
    Verification {
        status: Good,
        key: Some(
            "4C625C10FF7180164F19C6571D513E4E0BEA555C",
        ),
        display: Some(
            "/CN=JJ Cert/O=GPGSM Signing Test/EMail=someone@example.com",
        ),
    }
    "#);
    // Tampered data: status flips to Bad but key/display stay resolvable.
    assert_debug_snapshot!(backend.verify(b"so so bad", &signature).unwrap(), @r#"
    Verification {
        status: Bad,
        key: Some(
            "4C625C10FF7180164F19C6571D513E4E0BEA555C",
        ),
        display: Some(
            "/CN=JJ Cert/O=GPGSM Signing Test/EMail=someone@example.com",
        ),
    }
    "#);
}
#[test]
#[cfg_attr(windows, ignore = "stuck randomly on Windows CI #3140")] // FIXME
fn gpgsm_unknown_key() {
    gpgsm_guard!();
    let env = GpgsmEnvironment::new().unwrap();
    let backend = gpgsm_backend(&env);
    // A syntactically valid CMS signature made by a certificate that is not
    // present in this gpgsm home directory.
    let signature = br"-----BEGIN SIGNED MESSAGE-----
MIAGCSqGSIb3DQEHAqCAMIACAQExDzANBglghkgBZQMEAgEFADCABgkqhkiG9w0B
BwEAADGCAnYwggJyAgEBMDUwKTEaMBgGA1UEChMRWDUwOSBTaWduaW5nIFRlc3Qx
CzAJBgNVBAMTAkpKAgh8bds9GXiZmzANBglghkgBZQMEAgEFAKCBkzAYBgkqhkiG
9w0BCQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJBTEPFw0yNTAzMTgyMDAzNDBa
MCgGCSqGSIb3DQEJDzEbMBkwCwYJYIZIAWUDBAECMAoGCCqGSIb3DQMHMC8GCSqG
SIb3DQEJBDEiBCCpSJBPLw9Hm4+Bl2lLMBhLDS7Rwc0qHsD7hdKZoZKkRzANBgkq
hkiG9w0BAQEFAASCAYANOvWCJuOKn018s731TWFHq5wS13xB7L83/2q8Mi9cQ3YT
kq8CQlyJV0spIW7dwztjsllX8X2szE4N0l83ghf3ol6B6n9Vyb844oKgb6cwc9uX
S8D1yiaj1Mfft3PDp+THH+ESezw1Djzj7E53Yx5j3kna/ylJhheg3raWit2MUxI0
V42Svm4PLcpOf+ywzstlSSx9p6Y8woctdkMkpyivNCsfwlRARFGSTP3G9DXZNv03
WZ51zlMT8lsYbT9EJUxzXuEpcJZJL0TYcbJ3n7uSopivHk843onIc71gbH/ByuMp
qokJ7jYzEMrk0YowzsD7wrtwhF5OgpW5ane8vuyquLOrRNX9H/TooE4+8OCM6nvQ
w7jgv1/hsdtDnZCkVaM0plhb2btE7Awgol5M8f9IDz1Z+b0t4ydc/iqHtE9yaqvZ
+aT9XXKKcj9XBhi1S790B4r8YoDyeiyzBs0gwvMuWjWMS7wixTbgx+IkQUrkgTLY
xiNbRmGtEonl9d8JS/IAAAAAAAA=
-----END SIGNED MESSAGE-----
";
    // With the certificate unavailable, the status is Unknown regardless of
    // whether the data matches; unlike the gpg case, no key id is reported.
    assert_debug_snapshot!(backend.verify(b"hello world", signature).unwrap(), @r#"
    Verification {
        status: Unknown,
        key: None,
        display: None,
    }
    "#);
    assert_debug_snapshot!(backend.verify(b"so bad", signature).unwrap(), @r#"
    Verification {
        status: Unknown,
        key: None,
        display: None,
    }
    "#);
}
#[test]
#[cfg_attr(windows, ignore = "stuck randomly on Windows CI #3140")] // FIXME
fn gpgsm_invalid_signature() {
    gpgsm_guard!();
    let env = GpgsmEnvironment::new().unwrap();
    let backend = gpgsm_backend(&env);
    // Well-delimited but garbage signature body.
    let signature = br"-----BEGIN SIGNED MESSAGE-----
super duper invalid
-----END SIGNED MESSAGE-----";
    // Small data: gpgsm command will exit late.
    assert_matches!(
        backend.verify(b"a", signature),
        Err(SignError::InvalidSignatureFormat)
    );
    // Large data: gpgsm command will exit early because the signature is invalid.
    assert_matches!(
        backend.verify(&b"a".repeat(100 * 1024), signature),
        Err(SignError::InvalidSignatureFormat)
    );
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_default_revset_graph_iterator.rs | lib/tests/test_default_revset_graph_iterator.rs | // Copyright 2021 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use itertools::Itertools as _;
use jj_lib::backend::CommitId;
use jj_lib::commit::Commit;
use jj_lib::default_index::DefaultReadonlyIndex;
use jj_lib::default_index::DefaultReadonlyIndexRevset;
use jj_lib::graph::GraphEdge;
use jj_lib::repo::ReadonlyRepo;
use jj_lib::repo::Repo as _;
use jj_lib::revset::ResolvedExpression;
use test_case::test_case;
use test_case::test_matrix;
use testutils::TestRepo;
use testutils::write_random_commit;
use testutils::write_random_commit_with_parents;
/// Evaluates a revset containing exactly the given commits, going through
/// the default index implementation directly.
fn revset_for_commits(repo: &ReadonlyRepo, commits: &[&Commit]) -> DefaultReadonlyIndexRevset {
    let ids = commits.iter().map(|commit| commit.id().clone()).collect();
    let expression = ResolvedExpression::Commits(ids);
    let index: &DefaultReadonlyIndex = repo.readonly_index().downcast_ref().unwrap();
    index
        .evaluate_revset_impl(&expression, repo.store())
        .unwrap()
}
/// Shorthand for a direct graph edge targeting `commit`'s id.
fn direct(commit: &Commit) -> GraphEdge<CommitId> {
    let id = commit.id().clone();
    GraphEdge::direct(id)
}
/// Shorthand for an indirect graph edge targeting `commit`'s id.
fn indirect(commit: &Commit) -> GraphEdge<CommitId> {
    let id = commit.id().clone();
    GraphEdge::indirect(id)
}
/// Shorthand for a missing-target graph edge referencing `commit`'s id.
fn missing(commit: &Commit) -> GraphEdge<CommitId> {
    let id = commit.id().clone();
    GraphEdge::missing(id)
}
#[test_case(false, 0; "keep transitive edges")]
#[test_case(true, 0; "skip transitive edges")]
#[test_matrix(true, 60..64; "skip transitive edges")]
fn test_graph_iterator_linearized(skip_transitive_edges: bool, padding: u32) {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Tests that a fork and a merge becomes a single edge:
    // D
    // |\      D
    // b c  => :
    // |/      A
    // A       ~
    // |
    // root
    let mut tx = repo.start_transaction();
    // Pad commits to move interesting parts to boundary of u64 bit set
    for _ in 0..padding {
        write_random_commit(tx.repo_mut());
    }
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b, &commit_c]);
    let repo = tx.commit("test").unwrap();
    let root_commit = repo.store().root_commit();
    // Only A and D are in the set; B and C collapse into one indirect edge.
    let revset = revset_for_commits(repo.as_ref(), &[&commit_a, &commit_d]);
    let commits: Vec<_> = revset
        .iter_graph_impl(skip_transitive_edges)
        .try_collect()
        .unwrap();
    assert_eq!(commits.len(), 2);
    // Children are emitted before parents (D before A).
    assert_eq!(commits[0].0, *commit_d.id());
    assert_eq!(commits[1].0, *commit_a.id());
    assert_eq!(commits[0].1, vec![indirect(&commit_a)]);
    assert_eq!(commits[1].1, vec![missing(&root_commit)]);
}
#[test_case(false, 0; "keep transitive edges")]
#[test_case(true, 0; "skip transitive edges")]
#[test_matrix(true, 58..64; "skip transitive edges")]
fn test_graph_iterator_virtual_octopus(skip_transitive_edges: bool, padding: u32) {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Tests that merges outside the set can result in more parent edges than there
    // was in the input:
    //
    //     F
    //     |\
    //     d e        F
    //     |\|\  =>  /|\
    //     A B C    A B C
    //      \|/     ~ ~ ~
    //      root
    let mut tx = repo.start_transaction();
    // Pad commits to move interesting parts to boundary of u64 bit set
    for _ in 0..padding {
        write_random_commit(tx.repo_mut());
    }
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit(tx.repo_mut());
    let commit_c = write_random_commit(tx.repo_mut());
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a, &commit_b]);
    let commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b, &commit_c]);
    let commit_f = write_random_commit_with_parents(tx.repo_mut(), &[&commit_d, &commit_e]);
    let repo = tx.commit("test").unwrap();
    let root_commit = repo.store().root_commit();
    // D and E are outside the set; F ends up with three indirect parents.
    let revset = revset_for_commits(repo.as_ref(), &[&commit_a, &commit_b, &commit_c, &commit_f]);
    let commits: Vec<_> = revset
        .iter_graph_impl(skip_transitive_edges)
        .try_collect()
        .unwrap();
    assert_eq!(commits.len(), 4);
    assert_eq!(commits[0].0, *commit_f.id());
    assert_eq!(commits[1].0, *commit_c.id());
    assert_eq!(commits[2].0, *commit_b.id());
    assert_eq!(commits[3].0, *commit_a.id());
    assert_eq!(
        commits[0].1,
        vec![
            indirect(&commit_a),
            indirect(&commit_b),
            indirect(&commit_c),
        ]
    );
    assert_eq!(commits[1].1, vec![missing(&root_commit)]);
    assert_eq!(commits[2].1, vec![missing(&root_commit)]);
    assert_eq!(commits[3].1, vec![missing(&root_commit)]);
}
#[test_case(false, 0; "keep transitive edges")]
#[test_case(true, 0; "skip transitive edges")]
#[test_matrix(true, 59..64; "skip transitive edges")]
fn test_graph_iterator_simple_fork(skip_transitive_edges: bool, padding: u32) {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Tests that the branch with "C" gets emitted correctly:
    // E
    // |
    // d
    // |  C       E C
    // |/     =>  |/
    // b          A
    // |          ~
    // A
    // |
    // root
    let mut tx = repo.start_transaction();
    // Pad commits to move interesting parts to boundary of u64 bit set
    for _ in 0..padding {
        write_random_commit(tx.repo_mut());
    }
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_d]);
    let repo = tx.commit("test").unwrap();
    let root_commit = repo.store().root_commit();
    // B and D are outside the set; both E and C resolve indirectly to A.
    let revset = revset_for_commits(repo.as_ref(), &[&commit_a, &commit_c, &commit_e]);
    let commits: Vec<_> = revset
        .iter_graph_impl(skip_transitive_edges)
        .try_collect()
        .unwrap();
    assert_eq!(commits.len(), 3);
    assert_eq!(commits[0].0, *commit_e.id());
    assert_eq!(commits[1].0, *commit_c.id());
    assert_eq!(commits[2].0, *commit_a.id());
    assert_eq!(commits[0].1, vec![indirect(&commit_a)]);
    assert_eq!(commits[1].1, vec![indirect(&commit_a)]);
    assert_eq!(commits[2].1, vec![missing(&root_commit)]);
}
#[test_case(false, 0; "keep transitive edges")]
#[test_case(true, 0; "skip transitive edges")]
#[test_matrix(true, 58..64; "skip transitive edges")]
fn test_graph_iterator_multiple_missing(skip_transitive_edges: bool, padding: u32) {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Tests that we get missing edges to "a" and "c" and not just one missing edge
    // to the root.
    //    F
    //   / \        F
    //  d   e  =>  /|\
    //  |\ /|     ~ B ~
    //  a B c       ~
    //   \|/
    //   root
    let mut tx = repo.start_transaction();
    // Pad commits to move interesting parts to boundary of u64 bit set
    for _ in 0..padding {
        write_random_commit(tx.repo_mut());
    }
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit(tx.repo_mut());
    let commit_c = write_random_commit(tx.repo_mut());
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a, &commit_b]);
    let commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b, &commit_c]);
    let commit_f = write_random_commit_with_parents(tx.repo_mut(), &[&commit_d, &commit_e]);
    let repo = tx.commit("test").unwrap();
    let root_commit = repo.store().root_commit();
    // A and C exist but are outside the set, so they appear as distinct
    // missing edges rather than being merged into one edge to the root.
    let revset = revset_for_commits(repo.as_ref(), &[&commit_b, &commit_f]);
    let commits: Vec<_> = revset
        .iter_graph_impl(skip_transitive_edges)
        .try_collect()
        .unwrap();
    assert_eq!(commits.len(), 2);
    assert_eq!(commits[0].0, *commit_f.id());
    assert_eq!(commits[1].0, *commit_b.id());
    assert_eq!(
        commits[0].1,
        vec![missing(&commit_a), indirect(&commit_b), missing(&commit_c)]
    );
    assert_eq!(commits[1].1, vec![missing(&root_commit)]);
}
#[test_case(false, 0; "keep transitive edges")]
#[test_case(true, 0; "skip transitive edges")]
#[test_matrix(true, 58..64; "skip transitive edges")]
fn test_graph_iterator_edge_to_ancestor(skip_transitive_edges: bool, padding: u32) {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Tests that we get both an edge from F to D and to D's ancestor C if we keep
    // transitive edges and only the edge from F to D if we skip transitive
    // edges:
    //  F          F
    //  |\         |\
    //  D e        D :
    //  |\|   =>   |\:
    //  b C        ~ C
    //    |          ~
    //    a
    //    |
    //   root
    let mut tx = repo.start_transaction();
    // Pad commits to move interesting parts to boundary of u64 bit set
    for _ in 0..padding {
        write_random_commit(tx.repo_mut());
    }
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit(tx.repo_mut());
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b, &commit_c]);
    let commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_c]);
    let commit_f = write_random_commit_with_parents(tx.repo_mut(), &[&commit_d, &commit_e]);
    let repo = tx.commit("test").unwrap();
    let revset = revset_for_commits(repo.as_ref(), &[&commit_c, &commit_d, &commit_f]);
    let commits: Vec<_> = revset
        .iter_graph_impl(skip_transitive_edges)
        .try_collect()
        .unwrap();
    assert_eq!(commits.len(), 3);
    assert_eq!(commits[0].0, *commit_f.id());
    assert_eq!(commits[1].0, *commit_d.id());
    assert_eq!(commits[2].0, *commit_c.id());
    if skip_transitive_edges {
        // F -> C is redundant because it's reachable via F -> D -> C.
        assert_eq!(commits[0].1, vec![direct(&commit_d)]);
    } else {
        assert_eq!(commits[0].1, vec![direct(&commit_d), indirect(&commit_c),]);
    }
    assert_eq!(commits[1].1, vec![missing(&commit_b), direct(&commit_c)]);
    assert_eq!(commits[2].1, vec![missing(&commit_a)]);
}
#[test_case(false, 0; "keep transitive edges")]
#[test_case(true, 0; "skip transitive edges")]
#[test_matrix(true, 54..64; "skip transitive edges")]
fn test_graph_iterator_edge_escapes_from_(skip_transitive_edges: bool, padding: u32) {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Tests a more complex case for skipping transitive edges.
    //   J
    //  /|
    // | i           J
    // | |\         /:
    // | | H       | H
    // G | |       G :
    // | e f   =>  : D
    // |  \|\      :/
    // |   D |     A
    //  \ /  c     |
    //   b  /     root
    //   |/
    //   A
    //   |
    //  root
    let mut tx = repo.start_transaction();
    // Pad commits to move interesting parts to boundary of u64 bit set
    for _ in 0..padding {
        write_random_commit(tx.repo_mut());
    }
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_d]);
    let commit_f = write_random_commit_with_parents(tx.repo_mut(), &[&commit_d, &commit_c]);
    let commit_g = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_h = write_random_commit_with_parents(tx.repo_mut(), &[&commit_f]);
    let commit_i = write_random_commit_with_parents(tx.repo_mut(), &[&commit_e, &commit_h]);
    let commit_j = write_random_commit_with_parents(tx.repo_mut(), &[&commit_g, &commit_i]);
    let repo = tx.commit("test").unwrap();
    let root_commit = repo.store().root_commit();
    let revset = revset_for_commits(
        repo.as_ref(),
        &[&commit_a, &commit_d, &commit_g, &commit_h, &commit_j],
    );
    let commits: Vec<_> = revset
        .iter_graph_impl(skip_transitive_edges)
        .try_collect()
        .unwrap();
    assert_eq!(commits.len(), 5);
    assert_eq!(commits[0].0, *commit_j.id());
    assert_eq!(commits[1].0, *commit_h.id());
    assert_eq!(commits[2].0, *commit_g.id());
    assert_eq!(commits[3].0, *commit_d.id());
    assert_eq!(commits[4].0, *commit_a.id());
    if skip_transitive_edges {
        // J -> D and H -> A are dropped: both are reachable through other
        // edges within the set.
        assert_eq!(commits[0].1, vec![direct(&commit_g), indirect(&commit_h)]);
        assert_eq!(commits[1].1, vec![indirect(&commit_d)]);
    } else {
        assert_eq!(
            commits[0].1,
            vec![direct(&commit_g), indirect(&commit_d), indirect(&commit_h)]
        );
        assert_eq!(commits[1].1, vec![indirect(&commit_d), indirect(&commit_a)]);
    }
    assert_eq!(commits[2].1, vec![indirect(&commit_a)]);
    assert_eq!(commits[3].1, vec![indirect(&commit_a)]);
    assert_eq!(commits[4].1, vec![missing(&root_commit)]);
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_annotate.rs | lib/tests/test_annotate.rs | // Copyright 2024 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Write as _;
use std::sync::Arc;
use itertools::Itertools as _;
use jj_lib::annotate::FileAnnotation;
use jj_lib::annotate::FileAnnotator;
use jj_lib::backend::CommitId;
use jj_lib::backend::MillisSinceEpoch;
use jj_lib::backend::Signature;
use jj_lib::backend::Timestamp;
use jj_lib::backend::TreeValue;
use jj_lib::commit::Commit;
use jj_lib::merged_tree::MergedTree;
use jj_lib::repo::MutableRepo;
use jj_lib::repo::Repo;
use jj_lib::repo_path::RepoPath;
use jj_lib::revset::ResolvedRevsetExpression;
use jj_lib::revset::RevsetExpression;
use testutils::TestRepo;
use testutils::create_tree;
use testutils::read_file;
use testutils::repo_path;
/// Returns a closure that writes a commit with the given description,
/// parents, and tree into `mut_repo`.
///
/// Every commit uses the same fixed author/committer signature with a zero
/// timestamp, which keeps commit IDs stable between runs and makes test
/// failures easier to debug.
fn create_commit_fn(
    mut_repo: &mut MutableRepo,
) -> impl FnMut(&str, &[&CommitId], MergedTree) -> Commit {
    let stable_signature = Signature {
        name: "Some One".to_owned(),
        email: "some.one@example.com".to_owned(),
        timestamp: Timestamp {
            timestamp: MillisSinceEpoch(0),
            tz_offset: 0,
        },
    };
    move |description, parent_ids, tree| {
        let parents = parent_ids.iter().map(|id| (*id).clone()).collect();
        mut_repo
            .new_commit(parents, tree)
            .set_author(stable_signature.clone())
            .set_committer(stable_signature.clone())
            .set_description(description)
            .write()
            .unwrap()
    }
}
/// Annotates `file_path` as of `commit`, searching across the whole repo.
fn annotate(repo: &dyn Repo, commit: &Commit, file_path: &RepoPath) -> String {
    annotate_within(repo, commit, &RevsetExpression::all(), file_path)
}
/// Annotates `file_path` as of `commit`, restricting origin search to the
/// given `domain` revset, and renders the result via `format_annotation`.
fn annotate_within(
    repo: &dyn Repo,
    commit: &Commit,
    domain: &Arc<ResolvedRevsetExpression>,
    file_path: &RepoPath,
) -> String {
    let mut annotator = FileAnnotator::from_commit(commit, file_path).unwrap();
    annotator.compute(repo, domain).unwrap();
    let annotation = annotator.to_annotation();
    format_annotation(repo, &annotation)
}
/// Annotates the file content as found in `commit`'s *parent* tree, while
/// starting the annotation search from `commit` itself.
fn annotate_parent_tree(repo: &dyn Repo, commit: &Commit, file_path: &RepoPath) -> String {
    let parent_tree = commit.parent_tree(repo).unwrap();
    let path_value = parent_tree
        .path_value(file_path)
        .unwrap()
        .into_resolved()
        .unwrap();
    let text = match path_value {
        Some(TreeValue::File { id, .. }) => read_file(repo.store(), file_path, &id),
        other => panic!("unexpected path value: {other:?}"),
    };
    let mut annotator = FileAnnotator::with_file_content(commit.id(), file_path, text);
    annotator.compute(repo, &RevsetExpression::all()).unwrap();
    format_annotation(repo, &annotator.to_annotation())
}
/// Renders an annotation as one `"<description>:<1-based line number><sigil>: <content>"`
/// entry per line. The sigil is `*` when the line's exact origin could not be
/// determined (`origin` is `Err`), and a space otherwise.
fn format_annotation(repo: &dyn Repo, annotation: &FileAnnotation) -> String {
    let mut output = String::new();
    for (origin, line) in annotation.line_origins() {
        let is_uncertain = origin.is_err();
        // Both the Ok and Err variants carry a line origin; use whichever.
        let line_origin = match origin {
            Ok(inner) | Err(inner) => inner,
        };
        let commit = repo.store().get_commit(&line_origin.commit_id).unwrap();
        write!(
            output,
            "{desc}:{number}{sigil}: {line}",
            desc = commit.description().trim_end(),
            number = line_origin.line_number + 1,
            sigil = if is_uncertain { '*' } else { ' ' },
        )
        .unwrap();
    }
    output
}
#[test]
fn test_annotate_linear() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let root_commit_id = repo.store().root_commit_id();
    let file_path = repo_path("file");
    let mut tx = repo.start_transaction();
    let mut create_commit = create_commit_fn(tx.repo_mut());
    let content1 = "";
    let content2 = "2a\n2b\n";
    let content3 = "2b\n3\n";
    let tree1 = create_tree(repo, &[(file_path, content1)]);
    let tree2 = create_tree(repo, &[(file_path, content2)]);
    let tree3 = create_tree(repo, &[(file_path, content3)]);
    let commit1 = create_commit("commit1", &[root_commit_id], tree1);
    let commit2 = create_commit("commit2", &[commit1.id()], tree2);
    let commit3 = create_commit("commit3", &[commit2.id()], tree3.clone());
    let commit4 = create_commit("commit4", &[commit3.id()], tree3); // empty commit
    drop(create_commit);
    // Empty file yields an empty annotation.
    insta::assert_snapshot!(annotate(tx.repo(), &commit1, file_path), @"");
    insta::assert_snapshot!(annotate(tx.repo(), &commit2, file_path), @r"
    commit2:1 : 2a
    commit2:2 : 2b
    ");
    // "2b" moved from line 2 to line 1 but is still attributed to commit2.
    insta::assert_snapshot!(annotate(tx.repo(), &commit3, file_path), @r"
    commit2:2 : 2b
    commit3:2 : 3
    ");
    // The empty commit4 introduces no lines; attribution is unchanged.
    insta::assert_snapshot!(annotate(tx.repo(), &commit4, file_path), @r"
    commit2:2 : 2b
    commit3:2 : 3
    ");
}
#[test]
fn test_annotate_merge_simple() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let root_commit_id = repo.store().root_commit_id();
    let file_path = repo_path("file");
    // 4    "2 1 3"
    // |\
    // | 3  "1 3"
    // | |
    // 2 |  "2 1"
    // |/
    // 1    "1"
    let mut tx = repo.start_transaction();
    let mut create_commit = create_commit_fn(tx.repo_mut());
    let content1 = "1\n";
    let content2 = "2\n1\n";
    let content3 = "1\n3\n";
    let content4 = "2\n1\n3\n";
    let tree1 = create_tree(repo, &[(file_path, content1)]);
    let tree2 = create_tree(repo, &[(file_path, content2)]);
    let tree3 = create_tree(repo, &[(file_path, content3)]);
    let tree4 = create_tree(repo, &[(file_path, content4)]);
    let commit1 = create_commit("commit1", &[root_commit_id], tree1);
    let commit2 = create_commit("commit2", &[commit1.id()], tree2);
    let commit3 = create_commit("commit3", &[commit1.id()], tree3);
    let commit4 = create_commit("commit4", &[commit2.id(), commit3.id()], tree4);
    drop(create_commit);
    // Unrestricted domain: each line maps to the commit that introduced it.
    insta::assert_snapshot!(annotate(tx.repo(), &commit4, file_path), @r"
    commit2:1 : 2
    commit1:1 : 1
    commit3:2 : 3
    ");
    // Exclude the fork commit and its ancestors. `*` marks lines whose
    // origin lies outside the domain.
    let domain = RevsetExpression::commit(commit1.id().clone())
        .ancestors()
        .negated();
    insta::assert_snapshot!(annotate_within(tx.repo(), &commit4, &domain, file_path), @r"
    commit2:1 : 2
    commit1:1*: 1
    commit3:2 : 3
    ");
    // Exclude one side of the merge and its ancestors.
    let domain = RevsetExpression::commit(commit2.id().clone())
        .ancestors()
        .negated();
    insta::assert_snapshot!(annotate_within(tx.repo(), &commit4, &domain, file_path), @r"
    commit2:1*: 2
    commit2:2*: 1
    commit3:2 : 3
    ");
    // Exclude both sides of the merge and their ancestors.
    let domain = RevsetExpression::commit(commit4.id().clone());
    insta::assert_snapshot!(annotate_within(tx.repo(), &commit4, &domain, file_path), @r"
    commit2:1*: 2
    commit2:2*: 1
    commit3:2*: 3
    ");
    // Exclude intermediate commit, which is useless but works.
    let domain = RevsetExpression::commit(commit3.id().clone()).negated();
    insta::assert_snapshot!(annotate_within(tx.repo(), &commit4, &domain, file_path), @r"
    commit2:1 : 2
    commit1:1 : 1
    commit4:3 : 3
    ");
    // Calculate incrementally: before any computation, all lines are
    // tentatively attributed to the starting commit.
    let mut annotator = FileAnnotator::from_commit(&commit4, file_path).unwrap();
    assert_eq!(annotator.pending_commits().collect_vec(), [commit4.id()]);
    insta::assert_snapshot!(format_annotation(tx.repo(), &annotator.to_annotation()), @r"
    commit4:1*: 2
    commit4:2*: 1
    commit4:3*: 3
    ");
    // Processing commits 4, 3, 2 leaves commit1 pending.
    annotator
        .compute(
            tx.repo(),
            &RevsetExpression::commits(vec![
                commit4.id().clone(),
                commit3.id().clone(),
                commit2.id().clone(),
            ]),
        )
        .unwrap();
    assert_eq!(annotator.pending_commits().collect_vec(), [commit1.id()]);
    insta::assert_snapshot!(format_annotation(tx.repo(), &annotator.to_annotation()), @r"
    commit2:1 : 2
    commit1:1*: 1
    commit3:2 : 3
    ");
    // Processing the remaining commit resolves all origins.
    annotator
        .compute(
            tx.repo(),
            &RevsetExpression::commits(vec![commit1.id().clone()]),
        )
        .unwrap();
    assert!(annotator.pending_commits().next().is_none());
    insta::assert_snapshot!(format_annotation(tx.repo(), &annotator.to_annotation()), @r"
    commit2:1 : 2
    commit1:1 : 1
    commit3:2 : 3
    ");
}
#[test]
fn test_annotate_merge_split() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let root_commit_id = repo.store().root_commit_id();
    let file_path = repo_path("file");
    // 4    "2 1a 1b 3 4"
    // |\
    // | 3  "1b 3"
    // | |
    // 2 |  "2 1a"
    // |/
    // 1    "1a 1b"
    let mut tx = repo.start_transaction();
    let mut create_commit = create_commit_fn(tx.repo_mut());
    let content1 = "1a\n1b\n";
    let content2 = "2\n1a\n";
    let content3 = "1b\n3\n";
    let content4 = "2\n1a\n1b\n3\n4\n";
    let tree1 = create_tree(repo, &[(file_path, content1)]);
    let tree2 = create_tree(repo, &[(file_path, content2)]);
    let tree3 = create_tree(repo, &[(file_path, content3)]);
    let tree4 = create_tree(repo, &[(file_path, content4)]);
    let commit1 = create_commit("commit1", &[root_commit_id], tree1);
    let commit2 = create_commit("commit2", &[commit1.id()], tree2);
    let commit3 = create_commit("commit3", &[commit1.id()], tree3);
    let commit4 = create_commit("commit4", &[commit2.id(), commit3.id()], tree4);
    drop(create_commit);
    // "1a"/"1b" survived on different sides of the fork and are reunited by
    // the merge; both still map back to commit1. The new "4" maps to the
    // merge itself.
    insta::assert_snapshot!(annotate(tx.repo(), &commit4, file_path), @r"
    commit2:1 : 2
    commit1:1 : 1a
    commit1:2 : 1b
    commit3:2 : 3
    commit4:5 : 4
    ");
}
#[test]
fn test_annotate_merge_split_interleaved() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let root_commit_id = repo.store().root_commit_id();
    let file_path = repo_path("file");
    // 6    "1a 4 1b 6 2a 5 2b"
    // |\
    // | 5  "1b 5 2b"
    // | |
    // 4 |  "1a 4 2a"
    // |/
    // 3    "1a 1b 2a 2b"
    // |\
    // | 2  "2a 2b"
    // |
    // 1    "1a 1b"
    let mut tx = repo.start_transaction();
    let mut create_commit = create_commit_fn(tx.repo_mut());
    let content1 = "1a\n1b\n";
    let content2 = "2a\n2b\n";
    let content3 = "1a\n1b\n2a\n2b\n";
    let content4 = "1a\n4\n2a\n";
    let content5 = "1b\n5\n2b\n";
    let content6 = "1a\n4\n1b\n6\n2a\n5\n2b\n";
    let tree1 = create_tree(repo, &[(file_path, content1)]);
    let tree2 = create_tree(repo, &[(file_path, content2)]);
    let tree3 = create_tree(repo, &[(file_path, content3)]);
    let tree4 = create_tree(repo, &[(file_path, content4)]);
    let tree5 = create_tree(repo, &[(file_path, content5)]);
    let tree6 = create_tree(repo, &[(file_path, content6)]);
    let commit1 = create_commit("commit1", &[root_commit_id], tree1);
    let commit2 = create_commit("commit2", &[root_commit_id], tree2);
    let commit3 = create_commit("commit3", &[commit1.id(), commit2.id()], tree3);
    let commit4 = create_commit("commit4", &[commit3.id()], tree4);
    let commit5 = create_commit("commit5", &[commit3.id()], tree5);
    let commit6 = create_commit("commit6", &[commit4.id(), commit5.id()], tree6);
    drop(create_commit);
    // Lines from the two roots and both fork sides are interleaved in the
    // final merge, yet each still resolves to its introducing commit.
    insta::assert_snapshot!(annotate(tx.repo(), &commit6, file_path), @r"
    commit1:1 : 1a
    commit4:2 : 4
    commit1:2 : 1b
    commit6:4 : 6
    commit2:1 : 2a
    commit5:2 : 5
    commit2:2 : 2b
    ");
}
#[test]
fn test_annotate_merge_dup() {
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let root_commit_id = repo.store().root_commit_id();
let file_path = repo_path("file");
// 4 "2 1 1 3 4"
// |\
// | 3 "1 3"
// | |
// 2 | "2 1"
// |/
// 1 "1"
let mut tx = repo.start_transaction();
let mut create_commit = create_commit_fn(tx.repo_mut());
let content1 = "1\n";
let content2 = "2\n1\n";
let content3 = "1\n3\n";
let content4 = "2\n1\n1\n3\n4\n";
let tree1 = create_tree(repo, &[(file_path, content1)]);
let tree2 = create_tree(repo, &[(file_path, content2)]);
let tree3 = create_tree(repo, &[(file_path, content3)]);
let tree4 = create_tree(repo, &[(file_path, content4)]);
let commit1 = create_commit("commit1", &[root_commit_id], tree1);
let commit2 = create_commit("commit2", &[commit1.id()], tree2);
let commit3 = create_commit("commit3", &[commit1.id()], tree3);
let commit4 = create_commit("commit4", &[commit2.id(), commit3.id()], tree4);
drop(create_commit);
// Both "1"s can be propagated to commit1 through commit2 and commit3.
// Alternatively, it's also good to interpret that one of the "1"s was
// produced at commit2, commit3, or commit4.
insta::assert_snapshot!(annotate(tx.repo(), &commit4, file_path), @r"
commit2:1 : 2
commit1:1 : 1
commit1:1 : 1
commit3:2 : 3
commit4:5 : 4
");
// For example, the parent tree of commit4 doesn't contain multiple "1"s.
// If annotation were computed compared to the parent tree, not trees of the
// parent commits, "1" would be inserted at commit4.
insta::assert_snapshot!(annotate_parent_tree(tx.repo(), &commit4, file_path), @r"
commit2:1 : 2
commit1:1 : 1
commit3:2 : 3
");
}
#[test]
fn test_annotate_file_directory_transition() {
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let root_commit_id = repo.store().root_commit_id();
let file_path1 = repo_path("file/was_dir");
let file_path2 = repo_path("file");
let mut tx = repo.start_transaction();
let mut create_commit = create_commit_fn(tx.repo_mut());
let tree1 = create_tree(repo, &[(file_path1, "1\n")]);
let tree2 = create_tree(repo, &[(file_path2, "2\n")]);
let commit1 = create_commit("commit1", &[root_commit_id], tree1);
let commit2 = create_commit("commit2", &[commit1.id()], tree2);
drop(create_commit);
insta::assert_snapshot!(annotate(tx.repo(), &commit2, file_path2), @"commit2:1 : 2");
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_eol.rs | lib/tests/test_eol.rs | // Copyright 2025 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fs::File;
use std::io::Write as _;
use bstr::ByteSlice as _;
use jj_lib::config::ConfigLayer;
use jj_lib::config::ConfigSource;
use jj_lib::repo::Repo as _;
use jj_lib::repo::StoreFactories;
use jj_lib::rewrite::merge_commit_trees;
use jj_lib::settings::UserSettings;
use jj_lib::workspace::Workspace;
use jj_lib::workspace::default_working_copy_factories;
use pollster::FutureExt as _;
use test_case::test_case;
use testutils::TestRepoBackend;
use testutils::TestWorkspace;
use testutils::assert_tree_eq;
use testutils::base_user_config;
use testutils::commit_with_tree;
use testutils::repo_path;
static LF_FILE_CONTENT: &[u8] = b"aaa\nbbbb\nccccc\n";
static CRLF_FILE_CONTENT: &[u8] = b"aaa\r\nbbbb\r\nccccc\r\n";
static MIXED_EOL_FILE_CONTENT: &[u8] = b"aaa\nbbbb\r\nccccc\n";
static BINARY_FILE_CONTENT: &[u8] = b"\0";
struct Config {
extra_setting: &'static str,
file_content: &'static [u8],
}
fn base_user_settings_with_extra_configs(extra_settings: &str) -> UserSettings {
let mut config = base_user_config();
config.add_layer(
ConfigLayer::parse(ConfigSource::User, extra_settings)
.expect("Failed to parse the settings"),
);
UserSettings::from_config(config).expect("Failed to create the UserSettings from the config")
}
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "input-output""#,
file_content: LF_FILE_CONTENT,
} => LF_FILE_CONTENT; "eol-conversion input-output LF only file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "input-output""#,
file_content: CRLF_FILE_CONTENT,
} => LF_FILE_CONTENT; "eol-conversion input-output CRLF only file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "input-output""#,
file_content: MIXED_EOL_FILE_CONTENT,
} => LF_FILE_CONTENT; "eol-conversion input-output mixed EOL file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "input-output""#,
file_content: BINARY_FILE_CONTENT,
} => BINARY_FILE_CONTENT; "eol-conversion input-output binary file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "input""#,
file_content: LF_FILE_CONTENT,
} => LF_FILE_CONTENT; "eol-conversion input LF only file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "input""#,
file_content: CRLF_FILE_CONTENT,
} => LF_FILE_CONTENT; "eol-conversion input CRLF only file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "input""#,
file_content: MIXED_EOL_FILE_CONTENT,
} => LF_FILE_CONTENT; "eol-conversion input mixed EOL file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "input""#,
file_content: BINARY_FILE_CONTENT,
} => BINARY_FILE_CONTENT; "eol-conversion input binary file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "none""#,
file_content: LF_FILE_CONTENT,
} => LF_FILE_CONTENT; "eol-conversion none LF only file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "none""#,
file_content: CRLF_FILE_CONTENT,
} => CRLF_FILE_CONTENT; "eol-conversion none CRLF only file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "none""#,
file_content: MIXED_EOL_FILE_CONTENT,
} => MIXED_EOL_FILE_CONTENT; "eol-conversion none mixed EOL file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "none""#,
file_content: BINARY_FILE_CONTENT,
} => BINARY_FILE_CONTENT; "eol-conversion none binary file")]
fn test_eol_conversion_snapshot(
Config {
extra_setting,
file_content,
}: Config,
) -> Vec<u8> {
// This test creates snapshots with different working-copy.eol-conversion
// configurations, where proper EOL conversion should apply before writing files
// back to the store. Then files are checked out with
// working-copy.eol-conversion = "none", which won't touch the EOLs, so that we
// can tell whether the exact EOLs written to the store are expected.
let extra_setting = format!("{extra_setting}\n");
let user_settings = base_user_settings_with_extra_configs(&extra_setting);
let mut test_workspace =
TestWorkspace::init_with_backend_and_settings(TestRepoBackend::Git, &user_settings);
let file_repo_path = repo_path("test-eol-file");
let file_disk_path = file_repo_path
.to_fs_path(test_workspace.workspace.workspace_root())
.unwrap();
testutils::write_working_copy_file(
test_workspace.workspace.workspace_root(),
file_repo_path,
file_content,
);
let tree = test_workspace.snapshot().unwrap();
let new_tree = test_workspace.snapshot().unwrap();
assert_tree_eq!(new_tree, tree, "The working copy should be clean.");
let file_added_commit = commit_with_tree(test_workspace.repo.store(), tree);
// Create a commit with the file removed, so that later when we checkout the
// file_added_commit, the test file is recreated.
std::fs::remove_file(&file_disk_path).unwrap();
let tree = test_workspace.snapshot().unwrap();
let file_removed_commit = commit_with_tree(test_workspace.repo.store(), tree);
let workspace = &mut test_workspace.workspace;
workspace
.check_out(
test_workspace.repo.op_id().clone(),
None,
&file_removed_commit,
)
.unwrap();
assert!(!file_disk_path.exists());
let user_settings =
base_user_settings_with_extra_configs("working-copy.eol-conversion = \"none\"\n");
// Reload the workspace with the new working-copy.eol-conversion = "none"
// setting to verify the EOL of files previously written to the store.
let mut workspace = Workspace::load(
&user_settings,
test_workspace.workspace.workspace_root(),
&StoreFactories::default(),
&default_working_copy_factories(),
)
.expect("Failed to reload the workspace");
// We have to query the Commit again. The Workspace is backed by a different
// Store from the original Commit.
let file_added_commit = workspace
.repo_loader()
.store()
.get_commit(file_added_commit.id())
.expect("Failed to find the commit with the test file");
workspace
.check_out(
test_workspace.repo.op_id().clone(),
None,
&file_added_commit,
)
.unwrap();
assert!(file_disk_path.exists());
let new_tree = test_workspace.snapshot().unwrap();
assert_tree_eq!(
new_tree,
file_added_commit.tree(),
"The working copy should be clean."
);
std::fs::read(&file_disk_path).expect("Failed to read the checked out test file")
}
// Create a conflict commit in a CRLF EOL file, and append another line with the
// CRLF EOL to the file, create a snapshot on the modified merge conflict,
// checkout the snapshot with the given setting, and return the content of the
// file.
fn create_conflict_snapshot_and_read(extra_setting: &str) -> Vec<u8> {
// Use the working-copy.eol-conversion = "none" setting to write files to the
// store as is.
let no_eol_conversion_settings =
base_user_settings_with_extra_configs("working-copy.eol-conversion = \"none\"\n");
let mut test_workspace = TestWorkspace::init_with_backend_and_settings(
TestRepoBackend::Git,
&no_eol_conversion_settings,
);
let file_repo_path = repo_path("test-eol-file");
let file_disk_path = file_repo_path
.to_fs_path(test_workspace.workspace.workspace_root())
.unwrap();
// The commit graph:
// C (conflict)
// |\
// A B
// |/
// (empty)
let root_commit = test_workspace.repo.store().root_commit();
testutils::write_working_copy_file(
test_workspace.workspace.workspace_root(),
file_repo_path,
"a\r\n",
);
let tree = test_workspace.snapshot().unwrap();
let mut tx = test_workspace.repo.start_transaction();
let parent1_commit = tx
.repo_mut()
.new_commit(vec![root_commit.id().clone()], tree)
.write()
.unwrap();
tx.commit("commit parent1").unwrap();
test_workspace
.workspace
.check_out(test_workspace.repo.op_id().clone(), None, &root_commit)
.unwrap();
testutils::write_working_copy_file(
test_workspace.workspace.workspace_root(),
file_repo_path,
"b\r\n",
);
let tree = test_workspace.snapshot().unwrap();
let mut tx = test_workspace.repo.start_transaction();
let parent2_commit = tx
.repo_mut()
.new_commit(vec![root_commit.id().clone()], tree)
.write()
.unwrap();
tx.commit("commit parent2").unwrap();
// Reload the repo to pick up the new commits.
test_workspace.repo = test_workspace.repo.reload_at_head().unwrap();
// Create the merge commit.
let tree = merge_commit_trees(&*test_workspace.repo, &[parent1_commit, parent2_commit])
.block_on()
.unwrap();
let merge_commit = commit_with_tree(test_workspace.repo.store(), tree);
// Append new texts to the file with conflicts to make sure the last line is not
// conflict markers.
test_workspace
.workspace
.check_out(test_workspace.repo.op_id().clone(), None, &merge_commit)
.unwrap();
let mut file = File::options().append(true).open(&file_disk_path).unwrap();
file.write_all(b"c\r\n").unwrap();
drop(file);
let extra_setting = format!("{extra_setting}\n");
let user_settings = base_user_settings_with_extra_configs(&extra_setting);
// Reload the Workspace to apply the settings under testing.
test_workspace.workspace = Workspace::load(
&user_settings,
test_workspace.workspace.workspace_root(),
&StoreFactories::default(),
&default_working_copy_factories(),
)
.expect("Failed to reload the workspace");
let tree = test_workspace.snapshot().unwrap();
let new_tree = test_workspace.snapshot().unwrap();
assert_tree_eq!(new_tree, tree, "The working copy should be clean.");
// Create the new merge commit with the conflict file appended.
let merge_commit = commit_with_tree(test_workspace.repo.store(), tree);
// Reload the Workspace with the working-copy.eol-conversion = "none" setting to
// check the EOL of the file written to the store previously.
test_workspace.workspace = Workspace::load(
&no_eol_conversion_settings,
test_workspace.workspace.workspace_root(),
&StoreFactories::default(),
&default_working_copy_factories(),
)
.expect("Failed to reload the workspace");
// Checkout the empty commit to clear the directory, so that the test file will
// be recreated.
test_workspace
.workspace
.check_out(
test_workspace.repo.op_id().clone(),
None,
&test_workspace.workspace.repo_loader().store().root_commit(),
)
.unwrap();
// We have to query the Commit again. The Workspace is backed by a different
// Store from the original Commit.
let merge_commit = test_workspace
.workspace
.repo_loader()
.store()
.get_commit(merge_commit.id())
.expect("Failed to find the commit with the test file");
test_workspace
.workspace
.check_out(test_workspace.repo.op_id().clone(), None, &merge_commit)
.unwrap();
assert!(std::fs::exists(&file_disk_path).unwrap());
std::fs::read(&file_disk_path).unwrap()
}
#[test]
fn test_eol_conversion_input_output_snapshot_conflicts() {
let contents =
create_conflict_snapshot_and_read(r#"working-copy.eol-conversion = "input-output""#);
for line in contents.lines_with_terminator() {
assert!(
!line.ends_with(b"\r\n"),
"{:?} should not end with CRLF",
line.to_str_lossy().as_ref()
);
}
}
#[test]
fn test_eol_conversion_input_snapshot_conflicts() {
let contents = create_conflict_snapshot_and_read(r#"working-copy.eol-conversion = "input""#);
for line in contents.lines_with_terminator() {
assert!(
!line.ends_with(b"\r\n"),
"{:?} should not end with CRLF",
line.to_str_lossy().as_ref()
);
}
}
#[test]
fn test_eol_conversion_none_snapshot_conflicts() {
let contents = create_conflict_snapshot_and_read(r#"working-copy.eol-conversion = "none""#);
// We only check the last line, because it is only guaranteed that the last line
// is not the conflict markers. The conflict markers in the store are supposed
// to use the LF EOL.
let line = contents.lines_with_terminator().next_back().unwrap();
assert!(
line.ends_with(b"\r\n"),
"{:?} should end with CRLF",
line.to_str_lossy().as_ref()
);
}
struct UpdateConflictsTestConfig {
parent1_contents: &'static str,
parent2_contents: &'static str,
extra_setting: &'static str,
expected_eol: &'static str,
expected_conflict_side1: &'static str,
expected_conflict_side2: &'static str,
}
#[test_case(UpdateConflictsTestConfig {
parent1_contents: "a\n",
parent2_contents: "b\n",
extra_setting: r#"working-copy.eol-conversion = "none""#,
expected_eol: "\n",
expected_conflict_side1: "a\n",
expected_conflict_side2: "b\n",
}; "LF parents with none settings")]
#[test_case(UpdateConflictsTestConfig {
parent1_contents: "a\n",
parent2_contents: "b\n",
extra_setting: r#"working-copy.eol-conversion = "input-output""#,
expected_eol: "\r\n",
expected_conflict_side1: "a\r\n",
expected_conflict_side2: "b\r\n",
}; "LF parents with input-output settings")]
#[test_case(UpdateConflictsTestConfig {
parent1_contents: "a\r\n",
parent2_contents: "b\r\n",
extra_setting: r#"working-copy.eol-conversion = "input-output""#,
expected_eol: "\r\n",
expected_conflict_side1: "a\r\n",
expected_conflict_side2: "b\r\n",
}; "CRLF parents with input-output settings")]
fn test_eol_conversion_update_conflicts(
UpdateConflictsTestConfig {
parent1_contents,
parent2_contents,
extra_setting,
expected_eol,
expected_conflict_side1,
expected_conflict_side2,
}: UpdateConflictsTestConfig,
) {
// Create a conflict commit with 2 given contents on one file, checkout that
// conflict with the given EOL conversion settings, and test if the EOL matches.
let extra_setting = format!("{extra_setting}\n");
let user_settings = base_user_settings_with_extra_configs(&extra_setting);
let mut test_workspace =
TestWorkspace::init_with_backend_and_settings(TestRepoBackend::Git, &user_settings);
let file_repo_path = repo_path("test-eol-file");
let file_disk_path = file_repo_path
.to_fs_path(test_workspace.workspace.workspace_root())
.unwrap();
// The commit graph:
// C (conflict)
// |\
// A B
// |/
// (empty)
let root_commit = test_workspace.repo.store().root_commit();
let mut tx = test_workspace.repo.start_transaction();
let tree = testutils::create_tree(&test_workspace.repo, &[(file_repo_path, parent1_contents)]);
let parent1_commit = tx
.repo_mut()
.new_commit(vec![root_commit.id().clone()], tree)
.write()
.unwrap();
let tree = testutils::create_tree(&test_workspace.repo, &[(file_repo_path, parent2_contents)]);
let parent2_commit = tx
.repo_mut()
.new_commit(vec![root_commit.id().clone()], tree)
.write()
.unwrap();
tx.commit("commit parent 2").unwrap();
// Reload the repo to pick up the new commits.
test_workspace.repo = test_workspace.repo.reload_at_head().unwrap();
// Create the merge commit.
let tree = merge_commit_trees(&*test_workspace.repo, &[parent1_commit, parent2_commit])
.block_on()
.unwrap();
let merge_commit = commit_with_tree(test_workspace.repo.store(), tree);
// Checkout the merge commit.
test_workspace
.workspace
.check_out(test_workspace.repo.op_id().clone(), None, &merge_commit)
.unwrap();
let contents = std::fs::read(&file_disk_path).unwrap();
for line in contents.lines_with_terminator() {
assert!(
line.ends_with_str(expected_eol),
"{:?} should end with {:?}",
&*line.to_str_lossy(),
expected_eol
);
}
let hunks =
jj_lib::conflicts::parse_conflict(&contents, 2, jj_lib::conflicts::MIN_CONFLICT_MARKER_LEN)
.unwrap();
let hunk = &hunks[0];
assert!(!hunk.is_resolved());
let sides = hunk.iter().collect::<Vec<_>>();
assert_eq!(sides[0], expected_conflict_side1);
assert_eq!(sides[2], expected_conflict_side2);
}
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "input-output""#,
file_content: LF_FILE_CONTENT,
} => CRLF_FILE_CONTENT; "eol-conversion input-output LF only file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "input-output""#,
file_content: CRLF_FILE_CONTENT,
} => CRLF_FILE_CONTENT; "eol-conversion input-output CRLF only file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "input-output""#,
file_content: MIXED_EOL_FILE_CONTENT,
} => CRLF_FILE_CONTENT; "eol-conversion input-output mixed EOL file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "input-output""#,
file_content: BINARY_FILE_CONTENT,
} => BINARY_FILE_CONTENT; "eol-conversion input-output binary file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "input""#,
file_content: LF_FILE_CONTENT,
} => LF_FILE_CONTENT; "eol-conversion input LF only file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "input""#,
file_content: CRLF_FILE_CONTENT,
} => CRLF_FILE_CONTENT; "eol-conversion input CRLF only file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "input""#,
file_content: MIXED_EOL_FILE_CONTENT,
} => MIXED_EOL_FILE_CONTENT; "eol-conversion input mixed EOL file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "input""#,
file_content: BINARY_FILE_CONTENT,
} => BINARY_FILE_CONTENT; "eol-conversion input binary file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "none""#,
file_content: LF_FILE_CONTENT,
} => LF_FILE_CONTENT; "eol-conversion none LF only file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "none""#,
file_content: CRLF_FILE_CONTENT,
} => CRLF_FILE_CONTENT; "eol-conversion none CRLF only file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "none""#,
file_content: MIXED_EOL_FILE_CONTENT,
} => MIXED_EOL_FILE_CONTENT; "eol-conversion none mixed EOL file")]
#[test_case(Config {
extra_setting: r#"working-copy.eol-conversion = "none""#,
file_content: BINARY_FILE_CONTENT,
} => BINARY_FILE_CONTENT; "eol-conversion none binary file")]
fn test_eol_conversion_checkout(
Config {
extra_setting,
file_content,
}: Config,
) -> Vec<u8> {
// This test checks in files with working-copy.eol-conversion = "none", so that
// the store stores files as is. Then we use jj to check out those files with
// different working-copy.eol-conversion configurations to verify if the EOLs
// are converted as expected.
let no_eol_conversion_settings =
base_user_settings_with_extra_configs("working-copy.eol-conversion = \"none\"\n");
// Use the working-copy.eol-conversion = "none" setting, so that the input files
// are stored as is.
let mut test_workspace = TestWorkspace::init_with_backend_and_settings(
TestRepoBackend::Git,
&no_eol_conversion_settings,
);
let file_repo_path = repo_path("test-eol-file");
let file_disk_path = file_repo_path
.to_fs_path(test_workspace.workspace.workspace_root())
.unwrap();
testutils::write_working_copy_file(
test_workspace.workspace.workspace_root(),
file_repo_path,
file_content,
);
let tree = test_workspace.snapshot().unwrap();
let commit = commit_with_tree(test_workspace.repo.store(), tree);
// Checkout the empty commit to clear the directory, so that later when we
// checkout, files are recreated.
test_workspace
.workspace
.check_out(
test_workspace.repo.op_id().clone(),
None,
&test_workspace.workspace.repo_loader().store().root_commit(),
)
.unwrap();
assert!(!std::fs::exists(&file_disk_path).unwrap());
let extra_setting = format!("{extra_setting}\n");
let user_settings = base_user_settings_with_extra_configs(&extra_setting);
// Change the working-copy.eol-conversion setting to the configuration under
// testing.
test_workspace.workspace = Workspace::load(
&user_settings,
test_workspace.workspace.workspace_root(),
&StoreFactories::default(),
&default_working_copy_factories(),
)
.expect("Failed to reload the workspace");
// We have to query the Commit again. The Workspace is backed by a different
// Store from the original Commit.
let commit = test_workspace
.workspace
.repo_loader()
.store()
.get_commit(commit.id())
.expect("Failed to find the commit with the test file");
// Check out the commit with the test file. TreeState::update should update the
// EOL accordingly.
test_workspace
.workspace
.check_out(test_workspace.repo.op_id().clone(), None, &commit)
.unwrap();
// When we take a snapshot now, the tree may not be clean, because the EOL our
// snapshot creates may not align with what is currently used in store. e.g.
// with working-copy.eol-conversion = "input-output", the test-eol-file may have
// CRLF line endings in the store, but the snapshot will change the EOL to LF,
// hence the diff.
assert!(std::fs::exists(&file_disk_path).unwrap());
std::fs::read(&file_disk_path).unwrap()
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_operations.rs | lib/tests/test_operations.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::path::Path;
use std::slice;
use std::sync::Arc;
use std::time::SystemTime;
use assert_matches::assert_matches;
use itertools::Itertools as _;
use jj_lib::backend::CommitId;
use jj_lib::config::ConfigLayer;
use jj_lib::config::ConfigSource;
use jj_lib::evolution::walk_predecessors;
use jj_lib::index::Index;
use jj_lib::object_id::ObjectId as _;
use jj_lib::op_store::OperationId;
use jj_lib::op_walk;
use jj_lib::op_walk::OpsetEvaluationError;
use jj_lib::op_walk::OpsetResolutionError;
use jj_lib::operation::Operation;
use jj_lib::repo::ReadonlyRepo;
use jj_lib::repo::Repo;
use jj_lib::settings::UserSettings;
use pollster::FutureExt as _;
use test_case::test_case;
use testutils::TestRepo;
use testutils::write_random_commit;
use testutils::write_random_commit_with_parents;
fn get_predecessors(repo: &ReadonlyRepo, id: &CommitId) -> Vec<CommitId> {
let entries: Vec<_> = walk_predecessors(repo, slice::from_ref(id))
.try_collect()
.expect("unreachable predecessors shouldn't be visited");
let first = entries
.first()
.expect("specified commit should be reachable");
first.predecessor_ids().to_vec()
}
fn list_dir(dir: &Path) -> Vec<String> {
std::fs::read_dir(dir)
.unwrap()
.map(|entry| entry.unwrap().file_name().to_str().unwrap().to_owned())
.sorted()
.collect()
}
fn index_has_id(index: &dyn Index, commit_id: &CommitId) -> bool {
index.has_id(commit_id).unwrap()
}
#[test]
fn test_unpublished_operation() {
// Test that the operation doesn't get published until that's requested.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let op_heads_dir = test_repo.repo_path().join("op_heads").join("heads");
let op_id0 = repo.op_id().clone();
assert_eq!(list_dir(&op_heads_dir), vec![repo.op_id().hex()]);
let mut tx1 = repo.start_transaction();
write_random_commit(tx1.repo_mut());
let unpublished_op = tx1.write("transaction 1").unwrap();
let op_id1 = unpublished_op.operation().id().clone();
assert_ne!(op_id1, op_id0);
assert_eq!(list_dir(&op_heads_dir), vec![op_id0.hex()]);
unpublished_op.publish().unwrap();
assert_eq!(list_dir(&op_heads_dir), vec![op_id1.hex()]);
}
#[test]
fn test_consecutive_operations() {
// Test that consecutive operations result in a single op-head on disk after
// each operation
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let op_heads_dir = test_repo.repo_path().join("op_heads").join("heads");
let op_id0 = repo.op_id().clone();
assert_eq!(list_dir(&op_heads_dir), vec![repo.op_id().hex()]);
let mut tx1 = repo.start_transaction();
write_random_commit(tx1.repo_mut());
let op_id1 = tx1
.commit("transaction 1")
.unwrap()
.operation()
.id()
.clone();
assert_ne!(op_id1, op_id0);
assert_eq!(list_dir(&op_heads_dir), vec![op_id1.hex()]);
let repo = repo.reload_at_head().unwrap();
let mut tx2 = repo.start_transaction();
write_random_commit(tx2.repo_mut());
let op_id2 = tx2
.commit("transaction 2")
.unwrap()
.operation()
.id()
.clone();
assert_ne!(op_id2, op_id0);
assert_ne!(op_id2, op_id1);
assert_eq!(list_dir(&op_heads_dir), vec![op_id2.hex()]);
// Reloading the repo makes no difference (there are no conflicting operations
// to resolve).
let _repo = repo.reload_at_head().unwrap();
assert_eq!(list_dir(&op_heads_dir), vec![op_id2.hex()]);
}
#[test]
fn test_concurrent_operations() {
// Test that consecutive operations result in multiple op-heads on disk until
// the repo has been reloaded (which currently happens right away).
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let op_heads_dir = test_repo.repo_path().join("op_heads").join("heads");
let op_id0 = repo.op_id().clone();
assert_eq!(list_dir(&op_heads_dir), vec![repo.op_id().hex()]);
let mut tx1 = repo.start_transaction();
write_random_commit(tx1.repo_mut());
let op_id1 = tx1
.commit("transaction 1")
.unwrap()
.operation()
.id()
.clone();
assert_ne!(op_id1, op_id0);
assert_eq!(list_dir(&op_heads_dir), vec![op_id1.hex()]);
// After both transactions have committed, we should have two op-heads on disk,
// since they were run in parallel.
let mut tx2 = repo.start_transaction();
write_random_commit(tx2.repo_mut());
let op_id2 = tx2
.commit("transaction 2")
.unwrap()
.operation()
.id()
.clone();
assert_ne!(op_id2, op_id0);
assert_ne!(op_id2, op_id1);
let mut actual_heads_on_disk = list_dir(&op_heads_dir);
actual_heads_on_disk.sort();
let mut expected_heads_on_disk = vec![op_id1.hex(), op_id2.hex()];
expected_heads_on_disk.sort();
assert_eq!(actual_heads_on_disk, expected_heads_on_disk);
// Reloading the repo causes the operations to be merged
let repo = repo.reload_at_head().unwrap();
let merged_op_id = repo.op_id().clone();
assert_ne!(merged_op_id, op_id0);
assert_ne!(merged_op_id, op_id1);
assert_ne!(merged_op_id, op_id2);
assert_eq!(list_dir(&op_heads_dir), vec![merged_op_id.hex()]);
}
fn assert_heads(repo: &dyn Repo, expected: Vec<&CommitId>) {
let expected = expected.iter().cloned().cloned().collect();
assert_eq!(*repo.view().heads(), expected);
}
#[test]
fn test_isolation() {
// Test that two concurrent transactions don't see each other's changes.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let initial = write_random_commit_with_parents(tx.repo_mut(), &[]);
let repo = tx.commit("test").unwrap();
let mut tx1 = repo.start_transaction();
let mut_repo1 = tx1.repo_mut();
let mut tx2 = repo.start_transaction();
let mut_repo2 = tx2.repo_mut();
assert_heads(repo.as_ref(), vec![initial.id()]);
assert_heads(mut_repo1, vec![initial.id()]);
assert_heads(mut_repo2, vec![initial.id()]);
let rewrite1 = mut_repo1
.rewrite_commit(&initial)
.set_description("rewrite1")
.write()
.unwrap();
mut_repo1.rebase_descendants().unwrap();
let rewrite2 = mut_repo2
.rewrite_commit(&initial)
.set_description("rewrite2")
.write()
.unwrap();
mut_repo2.rebase_descendants().unwrap();
// Neither transaction has committed yet, so each transaction sees its own
// commit.
assert_heads(repo.as_ref(), vec![initial.id()]);
assert_heads(mut_repo1, vec![rewrite1.id()]);
assert_heads(mut_repo2, vec![rewrite2.id()]);
// The base repo and tx2 don't see the commits from tx1.
tx1.commit("transaction 1").unwrap();
assert_heads(repo.as_ref(), vec![initial.id()]);
assert_heads(mut_repo2, vec![rewrite2.id()]);
// The base repo still doesn't see the commits after both transactions commit.
tx2.commit("transaction 2").unwrap();
assert_heads(repo.as_ref(), vec![initial.id()]);
// After reload, the base repo sees both rewrites.
let repo = repo.reload_at_head().unwrap();
assert_heads(repo.as_ref(), vec![rewrite1.id(), rewrite2.id()]);
}
#[test]
// Verifies that rewriting a commit records its predecessor in the operation,
// and that an operation re-saved without `commit_predecessors` (as old jj
// versions wrote it) reports that it doesn't store them.
fn test_stored_commit_predecessors() {
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let loader = repo.loader();
let mut tx = repo.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
let commit2 = tx
.repo_mut()
.rewrite_commit(&commit1)
.set_description("rewritten")
.write()
.unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo = tx.commit("test").unwrap();
// Reload operation from disk.
let op = loader.load_operation(repo.op_id()).unwrap();
assert!(op.stores_commit_predecessors());
// commit1 was created from scratch, so its predecessor list is empty.
assert_matches!(op.predecessors_for_commit(commit1.id()), Some([]));
assert_matches!(op.predecessors_for_commit(commit2.id()), Some([id]) if id == commit1.id());
// Save operation without the predecessors as old jj would do.
let mut data = op.store_operation().clone();
data.commit_predecessors = None;
let op_id = loader.op_store().write_operation(&data).block_on().unwrap();
// Dropping the field changes the stored content, hence the operation id.
assert_ne!(&op_id, op.id());
let op = loader.load_operation(&op_id).unwrap();
assert!(!op.stores_commit_predecessors());
}
#[test]
// Reparents a linear range of operations onto an earlier operation and checks
// the rewritten chain's metadata/views and the reported statistics, including
// the degenerate case of an empty range.
fn test_reparent_range_linear() {
let test_repo = TestRepo::init();
let repo_0 = test_repo.repo;
let loader = repo_0.loader();
let op_store = repo_0.op_store();
let read_op = |id| loader.load_operation(id).unwrap();
// Asserts that `op` has exactly N parents and returns them.
fn op_parents<const N: usize>(op: &Operation) -> [Operation; N] {
let parents: Vec<_> = op.parents().try_collect().unwrap();
parents.try_into().unwrap()
}
// Set up linear operation graph:
// D
// C
// B
// A
// 0 (initial)
let random_tx = |repo: &Arc<ReadonlyRepo>| {
let mut tx = repo.start_transaction();
write_random_commit(tx.repo_mut());
tx
};
let repo_a = random_tx(&repo_0).commit("op A").unwrap();
let repo_b = random_tx(&repo_a).commit("op B").unwrap();
let repo_c = random_tx(&repo_b).commit("op C").unwrap();
let repo_d = random_tx(&repo_c).commit("op D").unwrap();
// Reparent B..D (=C|D) onto A:
// D'
// C'
// A
// 0 (initial)
let stats = op_walk::reparent_range(
op_store.as_ref(),
slice::from_ref(repo_b.operation()),
slice::from_ref(repo_d.operation()),
repo_a.operation(),
)
.unwrap();
assert_eq!(stats.new_head_ids.len(), 1);
// C and D are rewritten; B becomes unreachable.
assert_eq!(stats.rewritten_count, 2);
assert_eq!(stats.unreachable_count, 1);
let new_op_d = read_op(&stats.new_head_ids[0]);
// Reparenting preserves metadata and view; only parent links change.
assert_eq!(new_op_d.metadata(), repo_d.operation().metadata());
assert_eq!(new_op_d.view_id(), repo_d.operation().view_id());
let [new_op_c] = op_parents(&new_op_d);
assert_eq!(new_op_c.metadata(), repo_c.operation().metadata());
assert_eq!(new_op_c.view_id(), repo_c.operation().view_id());
assert_eq!(new_op_c.parent_ids(), slice::from_ref(repo_a.op_id()));
// Reparent empty range onto A
let stats = op_walk::reparent_range(
op_store.as_ref(),
slice::from_ref(repo_d.operation()),
slice::from_ref(repo_d.operation()),
repo_a.operation(),
)
.unwrap();
// Nothing to rewrite: the destination operation itself becomes the head.
assert_eq!(stats.new_head_ids, vec![repo_a.op_id().clone()]);
assert_eq!(stats.rewritten_count, 0);
assert_eq!(stats.unreachable_count, 3);
}
#[test]
// Reparents ranges within a branchy (merge-containing) operation graph and
// checks that merges, shared parents, and sibling heads are handled.
fn test_reparent_range_branchy() {
let test_repo = TestRepo::init();
let repo_0 = test_repo.repo;
let loader = repo_0.loader();
let op_store = repo_0.op_store();
let read_op = |id| loader.load_operation(id).unwrap();
// Asserts that `op` has exactly N parents and returns them.
fn op_parents<const N: usize>(op: &Operation) -> [Operation; N] {
let parents: Vec<_> = op.parents().try_collect().unwrap();
parents.try_into().unwrap()
}
// Set up branchy operation graph:
// G
// |\
// | F
// E |
// D |
// |/
// C
// B
// A
// 0 (initial)
let random_tx = |repo: &Arc<ReadonlyRepo>| {
let mut tx = repo.start_transaction();
write_random_commit(tx.repo_mut());
tx
};
let repo_a = random_tx(&repo_0).commit("op A").unwrap();
let repo_b = random_tx(&repo_a).commit("op B").unwrap();
let repo_c = random_tx(&repo_b).commit("op C").unwrap();
let repo_d = random_tx(&repo_c).commit("op D").unwrap();
let tx_e = random_tx(&repo_d);
let tx_f = random_tx(&repo_c);
// Committing two concurrent transactions creates merge operation G.
let repo_g = testutils::commit_transactions(vec![tx_e, tx_f]);
let [op_e, op_f] = op_parents(repo_g.operation());
// Reparent D..G (= E|F|G) onto B:
// G'
// |\
// | F'
// E'|
// |/
// B
// A
// 0 (initial)
let stats = op_walk::reparent_range(
op_store.as_ref(),
slice::from_ref(repo_d.operation()),
slice::from_ref(repo_g.operation()),
repo_b.operation(),
)
.unwrap();
assert_eq!(stats.new_head_ids.len(), 1);
assert_eq!(stats.rewritten_count, 3);
assert_eq!(stats.unreachable_count, 2);
let new_op_g = read_op(&stats.new_head_ids[0]);
assert_eq!(new_op_g.metadata(), repo_g.operation().metadata());
assert_eq!(new_op_g.view_id(), repo_g.operation().view_id());
// Both merge parents are rebased directly onto B.
let [new_op_e, new_op_f] = op_parents(&new_op_g);
assert_eq!(new_op_e.parent_ids(), slice::from_ref(repo_b.op_id()));
assert_eq!(new_op_f.parent_ids(), slice::from_ref(repo_b.op_id()));
// Reparent B..G (=C|D|E|F|G) onto A:
// G'
// |\
// | F'
// E'|
// D'|
// |/
// C'
// A
// 0 (initial)
let stats = op_walk::reparent_range(
op_store.as_ref(),
slice::from_ref(repo_b.operation()),
slice::from_ref(repo_g.operation()),
repo_a.operation(),
)
.unwrap();
assert_eq!(stats.new_head_ids.len(), 1);
assert_eq!(stats.rewritten_count, 5);
assert_eq!(stats.unreachable_count, 1);
let new_op_g = read_op(&stats.new_head_ids[0]);
assert_eq!(new_op_g.metadata(), repo_g.operation().metadata());
assert_eq!(new_op_g.view_id(), repo_g.operation().view_id());
let [new_op_e, new_op_f] = op_parents(&new_op_g);
let [new_op_d] = op_parents(&new_op_e);
// E' and F' converge on the same rewritten C'.
assert_eq!(new_op_d.parent_ids(), new_op_f.parent_ids());
let [new_op_c] = op_parents(&new_op_d);
assert_eq!(new_op_c.parent_ids(), slice::from_ref(repo_a.op_id()));
// Reparent (E|F)..G (=G) onto D:
// G'
// D
// C
// B
// A
// 0 (initial)
let stats = op_walk::reparent_range(
op_store.as_ref(),
&[op_e.clone(), op_f.clone()],
slice::from_ref(repo_g.operation()),
repo_d.operation(),
)
.unwrap();
assert_eq!(stats.new_head_ids.len(), 1);
assert_eq!(stats.rewritten_count, 1);
assert_eq!(stats.unreachable_count, 2);
let new_op_g = read_op(&stats.new_head_ids[0]);
assert_eq!(new_op_g.metadata(), repo_g.operation().metadata());
assert_eq!(new_op_g.view_id(), repo_g.operation().view_id());
// The merge collapses to a single-parent operation on top of D.
assert_eq!(new_op_g.parent_ids(), slice::from_ref(repo_d.op_id()));
// Reparent C..F (=F) onto D (ignoring G):
// F'
// D
// C
// B
// A
// 0 (initial)
let stats = op_walk::reparent_range(
op_store.as_ref(),
slice::from_ref(repo_c.operation()),
slice::from_ref(&op_f),
repo_d.operation(),
)
.unwrap();
assert_eq!(stats.new_head_ids.len(), 1);
assert_eq!(stats.rewritten_count, 1);
assert_eq!(stats.unreachable_count, 0);
let new_op_f = read_op(&stats.new_head_ids[0]);
assert_eq!(new_op_f.metadata(), op_f.metadata());
assert_eq!(new_op_f.view_id(), op_f.view_id());
assert_eq!(new_op_f.parent_ids(), slice::from_ref(repo_d.op_id()));
}
#[test_case(false; "legacy commit.predecessors")]
#[test_case(true; "op.commit_predecessors")]
// Checks which commits and predecessor records survive when operations are
// abandoned by reparenting, under both the new per-operation predecessor
// storage and the legacy commit.predecessors fallback.
fn test_reparent_discarding_predecessors(op_stores_commit_predecessors: bool) {
let test_repo = TestRepo::init();
let repo_0 = test_repo.repo;
let loader = repo_0.loader();
let op_store = repo_0.op_store();
// Loads the repo at the given operation id.
let repo_at = |id: &OperationId| {
let op = loader.load_operation(id).unwrap();
loader.load_at(&op).unwrap()
};
let head_commits = |repo: &dyn Repo| {
repo.view()
.heads()
.iter()
.map(|id| repo.store().get_commit(id).unwrap())
.collect_vec()
};
// Set up rewriting as follows:
//
// op1 op2 op3 op4
// B0 B0 B1 B1
// | | | |
// A0 A0 A1 A0 A1 A2
let mut tx = repo_0.start_transaction();
let commit_a0 = write_random_commit(tx.repo_mut());
let commit_b0 = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a0]);
let repo_1 = tx.commit("op1").unwrap();
let mut tx = repo_1.start_transaction();
let commit_a1 = tx
.repo_mut()
.rewrite_commit(&commit_a0)
.set_description("a1")
.write()
.unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let [commit_b1] = head_commits(tx.repo()).try_into().unwrap();
tx.repo_mut().add_head(&commit_b0).unwrap(); // resurrect rewritten commits
let repo_2 = tx.commit("op2").unwrap();
let mut tx = repo_2.start_transaction();
tx.repo_mut().record_abandoned_commit(&commit_b0);
tx.repo_mut().rebase_descendants().unwrap();
let repo_3 = tx.commit("op3").unwrap();
let mut tx = repo_3.start_transaction();
tx.repo_mut().record_abandoned_commit(&commit_a0);
tx.repo_mut().record_abandoned_commit(&commit_b1);
let commit_a2 = tx
.repo_mut()
.rewrite_commit(&commit_a1)
.set_description("a2")
.write()
.unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo_4 = tx.commit("op4").unwrap();
let repo_4 = if op_stores_commit_predecessors {
repo_4
} else {
// Save operation without the predecessors as old jj would do. We only
// need to rewrite the head operation since walk_predecessors() will
// fall back to the legacy code path immediately.
let mut data = repo_4.operation().store_operation().clone();
data.commit_predecessors = None;
let op_id = op_store.write_operation(&data).block_on().unwrap();
repo_at(&op_id)
};
// Sanity check for the setup
assert_eq!(repo_1.view().heads().len(), 1);
assert_eq!(repo_2.view().heads().len(), 2);
assert_eq!(repo_3.view().heads().len(), 2);
assert_eq!(repo_4.view().heads().len(), 1);
assert_eq!(repo_4.index().all_heads_for_gc().unwrap().count(), 3);
assert_eq!(
repo_4.operation().stores_commit_predecessors(),
op_stores_commit_predecessors
);
assert_eq!(
get_predecessors(&repo_4, commit_a1.id()),
[commit_a0.id().clone()]
);
assert_eq!(
get_predecessors(&repo_4, commit_a2.id()),
[commit_a1.id().clone()]
);
assert_eq!(
get_predecessors(&repo_4, commit_b1.id()),
[commit_b0.id().clone()]
);
// Abandon op1
let stats = op_walk::reparent_range(
op_store.as_ref(),
slice::from_ref(repo_1.operation()),
slice::from_ref(repo_4.operation()),
repo_0.operation(),
)
.unwrap();
assert_eq!(stats.new_head_ids.len(), 1);
assert_eq!(stats.rewritten_count, 3);
assert_eq!(stats.unreachable_count, 1);
let repo = repo_at(&stats.new_head_ids[0]);
// A0 - B0 are still reachable
assert!(index_has_id(repo.index(), commit_a0.id()));
assert!(index_has_id(repo.index(), commit_b0.id()));
assert_eq!(
get_predecessors(&repo, commit_a1.id()),
[commit_a0.id().clone()]
);
assert_eq!(
get_predecessors(&repo, commit_b1.id()),
[commit_b0.id().clone()]
);
assert_eq!(get_predecessors(&repo, commit_a0.id()), []);
assert_eq!(get_predecessors(&repo, commit_b0.id()), []);
// Abandon op1 and op2
let stats = op_walk::reparent_range(
op_store.as_ref(),
slice::from_ref(repo_2.operation()),
slice::from_ref(repo_4.operation()),
repo_0.operation(),
)
.unwrap();
assert_eq!(stats.new_head_ids.len(), 1);
assert_eq!(stats.rewritten_count, 2);
assert_eq!(stats.unreachable_count, 2);
let repo = repo_at(&stats.new_head_ids[0]);
// A0 is still reachable
assert!(index_has_id(repo.index(), commit_a0.id()));
// The two storage schemes legitimately diverge here: legacy predecessors
// are embedded in commits and so keep B0 alive, the new scheme does not.
if op_stores_commit_predecessors {
// B0 is no longer reachable
assert!(!index_has_id(repo.index(), commit_b0.id()));
// the predecessor record `A1: A0` no longer exists
assert_eq!(get_predecessors(&repo, commit_a1.id()), []);
// Unreachable predecessors should be excluded
assert_eq!(get_predecessors(&repo, commit_b1.id()), []);
} else {
// B0 is retained because it is immediate predecessor of B1
assert!(index_has_id(repo.index(), commit_b0.id()));
assert_eq!(
get_predecessors(&repo, commit_a1.id()),
[commit_a0.id().clone()]
);
assert_eq!(
get_predecessors(&repo, commit_b1.id()),
[commit_b0.id().clone()]
);
}
// Abandon op1, op2, and op3
let stats = op_walk::reparent_range(
op_store.as_ref(),
slice::from_ref(repo_3.operation()),
slice::from_ref(repo_4.operation()),
repo_0.operation(),
)
.unwrap();
assert_eq!(stats.new_head_ids.len(), 1);
assert_eq!(stats.rewritten_count, 1);
assert_eq!(stats.unreachable_count, 3);
let repo = repo_at(&stats.new_head_ids[0]);
// A0 is no longer reachable
assert!(!index_has_id(repo.index(), commit_a0.id()));
// A1 is still reachable through A2
assert!(index_has_id(repo.index(), commit_a1.id()));
assert_eq!(
get_predecessors(&repo, commit_a2.id()),
[commit_a1.id().clone()]
);
assert_eq!(get_predecessors(&repo, commit_a1.id()), []);
}
/// Returns user settings with a pinned operation timestamp so that operation
/// ids are deterministic across test runs (required by the id-prefix tests
/// that snapshot concrete hashes).
fn stable_op_id_settings() -> UserSettings {
    let mut config = testutils::base_user_config();
    let layer = ConfigLayer::parse(
        ConfigSource::User,
        "debug.operation-timestamp = 2001-02-03T04:05:06+07:00",
    )
    .unwrap();
    config.add_layer(layer);
    UserSettings::from_config(config).unwrap()
}
#[test]
// Resolves operation ids from full ids, unique and ambiguous hex prefixes,
// and the virtual root operation, relying on stable_op_id_settings() so the
// snapshotted hashes below are reproducible.
fn test_resolve_op_id() {
let settings = stable_op_id_settings();
let test_repo = TestRepo::init_with_settings(&settings);
let repo = test_repo.repo;
let loader = repo.loader();
let mut operations = Vec::new();
// The actual value of `i` doesn't matter, we just need to make sure we end
// up with hashes with ambiguous prefixes.
for i in (1..5).chain([9, 27]) {
let tx = repo.start_transaction();
let repo = tx.commit(format!("transaction {i}")).unwrap();
operations.push(repo.operation().clone());
}
// "6" and "0" are ambiguous
insta::assert_debug_snapshot!(operations.iter().map(|op| op.id().hex()).collect_vec(), @r#"
[
"ff7608ec55acf1ccb44bee52a0972f8b36864540ee6547d5e124a30bafd3bfbe00418446d5581cf71debac912a091dd2f93a2dd3bcb34bc53c61604aa9c129d6",
"b83ded05e46bef569737b8c1293c59af3fe89f72bc2cecd017b5eb96f5c69e50205069eedf144ca4fa9e55ac0c27842dce874b371a752223c5f85c4b6faadf96",
"6412c4e33f791b71f440817d3d16c0ee1b7640845db8f5e4146c58e8c3f4329df4662b0edeab5860c86b4679b150f38318a6e3d4ada5803176f9c5273d97f4dd",
"6838be3a934e1b8fc966dcf43796a3cc639a3d26edb1e9af94a285f4ce7edaecfe9e101dd7f0338af22e1632f36d634916015b72b026e1529a7b600566c29067",
"0ecccbdd90dd12a592dd0de010feda8bf23e4a5650f1946a82df854fc28791ad046b2d42b38060103db7fb99c00787689df98a7d2166d8180666b745cc32b172",
"065be6feb0ab573f0638e952ead8482899307d211f80af5dc90952a6171cc7122d5af9f13fde2ce3e37fc5e7776d5e3bc4236d82ce7d2ecbf1f63373c20772e4",
]
"#);
let repo_loader = repo.loader();
let resolve = |op_str: &str| op_walk::resolve_op_for_load(repo_loader, op_str);
// Full id
assert_eq!(resolve(&operations[0].id().hex()).unwrap(), operations[0]);
// Short id, odd length
assert_eq!(
resolve(&operations[0].id().hex()[..3]).unwrap(),
operations[0]
);
// Short id, even length
assert_eq!(
resolve(&operations[1].id().hex()[..2]).unwrap(),
operations[1]
);
// Ambiguous id
assert_matches!(
resolve("6"),
Err(OpsetEvaluationError::OpsetResolution(
OpsetResolutionError::AmbiguousIdPrefix(_)
))
);
// Empty id
assert_matches!(
resolve(""),
Err(OpsetEvaluationError::OpsetResolution(
OpsetResolutionError::InvalidIdPrefix(_)
))
);
// Unknown id
assert_matches!(
resolve("deadbee"),
Err(OpsetEvaluationError::OpsetResolution(
OpsetResolutionError::NoSuchOperation(_)
))
);
// Virtual root id
let root_operation = loader.root_operation();
assert_eq!(resolve(&root_operation.id().hex()).unwrap(), root_operation);
// "00" uniquely prefixes the all-zeros root id, while "0" also matches the
// "0e…" operation above and so is ambiguous.
assert_eq!(resolve("00").unwrap(), root_operation);
assert_eq!(resolve("0e").unwrap(), operations[4]);
assert_matches!(
resolve("0"),
Err(OpsetEvaluationError::OpsetResolution(
OpsetResolutionError::AmbiguousIdPrefix(_)
))
);
}
#[test]
// The "@" opset symbol resolves to the repo's current operation.
fn test_resolve_current_op() {
let settings = stable_op_id_settings();
let test_repo = TestRepo::init_with_settings(&settings);
let repo = test_repo.repo;
assert_eq!(
op_walk::resolve_op_with_repo(&repo, "@").unwrap(),
*repo.operation()
);
}
#[test]
// Exercises the "-" (parent) and "+" (child) opset operators, including
// walking past the root, walking past the heads, old-repo visibility, and
// error reporting when an operator resolves to multiple operations.
fn test_resolve_op_parents_children() {
// Use monotonic timestamp to stabilize merge order of transactions
let settings = testutils::user_settings();
let test_repo = TestRepo::init_with_settings(&settings);
let mut repo = &test_repo.repo;
let mut repos = Vec::new();
for _ in 0..3 {
let tx = repo.start_transaction();
repos.push(tx.commit("test").unwrap());
repo = repos.last().unwrap();
}
let operations = repos.iter().map(|repo| repo.operation()).collect_vec();
// Parent
let op2_id_hex = operations[2].id().hex();
assert_eq!(
op_walk::resolve_op_with_repo(repo, &format!("{op2_id_hex}-")).unwrap(),
*operations[1]
);
assert_eq!(
op_walk::resolve_op_with_repo(repo, &format!("{op2_id_hex}--")).unwrap(),
*operations[0]
);
// "{op2_id_hex}----" is the root operation
assert_matches!(
op_walk::resolve_op_with_repo(repo, &format!("{op2_id_hex}-----")),
Err(OpsetEvaluationError::OpsetResolution(
OpsetResolutionError::EmptyOperations(_)
))
);
// Child
let op0_id_hex = operations[0].id().hex();
assert_eq!(
op_walk::resolve_op_with_repo(repo, &format!("{op0_id_hex}+")).unwrap(),
*operations[1]
);
assert_eq!(
op_walk::resolve_op_with_repo(repo, &format!("{op0_id_hex}++")).unwrap(),
*operations[2]
);
assert_matches!(
op_walk::resolve_op_with_repo(repo, &format!("{op0_id_hex}+++")),
Err(OpsetEvaluationError::OpsetResolution(
OpsetResolutionError::EmptyOperations(_)
))
);
// Child of parent
assert_eq!(
op_walk::resolve_op_with_repo(repo, &format!("{op2_id_hex}--+")).unwrap(),
*operations[1]
);
// Child at old repo: new operations shouldn't be visible
assert_eq!(
op_walk::resolve_op_with_repo(&repos[1], &format!("{op0_id_hex}+")).unwrap(),
*operations[1]
);
assert_matches!(
op_walk::resolve_op_with_repo(&repos[0], &format!("{op0_id_hex}+")),
Err(OpsetEvaluationError::OpsetResolution(
OpsetResolutionError::EmptyOperations(_)
))
);
// Merge and fork
let tx1 = repo.start_transaction();
let tx2 = repo.start_transaction();
let repo = testutils::commit_transactions(vec![tx1, tx2]);
let parent_op_ids = repo.operation().parent_ids();
// The subexpression that resolves to multiple operations (i.e. the accompanying
// op ids) should be reported, not the full expression provided by the user.
let op5_id_hex = repo.operation().id().hex();
let parents_op_str = format!("{op5_id_hex}-");
let error = op_walk::resolve_op_with_repo(&repo, &parents_op_str).unwrap_err();
assert_eq!(
extract_multiple_operations_error(&error).unwrap(),
(&parents_op_str, parent_op_ids)
);
let grandparents_op_str = format!("{op5_id_hex}--");
let error = op_walk::resolve_op_with_repo(&repo, &grandparents_op_str).unwrap_err();
assert_eq!(
extract_multiple_operations_error(&error).unwrap(),
(&parents_op_str, parent_op_ids)
);
let children_of_parents_op_str = format!("{op5_id_hex}-+");
let error = op_walk::resolve_op_with_repo(&repo, &children_of_parents_op_str).unwrap_err();
assert_eq!(
extract_multiple_operations_error(&error).unwrap(),
(&parents_op_str, parent_op_ids)
);
let op2_id_hex = operations[2].id().hex();
let op_str = format!("{op2_id_hex}+");
let error = op_walk::resolve_op_with_repo(&repo, &op_str).unwrap_err();
assert_eq!(
extract_multiple_operations_error(&error).unwrap(),
(&op_str, parent_op_ids)
);
}
#[test]
fn test_walk_ancestors() {
let test_repo = TestRepo::init();
let repo_0 = test_repo.repo;
let loader = repo_0.loader();
fn op_parents<const N: usize>(op: &Operation) -> [Operation; N] {
let parents: Vec<_> = op.parents().try_collect().unwrap();
parents.try_into().unwrap()
}
fn collect_ancestors(head_ops: &[Operation]) -> Vec<Operation> {
op_walk::walk_ancestors(head_ops).try_collect().unwrap()
}
fn collect_ancestors_range(head_ops: &[Operation], root_ops: &[Operation]) -> Vec<Operation> {
op_walk::walk_ancestors_range(head_ops, root_ops)
.try_collect()
.unwrap()
}
// Set up operation graph:
// H
// G
// |\
// | F
// E |
// D |
// |/
// C
// | B
// A |
// |/
// 0 (initial)
let repo_a = repo_0.start_transaction().commit("op A").unwrap();
let repo_b = repo_0
.start_transaction()
.write("op B")
.unwrap()
.leave_unpublished();
let repo_c = repo_a.start_transaction().commit("op C").unwrap();
let repo_d = repo_c.start_transaction().commit("op D").unwrap();
let tx_e = repo_d.start_transaction();
let tx_f = repo_c.start_transaction();
let repo_g = testutils::commit_transactions(vec![tx_e, tx_f]);
let [op_e, op_f] = op_parents(repo_g.operation());
let repo_h = repo_g.start_transaction().commit("op H").unwrap();
// At merge, parents are visited in forward order, which isn't important.
assert_eq!(
collect_ancestors(slice::from_ref(repo_h.operation())),
[
repo_h.operation().clone(),
repo_g.operation().clone(),
op_e.clone(),
repo_d.operation().clone(),
op_f.clone(),
repo_c.operation().clone(),
repo_a.operation().clone(),
loader.root_operation(),
]
);
// Ancestors of multiple heads
assert_eq!(
collect_ancestors(&[op_f.clone(), repo_b.operation().clone()]),
[
op_f.clone(),
repo_c.operation().clone(),
repo_a.operation().clone(),
repo_b.operation().clone(),
loader.root_operation(),
]
);
// Exclude direct ancestor
assert_eq!(
collect_ancestors_range(
slice::from_ref(repo_h.operation()),
slice::from_ref(repo_d.operation()),
),
[
repo_h.operation().clone(),
repo_g.operation().clone(),
op_e.clone(),
op_f.clone(),
]
);
// Exclude indirect ancestor
assert_eq!(
collect_ancestors_range(slice::from_ref(&op_e), slice::from_ref(&op_f)),
[op_e.clone(), repo_d.operation().clone()]
);
// Exclude far ancestor
assert_eq!(
collect_ancestors_range(
slice::from_ref(repo_h.operation()),
slice::from_ref(repo_a.operation()),
),
[
repo_h.operation().clone(),
repo_g.operation().clone(),
op_e.clone(),
repo_d.operation().clone(),
op_f.clone(),
repo_c.operation().clone(),
]
);
// Exclude ancestors of descendant
assert_eq!(
collect_ancestors_range(
slice::from_ref(repo_g.operation()),
slice::from_ref(repo_h.operation()),
),
[]
);
// Exclude multiple roots
assert_eq!(
collect_ancestors_range(
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/runner.rs | lib/tests/runner.rs | use std::path::PathBuf;
#[test]
// Guards against adding a test file under tests/ without declaring it as a
// module below (which would silently skip its tests).
fn test_no_forgotten_test_files() {
let test_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests");
testutils::assert_no_forgotten_test_files(&test_dir);
}
// One module per integration-test file in this directory; keep this list in
// sync (enforced by test_no_forgotten_test_files above in this file).
mod test_annotate;
mod test_bad_locking;
mod test_bisect;
mod test_commit_builder;
mod test_commit_concurrent;
mod test_conflicts;
mod test_default_revset_graph_iterator;
mod test_eol;
mod test_evolution_predecessors;
mod test_fix;
mod test_git;
mod test_git_backend;
mod test_gpg;
mod test_id_prefix;
mod test_index;
mod test_init;
mod test_load_repo;
mod test_local_working_copy;
mod test_local_working_copy_concurrent;
mod test_local_working_copy_executable_bit;
mod test_local_working_copy_sparse;
mod test_merge_trees;
mod test_merged_tree;
mod test_mut_repo;
mod test_operations;
mod test_refs;
mod test_revset;
mod test_revset_optimized;
mod test_rewrite;
mod test_rewrite_duplicate;
mod test_rewrite_transform;
mod test_signing;
mod test_ssh_signing;
mod test_view;
mod test_workspace;
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_bad_locking.rs | lib/tests/test_bad_locking.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::path::Path;
use itertools::Itertools as _;
use jj_lib::repo::Repo as _;
use jj_lib::repo::StoreFactories;
use jj_lib::workspace::Workspace;
use jj_lib::workspace::default_working_copy_factories;
use pollster::FutureExt as _;
use test_case::test_case;
use testutils::TestRepoBackend;
use testutils::TestWorkspace;
use testutils::write_random_commit;
use testutils::write_random_commit_with_parents;
/// Recursively copies the contents of `src` into `dst`.
///
/// `dst` is created if it does not exist; an already-existing directory is
/// reused. Files are copied byte-for-byte and subdirectories are descended
/// into. Any I/O failure panics, which is fine for a test helper.
fn copy_directory(src: &Path, dst: &Path) {
    // Deliberately ignore the result: the destination may already exist.
    let _ = std::fs::create_dir(dst);
    let entries = std::fs::read_dir(src).unwrap();
    for entry in entries {
        let source_child = entry.unwrap().path();
        let name = source_child.file_name().unwrap();
        let target_child = dst.join(name);
        if source_child.is_dir() {
            copy_directory(&source_child, &target_child);
        } else {
            std::fs::copy(&source_child, &target_child).unwrap();
        }
    }
}
// Simulates a distributed file system reconciling two divergent copies: the
// result in `output` is `left` plus the file-level changes between `base` and
// `right` (additions and deletions). When both sides add the same file, either
// side may win -- the callers only require that neither update is lost.
fn merge_directories(left: &Path, base: &Path, right: &Path, output: &Path) {
std::fs::create_dir(output).unwrap();
let mut sub_dirs = vec![];
// Walk the left side and copy to the output
if left.exists() {
for entry in std::fs::read_dir(left).unwrap() {
let path = entry.unwrap().path();
let base_name = path.file_name().unwrap();
let child_left = left.join(base_name);
let child_output = output.join(base_name);
if child_left.is_dir() {
sub_dirs.push(base_name.to_os_string());
} else {
std::fs::copy(&child_left, child_output).unwrap();
}
}
}
// Walk the base and find files removed in the right side, then remove them in
// the output
if base.exists() {
for entry in std::fs::read_dir(base).unwrap() {
let path = entry.unwrap().path();
let base_name = path.file_name().unwrap();
let child_base = base.join(base_name);
let child_right = right.join(base_name);
let child_output = output.join(base_name);
if child_base.is_dir() {
sub_dirs.push(base_name.to_os_string());
} else if !child_right.exists() {
// `.ok()`: the file may not exist in the output if left deleted it too.
std::fs::remove_file(child_output).ok();
}
}
}
// Walk the right side and find files added in the right side, then add them in
// the output
if right.exists() {
for entry in std::fs::read_dir(right).unwrap() {
let path = entry.unwrap().path();
let base_name = path.file_name().unwrap();
let child_base = base.join(base_name);
let child_right = right.join(base_name);
let child_output = output.join(base_name);
if child_right.is_dir() {
sub_dirs.push(base_name.to_os_string());
} else if !child_base.exists() {
// This overwrites the left side if that's been written. That's fine, since the
// point of the test is that it should be okay for either side to win.
std::fs::copy(&child_right, child_output).unwrap();
}
}
}
// Do the merge in subdirectories
// Subdirectory names may have been collected from all three walks above, so
// sort + dedup before recursing once per name.
for base_name in sub_dirs.iter().sorted().dedup() {
let child_base = base.join(base_name);
let child_right = right.join(base_name);
let child_left = left.join(base_name);
let child_output = output.join(base_name);
merge_directories(&child_left, &child_base, &child_right, &child_output);
}
}
#[test_case(TestRepoBackend::Simple; "simple backend")]
#[test_case(TestRepoBackend::Git; "git backend")]
fn test_bad_locking_children(backend: TestRepoBackend) {
// Test that two new commits created on separate machines are both visible (not
// lost due to lack of locking)
let settings = testutils::user_settings();
let test_workspace = TestWorkspace::init_with_backend_and_settings(backend, &settings);
let repo = &test_workspace.repo;
let workspace_root = test_workspace.workspace.workspace_root();
let mut tx = repo.start_transaction();
let initial = write_random_commit(tx.repo_mut());
tx.commit("test").unwrap();
// Simulate a write of a commit that happens on one machine
// "Machine 1" is a plain directory copy of the workspace, loaded as its own
// workspace.
let machine1_root = test_workspace.root_dir().join("machine1");
copy_directory(workspace_root, &machine1_root);
let machine1_workspace = Workspace::load(
&settings,
&machine1_root,
&StoreFactories::default(),
&default_working_copy_factories(),
)
.unwrap();
let machine1_repo = machine1_workspace.repo_loader().load_at_head().unwrap();
let mut machine1_tx = machine1_repo.start_transaction();
let child1 = write_random_commit_with_parents(machine1_tx.repo_mut(), &[&initial]);
machine1_tx.commit("test").unwrap();
// Simulate a write of a commit that happens on another machine
let machine2_root = test_workspace.root_dir().join("machine2");
copy_directory(workspace_root, &machine2_root);
let machine2_workspace = Workspace::load(
&settings,
&machine2_root,
&StoreFactories::default(),
&default_working_copy_factories(),
)
.unwrap();
let machine2_repo = machine2_workspace.repo_loader().load_at_head().unwrap();
let mut machine2_tx = machine2_repo.start_transaction();
let child2 = write_random_commit_with_parents(machine2_tx.repo_mut(), &[&initial]);
machine2_tx.commit("test").unwrap();
// Simulate that the distributed file system now has received the changes from
// both machines
let merged_path = test_workspace.root_dir().join("merged");
merge_directories(&machine1_root, workspace_root, &machine2_root, &merged_path);
let merged_workspace = Workspace::load(
&settings,
&merged_path,
&StoreFactories::default(),
&default_working_copy_factories(),
)
.unwrap();
let merged_repo = merged_workspace.repo_loader().load_at_head().unwrap();
// Both machines' commits must survive the merge...
assert!(merged_repo.view().heads().contains(child1.id()));
assert!(merged_repo.view().heads().contains(child2.id()));
// ...and loading the merged repo creates a merge operation over the two
// divergent operation heads.
let op_id = merged_repo.op_id().clone();
let op = merged_repo
.op_store()
.read_operation(&op_id)
.block_on()
.unwrap();
assert_eq!(op.parents.len(), 2);
}
#[test_case(TestRepoBackend::Simple ; "simple backend")]
#[test_case(TestRepoBackend::Git ; "git backend")]
fn test_bad_locking_interrupted(backend: TestRepoBackend) {
// Test that an interrupted update of the op-heads resulting in on op-head
// that's a descendant of the other is resolved without creating a new
// operation.
let settings = testutils::user_settings();
let test_workspace = TestWorkspace::init_with_backend_and_settings(backend, &settings);
let test_env = &test_workspace.env;
let repo = &test_workspace.repo;
let mut tx = repo.start_transaction();
let initial = write_random_commit(tx.repo_mut());
let repo = tx.commit("test").unwrap();
// Simulate a crash that resulted in the old op-head left in place. We simulate
// it somewhat hackily by copying the .jj/op_heads/ directory before the
// operation and then copying that back afterwards, leaving the existing
// op-head(s) in place.
let op_heads_dir = test_workspace.repo_path().join("op_heads");
let backup_path = test_workspace.root_dir().join("backup");
copy_directory(&op_heads_dir, &backup_path);
let mut tx = repo.start_transaction();
write_random_commit_with_parents(tx.repo_mut(), &[&initial]);
let op_id = tx.commit("test").unwrap().operation().id().clone();
// Restore the stale op-head next to the new one.
copy_directory(&backup_path, &op_heads_dir);
// Reload the repo and check that only the new head is present.
let reloaded_repo = test_env.load_repo_at_head(&settings, test_workspace.repo_path());
assert_eq!(reloaded_repo.op_id(), &op_id);
// Reload once more to make sure that the .jj/op_heads/ directory was updated
// correctly.
let reloaded_repo = test_env.load_repo_at_head(&settings, test_workspace.repo_path());
assert_eq!(reloaded_repo.op_id(), &op_id);
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_fix.rs | lib/tests/test_fix.rs | // Copyright 2021 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::collections::HashSet;
use jj_lib::backend::CommitId;
use jj_lib::backend::FileId;
use jj_lib::fix::FileFixer;
use jj_lib::fix::FileToFix;
use jj_lib::fix::FixError;
use jj_lib::fix::ParallelFileFixer;
use jj_lib::fix::fix_files;
use jj_lib::matchers::EverythingMatcher;
use jj_lib::merged_tree::MergedTree;
use jj_lib::repo::Repo as _;
use jj_lib::store::Store;
use jj_lib::transaction::Transaction;
use pollster::FutureExt as _;
use testutils::TestRepo;
use testutils::assert_tree_eq;
use testutils::create_tree;
use testutils::create_tree_with;
use testutils::read_file;
use testutils::repo_path;
use thiserror::Error;
/// Stateless stub fixer used by the tests below; its behavior is entirely
/// defined by the `FileFixer` trait impl further down.
struct TestFileFixer {}

impl TestFileFixer {
    /// Creates a new (zero-sized) fixer.
    fn new() -> Self {
        TestFileFixer {}
    }
}
// A file fixer that changes files to uppercase if the file content starts with
// "fixme", returns an error if the content starts with "error", and otherwise
// leaves files unchanged.
impl FileFixer for TestFileFixer {
    /// Applies `fix_file` to every candidate, recording an entry only for
    /// files whose content was actually rewritten. The first fixer error
    /// aborts the whole batch.
    fn fix_files<'a>(
        &mut self,
        store: &Store,
        files_to_fix: &'a HashSet<FileToFix>,
    ) -> Result<HashMap<&'a FileToFix, FileId>, FixError> {
        let mut fixed = HashMap::new();
        for file in files_to_fix {
            match fix_file(store, file)? {
                Some(new_id) => {
                    fixed.insert(file, new_id);
                }
                None => {}
            }
        }
        Ok(fixed)
    }
}
// Error type used to force recognizable fixer failures in tests.
#[derive(Error, Debug)]
#[error("Forced failure: {0}")]
struct MyFixerError(String);
// Wraps an arbitrary message into `FixError::FixContent` so tests can make
// the fixer fail with a distinctive payload.
fn make_fix_content_error(message: &str) -> FixError {
FixError::FixContent(Box::new(MyFixerError(message.into())))
}
// Reads the file from the store. If the content starts with "error:", an
// error carrying the rest of the content is raised. If it starts with
// "fixme:", the rest is uppercased, written back, and the new file id is
// returned. Otherwise returns None (nothing to fix).
fn fix_file(store: &Store, file_to_fix: &FileToFix) -> Result<Option<FileId>, FixError> {
    let old_content = read_file(store, &file_to_fix.repo_path, &file_to_fix.file_id);
    // The two prefixes are mutually exclusive, so check order doesn't matter.
    if let Some(payload) = old_content.strip_prefix(b"error:") {
        return Err(make_fix_content_error(str::from_utf8(payload).unwrap()));
    }
    let Some(payload) = old_content.strip_prefix(b"fixme:") else {
        return Ok(None);
    };
    let fixed = payload.to_ascii_uppercase();
    let new_file_id = store
        .write_file(&file_to_fix.repo_path, &mut fixed.as_slice())
        .block_on()
        .unwrap();
    Ok(Some(new_file_id))
}
/// Writes a new commit with the given parents and tree into the
/// transaction, returning its id.
fn create_commit(tx: &mut Transaction, parents: Vec<CommitId>, tree: MergedTree) -> CommitId {
    let commit = tx.repo_mut().new_commit(parents, tree).write().unwrap();
    commit.id().clone()
}
#[test]
fn test_fix_one_file() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    // One commit on top of root containing a single fixable file.
    let path1 = repo_path("file1");
    let tree = create_tree(repo, &[(path1, "fixme:content")]);
    let commit_a = create_commit(&mut tx, vec![repo.store().root_commit_id().clone()], tree);
    let mut file_fixer = TestFileFixer::new();
    let summary = fix_files(
        vec![commit_a.clone()],
        &EverythingMatcher,
        /* include_unchanged_files= */ false,
        tx.repo_mut(),
        &mut file_fixer,
    )
    .block_on()
    .unwrap();
    // The commit is rewritten with the uppercased file content.
    assert_eq!(summary.rewrites.len(), 1);
    assert!(summary.rewrites.contains_key(&commit_a));
    assert_eq!(summary.num_checked_commits, 1);
    assert_eq!(summary.num_fixed_commits, 1);
    let rewritten = repo
        .store()
        .get_commit(summary.rewrites.get(&commit_a).unwrap())
        .unwrap();
    assert_tree_eq!(rewritten.tree(), create_tree(repo, &[(path1, "CONTENT")]));
}
#[test]
fn test_fixer_does_not_change_content() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    // The content has neither the "fixme:" nor the "error:" prefix, so the
    // fixer leaves the file alone.
    let path1 = repo_path("file1");
    let tree = create_tree(repo, &[(path1, "content")]);
    let commit_a = create_commit(&mut tx, vec![repo.store().root_commit_id().clone()], tree);
    let mut file_fixer = TestFileFixer::new();
    let summary = fix_files(
        vec![commit_a],
        &EverythingMatcher,
        /* include_unchanged_files= */ false,
        tx.repo_mut(),
        &mut file_fixer,
    )
    .block_on()
    .unwrap();
    // Nothing changed, so no commit is rewritten.
    assert!(summary.rewrites.is_empty());
    assert_eq!(summary.num_checked_commits, 1);
    assert_eq!(summary.num_fixed_commits, 0);
}
#[test]
fn test_empty_commit() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    // A commit with an empty tree: there are no files for the fixer to see.
    let empty_tree = create_tree(repo, &[]);
    let commit_a = create_commit(
        &mut tx,
        vec![repo.store().root_commit_id().clone()],
        empty_tree,
    );
    let mut file_fixer = TestFileFixer::new();
    let summary = fix_files(
        vec![commit_a],
        &EverythingMatcher,
        /* include_unchanged_files= */ false,
        tx.repo_mut(),
        &mut file_fixer,
    )
    .block_on()
    .unwrap();
    // The commit is still checked, but nothing is rewritten.
    assert!(summary.rewrites.is_empty());
    assert_eq!(summary.num_checked_commits, 1);
    assert_eq!(summary.num_fixed_commits, 0);
}
#[test]
fn test_fixer_fails() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    // A file whose content makes the test fixer return an error.
    let path1 = repo_path("file1");
    let tree = create_tree(repo, &[(path1, "error:boo")]);
    let commit_a = create_commit(&mut tx, vec![repo.store().root_commit_id().clone()], tree);
    let mut file_fixer = TestFileFixer::new();
    let result = fix_files(
        vec![commit_a],
        &EverythingMatcher,
        /* include_unchanged_files= */ false,
        tx.repo_mut(),
        &mut file_fixer,
    )
    .block_on();
    // The fixer's error is propagated out of fix_files().
    let error = result.err().unwrap();
    assert_eq!(error.to_string(), "Forced failure: boo");
}
#[test]
fn test_unchanged_file_is_not_fixed() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    // Commit B carries file1 with the same content as its parent A.
    let path1 = repo_path("file1");
    let tree_a = create_tree(repo, &[(path1, "fixme:content")]);
    let commit_a = create_commit(&mut tx, vec![repo.store().root_commit_id().clone()], tree_a);
    let tree_b = create_tree(repo, &[(path1, "fixme:content")]);
    let commit_b = create_commit(&mut tx, vec![commit_a.clone()], tree_b);
    let mut file_fixer = TestFileFixer::new();
    // With include_unchanged_files=false, the file B didn't modify is skipped.
    let summary = fix_files(
        vec![commit_b],
        &EverythingMatcher,
        /* include_unchanged_files= */ false,
        tx.repo_mut(),
        &mut file_fixer,
    )
    .block_on()
    .unwrap();
    assert!(summary.rewrites.is_empty());
    assert_eq!(summary.num_checked_commits, 1);
    assert_eq!(summary.num_fixed_commits, 0);
}
#[test]
fn test_unchanged_file_is_fixed() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    // Commit B carries file1 with the same content as its parent A.
    let path1 = repo_path("file1");
    let tree_a = create_tree(repo, &[(path1, "fixme:content")]);
    let commit_a = create_commit(&mut tx, vec![repo.store().root_commit_id().clone()], tree_a);
    let tree_b = create_tree(repo, &[(path1, "fixme:content")]);
    let commit_b = create_commit(&mut tx, vec![commit_a.clone()], tree_b);
    let mut file_fixer = TestFileFixer::new();
    // With include_unchanged_files=true, even the file B didn't modify gets
    // fixed.
    let summary = fix_files(
        vec![commit_b.clone()],
        &EverythingMatcher,
        /* include_unchanged_files= */ true,
        tx.repo_mut(),
        &mut file_fixer,
    )
    .block_on()
    .unwrap();
    assert_eq!(summary.rewrites.len(), 1);
    assert!(summary.rewrites.contains_key(&commit_b));
    assert_eq!(summary.num_checked_commits, 1);
    assert_eq!(summary.num_fixed_commits, 1);
    let rewritten = repo
        .store()
        .get_commit(summary.rewrites.get(&commit_b).unwrap())
        .unwrap();
    assert_tree_eq!(rewritten.tree(), create_tree(repo, &[(path1, "CONTENT")]));
}
/// If a descendant is already correctly formatted, it should still be rewritten
/// but its tree should be preserved.
#[test]
fn test_already_fixed_descendant() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    let path1 = repo_path("file1");
    // Commit A has a fixable file; its child B already has the fixed content.
    let tree1 = create_tree(repo, &[(path1, "fixme:content")]);
    let commit_a = create_commit(&mut tx, vec![repo.store().root_commit_id().clone()], tree1);
    let tree2 = create_tree(repo, &[(path1, "CONTENT")]);
    let commit_b = create_commit(&mut tx, vec![commit_a.clone()], tree2.clone());
    let root_commits = vec![commit_a.clone()];
    let mut file_fixer = TestFileFixer::new();
    let summary = fix_files(
        root_commits,
        &EverythingMatcher,
        /* include_unchanged_files= */ true,
        tx.repo_mut(),
        &mut file_fixer,
    )
    .block_on()
    .unwrap();
    // Both commits are rewritten (B because its parent changed), but only A's
    // content actually needed fixing.
    assert_eq!(summary.rewrites.len(), 2);
    assert!(summary.rewrites.contains_key(&commit_a));
    assert!(summary.rewrites.contains_key(&commit_b));
    assert_eq!(summary.num_checked_commits, 2);
    assert_eq!(summary.num_fixed_commits, 1);
    let new_commit_a = repo
        .store()
        .get_commit(summary.rewrites.get(&commit_a).unwrap())
        .unwrap();
    assert_tree_eq!(new_commit_a.tree(), tree2);
    // Fixed a copy-paste bug: this previously looked up commit A's rewrite
    // again, so rewritten B's tree was never actually checked.
    let new_commit_b = repo
        .store()
        .get_commit(summary.rewrites.get(&commit_b).unwrap())
        .unwrap();
    assert_tree_eq!(new_commit_b.tree(), tree2);
}
#[test]
fn test_parallel_fixer_basic() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    // Same scenario as test_fix_one_file, but through ParallelFileFixer.
    let path1 = repo_path("file1");
    let tree = create_tree(repo, &[(path1, "fixme:content")]);
    let commit_a = create_commit(&mut tx, vec![repo.store().root_commit_id().clone()], tree);
    let mut parallel_fixer = ParallelFileFixer::new(fix_file);
    let summary = fix_files(
        vec![commit_a.clone()],
        &EverythingMatcher,
        /* include_unchanged_files= */ false,
        tx.repo_mut(),
        &mut parallel_fixer,
    )
    .block_on()
    .unwrap();
    assert_eq!(summary.rewrites.len(), 1);
    assert!(summary.rewrites.contains_key(&commit_a));
    assert_eq!(summary.num_checked_commits, 1);
    assert_eq!(summary.num_fixed_commits, 1);
    let rewritten = repo
        .store()
        .get_commit(summary.rewrites.get(&commit_a).unwrap())
        .unwrap();
    assert_tree_eq!(rewritten.tree(), create_tree(repo, &[(path1, "CONTENT")]));
}
#[test]
fn test_parallel_fixer_fixes_files() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    // 100 fixable files in one commit, to give the parallel fixer real work.
    let tree = create_tree_with(repo, |builder| {
        for i in 0..100 {
            builder.file(repo_path(&format!("file{i}")), format!("fixme:content{i}"));
        }
    });
    let commit_a = create_commit(&mut tx, vec![repo.store().root_commit_id().clone()], tree);
    let mut parallel_fixer = ParallelFileFixer::new(fix_file);
    let summary = fix_files(
        vec![commit_a.clone()],
        &EverythingMatcher,
        /* include_unchanged_files= */ false,
        tx.repo_mut(),
        &mut parallel_fixer,
    )
    .block_on()
    .unwrap();
    assert_eq!(summary.rewrites.len(), 1);
    assert!(summary.rewrites.contains_key(&commit_a));
    assert_eq!(summary.num_checked_commits, 1);
    assert_eq!(summary.num_fixed_commits, 1);
    // Every file's content must have been uppercased.
    let expected_tree = create_tree_with(repo, |builder| {
        for i in 0..100 {
            builder.file(repo_path(&format!("file{i}")), format!("CONTENT{i}"));
        }
    });
    let rewritten = repo
        .store()
        .get_commit(summary.rewrites.get(&commit_a).unwrap())
        .unwrap();
    assert_tree_eq!(rewritten.tree(), expected_tree);
}
#[test]
fn test_parallel_fixer_does_not_change_content() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    // 100 files, none of them carrying a fixer-recognized prefix.
    let tree = create_tree_with(repo, |builder| {
        for i in 0..100 {
            builder.file(repo_path(&format!("file{i}")), format!("content{i}"));
        }
    });
    let commit_a = create_commit(&mut tx, vec![repo.store().root_commit_id().clone()], tree);
    let mut parallel_fixer = ParallelFileFixer::new(fix_file);
    let summary = fix_files(
        vec![commit_a],
        &EverythingMatcher,
        /* include_unchanged_files= */ false,
        tx.repo_mut(),
        &mut parallel_fixer,
    )
    .block_on()
    .unwrap();
    // Nothing changed, so no commit is rewritten.
    assert!(summary.rewrites.is_empty());
    assert_eq!(summary.num_checked_commits, 1);
    assert_eq!(summary.num_fixed_commits, 0);
}
#[test]
fn test_parallel_fixer_no_changes_upon_partial_failure() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    // A mix of fixable, untouched, and one erroring file (file7).
    let tree = create_tree_with(repo, |builder| {
        for i in 0..100 {
            let contents = if i == 7 {
                format!("error:boo{i}")
            } else if i % 3 == 0 {
                format!("fixme:content{i}")
            } else {
                format!("foobar:{i}")
            };
            builder.file(repo_path(&format!("file{i}")), &contents);
        }
    });
    let commit_a = create_commit(&mut tx, vec![repo.store().root_commit_id().clone()], tree);
    let mut parallel_fixer = ParallelFileFixer::new(fix_file);
    let result = fix_files(
        vec![commit_a],
        &EverythingMatcher,
        /* include_unchanged_files= */ false,
        tx.repo_mut(),
        &mut parallel_fixer,
    )
    .block_on();
    // A single failing file fails the whole fix operation.
    let error = result.err().unwrap();
    assert_eq!(error.to_string(), "Forced failure: boo7");
}
#[test]
fn test_fix_multiple_revisions() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Build the graph below and run the fixer starting at commit A. Only
    // commit A contains fixable content; the test verifies A's rewritten
    // tree.
    //
    // NOTE(review): an earlier comment here claimed "Commit B was replaced
    // by commit D" and described expectations for commit C, but this test
    // never inspects B, C, or D — confirm whether descendant checks were
    // intended.
    //
    // D
    // | C
    // | B
    // |/
    // A
    let mut tx = repo.start_transaction();
    let path1 = repo_path("file1");
    let tree1 = create_tree(repo, &[(path1, "fixme:xyz")]);
    let commit_a = create_commit(&mut tx, vec![repo.store().root_commit_id().clone()], tree1);
    let path2 = repo_path("file2");
    let tree2 = create_tree(repo, &[(path2, "content")]);
    let commit_b = create_commit(&mut tx, vec![commit_a.clone()], tree2);
    let path3 = repo_path("file3");
    let tree3 = create_tree(repo, &[(path3, "content")]);
    let _commit_c = create_commit(&mut tx, vec![commit_b.clone()], tree3);
    let path4 = repo_path("file4");
    let tree4 = create_tree(repo, &[(path4, "content")]);
    let _commit_d = create_commit(&mut tx, vec![commit_a.clone()], tree4);
    let root_commits = vec![commit_a.clone()];
    let mut file_fixer = TestFileFixer::new();
    let include_unchanged_files = false;
    let summary = fix_files(
        root_commits,
        &EverythingMatcher,
        include_unchanged_files,
        tx.repo_mut(),
        &mut file_fixer,
    )
    .block_on()
    .unwrap();
    // A's file content is uppercased.
    let expected_tree_a = create_tree(repo, &[(path1, "XYZ")]);
    let new_commit_a = repo
        .store()
        .get_commit(summary.rewrites.get(&commit_a).unwrap())
        .unwrap();
    assert_tree_eq!(new_commit_a.tree(), expected_tree_a);
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
// Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fs;
use std::sync::Arc;
use assert_matches::assert_matches;
use itertools::Itertools as _;
use jj_lib::backend::ChangeId;
use jj_lib::backend::CommitId;
use jj_lib::commit::Commit;
use jj_lib::default_index::DefaultIndexStore;
use jj_lib::default_index::DefaultIndexStoreError;
use jj_lib::default_index::DefaultMutableIndex;
use jj_lib::default_index::DefaultReadonlyIndex;
use jj_lib::index::Index;
use jj_lib::index::ResolvedChangeState;
use jj_lib::index::ResolvedChangeTargets;
use jj_lib::object_id::HexPrefix;
use jj_lib::object_id::ObjectId as _;
use jj_lib::object_id::PrefixResolution;
use jj_lib::op_store::RefTarget;
use jj_lib::op_store::RemoteRef;
use jj_lib::ref_name::RefName;
use jj_lib::ref_name::RemoteName;
use jj_lib::ref_name::RemoteRefSymbol;
use jj_lib::repo::MutableRepo;
use jj_lib::repo::ReadonlyRepo;
use jj_lib::repo::Repo as _;
use jj_lib::repo_path::RepoPathBuf;
use jj_lib::revset::GENERATION_RANGE_FULL;
use jj_lib::revset::PARENTS_RANGE_FULL;
use jj_lib::revset::ResolvedExpression;
use pollster::FutureExt as _;
use test_case::test_case;
use testutils::TestRepo;
use testutils::assert_tree_eq;
use testutils::commit_transactions;
use testutils::create_tree;
use testutils::repo_path;
use testutils::repo_path_buf;
use testutils::test_backend::TestBackend;
use testutils::write_random_commit;
use testutils::write_random_commit_with_parents;
/// Builds a `RemoteRefSymbol` from anything that borrows as a ref name
/// and a remote name.
fn remote_symbol<'a, N, M>(name: &'a N, remote: &'a M) -> RemoteRefSymbol<'a>
where
    N: AsRef<RefName> + ?Sized,
    M: AsRef<RemoteName> + ?Sized,
{
    let name = name.as_ref();
    let remote = remote.as_ref();
    RemoteRefSymbol { name, remote }
}
/// Builds the changed-path index at the repo's current operation and returns
/// a reloaded repo so subsequent queries can use it.
fn enable_changed_path_index(repo: &ReadonlyRepo) -> Arc<ReadonlyRepo> {
    let default_index_store: &DefaultIndexStore = repo.index_store().downcast_ref().unwrap();
    default_index_store
        // NOTE(review): the meaning of the trailing `0` argument isn't
        // visible here — confirm against build_changed_path_index_at_operation.
        .build_changed_path_index_at_operation(repo.op_id(), repo.store(), 0)
        .block_on()
        .unwrap();
    repo.reload_at(repo.operation()).unwrap()
}
/// Returns the changed paths recorded for `commit_id`, or `None` when the
/// index has no changed-path data for that commit.
fn collect_changed_paths(repo: &ReadonlyRepo, commit_id: &CommitId) -> Option<Vec<RepoPathBuf>> {
    let paths = repo.index().changed_paths_in_commit(commit_id).unwrap()?;
    Some(paths.collect())
}
/// Convenience wrapper: whether `commit_id` is present in `index`.
fn index_has_id(index: &dyn Index, commit_id: &CommitId) -> bool {
    index.has_id(commit_id).unwrap()
}
/// Convenience wrapper: whether `ancestor_id` is an ancestor of
/// `descendant_id` according to the index.
fn is_ancestor(
    index: &DefaultReadonlyIndex,
    ancestor_id: &CommitId,
    descendant_id: &CommitId,
) -> bool {
    index.is_ancestor(ancestor_id, descendant_id).unwrap()
}
#[test]
fn test_index_commits_empty_repo() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let index = as_readonly_index(repo);
    // A fresh repo indexes only the root commit.
    assert_eq!(index.num_commits(), 1);
    // The root commit sits at generation 0.
    let root_id = repo.store().root_commit_id();
    assert_eq!(index.generation_number(root_id).unwrap(), 0);
}
#[test]
fn test_index_commits_standard_cases() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // o H
    // o | G
    // o | F
    // |\|
    // | o E
    // | o D
    // | o C
    // o | B
    // |/
    // o A
    // | o working copy
    // |/
    // o root
    let root_commit_id = repo.store().root_commit_id();
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_c]);
    let commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_d]);
    let commit_f = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b, &commit_e]);
    let commit_g = write_random_commit_with_parents(tx.repo_mut(), &[&commit_f]);
    let commit_h = write_random_commit_with_parents(tx.repo_mut(), &[&commit_e]);
    let repo = tx.commit("test").unwrap();
    let index = as_readonly_index(&repo);
    // There should be the root commit, plus 8 more
    assert_eq!(index.num_commits(), 1 + 8);
    let stats = index.stats();
    assert_eq!(stats.num_commits, 1 + 8);
    // F is the only merge commit (parents B and E).
    assert_eq!(stats.num_merges, 1);
    assert_eq!(stats.max_generation_number, 6);
    // Generation numbers follow the longest path from the root.
    assert_eq!(index.generation_number(root_commit_id).unwrap(), 0);
    assert_eq!(index.generation_number(commit_a.id()).unwrap(), 1);
    assert_eq!(index.generation_number(commit_b.id()).unwrap(), 2);
    assert_eq!(index.generation_number(commit_c.id()).unwrap(), 2);
    assert_eq!(index.generation_number(commit_d.id()).unwrap(), 3);
    assert_eq!(index.generation_number(commit_e.id()).unwrap(), 4);
    assert_eq!(index.generation_number(commit_f.id()).unwrap(), 5);
    assert_eq!(index.generation_number(commit_g.id()).unwrap(), 6);
    assert_eq!(index.generation_number(commit_h.id()).unwrap(), 5);
    // Ancestry checks: root is below everything, siblings are unrelated.
    assert!(is_ancestor(index, root_commit_id, commit_a.id()));
    assert!(!is_ancestor(index, commit_a.id(), root_commit_id));
    assert!(is_ancestor(index, root_commit_id, commit_b.id()));
    assert!(!is_ancestor(index, commit_b.id(), root_commit_id));
    assert!(!is_ancestor(index, commit_b.id(), commit_c.id()));
    assert!(is_ancestor(index, commit_a.id(), commit_b.id()));
    assert!(is_ancestor(index, commit_a.id(), commit_e.id()));
    assert!(is_ancestor(index, commit_a.id(), commit_f.id()));
    assert!(is_ancestor(index, commit_a.id(), commit_g.id()));
    assert!(is_ancestor(index, commit_a.id(), commit_h.id()));
}
#[test]
fn test_index_commits_criss_cross() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let num_generations = 50;
    // Create a long chain of criss-crossed merges. If they were traversed without
    // keeping track of visited nodes, it would be 2^50 visits, so if this test
    // finishes in reasonable time, we know that we don't do a naive traversal.
    let mut tx = repo.start_transaction();
    let mut left_commits = vec![write_random_commit(tx.repo_mut())];
    let mut right_commits = vec![write_random_commit(tx.repo_mut())];
    for generation in 1..num_generations {
        // Each generation's left and right commits both merge the previous
        // generation's left and right commits.
        let new_left = write_random_commit_with_parents(
            tx.repo_mut(),
            &[
                &left_commits[generation - 1],
                &right_commits[generation - 1],
            ],
        );
        let new_right = write_random_commit_with_parents(
            tx.repo_mut(),
            &[
                &left_commits[generation - 1],
                &right_commits[generation - 1],
            ],
        );
        left_commits.push(new_left);
        right_commits.push(new_right);
    }
    let repo = tx.commit("test").unwrap();
    let index = as_readonly_index(&repo);
    // There should be the root commit, plus 2 for each generation
    assert_eq!(index.num_commits(), 1 + 2 * (num_generations as u32));
    let stats = index.stats();
    assert_eq!(stats.num_commits, 1 + 2 * (num_generations as u32));
    // The first generations are not merges
    assert_eq!(stats.num_merges, 2 * (num_generations as u32 - 1));
    assert_eq!(stats.max_generation_number, num_generations as u32);
    // Check generation numbers
    for generation in 0..num_generations {
        assert_eq!(
            index
                .generation_number(left_commits[generation].id())
                .unwrap(),
            (generation as u32) + 1
        );
        assert_eq!(
            index
                .generation_number(right_commits[generation].id())
                .unwrap(),
            (generation as u32) + 1
        );
    }
    // The left and right commits of the same generation should not be ancestors of
    // each other
    for generation in 0..num_generations {
        assert!(!is_ancestor(
            index,
            left_commits[generation].id(),
            right_commits[generation].id()
        ));
        assert!(!is_ancestor(
            index,
            right_commits[generation].id(),
            left_commits[generation].id()
        ));
    }
    // Both sides of earlier generations should be ancestors. Check a few different
    // earlier generations.
    for generation in 1..num_generations {
        for ancestor_side in &[&left_commits, &right_commits] {
            for descendant_side in &[&left_commits, &right_commits] {
                assert!(is_ancestor(
                    index,
                    ancestor_side[0].id(),
                    descendant_side[generation].id()
                ));
                assert!(is_ancestor(
                    index,
                    ancestor_side[generation - 1].id(),
                    descendant_side[generation].id()
                ));
                assert!(is_ancestor(
                    index,
                    ancestor_side[generation / 2].id(),
                    descendant_side[generation].id()
                ));
            }
        }
    }
    // Counts the revisions in heads..roots (exclusive of roots) restricted to
    // the given generation range, via direct revset evaluation.
    let count_revs = |wanted: &[CommitId], unwanted: &[CommitId], generation| {
        // Constructs ResolvedExpression directly to bypass tree optimization.
        let expression = ResolvedExpression::Range {
            roots: ResolvedExpression::Commits(unwanted.to_vec()).into(),
            heads: ResolvedExpression::Commits(wanted.to_vec()).into(),
            generation,
            parents_range: PARENTS_RANGE_FULL,
        };
        let revset = index.evaluate_revset(&expression, repo.store()).unwrap();
        // Don't switch to more efficient .count() implementation. Here we're
        // testing the iterator behavior.
        revset.iter().count()
    };
    // RevWalk deduplicates chains by entry.
    assert_eq!(
        count_revs(
            &[left_commits[num_generations - 1].id().clone()],
            &[],
            GENERATION_RANGE_FULL,
        ),
        2 * num_generations
    );
    assert_eq!(
        count_revs(
            &[right_commits[num_generations - 1].id().clone()],
            &[],
            GENERATION_RANGE_FULL,
        ),
        2 * num_generations
    );
    assert_eq!(
        count_revs(
            &[left_commits[num_generations - 1].id().clone()],
            &[left_commits[num_generations - 2].id().clone()],
            GENERATION_RANGE_FULL,
        ),
        2
    );
    assert_eq!(
        count_revs(
            &[right_commits[num_generations - 1].id().clone()],
            &[right_commits[num_generations - 2].id().clone()],
            GENERATION_RANGE_FULL,
        ),
        2
    );
    // RevWalkGenerationRange deduplicates chains by (entry, generation), which may
    // be more expensive than RevWalk, but should still finish in reasonable time.
    assert_eq!(
        count_revs(
            &[left_commits[num_generations - 1].id().clone()],
            &[],
            0..(num_generations + 1) as u64,
        ),
        2 * num_generations
    );
    assert_eq!(
        count_revs(
            &[right_commits[num_generations - 1].id().clone()],
            &[],
            0..(num_generations + 1) as u64,
        ),
        2 * num_generations
    );
    assert_eq!(
        count_revs(
            &[left_commits[num_generations - 1].id().clone()],
            &[left_commits[num_generations - 2].id().clone()],
            0..(num_generations + 1) as u64,
        ),
        2
    );
    assert_eq!(
        count_revs(
            &[right_commits[num_generations - 1].id().clone()],
            &[right_commits[num_generations - 2].id().clone()],
            0..(num_generations + 1) as u64,
        ),
        2
    );
}
#[test]
fn test_index_commits_previous_operations() {
    // Test that commits visible only in previous operations are indexed.
    let settings = testutils::user_settings();
    let test_repo = TestRepo::init();
    let test_env = &test_repo.env;
    let repo = &test_repo.repo;
    // Remove commit B and C in one operation and make sure they're still
    // visible in the index after that operation.
    // o C
    // o B
    // o A
    // | o working copy
    // |/
    // o root
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let repo = tx.commit("test").unwrap();
    let mut tx = repo.start_transaction();
    tx.repo_mut().remove_head(commit_c.id());
    let repo = tx.commit("test").unwrap();
    // Delete index from disk
    let default_index_store: &DefaultIndexStore = repo.index_store().downcast_ref().unwrap();
    default_index_store.reinit().unwrap();
    // Reloading forces a reindex, which must pick up commits from earlier
    // operations even though C is no longer a visible head.
    let repo = test_env.load_repo_at_head(&settings, test_repo.repo_path());
    let index = as_readonly_index(&repo);
    // There should be the root commit, plus 3 more
    assert_eq!(index.num_commits(), 1 + 3);
    let stats = index.stats();
    assert_eq!(stats.num_commits, 1 + 3);
    assert_eq!(stats.num_merges, 0);
    assert_eq!(stats.max_generation_number, 3);
    assert_eq!(index.generation_number(commit_a.id()).unwrap(), 1);
    assert_eq!(index.generation_number(commit_b.id()).unwrap(), 2);
    assert_eq!(index.generation_number(commit_c.id()).unwrap(), 3);
}
#[test]
fn test_index_commits_hidden_but_referenced() {
    // Test that hidden-but-referenced commits are indexed.
    let settings = testutils::user_settings();
    let test_repo = TestRepo::init();
    let test_env = &test_repo.env;
    let repo = &test_repo.repo;
    // Remote bookmarks are usually visible at a certain point in operation
    // history, but that's not guaranteed if old operations have been discarded.
    // This can also happen if imported remote bookmarks get immediately
    // abandoned because the other bookmark has moved.
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit(tx.repo_mut());
    let commit_c = write_random_commit(tx.repo_mut());
    // Hide all three commits, but keep them referenced by a remote bookmark
    // (A as the target, B and C as "adds" in the legacy conflict form).
    tx.repo_mut().remove_head(commit_a.id());
    tx.repo_mut().remove_head(commit_b.id());
    tx.repo_mut().remove_head(commit_c.id());
    tx.repo_mut().set_remote_bookmark(
        remote_symbol("bookmark", "origin"),
        RemoteRef {
            target: RefTarget::from_legacy_form(
                [commit_a.id().clone()],
                [commit_b.id().clone(), commit_c.id().clone()],
            ),
            state: jj_lib::op_store::RemoteRefState::New,
        },
    );
    let repo = tx.commit("test").unwrap();
    // All commits should be indexed
    assert!(index_has_id(repo.index(), commit_a.id()));
    assert!(index_has_id(repo.index(), commit_b.id()));
    assert!(index_has_id(repo.index(), commit_c.id()));
    // Delete index from disk
    let default_index_store: &DefaultIndexStore = repo.index_store().downcast_ref().unwrap();
    default_index_store.reinit().unwrap();
    let repo = test_env.load_repo_at_head(&settings, test_repo.repo_path());
    // All commits should be reindexed
    assert!(index_has_id(repo.index(), commit_a.id()));
    assert!(index_has_id(repo.index(), commit_b.id()));
    assert!(index_has_id(repo.index(), commit_c.id()));
}
#[test]
fn test_index_commits_incremental() {
    let settings = testutils::user_settings();
    let test_repo = TestRepo::init();
    let test_env = &test_repo.env;
    let repo = &test_repo.repo;
    // Create A in one operation, then B and C in another. Check that the index is
    // valid after.
    // o C
    // o B
    // o A
    // | o working copy
    // |/
    // o root
    let root_commit = repo.store().root_commit();
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit_with_parents(tx.repo_mut(), &[]);
    let repo = tx.commit("test").unwrap();
    let index = as_readonly_index(&repo);
    // There should be the root commit, plus 1 more
    assert_eq!(index.num_commits(), 1 + 1);
    let mut tx = repo.start_transaction();
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    tx.commit("test").unwrap();
    // Reload the repo so the incrementally-updated on-disk index is read back.
    let repo = test_env.load_repo_at_head(&settings, test_repo.repo_path());
    let index = as_readonly_index(&repo);
    // There should be the root commit, plus 3 more
    assert_eq!(index.num_commits(), 1 + 3);
    let stats = index.stats();
    assert_eq!(stats.num_commits, 1 + 3);
    assert_eq!(stats.num_merges, 0);
    assert_eq!(stats.max_generation_number, 3);
    // All four commits ended up in a single index level.
    assert_eq!(stats.commit_levels.len(), 1);
    assert_eq!(stats.commit_levels[0].num_commits, 4);
    assert_eq!(index.generation_number(root_commit.id()).unwrap(), 0);
    assert_eq!(index.generation_number(commit_a.id()).unwrap(), 1);
    assert_eq!(index.generation_number(commit_b.id()).unwrap(), 2);
    assert_eq!(index.generation_number(commit_c.id()).unwrap(), 3);
}
#[test]
fn test_index_commits_incremental_empty_transaction() {
    let settings = testutils::user_settings();
    let test_repo = TestRepo::init();
    let test_env = &test_repo.env;
    let repo = &test_repo.repo;
    // Create A in one operation, then just an empty transaction. Check that the
    // index is valid after.
    // o A
    // | o working copy
    // |/
    // o root
    let root_commit = repo.store().root_commit();
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit_with_parents(tx.repo_mut(), &[&root_commit]);
    let repo = tx.commit("test").unwrap();
    let index = as_readonly_index(&repo);
    // There should be the root commit, plus 1 more
    assert_eq!(index.num_commits(), 1 + 1);
    // An empty transaction adds no commits but still creates a new operation.
    repo.start_transaction().commit("test").unwrap();
    let repo = test_env.load_repo_at_head(&settings, test_repo.repo_path());
    let index = as_readonly_index(&repo);
    // There should be the root commit, plus 1 more
    assert_eq!(index.num_commits(), 1 + 1);
    let stats = index.stats();
    assert_eq!(stats.num_commits, 1 + 1);
    assert_eq!(stats.num_merges, 0);
    assert_eq!(stats.max_generation_number, 1);
    assert_eq!(stats.commit_levels.len(), 1);
    assert_eq!(stats.commit_levels[0].num_commits, 2);
    assert_eq!(index.generation_number(root_commit.id()).unwrap(), 0);
    assert_eq!(index.generation_number(commit_a.id()).unwrap(), 1);
}
#[test]
fn test_index_commits_incremental_already_indexed() {
    // Tests that trying to add a commit that's already been added is a no-op.
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Create A in one operation, then add it as a head again in a new
    // transaction.
    // o A
    // | o working copy
    // |/
    // o root
    let root_commit = repo.store().root_commit();
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit_with_parents(tx.repo_mut(), &[&root_commit]);
    let repo = tx.commit("test").unwrap();
    assert!(index_has_id(repo.index(), commit_a.id()));
    // Root commit plus A.
    assert_eq!(as_readonly_index(&repo).num_commits(), 2);
    let mut tx = repo.start_transaction();
    let mut_repo = tx.repo_mut();
    mut_repo.add_head(&commit_a).unwrap();
    // Re-adding an already-indexed head must not grow the index.
    assert_eq!(as_mutable_index(mut_repo).num_commits(), 2);
}
/// Writes `num_commits` random, unrelated commits in a single operation and
/// returns the repo at the resulting operation.
#[must_use]
fn create_n_commits(repo: &Arc<ReadonlyRepo>, num_commits: i32) -> Arc<ReadonlyRepo> {
    let mut tx = repo.start_transaction();
    (0..num_commits).for_each(|_| {
        write_random_commit(tx.repo_mut());
    });
    tx.commit("test").unwrap()
}
/// Downcasts the repo's readonly index to the default implementation.
fn as_readonly_index(repo: &Arc<ReadonlyRepo>) -> &DefaultReadonlyIndex {
    repo.readonly_index().downcast_ref().unwrap()
}
/// Downcasts the mutable repo's index to the default implementation.
fn as_mutable_index(repo: &MutableRepo) -> &DefaultMutableIndex {
    repo.mutable_index().downcast_ref().unwrap()
}
/// Returns the number of commits stored in each index level, in the order
/// reported by the index stats.
fn commits_by_level(repo: &Arc<ReadonlyRepo>) -> Vec<u32> {
    let stats = as_readonly_index(repo).stats();
    stats
        .commit_levels
        .iter()
        .map(|lvl| lvl.num_commits)
        .collect()
}
#[test]
fn test_index_commits_incremental_squashed() {
    // Each scenario below checks how incremental index segments get squashed
    // into levels as operations of various sizes accumulate.
    // Small sequential additions stay in a single level.
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let repo = create_n_commits(repo, 1);
    assert_eq!(commits_by_level(&repo), vec![2]);
    let repo = create_n_commits(&repo, 1);
    assert_eq!(commits_by_level(&repo), vec![3]);
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let repo = create_n_commits(repo, 2);
    assert_eq!(commits_by_level(&repo), vec![3]);
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let repo = create_n_commits(repo, 100);
    assert_eq!(commits_by_level(&repo), vec![101]);
    // Doubling batches collapse into one level.
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let repo = create_n_commits(repo, 1);
    let repo = create_n_commits(&repo, 2);
    let repo = create_n_commits(&repo, 4);
    let repo = create_n_commits(&repo, 8);
    let repo = create_n_commits(&repo, 16);
    let repo = create_n_commits(&repo, 32);
    assert_eq!(commits_by_level(&repo), vec![64]);
    // Shrinking batches leave a small tail level behind.
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let repo = create_n_commits(repo, 32);
    let repo = create_n_commits(&repo, 16);
    let repo = create_n_commits(&repo, 8);
    let repo = create_n_commits(&repo, 4);
    let repo = create_n_commits(&repo, 2);
    assert_eq!(commits_by_level(&repo), vec![57, 6]);
    // Roughly-halving batches can leave one level per batch.
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let repo = create_n_commits(repo, 30);
    let repo = create_n_commits(&repo, 15);
    let repo = create_n_commits(&repo, 7);
    let repo = create_n_commits(&repo, 3);
    let repo = create_n_commits(&repo, 1);
    assert_eq!(commits_by_level(&repo), vec![31, 15, 7, 3, 1]);
    // Uniform batches get mostly squashed with a small second level.
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let repo = create_n_commits(repo, 10);
    let repo = create_n_commits(&repo, 10);
    let repo = create_n_commits(&repo, 10);
    let repo = create_n_commits(&repo, 10);
    let repo = create_n_commits(&repo, 10);
    let repo = create_n_commits(&repo, 10);
    let repo = create_n_commits(&repo, 10);
    let repo = create_n_commits(&repo, 10);
    let repo = create_n_commits(&repo, 10);
    assert_eq!(commits_by_level(&repo), vec![71, 20]);
}
#[test]
fn test_reindex_no_segments_dir() {
    // Simulates a repo created by an old jj version (<= 0.14) that had no
    // index "segments" directory, and checks that loading the repo rebuilds
    // the index instead of failing.
    let settings = testutils::user_settings();
    let test_repo = TestRepo::init();
    let test_env = &test_repo.env;
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let repo = tx.commit("test").unwrap();
    assert!(index_has_id(repo.index(), commit_a.id()));
    // jj <= 0.14 doesn't have "segments" directory
    let segments_dir = test_repo.repo_path().join("index").join("segments");
    assert!(segments_dir.is_dir());
    fs::remove_dir_all(&segments_dir).unwrap();
    // Reloading the repo must reindex; the commit should still be found.
    let repo = test_env.load_repo_at_head(&settings, test_repo.repo_path());
    assert!(index_has_id(repo.index(), commit_a.id()));
}
#[test]
fn test_reindex_corrupt_segment_files() {
    // Overwrites every index segment file with garbage and checks that
    // reloading the repo detects the corruption and rebuilds the index.
    let settings = testutils::user_settings();
    let test_repo = TestRepo::init();
    let test_env = &test_repo.env;
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let repo = tx.commit("test").unwrap();
    assert!(index_has_id(repo.index(), commit_a.id()));
    // Corrupt the index files
    let segments_dir = test_repo.repo_path().join("index").join("segments");
    for entry in segments_dir.read_dir().unwrap() {
        let entry = entry.unwrap();
        // Zero out the 24-byte segment header, which consists of:
        // u32: file format version
        // u32: parent segment file name length (0 means root)
        // u32: number of local commit entries
        // u32: number of local change ids
        // u32: number of overflow parent entries
        // u32: number of overflow change id positions
        fs::write(entry.path(), b"\0".repeat(24)).unwrap();
    }
    let repo = test_env.load_repo_at_head(&settings, test_repo.repo_path());
    assert!(index_has_id(repo.index(), commit_a.id()));
}
#[test]
fn test_reindex_from_merged_operation() {
    // Forces a reindex starting from a merged operation by deleting the
    // operation link files for the head operations; commits reachable
    // through either merge parent must still be indexed afterwards.
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // The following operation log:
    // x (add head, index will be missing)
    // x (add head, index will be missing)
    // |\
    // o o (remove head)
    // o o (add head)
    // |/
    // o
    let mut txs = Vec::new();
    for _ in 0..2 {
        let mut tx = repo.start_transaction();
        let commit = write_random_commit(tx.repo_mut());
        let repo = tx.commit("test").unwrap();
        let mut tx = repo.start_transaction();
        tx.repo_mut().remove_head(commit.id());
        txs.push(tx);
    }
    // Committing both pending transactions creates the merge operation.
    let repo = commit_transactions(txs);
    let mut op_ids_to_delete = Vec::new();
    op_ids_to_delete.push(repo.op_id());
    let mut tx = repo.start_transaction();
    write_random_commit(tx.repo_mut());
    let repo = tx.commit("test").unwrap();
    op_ids_to_delete.push(repo.op_id());
    let operation_to_reload = repo.operation();
    // Sanity check before corrupting the index store
    let index = as_readonly_index(&repo);
    assert_eq!(index.num_commits(), 4);
    // Delete both the new-style and legacy operation link files for the
    // operations whose index should go missing.
    let op_links_dir = test_repo.repo_path().join("index").join("op_links");
    let legacy_operations_dir = test_repo.repo_path().join("index").join("operations");
    for &op_id in &op_ids_to_delete {
        fs::remove_file(op_links_dir.join(op_id.hex())).unwrap();
        fs::remove_file(legacy_operations_dir.join(op_id.hex())).unwrap();
    }
    // When re-indexing, one of the merge parent operations will be selected as
    // the parent index segment. The commits in the other side should still be
    // reachable.
    let repo = repo.reload_at(operation_to_reload).unwrap();
    let index = as_readonly_index(&repo);
    assert_eq!(index.num_commits(), 4);
}
#[test]
fn test_reindex_missing_commit() {
    // Simulates a backend that lost a historical commit (e.g. bad GC) and
    // checks that the reindexing error reports the operation at which the
    // commit couldn't be found.
    let settings = testutils::user_settings();
    let test_repo = TestRepo::init();
    let test_env = &test_repo.env;
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    let missing_commit = write_random_commit(tx.repo_mut());
    let repo = tx.commit("test").unwrap();
    // This is the operation where the commit was still a head.
    let bad_op_id = repo.op_id();
    let mut tx = repo.start_transaction();
    tx.repo_mut().remove_head(missing_commit.id());
    let repo = tx.commit("test").unwrap();
    // Remove historical head commit to simulate bad GC.
    let test_backend: &TestBackend = repo.store().backend_impl().unwrap();
    test_backend.remove_commit_unchecked(missing_commit.id());
    let repo = test_env.load_repo_at_head(&settings, test_repo.repo_path()); // discard cache
    assert!(repo.store().get_commit(missing_commit.id()).is_err());
    // Reindexing error should include the operation id where the commit
    // couldn't be found.
    let default_index_store: &DefaultIndexStore = repo.index_store().downcast_ref().unwrap();
    default_index_store.reinit().unwrap();
    let err = default_index_store
        .build_index_at_operation(repo.operation(), repo.store())
        .block_on()
        .unwrap_err();
    assert_matches!(err, DefaultIndexStoreError::IndexCommits { op_id, .. } if op_id == *bad_op_id);
}
/// Test that .jj/repo/index/type is created when the repo is created.
#[test]
fn test_index_store_type() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // A fresh repo contains only the root commit.
    assert_eq!(as_readonly_index(repo).num_commits(), 1);
    // The "type" marker file identifies which index store implementation
    // created the index directory.
    let index_store_type_path = test_repo.repo_path().join("index").join("type");
    assert_eq!(
        std::fs::read_to_string(index_store_type_path).unwrap(),
        "default"
    );
}
#[test]
fn test_read_legacy_operation_link_file() {
    // Checks that a repo whose "op_links" directory is missing (only legacy
    // operation link files exist) can still be read, and that the directory
    // and new-style link files are created on the next write.
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Delete new operation link files and directory
    let op_links_dir = test_repo.repo_path().join("index").join("op_links");
    fs::remove_dir_all(&op_links_dir).unwrap();
    // Reload repo and index
    let repo = repo.reload_at(repo.operation()).unwrap();
    repo.readonly_index();
    // Existing index should still be readable, so new operation link file won't
    // be created
    assert!(!op_links_dir.join(repo.op_id().hex()).exists());
    // New operation link file and directory can be created
    let mut tx = repo.start_transaction();
    write_random_commit(tx.repo_mut());
    let repo = tx.commit("test").unwrap();
    assert!(op_links_dir.join(repo.op_id().hex()).exists());
}
#[test]
fn test_changed_path_segments() {
    // Verifies the changed-path index: disabled by default, enabled via
    // `enable_changed_path_index`, and updated (with segment squashing) as
    // new commits are written.
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let root_commit_id = repo.store().root_commit_id();
    // Changed-path index should be disabled by default
    let segments_dir = test_repo.repo_path().join("index").join("changed_paths");
    let count_segment_files = || {
        let entries = segments_dir.read_dir().unwrap();
        entries.process_results(|entries| entries.count()).unwrap()
    };
    assert_eq!(count_segment_files(), 0);
    let stats = as_readonly_index(repo).stats();
    assert_eq!(stats.changed_path_commits_range, None);
    assert_eq!(stats.changed_path_levels.len(), 0);
    // Enabling the index on an empty repo indexes no commits yet (the root
    // commit is excluded, hence the empty 1..1 range).
    let repo = enable_changed_path_index(repo);
    let stats = as_readonly_index(&repo).stats();
    assert_eq!(stats.changed_path_commits_range, Some(1..1));
    assert_eq!(stats.changed_path_levels.len(), 0);
    let tree1 = create_tree(&repo, &[(repo_path("a"), "")]);
    let tree2 = create_tree(&repo, &[(repo_path("a"), ""), (repo_path("b"), "")]);
    // Add new commit with changed-path index enabled
    let mut tx = repo.start_transaction();
    let commit1 = tx
        .repo_mut()
        .new_commit(vec![root_commit_id.clone()], tree1)
        .write()
        .unwrap();
    let repo = tx.commit("test").unwrap();
    let stats = as_readonly_index(&repo).stats();
    assert_eq!(count_segment_files(), 1);
    assert_eq!(stats.changed_path_commits_range, Some(1..2));
    assert_eq!(stats.changed_path_levels.len(), 1);
    assert_eq!(stats.changed_path_levels[0].num_commits, 1);
    assert_eq!(stats.changed_path_levels[0].num_changed_paths, 1);
    assert_eq!(stats.changed_path_levels[0].num_paths, 1);
    // The root commit is not covered by the changed-path index.
    assert_eq!(collect_changed_paths(&repo, root_commit_id), None);
    assert_eq!(
        collect_changed_paths(&repo, commit1.id()),
        Some(vec![repo_path_buf("a")])
    );
    // Add one more commit, segment files should be squashed
    let mut tx = repo.start_transaction();
    let commit2 = tx
        .repo_mut()
        .new_commit(vec![root_commit_id.clone()], tree2)
        .write()
        .unwrap();
    let repo = tx.commit("test").unwrap();
    let stats = as_readonly_index(&repo).stats();
    assert_eq!(count_segment_files(), 2);
    assert_eq!(stats.changed_path_commits_range, Some(1..3));
    assert_eq!(stats.changed_path_levels.len(), 1);
    assert_eq!(stats.changed_path_levels[0].num_commits, 2);
    assert_eq!(stats.changed_path_levels[0].num_changed_paths, 3);
    assert_eq!(stats.changed_path_levels[0].num_paths, 2);
    assert_eq!(collect_changed_paths(&repo, root_commit_id), None);
    assert_eq!(
        collect_changed_paths(&repo, commit1.id()),
        Some(vec![repo_path_buf("a")])
    );
    assert_eq!(
        collect_changed_paths(&repo, commit2.id()),
        Some(vec![repo_path_buf("a"), repo_path_buf("b")])
    );
}
#[test]
fn test_build_changed_path_segments() {
let test_repo = TestRepo::init();
let repo = test_repo.repo;
let root_commit_id = repo.store().root_commit_id();
let default_index_store: &DefaultIndexStore = repo.index_store().downcast_ref().unwrap();
let mut tx = repo.start_transaction();
for i in 1..10 {
let tree = create_tree(&repo, &[(repo_path(&i.to_string()), "")]);
tx.repo_mut()
.new_commit(vec![root_commit_id.clone()], tree)
.write()
.unwrap();
}
let repo = tx.commit("test").unwrap();
// Index the last 4 commits
default_index_store
.build_changed_path_index_at_operation(repo.op_id(), repo.store(), 4)
.block_on()
.unwrap();
let repo = repo.reload_at(repo.operation()).unwrap();
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_merge_trees.rs | lib/tests/test_merge_trees.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use jj_lib::backend::TreeValue;
use jj_lib::config::ConfigLayer;
use jj_lib::config::ConfigSource;
use jj_lib::merge::Merge;
use jj_lib::merge::SameChange;
use jj_lib::repo::Repo as _;
use jj_lib::rewrite::rebase_commit;
use jj_lib::settings::UserSettings;
use pollster::FutureExt as _;
use test_case::test_case;
use testutils::TestRepo;
use testutils::assert_tree_eq;
use testutils::create_tree;
use testutils::repo_path;
#[test]
fn test_simplify_conflict_after_resolving_parent() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Set up a repo like this:
    // D
    // | C
    // | B
    // |/
    // A
    //
    // Commit A has a file with 3 lines. B and D make conflicting changes to the
    // first line. C changes the third line. We then rebase B and C onto D,
    // which creates a conflict. We resolve the conflict in the first line and
    // rebase C2 (the rebased C) onto the resolved conflict. C3 should not have
    // a conflict since it changed an unrelated line.
    let path = repo_path("dir/file");
    let mut tx = repo.start_transaction();
    let tree_a = create_tree(repo, &[(path, "abc\ndef\nghi\n")]);
    let commit_a = tx
        .repo_mut()
        .new_commit(vec![repo.store().root_commit_id().clone()], tree_a)
        .write()
        .unwrap();
    let tree_b = create_tree(repo, &[(path, "Abc\ndef\nghi\n")]);
    let commit_b = tx
        .repo_mut()
        .new_commit(vec![commit_a.id().clone()], tree_b)
        .write()
        .unwrap();
    let tree_c = create_tree(repo, &[(path, "Abc\ndef\nGhi\n")]);
    let commit_c = tx
        .repo_mut()
        .new_commit(vec![commit_b.id().clone()], tree_c)
        .write()
        .unwrap();
    let tree_d = create_tree(repo, &[(path, "abC\ndef\nghi\n")]);
    let commit_d = tx
        .repo_mut()
        .new_commit(vec![commit_a.id().clone()], tree_d)
        .write()
        .unwrap();
    let commit_b2 = rebase_commit(tx.repo_mut(), commit_b, vec![commit_d.id().clone()])
        .block_on()
        .unwrap();
    let commit_c2 = rebase_commit(tx.repo_mut(), commit_c, vec![commit_b2.id().clone()])
        .block_on()
        .unwrap();
    // Test the setup: Both B and C should have conflicts.
    // Fixed: the second tree was previously taken from `commit_b2`, so C2's
    // conflict was never actually checked.
    let tree_b2 = commit_b2.tree();
    let tree_c2 = commit_c2.tree();
    assert!(!tree_b2.path_value(path).unwrap().is_resolved());
    assert!(!tree_c2.path_value(path).unwrap().is_resolved());
    // Create the resolved B and rebase C on top.
    let tree_b3 = create_tree(repo, &[(path, "AbC\ndef\nghi\n")]);
    let commit_b3 = tx
        .repo_mut()
        .rewrite_commit(&commit_b2)
        .set_tree(tree_b3)
        .write()
        .unwrap();
    let commit_c3 = rebase_commit(tx.repo_mut(), commit_c2, vec![commit_b3.id().clone()])
        .block_on()
        .unwrap();
    tx.repo_mut().rebase_descendants().unwrap();
    let repo = tx.commit("test").unwrap();
    // The conflict should now be resolved.
    // (Renamed from `tree_c2`: this is C3's tree.)
    let tree_c3 = commit_c3.tree();
    let resolved_value = tree_c3.path_value(path).unwrap();
    match resolved_value.into_resolved() {
        Ok(Some(TreeValue::File {
            id,
            executable: false,
            copy_id: _,
        })) => {
            assert_eq!(
                testutils::read_file(repo.store(), path, &id),
                b"AbC\ndef\nGhi\n"
            );
        }
        other => {
            panic!("unexpected value: {other:#?}");
        }
    }
}
// TODO: Add tests for simplification of multi-way conflicts. Both the content
// and the executable bit need testing.
#[test_case(SameChange::Keep)]
#[test_case(SameChange::Accept)]
fn test_rebase_linearize_lossy_merge(same_change: SameChange) {
    // Checks the result of linearizing a "lossy" merge (a merge whose tree
    // drops one side's identical change) under both `merge.same-change`
    // settings.
    let settings = settings_with_same_change(same_change);
    let test_repo = TestRepo::init_with_settings(&settings);
    let repo = &test_repo.repo;
    // Test this rebase:
    // D foo=2      D' foo=1 or 2
    // |\           |
    // | C foo=2    |
    // | |       => B foo=2
    // B | foo=2    |
    // |/           |
    // A foo=1      A foo=1
    //
    // Since both B and C changed "1" to "2" but only one "2" remains in D, it
    // effectively discarded a change from "1" to "2". With `SameChange::Keep`,
    // D' is therefore "1". However, with `SameChange::Accept`, `jj show D` etc.
    // currently don't tell the user about the discarded change, so it's
    // surprising that the change in commit D is interpreted that way.
    let path = repo_path("foo");
    let mut tx = repo.start_transaction();
    let repo_mut = tx.repo_mut();
    let tree_1 = create_tree(repo, &[(path, "1")]);
    let tree_2 = create_tree(repo, &[(path, "2")]);
    let commit_a = repo_mut
        .new_commit(vec![repo.store().root_commit_id().clone()], tree_1.clone())
        .write()
        .unwrap();
    let commit_b = repo_mut
        .new_commit(vec![commit_a.id().clone()], tree_2.clone())
        .write()
        .unwrap();
    let commit_c = repo_mut
        .new_commit(vec![commit_a.id().clone()], tree_2.clone())
        .write()
        .unwrap();
    let commit_d = repo_mut
        .new_commit(
            vec![commit_b.id().clone(), commit_c.id().clone()],
            tree_2.clone(),
        )
        .write()
        .unwrap();
    // With Keep, D differs from its auto-merged parents (so it's non-empty);
    // with Accept, the duplicate change is resolved away and D is empty.
    match same_change {
        SameChange::Keep => assert!(!commit_d.is_empty(repo_mut).unwrap()),
        SameChange::Accept => assert!(commit_d.is_empty(repo_mut).unwrap()),
    }
    let commit_d2 = rebase_commit(repo_mut, commit_d, vec![commit_b.id().clone()])
        .block_on()
        .unwrap();
    match same_change {
        SameChange::Keep => assert_tree_eq!(commit_d2.tree(), tree_1),
        SameChange::Accept => assert_tree_eq!(commit_d2.tree(), tree_2),
    }
}
#[test_case(SameChange::Keep)]
#[test_case(SameChange::Accept)]
fn test_rebase_on_lossy_merge(same_change: SameChange) {
    // Checks rebasing a "lossy" merge onto a changed parent under both
    // `merge.same-change` settings; Accept produces a conflict here.
    let settings = settings_with_same_change(same_change);
    let test_repo = TestRepo::init_with_settings(&settings);
    let repo = &test_repo.repo;
    // Test this rebase:
    // D foo=2      D' foo=3 or 2+(3-1) (conflict)
    // |\           |\
    // | C foo=2    | C' foo=3
    // | |       => | |
    // B | foo=2    B | foo=2
    // |/           |/
    // A foo=1      A foo=1
    //
    // Commit D effectively discarded a change from "1" to "2", so one
    // reasonable result in D' is "3". That's the result with
    // `SameChange::Keep`. However, with `SameChange::Accept`, we resolve the
    // auto-merged parents to just "2" before the rebase in order to be
    // consistent with `jj show D` and other commands for inspecting the commit,
    // so we instead get a conflict after the rebase.
    let path = repo_path("foo");
    let mut tx = repo.start_transaction();
    let repo_mut = tx.repo_mut();
    let tree_1 = create_tree(repo, &[(path, "1")]);
    let tree_2 = create_tree(repo, &[(path, "2")]);
    let tree_3 = create_tree(repo, &[(path, "3")]);
    let commit_a = repo_mut
        .new_commit(vec![repo.store().root_commit_id().clone()], tree_1.clone())
        .write()
        .unwrap();
    let commit_b = repo_mut
        .new_commit(vec![commit_a.id().clone()], tree_2.clone())
        .write()
        .unwrap();
    let commit_c = repo_mut
        .new_commit(vec![commit_a.id().clone()], tree_2.clone())
        .write()
        .unwrap();
    let commit_d = repo_mut
        .new_commit(
            vec![commit_b.id().clone(), commit_c.id().clone()],
            tree_2.clone(),
        )
        .write()
        .unwrap();
    match same_change {
        SameChange::Keep => assert!(!commit_d.is_empty(repo_mut).unwrap()),
        SameChange::Accept => assert!(commit_d.is_empty(repo_mut).unwrap()),
    }
    // C' replaces C as the second rebase target, changing foo to "3".
    let commit_c2 = repo_mut
        .new_commit(vec![commit_a.id().clone()], tree_3.clone())
        .write()
        .unwrap();
    let commit_d2 = rebase_commit(
        repo_mut,
        commit_d,
        vec![commit_b.id().clone(), commit_c2.id().clone()],
    )
    .block_on()
    .unwrap();
    match same_change {
        SameChange::Keep => assert_tree_eq!(commit_d2.tree(), tree_3),
        SameChange::Accept => {
            // Expect the 3-way conflict 2+(3-1): sides "2" and "3" with
            // base "1".
            let expected_tree_id = Merge::from_vec(vec![
                tree_2.into_tree_ids(),
                tree_1.into_tree_ids(),
                tree_3.into_tree_ids(),
            ])
            .flatten();
            assert_eq!(*commit_d2.tree_ids(), expected_tree_id);
        }
    }
}
/// Builds user settings with `merge.same-change` set to match `same_change`.
fn settings_with_same_change(same_change: SameChange) -> UserSettings {
    let value = match same_change {
        SameChange::Keep => "keep",
        SameChange::Accept => "accept",
    };
    let mut layer = ConfigLayer::empty(ConfigSource::User);
    layer.set_value("merge.same-change", value).unwrap();
    let mut config = testutils::base_user_config();
    config.add_layer(layer);
    UserSettings::from_config(config).unwrap()
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_ssh_signing.rs | lib/tests/test_ssh_signing.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fs;
#[cfg(unix)]
use std::fs::Permissions;
use std::io::Write as _;
#[cfg(unix)]
use std::os::unix::prelude::PermissionsExt as _;
use std::path::Path;
use std::path::PathBuf;
use jj_lib::signing::SigStatus;
use jj_lib::signing::SigningBackend as _;
use jj_lib::ssh_signing::SshBackend;
// Throwaway Ed25519 key pair used only by these tests; not a real secret.
static PRIVATE_KEY: &str = r#"-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
QyNTUxOQAAACBo/iejekjvuD/HTman0daImstssYYR52oB+dmr1KsOYQAAAIiuGFMFrhhT
BQAAAAtzc2gtZWQyNTUxOQAAACBo/iejekjvuD/HTman0daImstssYYR52oB+dmr1KsOYQ
AAAECcUtn/J/jk/+D5+/+WbQRNN4eInj5L60pt6FioP0nQfGj+J6N6SO+4P8dOZqfR1oia
y2yxhhHnagH52avUqw5hAAAAAAECAwQF
-----END OPENSSH PRIVATE KEY-----
"#;
// Public half of PRIVATE_KEY.
static PUBLIC_KEY: &str =
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGj+J6N6SO+4P8dOZqfR1oiay2yxhhHnagH52avUqw5h";
// Fingerprint of PUBLIC_KEY as the verification code reports it.
static FINGERPRINT: &str = "SHA256:CaeelDOMvTqGZPjAS9fdbnACrLg68N1Bb9ux5y6GjGw";
/// Temp-file fixture providing the signing key and optional verification
/// files for SSH signing tests.
struct SshEnvironment {
    // Keeps the key tempdir alive for the fixture's lifetime.
    _keys: tempfile::TempDir,
    // Path of the private key file inside the tempdir.
    private_key_path: PathBuf,
    // Allowed-signers file handed to the backend, if any.
    allowed_signers: Option<tempfile::TempPath>,
    // Key revocation list file handed to the backend, if any.
    revocation_list: Option<tempfile::TempPath>,
}
impl SshEnvironment {
    /// Creates a fixture with the test private key written to a tempdir
    /// (mode 0600 on Unix, as ssh tooling requires) and a "good"
    /// allowed-signers file mapping test@example.com to the matching
    /// public key.
    fn new() -> Result<Self, std::process::Output> {
        let keys_dir = tempfile::Builder::new()
            .prefix("jj-test-signing-keys-")
            .tempdir()
            .unwrap();
        let private_key_path = Path::new(keys_dir.path()).join("key");
        fs::write(&private_key_path, PRIVATE_KEY).unwrap();
        #[cfg(unix)]
        std::fs::set_permissions(&private_key_path, Permissions::from_mode(0o600)).unwrap();
        let mut env = Self {
            _keys: keys_dir,
            private_key_path,
            allowed_signers: None,
            revocation_list: None,
        };
        env.with_good_public_key();
        Ok(env)
    }

    /// Writes an allowed-signers temp file mapping "test@example.com" to
    /// `key` and installs it on the fixture. (Shared by the good/bad key
    /// setup methods, which previously duplicated this whole sequence.)
    fn set_allowed_signers(&mut self, key: &[u8]) {
        let mut allowed_signers = tempfile::Builder::new()
            .prefix("jj-test-allowed-signers-")
            .tempfile()
            .unwrap();
        allowed_signers
            .write_all("test@example.com ".as_bytes())
            .unwrap();
        allowed_signers.write_all(key).unwrap();
        allowed_signers.flush().unwrap();
        self.allowed_signers = Some(allowed_signers.into_temp_path());
    }

    /// Points the allowed-signers file at the test public key.
    fn with_good_public_key(&mut self) {
        self.set_allowed_signers(PUBLIC_KEY.as_bytes());
    }

    /// Points the allowed-signers file at an unparsable key.
    fn with_bad_public_key(&mut self) {
        self.set_allowed_signers("INVALID PUBLIC KEY".as_bytes());
    }

    /// Writes `revoked_key` into a revocation-list temp file and installs it.
    fn with_revocation_list(&mut self, revoked_key: &[u8]) {
        let mut revocation_list = tempfile::Builder::new()
            .prefix("jj-test-revocation-list-")
            .tempfile()
            .unwrap();
        revocation_list.write_all(revoked_key).unwrap();
        revocation_list.flush().unwrap();
        let revocation_list_path = revocation_list.into_temp_path();
        self.revocation_list = Some(revocation_list_path);
    }
}
/// Builds an `SshBackend` driven by `ssh-keygen`, wired to the fixture's
/// optional allowed-signers and revocation-list files.
fn backend(env: &SshEnvironment) -> SshBackend {
    let allowed_signers = env
        .allowed_signers
        .as_ref()
        .map(|path| path.as_os_str().into());
    let revocation_list = env
        .revocation_list
        .as_ref()
        .map(|path| path.as_os_str().into());
    SshBackend::new("ssh-keygen".into(), allowed_signers, revocation_list)
}
#[test]
fn ssh_signing_roundtrip() {
    // Signs data and verifies it: correct data verifies as Good with the
    // expected fingerprint/principal; tampered data verifies as Bad.
    let env = SshEnvironment::new().unwrap();
    let backend = backend(&env);
    let data = b"hello world";
    let signature = backend
        .sign(data, Some(env.private_key_path.to_str().unwrap()))
        .unwrap();
    let check = backend.verify(data, &signature).unwrap();
    assert_eq!(check.status, SigStatus::Good);
    assert_eq!(check.key.unwrap(), FINGERPRINT);
    assert_eq!(check.display.unwrap(), "test@example.com");
    // Same signature over different data must fail verification.
    let check = backend.verify(b"invalid-commit-data", &signature).unwrap();
    assert_eq!(check.status, SigStatus::Bad);
    assert_eq!(check.display.unwrap(), "test@example.com");
}
#[test]
fn ssh_signing_bad_allowed_signers() {
    // With a malformed allowed-signers file, the signature itself still
    // verifies but the principal is unknown.
    let mut env = SshEnvironment::new().unwrap();
    env.with_bad_public_key();
    let backend = backend(&env);
    let data = b"hello world";
    let signature = backend
        .sign(data, Some(env.private_key_path.to_str().unwrap()))
        .unwrap();
    let check = backend.verify(data, &signature).unwrap();
    assert_eq!(check.status, SigStatus::Unknown);
    assert_eq!(check.key.unwrap(), FINGERPRINT);
    assert_eq!(check.display.unwrap(), "Signature OK. Unknown principal");
}
#[test]
fn ssh_signing_missing_allowed_signers() {
    // With no allowed-signers file configured at all, verification behaves
    // like the malformed-file case: valid signature, unknown principal.
    let mut env = SshEnvironment::new().unwrap();
    env.allowed_signers = None;
    let backend = backend(&env);
    let data = b"hello world";
    let signature = backend
        .sign(data, Some(env.private_key_path.to_str().unwrap()))
        .unwrap();
    let check = backend.verify(data, &signature).unwrap();
    assert_eq!(check.status, SigStatus::Unknown);
    assert_eq!(check.key.unwrap(), FINGERPRINT);
    assert_eq!(check.display.unwrap(), "Signature OK. Unknown principal");
}
#[test]
fn ssh_signing_revocation_revoked() {
    // A signature made with a key on the revocation list verifies as Bad.
    let mut env = SshEnvironment::new().unwrap();
    env.with_revocation_list(PUBLIC_KEY.as_bytes());
    let backend = backend(&env);
    let data = b"hello world";
    let signature = backend
        .sign(data, Some(env.private_key_path.to_str().unwrap()))
        .unwrap();
    let check = backend.verify(data, &signature).unwrap();
    assert_eq!(check.status, SigStatus::Bad);
    assert_eq!(check.display.unwrap(), "test@example.com");
}
#[test]
fn ssh_signing_revocation_unrevoked() {
    // A revocation list that names a *different* key must not affect
    // verification of our signature.
    let mut env = SshEnvironment::new().unwrap();
    const ALT_PUB_KEY: &str =
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICrkiOk+QyRv87ahGdrxSin0DuNKezDDLE6lLkHxJpWU";
    env.with_revocation_list(ALT_PUB_KEY.as_bytes());
    let backend = backend(&env);
    let data = b"hello world";
    let signature = backend
        .sign(data, Some(env.private_key_path.to_str().unwrap()))
        .unwrap();
    let check = backend.verify(data, &signature).unwrap();
    assert_eq!(check.status, SigStatus::Good);
    assert_eq!(check.key.unwrap(), FINGERPRINT);
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_view.rs | lib/tests/test_view.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::BTreeMap;
use itertools::Itertools as _;
use jj_lib::op_store::LocalRemoteRefTarget;
use jj_lib::op_store::RefTarget;
use jj_lib::op_store::RemoteRef;
use jj_lib::op_store::RemoteRefState;
use jj_lib::ref_name::RefName;
use jj_lib::ref_name::RemoteName;
use jj_lib::ref_name::RemoteRefSymbol;
use jj_lib::ref_name::WorkspaceNameBuf;
use jj_lib::repo::Repo as _;
use maplit::btreemap;
use maplit::hashset;
use test_case::test_case;
use testutils::TestRepo;
use testutils::commit_transactions;
use testutils::create_random_commit;
use testutils::write_random_commit;
use testutils::write_random_commit_with_parents;
/// Convenience constructor for a `RemoteRefSymbol` from any borrowable
/// name/remote pair (e.g. string literals).
fn remote_symbol<'a, N, M>(name: &'a N, remote: &'a M) -> RemoteRefSymbol<'a>
where
    N: AsRef<RefName> + ?Sized,
    M: AsRef<RemoteName> + ?Sized,
{
    let name = name.as_ref();
    let remote = remote.as_ref();
    RemoteRefSymbol { name, remote }
}
#[test]
fn test_heads_empty() {
    // A fresh repo's only head is the root commit.
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    assert_eq!(
        *repo.view().heads(),
        hashset! {repo.store().root_commit_id().clone()}
    );
}
#[test]
fn test_heads_fork() {
    // Two children of the same commit are both heads.
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    let initial = write_random_commit(tx.repo_mut());
    let child1 = write_random_commit_with_parents(tx.repo_mut(), &[&initial]);
    let child2 = write_random_commit_with_parents(tx.repo_mut(), &[&initial]);
    let repo = tx.commit("test").unwrap();
    assert_eq!(
        *repo.view().heads(),
        hashset! {
            child1.id().clone(),
            child2.id().clone(),
        }
    );
}
#[test]
fn test_heads_merge() {
    // A merge of two forks leaves only the merge commit as a head.
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    let initial = write_random_commit(tx.repo_mut());
    let child1 = write_random_commit_with_parents(tx.repo_mut(), &[&initial]);
    let child2 = write_random_commit_with_parents(tx.repo_mut(), &[&initial]);
    let merge = write_random_commit_with_parents(tx.repo_mut(), &[&child1, &child2]);
    let repo = tx.commit("test").unwrap();
    assert_eq!(*repo.view().heads(), hashset! {merge.id().clone()});
}
#[test]
fn test_merge_views_heads() {
    // Tests merging of the view's heads (by performing divergent operations).
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    let mut_repo = tx.repo_mut();
    // One head survives both transactions; each transaction removes one of
    // the other two and adds a head of its own.
    let head_unchanged = write_random_commit(mut_repo);
    let head_remove_tx1 = write_random_commit(mut_repo);
    let head_remove_tx2 = write_random_commit(mut_repo);
    let repo = tx.commit("test").unwrap();
    let mut tx1 = repo.start_transaction();
    tx1.repo_mut().remove_head(head_remove_tx1.id());
    let head_add_tx1 = write_random_commit(tx1.repo_mut());
    let mut tx2 = repo.start_transaction();
    tx2.repo_mut().remove_head(head_remove_tx2.id());
    let head_add_tx2 = write_random_commit(tx2.repo_mut());
    let repo = commit_transactions(vec![tx1, tx2]);
    // Merged view: both removals and both additions take effect.
    let expected_heads = hashset! {
        head_unchanged.id().clone(),
        head_add_tx1.id().clone(),
        head_add_tx2.id().clone(),
    };
    assert_eq!(repo.view().heads(), &expected_heads);
}
#[test]
fn test_merge_views_checkout() {
    // Tests merging of the view's checkout (by performing divergent operations).
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Workspace 1 gets updated in both transactions.
    // Workspace 2 gets updated only in tx1.
    // Workspace 3 gets updated only in tx2.
    // Workspace 4 gets deleted in tx1 and modified in tx2.
    // Workspace 5 gets deleted in tx2 and modified in tx1.
    // Workspace 6 gets added in tx1.
    // Workspace 7 gets added in tx2.
    let mut initial_tx = repo.start_transaction();
    let commit1 = write_random_commit(initial_tx.repo_mut());
    let commit2 = write_random_commit(initial_tx.repo_mut());
    let commit3 = write_random_commit(initial_tx.repo_mut());
    let ws1_name = WorkspaceNameBuf::from("ws1");
    let ws2_name = WorkspaceNameBuf::from("ws2");
    let ws3_name = WorkspaceNameBuf::from("ws3");
    let ws4_name = WorkspaceNameBuf::from("ws4");
    let ws5_name = WorkspaceNameBuf::from("ws5");
    let ws6_name = WorkspaceNameBuf::from("ws6");
    let ws7_name = WorkspaceNameBuf::from("ws7");
    // Baseline: workspaces 1-5 all start at commit1.
    initial_tx
        .repo_mut()
        .set_wc_commit(ws1_name.clone(), commit1.id().clone())
        .unwrap();
    initial_tx
        .repo_mut()
        .set_wc_commit(ws2_name.clone(), commit1.id().clone())
        .unwrap();
    initial_tx
        .repo_mut()
        .set_wc_commit(ws3_name.clone(), commit1.id().clone())
        .unwrap();
    initial_tx
        .repo_mut()
        .set_wc_commit(ws4_name.clone(), commit1.id().clone())
        .unwrap();
    initial_tx
        .repo_mut()
        .set_wc_commit(ws5_name.clone(), commit1.id().clone())
        .unwrap();
    let repo = initial_tx.commit("test").unwrap();
    // tx1 moves ws1/ws2/ws5/ws6 to commit2 and deletes ws4.
    let mut tx1 = repo.start_transaction();
    tx1.repo_mut()
        .set_wc_commit(ws1_name.clone(), commit2.id().clone())
        .unwrap();
    tx1.repo_mut()
        .set_wc_commit(ws2_name.clone(), commit2.id().clone())
        .unwrap();
    tx1.repo_mut().remove_wc_commit(&ws4_name).unwrap();
    tx1.repo_mut()
        .set_wc_commit(ws5_name.clone(), commit2.id().clone())
        .unwrap();
    tx1.repo_mut()
        .set_wc_commit(ws6_name.clone(), commit2.id().clone())
        .unwrap();
    // tx2 moves ws1/ws3/ws4/ws7 to commit3 and deletes ws5.
    let mut tx2 = repo.start_transaction();
    tx2.repo_mut()
        .set_wc_commit(ws1_name.clone(), commit3.id().clone())
        .unwrap();
    tx2.repo_mut()
        .set_wc_commit(ws3_name.clone(), commit3.id().clone())
        .unwrap();
    tx2.repo_mut()
        .set_wc_commit(ws4_name.clone(), commit3.id().clone())
        .unwrap();
    tx2.repo_mut().remove_wc_commit(&ws5_name).unwrap();
    tx2.repo_mut()
        .set_wc_commit(ws7_name.clone(), commit3.id().clone())
        .unwrap();
    let repo = commit_transactions(vec![tx1, tx2]);
    // We currently arbitrarily pick the first transaction's working-copy commit
    // (first by transaction end time).
    assert_eq!(repo.view().get_wc_commit_id(&ws1_name), Some(commit2.id()));
    assert_eq!(repo.view().get_wc_commit_id(&ws2_name), Some(commit2.id()));
    assert_eq!(repo.view().get_wc_commit_id(&ws3_name), Some(commit3.id()));
    // Delete-vs-modify resolves to delete for both ws4 and ws5.
    assert_eq!(repo.view().get_wc_commit_id(&ws4_name), None);
    assert_eq!(repo.view().get_wc_commit_id(&ws5_name), None);
    assert_eq!(repo.view().get_wc_commit_id(&ws6_name), Some(commit2.id()));
    assert_eq!(repo.view().get_wc_commit_id(&ws7_name), Some(commit3.id()));
}
#[test]
fn test_merge_views_bookmarks() {
    // Tests merging of bookmarks (by performing concurrent operations). See
    // test_refs.rs for tests of merging of individual ref targets.
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    let mut_repo = tx.repo_mut();
    // Baseline: "main" has a local target plus remote refs on "origin"
    // (untracked/new) and "alternate" (tracked); "feature" has only a local
    // target.
    let main_bookmark_local_tx0 = write_random_commit(mut_repo);
    let main_bookmark_origin_tx0 = write_random_commit(mut_repo);
    let main_bookmark_alternate_tx0 = write_random_commit(mut_repo);
    let main_bookmark_origin_tx0_remote_ref = RemoteRef {
        target: RefTarget::normal(main_bookmark_origin_tx0.id().clone()),
        state: RemoteRefState::New,
    };
    let main_bookmark_alternate_tx0_remote_ref = RemoteRef {
        target: RefTarget::normal(main_bookmark_alternate_tx0.id().clone()),
        state: RemoteRefState::Tracked,
    };
    mut_repo.set_local_bookmark_target(
        "main".as_ref(),
        RefTarget::normal(main_bookmark_local_tx0.id().clone()),
    );
    mut_repo.set_remote_bookmark(
        remote_symbol("main", "origin"),
        main_bookmark_origin_tx0_remote_ref,
    );
    mut_repo.set_remote_bookmark(
        remote_symbol("main", "alternate"),
        main_bookmark_alternate_tx0_remote_ref.clone(),
    );
    let feature_bookmark_local_tx0 = write_random_commit(mut_repo);
    mut_repo.set_local_bookmark_target(
        "feature".as_ref(),
        RefTarget::normal(feature_bookmark_local_tx0.id().clone()),
    );
    let repo = tx.commit("test").unwrap();
    // tx1 moves both local bookmarks.
    let mut tx1 = repo.start_transaction();
    let main_bookmark_local_tx1 = write_random_commit(tx1.repo_mut());
    tx1.repo_mut().set_local_bookmark_target(
        "main".as_ref(),
        RefTarget::normal(main_bookmark_local_tx1.id().clone()),
    );
    let feature_bookmark_tx1 = write_random_commit(tx1.repo_mut());
    tx1.repo_mut().set_local_bookmark_target(
        "feature".as_ref(),
        RefTarget::normal(feature_bookmark_tx1.id().clone()),
    );
    // tx2 moves local "main" elsewhere and updates the "origin" remote ref
    // to a tracked state.
    let mut tx2 = repo.start_transaction();
    let main_bookmark_local_tx2 = write_random_commit(tx2.repo_mut());
    let main_bookmark_origin_tx2 = write_random_commit(tx2.repo_mut());
    let main_bookmark_origin_tx2_remote_ref = RemoteRef {
        target: RefTarget::normal(main_bookmark_origin_tx2.id().clone()),
        state: RemoteRefState::Tracked,
    };
    tx2.repo_mut().set_local_bookmark_target(
        "main".as_ref(),
        RefTarget::normal(main_bookmark_local_tx2.id().clone()),
    );
    tx2.repo_mut().set_remote_bookmark(
        remote_symbol("main", "origin"),
        main_bookmark_origin_tx2_remote_ref.clone(),
    );
    let repo = commit_transactions(vec![tx1, tx2]);
    // Local "main" conflicts (both transactions moved it from tx0's target),
    // so the merged target keeps tx0 as the removed side with both new
    // targets as adds.
    let expected_main_bookmark = LocalRemoteRefTarget {
        local_target: &RefTarget::from_legacy_form(
            [main_bookmark_local_tx0.id().clone()],
            [
                main_bookmark_local_tx1.id().clone(),
                main_bookmark_local_tx2.id().clone(),
            ],
        ),
        remote_refs: vec![
            (
                "alternate".as_ref(),
                &main_bookmark_alternate_tx0_remote_ref,
            ),
            // tx1: unchanged, tx2: new -> tracking
            ("origin".as_ref(), &main_bookmark_origin_tx2_remote_ref),
        ],
    };
    // Only tx1 touched "feature", so its target wins without conflict.
    let expected_feature_bookmark = LocalRemoteRefTarget {
        local_target: &RefTarget::normal(feature_bookmark_tx1.id().clone()),
        remote_refs: vec![],
    };
    assert_eq!(
        repo.view().bookmarks().collect::<BTreeMap<_, _>>(),
        btreemap! {
            "main".as_ref() => expected_main_bookmark,
            "feature".as_ref() => expected_feature_bookmark,
        }
    );
}
#[test]
fn test_merge_views_tags() {
// Tests merging of tags (by performing divergent operations). See
// test_refs.rs for tests of merging of individual ref targets.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let v1_tx0 = write_random_commit(mut_repo);
mut_repo.set_local_tag_target("v1.0".as_ref(), RefTarget::normal(v1_tx0.id().clone()));
let v2_tx0 = write_random_commit(mut_repo);
mut_repo.set_local_tag_target("v2.0".as_ref(), RefTarget::normal(v2_tx0.id().clone()));
let repo = tx.commit("test").unwrap();
let mut tx1 = repo.start_transaction();
let v1_tx1 = write_random_commit(tx1.repo_mut());
tx1.repo_mut()
.set_local_tag_target("v1.0".as_ref(), RefTarget::normal(v1_tx1.id().clone()));
let v2_tx1 = write_random_commit(tx1.repo_mut());
tx1.repo_mut()
.set_local_tag_target("v2.0".as_ref(), RefTarget::normal(v2_tx1.id().clone()));
let mut tx2 = repo.start_transaction();
let v1_tx2 = write_random_commit(tx2.repo_mut());
tx2.repo_mut()
.set_local_tag_target("v1.0".as_ref(), RefTarget::normal(v1_tx2.id().clone()));
let repo = commit_transactions(vec![tx1, tx2]);
let expected_v1 = RefTarget::from_legacy_form(
[v1_tx0.id().clone()],
[v1_tx1.id().clone(), v1_tx2.id().clone()],
);
let expected_v2 = RefTarget::normal(v2_tx1.id().clone());
assert_eq!(
repo.view().local_tags().collect_vec(),
vec![
("v1.0".as_ref(), &expected_v1),
("v2.0".as_ref(), &expected_v2),
]
);
}
#[test]
fn test_merge_views_remote_tags() {
// Tests merging of remote tags (by performing divergent operations). See
// test_refs.rs for tests of merging of individual ref targets.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let v1_origin_tx0 = write_random_commit(mut_repo);
mut_repo.set_remote_tag(
remote_symbol("v1.0", "origin"),
RemoteRef {
target: RefTarget::normal(v1_origin_tx0.id().clone()),
state: RemoteRefState::New,
},
);
let v1_upstream_tx0 = write_random_commit(mut_repo);
mut_repo.set_remote_tag(
remote_symbol("v1.0", "upstream"),
RemoteRef {
target: RefTarget::normal(v1_upstream_tx0.id().clone()),
state: RemoteRefState::Tracked,
},
);
let v2_upstream_tx0 = write_random_commit(mut_repo);
mut_repo.set_remote_tag(
remote_symbol("v2.0", "upstream"),
RemoteRef {
target: RefTarget::normal(v2_upstream_tx0.id().clone()),
state: RemoteRefState::Tracked,
},
);
let repo = tx.commit("test").unwrap();
// v1.0@origin: tx0 (new) -> tx1 (new)
// v2.0@upstream: tx0 (tracked) -> tx1 (tracked)
let mut tx1 = repo.start_transaction();
let v1_origin_tx1 = write_random_commit(tx1.repo_mut());
tx1.repo_mut().set_remote_tag(
remote_symbol("v1.0", "origin"),
RemoteRef {
target: RefTarget::normal(v1_origin_tx1.id().clone()),
state: RemoteRefState::New,
},
);
let v2_upstream_tx1 = write_random_commit(tx1.repo_mut());
tx1.repo_mut().set_remote_tag(
remote_symbol("v2.0", "upstream"),
RemoteRef {
target: RefTarget::normal(v2_upstream_tx1.id().clone()),
state: RemoteRefState::Tracked,
},
);
// v1.0@origin: tx0 (new) -> tx2 (tracked)
// v1.0@upstream: tx0 (tracked) -> tx2 (new)
let mut tx2 = repo.start_transaction();
let v1_origin_tx2 = write_random_commit(tx2.repo_mut());
tx2.repo_mut().set_remote_tag(
remote_symbol("v1.0", "origin"),
RemoteRef {
target: RefTarget::normal(v1_origin_tx2.id().clone()),
state: RemoteRefState::Tracked,
},
);
let v1_upstream_tx2 = write_random_commit(tx1.repo_mut());
tx1.repo_mut().set_remote_tag(
remote_symbol("v1.0", "upstream"),
RemoteRef {
target: RefTarget::normal(v1_upstream_tx2.id().clone()),
state: RemoteRefState::New,
},
);
let repo = commit_transactions(vec![tx1, tx2]);
let expected_v1_origin = RemoteRef {
target: RefTarget::from_legacy_form(
[v1_origin_tx0.id().clone()],
[v1_origin_tx1.id().clone(), v1_origin_tx2.id().clone()],
),
state: RemoteRefState::Tracked,
};
let expected_v1_upstream = RemoteRef {
target: RefTarget::normal(v1_upstream_tx2.id().clone()),
state: RemoteRefState::New,
};
let expected_v2_upstream = RemoteRef {
target: RefTarget::normal(v2_upstream_tx1.id().clone()),
state: RemoteRefState::Tracked,
};
assert_eq!(
repo.view().all_remote_tags().collect_vec(),
vec![
(remote_symbol("v1.0", "origin"), &expected_v1_origin),
(remote_symbol("v1.0", "upstream"), &expected_v1_upstream),
(remote_symbol("v2.0", "upstream"), &expected_v2_upstream),
]
);
}
#[test]
fn test_merge_views_git_refs() {
// Tests merging of git refs (by performing divergent operations). See
// test_refs.rs for tests of merging of individual ref targets.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let main_bookmark_tx0 = write_random_commit(mut_repo);
mut_repo.set_git_ref_target(
"refs/heads/main".as_ref(),
RefTarget::normal(main_bookmark_tx0.id().clone()),
);
let feature_bookmark_tx0 = write_random_commit(mut_repo);
mut_repo.set_git_ref_target(
"refs/heads/feature".as_ref(),
RefTarget::normal(feature_bookmark_tx0.id().clone()),
);
let repo = tx.commit("test").unwrap();
let mut tx1 = repo.start_transaction();
let main_bookmark_tx1 = write_random_commit(tx1.repo_mut());
tx1.repo_mut().set_git_ref_target(
"refs/heads/main".as_ref(),
RefTarget::normal(main_bookmark_tx1.id().clone()),
);
let feature_bookmark_tx1 = write_random_commit(tx1.repo_mut());
tx1.repo_mut().set_git_ref_target(
"refs/heads/feature".as_ref(),
RefTarget::normal(feature_bookmark_tx1.id().clone()),
);
let mut tx2 = repo.start_transaction();
let main_bookmark_tx2 = write_random_commit(tx2.repo_mut());
tx2.repo_mut().set_git_ref_target(
"refs/heads/main".as_ref(),
RefTarget::normal(main_bookmark_tx2.id().clone()),
);
let repo = commit_transactions(vec![tx1, tx2]);
let expected_main_bookmark = RefTarget::from_legacy_form(
[main_bookmark_tx0.id().clone()],
[
main_bookmark_tx1.id().clone(),
main_bookmark_tx2.id().clone(),
],
);
let expected_feature_bookmark = RefTarget::normal(feature_bookmark_tx1.id().clone());
assert_eq!(
repo.view().git_refs(),
&btreemap! {
"refs/heads/main".into() => expected_main_bookmark,
"refs/heads/feature".into() => expected_feature_bookmark,
}
);
}
#[test]
fn test_merge_views_git_heads() {
// Tests merging of git heads (by performing divergent operations). See
// test_refs.rs for tests of merging of individual ref targets.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx0 = repo.start_transaction();
let tx0_head = write_random_commit(tx0.repo_mut());
tx0.repo_mut()
.set_git_head_target(RefTarget::normal(tx0_head.id().clone()));
let repo = tx0.commit("test").unwrap();
let mut tx1 = repo.start_transaction();
let tx1_head = write_random_commit(tx1.repo_mut());
tx1.repo_mut()
.set_git_head_target(RefTarget::normal(tx1_head.id().clone()));
let mut tx2 = repo.start_transaction();
let tx2_head = write_random_commit(tx2.repo_mut());
tx2.repo_mut()
.set_git_head_target(RefTarget::normal(tx2_head.id().clone()));
let repo = commit_transactions(vec![tx1, tx2]);
let expected_git_head = RefTarget::from_legacy_form(
[tx0_head.id().clone()],
[tx1_head.id().clone(), tx2_head.id().clone()],
);
assert_eq!(repo.view().git_head(), &expected_git_head);
}
#[test]
fn test_merge_views_divergent() {
// We start with just commit A. Operation 1 rewrites it as A2. Operation 2
// rewrites it as A3.
let test_repo = TestRepo::init();
let mut tx = test_repo.repo.start_transaction();
let commit_a = write_random_commit(tx.repo_mut());
let repo = tx.commit("test").unwrap();
let mut tx1 = repo.start_transaction();
let commit_a2 = tx1
.repo_mut()
.rewrite_commit(&commit_a)
.set_description("A2")
.write()
.unwrap();
tx1.repo_mut().rebase_descendants().unwrap();
let mut tx2 = repo.start_transaction();
let commit_a3 = tx2
.repo_mut()
.rewrite_commit(&commit_a)
.set_description("A3")
.write()
.unwrap();
tx2.repo_mut().rebase_descendants().unwrap();
let repo = commit_transactions(vec![tx1, tx2]);
// A2 and A3 should be heads.
assert_eq!(
*repo.view().heads(),
hashset! {commit_a2.id().clone(), commit_a3.id().clone()}
);
}
#[test_case(false ; "rewrite first")]
#[test_case(true ; "add child first")]
fn test_merge_views_child_on_rewritten(child_first: bool) {
// We start with just commit A. Operation 1 adds commit B on top. Operation 2
// rewrites A as A2.
let test_repo = TestRepo::init();
let mut tx = test_repo.repo.start_transaction();
let commit_a = write_random_commit(tx.repo_mut());
let repo = tx.commit("test").unwrap();
let mut tx1 = repo.start_transaction();
let commit_b = write_random_commit_with_parents(tx1.repo_mut(), &[&commit_a]);
let mut tx2 = repo.start_transaction();
let commit_a2 = tx2
.repo_mut()
.rewrite_commit(&commit_a)
.set_description("A2")
.write()
.unwrap();
tx2.repo_mut().rebase_descendants().unwrap();
let repo = if child_first {
commit_transactions(vec![tx1, tx2])
} else {
commit_transactions(vec![tx2, tx1])
};
// A new B2 commit (B rebased onto A2) should be the only head.
let heads = repo.view().heads();
assert_eq!(heads.len(), 1);
let b2_id = heads.iter().next().unwrap();
let commit_b2 = repo.store().get_commit(b2_id).unwrap();
assert_eq!(commit_b2.change_id(), commit_b.change_id());
assert_eq!(commit_b2.parent_ids(), vec![commit_a2.id().clone()]);
}
#[test_case(false, false ; "add child on unchanged, rewrite first")]
#[test_case(false, true ; "add child on unchanged, add child first")]
#[test_case(true, false ; "add child on rewritten, rewrite first")]
#[test_case(true, true ; "add child on rewritten, add child first")]
fn test_merge_views_child_on_rewritten_divergent(on_rewritten: bool, child_first: bool) {
// We start with divergent commits A2 and A3. Operation 1 adds commit B on top
// of A2 or A3. Operation 2 rewrites A2 as A4. The result should be that B
// gets rebased onto A4 if it was based on A2 before, but if it was based on
// A3, it should remain there.
let test_repo = TestRepo::init();
let mut tx = test_repo.repo.start_transaction();
let commit_a2 = write_random_commit(tx.repo_mut());
let commit_a3 = create_random_commit(tx.repo_mut())
.set_change_id(commit_a2.change_id().clone())
.write()
.unwrap();
let repo = tx.commit("test").unwrap();
let mut tx1 = repo.start_transaction();
let parent = if on_rewritten { &commit_a2 } else { &commit_a3 };
let commit_b = write_random_commit_with_parents(tx1.repo_mut(), &[parent]);
let mut tx2 = repo.start_transaction();
let commit_a4 = tx2
.repo_mut()
.rewrite_commit(&commit_a2)
.set_description("A4")
.write()
.unwrap();
tx2.repo_mut().rebase_descendants().unwrap();
let repo = if child_first {
commit_transactions(vec![tx1, tx2])
} else {
commit_transactions(vec![tx2, tx1])
};
if on_rewritten {
// A3 should remain as a head. The other head should be B2 (B rebased onto A4).
let mut heads = repo.view().heads().clone();
assert_eq!(heads.len(), 2);
assert!(heads.remove(commit_a3.id()));
let b2_id = heads.iter().next().unwrap();
let commit_b2 = repo.store().get_commit(b2_id).unwrap();
assert_eq!(commit_b2.change_id(), commit_b.change_id());
assert_eq!(commit_b2.parent_ids(), vec![commit_a4.id().clone()]);
} else {
// No rebases should happen, so B and A4 should be the heads.
let mut heads = repo.view().heads().clone();
assert_eq!(heads.len(), 2);
assert!(heads.remove(commit_b.id()));
assert!(heads.remove(commit_a4.id()));
}
}
#[test_case(false ; "abandon first")]
#[test_case(true ; "add child first")]
fn test_merge_views_child_on_abandoned(child_first: bool) {
// We start with commit B on top of commit A. Operation 1 adds commit C on top.
// Operation 2 abandons B.
let test_repo = TestRepo::init();
let mut tx = test_repo.repo.start_transaction();
let commit_a = write_random_commit(tx.repo_mut());
let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
let repo = tx.commit("test").unwrap();
let mut tx1 = repo.start_transaction();
let commit_c = write_random_commit_with_parents(tx1.repo_mut(), &[&commit_b]);
let mut tx2 = repo.start_transaction();
tx2.repo_mut().record_abandoned_commit(&commit_b);
tx2.repo_mut().rebase_descendants().unwrap();
let repo = if child_first {
commit_transactions(vec![tx1, tx2])
} else {
commit_transactions(vec![tx2, tx1])
};
// A new C2 commit (C rebased onto A) should be the only head.
let heads = repo.view().heads();
assert_eq!(heads.len(), 1);
let id_c2 = heads.iter().next().unwrap();
let commit_c2 = repo.store().get_commit(id_c2).unwrap();
assert_eq!(commit_c2.change_id(), commit_c.change_id());
assert_eq!(commit_c2.parent_ids(), vec![commit_a.id().clone()]);
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_revset.rs | lib/tests/test_revset.rs | // Copyright 2021 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::iter;
use std::path::Path;
use std::sync::Arc;
use assert_matches::assert_matches;
use chrono::DateTime;
use itertools::Itertools as _;
use jj_lib::backend::ChangeId;
use jj_lib::backend::CommitId;
use jj_lib::backend::MillisSinceEpoch;
use jj_lib::backend::Signature;
use jj_lib::backend::Timestamp;
use jj_lib::commit::Commit;
use jj_lib::default_index::DefaultIndexStore;
use jj_lib::fileset::FilesetExpression;
use jj_lib::git;
use jj_lib::graph::GraphEdge;
use jj_lib::graph::reverse_graph;
use jj_lib::id_prefix::IdPrefixContext;
use jj_lib::merge::Merge;
use jj_lib::merged_tree::MergedTree;
use jj_lib::object_id::ObjectId as _;
use jj_lib::op_store::RefTarget;
use jj_lib::op_store::RemoteRef;
use jj_lib::op_store::RemoteRefState;
use jj_lib::ref_name::RefName;
use jj_lib::ref_name::RemoteName;
use jj_lib::ref_name::RemoteRefSymbol;
use jj_lib::ref_name::WorkspaceName;
use jj_lib::ref_name::WorkspaceNameBuf;
use jj_lib::repo::ReadonlyRepo;
use jj_lib::repo::Repo;
use jj_lib::repo_path::RepoPath;
use jj_lib::repo_path::RepoPathUiConverter;
use jj_lib::revset::ResolvedRevsetExpression;
use jj_lib::revset::Revset;
use jj_lib::revset::RevsetAliasesMap;
use jj_lib::revset::RevsetDiagnostics;
use jj_lib::revset::RevsetEvaluationError;
use jj_lib::revset::RevsetExpression;
use jj_lib::revset::RevsetExtensions;
use jj_lib::revset::RevsetFilterPredicate;
use jj_lib::revset::RevsetParseContext;
use jj_lib::revset::RevsetResolutionError;
use jj_lib::revset::RevsetWorkspaceContext;
use jj_lib::revset::SymbolResolver;
use jj_lib::revset::SymbolResolverExtension;
use jj_lib::revset::parse;
use jj_lib::signing::SignBehavior;
use jj_lib::signing::Signer;
use jj_lib::test_signing_backend::TestSigningBackend;
use jj_lib::workspace::Workspace;
use pollster::FutureExt as _;
use test_case::test_case;
use testutils::TestRepo;
use testutils::TestRepoBackend;
use testutils::TestWorkspace;
use testutils::create_random_commit;
use testutils::create_tree;
use testutils::create_tree_with;
use testutils::repo_path;
use testutils::write_random_commit;
use testutils::write_random_commit_with_parents;
fn remote_symbol<'a, N, M>(name: &'a N, remote: &'a M) -> RemoteRefSymbol<'a>
where
N: AsRef<RefName> + ?Sized,
M: AsRef<RemoteName> + ?Sized,
{
RemoteRefSymbol {
name: name.as_ref(),
remote: remote.as_ref(),
}
}
fn default_symbol_resolver(repo: &dyn Repo) -> SymbolResolver<'_> {
SymbolResolver::new(repo, &([] as [&Box<dyn SymbolResolverExtension>; 0]))
}
fn resolve_symbol(repo: &dyn Repo, symbol: &str) -> Result<Vec<CommitId>, RevsetResolutionError> {
let context = RevsetParseContext {
aliases_map: &RevsetAliasesMap::default(),
local_variables: HashMap::new(),
user_email: "",
date_pattern_context: chrono::Local::now().into(),
default_ignored_remote: Some(git::REMOTE_NAME_FOR_LOCAL_GIT_REPO),
use_glob_by_default: true,
extensions: &RevsetExtensions::default(),
workspace: None,
};
let expression = parse(&mut RevsetDiagnostics::new(), symbol, &context).unwrap();
assert_matches!(*expression, RevsetExpression::CommitRef(_));
let symbol_resolver = default_symbol_resolver(repo);
match expression
.resolve_user_expression(repo, &symbol_resolver)?
.as_ref()
{
RevsetExpression::Commits(commits) => Ok(commits.clone()),
expression => panic!("symbol resolved to compound expression: {expression:?}"),
}
}
fn revset_for_commits<'index>(
repo: &'index dyn Repo,
commits: &[&Commit],
) -> Box<dyn Revset + 'index> {
let symbol_resolver = default_symbol_resolver(repo);
RevsetExpression::commits(commits.iter().map(|commit| commit.id().clone()).collect())
.resolve_user_expression(repo, &symbol_resolver)
.unwrap()
.evaluate(repo)
.unwrap()
}
fn build_changed_path_index(repo: &ReadonlyRepo) -> Arc<ReadonlyRepo> {
let default_index_store: &DefaultIndexStore = repo.index_store().downcast_ref().unwrap();
default_index_store
.build_changed_path_index_at_operation(repo.op_id(), repo.store(), u32::MAX)
.block_on()
.unwrap();
repo.reload_at(repo.operation()).unwrap()
}
#[test]
fn test_resolve_symbol_empty_string() {
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
assert_matches!(
resolve_symbol(repo.as_ref(), r#""""#),
Err(RevsetResolutionError::EmptyString)
);
}
#[test]
fn test_resolve_symbol_commit_id() {
let settings = testutils::user_settings();
// Test only with git so we can get predictable commit ids
let test_repo = TestRepo::init_with_backend(TestRepoBackend::Git);
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let signature = Signature {
name: "test".to_string(),
email: "test".to_string(),
timestamp: Timestamp {
timestamp: MillisSinceEpoch(0),
tz_offset: 0,
},
};
let mut commits = vec![];
for i in [156, 268, 869] {
let commit = mut_repo
.new_commit(
vec![repo.store().root_commit_id().clone()],
repo.store().empty_merged_tree(),
)
// An arbitrary change id that doesn't start with "01"
.set_change_id(ChangeId::from_hex("781199f9d55d18e855a7aa84c5e4b40d"))
.set_description(format!("test {i}"))
.set_author(signature.clone())
.set_committer(signature.clone())
.write()
.unwrap();
commits.push(commit);
}
let repo = tx.commit("test").unwrap();
// Test the test setup
insta::assert_snapshot!(commits.iter().map(|c| c.id().hex()).join("\n"), @r"
019f179b4479a4f3d1373b772866037929e4f63c
019fd357eb2a4904c348b62d1f4cc2ac222cdbc7
017dc442a1d77bb1620a1a32863580ae81543d7d
");
// Test lookup by full commit id
assert_eq!(
resolve_symbol(repo.as_ref(), "019f179b4479a4f3d1373b772866037929e4f63c",).unwrap(),
vec![commits[0].id().clone()]
);
assert_eq!(
resolve_symbol(repo.as_ref(), "019fd357eb2a4904c348b62d1f4cc2ac222cdbc7",).unwrap(),
vec![commits[1].id().clone()]
);
assert_eq!(
resolve_symbol(repo.as_ref(), "017dc442a1d77bb1620a1a32863580ae81543d7d",).unwrap(),
vec![commits[2].id().clone()]
);
// Test commit id prefix
assert_eq!(
resolve_symbol(repo.as_ref(), "017").unwrap(),
vec![commits[2].id().clone()]
);
assert_matches!(
resolve_symbol(repo.as_ref(), "01"),
Err(RevsetResolutionError::AmbiguousCommitIdPrefix(s)) if s == "01"
);
assert_matches!(
resolve_symbol(repo.as_ref(), "010"),
Err(RevsetResolutionError::NoSuchRevision{name, candidates}) if name == "010" && candidates.is_empty()
);
// Test non-hex string
assert_matches!(
resolve_symbol(repo.as_ref(), "foo"),
Err(RevsetResolutionError::NoSuchRevision{name, candidates}) if name == "foo" && candidates.is_empty()
);
// Test present() suppresses only NoSuchRevision error
assert_eq!(resolve_commit_ids(repo.as_ref(), "present(foo)"), []);
let symbol_resolver = default_symbol_resolver(repo.as_ref());
let context = RevsetParseContext {
aliases_map: &RevsetAliasesMap::default(),
local_variables: HashMap::new(),
user_email: settings.user_email(),
date_pattern_context: chrono::Utc::now().fixed_offset().into(),
default_ignored_remote: Some(git::REMOTE_NAME_FOR_LOCAL_GIT_REPO),
use_glob_by_default: true,
extensions: &RevsetExtensions::default(),
workspace: None,
};
assert_matches!(
parse(&mut RevsetDiagnostics::new(), "present(01)", &context).unwrap()
.resolve_user_expression(repo.as_ref(), &symbol_resolver),
Err(RevsetResolutionError::AmbiguousCommitIdPrefix(s)) if s == "01"
);
assert_eq!(
resolve_commit_ids(repo.as_ref(), "present(017)"),
vec![commits[2].id().clone()]
);
// Test commit_id() function, which is roughly equivalent to present(id)
assert_eq!(
resolve_symbol(
repo.as_ref(),
"commit_id(019f179b4479a4f3d1373b772866037929e4f63c)",
)
.unwrap(),
vec![commits[0].id().clone()]
);
assert_eq!(
resolve_symbol(repo.as_ref(), "commit_id(019f1)").unwrap(),
vec![commits[0].id().clone()]
);
assert_eq!(
resolve_symbol(repo.as_ref(), "commit_id(12345)").unwrap(),
vec![]
);
assert_matches!(
resolve_symbol(repo.as_ref(), "commit_id('')"),
Err(RevsetResolutionError::AmbiguousCommitIdPrefix(s)) if s.is_empty()
);
assert_matches!(
resolve_symbol(repo.as_ref(), "commit_id(0)"),
Err(RevsetResolutionError::AmbiguousCommitIdPrefix(s)) if s == "0"
);
}
#[test_case(false ; "mutable")]
#[test_case(true ; "readonly")]
fn test_resolve_symbol_change_id(readonly: bool) {
// Test only with git so we can get predictable commit ids
let test_repo = TestRepo::init_with_backend(TestRepoBackend::Git);
let repo = &test_repo.repo;
// Add some commits that will end up having change ids with common prefixes
let author = Signature {
name: "git author".to_owned(),
email: "git.author@example.com".to_owned(),
timestamp: Timestamp {
timestamp: MillisSinceEpoch(1_000_000),
tz_offset: 60,
},
};
let committer = Signature {
name: "git committer".to_owned(),
email: "git.committer@example.com".to_owned(),
timestamp: Timestamp {
timestamp: MillisSinceEpoch(2_000_000),
tz_offset: -480,
},
};
let root_commit_id = repo.store().root_commit_id();
let empty_tree = repo.store().empty_merged_tree();
// These are change ids that would be generated for the imported commits,
// but that isn't important. Here we have common prefixes "04", "040",
// "04e1" across commit and change ids.
let change_ids = [
"04e12a5467bba790efb88a9870894ec2",
"040b3ba3a51d8edbc4c5855cbd09de71",
"04e1c7082e4e34f3f371d8a1a46770b8",
"911d7e52fd5ba04b8f289e14c3d30b52",
]
.map(ChangeId::from_hex);
let mut commits = vec![];
let mut tx = repo.start_transaction();
for (i, change_id) in iter::zip([0, 1, 2, 5359], change_ids) {
let commit = tx
.repo_mut()
.new_commit(vec![root_commit_id.clone()], empty_tree.clone())
.set_change_id(change_id)
.set_description(format!("test {i}"))
.set_author(author.clone())
.set_committer(committer.clone())
.write()
.unwrap();
commits.push(commit);
}
// Test the test setup
insta::allow_duplicates! {
insta::assert_snapshot!(
commits.iter().map(|c| format!("{} {}\n", c.id(), c.change_id())).join(""), @r"
cd741d7f2c542e443df3c5bf2d4f8a15a2759e77 zvlyxpuvtsoopsqzlkorrpqrszrqvlnx
0af32dcddbdf49c132ad39c3623a6196c6c987a5 zvzowopwpuymrlmonvnuruunomzqmlsy
553ee869e64329d1022f5c00c63dff6621924c18 zvlynszrxlvlwvkwkwsymrpypvtsszor
0407d5eb08231b546a42518a50a835f17282eaef qyymsluxkmuopzvorkxrqlyvnwmwzoux
");
}
let _readonly_repo;
let repo: &dyn Repo = if readonly {
_readonly_repo = tx.commit("test").unwrap();
_readonly_repo.as_ref()
} else {
tx.repo_mut()
};
// Test lookup by full change id
assert_eq!(
resolve_symbol(repo, "zvlyxpuvtsoopsqzlkorrpqrszrqvlnx").unwrap(),
vec![commits[0].id().clone()]
);
assert_eq!(
resolve_symbol(repo, "zvzowopwpuymrlmonvnuruunomzqmlsy").unwrap(),
vec![commits[1].id().clone()]
);
assert_eq!(
resolve_symbol(repo, "zvlynszrxlvlwvkwkwsymrpypvtsszor").unwrap(),
vec![commits[2].id().clone()]
);
// Test change id prefix
assert_eq!(
resolve_symbol(repo, "zvlyx").unwrap(),
vec![commits[0].id().clone()]
);
assert_eq!(
resolve_symbol(repo, "zvlyn").unwrap(),
vec![commits[2].id().clone()]
);
assert_matches!(
resolve_symbol(repo, "zvly"),
Err(RevsetResolutionError::AmbiguousChangeIdPrefix(s)) if s == "zvly"
);
assert_matches!(
resolve_symbol(repo, "zvlyw"),
Err(RevsetResolutionError::NoSuchRevision{name, candidates}) if name == "zvlyw" && candidates.is_empty()
);
// Test that commit and changed id don't conflict ("040" and "zvz" are the
// same).
assert_eq!(
resolve_symbol(repo, "040").unwrap(),
vec![commits[3].id().clone()]
);
assert_eq!(
resolve_symbol(repo, "zvz").unwrap(),
vec![commits[1].id().clone()]
);
// Test non-hex string
assert_matches!(
resolve_symbol(repo, "foo"),
Err(RevsetResolutionError::NoSuchRevision{
name,
candidates
}) if name == "foo" && candidates.is_empty()
);
// Test change_id() function, which is roughly equivalent to present(id)
assert_eq!(
resolve_symbol(repo, "change_id(zvlyxpuvtsoopsqzlkorrpqrszrqvlnx)").unwrap(),
vec![commits[0].id().clone()]
);
assert_eq!(
resolve_symbol(repo, "change_id(zvlyx)").unwrap(),
vec![commits[0].id().clone()]
);
assert_eq!(resolve_symbol(repo, "change_id(xyzzy)").unwrap(), vec![]);
assert_matches!(
resolve_symbol(repo, "change_id('')"),
Err(RevsetResolutionError::AmbiguousChangeIdPrefix(s)) if s.is_empty()
);
assert_matches!(
resolve_symbol(repo, "change_id(z)"),
Err(RevsetResolutionError::AmbiguousChangeIdPrefix(s)) if s == "z"
);
}
#[test]
fn test_resolve_symbol_divergent_change_id() {
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
let commit2 = create_random_commit(tx.repo_mut())
.set_change_id(commit1.change_id().clone())
.write()
.unwrap();
let change_id = commit1.change_id();
assert_matches!(
resolve_symbol(tx.repo(), &format!("{change_id}")),
Err(RevsetResolutionError::DivergentChangeId { symbol, visible_targets })
if symbol == change_id.to_string()
&& visible_targets == vec![(0, commit2.id().clone()), (1, commit1.id().clone())]
);
assert_eq!(
resolve_symbol(tx.repo(), &format!("{change_id}/0")).unwrap(),
vec![commit2.id().clone()]
);
assert_eq!(
resolve_symbol(tx.repo(), &format!("{change_id}/1")).unwrap(),
vec![commit1.id().clone()]
);
assert_matches!(
resolve_symbol(tx.repo(), &format!("{change_id}/2")),
Err(RevsetResolutionError::NoSuchRevision { .. })
);
assert_eq!(
resolve_symbol(tx.repo(), &format!("change_id({change_id})")).unwrap(),
vec![commit2.id().clone(), commit1.id().clone()]
);
}
#[test]
fn test_resolve_symbol_hidden_change_id() {
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
// Rewrite the commit, causing the old version to be abandoned.
let commit2 = tx
.repo_mut()
.rewrite_commit(&commit1)
.set_description("updated commit")
.write()
.unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo = tx.commit("rewrite commit").unwrap();
let change_id = commit1.change_id();
assert_eq!(
resolve_symbol(repo.as_ref(), &format!("{change_id}")).unwrap(),
vec![commit2.id().clone()]
);
assert_eq!(
resolve_symbol(repo.as_ref(), &format!("{change_id}/0")).unwrap(),
vec![commit2.id().clone()]
);
assert_eq!(
resolve_symbol(repo.as_ref(), &format!("{change_id}/1")).unwrap(),
vec![commit1.id().clone()]
);
assert_matches!(
resolve_symbol(repo.as_ref(), &format!("{change_id}/2")),
Err(RevsetResolutionError::NoSuchRevision { .. })
);
assert_eq!(
resolve_symbol(repo.as_ref(), &format!("change_id({change_id})")).unwrap(),
vec![commit2.id().clone()]
);
// Abandon the new commit as well so that there are only hidden commits.
let mut tx = repo.start_transaction();
tx.repo_mut().record_abandoned_commit(&commit2);
tx.repo_mut().rebase_descendants().unwrap();
let repo = tx.commit("abandon commit").unwrap();
assert_matches!(
resolve_symbol(repo.as_ref(), &format!("{change_id}")),
Err(RevsetResolutionError::NoSuchRevision { name, candidates })
if name == change_id.to_string() && candidates.is_empty()
);
assert_eq!(
resolve_symbol(repo.as_ref(), &format!("{change_id}/0")).unwrap(),
vec![commit2.id().clone()]
);
assert_eq!(
resolve_symbol(repo.as_ref(), &format!("{change_id}/1")).unwrap(),
vec![commit1.id().clone()]
);
assert_matches!(
resolve_symbol(repo.as_ref(), &format!("{change_id}/2")),
Err(RevsetResolutionError::NoSuchRevision { .. })
);
assert_eq!(
resolve_symbol(repo.as_ref(), &format!("change_id({change_id})")).unwrap(),
vec![]
);
}
#[test]
fn test_resolve_symbol_in_different_disambiguation_context() {
let test_repo = TestRepo::init();
let repo0 = &test_repo.repo;
let mut tx = repo0.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
// Create more commits that are likely to conflict with 1-char hex prefix.
for _ in 0..50 {
write_random_commit(tx.repo_mut());
}
let repo1 = tx.commit("test").unwrap();
let mut tx = repo1.start_transaction();
let commit2 = tx.repo_mut().rewrite_commit(&commit1).write().unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo2 = tx.commit("test").unwrap();
// Set up disambiguation index which only contains the commit2.id().
let id_prefix_context = IdPrefixContext::new(Arc::new(RevsetExtensions::default()))
.disambiguate_within(RevsetExpression::commit(commit2.id().clone()));
let symbol_resolver =
default_symbol_resolver(repo2.as_ref()).with_id_prefix_context(&id_prefix_context);
// Sanity check
let change_hex = commit2.change_id().reverse_hex();
assert_eq!(
symbol_resolver
.resolve_symbol(repo2.as_ref(), &change_hex[0..1])
.unwrap(),
commit2.id().clone()
);
assert_eq!(
symbol_resolver
.resolve_symbol(repo2.as_ref(), &commit2.id().hex()[0..1])
.unwrap(),
commit2.id().clone()
);
// Change ID is disambiguated within repo2, then resolved in repo1.
assert_eq!(
symbol_resolver
.resolve_symbol(repo1.as_ref(), &change_hex[0..1])
.unwrap(),
commit1.id().clone()
);
// Commit ID can be found in the disambiguation index, but doesn't exist in
// repo1.
assert_matches!(
symbol_resolver.resolve_symbol(repo1.as_ref(), &commit2.id().hex()[0..1]),
Err(RevsetResolutionError::NoSuchRevision { .. })
);
}
#[test]
fn test_resolve_working_copy() {
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
let commit2 = write_random_commit(tx.repo_mut());
let ws1 = WorkspaceNameBuf::from("ws1");
let ws2 = WorkspaceNameBuf::from("ws2");
// Cannot resolve a working-copy commit for an unknown workspace
let symbol_resolver = default_symbol_resolver(tx.repo());
assert_matches!(
RevsetExpression::working_copy(ws1.clone())
.resolve_user_expression(tx.repo(), &symbol_resolver),
Err(RevsetResolutionError::WorkspaceMissingWorkingCopy { name }) if name == "ws1"
);
// The error can be suppressed by present()
assert_eq!(
RevsetExpression::working_copy(ws1.clone())
.present()
.resolve_user_expression(tx.repo(), &symbol_resolver)
.unwrap()
.evaluate(tx.repo())
.unwrap()
.iter()
.map(Result::unwrap)
.collect_vec(),
vec![]
);
drop(symbol_resolver);
// Add some workspaces
tx.repo_mut()
.set_wc_commit(ws1.clone(), commit1.id().clone())
.unwrap();
tx.repo_mut()
.set_wc_commit(ws2.clone(), commit2.id().clone())
.unwrap();
let symbol_resolver = default_symbol_resolver(tx.repo());
let resolve = |name: WorkspaceNameBuf| -> Vec<CommitId> {
RevsetExpression::working_copy(name)
.resolve_user_expression(tx.repo(), &symbol_resolver)
.unwrap()
.evaluate(tx.repo())
.unwrap()
.iter()
.map(Result::unwrap)
.collect()
};
// Can resolve "@" shorthand with a default workspace name
assert_eq!(resolve(ws1), vec![commit1.id().clone()]);
// Can resolve an explicit checkout
assert_eq!(resolve(ws2), vec![commit2.id().clone()]);
}
#[test]
fn test_resolve_working_copies() {
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
let commit2 = write_random_commit(tx.repo_mut());
// Add some workspaces
let ws1 = WorkspaceNameBuf::from("ws1");
let ws2 = WorkspaceNameBuf::from("ws2");
// add one commit to each working copy
tx.repo_mut()
.set_wc_commit(ws1.clone(), commit1.id().clone())
.unwrap();
tx.repo_mut()
.set_wc_commit(ws2.clone(), commit2.id().clone())
.unwrap();
let symbol_resolver = default_symbol_resolver(tx.repo());
let resolve = || -> Vec<CommitId> {
RevsetExpression::working_copies()
.resolve_user_expression(tx.repo(), &symbol_resolver)
.unwrap()
.evaluate(tx.repo())
.unwrap()
.iter()
.map(Result::unwrap)
.collect()
};
// ensure our output has those two commits
assert_eq!(resolve(), vec![commit2.id().clone(), commit1.id().clone()]);
}
#[test]
fn test_resolve_symbol_bookmarks() {
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let new_remote_ref = |target| RemoteRef {
target,
state: RemoteRefState::New,
};
let tracked_remote_ref = |target| RemoteRef {
target,
state: RemoteRefState::Tracked,
};
let normal_tracked_remote_ref =
|id: &CommitId| tracked_remote_ref(RefTarget::normal(id.clone()));
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let commit1 = write_random_commit(mut_repo);
let commit2 = write_random_commit(mut_repo);
let commit3 = write_random_commit(mut_repo);
let commit4 = write_random_commit(mut_repo);
let commit5 = write_random_commit(mut_repo);
mut_repo.set_local_bookmark_target("local".as_ref(), RefTarget::normal(commit1.id().clone()));
mut_repo.set_remote_bookmark(
remote_symbol("remote", "origin"),
normal_tracked_remote_ref(commit2.id()),
);
mut_repo.set_local_bookmark_target(
"local-remote".as_ref(),
RefTarget::normal(commit3.id().clone()),
);
mut_repo.set_remote_bookmark(
remote_symbol("local-remote", "origin"),
normal_tracked_remote_ref(commit4.id()),
);
mut_repo.set_local_bookmark_target(
"local-remote@origin".as_ref(), // not a remote bookmark
RefTarget::normal(commit5.id().clone()),
);
mut_repo.set_remote_bookmark(
remote_symbol("local-remote", "mirror"),
tracked_remote_ref(mut_repo.get_local_bookmark("local-remote".as_ref())),
);
mut_repo.set_remote_bookmark(
remote_symbol("local-remote", "untracked"),
new_remote_ref(mut_repo.get_local_bookmark("local-remote".as_ref())),
);
mut_repo.set_remote_bookmark(
remote_symbol("local-remote", git::REMOTE_NAME_FOR_LOCAL_GIT_REPO),
tracked_remote_ref(mut_repo.get_local_bookmark("local-remote".as_ref())),
);
mut_repo.set_local_bookmark_target(
"local-conflicted".as_ref(),
RefTarget::from_legacy_form(
[commit1.id().clone()],
[commit3.id().clone(), commit2.id().clone()],
),
);
mut_repo.set_remote_bookmark(
remote_symbol("remote-conflicted", "origin"),
tracked_remote_ref(RefTarget::from_legacy_form(
[commit3.id().clone()],
[commit5.id().clone(), commit4.id().clone()],
)),
);
// Local only
assert_eq!(
resolve_symbol(mut_repo, "local").unwrap(),
vec![commit1.id().clone()],
);
insta::assert_debug_snapshot!(
resolve_symbol(mut_repo, "local@origin").unwrap_err(), @r#"
NoSuchRevision {
name: "local@origin",
candidates: [
"\"local-remote@origin\"",
"local",
"local-remote@git",
"local-remote@mirror",
"local-remote@origin",
"remote@origin",
],
}
"#);
// Remote only (or locally deleted)
insta::assert_debug_snapshot!(
resolve_symbol(mut_repo, "remote").unwrap_err(), @r#"
NoSuchRevision {
name: "remote",
candidates: [
"remote-conflicted@origin",
"remote@origin",
],
}
"#);
assert_eq!(
resolve_symbol(mut_repo, "remote@origin").unwrap(),
vec![commit2.id().clone()],
);
// Local/remote/git
assert_eq!(
resolve_symbol(mut_repo, "local-remote").unwrap(),
vec![commit3.id().clone()],
);
assert_eq!(
resolve_symbol(mut_repo, "local-remote@origin").unwrap(),
vec![commit4.id().clone()],
);
assert_eq!(
resolve_symbol(mut_repo, r#""local-remote@origin""#).unwrap(),
vec![commit5.id().clone()],
);
assert_eq!(
resolve_symbol(mut_repo, "local-remote@mirror").unwrap(),
vec![commit3.id().clone()],
);
assert_eq!(
resolve_symbol(mut_repo, "local-remote@git").unwrap(),
vec![commit3.id().clone()],
);
// Conflicted
assert_matches!(
resolve_symbol(mut_repo, "local-conflicted"),
Err(RevsetResolutionError::ConflictedRef { kind: "bookmark", symbol, targets })
if symbol == "local-conflicted"
&& targets == vec![commit3.id().clone(), commit2.id().clone()]
);
assert_matches!(
resolve_symbol(mut_repo, "remote-conflicted@origin"),
Err(RevsetResolutionError::ConflictedRef { kind: "remote_bookmark", symbol, targets })
if symbol == "remote-conflicted@origin"
&& targets == vec![commit5.id().clone(), commit4.id().clone()]
);
assert_eq!(
resolve_symbol(mut_repo, "bookmarks(local-conflicted)").unwrap(),
vec![commit3.id().clone(), commit2.id().clone()],
);
assert_eq!(
resolve_symbol(mut_repo, "remote_bookmarks(remote-conflicted, origin)").unwrap(),
vec![commit5.id().clone(), commit4.id().clone()],
);
// Typo of local/remote bookmark name:
// For "local-emote" (without @remote part), "local-remote@mirror"/"@git" aren't
// suggested since they point to the same target as "local-remote". OTOH,
// "local-remote@untracked" is suggested because non-tracking bookmark is
// unrelated to the local bookmark of the same name.
insta::assert_debug_snapshot!(
resolve_symbol(mut_repo, "local-emote").unwrap_err(), @r#"
NoSuchRevision {
name: "local-emote",
candidates: [
"\"local-remote@origin\"",
"local",
"local-conflicted",
"local-remote",
"local-remote@origin",
"local-remote@untracked",
],
}
"#);
insta::assert_debug_snapshot!(
resolve_symbol(mut_repo, "local-emote@origin").unwrap_err(), @r#"
NoSuchRevision {
name: "local-emote@origin",
candidates: [
"\"local-remote@origin\"",
"local",
"local-remote",
"local-remote@git",
"local-remote@mirror",
"local-remote@origin",
"local-remote@untracked",
"remote-conflicted@origin",
"remote@origin",
],
}
"#);
insta::assert_debug_snapshot!(
resolve_symbol(mut_repo, "local-remote@origine").unwrap_err(), @r#"
NoSuchRevision {
name: "local-remote@origine",
candidates: [
"\"local-remote@origin\"",
"local",
"local-remote",
"local-remote@git",
"local-remote@mirror",
"local-remote@origin",
"local-remote@untracked",
"remote-conflicted@origin",
"remote@origin",
],
}
"#);
// "local-remote@mirror" shouldn't be omitted just because it points to the same
// target as "local-remote".
insta::assert_debug_snapshot!(
resolve_symbol(mut_repo, "remote@mirror").unwrap_err(), @r#"
NoSuchRevision {
name: "remote@mirror",
candidates: [
"local-remote@mirror",
"remote@origin",
],
}
"#);
// Typo of remote-only bookmark name
insta::assert_debug_snapshot!(
resolve_symbol(mut_repo, "emote").unwrap_err(), @r#"
NoSuchRevision {
name: "emote",
candidates: [
"remote-conflicted@origin",
"remote@origin",
],
}
"#);
insta::assert_debug_snapshot!(
resolve_symbol(mut_repo, "emote@origin").unwrap_err(), @r#"
NoSuchRevision {
name: "emote@origin",
candidates: [
"\"local-remote@origin\"",
"local-remote@origin",
"remote@origin",
],
}
"#);
insta::assert_debug_snapshot!(
resolve_symbol(mut_repo, "remote@origine").unwrap_err(), @r#"
NoSuchRevision {
name: "remote@origine",
candidates: [
"\"local-remote@origin\"",
"local-remote@origin",
"remote-conflicted@origin",
"remote@origin",
],
}
"#);
}
#[test]
fn test_resolve_symbol_tags() {
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let commit1 = write_random_commit(mut_repo);
let commit2 = write_random_commit(mut_repo);
let commit3 = write_random_commit(mut_repo);
mut_repo.set_local_tag_target(
"tag-bookmark".as_ref(),
RefTarget::normal(commit1.id().clone()),
);
mut_repo.set_local_bookmark_target(
"tag-bookmark".as_ref(),
RefTarget::normal(commit2.id().clone()),
);
mut_repo.set_git_ref_target(
"refs/tags/unimported".as_ref(),
RefTarget::normal(commit3.id().clone()),
);
// Tag precedes bookmark
assert_eq!(
resolve_symbol(mut_repo, "tag-bookmark").unwrap(),
vec![commit1.id().clone()],
);
assert_matches!(
resolve_symbol(mut_repo, "unimported"),
Err(RevsetResolutionError::NoSuchRevision { .. })
);
// "@" (quoted) can be resolved, and root is a normal symbol.
let ws_name = WorkspaceName::DEFAULT.to_owned();
mut_repo
.set_wc_commit(ws_name.clone(), commit1.id().clone())
.unwrap();
mut_repo.set_local_tag_target("@".as_ref(), RefTarget::normal(commit2.id().clone()));
mut_repo.set_local_tag_target("root".as_ref(), RefTarget::normal(commit3.id().clone()));
assert_eq!(
resolve_symbol(mut_repo, r#""@""#).unwrap(),
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_load_repo.rs | lib/tests/test_load_repo.rs | // Copyright 2021 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use jj_lib::repo::RepoLoader;
use testutils::TestRepo;
use testutils::write_random_commit;
#[test]
fn test_load_at_operation() {
let settings = testutils::user_settings();
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let commit = write_random_commit(tx.repo_mut());
let repo = tx.commit("add commit").unwrap();
let mut tx = repo.start_transaction();
tx.repo_mut().remove_head(commit.id());
tx.commit("remove commit").unwrap();
// If we load the repo at head, we should not see the commit since it was
// removed
let loader = RepoLoader::init_from_file_system(
&settings,
test_repo.repo_path(),
&test_repo.env.default_store_factories(),
)
.unwrap();
let head_repo = loader.load_at_head().unwrap();
assert!(!head_repo.view().heads().contains(commit.id()));
// If we load the repo at the previous operation, we should see the commit since
// it has not been removed yet
let loader = RepoLoader::init_from_file_system(
&settings,
test_repo.repo_path(),
&test_repo.env.default_store_factories(),
)
.unwrap();
let old_repo = loader.load_at(repo.operation()).unwrap();
assert!(old_repo.view().heads().contains(commit.id()));
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_mut_repo.rs | lib/tests/test_mut_repo.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use jj_lib::backend::CommitId;
use jj_lib::index::Index;
use jj_lib::merge::Merge;
use jj_lib::merged_tree::MergedTree;
use jj_lib::op_store::RefTarget;
use jj_lib::op_store::RemoteRef;
use jj_lib::op_store::RemoteRefState;
use jj_lib::ref_name::RefName;
use jj_lib::ref_name::RemoteName;
use jj_lib::ref_name::RemoteRefSymbol;
use jj_lib::ref_name::WorkspaceName;
use jj_lib::ref_name::WorkspaceNameBuf;
use jj_lib::repo::Repo as _;
use jj_lib::rewrite::RebaseOptions;
use maplit::hashset;
use pollster::FutureExt as _;
use testutils::TestRepo;
use testutils::assert_rebased_onto;
use testutils::create_random_commit;
use testutils::create_random_tree;
use testutils::rebase_descendants_with_options_return_map;
use testutils::write_random_commit;
use testutils::write_random_commit_with_parents;
fn remote_symbol<'a, N, M>(name: &'a N, remote: &'a M) -> RemoteRefSymbol<'a>
where
N: AsRef<RefName> + ?Sized,
M: AsRef<RemoteName> + ?Sized,
{
RemoteRefSymbol {
name: name.as_ref(),
remote: remote.as_ref(),
}
}
fn index_has_id(index: &dyn Index, commit_id: &CommitId) -> bool {
index.has_id(commit_id).unwrap()
}
#[test]
fn test_edit() {
// Test that MutableRepo::edit() uses the requested commit (not a new child)
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let wc_commit = write_random_commit(tx.repo_mut());
let repo = tx.commit("test").unwrap();
let mut tx = repo.start_transaction();
let ws_name = WorkspaceName::DEFAULT.to_owned();
tx.repo_mut().edit(ws_name.clone(), &wc_commit).unwrap();
let repo = tx.commit("test").unwrap();
assert_eq!(repo.view().get_wc_commit_id(&ws_name), Some(wc_commit.id()));
}
#[test]
fn test_checkout() {
// Test that MutableRepo::check_out() creates a child
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let wc_commit_parent = write_random_commit(tx.repo_mut());
let repo = tx.commit("test").unwrap();
let mut tx = repo.start_transaction();
let ws_name = WorkspaceName::DEFAULT.to_owned();
let wc_commit = tx
.repo_mut()
.check_out(ws_name.clone(), &wc_commit_parent)
.unwrap();
assert_eq!(wc_commit.tree_ids(), wc_commit_parent.tree_ids());
assert_eq!(wc_commit.parent_ids().len(), 1);
assert_eq!(&wc_commit.parent_ids()[0], wc_commit_parent.id());
let repo = tx.commit("test").unwrap();
assert_eq!(repo.view().get_wc_commit_id(&ws_name), Some(wc_commit.id()));
}
#[test]
fn test_edit_previous_not_empty() {
// Test that MutableRepo::edit() does not usually abandon the previous
// commit.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let old_wc_commit = write_random_commit(mut_repo);
let ws_name = WorkspaceName::DEFAULT.to_owned();
mut_repo.edit(ws_name.clone(), &old_wc_commit).unwrap();
let repo = tx.commit("test").unwrap();
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let new_wc_commit = write_random_commit(mut_repo);
mut_repo.edit(ws_name, &new_wc_commit).unwrap();
mut_repo.rebase_descendants().unwrap();
assert!(mut_repo.view().heads().contains(old_wc_commit.id()));
}
#[test]
fn test_edit_previous_empty() {
// Test that MutableRepo::edit() abandons the previous commit if it was
// empty.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let old_wc_commit = mut_repo
.new_commit(
vec![repo.store().root_commit_id().clone()],
repo.store().empty_merged_tree(),
)
.write()
.unwrap();
let ws_name = WorkspaceName::DEFAULT.to_owned();
mut_repo.edit(ws_name.clone(), &old_wc_commit).unwrap();
let repo = tx.commit("test").unwrap();
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let new_wc_commit = write_random_commit(mut_repo);
mut_repo.edit(ws_name, &new_wc_commit).unwrap();
mut_repo.rebase_descendants().unwrap();
assert!(!mut_repo.view().heads().contains(old_wc_commit.id()));
}
#[test]
fn test_edit_previous_empty_merge() {
// Test that MutableRepo::edit() abandons the previous commit if it was
// an empty merge commit.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let old_parent1 = write_random_commit(mut_repo);
let old_parent2 = write_random_commit(mut_repo);
let empty_tree = repo.store().empty_merged_tree();
let old_parent_tree = MergedTree::merge(Merge::from_vec(vec![
(old_parent1.tree(), "old parent 1".into()),
(empty_tree, "empty".into()),
(old_parent2.tree(), "old parent 2".into()),
]))
.block_on()
.unwrap();
let old_wc_commit = mut_repo
.new_commit(
vec![old_parent1.id().clone(), old_parent2.id().clone()],
repo.store().empty_merged_tree(),
)
.set_tree(old_parent_tree)
.write()
.unwrap();
let ws_name = WorkspaceName::DEFAULT.to_owned();
mut_repo.edit(ws_name.clone(), &old_wc_commit).unwrap();
let repo = tx.commit("test").unwrap();
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let new_wc_commit = write_random_commit(mut_repo);
mut_repo.edit(ws_name, &new_wc_commit).unwrap();
mut_repo.rebase_descendants().unwrap();
assert!(!mut_repo.view().heads().contains(old_wc_commit.id()));
}
#[test]
fn test_edit_previous_empty_with_description() {
// Test that MutableRepo::edit() does not abandon the previous commit if it
// has a non-empty description.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let old_wc_commit = mut_repo
.new_commit(
vec![repo.store().root_commit_id().clone()],
repo.store().empty_merged_tree(),
)
.set_description("not empty")
.write()
.unwrap();
let ws_name = WorkspaceName::DEFAULT.to_owned();
mut_repo.edit(ws_name.clone(), &old_wc_commit).unwrap();
let repo = tx.commit("test").unwrap();
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let new_wc_commit = write_random_commit(mut_repo);
mut_repo.edit(ws_name, &new_wc_commit).unwrap();
mut_repo.rebase_descendants().unwrap();
assert!(mut_repo.view().heads().contains(old_wc_commit.id()));
}
#[test]
fn test_edit_previous_empty_with_local_bookmark() {
// Test that MutableRepo::edit() does not abandon the previous commit if it
// is pointed by local bookmark.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let old_wc_commit = mut_repo
.new_commit(
vec![repo.store().root_commit_id().clone()],
repo.store().empty_merged_tree(),
)
.write()
.unwrap();
mut_repo.set_local_bookmark_target("b".as_ref(), RefTarget::normal(old_wc_commit.id().clone()));
let ws_name = WorkspaceName::DEFAULT.to_owned();
mut_repo.edit(ws_name.clone(), &old_wc_commit).unwrap();
let repo = tx.commit("test").unwrap();
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let new_wc_commit = write_random_commit(mut_repo);
mut_repo.edit(ws_name, &new_wc_commit).unwrap();
mut_repo.rebase_descendants().unwrap();
assert!(mut_repo.view().heads().contains(old_wc_commit.id()));
}
#[test]
fn test_edit_previous_empty_with_other_workspace() {
// Test that MutableRepo::edit() does not abandon the previous commit if it
// is pointed by another workspace
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let old_wc_commit = mut_repo
.new_commit(
vec![repo.store().root_commit_id().clone()],
repo.store().empty_merged_tree(),
)
.write()
.unwrap();
let ws_name = WorkspaceName::DEFAULT.to_owned();
mut_repo.edit(ws_name.clone(), &old_wc_commit).unwrap();
let other_ws_name = WorkspaceNameBuf::from("other");
mut_repo
.edit(other_ws_name.clone(), &old_wc_commit)
.unwrap();
let repo = tx.commit("test").unwrap();
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let new_wc_commit = write_random_commit(mut_repo);
mut_repo.edit(ws_name, &new_wc_commit).unwrap();
mut_repo.rebase_descendants().unwrap();
assert!(mut_repo.view().heads().contains(old_wc_commit.id()));
}
#[test]
fn test_edit_previous_empty_non_head() {
// Test that MutableRepo::edit() does not abandon the previous commit if it
// was empty and is not a head
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let old_wc_commit = mut_repo
.new_commit(
vec![repo.store().root_commit_id().clone()],
repo.store().empty_merged_tree(),
)
.write()
.unwrap();
let old_child = mut_repo
.new_commit(vec![old_wc_commit.id().clone()], old_wc_commit.tree())
.write()
.unwrap();
let ws_name = WorkspaceName::DEFAULT.to_owned();
mut_repo.edit(ws_name.clone(), &old_wc_commit).unwrap();
let repo = tx.commit("test").unwrap();
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let new_wc_commit = write_random_commit(mut_repo);
mut_repo.edit(ws_name, &new_wc_commit).unwrap();
mut_repo.rebase_descendants().unwrap();
assert_eq!(
*mut_repo.view().heads(),
hashset! {old_child.id().clone(), new_wc_commit.id().clone()}
);
}
#[test]
fn test_edit_initial() {
// Test that MutableRepo::edit() can be used on the initial working-copy commit
// in a workspace
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let wc_commit = write_random_commit(tx.repo_mut());
let repo = tx.commit("test").unwrap();
let mut tx = repo.start_transaction();
let ws_name = WorkspaceNameBuf::from("new-workspace");
tx.repo_mut().edit(ws_name.clone(), &wc_commit).unwrap();
let repo = tx.commit("test").unwrap();
assert_eq!(repo.view().get_wc_commit_id(&ws_name), Some(wc_commit.id()));
}
#[test]
fn test_edit_hidden_commit() {
// Test that MutableRepo::edit() edits a hidden commit and updates
// the view head ids.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let wc_commit = write_random_commit(tx.repo_mut());
// Intentionally not doing tx.commit, so the commit id is not tracked
// in the view head ids.
let mut tx = repo.start_transaction();
let ws_name = WorkspaceName::DEFAULT.to_owned();
tx.repo_mut().edit(ws_name.clone(), &wc_commit).unwrap();
let repo = tx.commit("test").unwrap();
assert_eq!(repo.view().get_wc_commit_id(&ws_name), Some(wc_commit.id()));
assert_eq!(*repo.view().heads(), hashset! {wc_commit.id().clone()});
}
#[test]
fn test_add_head_success() {
// Test that MutableRepo::add_head() adds the head, and that it's still there
// after commit. It should also be indexed.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
// Create a commit outside of the repo by using a temporary transaction. Then
// add that as a head.
let mut tx = repo.start_transaction();
let new_commit = write_random_commit(tx.repo_mut());
drop(tx);
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
assert!(!mut_repo.view().heads().contains(new_commit.id()));
assert!(!index_has_id(mut_repo.index(), new_commit.id()));
mut_repo.add_head(&new_commit).unwrap();
assert!(mut_repo.view().heads().contains(new_commit.id()));
assert!(index_has_id(mut_repo.index(), new_commit.id()));
let repo = tx.commit("test").unwrap();
assert!(repo.view().heads().contains(new_commit.id()));
assert!(index_has_id(repo.index(), new_commit.id()));
}
#[test]
fn test_add_head_ancestor() {
// Test that MutableRepo::add_head() does not add a head if it's an ancestor of
// an existing head.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
let commit2 = write_random_commit_with_parents(tx.repo_mut(), &[&commit1]);
let commit3 = write_random_commit_with_parents(tx.repo_mut(), &[&commit2]);
let repo = tx.commit("test").unwrap();
assert_eq!(repo.view().heads(), &hashset! {commit3.id().clone()});
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
mut_repo.add_head(&commit1).unwrap();
assert_eq!(repo.view().heads(), &hashset! {commit3.id().clone()});
}
#[test]
fn test_add_head_not_immediate_child() {
// Test that MutableRepo::add_head() can be used for adding a head that is not
// an immediate child of a current head.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let initial = write_random_commit(tx.repo_mut());
let repo = tx.commit("test").unwrap();
// Create some commits outside of the repo by using a temporary transaction.
// Then add one of them as a head.
let mut tx = repo.start_transaction();
let rewritten = create_random_commit(tx.repo_mut())
.set_change_id(initial.change_id().clone())
.set_predecessors(vec![initial.id().clone()])
.write()
.unwrap();
let child = write_random_commit_with_parents(tx.repo_mut(), &[&rewritten]);
drop(tx);
assert_eq!(repo.view().heads(), &hashset! {initial.id().clone()});
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
mut_repo.add_head(&child).unwrap();
assert_eq!(
mut_repo.view().heads(),
&hashset! {initial.id().clone(), child.id().clone()}
);
assert!(index_has_id(mut_repo.index(), initial.id()));
assert!(index_has_id(mut_repo.index(), rewritten.id()));
assert!(index_has_id(mut_repo.index(), child.id()));
}
#[test]
fn test_remove_head() {
// Test that MutableRepo::remove_head() removes the head, and that it's still
// removed after commit. It should remain in the index, since we otherwise would
// have to reindex everything.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
let commit2 = write_random_commit_with_parents(tx.repo_mut(), &[&commit1]);
let commit3 = write_random_commit_with_parents(tx.repo_mut(), &[&commit2]);
let repo = tx.commit("test").unwrap();
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
assert!(mut_repo.view().heads().contains(commit3.id()));
mut_repo.remove_head(commit3.id());
let heads = mut_repo.view().heads().clone();
assert!(!heads.contains(commit3.id()));
assert!(!heads.contains(commit2.id()));
assert!(!heads.contains(commit1.id()));
assert!(index_has_id(mut_repo.index(), commit1.id()));
assert!(index_has_id(mut_repo.index(), commit2.id()));
assert!(index_has_id(mut_repo.index(), commit3.id()));
let repo = tx.commit("test").unwrap();
let heads = repo.view().heads().clone();
assert!(!heads.contains(commit3.id()));
assert!(!heads.contains(commit2.id()));
assert!(!heads.contains(commit1.id()));
assert!(index_has_id(repo.index(), commit1.id()));
assert!(index_has_id(repo.index(), commit2.id()));
assert!(index_has_id(repo.index(), commit3.id()));
}
#[test]
fn test_has_changed() {
// Test that MutableRepo::has_changed() reports changes iff the view has changed
// (e.g. not after setting a bookmark to point to where it was already
// pointing).
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let normal_remote_ref = |id: &CommitId| RemoteRef {
target: RefTarget::normal(id.clone()),
state: RemoteRefState::Tracked, // doesn't matter
};
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let commit1 = write_random_commit(mut_repo);
let commit2 = write_random_commit(mut_repo);
mut_repo.remove_head(commit2.id());
let ws_name = WorkspaceName::DEFAULT.to_owned();
mut_repo
.set_wc_commit(ws_name.clone(), commit1.id().clone())
.unwrap();
mut_repo.set_local_bookmark_target("main".as_ref(), RefTarget::normal(commit1.id().clone()));
mut_repo.set_remote_bookmark(
remote_symbol("main", "origin"),
normal_remote_ref(commit1.id()),
);
let repo = tx.commit("test").unwrap();
// Test the setup
assert_eq!(repo.view().heads(), &hashset! {commit1.id().clone()});
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
mut_repo.add_head(&commit1).unwrap();
mut_repo
.set_wc_commit(ws_name.clone(), commit1.id().clone())
.unwrap();
mut_repo.set_local_bookmark_target("main".as_ref(), RefTarget::normal(commit1.id().clone()));
mut_repo.set_remote_bookmark(
remote_symbol("main", "origin"),
normal_remote_ref(commit1.id()),
);
assert!(!mut_repo.has_changes());
mut_repo.remove_head(commit2.id());
mut_repo.set_local_bookmark_target("stable".as_ref(), RefTarget::absent());
mut_repo.set_remote_bookmark(remote_symbol("stable", "origin"), RemoteRef::absent());
assert!(!mut_repo.has_changes());
mut_repo.add_head(&commit2).unwrap();
assert!(mut_repo.has_changes());
mut_repo.remove_head(commit2.id());
assert!(!mut_repo.has_changes());
mut_repo
.set_wc_commit(ws_name.clone(), commit2.id().clone())
.unwrap();
assert!(mut_repo.has_changes());
mut_repo
.set_wc_commit(ws_name, commit1.id().clone())
.unwrap();
assert!(!mut_repo.has_changes());
mut_repo.set_local_bookmark_target("main".as_ref(), RefTarget::normal(commit2.id().clone()));
assert!(mut_repo.has_changes());
mut_repo.set_local_bookmark_target("main".as_ref(), RefTarget::normal(commit1.id().clone()));
mut_repo.remove_head(commit2.id());
assert!(!mut_repo.has_changes());
mut_repo.set_remote_bookmark(
remote_symbol("main", "origin"),
normal_remote_ref(commit2.id()),
);
assert!(mut_repo.has_changes());
mut_repo.set_remote_bookmark(
remote_symbol("main", "origin"),
normal_remote_ref(commit1.id()),
);
assert!(!mut_repo.has_changes());
}
#[test]
fn test_rebase_descendants_simple() {
// There are many additional tests of this functionality in `test_rewrite.rs`.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
let commit2 = write_random_commit_with_parents(tx.repo_mut(), &[&commit1]);
let commit3 = write_random_commit_with_parents(tx.repo_mut(), &[&commit2]);
let commit4 = write_random_commit_with_parents(tx.repo_mut(), &[&commit1]);
let commit5 = write_random_commit_with_parents(tx.repo_mut(), &[&commit4]);
let repo = tx.commit("test").unwrap();
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let commit6 = write_random_commit_with_parents(mut_repo, &[&commit1]);
mut_repo.set_rewritten_commit(commit2.id().clone(), commit6.id().clone());
mut_repo.record_abandoned_commit(&commit4);
let rebase_map =
rebase_descendants_with_options_return_map(tx.repo_mut(), &RebaseOptions::default());
// Commit 3 got rebased onto commit 2's replacement, i.e. commit 6
assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit3, &[commit6.id()]);
// Commit 5 got rebased onto commit 4's parent, i.e. commit 1
assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit5, &[commit1.id()]);
assert_eq!(rebase_map.len(), 2);
// No more descendants to rebase if we try again.
let rebase_map =
rebase_descendants_with_options_return_map(tx.repo_mut(), &RebaseOptions::default());
assert_eq!(rebase_map.len(), 0);
}
#[test]
fn test_rebase_descendants_divergent_rewrite() {
// Test rebasing descendants when one commit was rewritten to several other
// commits. There are many additional tests of this functionality in
// `test_rewrite.rs`.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
let commit2 = write_random_commit_with_parents(tx.repo_mut(), &[&commit1]);
let _commit3 = write_random_commit_with_parents(tx.repo_mut(), &[&commit2]);
let repo = tx.commit("test").unwrap();
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let commit4 = write_random_commit_with_parents(mut_repo, &[&commit1]);
let commit5 = write_random_commit_with_parents(mut_repo, &[&commit1]);
mut_repo.set_divergent_rewrite(
commit2.id().clone(),
vec![commit4.id().clone(), commit5.id().clone()],
);
// Commit 3 does *not* get rebased because it's unclear if it should go onto
// commit 4 or commit 5
let rebase_map =
rebase_descendants_with_options_return_map(tx.repo_mut(), &RebaseOptions::default());
assert!(rebase_map.is_empty());
}
#[test]
fn test_rename_remote() {
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let commit = write_random_commit(mut_repo);
let remote_ref = RemoteRef {
target: RefTarget::normal(commit.id().clone()),
state: RemoteRefState::Tracked, // doesn't matter
};
mut_repo.set_remote_bookmark(remote_symbol("main", "origin"), remote_ref.clone());
mut_repo.rename_remote("origin".as_ref(), "upstream".as_ref());
assert_eq!(
mut_repo.get_remote_bookmark(remote_symbol("main", "upstream")),
remote_ref
);
assert_eq!(
mut_repo.get_remote_bookmark(remote_symbol("main", "origin")),
RemoteRef::absent()
);
}
#[test]
fn test_remove_wc_commit_previous_not_discardable() {
// Test that MutableRepo::remove_wc_commit() does not usually abandon the
// previous commit.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let old_wc_commit = write_random_commit(mut_repo);
let ws_name = WorkspaceName::DEFAULT.to_owned();
mut_repo.edit(ws_name.clone(), &old_wc_commit).unwrap();
let repo = tx.commit("test").unwrap();
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
mut_repo.remove_wc_commit(&ws_name).unwrap();
mut_repo.rebase_descendants().unwrap();
assert!(mut_repo.view().heads().contains(old_wc_commit.id()));
}
#[test]
fn test_remove_wc_commit_previous_discardable() {
// Test that MutableRepo::remove_wc_commit() abandons the previous commit
// if it was discardable.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let old_wc_commit = mut_repo
.new_commit(
vec![repo.store().root_commit_id().clone()],
repo.store().empty_merged_tree(),
)
.write()
.unwrap();
let ws_name = WorkspaceName::DEFAULT.to_owned();
mut_repo.edit(ws_name.clone(), &old_wc_commit).unwrap();
let repo = tx.commit("test").unwrap();
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
mut_repo.remove_wc_commit(&ws_name).unwrap();
mut_repo.rebase_descendants().unwrap();
assert!(!mut_repo.view().heads().contains(old_wc_commit.id()));
}
#[test]
fn test_reparent_descendants() {
// Test that MutableRepo::reparent_descendants() reparents descendants of
// rewritten commits without altering their content.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let commit_a = write_random_commit(tx.repo_mut());
let commit_b = write_random_commit(tx.repo_mut());
let commit_child_a_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a, &commit_b]);
let commit_grandchild_a_b =
write_random_commit_with_parents(tx.repo_mut(), &[&commit_child_a_b]);
let commit_child_a = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
let commit_child_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
let mut_repo = tx.repo_mut();
for (bookmark, commit) in [
("b", &commit_b),
("child_a_b", &commit_child_a_b),
("grandchild_a_b", &commit_grandchild_a_b),
("child_a", &commit_child_a),
("child_b", &commit_child_b),
] {
mut_repo
.set_local_bookmark_target(bookmark.as_ref(), RefTarget::normal(commit.id().clone()));
}
let repo = tx.commit("test").unwrap();
// Rewrite "commit_a".
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
mut_repo
.rewrite_commit(&commit_a)
.set_tree(create_random_tree(&repo))
.write()
.unwrap();
let reparented = mut_repo.reparent_descendants().unwrap();
// "child_a_b", "grandchild_a_b" and "child_a" (3 commits) must have been
// reparented.
assert_eq!(reparented, 3);
let repo = tx.commit("test").unwrap();
for (bookmark, commit) in [
("b", &commit_b),
("child_a_b", &commit_child_a_b),
("grandchild_a_b", &commit_grandchild_a_b),
("child_a", &commit_child_a),
("child_b", &commit_child_b),
] {
let rewritten_id = repo
.view()
.get_local_bookmark(bookmark.as_ref())
.as_normal()
.unwrap()
.clone();
if matches!(bookmark, "b" | "child_b") {
// "b" and "child_b" have been kept untouched.
assert_eq!(commit.id(), &rewritten_id);
} else {
// All commits except "b", and "child_b" have been reparented while keeping
// their content.
assert_ne!(commit.id(), &rewritten_id);
let rewritten_commit = repo.store().get_commit(&rewritten_id).unwrap();
assert_eq!(commit.tree_ids(), rewritten_commit.tree_ids());
let (parent_ids, rewritten_parent_ids) =
(commit.parent_ids(), rewritten_commit.parent_ids());
assert_eq!(parent_ids.len(), rewritten_parent_ids.len());
assert_ne!(parent_ids, rewritten_parent_ids);
}
}
}
#[test]
fn test_bookmark_hidden_commit() {
// Test that MutableRepo::set_local_bookmark_target() on a hidden commit makes
// it visible.
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let root_commit = repo.store().root_commit();
let mut tx = repo.start_transaction();
let commit = write_random_commit(tx.repo_mut());
tx.repo_mut().remove_head(commit.id());
let repo = tx.commit("test").unwrap();
// Test the setup
assert_eq!(*repo.view().heads(), hashset! {root_commit.id().clone()});
let mut tx = repo.start_transaction();
tx.repo_mut()
.set_local_bookmark_target("b".as_ref(), RefTarget::normal(commit.id().clone()));
let repo = tx.commit("test").unwrap();
assert_eq!(*repo.view().heads(), hashset! {commit.id().clone()});
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_local_working_copy_executable_bit.rs | lib/tests/test_local_working_copy_executable_bit.rs | // Copyright 2025 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![cfg(unix)] // Nothing to test on Windows
use std::fs;
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
use std::sync::Arc;
use jj_lib::backend::TreeValue;
use jj_lib::commit::Commit;
use jj_lib::merged_tree::MergedTree;
use jj_lib::repo::Repo as _;
use jj_lib::repo_path::RepoPath;
use jj_lib::store::Store;
use testutils::TestTreeBuilder;
use testutils::TestWorkspace;
use testutils::repo_path;
/// Assert that a file's executable bit matches the expected value.
#[track_caller]
fn assert_file_executable(path: &Path, expected: bool) {
let perms = path.metadata().unwrap().permissions();
let actual = (perms.mode() & 0o100) == 0o100;
assert_eq!(actual, expected);
}
/// Set the executable bit of a file on the filesystem.
#[track_caller]
fn set_file_executable(path: &Path, executable: bool) {
let prev_mode = path.metadata().unwrap().permissions().mode();
let is_executable = prev_mode & 0o100 != 0;
assert_ne!(executable, is_executable, "why are you calling this?");
let new_mode = if executable { 0o755 } else { 0o644 };
fs::set_permissions(path, PermissionsExt::from_mode(new_mode)).unwrap();
}
/// Assert that a tree value's executable bit matches the expected value.
#[track_caller]
fn assert_tree_executable(tree_val: TreeValue, expected: bool) {
if let TreeValue::File { executable, .. } = tree_val {
assert_eq!(executable, expected);
} else {
panic!()
}
}
/// Create a tree with an empty file having the given executable bit. Returns
/// the new tree id.
#[track_caller]
fn create_tree_executable(
store: &Arc<Store>,
repo_path: &RepoPath,
executable: bool,
) -> MergedTree {
let mut tree_builder = TestTreeBuilder::new(store.clone());
tree_builder.file(repo_path, "").executable(executable);
tree_builder.write_merged_tree()
}
/// Build two commits that write the executable bit of a file as true/false.
#[track_caller]
fn prepare_exec_commits(ws: &TestWorkspace, repo_path: &RepoPath) -> (Commit, Commit) {
let store = ws.repo.store();
let tree_exec = create_tree_executable(store, repo_path, true);
let tree_no_exec = create_tree_executable(store, repo_path, false);
let commit_with_id = |id| testutils::commit_with_tree(ws.repo.store(), id);
let commit_exec = commit_with_id(tree_exec);
let commit_no_exec = commit_with_id(tree_no_exec);
assert_ne!(commit_exec, commit_no_exec);
(commit_exec, commit_no_exec)
}
/// Test that checking out a tree writes the correct executable bit to the
/// filesystem.
#[test]
fn test_exec_bit_checkout() {
let mut ws = TestWorkspace::init();
let path = &ws.workspace.workspace_root().join("file");
let repo_path = repo_path("file");
let (exec, no_exec) = prepare_exec_commits(&ws, repo_path);
let mut checkout_exec_commit = |executable| {
let commit = if executable { &exec } else { &no_exec };
let op_id = ws.repo.op_id().clone();
ws.workspace.check_out(op_id, None, commit).unwrap();
};
// Checkout commits and ensure the filesystem is updated correctly.
assert!(!fs::exists(path).unwrap());
for exec in [true, false, true] {
checkout_exec_commit(exec);
assert_file_executable(path, exec);
}
}
/// Test that snapshotting files stores the correct executable bit in the tree.
#[test]
fn test_exec_bit_snapshot() {
let mut ws = TestWorkspace::init();
let path = &ws.workspace.workspace_root().join("file");
let repo_path = repo_path("file");
// Snapshot, then assert the tree has the expected executable bit.
let mut snapshot_assert_exec_bit = |expected| {
let merged_tree_val = ws.snapshot().unwrap().path_value(repo_path).unwrap();
let tree_val = merged_tree_val.into_resolved().unwrap().unwrap();
assert_tree_executable(tree_val, expected);
};
// Snapshot tree values when the file is/isn't executable.
fs::write(path, "initial content").unwrap();
snapshot_assert_exec_bit(false);
fs::write(path, "first change").unwrap();
snapshot_assert_exec_bit(false);
set_file_executable(path, true);
snapshot_assert_exec_bit(true);
fs::write(path, "second change").unwrap();
snapshot_assert_exec_bit(true);
// Back to the same contents as before, but different exec bit.
fs::write(path, "first change").unwrap();
set_file_executable(path, false);
snapshot_assert_exec_bit(false);
set_file_executable(path, true);
snapshot_assert_exec_bit(true);
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_git_backend.rs | lib/tests/test_git_backend.rs | // Copyright 2024 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::collections::HashSet;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
use std::time::SystemTime;
use futures::executor::block_on_stream;
use itertools::Itertools as _;
use jj_lib::backend::CommitId;
use jj_lib::backend::CopyRecord;
use jj_lib::commit::Commit;
use jj_lib::conflict_labels::ConflictLabels;
use jj_lib::git_backend::GitBackend;
use jj_lib::git_backend::JJ_TREES_COMMIT_HEADER;
use jj_lib::merge::Merge;
use jj_lib::merged_tree::MergedTree;
use jj_lib::object_id::ObjectId as _;
use jj_lib::repo::ReadonlyRepo;
use jj_lib::repo::Repo as _;
use jj_lib::repo_path::RepoPath;
use jj_lib::repo_path::RepoPathBuf;
use jj_lib::stacked_table::TableSegment as _;
use jj_lib::stacked_table::TableStore;
use jj_lib::store::Store;
use jj_lib::transaction::Transaction;
use maplit::hashmap;
use maplit::hashset;
use testutils::TestRepo;
use testutils::TestRepoBackend;
use testutils::assert_tree_eq;
use testutils::commit_with_tree;
use testutils::create_random_commit;
use testutils::create_single_tree;
use testutils::create_tree;
use testutils::is_external_tool_installed;
use testutils::repo_path;
use testutils::repo_path_buf;
use testutils::write_random_commit;
use testutils::write_random_commit_with_parents;
fn get_git_backend(repo: &Arc<ReadonlyRepo>) -> &GitBackend {
repo.store().backend_impl().unwrap()
}
fn collect_no_gc_refs(git_repo_path: &Path) -> HashSet<CommitId> {
// Load fresh git repo to isolate from false caching issue. Here we want to
// ensure that the underlying data is correct. We could test the in-memory
// data as well, but we don't have any special handling in our code.
let git_repo = gix::open(git_repo_path).unwrap();
let git_refs = git_repo.references().unwrap();
let no_gc_refs_iter = git_refs.prefixed("refs/jj/keep/").unwrap();
no_gc_refs_iter
.map(|git_ref| CommitId::from_bytes(git_ref.unwrap().id().as_bytes()))
.collect()
}
fn get_copy_records(
store: &Store,
paths: Option<&[RepoPathBuf]>,
a: &Commit,
b: &Commit,
) -> HashMap<String, String> {
let stream = store.get_copy_records(paths, a.id(), b.id()).unwrap();
let mut res: HashMap<String, String> = HashMap::new();
for CopyRecord { target, source, .. } in block_on_stream(stream).filter_map(|r| r.ok()) {
res.insert(
target.as_internal_file_string().into(),
source.as_internal_file_string().into(),
);
}
res
}
fn make_commit(
tx: &mut Transaction,
parents: Vec<CommitId>,
content: &[(&RepoPath, &str)],
) -> Commit {
let tree = create_tree(tx.base_repo(), content);
tx.repo_mut().new_commit(parents, tree).write().unwrap()
}
fn list_dir(dir: &Path) -> Vec<String> {
std::fs::read_dir(dir)
.unwrap()
.map(|entry| entry.unwrap().file_name().to_str().unwrap().to_owned())
.sorted()
.collect()
}
#[test]
fn test_gc() {
// TODO: Better way to disable the test if git command couldn't be executed
if !is_external_tool_installed("git") {
eprintln!("Skipping because git command might fail to run");
return;
}
let test_repo = TestRepo::init_with_backend(TestRepoBackend::Git);
let repo = test_repo.repo;
let git_repo_path = get_git_backend(&repo).git_repo_path();
let base_index = repo.readonly_index();
// Set up commits:
//
// H (predecessor: D)
// G |
// |\|
// | F
// E |
// D | |
// C |/
// |/
// B
// A
let mut tx = repo.start_transaction();
let commit_a = write_random_commit(tx.repo_mut());
let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_c]);
let commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
let commit_f = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
let commit_g = write_random_commit_with_parents(tx.repo_mut(), &[&commit_e, &commit_f]);
let commit_h = create_random_commit(tx.repo_mut())
.set_parents(vec![commit_f.id().clone()])
.set_predecessors(vec![commit_d.id().clone()])
.write()
.unwrap();
let repo = tx.commit("test").unwrap();
assert_eq!(
*repo.view().heads(),
hashset! {
commit_d.id().clone(),
commit_g.id().clone(),
commit_h.id().clone(),
},
);
// At first, all commits have no-gc refs
assert_eq!(
collect_no_gc_refs(git_repo_path),
hashset! {
commit_a.id().clone(),
commit_b.id().clone(),
commit_c.id().clone(),
commit_d.id().clone(),
commit_e.id().clone(),
commit_f.id().clone(),
commit_g.id().clone(),
commit_h.id().clone(),
},
);
// Empty index, but all kept by file modification time
// (Beware that this invokes "git gc" and refs will be packed.)
repo.store()
.gc(base_index.as_index(), SystemTime::UNIX_EPOCH)
.unwrap();
assert_eq!(
collect_no_gc_refs(git_repo_path),
hashset! {
commit_a.id().clone(),
commit_b.id().clone(),
commit_c.id().clone(),
commit_d.id().clone(),
commit_e.id().clone(),
commit_f.id().clone(),
commit_g.id().clone(),
commit_h.id().clone(),
},
);
// Don't rely on the exact system time because file modification time might
// have lower precision for example.
let now = || SystemTime::now() + Duration::from_secs(1);
// All reachable: redundant no-gc refs will be removed
repo.store().gc(repo.index(), now()).unwrap();
assert_eq!(
collect_no_gc_refs(git_repo_path),
hashset! {
commit_d.id().clone(),
commit_g.id().clone(),
commit_h.id().clone(),
},
);
// G is no longer reachable
let mut mut_index = base_index.start_modification();
mut_index.add_commit(&commit_a).unwrap();
mut_index.add_commit(&commit_b).unwrap();
mut_index.add_commit(&commit_c).unwrap();
mut_index.add_commit(&commit_d).unwrap();
mut_index.add_commit(&commit_e).unwrap();
mut_index.add_commit(&commit_f).unwrap();
mut_index.add_commit(&commit_h).unwrap();
repo.store().gc(mut_index.as_index(), now()).unwrap();
assert_eq!(
collect_no_gc_refs(git_repo_path),
hashset! {
commit_d.id().clone(),
commit_e.id().clone(),
commit_h.id().clone(),
},
);
// D|E|H are no longer reachable
let mut mut_index = base_index.start_modification();
mut_index.add_commit(&commit_a).unwrap();
mut_index.add_commit(&commit_b).unwrap();
mut_index.add_commit(&commit_c).unwrap();
mut_index.add_commit(&commit_f).unwrap();
repo.store().gc(mut_index.as_index(), now()).unwrap();
assert_eq!(
collect_no_gc_refs(git_repo_path),
hashset! {
commit_c.id().clone(),
commit_f.id().clone(),
},
);
// B|C|F are no longer reachable
let mut mut_index = base_index.start_modification();
mut_index.add_commit(&commit_a).unwrap();
repo.store().gc(mut_index.as_index(), now()).unwrap();
assert_eq!(
collect_no_gc_refs(git_repo_path),
hashset! {
commit_a.id().clone(),
},
);
// All unreachable
repo.store().gc(base_index.as_index(), now()).unwrap();
assert_eq!(collect_no_gc_refs(git_repo_path), hashset! {});
}
#[test]
fn test_gc_extra_table() {
if !is_external_tool_installed("git") {
eprintln!("Skipping because git command might fail to run");
return;
}
let test_repo = TestRepo::init_with_backend(TestRepoBackend::Git);
let extra_path = test_repo.repo_path().join("store").join("extra");
let extra_key_size = test_repo.repo.store().root_commit_id().as_bytes().len();
let load_repo = || {
test_repo
.env
.load_repo_at_head(test_repo.repo.settings(), test_repo.repo_path())
};
let load_extra_table = || {
TableStore::load(extra_path.clone(), extra_key_size)
.get_head()
.unwrap()
};
let collect_extra_segment_num_entries = || {
let mut num_entries = load_extra_table()
.ancestor_segments()
.map(|table| table.segment_num_entries())
.collect_vec();
num_entries.reverse();
num_entries
};
// Sanity check for the initial state
assert_eq!(collect_extra_segment_num_entries(), [0]);
assert_eq!(list_dir(&extra_path).len(), 1 + 1); // empty segment + "heads"
// Write 4 commits
let mut tx = test_repo.repo.start_transaction();
for _ in 0..4 {
write_random_commit(tx.repo_mut());
}
tx.commit("test").unwrap();
// The first 3 will be squashed into one table segment
assert_eq!(collect_extra_segment_num_entries(), [3, 1]);
assert_eq!(list_dir(&extra_path).len(), 5 + 1);
// Reload repo to invalidate cache in TableStore
let repo = load_repo();
let index = repo.readonly_index().as_index();
// All segments should be kept by modification time
repo.store().gc(index, SystemTime::UNIX_EPOCH).unwrap();
assert_eq!(collect_extra_segment_num_entries(), [3, 1]);
assert_eq!(list_dir(&extra_path).len(), 5 + 1);
// All unreachable segments should be removed
let now = SystemTime::now() + Duration::from_secs(1);
repo.store().gc(index, now).unwrap();
assert_eq!(collect_extra_segment_num_entries(), [3, 1]);
assert_eq!(list_dir(&extra_path).len(), 2 + 1);
// Ensure that repo is still loadable
load_repo();
}
#[test]
fn test_copy_detection() {
let test_repo = TestRepo::init_with_backend(TestRepoBackend::Git);
let repo = &test_repo.repo;
let paths = &[
repo_path_buf("file0"),
repo_path_buf("file1"),
repo_path_buf("file2"),
];
let mut tx = repo.start_transaction();
let commit_a = make_commit(
&mut tx,
vec![repo.store().root_commit_id().clone()],
&[(&paths[0], "content")],
);
let commit_b = make_commit(
&mut tx,
vec![commit_a.id().clone()],
&[(&paths[1], "content")],
);
let commit_c = make_commit(
&mut tx,
vec![commit_b.id().clone()],
&[(&paths[2], "content")],
);
let store = repo.store();
assert_eq!(
get_copy_records(store, Some(paths), &commit_a, &commit_b),
HashMap::from([("file1".to_string(), "file0".to_string())])
);
assert_eq!(
get_copy_records(store, Some(paths), &commit_b, &commit_c),
HashMap::from([("file2".to_string(), "file1".to_string())])
);
assert_eq!(
get_copy_records(store, Some(paths), &commit_a, &commit_c),
HashMap::from([("file2".to_string(), "file0".to_string())])
);
assert_eq!(
get_copy_records(store, None, &commit_a, &commit_c),
HashMap::from([("file2".to_string(), "file0".to_string())])
);
assert_eq!(
get_copy_records(store, Some(&[paths[1].clone()]), &commit_a, &commit_c),
HashMap::default(),
);
assert_eq!(
get_copy_records(store, Some(paths), &commit_c, &commit_c),
HashMap::default(),
);
}
#[test]
fn test_copy_detection_file_and_dir() {
let test_repo = TestRepo::init_with_backend(TestRepoBackend::Git);
let repo = &test_repo.repo;
// a -> b (file)
// b -> a (dir)
// c -> c/file (file)
let mut tx = repo.start_transaction();
let commit_a = make_commit(
&mut tx,
vec![repo.store().root_commit_id().clone()],
&[
(repo_path("a"), "content1"),
(repo_path("b/file"), "content2"),
(repo_path("c"), "content3"),
],
);
let commit_b = make_commit(
&mut tx,
vec![commit_a.id().clone()],
&[
(repo_path("a/file"), "content2"),
(repo_path("b"), "content1"),
(repo_path("c/file"), "content3"),
],
);
assert_eq!(
get_copy_records(repo.store(), None, &commit_a, &commit_b),
hashmap! {
"b".to_owned() => "a".to_owned(),
"a/file".to_owned() => "b/file".to_owned(),
"c/file".to_owned() => "c".to_owned(),
}
);
}
#[test]
fn test_jj_trees_header_with_one_tree() {
let test_repo = TestRepo::init_with_backend(TestRepoBackend::Git);
let repo = test_repo.repo;
let git_backend = get_git_backend(&repo);
let git_repo = git_backend.git_repo();
let tree_1 = create_single_tree(&repo, &[(repo_path("file"), "aaa")]);
let tree_2 = create_single_tree(&repo, &[(repo_path("file"), "bbb")]);
// Create a normal commit with tree 1
let commit = commit_with_tree(
repo.store(),
MergedTree::resolved(repo.store().clone(), tree_1.id().clone()),
);
let git_commit_id = gix::ObjectId::from_bytes_or_panic(commit.id().as_bytes());
let git_commit = git_repo.find_commit(git_commit_id).unwrap();
// Add `jj:trees` with a single tree which is different from the Git commit tree
let mut new_commit: gix::objs::Commit = git_commit.decode().unwrap().try_into().unwrap();
new_commit.extra_headers = vec![(
JJ_TREES_COMMIT_HEADER.into(),
tree_2.id().to_string().into(),
)];
let new_commit_id = git_repo.write_object(&new_commit).unwrap();
let new_commit_id = CommitId::from_bytes(new_commit_id.as_bytes());
// Import new commit into `jj` repo. This should fail, because allowing a
// non-conflicted commit to have a different tree in `jj` than in Git could be
// used to hide malicious code.
insta::assert_debug_snapshot!(git_backend.import_head_commits(std::slice::from_ref(&new_commit_id)), @r#"
Err(
ReadObject {
object_type: "commit",
hash: "87df728a30166ce1de0bf883948dd66b74cf25a0",
source: "Invalid jj:trees header",
},
)
"#);
}
#[test]
fn test_conflict_headers_roundtrip() {
let test_repo = TestRepo::init_with_backend(TestRepoBackend::Git);
let repo = test_repo.repo;
let tree_1 = create_single_tree(&repo, &[(repo_path("file"), "aaa")]);
let tree_2 = create_single_tree(&repo, &[(repo_path("file"), "bbb")]);
let tree_3 = create_single_tree(&repo, &[(repo_path("file"), "ccc")]);
let tree_4 = create_single_tree(&repo, &[(repo_path("file"), "ddd")]);
let tree_5 = create_single_tree(&repo, &[(repo_path("file"), "eee")]);
let tree_6 = create_single_tree(&repo, &[(repo_path("file"), "fff")]);
let tree_7 = create_single_tree(&repo, &[(repo_path("file"), "ggg")]);
// This creates a Git commit header with leading and trailing newlines to ensure
// that it can still be parsed correctly. The resulting `jj:conflict-labels`
// header value will look like `\nbase 1\nside 2\n\nside 3\n\n\n`.
let merged_tree = MergedTree::new(
repo.store().clone(),
Merge::from_vec(vec![
tree_1.id().clone(),
tree_2.id().clone(),
tree_3.id().clone(),
tree_4.id().clone(),
tree_5.id().clone(),
tree_6.id().clone(),
tree_7.id().clone(),
]),
ConflictLabels::from_vec(vec![
"".into(),
// Test that various UTF-8 characters can be encoded and decoded. Git doesn't allow
// storing arbitrary binary data in headers, but it does allow storing UTF-8 encoded
// strings as long as they don't contain U+0000 codepoints.
"base 1 \"utf8 \u{67d4}\u{8853} \u{ba4}\u{bc1}\u{bb0}\u{bc1} \u{2699}\u{fe0f}\"".into(),
"side 2".into(),
"".into(),
"side 3".into(),
"".into(),
"".into(),
]),
);
// Create a commit with the conflicted tree.
let commit = commit_with_tree(repo.store(), merged_tree.clone());
// Clear cached commit to ensure it is re-read.
repo.store().clear_caches();
// Conflict trees and labels should be preserved on read.
assert_tree_eq!(
repo.store().get_commit(commit.id()).unwrap().tree(),
merged_tree
);
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_workspace.rs | lib/tests/test_workspace.rs | // Copyright 2021 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::thread;
use assert_matches::assert_matches;
use jj_lib::ref_name::WorkspaceNameBuf;
use jj_lib::repo::Repo as _;
use jj_lib::workspace::Workspace;
use jj_lib::workspace::WorkspaceLoadError;
use jj_lib::workspace::default_working_copy_factories;
use jj_lib::workspace::default_working_copy_factory;
use testutils::TestEnvironment;
use testutils::TestWorkspace;
#[test]
fn test_load_bad_path() {
let settings = testutils::user_settings();
let test_env = TestEnvironment::init();
let workspace_root = test_env.root().to_owned();
// We haven't created a repo in the workspace_root, so it should fail to load.
let result = Workspace::load(
&settings,
&workspace_root,
&test_env.default_store_factories(),
&default_working_copy_factories(),
);
assert_matches!(
result.err(),
Some(WorkspaceLoadError::NoWorkspaceHere(root)) if root == workspace_root
);
}
#[test]
fn test_init_additional_workspace() {
let settings = testutils::user_settings();
let test_workspace = TestWorkspace::init_with_settings(&settings);
let workspace = &test_workspace.workspace;
let ws2_name = WorkspaceNameBuf::from("ws2");
let ws2_root = test_workspace.root_dir().join("ws2_root");
std::fs::create_dir(&ws2_root).unwrap();
let (ws2, repo) = Workspace::init_workspace_with_existing_repo(
&ws2_root,
test_workspace.repo_path(),
&test_workspace.repo,
&*default_working_copy_factory(),
ws2_name.clone(),
)
.unwrap();
let wc_commit_id = repo.view().get_wc_commit_id(&ws2_name);
assert_ne!(wc_commit_id, None);
let wc_commit_id = wc_commit_id.unwrap();
let wc_commit = repo.store().get_commit(wc_commit_id).unwrap();
assert_eq!(
wc_commit.parent_ids(),
vec![repo.store().root_commit_id().clone()]
);
assert_eq!(ws2.workspace_name(), &ws2_name);
assert_eq!(
*ws2.repo_path(),
dunce::canonicalize(workspace.repo_path()).unwrap()
);
assert_eq!(
*ws2.workspace_root(),
dunce::canonicalize(&ws2_root).unwrap()
);
let same_workspace = Workspace::load(
&settings,
&ws2_root,
&test_workspace.env.default_store_factories(),
&default_working_copy_factories(),
);
assert!(same_workspace.is_ok());
let same_workspace = same_workspace.unwrap();
assert_eq!(same_workspace.workspace_name(), &ws2_name);
assert_eq!(
*same_workspace.repo_path(),
dunce::canonicalize(workspace.repo_path()).unwrap()
);
assert_eq!(same_workspace.workspace_root(), ws2.workspace_root());
}
#[cfg(unix)]
#[test]
fn test_init_additional_workspace_non_utf8_path() {
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt as _;
let settings = testutils::user_settings();
let test_env = TestEnvironment::init();
if testutils::check_strict_utf8_fs(test_env.root()) {
eprintln!(
"Skipping test \"test_init_additional_workspace_non_utf8_path\" due to strict UTF-8 \
filesystem for path {:?}",
test_env.root()
);
return;
}
let ws1_root = test_env.root().join(OsStr::from_bytes(b"ws1_root\xe0"));
std::fs::create_dir(&ws1_root).unwrap();
let (ws1, repo) = Workspace::init_simple(&settings, &ws1_root).unwrap();
let ws2_name = WorkspaceNameBuf::from("ws2");
let ws2_root = test_env.root().join(OsStr::from_bytes(b"ws2_root\xe0"));
std::fs::create_dir(&ws2_root).unwrap();
let (ws2, _repo) = Workspace::init_workspace_with_existing_repo(
&ws2_root,
ws1.repo_path(),
&repo,
&*default_working_copy_factory(),
ws2_name.clone(),
)
.unwrap();
assert_eq!(ws2.workspace_name(), &ws2_name);
assert_eq!(
*ws2.repo_path(),
dunce::canonicalize(ws1.repo_path()).unwrap()
);
assert_eq!(
*ws2.workspace_root(),
dunce::canonicalize(&ws2_root).unwrap()
);
let same_workspace = Workspace::load(
&settings,
&ws2_root,
&test_env.default_store_factories(),
&default_working_copy_factories(),
);
let same_workspace = same_workspace.unwrap();
assert_eq!(same_workspace.workspace_name(), &ws2_name);
assert_eq!(
*same_workspace.repo_path(),
dunce::canonicalize(ws1.repo_path()).unwrap()
);
assert_eq!(same_workspace.workspace_root(), ws2.workspace_root());
}
/// Test cross-thread access to a workspace, which requires it to be Send
#[test]
fn test_sendable() {
let test_workspace = TestWorkspace::init();
let root = test_workspace.workspace.workspace_root().to_owned();
thread::spawn(move || {
let shared_workspace = test_workspace.workspace;
assert_eq!(shared_workspace.workspace_root(), &root);
})
.join()
.unwrap();
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_rewrite_duplicate.rs | lib/tests/test_rewrite_duplicate.rs | // Copyright 2024 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use itertools::Itertools as _;
use jj_lib::backend::CommitId;
use jj_lib::repo::Repo as _;
use jj_lib::rewrite::duplicate_commits;
use jj_lib::transaction::Transaction;
use pollster::FutureExt as _;
use testutils::TestRepo;
use testutils::assert_tree_eq;
use testutils::create_tree;
use testutils::repo_path;
/// Tests the tree contents produced by `duplicate_commits` for commits on a
/// linear chain A..E: duplicating onto ancestors, descendants, multiple
/// parents, and inserting a duplicate between existing commits.
#[test]
fn test_duplicate_linear_contents() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;

    let path_1 = repo_path("file1");
    let path_2 = repo_path("file2");
    let empty_tree = repo.store().empty_merged_tree();
    let tree_1 = create_tree(repo, &[(path_1, "content1")]);
    let tree_2 = create_tree(repo, &[(path_2, "content2")]);
    let tree_1_2 = create_tree(repo, &[(path_1, "content1"), (path_2, "content2")]);

    // E [=file2]
    // D [-file1, =file2]
    // C [=file1, +file2]
    // B [+file1]
    // A []
    let mut tx = repo.start_transaction();
    let commit_a = tx
        .repo_mut()
        .new_commit(
            vec![repo.store().root_commit_id().clone()],
            empty_tree.clone(),
        )
        .write()
        .unwrap();
    let commit_b = tx
        .repo_mut()
        .new_commit(vec![commit_a.id().clone()], tree_1.clone())
        .write()
        .unwrap();
    let commit_c = tx
        .repo_mut()
        .new_commit(vec![commit_b.id().clone()], tree_1_2.clone())
        .write()
        .unwrap();
    let commit_d = tx
        .repo_mut()
        .new_commit(vec![commit_c.id().clone()], tree_2.clone())
        .write()
        .unwrap();
    let commit_e = tx
        .repo_mut()
        .new_commit(vec![commit_d.id().clone()], tree_2.clone())
        .write()
        .unwrap();
    let repo = tx.commit("test").unwrap();

    // Duplicates `target_commits` with `parent_commit_ids` as new parents and
    // `children_commit_ids` as new children, returning the resulting stats.
    let duplicate_in_between = |tx: &mut Transaction,
                                target_commits: &[&CommitId],
                                parent_commit_ids: &[&CommitId],
                                children_commit_ids: &[&CommitId]| {
        duplicate_commits(
            tx.repo_mut(),
            &target_commits.iter().copied().cloned().collect_vec(),
            &HashMap::new(),
            &parent_commit_ids.iter().copied().cloned().collect_vec(),
            &children_commit_ids.iter().copied().cloned().collect_vec(),
        )
        .block_on()
        .unwrap()
    };
    // Like `duplicate_in_between`, but without placing any children on top.
    let duplicate_onto =
        |tx: &mut Transaction, target_commits: &[&CommitId], parent_commit_ids: &[&CommitId]| {
            duplicate_in_between(tx, target_commits, parent_commit_ids, &[])
        };

    // Duplicate empty commit onto empty ancestor tree
    let mut tx = repo.start_transaction();
    let stats = duplicate_onto(&mut tx, &[commit_e.id()], &[commit_a.id()]);
    assert_tree_eq!(stats.duplicated_commits[commit_e.id()].tree(), empty_tree);

    // Duplicate empty commit onto non-empty ancestor tree
    let mut tx = repo.start_transaction();
    let stats = duplicate_onto(&mut tx, &[commit_e.id()], &[commit_b.id()]);
    assert_tree_eq!(stats.duplicated_commits[commit_e.id()].tree(), tree_1);

    // Duplicate non-empty commit onto empty ancestor tree
    let mut tx = repo.start_transaction();
    let stats = duplicate_onto(&mut tx, &[commit_c.id()], &[commit_a.id()]);
    assert_tree_eq!(stats.duplicated_commits[commit_c.id()].tree(), tree_2);

    // Duplicate non-empty commit onto non-empty ancestor tree
    let mut tx = repo.start_transaction();
    let stats = duplicate_onto(&mut tx, &[commit_d.id()], &[commit_b.id()]);
    assert_tree_eq!(stats.duplicated_commits[commit_d.id()].tree(), empty_tree);

    // Duplicate non-empty commit onto non-empty descendant tree
    let mut tx = repo.start_transaction();
    let stats = duplicate_onto(&mut tx, &[commit_b.id()], &[commit_d.id()]);
    assert_tree_eq!(stats.duplicated_commits[commit_b.id()].tree(), tree_1_2);

    // Duplicate multiple contiguous commits
    let mut tx = repo.start_transaction();
    let stats = duplicate_onto(&mut tx, &[commit_e.id(), commit_d.id()], &[commit_b.id()]);
    assert_tree_eq!(stats.duplicated_commits[commit_d.id()].tree(), empty_tree);
    assert_tree_eq!(stats.duplicated_commits[commit_e.id()].tree(), empty_tree);

    // Duplicate multiple non-contiguous commits
    let mut tx = repo.start_transaction();
    let stats = duplicate_onto(&mut tx, &[commit_e.id(), commit_c.id()], &[commit_a.id()]);
    assert_tree_eq!(stats.duplicated_commits[commit_c.id()].tree(), tree_2);
    assert_tree_eq!(stats.duplicated_commits[commit_e.id()].tree(), tree_2);

    // Duplicate onto multiple parents
    let mut tx = repo.start_transaction();
    let stats = duplicate_onto(&mut tx, &[commit_d.id()], &[commit_c.id(), commit_b.id()]);
    assert_tree_eq!(stats.duplicated_commits[commit_d.id()].tree(), tree_2);

    // Insert duplicated commit
    let mut tx = repo.start_transaction();
    let stats = duplicate_in_between(
        &mut tx,
        &[commit_b.id()],
        &[commit_d.id()],
        &[commit_e.id()],
    );
    assert_tree_eq!(stats.duplicated_commits[commit_b.id()].tree(), tree_1_2);
    // E was given as a child of the duplicate, so the view should end up with
    // a single head that is no longer the original E.
    let [head_id] = tx.repo().view().heads().iter().collect_array().unwrap();
    assert_ne!(head_id, commit_e.id());
    assert_tree_eq!(
        tx.repo().store().get_commit(head_id).unwrap().tree(),
        tree_1_2
    );
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_local_working_copy_concurrent.rs | lib/tests/test_local_working_copy_concurrent.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::cmp::max;
use std::thread;
use assert_matches::assert_matches;
use jj_lib::repo::Repo as _;
use jj_lib::working_copy::CheckoutError;
use jj_lib::workspace::Workspace;
use jj_lib::workspace::default_working_copy_factories;
use pollster::FutureExt as _;
use testutils::TestWorkspace;
use testutils::assert_tree_eq;
use testutils::commit_with_tree;
use testutils::create_tree;
use testutils::empty_snapshot_options;
use testutils::repo_path;
use testutils::repo_path_buf;
use testutils::write_working_copy_file;
/// Checking out from a stale `Workspace` instance must fail with
/// `CheckoutError::ConcurrentCheckout` rather than clobbering the checkout
/// made by another process in the meantime.
#[test]
fn test_concurrent_checkout() {
    // Test that we error out if a concurrent checkout is detected (i.e. if the
    // working-copy commit changed on disk after we read it).
    let settings = testutils::user_settings();
    let mut test_workspace1 = TestWorkspace::init_with_settings(&settings);
    let repo = test_workspace1.repo.clone();
    let workspace1_root = test_workspace1.workspace.workspace_root().to_owned();

    // Three commits: commit1 is checked out first, commit2 by the simulated
    // other process, and commit3 is the one whose checkout should fail.
    let tree1 = testutils::create_random_tree(&repo);
    let tree2 = testutils::create_random_tree(&repo);
    let tree3 = testutils::create_random_tree(&repo);
    let commit1 = commit_with_tree(repo.store(), tree1.clone());
    let commit2 = commit_with_tree(repo.store(), tree2.clone());
    let commit3 = commit_with_tree(repo.store(), tree3);

    // Check out tree1
    let ws1 = &mut test_workspace1.workspace;
    // The operation ID is not correct, but that doesn't matter for this test
    ws1.check_out(repo.op_id().clone(), None, &commit1).unwrap();

    // Check out tree2 from another process (simulated by another workspace
    // instance)
    {
        let mut ws2 = Workspace::load(
            &settings,
            &workspace1_root,
            &test_workspace1.env.default_store_factories(),
            &default_working_copy_factories(),
        )
        .unwrap();
        // Reload commit from the store associated with the workspace
        let repo = ws2.repo_loader().load_at(repo.operation()).unwrap();
        let commit2 = repo.store().get_commit(commit2.id()).unwrap();
        ws2.check_out(repo.op_id().clone(), Some(&tree1), &commit2)
            .unwrap();
    }

    // Checking out another tree (via the first workspace instance) should now fail.
    assert_matches!(
        ws1.check_out(repo.op_id().clone(), Some(&tree1), &commit3,),
        Err(CheckoutError::ConcurrentCheckout)
    );

    // Check that the tree2 is still checked out on disk.
    let ws3 = Workspace::load(
        &settings,
        &workspace1_root,
        &test_workspace1.env.default_store_factories(),
        &default_working_copy_factories(),
    )
    .unwrap();
    assert_tree_eq!(*ws3.working_copy().tree().unwrap(), tree2);
}
/// Runs one checkout per thread (each through its own `Workspace` instance)
/// and asserts that the working copy always ends up matching exactly one of
/// the candidate trees, i.e. checkouts never interleave.
#[test]
fn test_checkout_parallel() {
    // Test that concurrent checkouts by different processes (simulated by using
    // different repo instances) is safe.
    let settings = testutils::user_settings();
    let mut test_workspace = TestWorkspace::init_with_settings(&settings);
    let repo = &test_workspace.repo;
    let workspace_root = test_workspace.workspace.workspace_root().to_owned();

    // One single-file tree per thread, each with a distinct file name.
    let num_threads = max(num_cpus::get(), 4);
    let mut trees = vec![];
    for i in 0..num_threads {
        let path = repo_path_buf(format!("file{i}"));
        let tree = create_tree(repo, &[(&path, "contents")]);
        trees.push(tree);
    }

    // Create another tree just so we can test the update stats reliably from the
    // first update
    let tree = create_tree(repo, &[(repo_path("other file"), "contents")]);
    let commit = commit_with_tree(repo.store(), tree);
    test_workspace
        .workspace
        .check_out(repo.op_id().clone(), None, &commit)
        .unwrap();

    thread::scope(|s| {
        for tree in &trees {
            let test_env = &test_workspace.env;
            let op_id = repo.op_id().clone();
            let trees = trees.clone();
            let commit = commit_with_tree(repo.store(), tree.clone());
            let settings = settings.clone();
            let workspace_root = workspace_root.clone();
            s.spawn(move || {
                let mut workspace = Workspace::load(
                    &settings,
                    &workspace_root,
                    &test_env.default_store_factories(),
                    &default_working_copy_factories(),
                )
                .unwrap();
                // Reload commit from the store associated with the workspace
                let repo = workspace.repo_loader().load_at(repo.operation()).unwrap();
                let commit = repo.store().get_commit(commit.id()).unwrap();
                // The operation ID is not correct, but that doesn't matter for this test
                let stats = workspace.check_out(op_id, None, &commit).unwrap();
                // Each checkout swaps exactly one file for another, regardless
                // of which tree was on disk when this thread ran.
                assert_eq!(stats.updated_files, 0);
                assert_eq!(stats.added_files, 1);
                assert_eq!(stats.removed_files, 1);
                // Check that the working copy contains one of the trees. We may see a
                // different tree than the one we just checked out, but since
                // write_tree() should take the same lock as check_out(), write_tree()
                // should never produce a different tree.
                let mut locked_ws = workspace.start_working_copy_mutation().unwrap();
                let (new_tree, _stats) = locked_ws
                    .locked_wc()
                    .snapshot(&empty_snapshot_options())
                    .block_on()
                    .unwrap();
                assert!(
                    trees
                        .iter()
                        .any(|tree| tree.tree_ids() == new_tree.tree_ids())
                );
            });
        }
    });
}
/// Repeatedly checks out a commit and immediately modifies the checked-out
/// file, asserting that the subsequent snapshot never mistakes the modified
/// working copy for the original tree.
#[test]
fn test_racy_checkout() {
    let mut test_workspace = TestWorkspace::init();
    let repo = &test_workspace.repo;
    let op_id = repo.op_id().clone();
    let workspace_root = test_workspace.workspace.workspace_root().to_owned();

    let file_path = repo_path("file");
    let checked_out_tree = create_tree(repo, &[(file_path, "1")]);
    let commit = commit_with_tree(repo.store(), checked_out_tree.clone());

    let mut undetected_changes = 0;
    for _round in 0..100 {
        let ws = &mut test_workspace.workspace;
        ws.check_out(op_id.clone(), None, &commit).unwrap();
        let on_disk = std::fs::read(file_path.to_fs_path_unchecked(&workspace_root)).unwrap();
        assert_eq!(on_disk, b"1".to_vec());
        // A file written right after checkout (hopefully, from the test's
        // perspective, within the file system timestamp granularity) must be
        // detected as changed by the snapshot.
        write_working_copy_file(&workspace_root, file_path, "x");
        let snapshot_tree = test_workspace.snapshot().unwrap();
        if snapshot_tree.tree_ids() == checked_out_tree.tree_ids() {
            undetected_changes += 1;
        }
        // Reset the state for the next round
        write_working_copy_file(&workspace_root, file_path, "1");
    }
    assert_eq!(undetected_changes, 0);
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_local_working_copy_sparse.rs | lib/tests/test_local_working_copy_sparse.rs | // Copyright 2022 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use futures::StreamExt as _;
use itertools::Itertools as _;
use jj_lib::local_working_copy::LocalWorkingCopy;
use jj_lib::matchers::EverythingMatcher;
use jj_lib::repo::Repo as _;
use jj_lib::repo_path::RepoPath;
use jj_lib::repo_path::RepoPathBuf;
use jj_lib::working_copy::CheckoutStats;
use jj_lib::working_copy::WorkingCopy as _;
use pollster::FutureExt as _;
use testutils::TestWorkspace;
use testutils::commit_with_tree;
use testutils::create_tree;
use testutils::repo_path;
/// Converts a slice of borrowed repo paths into a vector of owned ones.
fn to_owned_path_vec(paths: &[&RepoPath]) -> Vec<RepoPathBuf> {
    let mut owned = Vec::with_capacity(paths.len());
    for path in paths {
        owned.push((*path).to_owned());
    }
    owned
}
/// Tests that `set_sparse_patterns` adds/removes exactly the files selected by
/// the patterns, that the sparse state is persisted across reloads, and that
/// `file_states` only tracks files inside the patterns.
#[test]
fn test_sparse_checkout() {
    let mut test_workspace = TestWorkspace::init();
    let repo = &test_workspace.repo;
    let working_copy_path = test_workspace.workspace.workspace_root().to_owned();

    let root_file1_path = repo_path("file1");
    let root_file2_path = repo_path("file2");
    let dir1_path = repo_path("dir1");
    let dir1_file1_path = repo_path("dir1/file1");
    let dir1_file2_path = repo_path("dir1/file2");
    let dir1_subdir1_path = repo_path("dir1/subdir1");
    let dir1_subdir1_file1_path = repo_path("dir1/subdir1/file1");
    let dir2_path = repo_path("dir2");
    let dir2_file1_path = repo_path("dir2/file1");

    let tree = create_tree(
        repo,
        &[
            (root_file1_path, "contents"),
            (root_file2_path, "contents"),
            (dir1_file1_path, "contents"),
            (dir1_file2_path, "contents"),
            (dir1_subdir1_file1_path, "contents"),
            (dir2_file1_path, "contents"),
        ],
    );
    let commit = commit_with_tree(repo.store(), tree);
    test_workspace
        .workspace
        .check_out(repo.op_id().clone(), None, &commit)
        .unwrap();

    let ws = &mut test_workspace.workspace;

    // Set sparse patterns to only dir1/
    let mut locked_ws = ws.start_working_copy_mutation().unwrap();
    let sparse_patterns = to_owned_path_vec(&[dir1_path]);
    let stats = locked_ws
        .locked_wc()
        .set_sparse_patterns(sparse_patterns.clone())
        .block_on()
        .unwrap();
    // The three files outside dir1/ (file1, file2, dir2/file1) get removed.
    assert_eq!(
        stats,
        CheckoutStats {
            updated_files: 0,
            added_files: 0,
            removed_files: 3,
            skipped_files: 0,
        }
    );
    assert_eq!(
        locked_ws.locked_wc().sparse_patterns().unwrap(),
        sparse_patterns
    );
    assert!(
        !root_file1_path
            .to_fs_path_unchecked(&working_copy_path)
            .exists()
    );
    assert!(
        !root_file2_path
            .to_fs_path_unchecked(&working_copy_path)
            .exists()
    );
    assert!(
        dir1_file1_path
            .to_fs_path_unchecked(&working_copy_path)
            .exists()
    );
    assert!(
        dir1_file2_path
            .to_fs_path_unchecked(&working_copy_path)
            .exists()
    );
    assert!(
        dir1_subdir1_file1_path
            .to_fs_path_unchecked(&working_copy_path)
            .exists()
    );
    assert!(
        !dir2_file1_path
            .to_fs_path_unchecked(&working_copy_path)
            .exists()
    );

    // Write the new state to disk
    locked_ws.finish(repo.op_id().clone()).unwrap();
    let wc: &LocalWorkingCopy = ws.working_copy().downcast_ref().unwrap();
    assert_eq!(
        wc.file_states().unwrap().paths().collect_vec(),
        vec![dir1_file1_path, dir1_file2_path, dir1_subdir1_file1_path]
    );
    assert_eq!(wc.sparse_patterns().unwrap(), sparse_patterns);

    // Reload the state to check that it was persisted
    let wc = LocalWorkingCopy::load(
        repo.store().clone(),
        ws.workspace_root().to_path_buf(),
        wc.state_path().to_path_buf(),
        repo.settings(),
    )
    .unwrap();
    assert_eq!(
        wc.file_states().unwrap().paths().collect_vec(),
        vec![dir1_file1_path, dir1_file2_path, dir1_subdir1_file1_path]
    );
    assert_eq!(wc.sparse_patterns().unwrap(), sparse_patterns);

    // Set sparse patterns to file1, dir1/subdir1/ and dir2/
    let mut locked_wc = wc.start_mutation().unwrap();
    let sparse_patterns = to_owned_path_vec(&[root_file1_path, dir1_subdir1_path, dir2_path]);
    let stats = locked_wc
        .set_sparse_patterns(sparse_patterns.clone())
        .block_on()
        .unwrap();
    // file1 and dir2/file1 are added back; dir1/file1 and dir1/file2 drop out.
    assert_eq!(
        stats,
        CheckoutStats {
            updated_files: 0,
            added_files: 2,
            removed_files: 2,
            skipped_files: 0,
        }
    );
    assert_eq!(locked_wc.sparse_patterns().unwrap(), sparse_patterns);
    assert!(
        root_file1_path
            .to_fs_path_unchecked(&working_copy_path)
            .exists()
    );
    assert!(
        !root_file2_path
            .to_fs_path_unchecked(&working_copy_path)
            .exists()
    );
    assert!(
        !dir1_file1_path
            .to_fs_path_unchecked(&working_copy_path)
            .exists()
    );
    assert!(
        !dir1_file2_path
            .to_fs_path_unchecked(&working_copy_path)
            .exists()
    );
    assert!(
        dir1_subdir1_file1_path
            .to_fs_path_unchecked(&working_copy_path)
            .exists()
    );
    assert!(
        dir2_file1_path
            .to_fs_path_unchecked(&working_copy_path)
            .exists()
    );
    let wc = locked_wc.finish(repo.op_id().clone()).block_on().unwrap();
    let wc: &LocalWorkingCopy = wc.downcast_ref().unwrap();
    assert_eq!(
        wc.file_states().unwrap().paths().collect_vec(),
        vec![dir1_subdir1_file1_path, dir2_file1_path, root_file1_path]
    );
}
/// Test that sparse patterns are respected on commit: files modified outside
/// the sparse patterns must not show up in the snapshotted tree.
#[test]
fn test_sparse_commit() {
    let mut test_workspace = TestWorkspace::init();
    let repo = &test_workspace.repo;
    let op_id = repo.op_id().clone();
    let working_copy_path = test_workspace.workspace.workspace_root().to_owned();

    let root_file1_path = repo_path("file1");
    let dir1_path = repo_path("dir1");
    let dir1_file1_path = repo_path("dir1/file1");
    let dir2_path = repo_path("dir2");
    let dir2_file1_path = repo_path("dir2/file1");

    let tree = create_tree(
        repo,
        &[
            (root_file1_path, "contents"),
            (dir1_file1_path, "contents"),
            (dir2_file1_path, "contents"),
        ],
    );
    let commit = commit_with_tree(repo.store(), tree.clone());
    test_workspace
        .workspace
        .check_out(repo.op_id().clone(), None, &commit)
        .unwrap();

    // Set sparse patterns to only dir1/
    let mut locked_ws = test_workspace
        .workspace
        .start_working_copy_mutation()
        .unwrap();
    let sparse_patterns = to_owned_path_vec(&[dir1_path]);
    locked_ws
        .locked_wc()
        .set_sparse_patterns(sparse_patterns)
        .block_on()
        .unwrap();
    locked_ws.finish(repo.op_id().clone()).unwrap();

    // Write modified version of all files, including files that are not in the
    // sparse patterns.
    std::fs::write(
        root_file1_path.to_fs_path_unchecked(&working_copy_path),
        "modified",
    )
    .unwrap();
    std::fs::write(
        dir1_file1_path.to_fs_path_unchecked(&working_copy_path),
        "modified",
    )
    .unwrap();
    // dir2/ was excluded by the sparse patterns, so it's not on disk anymore.
    std::fs::create_dir(dir2_path.to_fs_path_unchecked(&working_copy_path)).unwrap();
    std::fs::write(
        dir2_file1_path.to_fs_path_unchecked(&working_copy_path),
        "modified",
    )
    .unwrap();

    // Create a tree from the working copy. Only dir1/file1 should be updated in the
    // tree.
    let modified_tree = test_workspace.snapshot().unwrap();
    let diff: Vec<_> = tree
        .diff_stream(&modified_tree, &EverythingMatcher)
        .collect()
        .block_on();
    assert_eq!(diff.len(), 1);
    assert_eq!(diff[0].path.as_ref(), dir1_file1_path);

    // Set sparse patterns to also include dir2/
    let mut locked_ws = test_workspace
        .workspace
        .start_working_copy_mutation()
        .unwrap();
    let sparse_patterns = to_owned_path_vec(&[dir1_path, dir2_path]);
    locked_ws
        .locked_wc()
        .set_sparse_patterns(sparse_patterns)
        .block_on()
        .unwrap();
    locked_ws.finish(op_id).unwrap();

    // Create a tree from the working copy. Only dir1/file1 and dir2/file1 should be
    // updated in the tree.
    let modified_tree = test_workspace.snapshot().unwrap();
    let diff: Vec<_> = tree
        .diff_stream(&modified_tree, &EverythingMatcher)
        .collect()
        .block_on();
    assert_eq!(diff.len(), 2);
    assert_eq!(diff[0].path.as_ref(), dir1_file1_path);
    assert_eq!(diff[1].path.as_ref(), dir2_file1_path);
}
/// A .gitignore in an ancestor directory that lies outside the sparse
/// patterns must still apply to files inside the patterns.
#[test]
fn test_sparse_commit_gitignore() {
    // Test that (untracked) .gitignore files in parent directories are respected
    let mut test_workspace = TestWorkspace::init();
    let repo = &test_workspace.repo;
    let working_copy_path = test_workspace.workspace.workspace_root().to_owned();

    let dir1_path = repo_path("dir1");
    let dir1_file1_path = repo_path("dir1/file1");
    let dir1_file2_path = repo_path("dir1/file2");

    // Set sparse patterns to only dir1/
    let mut locked_ws = test_workspace
        .workspace
        .start_working_copy_mutation()
        .unwrap();
    let sparse_patterns = to_owned_path_vec(&[dir1_path]);
    locked_ws
        .locked_wc()
        .set_sparse_patterns(sparse_patterns)
        .block_on()
        .unwrap();
    locked_ws.finish(repo.op_id().clone()).unwrap();

    // Write dir1/file1 and dir1/file2 and a .gitignore saying to ignore dir1/file1
    std::fs::write(working_copy_path.join(".gitignore"), "dir1/file1").unwrap();
    std::fs::create_dir(dir1_path.to_fs_path_unchecked(&working_copy_path)).unwrap();
    std::fs::write(
        dir1_file1_path.to_fs_path_unchecked(&working_copy_path),
        "contents",
    )
    .unwrap();
    std::fs::write(
        dir1_file2_path.to_fs_path_unchecked(&working_copy_path),
        "contents",
    )
    .unwrap();

    // Create a tree from the working copy. Only dir1/file2 should be updated in the
    // tree because dir1/file1 is ignored.
    let modified_tree = test_workspace.snapshot().unwrap();
    let entries = modified_tree.entries().collect_vec();
    assert_eq!(entries.len(), 1);
    assert_eq!(entries[0].0.as_ref(), dir1_file2_path);
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_conflicts.rs | lib/tests/test_conflicts.rs | // Copyright 2021 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use indoc::indoc;
use itertools::Itertools as _;
use jj_lib::backend::FileId;
use jj_lib::conflict_labels::ConflictLabels;
use jj_lib::conflicts::ConflictMarkerStyle;
use jj_lib::conflicts::ConflictMaterializeOptions;
use jj_lib::conflicts::MIN_CONFLICT_MARKER_LEN;
use jj_lib::conflicts::choose_materialized_conflict_marker_len;
use jj_lib::conflicts::extract_as_single_hunk;
use jj_lib::conflicts::materialize_merge_result_to_bytes;
use jj_lib::conflicts::parse_conflict;
use jj_lib::conflicts::update_from_content;
use jj_lib::files::FileMergeHunkLevel;
use jj_lib::merge::Merge;
use jj_lib::merge::SameChange;
use jj_lib::repo::Repo as _;
use jj_lib::repo_path::RepoPath;
use jj_lib::store::Store;
use jj_lib::tree_merge::MergeOptions;
use pollster::FutureExt as _;
use test_case::test_case;
use testutils::TestRepo;
use testutils::read_file;
use testutils::repo_path;
/// Materializes a simple 2-sided conflict in each conflict-marker style
/// ("diff", "snapshot", "git") and checks the rendered output, including the
/// side-ordering rules for the "diff" style.
#[test]
fn test_materialize_conflict_basic() {
    let test_repo = TestRepo::init();
    let store = test_repo.repo.store();

    let path = repo_path("file");
    let base_id = testutils::write_file(
        store,
        path,
        indoc! {"
            line 1
            line 2
            line 3
            line 4
            line 5
        "},
    );
    let left_id = testutils::write_file(
        store,
        path,
        indoc! {"
            line 1
            line 2
            left 3.1
            left 3.2
            left 3.3
            line 4
            line 5
        "},
    );
    let right_id = testutils::write_file(
        store,
        path,
        indoc! {"
            line 1
            line 2
            right 3.1
            line 4
            line 5
        "},
    );

    // The left side should come first. The diff should use the smaller (right)
    // side, and the left side should be a snapshot.
    let conflict = Merge::from_removes_adds(
        vec![Some(base_id.clone())],
        vec![Some(left_id.clone()), Some(right_id.clone())],
    );
    insta::assert_snapshot!(
        &materialize_conflict_string(store, path, &conflict, ConflictMarkerStyle::Diff),
        @r"
    line 1
    line 2
    <<<<<<< conflict 1 of 1
    +++++++ side #1
    left 3.1
    left 3.2
    left 3.3
    %%%%%%% diff from base to side #2
    -line 3
    +right 3.1
    >>>>>>> conflict 1 of 1 ends
    line 4
    line 5
    "
    );

    // Swap the positive terms in the conflict. The diff should still use the right
    // side, but now the right side should come first.
    let conflict = Merge::from_removes_adds(
        vec![Some(base_id.clone())],
        vec![Some(right_id.clone()), Some(left_id.clone())],
    );
    insta::assert_snapshot!(
        &materialize_conflict_string(store, path, &conflict, ConflictMarkerStyle::Diff),
        @r"
    line 1
    line 2
    <<<<<<< conflict 1 of 1
    %%%%%%% diff from base to side #1
    -line 3
    +right 3.1
    +++++++ side #2
    left 3.1
    left 3.2
    left 3.3
    >>>>>>> conflict 1 of 1 ends
    line 4
    line 5
    "
    );

    // Test materializing "snapshot" conflict markers
    let conflict = Merge::from_removes_adds(
        vec![Some(base_id.clone())],
        vec![Some(left_id.clone()), Some(right_id.clone())],
    );
    insta::assert_snapshot!(
        &materialize_conflict_string(store, path, &conflict, ConflictMarkerStyle::Snapshot),
        @r"
    line 1
    line 2
    <<<<<<< conflict 1 of 1
    +++++++ side #1
    left 3.1
    left 3.2
    left 3.3
    ------- base
    line 3
    +++++++ side #2
    right 3.1
    >>>>>>> conflict 1 of 1 ends
    line 4
    line 5
    "
    );

    // Test materializing "git" conflict markers
    let conflict = Merge::from_removes_adds(
        vec![Some(base_id.clone())],
        vec![Some(left_id.clone()), Some(right_id.clone())],
    );
    insta::assert_snapshot!(
        &materialize_conflict_string(store, path, &conflict, ConflictMarkerStyle::Git),
        @r"
    line 1
    line 2
    <<<<<<< side #1
    left 3.1
    left 3.2
    left 3.3
    ||||||| base
    line 3
    =======
    right 3.1
    >>>>>>> side #2
    line 4
    line 5
    "
    );
}
/// Materializes a 3-sided conflict (two bases) in each marker style; the
/// "git" style has no 3-sided form and should fall back to "snapshot".
#[test]
fn test_materialize_conflict_three_sides() {
    let test_repo = TestRepo::init();
    let store = test_repo.repo.store();

    let path = repo_path("file");
    let base_1_id = testutils::write_file(
        store,
        path,
        indoc! {"
            line 1
            line 2 base
            line 3 base
            line 4 base
            line 5
        "},
    );
    let base_2_id = testutils::write_file(
        store,
        path,
        indoc! {"
            line 1
            line 2 base
            line 5
        "},
    );
    let a_id = testutils::write_file(
        store,
        path,
        indoc! {"
            line 1
            line 2 a.1
            line 3 a.2
            line 4 base
            line 5
        "},
    );
    let b_id = testutils::write_file(
        store,
        path,
        indoc! {"
            line 1
            line 2 b.1
            line 3 base
            line 4 b.2
            line 5
        "},
    );
    let c_id = testutils::write_file(
        store,
        path,
        indoc! {"
            line 1
            line 2 base
            line 3 c.2
            line 5
        "},
    );

    let conflict = Merge::from_removes_adds(
        vec![Some(base_1_id.clone()), Some(base_2_id.clone())],
        vec![Some(a_id.clone()), Some(b_id.clone()), Some(c_id.clone())],
    );

    // Test materializing "diff" conflict markers
    insta::assert_snapshot!(
        &materialize_conflict_string(store, path, &conflict, ConflictMarkerStyle::Diff),
        @r"
    line 1
    <<<<<<< conflict 1 of 1
    %%%%%%% diff from base #1 to side #1
    -line 2 base
    -line 3 base
    +line 2 a.1
    +line 3 a.2
    line 4 base
    +++++++ side #2
    line 2 b.1
    line 3 base
    line 4 b.2
    %%%%%%% diff from base #2 to side #3
    line 2 base
    +line 3 c.2
    >>>>>>> conflict 1 of 1 ends
    line 5
    "
    );

    // Test materializing "diff-experimental" conflict markers
    insta::assert_snapshot!(
        &materialize_conflict_string(store, path, &conflict, ConflictMarkerStyle::DiffExperimental),
        @r"
    line 1
    <<<<<<< conflict 1 of 1
    +++++++ side #1
    line 2 a.1
    line 3 a.2
    line 4 base
    %%%%%%% diff from base #1 to side #2
    -line 2 base
    +line 2 b.1
    line 3 base
    -line 4 base
    +line 4 b.2
    %%%%%%% diff from base #2 to side #3
    line 2 base
    +line 3 c.2
    >>>>>>> conflict 1 of 1 ends
    line 5
    "
    );

    // Test materializing "snapshot" conflict markers
    insta::assert_snapshot!(
        &materialize_conflict_string(store, path, &conflict, ConflictMarkerStyle::Snapshot),
        @r"
    line 1
    <<<<<<< conflict 1 of 1
    +++++++ side #1
    line 2 a.1
    line 3 a.2
    line 4 base
    ------- base #1
    line 2 base
    line 3 base
    line 4 base
    +++++++ side #2
    line 2 b.1
    line 3 base
    line 4 b.2
    ------- base #2
    line 2 base
    +++++++ side #3
    line 2 base
    line 3 c.2
    >>>>>>> conflict 1 of 1 ends
    line 5
    "
    );

    // Test materializing "git" conflict markers (falls back to "snapshot" since
    // "git" conflict markers don't support more than 2 sides)
    insta::assert_snapshot!(
        &materialize_conflict_string(store, path, &conflict, ConflictMarkerStyle::Git),
        @r"
    line 1
    <<<<<<< conflict 1 of 1
    +++++++ side #1
    line 2 a.1
    line 3 a.2
    line 4 base
    ------- base #1
    line 2 base
    line 3 base
    line 4 base
    +++++++ side #2
    line 2 b.1
    line 3 base
    line 4 b.2
    ------- base #2
    line 2 base
    +++++++ side #3
    line 2 base
    line 3 c.2
    >>>>>>> conflict 1 of 1 ends
    line 5
    "
    );
}
/// Materializes a 3-sided conflict built from linearized changes (a, b, c) on
/// the same base, checking that side order is preserved and that the "a" side
/// (the largest) is always rendered as a snapshot rather than a diff.
#[test]
fn test_materialize_conflict_multi_rebase_conflicts() {
    let test_repo = TestRepo::init();
    let store = test_repo.repo.store();

    // Create changes (a, b, c) on top of the base, and linearize them.
    let path = repo_path("file");
    let base_id = testutils::write_file(
        store,
        path,
        indoc! {"
            line 1
            line 2 base
            line 3
        "},
    );
    let a_id = testutils::write_file(
        store,
        path,
        indoc! {"
            line 1
            line 2 a.1
            line 2 a.2
            line 2 a.3
            line 3
        "},
    );
    let b_id = testutils::write_file(
        store,
        path,
        indoc! {"
            line 1
            line 2 b.1
            line 2 b.2
            line 3
        "},
    );
    let c_id = testutils::write_file(
        store,
        path,
        indoc! {"
            line 1
            line 2 c.1
            line 3
        "},
    );

    // The order of (a, b, c) should be preserved. For all cases, the "a" side
    // should be a snapshot.
    let conflict = Merge::from_removes_adds(
        vec![Some(base_id.clone()), Some(base_id.clone())],
        vec![Some(a_id.clone()), Some(b_id.clone()), Some(c_id.clone())],
    );
    insta::assert_snapshot!(
        &materialize_conflict_string(store, path, &conflict, ConflictMarkerStyle::Diff),
        @r"
    line 1
    <<<<<<< conflict 1 of 1
    +++++++ side #1
    line 2 a.1
    line 2 a.2
    line 2 a.3
    %%%%%%% diff from base #1 to side #2
    -line 2 base
    +line 2 b.1
    +line 2 b.2
    %%%%%%% diff from base #2 to side #3
    -line 2 base
    +line 2 c.1
    >>>>>>> conflict 1 of 1 ends
    line 3
    "
    );
    let conflict = Merge::from_removes_adds(
        vec![Some(base_id.clone()), Some(base_id.clone())],
        vec![Some(c_id.clone()), Some(b_id.clone()), Some(a_id.clone())],
    );
    insta::assert_snapshot!(
        &materialize_conflict_string(store, path, &conflict, ConflictMarkerStyle::Diff),
        @r"
    line 1
    <<<<<<< conflict 1 of 1
    %%%%%%% diff from base #1 to side #1
    -line 2 base
    +line 2 c.1
    %%%%%%% diff from base #2 to side #2
    -line 2 base
    +line 2 b.1
    +line 2 b.2
    +++++++ side #3
    line 2 a.1
    line 2 a.2
    line 2 a.3
    >>>>>>> conflict 1 of 1 ends
    line 3
    "
    );
    let conflict = Merge::from_removes_adds(
        vec![Some(base_id.clone()), Some(base_id.clone())],
        vec![Some(c_id.clone()), Some(a_id.clone()), Some(b_id.clone())],
    );
    insta::assert_snapshot!(
        &materialize_conflict_string(store, path, &conflict, ConflictMarkerStyle::Diff),
        @r"
    line 1
    <<<<<<< conflict 1 of 1
    %%%%%%% diff from base #1 to side #1
    -line 2 base
    +line 2 c.1
    +++++++ side #2
    line 2 a.1
    line 2 a.2
    line 2 a.3
    %%%%%%% diff from base #2 to side #3
    -line 2 base
    +line 2 b.1
    +line 2 b.2
    >>>>>>> conflict 1 of 1 ends
    line 3
    "
    );
}
// TODO: With options
/// Materializes a 2-hunk conflict with "diff" markers and verifies that
/// `parse_conflict` recovers the original hunks, with the left side always
/// appearing as the first add.
#[test]
fn test_materialize_parse_roundtrip() {
    let test_repo = TestRepo::init();
    let store = test_repo.repo.store();

    let path = repo_path("file");
    let base_id = testutils::write_file(
        store,
        path,
        indoc! {"
            line 1
            line 2
            line 3
            line 4
            line 5
        "},
    );
    let left_id = testutils::write_file(
        store,
        path,
        indoc! {"
            line 1 left
            line 2 left
            line 3
            line 4
            line 5 left
        "},
    );
    let right_id = testutils::write_file(
        store,
        path,
        indoc! {"
            line 1 right
            line 2
            line 3
            line 4 right
            line 5 right
        "},
    );

    let conflict = Merge::from_removes_adds(
        vec![Some(base_id.clone())],
        vec![Some(left_id.clone()), Some(right_id.clone())],
    );
    let materialized =
        materialize_conflict_string(store, path, &conflict, ConflictMarkerStyle::Diff);
    insta::assert_snapshot!(
        materialized,
        @r"
    <<<<<<< conflict 1 of 2
    +++++++ side #1
    line 1 left
    line 2 left
    %%%%%%% diff from base to side #2
    -line 1
    +line 1 right
    line 2
    >>>>>>> conflict 1 of 2 ends
    line 3
    <<<<<<< conflict 2 of 2
    %%%%%%% diff from base to side #1
    line 4
    -line 5
    +line 5 left
    +++++++ side #2
    line 4 right
    line 5 right
    >>>>>>> conflict 2 of 2 ends
    "
    );

    // The first add should always be from the left side
    insta::assert_debug_snapshot!(
        parse_conflict(materialized.as_bytes(), conflict.num_sides(), MIN_CONFLICT_MARKER_LEN),
        @r#"
    Some(
        [
            Conflicted(
                [
                    "line 1 left\nline 2 left\n",
                    "line 1\nline 2\n",
                    "line 1 right\nline 2\n",
                ],
            ),
            Resolved(
                "line 3\n",
            ),
            Conflicted(
                [
                    "line 4\nline 5 left\n",
                    "line 4\nline 5\n",
                    "line 4 right\nline 5 right\n",
                ],
            ),
        ],
    )
    "#);
}
/// For each conflict-marker style, materializing a conflict and then feeding
/// the unmodified output back through `update_from_content` must reproduce the
/// original conflict exactly.
#[test_case(ConflictMarkerStyle::Diff)]
#[test_case(ConflictMarkerStyle::Snapshot)]
#[test_case(ConflictMarkerStyle::Git)]
fn test_materialize_update_roundtrip(style: ConflictMarkerStyle) {
    let test_repo = TestRepo::init();
    let store = test_repo.repo.store();

    let path = repo_path("file");
    let base_id = testutils::write_file(
        store,
        path,
        indoc! {"
            line 1
            line 2 base
            line 3 base
            line 4 base
            line 5
        "},
    );
    let a_id = testutils::write_file(
        store,
        path,
        indoc! {"
            line 1
            line 2 a.1
            line 3 a.2
            line 4 base
            line 5
        "},
    );
    let b_id = testutils::write_file(
        store,
        path,
        indoc! {"
            line 1
            line 2 b.1
            line 3 base
            line 4 b.2
            line 5
        "},
    );

    let conflict = Merge::from_removes_adds(
        vec![Some(base_id.clone())],
        vec![Some(a_id.clone()), Some(b_id.clone())],
    );
    let materialized = materialize_conflict_string(store, path, &conflict, style);
    let parsed = update_from_content(
        &conflict,
        store,
        path,
        materialized.as_bytes(),
        MIN_CONFLICT_MARKER_LEN,
    )
    .block_on()
    .unwrap();
    // An unedited materialization must round-trip to the same conflict.
    assert_eq!(parsed, conflict);
}
// When conflict sides lack a terminating newline, the materialized markers
// carry a "(no terminating newline)" annotation, and parsing adds the
// newline back (it is stripped again later by `update_from_content`).
#[test]
fn test_materialize_conflict_no_newlines_at_eof() {
let test_repo = TestRepo::init();
let store = test_repo.repo.store();
let path = repo_path("file");
// "base" and "right" have no trailing newline; side 1 is empty.
let base_id = testutils::write_file(store, path, "base");
let left_empty_id = testutils::write_file(store, path, "");
let right_id = testutils::write_file(store, path, "right");
let conflict = Merge::from_removes_adds(
vec![Some(base_id.clone())],
vec![Some(left_empty_id.clone()), Some(right_id.clone())],
);
let materialized =
&materialize_conflict_string(store, path, &conflict, ConflictMarkerStyle::Diff);
insta::assert_snapshot!(materialized,
@r"
<<<<<<< conflict 1 of 1
%%%%%%% diff from: base (no terminating newline)
\\\\\\\ to: side #1
-base
+++++++ side #2 (no terminating newline)
right
>>>>>>> conflict 1 of 1 ends
"
);
// The conflict markers are parsed with the trailing newline, but it is removed
// by `update_from_content`
insta::assert_debug_snapshot!(
parse_conflict(
materialized.as_bytes(),
conflict.num_sides(),
MIN_CONFLICT_MARKER_LEN
),
@r#"
Some(
[
Conflicted(
[
"",
"base\n",
"right\n",
],
),
],
)
"#);
}
// Materialization of modify/delete conflicts with "diff" style markers:
// covers a deleted line vs. a modified line (in both side orders) and a
// whole-file deletion on one side (a `None` term in the merge).
#[test]
fn test_materialize_conflict_modify_delete() {
let test_repo = TestRepo::init();
let store = test_repo.repo.store();
let path = repo_path("file");
let base_id = testutils::write_file(
store,
path,
indoc! {"
line 1
line 2
line 3
line 4
line 5
"},
);
let modified_id = testutils::write_file(
store,
path,
indoc! {"
line 1
line 2
modified
line 4
line 5
"},
);
let deleted_id = testutils::write_file(
store,
path,
indoc! {"
line 1
line 2
line 4
line 5
"},
);
// left modifies a line, right deletes the same line.
let conflict = Merge::from_removes_adds(
vec![Some(base_id.clone())],
vec![Some(modified_id.clone()), Some(deleted_id.clone())],
);
insta::assert_snapshot!(&materialize_conflict_string(store, path, &conflict, ConflictMarkerStyle::Diff), @r"
line 1
line 2
<<<<<<< conflict 1 of 1
+++++++ side #1
modified
%%%%%%% diff from base to side #2
-line 3
>>>>>>> conflict 1 of 1 ends
line 4
line 5
"
);
// right modifies a line, left deletes the same line.
let conflict = Merge::from_removes_adds(
vec![Some(base_id.clone())],
vec![Some(deleted_id.clone()), Some(modified_id.clone())],
);
insta::assert_snapshot!(&materialize_conflict_string(store, path, &conflict, ConflictMarkerStyle::Diff), @r"
line 1
line 2
<<<<<<< conflict 1 of 1
%%%%%%% diff from base to side #1
-line 3
+++++++ side #2
modified
>>>>>>> conflict 1 of 1 ends
line 4
line 5
"
);
// modify/delete conflict at the file level
let conflict = Merge::from_removes_adds(
vec![Some(base_id.clone())],
vec![Some(modified_id.clone()), None],
);
insta::assert_snapshot!(&materialize_conflict_string(store, path, &conflict, ConflictMarkerStyle::Diff), @r"
<<<<<<< conflict 1 of 1
%%%%%%% diff from base to side #1
line 1
line 2
-line 3
+modified
line 4
line 5
+++++++ side #2
>>>>>>> conflict 1 of 1 ends
"
);
}
// Regression-style test: a multi-base conflict constructed so the
// materialization algorithm might be tempted to emit a reversed
// ("negative") snapshot section; the expected output has exactly one
// snapshot section and forward diffs for the remaining pairs.
#[test]
fn test_materialize_conflict_two_forward_diffs() {
let test_repo = TestRepo::init();
let store = test_repo.repo.store();
// Create conflict A-B+B-C+D-E+C. This is designed to tempt the algorithm to
// produce a negative snapshot at the end like this:
// <<<<
// ====
// A
// %%%%
// B
// ++++
// D
// %%%%
// C
// ----
// E
// >>>>
let path = repo_path("file");
let a_id = testutils::write_file(store, path, "A\n");
let b_id = testutils::write_file(store, path, "B\n");
let c_id = testutils::write_file(store, path, "C\n");
let d_id = testutils::write_file(store, path, "D\n");
let e_id = testutils::write_file(store, path, "E\n");
let conflict = Merge::from_removes_adds(
vec![Some(b_id.clone()), Some(c_id.clone()), Some(e_id.clone())],
vec![
Some(a_id.clone()),
Some(b_id.clone()),
Some(d_id.clone()),
Some(c_id.clone()),
],
);
// The materialized conflict should still have exactly one snapshot despite our
// attempted temptation.
insta::assert_snapshot!(
&materialize_conflict_string(store, path, &conflict, ConflictMarkerStyle::Diff),
@r"
<<<<<<< conflict 1 of 1
+++++++ side #1
A
%%%%%%% diff from base #1 to side #2
B
%%%%%%% diff from base #2 to side #3
-C
+D
%%%%%%% diff from base #3 to side #4
-E
+C
>>>>>>> conflict 1 of 1 ends
"
);
}
// Custom conflict labels should appear on the marker lines for all three
// marker styles (diff, snapshot, and git), replacing the default
// "side #N" / "base" annotations.
#[test]
fn test_materialize_conflict_with_labels() {
let test_repo = TestRepo::init();
let store = test_repo.repo.store();
let path = repo_path("file");
let side1 = testutils::write_file(store, path, "side 1\n");
let base1 = testutils::write_file(store, path, "base 1\n");
let side2 = testutils::write_file(store, path, "side 2\n");
let conflict = Merge::from_vec(vec![Some(side1), Some(base1), Some(side2)]);
// One label per merge term, in the same interleaved order as the merge.
let conflict_labels = ConflictLabels::from_vec(vec![
"side 1 conflict label".into(),
"base conflict label".into(),
"side 2 conflict label".into(),
]);
insta::assert_snapshot!(
&materialize_conflict_string_with_labels(
store,
path,
&conflict,
&conflict_labels,
ConflictMarkerStyle::Diff,
),
@r"
<<<<<<< conflict 1 of 1
%%%%%%% diff from: base conflict label
\\\\\\\ to: side 1 conflict label
-base 1
+side 1
+++++++ side 2 conflict label
side 2
>>>>>>> conflict 1 of 1 ends
"
);
insta::assert_snapshot!(
&materialize_conflict_string_with_labels(
store,
path,
&conflict,
&conflict_labels,
ConflictMarkerStyle::Snapshot,
),
@r"
<<<<<<< conflict 1 of 1
+++++++ side 1 conflict label
side 1
------- base conflict label
base 1
+++++++ side 2 conflict label
side 2
>>>>>>> conflict 1 of 1 ends
"
);
insta::assert_snapshot!(
&materialize_conflict_string_with_labels(
store,
path,
&conflict,
&conflict_labels,
ConflictMarkerStyle::Git,
),
@r"
<<<<<<< side 1 conflict label
side 1
||||||| base conflict label
base 1
=======
side 2
>>>>>>> side 2 conflict label
"
);
}
#[test]
fn test_parse_conflict_resolved() {
    // Content that contains no conflict markers at all is fully resolved,
    // so `parse_conflict` reports that there was nothing to parse.
    let content = indoc! {b"
        line 1
        line 2
        line 3
        line 4
        line 5
    "};
    let num_sides = 2;
    let marker_len = 7;
    let hunks = parse_conflict(content, num_sides, marker_len);
    assert_eq!(hunks, None);
}
// `parse_conflict` should accept a single two-sided conflict in every
// marker style ("diff", "snapshot", "git"), with or without trailing text
// on the marker lines, and even with markers longer than the minimum.
#[test]
fn test_parse_conflict_simple() {
// "diff" style with bare markers.
insta::assert_debug_snapshot!(
parse_conflict(indoc! {b"
line 1
<<<<<<<
%%%%%%%
line 2
-line 3
+left
line 4
+++++++
right
>>>>>>>
line 5
"},
2,
7
),
@r#"
Some(
[
Resolved(
"line 1\n",
),
Conflicted(
[
"line 2\nleft\nline 4\n",
"line 2\nline 3\nline 4\n",
"right\n",
],
),
Resolved(
"line 5\n",
),
],
)
"#
);
// "diff" style with arbitrary text after the markers.
insta::assert_debug_snapshot!(
parse_conflict(indoc! {b"
line 1
<<<<<<< Text
%%%%%%% Different text
line 2
-line 3
+left
line 4
+++++++ Yet <><>< more text
right
>>>>>>> More and more text
line 5
"},
2,
7
),
@r#"
Some(
[
Resolved(
"line 1\n",
),
Conflicted(
[
"line 2\nleft\nline 4\n",
"line 2\nline 3\nline 4\n",
"right\n",
],
),
Resolved(
"line 5\n",
),
],
)
"#
);
// Test "snapshot" style
insta::assert_debug_snapshot!(
parse_conflict(indoc! {b"
line 1
<<<<<<< Random text
+++++++ Random text
line 3.1
line 3.2
------- Random text
line 3
line 4
+++++++ Random text
line 3
line 4.1
>>>>>>> Random text
line 5
"},
2,
7
),
@r#"
Some(
[
Resolved(
"line 1\n",
),
Conflicted(
[
"line 3.1\nline 3.2\n",
"line 3\nline 4\n",
"line 3\nline 4.1\n",
],
),
Resolved(
"line 5\n",
),
],
)
"#
);
// Test "snapshot" style with reordered sections
insta::assert_debug_snapshot!(
parse_conflict(indoc! {b"
line 1
<<<<<<< Random text
------- Random text
line 3
line 4
+++++++ Random text
line 3.1
line 3.2
+++++++ Random text
line 3
line 4.1
>>>>>>> Random text
line 5
"},
2,
7
),
@r#"
Some(
[
Resolved(
"line 1\n",
),
Conflicted(
[
"line 3.1\nline 3.2\n",
"line 3\nline 4\n",
"line 3\nline 4.1\n",
],
),
Resolved(
"line 5\n",
),
],
)
"#
);
// Test "git" style
insta::assert_debug_snapshot!(
parse_conflict(indoc! {b"
line 1
<<<<<<< Side #1
line 3.1
line 3.2
||||||| Base
line 3
line 4
======= Side #2
line 3
line 4.1
>>>>>>> End
line 5
"},
2,
7
),
@r#"
Some(
[
Resolved(
"line 1\n",
),
Conflicted(
[
"line 3.1\nline 3.2\n",
"line 3\nline 4\n",
"line 3\nline 4.1\n",
],
),
Resolved(
"line 5\n",
),
],
)
"#
);
// Test "git" style with empty side 1
insta::assert_debug_snapshot!(
parse_conflict(indoc! {b"
line 1
<<<<<<< Side #1
||||||| Base
line 3
line 4
======= Side #2
line 3.1
line 4.1
>>>>>>> End
line 5
"},
2,
7
),
@r#"
Some(
[
Resolved(
"line 1\n",
),
Conflicted(
[
"",
"line 3\nline 4\n",
"line 3.1\nline 4.1\n",
],
),
Resolved(
"line 5\n",
),
],
)
"#
);
// The conflict markers are longer than the originally materialized markers, but
// we allow them to parse anyway
insta::assert_debug_snapshot!(
parse_conflict(indoc! {b"
line 1
<<<<<<<<<<<
%%%%%%%%%%%
line 2
-line 3
+left
line 4
+++++++++++
right
>>>>>>>>>>>
line 5
"},
2,
7
),
@r#"
Some(
[
Resolved(
"line 1\n",
),
Conflicted(
[
"line 2\nleft\nline 4\n",
"line 2\nline 3\nline 4\n",
"right\n",
],
),
Resolved(
"line 5\n",
),
],
)
"#
);
}
// Parsing of conflicts with more than two sides (`num_sides == 3`):
// a second `%%%%%%%` diff section contributes both an extra base and an
// extra side, so the parsed hunk lists five terms (3 adds, 2 removes).
#[test]
fn test_parse_conflict_multi_way() {
// "diff" style with bare markers.
insta::assert_debug_snapshot!(
parse_conflict(
indoc! {b"
line 1
<<<<<<<
%%%%%%%
line 2
-line 3
+left
line 4
+++++++
right
%%%%%%%
line 2
+forward
line 3
line 4
>>>>>>>
line 5
"},
3,
7
),
@r#"
Some(
[
Resolved(
"line 1\n",
),
Conflicted(
[
"line 2\nleft\nline 4\n",
"line 2\nline 3\nline 4\n",
"right\n",
"line 2\nline 3\nline 4\n",
"line 2\nforward\nline 3\nline 4\n",
],
),
Resolved(
"line 5\n",
),
],
)
"#
);
// Same input, but with arbitrary text after each marker.
insta::assert_debug_snapshot!(
parse_conflict(indoc! {b"
line 1
<<<<<<< Random text
%%%%%%% Random text
line 2
-line 3
+left
line 4
+++++++ Random text
right
%%%%%%% Random text
line 2
+forward
line 3
line 4
>>>>>>> Random text
line 5
"},
3,
7
),
@r#"
Some(
[
Resolved(
"line 1\n",
),
Conflicted(
[
"line 2\nleft\nline 4\n",
"line 2\nline 3\nline 4\n",
"right\n",
"line 2\nline 3\nline 4\n",
"line 2\nforward\nline 3\nline 4\n",
],
),
Resolved(
"line 5\n",
),
],
)
"#
);
// Test "snapshot" style
insta::assert_debug_snapshot!(
parse_conflict(indoc! {b"
line 1
<<<<<<< Random text
+++++++ Random text
line 3.1
line 3.2
+++++++ Random text
line 3
line 4.1
------- Random text
line 3
line 4
------- Random text
line 3
+++++++ Random text
line 3
line 4
>>>>>>> Random text
line 5
"},
3,
7
),
@r#"
Some(
[
Resolved(
"line 1\n",
),
Conflicted(
[
"line 3.1\nline 3.2\n",
"line 3\nline 4\n",
"line 3\nline 4.1\n",
"line 3\n",
"line 3\nline 4\n",
],
),
Resolved(
"line 5\n",
),
],
)
"#
);
}
// Marker lines terminated with CRLF must still be recognized, and the
// parsed hunk contents keep their original "\r\n" line endings.
#[test]
fn test_parse_conflict_crlf_markers() {
// Conflict markers should be recognized even with CRLF
insta::assert_debug_snapshot!(
parse_conflict(
indoc! {b"
line 1\r
<<<<<<<\r
+++++++\r
left\r
-------\r
base\r
+++++++\r
right\r
>>>>>>>\r
line 5\r
"},
2,
7
),
@r#"
Some(
[
Resolved(
"line 1\r\n",
),
Conflicted(
[
"left\r\n",
"base\r\n",
"right\r\n",
],
),
Resolved(
"line 5\r\n",
),
],
)
"#
);
}
#[test]
fn test_parse_conflict_diff_stripped_whitespace() {
// Should be able to parse conflict even if diff contains empty line (without
// even a leading space, which is sometimes stripped by text editors)
insta::assert_debug_snapshot!(
parse_conflict(
indoc! {b"
line 1
<<<<<<<
%%%%%%%
line 2
-line 3
+left
\r
line 4
+++++++
right
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_evolution_predecessors.rs | lib/tests/test_evolution_predecessors.rs | // Copyright 2025 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::slice;
use assert_matches::assert_matches;
use itertools::Itertools as _;
use jj_lib::backend::CommitId;
use jj_lib::commit::Commit;
use jj_lib::config::ConfigLayer;
use jj_lib::config::ConfigSource;
use jj_lib::evolution::CommitEvolutionEntry;
use jj_lib::evolution::WalkPredecessorsError;
use jj_lib::evolution::accumulate_predecessors;
use jj_lib::evolution::walk_predecessors;
use jj_lib::repo::MutableRepo;
use jj_lib::repo::ReadonlyRepo;
use jj_lib::repo::Repo as _;
use jj_lib::settings::UserSettings;
use maplit::btreemap;
use pollster::FutureExt as _;
use testutils::TestRepo;
use testutils::commit_transactions;
use testutils::write_random_commit;
/// Walks the predecessor chain of `start_commit` in `repo` and collects
/// every entry, panicking if any step of the walk fails.
fn collect_predecessors(repo: &ReadonlyRepo, start_commit: &CommitId) -> Vec<CommitEvolutionEntry> {
    let start = slice::from_ref(start_commit);
    let entries: Result<Vec<_>, _> = walk_predecessors(repo, start).collect();
    entries.unwrap()
}
// Basic predecessor walk: a commit rewritten once yields itself plus its
// predecessor, each paired with the operation that created it; the root
// commit has neither operation nor predecessors.
#[test]
fn test_walk_predecessors_basic() {
let test_repo = TestRepo::init();
let repo0 = test_repo.repo;
let root_commit = repo0.store().root_commit();
// op1 creates commit1; op2 rewrites it into commit2.
let mut tx = repo0.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
let repo1 = tx.commit("test").unwrap();
let mut tx = repo1.start_transaction();
let commit2 = tx
.repo_mut()
.rewrite_commit(&commit1)
.set_description("rewritten")
.write()
.unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo2 = tx.commit("test").unwrap();
// The root commit has no associated operation because it isn't "created" at
// the root operation.
let entries = collect_predecessors(&repo2, root_commit.id());
assert_eq!(entries.len(), 1);
assert_eq!(entries[0].commit, root_commit);
assert_eq!(entries[0].operation.as_ref(), None);
assert_eq!(entries[0].predecessor_ids(), []);
let entries = collect_predecessors(&repo2, commit1.id());
assert_eq!(entries.len(), 1);
assert_eq!(entries[0].commit, commit1);
assert_eq!(entries[0].operation.as_ref(), Some(repo1.operation()));
assert_eq!(entries[0].predecessor_ids(), []);
let entries = collect_predecessors(&repo2, commit2.id());
assert_eq!(entries.len(), 2);
assert_eq!(entries[0].commit, commit2);
assert_eq!(entries[0].operation.as_ref(), Some(repo2.operation()));
assert_eq!(entries[0].predecessor_ids(), [commit1.id().clone()]);
assert_eq!(entries[1].commit, commit1);
assert_eq!(entries[1].operation.as_ref(), Some(repo1.operation()));
assert_eq!(entries[1].predecessor_ids(), []);
}
// Compatibility with operations written by old jj versions that did not
// record `commit_predecessors`: the walk falls back to the legacy code
// path and yields entries with no associated operation.
#[test]
fn test_walk_predecessors_basic_legacy_op() {
let test_repo = TestRepo::init();
let repo0 = test_repo.repo;
let loader = repo0.loader();
let mut tx = repo0.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
let repo1 = tx.commit("test").unwrap();
let mut tx = repo1.start_transaction();
let commit2 = tx
.repo_mut()
.rewrite_commit(&commit1)
.set_description("rewritten")
.write()
.unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo2 = tx.commit("test").unwrap();
// Save operation without the predecessors as old jj would do. We only need
// to rewrite the head operation since walk_predecessors() will fall back to
// the legacy code path immediately.
let repo2 = {
let mut data = repo2.operation().store_operation().clone();
data.commit_predecessors = None;
let op_id = loader.op_store().write_operation(&data).block_on().unwrap();
let op = loader.load_operation(&op_id).unwrap();
loader.load_at(&op).unwrap()
};
let entries = collect_predecessors(&repo2, commit2.id());
assert_eq!(entries.len(), 2);
assert_eq!(entries[0].commit, commit2);
assert_eq!(entries[0].operation.as_ref(), None);
assert_eq!(entries[0].predecessor_ids(), [commit1.id().clone()]);
assert_eq!(entries[1].commit, commit1);
assert_eq!(entries[1].operation.as_ref(), None);
assert_eq!(entries[1].predecessor_ids(), []);
}
// Predecessor walks across concurrent (divergent, then merged) operations:
// each rewritten commit should be attributed to the specific parallel
// operation that produced it, not to the merged head operation.
#[test]
fn test_walk_predecessors_concurrent_ops() {
let test_repo = TestRepo::init();
let repo0 = test_repo.repo;
let mut tx = repo0.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
let repo1 = tx.commit("test").unwrap();
// Two transactions started from repo1 rewrite commit1 concurrently.
let mut tx2 = repo1.start_transaction();
let commit2 = tx2
.repo_mut()
.rewrite_commit(&commit1)
.set_description("rewritten 2")
.write()
.unwrap();
tx2.repo_mut().rebase_descendants().unwrap();
let mut tx3 = repo1.start_transaction();
let commit3 = tx3
.repo_mut()
.rewrite_commit(&commit1)
.set_description("rewritten 3")
.write()
.unwrap();
tx3.repo_mut().rebase_descendants().unwrap();
// Committing both transactions produces a merge operation with two parents.
let repo4 = commit_transactions(vec![tx2, tx3]);
let [op2, op3] = repo4
.operation()
.parents()
.map(Result::unwrap)
.collect_array()
.unwrap();
let mut tx = repo4.start_transaction();
let commit4 = tx
.repo_mut()
.rewrite_commit(&commit2)
.set_description("rewritten 4")
.write()
.unwrap();
let commit5 = tx
.repo_mut()
.rewrite_commit(&commit3)
.set_description("rewritten 5")
.write()
.unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo5 = tx.commit("test").unwrap();
// commit4's chain goes through the op2 branch only.
let entries = collect_predecessors(&repo5, commit4.id());
assert_eq!(entries.len(), 3);
assert_eq!(entries[0].commit, commit4);
assert_eq!(entries[0].operation.as_ref(), Some(repo5.operation()));
assert_eq!(entries[0].predecessor_ids(), [commit2.id().clone()]);
assert_eq!(entries[1].commit, commit2);
assert_eq!(entries[1].operation.as_ref(), Some(&op2));
assert_eq!(entries[1].predecessor_ids(), [commit1.id().clone()]);
assert_eq!(entries[2].commit, commit1);
assert_eq!(entries[2].operation.as_ref(), Some(repo1.operation()));
assert_eq!(entries[2].predecessor_ids(), []);
// commit5's chain goes through the op3 branch only.
let entries = collect_predecessors(&repo5, commit5.id());
assert_eq!(entries.len(), 3);
assert_eq!(entries[0].commit, commit5);
assert_eq!(entries[0].operation.as_ref(), Some(repo5.operation()));
assert_eq!(entries[0].predecessor_ids(), [commit3.id().clone()]);
assert_eq!(entries[1].commit, commit3);
assert_eq!(entries[1].operation.as_ref(), Some(&op3));
assert_eq!(entries[1].predecessor_ids(), [commit1.id().clone()]);
assert_eq!(entries[2].commit, commit1);
assert_eq!(entries[2].operation.as_ref(), Some(repo1.operation()));
assert_eq!(entries[2].predecessor_ids(), []);
}
// A rewrite that records two predecessors created in *different*
// operations: both predecessors are visited, each attributed to its own
// creating operation.
#[test]
fn test_walk_predecessors_multiple_predecessors_across_ops() {
let test_repo = TestRepo::init();
let repo0 = test_repo.repo;
let mut tx = repo0.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
let repo1 = tx.commit("test").unwrap();
let mut tx = repo1.start_transaction();
let commit2 = write_random_commit(tx.repo_mut());
let repo2 = tx.commit("test").unwrap();
// commit3 explicitly lists both commit2 and commit1 as predecessors.
let mut tx = repo2.start_transaction();
let commit3 = tx
.repo_mut()
.rewrite_commit(&commit2)
.set_predecessors(vec![commit2.id().clone(), commit1.id().clone()])
.set_description("rewritten")
.write()
.unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo3 = tx.commit("test").unwrap();
// Predecessor commits are emitted in chronological (operation) order.
let entries = collect_predecessors(&repo3, commit3.id());
assert_eq!(entries.len(), 3);
assert_eq!(entries[0].commit, commit3);
assert_eq!(entries[0].operation.as_ref(), Some(repo3.operation()));
assert_eq!(
entries[0].predecessor_ids(),
[commit2.id().clone(), commit1.id().clone()]
);
assert_eq!(entries[1].commit, commit2);
assert_eq!(entries[1].operation.as_ref(), Some(repo2.operation()));
assert_eq!(entries[1].predecessor_ids(), []);
assert_eq!(entries[2].commit, commit1);
assert_eq!(entries[2].operation.as_ref(), Some(repo1.operation()));
assert_eq!(entries[2].predecessor_ids(), []);
}
// A rewrite that records two predecessors created in the *same* operation:
// they are visited in the order listed by the rewrite, both attributed to
// that one operation.
#[test]
fn test_walk_predecessors_multiple_predecessors_within_op() {
let test_repo = TestRepo::init();
let repo0 = test_repo.repo;
let mut tx = repo0.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
let commit2 = write_random_commit(tx.repo_mut());
let repo1 = tx.commit("test").unwrap();
let mut tx = repo1.start_transaction();
let commit3 = tx
.repo_mut()
.rewrite_commit(&commit1)
.set_predecessors(vec![commit1.id().clone(), commit2.id().clone()])
.set_description("rewritten")
.write()
.unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo2 = tx.commit("test").unwrap();
let entries = collect_predecessors(&repo2, commit3.id());
assert_eq!(entries.len(), 3);
assert_eq!(entries[0].commit, commit3);
assert_eq!(entries[0].operation.as_ref(), Some(repo2.operation()));
assert_eq!(
entries[0].predecessor_ids(),
[commit1.id().clone(), commit2.id().clone()]
);
assert_eq!(entries[1].commit, commit1);
assert_eq!(entries[1].operation.as_ref(), Some(repo1.operation()));
assert_eq!(entries[1].predecessor_ids(), []);
assert_eq!(entries[2].commit, commit2);
assert_eq!(entries[2].operation.as_ref(), Some(repo1.operation()));
assert_eq!(entries[2].predecessor_ids(), []);
}
// Two rewrites recorded within a single operation (commit1 -> commit2 ->
// commit3): the walk resolves the chain transitively, attributing both
// rewrites to the same operation.
#[test]
fn test_walk_predecessors_transitive() {
let test_repo = TestRepo::init();
let repo0 = test_repo.repo;
let mut tx = repo0.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
let repo1 = tx.commit("test").unwrap();
// Both rewrites happen inside one transaction/operation.
let mut tx = repo1.start_transaction();
let commit2 = tx
.repo_mut()
.rewrite_commit(&commit1)
.set_description("rewritten 2")
.write()
.unwrap();
let commit3 = tx
.repo_mut()
.rewrite_commit(&commit2)
.set_description("rewritten 3")
.write()
.unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo2 = tx.commit("test").unwrap();
let entries = collect_predecessors(&repo2, commit3.id());
assert_eq!(entries.len(), 3);
assert_eq!(entries[0].commit, commit3);
assert_eq!(entries[0].operation.as_ref(), Some(repo2.operation()));
assert_eq!(entries[0].predecessor_ids(), [commit2.id().clone()]);
assert_eq!(entries[1].commit, commit2);
assert_eq!(entries[1].operation.as_ref(), Some(repo2.operation()));
assert_eq!(entries[1].predecessor_ids(), [commit1.id().clone()]);
assert_eq!(entries[2].commit, commit1);
assert_eq!(entries[2].operation.as_ref(), Some(repo1.operation()));
assert_eq!(entries[2].predecessor_ids(), []);
}
// When a commit has multiple predecessors forming a small DAG (see the
// ASCII diagram below), the walk emits entries in graph/topological order:
// each commit appears after all commits that were rewritten from it.
#[test]
fn test_walk_predecessors_transitive_graph_order() {
let test_repo = TestRepo::init();
let repo0 = test_repo.repo;
// 5 : op2
// |\
// 4 3 : op1
// | | :
// | 2 :
// |/ :
// 1 :
let mut tx = repo0.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
let commit2 = tx
.repo_mut()
.rewrite_commit(&commit1)
.set_description("rewritten 2")
.write()
.unwrap();
let commit3 = tx
.repo_mut()
.rewrite_commit(&commit2)
.set_description("rewritten 3")
.write()
.unwrap();
let commit4 = tx
.repo_mut()
.rewrite_commit(&commit1)
.set_description("rewritten 4")
.write()
.unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo1 = tx.commit("test").unwrap();
// commit5 merges the two predecessor chains (4 and 3).
let mut tx = repo1.start_transaction();
let commit5 = tx
.repo_mut()
.rewrite_commit(&commit4)
.set_predecessors(vec![commit4.id().clone(), commit3.id().clone()])
.set_description("rewritten 5")
.write()
.unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo2 = tx.commit("test").unwrap();
let entries = collect_predecessors(&repo2, commit5.id());
assert_eq!(entries.len(), 5);
assert_eq!(entries[0].commit, commit5);
assert_eq!(entries[0].operation.as_ref(), Some(repo2.operation()));
assert_eq!(
entries[0].predecessor_ids(),
[commit4.id().clone(), commit3.id().clone()]
);
assert_eq!(entries[1].commit, commit4);
assert_eq!(entries[1].operation.as_ref(), Some(repo1.operation()));
assert_eq!(entries[1].predecessor_ids(), [commit1.id().clone()]);
assert_eq!(entries[2].commit, commit3);
assert_eq!(entries[2].operation.as_ref(), Some(repo1.operation()));
assert_eq!(entries[2].predecessor_ids(), [commit2.id().clone()]);
assert_eq!(entries[3].commit, commit2);
assert_eq!(entries[3].operation.as_ref(), Some(repo1.operation()));
assert_eq!(entries[3].predecessor_ids(), [commit1.id().clone()]);
assert_eq!(entries[4].commit, commit1);
assert_eq!(entries[4].operation.as_ref(), Some(repo1.operation()));
assert_eq!(entries[4].predecessor_ids(), []);
}
// A commit that lists both an original commit and its rewrite as
// predecessors (commit1 appears twice in the graph below); the walk must
// still emit each commit exactly once, in graph order.
#[test]
fn test_walk_predecessors_unsimplified() {
let test_repo = TestRepo::init();
let repo0 = test_repo.repo;
// 3
// |\
// | 2
// |/
// 1
let mut tx = repo0.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
let repo1 = tx.commit("test").unwrap();
let mut tx = repo1.start_transaction();
let commit2 = tx
.repo_mut()
.rewrite_commit(&commit1)
.set_description("rewritten 2")
.write()
.unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo2 = tx.commit("test").unwrap();
// commit3 lists commit1 (already a predecessor of commit2) directly, too.
let mut tx = repo2.start_transaction();
let commit3 = tx
.repo_mut()
.rewrite_commit(&commit1)
.set_predecessors(vec![commit1.id().clone(), commit2.id().clone()])
.set_description("rewritten 3")
.write()
.unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo3 = tx.commit("test").unwrap();
let entries = collect_predecessors(&repo3, commit3.id());
assert_eq!(entries.len(), 3);
assert_eq!(entries[0].commit, commit3);
assert_eq!(entries[0].operation.as_ref(), Some(repo3.operation()));
assert_eq!(
entries[0].predecessor_ids(),
[commit1.id().clone(), commit2.id().clone()]
);
assert_eq!(entries[1].commit, commit2);
assert_eq!(entries[1].operation.as_ref(), Some(repo2.operation()));
assert_eq!(entries[1].predecessor_ids(), [commit1.id().clone()]);
assert_eq!(entries[2].commit, commit1);
assert_eq!(entries[2].operation.as_ref(), Some(repo1.operation()));
assert_eq!(entries[2].predecessor_ids(), []);
}
// A hand-crafted operation whose predecessor map makes a commit its own
// predecessor must be reported as `CycleDetected` rather than looping.
#[test]
fn test_walk_predecessors_direct_cycle_within_op() {
let test_repo = TestRepo::init();
let repo0 = test_repo.repo;
let loader = repo0.loader();
let mut tx = repo0.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
let repo1 = tx.commit("test").unwrap();
// Rewrite the head operation with a self-referential predecessor entry.
let repo1 = {
let mut data = repo1.operation().store_operation().clone();
data.commit_predecessors = Some(btreemap! {
commit1.id().clone() => vec![commit1.id().clone()],
});
let op_id = loader.op_store().write_operation(&data).block_on().unwrap();
let op = loader.load_operation(&op_id).unwrap();
loader.load_at(&op).unwrap()
};
assert_matches!(
walk_predecessors(&repo1, slice::from_ref(commit1.id())).next(),
Some(Err(WalkPredecessorsError::CycleDetected(_)))
);
}
// Like the direct-cycle test, but the cycle spans three commits
// (1 -> 3 -> 2 -> 1); the walk must detect it and return `CycleDetected`.
#[test]
fn test_walk_predecessors_indirect_cycle_within_op() {
let test_repo = TestRepo::init();
let repo0 = test_repo.repo;
let loader = repo0.loader();
let mut tx = repo0.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
let commit2 = write_random_commit(tx.repo_mut());
let commit3 = write_random_commit(tx.repo_mut());
let repo1 = tx.commit("test").unwrap();
// Rewrite the head operation with a 3-commit predecessor cycle.
let repo1 = {
let mut data = repo1.operation().store_operation().clone();
data.commit_predecessors = Some(btreemap! {
commit1.id().clone() => vec![commit3.id().clone()],
commit2.id().clone() => vec![commit1.id().clone()],
commit3.id().clone() => vec![commit2.id().clone()],
});
let op_id = loader.op_store().write_operation(&data).block_on().unwrap();
let op = loader.load_operation(&op_id).unwrap();
loader.load_at(&op).unwrap()
};
assert_matches!(
walk_predecessors(&repo1, slice::from_ref(commit3.id())).next(),
Some(Err(WalkPredecessorsError::CycleDetected(_)))
);
}
// Exercises `accumulate_predecessors` over the operation graph drawn
// below: empty ranges, single/multiple forward steps, reverse direction
// (which inverts the mapping into successors), and sibling operations.
#[test]
fn test_accumulate_predecessors() {
// Stabilize commit IDs
let mut config = testutils::base_user_config();
let mut layer = ConfigLayer::empty(ConfigSource::User);
layer
.set_value("debug.commit-timestamp", "2001-02-03T04:05:06+07:00")
.unwrap();
config.add_layer(layer);
let settings = UserSettings::from_config(config).unwrap();
let test_repo = TestRepo::init_with_settings(&settings);
let repo_0 = test_repo.repo;
// Creates a fresh commit on top of the root with the given description.
fn new_commit(repo: &mut MutableRepo, desc: &str) -> Commit {
repo.new_commit(
vec![repo.store().root_commit_id().clone()],
repo.store().empty_merged_tree(),
)
.set_description(desc)
.write()
.unwrap()
}
// Rewrites `predecessors[0]`, recording the whole list as predecessors.
fn rewrite_commit(repo: &mut MutableRepo, predecessors: &[&Commit], desc: &str) -> Commit {
repo.rewrite_commit(predecessors[0])
.set_predecessors(predecessors.iter().map(|c| c.id().clone()).collect())
.set_description(desc)
.write()
.unwrap()
}
// Set up operation graph:
//
// {commit: predecessors}
// D {d1: [a1], d2: [a2]}
// C | {c1: [b1], c2: [b2, a3], c3: [c2]}
// B | {b1: [a1], b2: [a2, a3]}
// |/
// A {a1: [], a2: [], a3: []}
// 0
let mut tx = repo_0.start_transaction();
let commit_a1 = new_commit(tx.repo_mut(), "a1");
let commit_a2 = new_commit(tx.repo_mut(), "a2");
let commit_a3 = new_commit(tx.repo_mut(), "a3");
let repo_a = tx.commit("a").unwrap();
let mut tx = repo_a.start_transaction();
let commit_b1 = rewrite_commit(tx.repo_mut(), &[&commit_a1], "b1");
let commit_b2 = rewrite_commit(tx.repo_mut(), &[&commit_a2, &commit_a3], "b2");
tx.repo_mut().rebase_descendants().unwrap();
let repo_b = tx.commit("b").unwrap();
let mut tx = repo_b.start_transaction();
let commit_c1 = rewrite_commit(tx.repo_mut(), &[&commit_b1], "c1");
let commit_c2 = rewrite_commit(tx.repo_mut(), &[&commit_b2, &commit_a3], "c2");
let commit_c3 = rewrite_commit(tx.repo_mut(), &[&commit_c2], "c3");
tx.repo_mut().rebase_descendants().unwrap();
let repo_c = tx.commit("c").unwrap();
let mut tx = repo_a.start_transaction();
let commit_d1 = rewrite_commit(tx.repo_mut(), &[&commit_a1], "d1");
let commit_d2 = rewrite_commit(tx.repo_mut(), &[&commit_a2], "d2");
tx.repo_mut().rebase_descendants().unwrap();
let repo_d = tx.commit("d").unwrap();
// Empty old/new ops
let predecessors = accumulate_predecessors(&[], slice::from_ref(repo_c.operation())).unwrap();
assert!(predecessors.is_empty());
let predecessors = accumulate_predecessors(slice::from_ref(repo_c.operation()), &[]).unwrap();
assert!(predecessors.is_empty());
// Empty range
let predecessors = accumulate_predecessors(
slice::from_ref(repo_c.operation()),
slice::from_ref(repo_c.operation()),
)
.unwrap();
assert!(predecessors.is_empty());
// Single forward operation
let predecessors = accumulate_predecessors(
slice::from_ref(repo_c.operation()),
slice::from_ref(repo_b.operation()),
)
.unwrap();
assert_eq!(
predecessors,
btreemap! {
commit_c1.id().clone() => vec![commit_b1.id().clone()],
commit_c2.id().clone() => vec![commit_b2.id().clone(), commit_a3.id().clone()],
commit_c3.id().clone() => vec![commit_b2.id().clone(), commit_a3.id().clone()],
}
);
// Multiple forward operations
let predecessors = accumulate_predecessors(
slice::from_ref(repo_c.operation()),
slice::from_ref(repo_a.operation()),
)
.unwrap();
assert_eq!(
predecessors,
btreemap! {
commit_b1.id().clone() => vec![commit_a1.id().clone()],
commit_b2.id().clone() => vec![commit_a2.id().clone(), commit_a3.id().clone()],
commit_c1.id().clone() => vec![commit_a1.id().clone()],
commit_c2.id().clone() => vec![commit_a2.id().clone(), commit_a3.id().clone()],
commit_c3.id().clone() => vec![commit_a2.id().clone(), commit_a3.id().clone()],
}
);
// Multiple reverse operations
let predecessors = accumulate_predecessors(
slice::from_ref(repo_a.operation()),
slice::from_ref(repo_c.operation()),
)
.unwrap();
assert_eq!(
predecessors,
btreemap! {
commit_a1.id().clone() => vec![commit_c1.id().clone()],
commit_a2.id().clone() => vec![commit_c3.id().clone()],
commit_a3.id().clone() => vec![commit_c3.id().clone()],
commit_b1.id().clone() => vec![commit_c1.id().clone()],
commit_b2.id().clone() => vec![commit_c3.id().clone()],
commit_c2.id().clone() => vec![commit_c3.id().clone()],
}
);
// Sibling operations
let predecessors = accumulate_predecessors(
slice::from_ref(repo_d.operation()),
slice::from_ref(repo_c.operation()),
)
.unwrap();
assert_eq!(
predecessors,
btreemap! {
commit_a1.id().clone() => vec![commit_c1.id().clone()],
commit_a2.id().clone() => vec![commit_c3.id().clone()],
commit_b1.id().clone() => vec![commit_c1.id().clone()],
commit_b2.id().clone() => vec![commit_c3.id().clone()],
commit_c2.id().clone() => vec![commit_c3.id().clone()],
commit_d1.id().clone() => vec![commit_c1.id().clone()],
commit_d2.id().clone() => vec![commit_c3.id().clone()],
}
);
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_local_working_copy.rs | lib/tests/test_local_working_copy.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::convert::Infallible;
use std::fs::File;
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt as _;
use std::path::Component;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use std::time::SystemTime;
use assert_matches::assert_matches;
use bstr::BString;
use indoc::indoc;
use itertools::Itertools as _;
use jj_lib::backend::CopyId;
use jj_lib::backend::TreeId;
use jj_lib::backend::TreeValue;
use jj_lib::conflict_labels::ConflictLabels;
use jj_lib::conflicts::ConflictMaterializeOptions;
use jj_lib::file_util;
use jj_lib::file_util::check_symlink_support;
use jj_lib::file_util::symlink_dir;
use jj_lib::file_util::symlink_file;
use jj_lib::files::FileMergeHunkLevel;
use jj_lib::fsmonitor::FsmonitorSettings;
use jj_lib::gitignore::GitIgnoreFile;
use jj_lib::local_working_copy::LocalWorkingCopy;
use jj_lib::local_working_copy::TreeState;
use jj_lib::local_working_copy::TreeStateSettings;
use jj_lib::merge::Merge;
use jj_lib::merge::SameChange;
use jj_lib::merged_tree::MergedTree;
use jj_lib::merged_tree::MergedTreeBuilder;
use jj_lib::op_store::OperationId;
use jj_lib::ref_name::WorkspaceName;
use jj_lib::repo::ReadonlyRepo;
use jj_lib::repo::Repo as _;
use jj_lib::repo_path::RepoPath;
use jj_lib::repo_path::RepoPathBuf;
use jj_lib::rewrite::merge_commit_trees;
use jj_lib::secret_backend::SecretBackend;
use jj_lib::tree_builder::TreeBuilder;
use jj_lib::tree_merge::MergeOptions;
use jj_lib::working_copy::CheckoutError;
use jj_lib::working_copy::CheckoutStats;
use jj_lib::working_copy::SnapshotOptions;
use jj_lib::working_copy::UntrackedReason;
use jj_lib::working_copy::WorkingCopy as _;
use jj_lib::workspace::Workspace;
use jj_lib::workspace::default_working_copy_factories;
use pollster::FutureExt as _;
use test_case::test_case;
use testutils::TestRepo;
use testutils::TestRepoBackend;
use testutils::TestWorkspace;
use testutils::assert_tree_eq;
use testutils::commit_with_tree;
use testutils::create_tree;
use testutils::create_tree_with;
use testutils::empty_snapshot_options;
use testutils::repo_path;
use testutils::repo_path_buf;
use testutils::repo_path_component;
use testutils::write_random_commit;
use tokio::io::AsyncReadExt as _;
/// Returns true if the filesystem at `dir` is case-insensitive.
///
/// Probes by creating a temp file with a lowercase prefix and checking
/// whether the upper-cased name resolves to an existing entry.
fn check_icase_fs(dir: &Path) -> bool {
    let probe = tempfile::Builder::new()
        .prefix("icase-")
        .tempfile_in(dir)
        .unwrap();
    let name = probe.path().file_name().unwrap().to_str().unwrap();
    let upper = name.to_ascii_uppercase();
    // Sanity check: the generated name must actually change when upper-cased.
    assert_ne!(name, upper);
    dir.join(upper).try_exists().unwrap()
}
/// Returns true if the directory appears to ignore some unicode zero-width
/// characters (e.g. U+200C ZERO WIDTH NON-JOINER), as HFS+ does.
fn check_hfs_plus(dir: &Path) -> bool {
    let probe = tempfile::Builder::new()
        .prefix("hfs-plus-\u{200c}-")
        .tempfile_in(dir)
        .unwrap();
    let full_name = probe.path().file_name().unwrap().to_str().unwrap();
    let without_zwnj = full_name.replace('\u{200c}', "");
    // Sanity check: stripping the zero-width character must change the name.
    assert_ne!(full_name, without_zwnj);
    // If the filesystem ignores the zero-width character, the stripped name
    // resolves to the file we just created.
    dir.join(without_zwnj).try_exists().unwrap()
}
/// Returns true if the directory appears to support Windows 8.3 short file
/// names (vFAT-style aliases).
fn check_vfat(dir: &Path) -> bool {
    // Keep the temp file alive while probing for its generated short name.
    let _probe = tempfile::Builder::new()
        .prefix("vfattest-")
        .tempfile_in(dir)
        .unwrap();
    // "vfattest-…" would be aliased as "VFATTE~1" on such filesystems.
    dir.join("VFATTE~1").try_exists().unwrap()
}
/// Converts a slice of borrowed repo paths into a vector of owned paths.
fn to_owned_path_vec(paths: &[&RepoPath]) -> Vec<RepoPathBuf> {
    let mut owned = Vec::with_capacity(paths.len());
    for &path in paths {
        owned.push(path.to_owned());
    }
    owned
}
#[test]
fn test_root() {
    // Test that the working copy is clean and empty after init.
    let mut test_workspace = TestWorkspace::init();
    let wc = test_workspace.workspace.working_copy();
    // A freshly initialized working copy tracks everything: a single root
    // sparse pattern.
    assert_eq!(wc.sparse_patterns().unwrap(), vec![RepoPathBuf::root()]);
    let new_tree = test_workspace.snapshot().unwrap();
    let repo = &test_workspace.repo;
    let wc_commit_id = repo
        .view()
        .get_wc_commit_id(WorkspaceName::DEFAULT)
        .unwrap();
    let wc_commit = repo.store().get_commit(wc_commit_id).unwrap();
    // The snapshot must match the checked-out commit's tree, and both must be
    // the empty tree since nothing was created on disk.
    assert_tree_eq!(new_tree, wc_commit.tree());
    assert_tree_eq!(new_tree, repo.store().empty_merged_tree());
}
#[test_case(TestRepoBackend::Simple ; "simple backend")]
#[test_case(TestRepoBackend::Git ; "git backend")]
fn test_checkout_file_transitions(backend: TestRepoBackend) {
    // Tests switching between commits where a certain path is of one type in one
    // commit and another type in the other. Includes a "missing" type, so we cover
    // additions and removals as well. The full Kind x Kind matrix is exercised,
    // one path per (left, right) pair.
    let mut test_workspace = TestWorkspace::init_with_backend(backend);
    let repo = &test_workspace.repo;
    let store = repo.store().clone();
    let workspace_root = test_workspace.workspace.workspace_root().to_owned();
    // Every kind of tree entry a path can transition between.
    #[derive(Debug, PartialEq, Eq, Clone, Copy)]
    enum Kind {
        Missing,
        Normal,
        Executable,
        // Executable, but same content as Normal, to test transition where only the bit changed
        ExecutableNormalContent,
        Conflict,
        // Same content as Executable, to test that transition preserves the executable bit
        ConflictedExecutableContent,
        Symlink,
        Tree,
        GitSubmodule,
    }
    // Writes one tree entry of the given `kind` at `path` into `tree_builder`.
    // `Kind::Missing` writes an absent entry; `Kind::Tree` instead writes a
    // file inside `path` so the path becomes a directory.
    fn write_path(
        repo: &Arc<ReadonlyRepo>,
        tree_builder: &mut MergedTreeBuilder,
        kind: Kind,
        path: &RepoPath,
    ) {
        let store = repo.store();
        let copy_id = CopyId::placeholder();
        let value = match kind {
            Kind::Missing => Merge::absent(),
            Kind::Normal => {
                let id = testutils::write_file(store, path, "normal file contents");
                Merge::normal(TreeValue::File {
                    id,
                    executable: false,
                    copy_id,
                })
            }
            Kind::Executable => {
                let id: jj_lib::backend::FileId =
                    testutils::write_file(store, path, "executable file contents");
                Merge::normal(TreeValue::File {
                    id,
                    executable: true,
                    copy_id,
                })
            }
            Kind::ExecutableNormalContent => {
                // Same blob as Kind::Normal; only the executable bit differs.
                let id = testutils::write_file(store, path, "normal file contents");
                Merge::normal(TreeValue::File {
                    id,
                    executable: true,
                    copy_id,
                })
            }
            Kind::Conflict => {
                // A 1-remove/2-add unresolved file conflict, all sides non-executable.
                let base_file_id = testutils::write_file(store, path, "base file contents");
                let left_file_id = testutils::write_file(store, path, "left file contents");
                let right_file_id = testutils::write_file(store, path, "right file contents");
                Merge::from_removes_adds(
                    vec![Some(TreeValue::File {
                        id: base_file_id,
                        executable: false,
                        copy_id: copy_id.clone(),
                    })],
                    vec![
                        Some(TreeValue::File {
                            id: left_file_id,
                            executable: false,
                            copy_id: copy_id.clone(),
                        }),
                        Some(TreeValue::File {
                            id: right_file_id,
                            executable: false,
                            copy_id: copy_id.clone(),
                        }),
                    ],
                )
            }
            Kind::ConflictedExecutableContent => {
                // Like Kind::Conflict, but every side has the executable bit set.
                let base_file_id = testutils::write_file(store, path, "executable file contents");
                let left_file_id =
                    testutils::write_file(store, path, "left executable file contents");
                let right_file_id =
                    testutils::write_file(store, path, "right executable file contents");
                Merge::from_removes_adds(
                    vec![Some(TreeValue::File {
                        id: base_file_id,
                        executable: true,
                        copy_id: copy_id.clone(),
                    })],
                    vec![
                        Some(TreeValue::File {
                            id: left_file_id,
                            executable: true,
                            copy_id: copy_id.clone(),
                        }),
                        Some(TreeValue::File {
                            id: right_file_id,
                            executable: true,
                            copy_id: copy_id.clone(),
                        }),
                    ],
                )
            }
            Kind::Symlink => {
                let id = store.write_symlink(path, "target").block_on().unwrap();
                Merge::normal(TreeValue::Symlink(id))
            }
            Kind::Tree => {
                // Write a child file so that `path` materializes as a directory;
                // returns early since the entry is set at the child path.
                let file_path = path.join(repo_path_component("file"));
                let id = testutils::write_file(store, &file_path, "normal file contents");
                let value = TreeValue::File {
                    id,
                    executable: false,
                    copy_id: copy_id.clone(),
                };
                tree_builder.set_or_remove(file_path, Merge::normal(value));
                return;
            }
            Kind::GitSubmodule => {
                // Create a real commit to reference as the submodule head.
                let mut tx = repo.start_transaction();
                let id = write_random_commit(tx.repo_mut()).id().clone();
                tx.commit("test").unwrap();
                Merge::normal(TreeValue::GitSubmodule(id))
            }
        };
        tree_builder.set_or_remove(path.to_owned(), value);
    }
    let mut kinds = vec![
        Kind::Missing,
        Kind::Normal,
        Kind::Executable,
        Kind::ExecutableNormalContent,
        Kind::Conflict,
        Kind::ConflictedExecutableContent,
        Kind::Tree,
    ];
    kinds.push(Kind::Symlink);
    // Submodules are only representable with the Git backend.
    if backend == TestRepoBackend::Git {
        kinds.push(Kind::GitSubmodule);
    }
    // Build one path per (left_kind, right_kind) combination, named after the pair.
    let mut left_tree_builder = MergedTreeBuilder::new(store.empty_merged_tree());
    let mut right_tree_builder = MergedTreeBuilder::new(store.empty_merged_tree());
    let mut files = vec![];
    for left_kind in &kinds {
        for right_kind in &kinds {
            let path = repo_path_buf(format!("{left_kind:?}_{right_kind:?}"));
            write_path(repo, &mut left_tree_builder, *left_kind, &path);
            write_path(repo, &mut right_tree_builder, *right_kind, &path);
            files.push((*left_kind, *right_kind, path.clone()));
        }
    }
    let left_tree = left_tree_builder.write_tree().unwrap();
    let right_tree = right_tree_builder.write_tree().unwrap();
    let left_commit = commit_with_tree(&store, left_tree);
    let right_commit = commit_with_tree(&store, right_tree.clone());
    // Check out left, then transition to right.
    let ws = &mut test_workspace.workspace;
    ws.check_out(repo.op_id().clone(), None, &left_commit)
        .unwrap();
    ws.check_out(repo.op_id().clone(), None, &right_commit)
        .unwrap();
    // Check that the working copy is clean.
    let new_tree = test_workspace.snapshot().unwrap();
    assert_tree_eq!(new_tree, right_tree);
    // Verify the on-disk state of every path against its target (right) kind.
    for (_left_kind, right_kind, path) in &files {
        let wc_path = workspace_root.join(path.as_internal_file_string());
        // symlink_metadata() does not follow symlinks, so symlinks are observable.
        let maybe_metadata = wc_path.symlink_metadata();
        match right_kind {
            Kind::Missing => {
                assert!(maybe_metadata.is_err(), "{path:?} should not exist");
            }
            Kind::Normal => {
                assert!(maybe_metadata.is_ok(), "{path:?} should exist");
                let metadata = maybe_metadata.unwrap();
                assert!(metadata.is_file(), "{path:?} should be a file");
                #[cfg(unix)]
                assert_eq!(
                    metadata.permissions().mode() & 0o111,
                    0,
                    "{path:?} should not be executable"
                );
            }
            Kind::Executable | Kind::ExecutableNormalContent => {
                assert!(maybe_metadata.is_ok(), "{path:?} should exist");
                let metadata = maybe_metadata.unwrap();
                assert!(metadata.is_file(), "{path:?} should be a file");
                #[cfg(unix)]
                assert_ne!(
                    metadata.permissions().mode() & 0o111,
                    0,
                    "{path:?} should be executable"
                );
            }
            Kind::Conflict => {
                // Conflicts materialize as regular (non-executable) files.
                assert!(maybe_metadata.is_ok(), "{path:?} should exist");
                let metadata = maybe_metadata.unwrap();
                assert!(metadata.is_file(), "{path:?} should be a file");
                #[cfg(unix)]
                assert_eq!(
                    metadata.permissions().mode() & 0o111,
                    0,
                    "{path:?} should not be executable"
                );
            }
            Kind::ConflictedExecutableContent => {
                // All conflict sides were executable, so the bit must be kept.
                assert!(maybe_metadata.is_ok(), "{path:?} should exist");
                let metadata = maybe_metadata.unwrap();
                assert!(metadata.is_file(), "{path:?} should be a file");
                #[cfg(unix)]
                assert_ne!(
                    metadata.permissions().mode() & 0o111,
                    0,
                    "{path:?} should be executable"
                );
            }
            Kind::Symlink => {
                assert!(maybe_metadata.is_ok(), "{path:?} should exist");
                let metadata = maybe_metadata.unwrap();
                // On platforms without symlink support a fallback is written,
                // so only assert the file type when symlinks are available.
                if check_symlink_support().unwrap_or(false) {
                    assert!(
                        metadata.file_type().is_symlink(),
                        "{path:?} should be a symlink"
                    );
                }
            }
            Kind::Tree => {
                assert!(maybe_metadata.is_ok(), "{path:?} should exist");
                let metadata = maybe_metadata.unwrap();
                assert!(metadata.is_dir(), "{path:?} should be a directory");
            }
            Kind::GitSubmodule => {
                // Not supported for now
                assert!(maybe_metadata.is_err(), "{path:?} should not exist");
            }
        };
    }
}
#[test]
fn test_checkout_no_op() {
    // Check out another commit with the same tree that's already checked out. The
    // recorded operation should be updated even though the tree is unchanged.
    let mut test_workspace = TestWorkspace::init();
    let repo = test_workspace.repo.clone();
    let file_path = repo_path("file");
    // Two distinct commits sharing the exact same tree.
    let tree = create_tree(&repo, &[(file_path, "contents")]);
    let commit1 = commit_with_tree(repo.store(), tree.clone());
    let commit2 = commit_with_tree(repo.store(), tree);
    let ws = &mut test_workspace.workspace;
    ws.check_out(repo.op_id().clone(), None, &commit1).unwrap();
    // Test the setup: the file should exist in the tree state.
    let wc: &LocalWorkingCopy = ws.working_copy().downcast_ref().unwrap();
    assert!(wc.file_states().unwrap().contains_path(file_path));
    // Update to commit2 (same tree as commit1)
    let new_op_id = OperationId::from_bytes(b"whatever");
    let stats = ws.check_out(new_op_id.clone(), None, &commit2).unwrap();
    // No files should have been touched on disk.
    assert_eq!(stats, CheckoutStats::default());
    // The tree state is unchanged but the recorded operation id is updated.
    let wc: &LocalWorkingCopy = ws.working_copy().downcast_ref().unwrap();
    assert!(wc.file_states().unwrap().contains_path(file_path));
    assert_eq!(*wc.operation_id(), new_op_id);
}
// Test case for issue #2165: checking out a commit whose tree has a conflict
// inside a subdirectory.
#[test]
fn test_conflict_subdirectory() {
    let mut test_workspace = TestWorkspace::init();
    let repo = &test_workspace.repo;
    let path = repo_path("sub/file");
    let empty_tree = create_tree(repo, &[]);
    let tree1 = create_tree(repo, &[(path, "0")]);
    let commit1 = commit_with_tree(repo.store(), tree1.clone());
    let tree2 = create_tree(repo, &[(path, "1")]);
    // Merge (tree1, empty, tree2): "sub/file" has two conflicting sides.
    let merged_tree = MergedTree::merge(Merge::from_vec(vec![
        (tree1, "tree 1".into()),
        (empty_tree, "empty".into()),
        (tree2, "tree 2".into()),
    ]))
    .block_on()
    .unwrap();
    let merged_commit = commit_with_tree(repo.store(), merged_tree);
    let repo = &test_workspace.repo;
    let ws = &mut test_workspace.workspace;
    // Check out the plain commit first, then the conflicted one; both must
    // succeed (this sequence reproduced the original bug).
    ws.check_out(repo.op_id().clone(), None, &commit1).unwrap();
    ws.check_out(repo.op_id().clone(), None, &merged_commit)
        .unwrap();
}
// Tests that files hidden by the backend are neither written to nor deleted
// from the working copy on checkout.
// NOTE(review): SecretBackend appears to hide paths under "secret/" and files
// whose content is "secret" — confirm against the SecretBackend implementation.
#[test]
fn test_acl() {
    let settings = testutils::user_settings();
    let test_workspace =
        TestWorkspace::init_with_backend_and_settings(TestRepoBackend::Git, &settings);
    let repo = &test_workspace.repo;
    let workspace_root = test_workspace.workspace.workspace_root().to_owned();
    let secret_modified_path = repo_path("secret/modified");
    let secret_added_path = repo_path("secret/added");
    let secret_deleted_path = repo_path("secret/deleted");
    let became_secret_path = repo_path("file1");
    let became_public_path = repo_path("file2");
    let tree1 = create_tree(
        repo,
        &[
            (secret_modified_path, "0"),
            (secret_deleted_path, "0"),
            (became_secret_path, "public"),
            (became_public_path, "secret"),
        ],
    );
    let tree2 = create_tree(
        repo,
        &[
            (secret_modified_path, "1"),
            (secret_added_path, "1"),
            (became_secret_path, "secret"),
            (became_public_path, "public"),
        ],
    );
    let commit1 = commit_with_tree(repo.store(), tree1);
    let commit2 = commit_with_tree(repo.store(), tree2);
    // Wrap the Git repo in the secret-hiding backend, then reload the workspace
    // so that it goes through that backend.
    SecretBackend::adopt_git_repo(&workspace_root);
    let mut ws = Workspace::load(
        &settings,
        &workspace_root,
        &test_workspace.env.default_store_factories(),
        &default_working_copy_factories(),
    )
    .unwrap();
    // Reload commits from the store associated with the workspace
    let repo = ws.repo_loader().load_at(repo.operation()).unwrap();
    let commit1 = repo.store().get_commit(commit1.id()).unwrap();
    let commit2 = repo.store().get_commit(commit2.id()).unwrap();
    ws.check_out(repo.op_id().clone(), None, &commit1).unwrap();
    // After checking out commit1: secret paths are absent from disk; only the
    // currently-public file materializes.
    assert!(
        !secret_modified_path
            .to_fs_path_unchecked(&workspace_root)
            .is_file()
    );
    assert!(
        !secret_added_path
            .to_fs_path_unchecked(&workspace_root)
            .is_file()
    );
    assert!(
        !secret_deleted_path
            .to_fs_path_unchecked(&workspace_root)
            .is_file()
    );
    assert!(
        became_secret_path
            .to_fs_path_unchecked(&workspace_root)
            .is_file()
    );
    assert!(
        !became_public_path
            .to_fs_path_unchecked(&workspace_root)
            .is_file()
    );
    ws.check_out(repo.op_id().clone(), None, &commit2).unwrap();
    // After checking out commit2: file1 (now secret) is removed, file2 (now
    // public) appears; the secret/ paths still never touch disk.
    assert!(
        !secret_modified_path
            .to_fs_path_unchecked(&workspace_root)
            .is_file()
    );
    assert!(
        !secret_added_path
            .to_fs_path_unchecked(&workspace_root)
            .is_file()
    );
    assert!(
        !secret_deleted_path
            .to_fs_path_unchecked(&workspace_root)
            .is_file()
    );
    assert!(
        !became_secret_path
            .to_fs_path_unchecked(&workspace_root)
            .is_file()
    );
    assert!(
        became_public_path
            .to_fs_path_unchecked(&workspace_root)
            .is_file()
    );
}
// Tests checking out trees where a path flips between being a file and being
// a directory (file -> dir -> file at the same path).
#[test]
fn test_tree_builder_file_directory_transition() {
    let test_workspace = TestWorkspace::init();
    let repo = &test_workspace.repo;
    let store = repo.store();
    let mut ws = test_workspace.workspace;
    let workspace_root = ws.workspace_root().to_owned();
    // Helper: wrap a bare tree id in a resolved commit and check it out.
    let mut check_out_tree = |tree_id: &TreeId| {
        let tree = repo.store().get_tree(RepoPathBuf::root(), tree_id).unwrap();
        let commit = commit_with_tree(
            repo.store(),
            MergedTree::resolved(repo.store().clone(), tree.id().clone()),
        );
        ws.check_out(repo.op_id().clone(), None, &commit).unwrap();
    };
    // child_path is nested under parent_path, so the two cannot coexist.
    let parent_path = repo_path("foo/bar");
    let child_path = repo_path("foo/bar/baz");
    // Add file at parent_path
    let mut tree_builder = TreeBuilder::new(store.clone(), store.empty_tree_id().clone());
    tree_builder.set(
        parent_path.to_owned(),
        TreeValue::File {
            id: testutils::write_file(store, parent_path, ""),
            executable: false,
            copy_id: CopyId::placeholder(),
        },
    );
    let tree_id = tree_builder.write_tree().unwrap();
    check_out_tree(&tree_id);
    assert!(parent_path.to_fs_path_unchecked(&workspace_root).is_file());
    assert!(!child_path.to_fs_path_unchecked(&workspace_root).exists());
    // Turn parent_path into directory, add file at child_path
    let mut tree_builder = TreeBuilder::new(store.clone(), tree_id);
    tree_builder.remove(parent_path.to_owned());
    tree_builder.set(
        child_path.to_owned(),
        TreeValue::File {
            id: testutils::write_file(store, child_path, ""),
            executable: false,
            copy_id: CopyId::placeholder(),
        },
    );
    let tree_id = tree_builder.write_tree().unwrap();
    check_out_tree(&tree_id);
    assert!(parent_path.to_fs_path_unchecked(&workspace_root).is_dir());
    assert!(child_path.to_fs_path_unchecked(&workspace_root).is_file());
    // Turn parent_path back to file
    let mut tree_builder = TreeBuilder::new(store.clone(), tree_id);
    tree_builder.remove(child_path.to_owned());
    tree_builder.set(
        parent_path.to_owned(),
        TreeValue::File {
            id: testutils::write_file(store, parent_path, ""),
            executable: false,
            copy_id: CopyId::placeholder(),
        },
    );
    let tree_id = tree_builder.write_tree().unwrap();
    check_out_tree(&tree_id);
    assert!(parent_path.to_fs_path_unchecked(&workspace_root).is_file());
    assert!(!child_path.to_fs_path_unchecked(&workspace_root).exists());
}
// Tests that checkout leaves pre-existing, conflicting paths on disk intact
// and reports them as skipped rather than overwriting them.
#[test]
fn test_conflicting_changes_on_disk() {
    let test_workspace = TestWorkspace::init();
    let repo = &test_workspace.repo;
    let mut ws = test_workspace.workspace;
    let workspace_root = ws.workspace_root().to_owned();
    // file on disk conflicts with file in target commit
    let file_file_path = repo_path("file-file");
    // file on disk conflicts with directory in target commit
    let file_dir_path = repo_path("file-dir");
    // directory on disk conflicts with file in target commit
    let dir_file_path = repo_path("dir-file");
    let tree = create_tree(
        repo,
        &[
            (file_file_path, "committed contents"),
            (
                &file_dir_path.join(repo_path_component("file")),
                "committed contents",
            ),
            (dir_file_path, "committed contents"),
        ],
    );
    let commit = commit_with_tree(repo.store(), tree);
    // Create the conflicting on-disk state before checking out.
    std::fs::write(
        file_file_path.to_fs_path_unchecked(&workspace_root),
        "contents on disk",
    )
    .unwrap();
    std::fs::write(
        file_dir_path.to_fs_path_unchecked(&workspace_root),
        "contents on disk",
    )
    .unwrap();
    std::fs::create_dir(dir_file_path.to_fs_path_unchecked(&workspace_root)).unwrap();
    std::fs::write(
        dir_file_path
            .to_fs_path_unchecked(&workspace_root)
            .join("file"),
        "contents on disk",
    )
    .unwrap();
    let stats = ws.check_out(repo.op_id().clone(), None, &commit).unwrap();
    // All three paths count both as "added" (in the target tree) and "skipped"
    // (not written because something was already there).
    assert_eq!(
        stats,
        CheckoutStats {
            updated_files: 0,
            added_files: 3,
            removed_files: 0,
            skipped_files: 3
        }
    );
    // The on-disk contents must be untouched in every case.
    assert_eq!(
        std::fs::read_to_string(file_file_path.to_fs_path_unchecked(&workspace_root)).ok(),
        Some("contents on disk".to_string())
    );
    assert_eq!(
        std::fs::read_to_string(file_dir_path.to_fs_path_unchecked(&workspace_root)).ok(),
        Some("contents on disk".to_string())
    );
    assert_eq!(
        std::fs::read_to_string(
            dir_file_path
                .to_fs_path_unchecked(&workspace_root)
                .join("file")
        )
        .ok(),
        Some("contents on disk".to_string())
    );
}
// Tests that reset() retargets the tree state without touching files on disk,
// in both directions (tracked -> untracked-but-ignored and back).
#[test]
fn test_reset() {
    let mut test_workspace = TestWorkspace::init();
    let repo = &test_workspace.repo;
    let op_id = repo.op_id().clone();
    let workspace_root = test_workspace.workspace.workspace_root().to_owned();
    // "ignored" is matched by the .gitignore in both trees.
    let ignored_path = repo_path("ignored");
    let gitignore_path = repo_path(".gitignore");
    let tree_without_file = create_tree(repo, &[(gitignore_path, "ignored\n")]);
    let commit_without_file = commit_with_tree(repo.store(), tree_without_file.clone());
    let tree_with_file = create_tree(
        repo,
        &[(gitignore_path, "ignored\n"), (ignored_path, "code")],
    );
    let commit_with_file = commit_with_tree(repo.store(), tree_with_file.clone());
    let ws = &mut test_workspace.workspace;
    let commit = commit_with_tree(repo.store(), tree_with_file.clone());
    ws.check_out(repo.op_id().clone(), None, &commit).unwrap();
    // Test the setup: the file should exist on disk and in the tree state.
    assert!(ignored_path.to_fs_path_unchecked(&workspace_root).is_file());
    let wc: &LocalWorkingCopy = ws.working_copy().downcast_ref().unwrap();
    assert!(wc.file_states().unwrap().contains_path(ignored_path));
    // After we reset to the commit without the file, it should still exist on disk,
    // but it should not be in the tree state, and it should not get added when we
    // commit the working copy (because it's ignored).
    let mut locked_ws = ws.start_working_copy_mutation().unwrap();
    locked_ws
        .locked_wc()
        .reset(&commit_without_file)
        .block_on()
        .unwrap();
    locked_ws.finish(op_id.clone()).unwrap();
    assert!(ignored_path.to_fs_path_unchecked(&workspace_root).is_file());
    let wc: &LocalWorkingCopy = ws.working_copy().downcast_ref().unwrap();
    assert!(!wc.file_states().unwrap().contains_path(ignored_path));
    let new_tree = test_workspace.snapshot().unwrap();
    assert_tree_eq!(new_tree, tree_without_file);
    // Now test the opposite direction: resetting to a commit where the file is
    // tracked. The file should become tracked (even though it's ignored).
    let ws = &mut test_workspace.workspace;
    let mut locked_ws = ws.start_working_copy_mutation().unwrap();
    locked_ws
        .locked_wc()
        .reset(&commit_with_file)
        .block_on()
        .unwrap();
    locked_ws.finish(op_id.clone()).unwrap();
    assert!(ignored_path.to_fs_path_unchecked(&workspace_root).is_file());
    let wc: &LocalWorkingCopy = ws.working_copy().downcast_ref().unwrap();
    assert!(wc.file_states().unwrap().contains_path(ignored_path));
    let new_tree = test_workspace.snapshot().unwrap();
    assert_tree_eq!(new_tree, tree_with_file);
}
#[test]
fn test_checkout_discard() {
    // Start a mutation, do a checkout, and then discard the mutation. The working
    // copy files should remain changed, but the state files should not be
    // written.
    let mut test_workspace = TestWorkspace::init();
    let repo = test_workspace.repo.clone();
    let workspace_root = test_workspace.workspace.workspace_root().to_owned();
    let file1_path = repo_path("file1");
    let file2_path = repo_path("file2");
    let store = repo.store();
    let tree1 = create_tree(&repo, &[(file1_path, "contents")]);
    let tree2 = create_tree(&repo, &[(file2_path, "contents")]);
    let commit1 = commit_with_tree(repo.store(), tree1);
    let commit2 = commit_with_tree(repo.store(), tree2);
    let ws = &mut test_workspace.workspace;
    ws.check_out(repo.op_id().clone(), None, &commit1).unwrap();
    let wc: &LocalWorkingCopy = ws.working_copy().downcast_ref().unwrap();
    // Remember the state dir so we can reload the persisted state independently.
    let state_path = wc.state_path().to_path_buf();
    // Test the setup: the file should exist on disk and in the tree state.
    assert!(file1_path.to_fs_path_unchecked(&workspace_root).is_file());
    let wc: &LocalWorkingCopy = ws.working_copy().downcast_ref().unwrap();
    assert!(wc.file_states().unwrap().contains_path(file1_path));
    // Start a checkout
    let mut locked_ws = ws.start_working_copy_mutation().unwrap();
    locked_ws
        .locked_wc()
        .check_out(&commit2)
        .block_on()
        .unwrap();
    // The change should be reflected in the working copy but not saved
    assert!(!file1_path.to_fs_path_unchecked(&workspace_root).is_file());
    assert!(file2_path.to_fs_path_unchecked(&workspace_root).is_file());
    // A fresh load from disk still sees the pre-checkout file states.
    let reloaded_wc = LocalWorkingCopy::load(
        store.clone(),
        workspace_root.clone(),
        state_path.clone(),
        repo.settings(),
    )
    .unwrap();
    assert!(reloaded_wc.file_states().unwrap().contains_path(file1_path));
    assert!(!reloaded_wc.file_states().unwrap().contains_path(file2_path));
    // Dropping the lock without finish() discards the mutation.
    drop(locked_ws);
    // The change should remain in the working copy, but not in memory and not saved
    let wc: &LocalWorkingCopy = ws.working_copy().downcast_ref().unwrap();
    assert!(wc.file_states().unwrap().contains_path(file1_path));
    assert!(!wc.file_states().unwrap().contains_path(file2_path));
    assert!(!file1_path.to_fs_path_unchecked(&workspace_root).is_file());
    assert!(file2_path.to_fs_path_unchecked(&workspace_root).is_file());
    let reloaded_wc =
        LocalWorkingCopy::load(store.clone(), workspace_root, state_path, repo.settings()).unwrap();
    assert!(reloaded_wc.file_states().unwrap().contains_path(file1_path));
    assert!(!reloaded_wc.file_states().unwrap().contains_path(file2_path));
}
// Tests that snapshotting picks up file <-> directory transitions made
// directly on disk, at both the top level and in subdirectories.
#[test]
fn test_snapshot_file_directory_transition() {
    let mut test_workspace = TestWorkspace::init();
    let repo = test_workspace.repo.clone();
    let workspace_root = test_workspace.workspace.workspace_root().to_owned();
    let to_ws_path = |path: &RepoPath| path.to_fs_path(&workspace_root).unwrap();
    // file <-> directory transition at root and sub directories
    let file1_path = repo_path("foo/bar");
    let file2_path = repo_path("sub/bar/baz");
    // In tree1 the *parents* are files; in tree2 they are directories
    // containing the child files.
    let file1p_path = file1_path.parent().unwrap();
    let file2p_path = file2_path.parent().unwrap();
    let tree1 = create_tree(&repo, &[(file1p_path, "1p"), (file2p_path, "2p")]);
    let tree2 = create_tree(&repo, &[(file1_path, "1"), (file2_path, "2")]);
    let commit1 = commit_with_tree(repo.store(), tree1.clone());
    let commit2 = commit_with_tree(repo.store(), tree2.clone());
    let ws = &mut test_workspace.workspace;
    ws.check_out(repo.op_id().clone(), None, &commit1).unwrap();
    // file -> directory
    std::fs::remove_file(to_ws_path(file1p_path)).unwrap();
    std::fs::remove_file(to_ws_path(file2p_path)).unwrap();
    std::fs::create_dir(to_ws_path(file1p_path)).unwrap();
    std::fs::create_dir(to_ws_path(file2p_path)).unwrap();
    std::fs::write(to_ws_path(file1_path), "1").unwrap();
    std::fs::write(to_ws_path(file2_path), "2").unwrap();
    let new_tree = test_workspace.snapshot().unwrap();
    assert_tree_eq!(new_tree, tree2);
    let ws = &mut test_workspace.workspace;
    ws.check_out(repo.op_id().clone(), None, &commit2).unwrap();
    // directory -> file
    std::fs::remove_file(to_ws_path(file1_path)).unwrap();
    std::fs::remove_file(to_ws_path(file2_path)).unwrap();
    std::fs::remove_dir(to_ws_path(file1p_path)).unwrap();
    std::fs::remove_dir(to_ws_path(file2p_path)).unwrap();
    std::fs::write(to_ws_path(file1p_path), "1p").unwrap();
    std::fs::write(to_ws_path(file2p_path), "2p").unwrap();
    let new_tree = test_workspace.snapshot().unwrap();
    assert_tree_eq!(new_tree, tree1);
}
#[test]
fn test_materialize_snapshot_conflicted_files() {
let mut test_workspace = TestWorkspace::init();
let repo = &test_workspace.repo.clone();
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_rewrite.rs | lib/tests/test_rewrite.rs | // Copyright 2021 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use assert_matches::assert_matches;
use itertools::Itertools as _;
use jj_lib::backend::ChangeId;
use jj_lib::commit::Commit;
use jj_lib::matchers::EverythingMatcher;
use jj_lib::matchers::FilesMatcher;
use jj_lib::merge::Merge;
use jj_lib::merged_tree::MergedTree;
use jj_lib::op_store::RefTarget;
use jj_lib::op_store::RemoteRef;
use jj_lib::op_store::RemoteRefState;
use jj_lib::ref_name::RefName;
use jj_lib::ref_name::RemoteName;
use jj_lib::ref_name::RemoteRefSymbol;
use jj_lib::ref_name::WorkspaceName;
use jj_lib::ref_name::WorkspaceNameBuf;
use jj_lib::repo::Repo as _;
use jj_lib::rewrite::CommitRewriter;
use jj_lib::rewrite::CommitWithSelection;
use jj_lib::rewrite::EmptyBehavior;
use jj_lib::rewrite::MoveCommitsTarget;
use jj_lib::rewrite::RebaseOptions;
use jj_lib::rewrite::RewriteRefsOptions;
use jj_lib::rewrite::find_duplicate_divergent_commits;
use jj_lib::rewrite::find_recursive_merge_commits;
use jj_lib::rewrite::merge_commit_trees;
use jj_lib::rewrite::rebase_commit_with_options;
use jj_lib::rewrite::restore_tree;
use maplit::hashmap;
use maplit::hashset;
use pollster::FutureExt as _;
use test_case::test_case;
use testutils::TestRepo;
use testutils::assert_abandoned_with_parent;
use testutils::assert_rebased_onto;
use testutils::assert_tree_eq;
use testutils::create_random_commit;
use testutils::create_tree;
use testutils::create_tree_with;
use testutils::rebase_descendants_with_options_return_map;
use testutils::repo_path;
use testutils::write_random_commit;
use testutils::write_random_commit_with_parents;
/// Builds a [`RemoteRefSymbol`] from any borrowable ref-name/remote-name pair.
fn remote_symbol<'a, N, M>(name: &'a N, remote: &'a M) -> RemoteRefSymbol<'a>
where
    N: AsRef<RefName> + ?Sized,
    M: AsRef<RemoteName> + ?Sized,
{
    let name = name.as_ref();
    let remote = remote.as_ref();
    RemoteRefSymbol { name, remote }
}
/// Based on https://lore.kernel.org/git/Pine.LNX.4.44.0504271254120.4678-100000@wax.eds.org/
/// (found in t/t6401-merge-criss-cross.sh in the git.git repo).
///
/// Builds the criss-cross history A -> {B, C} -> {D, E} (both D and E merge
/// B and C) and checks that merging D's and E's trees combines the line
/// changes from both sides.
#[test]
fn test_merge_criss_cross() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let path = repo_path("file");
    // D changes line 8 (relative to C), E changes line 3 (relative to B); the
    // expected merge takes both changes.
    let tree_a = create_tree(repo, &[(path, "1\n2\n3\n4\n5\n6\n7\n8\n9\n")]);
    let tree_b = create_tree(repo, &[(path, "1\n2\n3\n4\n5\n6\n7\n8B\n9\n")]);
    let tree_c = create_tree(repo, &[(path, "1\n2\n3C\n4\n5\n6\n7\n8\n9\n")]);
    let tree_d = create_tree(repo, &[(path, "1\n2\n3C\n4\n5\n6\n7\n8D\n9\n")]);
    let tree_e = create_tree(repo, &[(path, "1\n2\n3E\n4\n5\n6\n7\n8B\n9\n")]);
    let tree_expected = create_tree(repo, &[(path, "1\n2\n3E\n4\n5\n6\n7\n8D\n9\n")]);
    let mut tx = repo.start_transaction();
    let mut make_commit = |description, parents, tree| {
        tx.repo_mut()
            .new_commit(parents, tree)
            .set_description(description)
            .write()
            .unwrap()
    };
    let commit_a = make_commit("A", vec![repo.store().root_commit_id().clone()], tree_a);
    let commit_b = make_commit("B", vec![commit_a.id().clone()], tree_b);
    let commit_c = make_commit("C", vec![commit_a.id().clone()], tree_c);
    let commit_d = make_commit(
        "D",
        vec![commit_b.id().clone(), commit_c.id().clone()],
        tree_d,
    );
    let commit_e = make_commit(
        "E",
        vec![commit_b.id().clone(), commit_c.id().clone()],
        tree_e,
    );
    let merged = merge_commit_trees(tx.repo_mut(), &[commit_d, commit_e])
        .block_on()
        .unwrap();
    assert_tree_eq!(merged, tree_expected);
}
// Tests find_recursive_merge_commits() on a criss-cross graph:
// A -> {B, C} -> {D, E} where both D and E merge B and C.
#[test]
fn test_find_recursive_merge_commits() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b, &commit_c]);
    let commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b, &commit_c]);
    let commit_id_merge = find_recursive_merge_commits(
        tx.repo().store(),
        tx.repo().index(),
        vec![commit_d.id().clone(), commit_e.id().clone()],
    )
    .unwrap();
    // Expected nested merge structure: D and E as the outer adds, their common
    // ancestors B and C one level in, and A as the innermost base.
    assert_eq!(
        commit_id_merge,
        Merge::from_vec(vec![
            commit_d.id().clone(),
            commit_b.id().clone(),
            commit_a.id().clone(),
            commit_c.id().clone(),
            commit_e.id().clone(),
        ])
    );
}
// Tests restore_tree(): copying matched paths from a source tree ("left")
// into a destination tree ("right"), with full and partial matchers.
#[test]
fn test_restore_tree() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let path1 = repo_path("file1");
    let path2 = repo_path("dir1/file2");
    let path3 = repo_path("dir1/file3");
    let path4 = repo_path("dir2/file4");
    // Note: path1 exists only in `right`, path4 only in `left`.
    let left = create_tree(repo, &[(path2, "left"), (path3, "left"), (path4, "left")]);
    let right = create_tree(
        repo,
        &[(path1, "right"), (path2, "right"), (path3, "right")],
    );
    // Restore everything using EverythingMatcher
    let restored = restore_tree(&left, &right, &EverythingMatcher)
        .block_on()
        .unwrap();
    assert_tree_eq!(restored, left);
    // Restore everything using FilesMatcher
    let restored = restore_tree(
        &left,
        &right,
        &FilesMatcher::new([&path1, &path2, &path3, &path4]),
    )
    .block_on()
    .unwrap();
    assert_tree_eq!(restored, left);
    // Restore some files
    // path1 is restored to absent (not in left), path2 takes left's content,
    // and unmatched path3 keeps right's content.
    let restored = restore_tree(&left, &right, &FilesMatcher::new([path1, path2]))
        .block_on()
        .unwrap();
    let expected = create_tree(repo, &[(path2, "left"), (path3, "right")]);
    assert_tree_eq!(restored, expected);
}
#[test]
fn test_restore_tree_with_conflicts() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let path1 = repo_path("file1");
    let path2 = repo_path("dir1/file2");
    // `left` is built from 5 trees (a 3-sided conflict), `right` from 3 trees
    // (a 2-sided conflict). In `right`, path2 has the same value on all
    // sides, i.e. it is resolved at that path.
    let left_side1 = create_tree(repo, &[(path1, "left side 1"), (path2, "left side 1")]);
    let left_base1 = create_tree(repo, &[(path1, "left base"), (path2, "left base 1")]);
    let left_side2 = create_tree(repo, &[(path1, "left side 2"), (path2, "left side 2")]);
    let left_base2 = create_tree(repo, &[(path1, "left base"), (path2, "left base 2")]);
    let left_side3 = create_tree(repo, &[(path1, "left base"), (path2, "left side 3")]);
    let right_side1 = create_tree(repo, &[(path1, "right side 1"), (path2, "resolved")]);
    let right_base1 = create_tree(repo, &[(path1, "right base"), (path2, "resolved")]);
    let right_side2 = create_tree(repo, &[(path1, "right side 2"), (path2, "resolved")]);
    let left = MergedTree::merge(Merge::from_vec(vec![
        (left_side1, "left side 1".into()),
        (left_base1, "left base 1".into()),
        (left_side2, "left side 2".into()),
        (left_base2, "left base 2".into()),
        (left_side3, "left side 3".into()),
    ]))
    .block_on()
    .unwrap();
    let right = MergedTree::merge(Merge::from_vec(vec![
        (right_side1, "right side 1".into()),
        (right_base1, "right base 1".into()),
        (right_side2, "right side 2".into()),
    ]))
    .block_on()
    .unwrap();
    // Restore everything using EverythingMatcher
    let restored = restore_tree(&left, &right, &EverythingMatcher)
        .block_on()
        .unwrap();
    assert_tree_eq!(restored, left);
    // Restore a single file
    let restored = restore_tree(&left, &right, &FilesMatcher::new([path2]))
        .block_on()
        .unwrap();
    // path2 takes `left`'s conflicted values on all 5 sides, while path1
    // keeps `right`'s values in the first three trees and is absent from the
    // last two (left has 5 trees, right only 3).
    let expected_side1 = create_tree(repo, &[(path1, "right side 1"), (path2, "left side 1")]);
    let expected_base1 = create_tree(repo, &[(path1, "right base"), (path2, "left base 1")]);
    let expected_side2 = create_tree(repo, &[(path1, "right side 2"), (path2, "left side 2")]);
    let expected_base2 = create_tree(repo, &[(path2, "left base 2")]);
    let expected_side3 = create_tree(repo, &[(path2, "left side 3")]);
    // TODO: we should preserve conflict labels when restoring somehow
    let expected = MergedTree::merge_no_resolve(Merge::from_vec(vec![
        (expected_side1, String::new()),
        (expected_base1, String::new()),
        (expected_side2, String::new()),
        (expected_base2, String::new()),
        (expected_side3, String::new()),
    ]));
    assert_tree_eq!(restored, expected);
}
#[test]
fn test_rebase_descendants_sideways() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Commit B was replaced by commit F. Commits C-E should be rebased.
    //
    // F
    // | D
    // | C E
    // | |/
    // | B
    // |/
    // A
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_c]);
    let commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_f = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    // Record B as rewritten to F; its descendants will follow on rebase.
    tx.repo_mut()
        .set_rewritten_commit(commit_b.id().clone(), commit_f.id().clone());
    let rebase_map =
        rebase_descendants_with_options_return_map(tx.repo_mut(), &RebaseOptions::default());
    assert_eq!(rebase_map.len(), 3);
    // C and E land directly on F; D follows its rebased parent C.
    let new_commit_c = assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit_c, &[commit_f.id()]);
    let new_commit_d =
        assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit_d, &[new_commit_c.id()]);
    let new_commit_e = assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit_e, &[commit_f.id()]);
    // The rebased leaves become the new visible heads.
    assert_eq!(
        *tx.repo().view().heads(),
        hashset! {
            new_commit_d.id().clone(),
            new_commit_e.id().clone()
        }
    );
}
#[test]
fn test_rebase_descendants_forward() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Commit B was replaced by commit F. Commits C and E should be rebased onto F.
    // Commit D does not get rebased because it's an ancestor of the
    // destination. Commit G does not get replaced because it's already in
    // place.
    // TODO: The above is not what actually happens! The test below shows what
    // actually happens: D and F also get rebased onto F, so we end up with
    // duplicates. Consider if it's worth supporting the case above better or if
    // that decision belongs with the caller (as we currently force it to do by
    // not supporting it in DescendantRebaser).
    //
    // G
    // F E
    // |/
    // D C
    // |/
    // B
    // A
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_d]);
    let commit_f = write_random_commit_with_parents(tx.repo_mut(), &[&commit_d]);
    let commit_g = write_random_commit_with_parents(tx.repo_mut(), &[&commit_f]);
    tx.repo_mut()
        .set_rewritten_commit(commit_b.id().clone(), commit_f.id().clone());
    let rebase_map =
        rebase_descendants_with_options_return_map(tx.repo_mut(), &RebaseOptions::default());
    // All of C, D, E, F, G are rebased — including D and F, which leads to
    // the duplication described in the TODO above.
    let new_commit_d =
        assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit_d, &[(commit_f.id())]);
    let new_commit_f =
        assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit_f, &[new_commit_d.id()]);
    let new_commit_c =
        assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit_c, &[new_commit_f.id()]);
    let new_commit_e =
        assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit_e, &[new_commit_d.id()]);
    let new_commit_g =
        assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit_g, &[new_commit_f.id()]);
    assert_eq!(rebase_map.len(), 5);
    assert_eq!(
        *tx.repo().view().heads(),
        hashset! {
            new_commit_c.id().clone(),
            new_commit_e.id().clone(),
            new_commit_g.id().clone(),
        }
    );
}
#[test]
fn test_rebase_descendants_reorder() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Commit E was replaced by commit D, and commit C was replaced by commit F
    // (attempting to reorder C and E), and commit G was replaced by commit
    // H.
    //
    // I
    // G H
    // E F
    // C D
    // |/
    // B
    // A
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_c]);
    let commit_f = write_random_commit_with_parents(tx.repo_mut(), &[&commit_d]);
    let commit_g = write_random_commit_with_parents(tx.repo_mut(), &[&commit_e]);
    let commit_h = write_random_commit_with_parents(tx.repo_mut(), &[&commit_f]);
    let commit_i = write_random_commit_with_parents(tx.repo_mut(), &[&commit_g]);
    tx.repo_mut()
        .set_rewritten_commit(commit_e.id().clone(), commit_d.id().clone());
    tx.repo_mut()
        .set_rewritten_commit(commit_c.id().clone(), commit_f.id().clone());
    tx.repo_mut()
        .set_rewritten_commit(commit_g.id().clone(), commit_h.id().clone());
    let rebase_map =
        rebase_descendants_with_options_return_map(tx.repo_mut(), &RebaseOptions::default());
    // Only I needs rebasing (onto H, G's replacement); all other descendants
    // of the rewritten commits were themselves rewritten.
    let new_commit_i = assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit_i, &[commit_h.id()]);
    assert_eq!(rebase_map.len(), 1);
    assert_eq!(
        *tx.repo().view().heads(),
        hashset! {
            new_commit_i.id().clone(),
        }
    );
}
#[test]
fn test_rebase_descendants_backward() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Commit C was replaced by commit B. Commit D should be rebased.
    //
    // D
    // C
    // B
    // A
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_c]);
    // "Backward" rewrite: C is replaced by its own parent B.
    tx.repo_mut()
        .set_rewritten_commit(commit_c.id().clone(), commit_b.id().clone());
    let rebase_map =
        rebase_descendants_with_options_return_map(tx.repo_mut(), &RebaseOptions::default());
    let new_commit_d = assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit_d, &[commit_b.id()]);
    assert_eq!(rebase_map.len(), 1);
    assert_eq!(
        *tx.repo().view().heads(),
        hashset! {new_commit_d.id().clone()}
    );
}
#[test]
fn test_rebase_descendants_chain_becomes_branchy() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Commit B was replaced by commit E and commit C was replaced by commit F.
    // Commit F should get rebased onto E, and commit D should get rebased onto
    // the rebased F.
    //
    // D
    // C F
    // |/
    // B E
    // |/
    // A
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_c]);
    let commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_f = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    tx.repo_mut()
        .set_rewritten_commit(commit_b.id().clone(), commit_e.id().clone());
    tx.repo_mut()
        .set_rewritten_commit(commit_c.id().clone(), commit_f.id().clone());
    let rebase_map =
        rebase_descendants_with_options_return_map(tx.repo_mut(), &RebaseOptions::default());
    // F (a descendant of the rewritten B) moves onto E; D (descendant of the
    // rewritten C) moves onto the new F.
    let new_commit_f = assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit_f, &[commit_e.id()]);
    let new_commit_d =
        assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit_d, &[new_commit_f.id()]);
    assert_eq!(rebase_map.len(), 2);
    assert_eq!(
        *tx.repo().view().heads(),
        hashset! {
            new_commit_d.id().clone(),
        }
    );
}
#[test]
fn test_rebase_descendants_internal_merge() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Commit B was replaced by commit F. Commits C-E should be rebased.
    //
    // F
    // | E
    // | |\
    // | C D
    // | |/
    // | B
    // |/
    // A
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_c, &commit_d]);
    let commit_f = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    tx.repo_mut()
        .set_rewritten_commit(commit_b.id().clone(), commit_f.id().clone());
    let rebase_map =
        rebase_descendants_with_options_return_map(tx.repo_mut(), &RebaseOptions::default());
    // Both of E's parents are rebased, so the "internal" merge E is rebased
    // onto the new C and D, preserving parent order.
    let new_commit_c = assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit_c, &[commit_f.id()]);
    let new_commit_d = assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit_d, &[commit_f.id()]);
    let new_commit_e = assert_rebased_onto(
        tx.repo_mut(),
        &rebase_map,
        &commit_e,
        &[new_commit_c.id(), new_commit_d.id()],
    );
    assert_eq!(rebase_map.len(), 3);
    assert_eq!(
        *tx.repo().view().heads(),
        hashset! { new_commit_e.id().clone() }
    );
}
#[test]
fn test_rebase_descendants_external_merge() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Commit C was replaced by commit F. Commit E should be rebased. The rebased
    // commit E should have F as first parent and commit D as second parent.
    //
    // F
    // | E
    // | |\
    // | C D
    // | |/
    // | B
    // |/
    // A
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_c, &commit_d]);
    let commit_f = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    tx.repo_mut()
        .set_rewritten_commit(commit_c.id().clone(), commit_f.id().clone());
    let rebase_map =
        rebase_descendants_with_options_return_map(tx.repo_mut(), &RebaseOptions::default());
    // Only one parent of the merge commit E was rewritten: the rebased E
    // keeps D unchanged as its second parent.
    let new_commit_e = assert_rebased_onto(
        tx.repo_mut(),
        &rebase_map,
        &commit_e,
        &[commit_f.id(), commit_d.id()],
    );
    assert_eq!(rebase_map.len(), 1);
    assert_eq!(
        *tx.repo().view().heads(),
        hashset! {new_commit_e.id().clone()}
    );
}
#[test]
fn test_rebase_descendants_abandon() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Commit B and commit E were abandoned. Commit C and commit D should get
    // rebased onto commit A. Commit F should get rebased onto the new commit D.
    //
    // F
    // E
    // D C
    // |/
    // B
    // A
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_d]);
    let commit_f = write_random_commit_with_parents(tx.repo_mut(), &[&commit_e]);
    tx.repo_mut().record_abandoned_commit(&commit_b);
    tx.repo_mut().record_abandoned_commit(&commit_e);
    let rebase_map =
        rebase_descendants_with_options_return_map(tx.repo_mut(), &RebaseOptions::default());
    // Children of an abandoned commit move to the abandoned commit's parent.
    let new_commit_c = assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit_c, &[commit_a.id()]);
    let new_commit_d = assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit_d, &[commit_a.id()]);
    let new_commit_f =
        assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit_f, &[new_commit_d.id()]);
    assert_eq!(rebase_map.len(), 3);
    assert_eq!(
        *tx.repo().view().heads(),
        hashset! {
            new_commit_c.id().clone(),
            new_commit_f.id().clone()
        }
    );
}
#[test]
fn test_rebase_descendants_abandon_no_descendants() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Commit B and C were abandoned. Commit A should become a head.
    //
    // C
    // B
    // A
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    tx.repo_mut().record_abandoned_commit(&commit_b);
    tx.repo_mut().record_abandoned_commit(&commit_c);
    let rebase_map =
        rebase_descendants_with_options_return_map(tx.repo_mut(), &RebaseOptions::default());
    // Nothing survives to be rebased; the head simply falls back to A.
    assert_eq!(rebase_map.len(), 0);
    assert_eq!(
        *tx.repo().view().heads(),
        hashset! {
            commit_a.id().clone(),
        }
    );
}
#[test]
fn test_rebase_descendants_abandon_and_replace() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Commit B was replaced by commit E. Commit C was abandoned. Commit D should
    // get rebased onto commit E.
    //
    // D
    // C
    // E B
    // |/
    // A
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_c]);
    let commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    tx.repo_mut()
        .set_rewritten_commit(commit_b.id().clone(), commit_e.id().clone());
    tx.repo_mut().record_abandoned_commit(&commit_c);
    let rebase_map =
        rebase_descendants_with_options_return_map(tx.repo_mut(), &RebaseOptions::default());
    // D skips over the abandoned C and lands on B's replacement E.
    let new_commit_d = assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit_d, &[commit_e.id()]);
    assert_eq!(rebase_map.len(), 1);
    assert_eq!(
        *tx.repo().view().heads(),
        hashset! { new_commit_d.id().clone()}
    );
}
#[test]
fn test_rebase_descendants_abandon_degenerate_merge_simplify() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Commit B was abandoned. Commit D should get rebased to have only C as parent
    // (not A and C).
    //
    // D
    // |\
    // B C
    // |/
    // A
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b, &commit_c]);
    tx.repo_mut().record_abandoned_commit(&commit_b);
    // With `simplify_ancestor_merge` enabled, the redundant parent A (an
    // ancestor of the remaining parent C) is dropped from the rebased D.
    let rebase_map = rebase_descendants_with_options_return_map(
        tx.repo_mut(),
        &RebaseOptions {
            simplify_ancestor_merge: true,
            ..Default::default()
        },
    );
    let new_commit_d = assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit_d, &[commit_c.id()]);
    assert_eq!(rebase_map.len(), 1);
    assert_eq!(
        *tx.repo().view().heads(),
        hashset! {new_commit_d.id().clone()}
    );
}
#[test]
fn test_rebase_descendants_abandon_degenerate_merge_preserve() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Commit B was abandoned. Commit D should get rebased to have A and C as
    // parents.
    //
    // D
    // |\
    // B C
    // |/
    // A
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b, &commit_c]);
    tx.repo_mut().record_abandoned_commit(&commit_b);
    // With `simplify_ancestor_merge` disabled, the degenerate merge is kept:
    // B's slot is taken by its parent A even though A is an ancestor of C.
    let rebase_map = rebase_descendants_with_options_return_map(
        tx.repo_mut(),
        &RebaseOptions {
            simplify_ancestor_merge: false,
            ..Default::default()
        },
    );
    let new_commit_d = assert_rebased_onto(
        tx.repo_mut(),
        &rebase_map,
        &commit_d,
        &[commit_a.id(), commit_c.id()],
    );
    assert_eq!(rebase_map.len(), 1);
    assert_eq!(
        *tx.repo().view().heads(),
        hashset! {new_commit_d.id().clone()}
    );
}
#[test]
fn test_rebase_descendants_abandon_widen_merge() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Commit E was abandoned. Commit F should get rebased to have B, C, and D as
    // parents (in that order).
    //
    // F
    // |\
    // E \
    // |\ \
    // B C D
    //  \|/
    //   A
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b, &commit_c]);
    let commit_f = write_random_commit_with_parents(tx.repo_mut(), &[&commit_e, &commit_d]);
    tx.repo_mut().record_abandoned_commit(&commit_e);
    let rebase_map =
        rebase_descendants_with_options_return_map(tx.repo_mut(), &RebaseOptions::default());
    // The abandoned merge E is replaced in F's parent list by E's own
    // parents, "widening" the merge from 2 parents to 3.
    let new_commit_f = assert_rebased_onto(
        tx.repo_mut(),
        &rebase_map,
        &commit_f,
        &[commit_b.id(), commit_c.id(), commit_d.id()],
    );
    assert_eq!(rebase_map.len(), 1);
    assert_eq!(
        *tx.repo().view().heads(),
        hashset! { new_commit_f.id().clone()}
    );
}
#[test]
fn test_rebase_descendants_multiple_sideways() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Commit B and commit D were both replaced by commit F. Commit C and commit E
    // should get rebased onto it.
    //
    // C E
    // B D F
    // | |/
    // |/
    // A
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_d]);
    let commit_f = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    // Two distinct commits rewritten to the same replacement.
    tx.repo_mut()
        .set_rewritten_commit(commit_b.id().clone(), commit_f.id().clone());
    tx.repo_mut()
        .set_rewritten_commit(commit_d.id().clone(), commit_f.id().clone());
    let rebase_map =
        rebase_descendants_with_options_return_map(tx.repo_mut(), &RebaseOptions::default());
    let new_commit_c = assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit_c, &[commit_f.id()]);
    let new_commit_e = assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit_e, &[commit_f.id()]);
    assert_eq!(rebase_map.len(), 2);
    assert_eq!(
        *tx.repo().view().heads(),
        hashset! {
            new_commit_c.id().clone(),
            new_commit_e.id().clone()
        }
    );
}
#[test]
#[should_panic(expected = "cycle")]
fn test_rebase_descendants_multiple_swap() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Commit B was replaced by commit D. Commit D was replaced by commit B.
    // This results in an infinite loop and a panic
    //
    // C E
    // B D
    // |/
    // A
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let _commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let _commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_d]);
    // B and D swap places, creating a rewrite cycle. Both have descendants
    // (C and E), so rebasing must actually traverse the cycle.
    tx.repo_mut()
        .set_rewritten_commit(commit_b.id().clone(), commit_d.id().clone());
    tx.repo_mut()
        .set_rewritten_commit(commit_d.id().clone(), commit_b.id().clone());
    tx.repo_mut().rebase_descendants().ok(); // Panics because of the cycle
}
#[test]
fn test_rebase_descendants_multiple_no_descendants() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Commit B was replaced by commit C. Commit C was replaced by commit B.
    //
    // B C
    // |/
    // A
    let mut tx = repo.start_transaction();
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    tx.repo_mut()
        .set_rewritten_commit(commit_b.id().clone(), commit_c.id().clone());
    tx.repo_mut()
        .set_rewritten_commit(commit_c.id().clone(), commit_b.id().clone());
    // Unlike the swap-with-descendants case above, a cycle between leaf
    // commits is reported as an error instead of panicking.
    let result = tx.repo_mut().rebase_descendants();
    assert_matches!(result, Err(err) if err.to_string().contains("Cycle"));
}
#[test]
fn test_rebase_descendants_divergent_rewrite() {
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
// Commit B was replaced by commit B2. Commit D was replaced by commits D2 and
// D3. Commit F was replaced by commit F2. Commit C should be rebased onto
// B2. Commit E should not be rebased. Commit G should be rebased onto
// commit F2.
//
// G
// F
// E
// D
// C
// B
// | F2
// |/
// | D3
// |/
// | D2
// |/
// | B2
// |/
// A
let mut tx = repo.start_transaction();
let commit_a = write_random_commit(tx.repo_mut());
let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_c]);
let commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_d]);
let commit_f = write_random_commit_with_parents(tx.repo_mut(), &[&commit_e]);
let commit_g = write_random_commit_with_parents(tx.repo_mut(), &[&commit_f]);
let commit_b2 = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
let commit_d2 = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
let commit_d3 = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
let commit_f2 = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
tx.repo_mut()
.set_rewritten_commit(commit_b.id().clone(), commit_b2.id().clone());
// Commit D becomes divergent
tx.repo_mut().set_divergent_rewrite(
commit_d.id().clone(),
vec![commit_d2.id().clone(), commit_d3.id().clone()],
);
tx.repo_mut()
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_init.rs | lib/tests/test_init.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::path::Path;
use std::path::PathBuf;
use assert_matches::assert_matches;
use jj_lib::config::StackedConfig;
use jj_lib::git_backend::GitBackend;
use jj_lib::ref_name::WorkspaceName;
use jj_lib::repo::Repo as _;
use jj_lib::settings::UserSettings;
use jj_lib::workspace::Workspace;
use test_case::test_case;
use testutils::TestRepoBackend;
use testutils::TestWorkspace;
use testutils::assert_tree_eq;
use testutils::git;
use testutils::write_random_commit;
// Builds a canonical/uncanonical pair of paths that both refer to `input`.
// The uncanonical variant routes through `..` (`input/../<file_name>`), which
// lets tests verify that workspace initialization canonicalizes its input.
fn canonicalize(input: &Path) -> (PathBuf, PathBuf) {
    let mut uncanonical = input.join("..");
    uncanonical.push(input.file_name().unwrap());
    (dunce::canonicalize(&uncanonical).unwrap(), uncanonical)
}
#[test]
fn test_init_local() {
    let settings = testutils::user_settings();
    let temp_dir = testutils::new_temp_dir();
    let (canonical, uncanonical) = canonicalize(temp_dir.path());
    // Init from the uncanonicalized path; the workspace root should come out
    // canonicalized.
    let (workspace, repo) = Workspace::init_simple(&settings, &uncanonical).unwrap();
    // A "simple" (local) workspace must not be backed by a Git backend.
    assert!(repo.store().backend_impl::<GitBackend>().is_none());
    assert_eq!(workspace.workspace_root(), &canonical);
    // Just test that we can write a commit to the store
    let mut tx = repo.start_transaction();
    write_random_commit(tx.repo_mut());
}
#[test]
fn test_init_internal_git() {
    let settings = testutils::user_settings();
    let temp_dir = testutils::new_temp_dir();
    let (canonical, uncanonical) = canonicalize(temp_dir.path());
    let (workspace, repo) = Workspace::init_internal_git(&settings, &uncanonical).unwrap();
    let git_backend: &GitBackend = repo.store().backend_impl().unwrap();
    let repo_path = canonical.join(".jj").join("repo");
    assert_eq!(workspace.workspace_root(), &canonical);
    // An internal Git repo lives inside the .jj directory...
    assert_eq!(
        git_backend.git_repo_path(),
        canonical.join(PathBuf::from_iter([".jj", "repo", "store", "git"])),
    );
    // ...and is bare (no Git working directory).
    assert!(git_backend.git_workdir().is_none());
    // The git_target file records the relative location of the Git repo.
    assert_eq!(
        std::fs::read_to_string(repo_path.join("store").join("git_target")).unwrap(),
        "git"
    );
    // Just test that we can write a commit to the store
    let mut tx = repo.start_transaction();
    write_random_commit(tx.repo_mut());
}
#[test]
fn test_init_colocated_git() {
    let settings = testutils::user_settings();
    let temp_dir = testutils::new_temp_dir();
    let (canonical, uncanonical) = canonicalize(temp_dir.path());
    let (workspace, repo) = Workspace::init_colocated_git(&settings, &uncanonical).unwrap();
    let git_backend: &GitBackend = repo.store().backend_impl().unwrap();
    let repo_path = canonical.join(".jj").join("repo");
    assert_eq!(workspace.workspace_root(), &canonical);
    // Colocated: the Git repo's .git sits at the workspace root and shares
    // the workspace as its working directory.
    assert_eq!(git_backend.git_repo_path(), canonical.join(".git"));
    assert_eq!(git_backend.git_workdir(), Some(canonical.as_ref()));
    // git_target points back up from .jj/repo/store to the colocated .git.
    assert_eq!(
        std::fs::read_to_string(repo_path.join("store").join("git_target")).unwrap(),
        "../../../.git"
    );
    // Just test that we can write a commit to the store
    let mut tx = repo.start_transaction();
    write_random_commit(tx.repo_mut());
}
#[test]
fn test_init_external_git() {
    let settings = testutils::user_settings();
    let temp_dir = testutils::new_temp_dir();
    let (canonical, uncanonical) = canonicalize(temp_dir.path());
    // Create a pre-existing Git repo next to (not inside) the jj workspace.
    let git_repo_path = uncanonical.join("git");
    git::init(&git_repo_path);
    std::fs::create_dir(uncanonical.join("jj")).unwrap();
    let (workspace, repo) = Workspace::init_external_git(
        &settings,
        &uncanonical.join("jj"),
        &git_repo_path.join(".git"),
    )
    .unwrap();
    let git_backend: &GitBackend = repo.store().backend_impl().unwrap();
    assert_eq!(workspace.workspace_root(), &canonical.join("jj"));
    // The backend should reference the external Git repo (canonicalized) and
    // keep that repo's own working directory.
    assert_eq!(
        git_backend.git_repo_path(),
        canonical.join("git").join(".git")
    );
    assert_eq!(
        git_backend.git_workdir(),
        Some(canonical.join("git").as_ref())
    );
    // Just test that we can write a commit to the store
    let mut tx = repo.start_transaction();
    write_random_commit(tx.repo_mut());
}
#[test_case(TestRepoBackend::Simple ; "simple backend")]
#[test_case(TestRepoBackend::Git ; "git backend")]
fn test_init_with_default_config(backend: TestRepoBackend) {
    // Test that we can create a repo without setting any non-default config
    let settings = UserSettings::from_config(StackedConfig::with_defaults()).unwrap();
    let test_workspace = TestWorkspace::init_with_backend_and_settings(backend, &settings);
    let repo = &test_workspace.repo;
    let wc_commit_id = repo
        .view()
        .get_wc_commit_id(WorkspaceName::DEFAULT)
        .unwrap();
    let wc_commit = repo.store().get_commit(wc_commit_id).unwrap();
    // With no user config, author/committer name and email are empty rather
    // than an error.
    assert_eq!(wc_commit.author().name, "".to_string());
    assert_eq!(wc_commit.author().email, "".to_string());
    assert_eq!(wc_commit.committer().name, "".to_string());
    assert_eq!(wc_commit.committer().email, "".to_string());
}
#[test_case(TestRepoBackend::Simple ; "simple backend")]
#[test_case(TestRepoBackend::Git ; "git backend")]
fn test_init_checkout(backend: TestRepoBackend) {
    // Test the contents of the working-copy commit after init
    let settings = testutils::user_settings();
    let test_workspace = TestWorkspace::init_with_backend_and_settings(backend, &settings);
    let repo = &test_workspace.repo;
    let wc_commit_id = repo
        .view()
        .get_wc_commit_id(WorkspaceName::DEFAULT)
        .unwrap();
    let wc_commit = repo.store().get_commit(wc_commit_id).unwrap();
    // The initial working-copy commit is empty, sits directly on the root
    // commit, and has no predecessors or description.
    assert_tree_eq!(wc_commit.tree(), repo.store().empty_merged_tree());
    assert_eq!(
        wc_commit.store_commit().parents,
        vec![repo.store().root_commit_id().clone()]
    );
    assert!(wc_commit.store_commit().predecessors.is_empty());
    assert_eq!(wc_commit.description(), "");
    // Author and committer identities come from the user settings.
    assert_eq!(wc_commit.author().name, settings.user_name());
    assert_eq!(wc_commit.author().email, settings.user_email());
    assert_eq!(wc_commit.committer().name, settings.user_name());
    assert_eq!(wc_commit.committer().email, settings.user_email());
    assert_matches!(
        repo.operation().predecessors_for_commit(wc_commit.id()),
        Some([])
    );
}
// Verifies that workspaces can be created and loaded from paths that are not
// valid UTF-8 (only meaningful on Unix, where paths are arbitrary bytes).
#[cfg(unix)]
#[test]
fn test_init_load_non_utf8_path() {
    use std::ffi::OsStr;
    use std::os::unix::ffi::OsStrExt as _;
    use jj_lib::workspace::default_working_copy_factories;
    use testutils::TestEnvironment;
    let settings = testutils::user_settings();
    let test_env = TestEnvironment::init();
    // Some filesystems (e.g. APFS) reject non-UTF-8 file names entirely.
    if testutils::check_strict_utf8_fs(test_env.root()) {
        eprintln!(
            "Skipping test \"test_init_load_non_utf8_path\" due to strict UTF-8 filesystem for \
             path {:?}",
            test_env.root()
        );
        return;
    }
    // "git\xe0" / "jj\xe0" are deliberately invalid UTF-8.
    let git_repo_path = test_env.root().join(OsStr::from_bytes(b"git\xe0"));
    assert!(git_repo_path.to_str().is_none());
    git::init(&git_repo_path);
    // Workspace can be created
    let workspace_root = test_env.root().join(OsStr::from_bytes(b"jj\xe0"));
    std::fs::create_dir(&workspace_root).unwrap();
    Workspace::init_external_git(&settings, &workspace_root, &git_repo_path.join(".git")).unwrap();
    // Workspace can be loaded
    let workspace = Workspace::load(
        &settings,
        &workspace_root,
        &test_env.default_store_factories(),
        &default_working_copy_factories(),
    )
    .unwrap();
    // Just test that we can write a commit to the store
    let repo = workspace.repo_loader().load_at_head().unwrap();
    let mut tx = repo.start_transaction();
    write_random_commit(tx.repo_mut());
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_revset_optimized.rs | lib/tests/test_revset_optimized.rs | // Copyright 2025 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Test that optimized revset evaluates to the same result as the original
//! expression.
//!
//! Use `PROPTEST_CASES=10000` to increase the number of test cases to run.
//! The default is `256`, which might be too small to catch edge-case bugs.
//! <https://proptest-rs.github.io/proptest/proptest/tutorial/config.html>
use std::sync::Arc;
use itertools::Itertools as _;
use jj_lib::backend::CommitId;
use jj_lib::commit::Commit;
use jj_lib::config::ConfigLayer;
use jj_lib::config::ConfigSource;
use jj_lib::repo::MutableRepo;
use jj_lib::repo::Repo;
use jj_lib::revset::ResolvedRevsetExpression;
use jj_lib::revset::RevsetExpression;
use jj_lib::revset::RevsetFilterPredicate;
use jj_lib::rewrite::RebaseOptions;
use jj_lib::rewrite::RebasedCommit;
use jj_lib::settings::UserSettings;
use proptest::prelude::*;
use testutils::TestRepo;
fn stable_settings() -> UserSettings {
let mut config = testutils::base_user_config();
let mut layer = ConfigLayer::empty(ConfigSource::User);
layer
.set_value("debug.commit-timestamp", "2001-02-03T04:05:06+07:00")
.unwrap();
config.add_layer(layer);
UserSettings::from_config(config).unwrap()
}
fn write_new_commit<'a>(
repo: &mut MutableRepo,
desc: &str,
parents: impl IntoIterator<Item = &'a Commit>,
) -> Commit {
let parents = parents.into_iter().map(|c| c.id().clone()).collect();
let tree = repo.store().empty_merged_tree();
repo.new_commit(parents, tree)
.set_description(desc)
.write()
.unwrap()
}
fn rebase_descendants(repo: &mut MutableRepo) -> Vec<Commit> {
let mut commits = Vec::new();
repo.rebase_descendants_with_options(&RebaseOptions::default(), |_, rebased| match rebased {
RebasedCommit::Rewritten(commit) => commits.push(commit),
RebasedCommit::Abandoned { .. } => {}
})
.unwrap();
commits
}
/// Strategy to generate arbitrary revset expressions.
fn arb_expression(
known_commits: Vec<CommitId>,
visible_heads: Vec<Vec<CommitId>>,
) -> impl Strategy<Value = Arc<ResolvedRevsetExpression>> {
// https://proptest-rs.github.io/proptest/proptest/tutorial/recursive.html
let max_commits = known_commits.len();
let leaf_expr = prop_oneof![
Just(RevsetExpression::none()),
Just(RevsetExpression::all()),
Just(RevsetExpression::visible_heads()),
Just(RevsetExpression::root()),
proptest::sample::subsequence(known_commits, 1..=5.min(max_commits))
.prop_map(RevsetExpression::commits),
// Use merges() as a filter that isn't constant. Since we don't have an
// optimization rule that rewrites filter predicates, we wouldn't have
// to add various filter predicates.
Just(RevsetExpression::filter(
RevsetFilterPredicate::ParentCount(2..u32::MAX)
)),
];
leaf_expr.prop_recursive(
10, // depth
100, // total nodes
2, // unary or binary
move |expr| {
// This table includes redundant expressions (e.g. parents() and
// ancestors()) if they are common, which will probably make them be
// more weighted?
prop_oneof![
// Ancestors
expr.clone().prop_map(|x| x.parents()),
expr.clone().prop_map(|x| x.ancestors()),
(expr.clone(), 0..5_u64).prop_map(|(x, d)| x.ancestors_range(0..d)),
// Descendants
expr.clone().prop_map(|x| x.children()),
expr.clone().prop_map(|x| x.descendants()),
(expr.clone(), 0..5_u64).prop_map(|(x, d)| x.descendants_range(0..d)),
// First ancestors
expr.clone().prop_map(|x| x.first_ancestors_at(1)),
expr.clone().prop_map(|x| x.first_ancestors()),
(expr.clone(), 0..5_u64).prop_map(|(x, d)| x.first_ancestors_range(0..d)),
// Range
(expr.clone(), expr.clone()).prop_map(|(x, y)| x.range(&y)),
// DagRange
(expr.clone(), expr.clone()).prop_map(|(x, y)| x.dag_range_to(&y)),
expr.clone().prop_map(|x| x.connected()),
// Reachable
(expr.clone(), expr.clone()).prop_map(|(x, y)| x.reachable(&y)),
// Heads
expr.clone().prop_map(|x| x.heads()),
// Roots
expr.clone().prop_map(|x| x.roots()),
// ForkPoint
expr.clone().prop_map(|x| x.fork_point()),
// Latest
(expr.clone(), 0..5_usize).prop_map(|(x, n)| x.latest(n)),
// AtOperation (or WithinVisibility)
(
expr.clone(),
proptest::sample::select(visible_heads.clone())
)
.prop_map(|(candidates, visible_heads)| Arc::new(
RevsetExpression::WithinVisibility {
candidates,
visible_heads
}
)),
// Coalesce (in binary form)
[expr.clone(), expr.clone()].prop_map(|xs| RevsetExpression::coalesce(&xs)),
// General set operations
expr.clone().prop_map(|x| x.negated()),
(expr.clone(), expr.clone()).prop_map(|(x, y)| x.union(&y)),
(expr.clone(), expr.clone()).prop_map(|(x, y)| x.intersection(&y)),
(expr.clone(), expr.clone()).prop_map(|(x, y)| x.minus(&y)),
]
},
)
}
fn verify_optimized(
repo: &dyn Repo,
expression: &Arc<ResolvedRevsetExpression>,
) -> Result<(), TestCaseError> {
let optimized_revset = expression.clone().evaluate(repo).unwrap();
let unoptimized_revset = expression.clone().evaluate_unoptimized(repo).unwrap();
let optimized_ids: Vec<_> = optimized_revset.iter().try_collect().unwrap();
let unoptimized_ids: Vec<_> = unoptimized_revset.iter().try_collect().unwrap();
prop_assert_eq!(optimized_ids, unoptimized_ids);
Ok(())
}
#[test]
fn test_mostly_linear() {
let settings = stable_settings();
let test_repo = TestRepo::init_with_settings(&settings);
let repo = test_repo.repo;
// 8 9
// 6 7
// |/|
// 4 5
// 3 |
// 1 2
// |/
// 0
let mut tx = repo.start_transaction();
let commit0 = repo.store().root_commit();
let commit1 = write_new_commit(tx.repo_mut(), "1", [&commit0]);
let commit2 = write_new_commit(tx.repo_mut(), "2", [&commit0]);
let commit3 = write_new_commit(tx.repo_mut(), "3", [&commit1]);
let commit4 = write_new_commit(tx.repo_mut(), "4", [&commit3]);
let commit5 = write_new_commit(tx.repo_mut(), "5", [&commit2]);
let commit6 = write_new_commit(tx.repo_mut(), "6", [&commit4]);
let commit7 = write_new_commit(tx.repo_mut(), "7", [&commit4, &commit5]);
let commit8 = write_new_commit(tx.repo_mut(), "8", [&commit6]);
let commit9 = write_new_commit(tx.repo_mut(), "9", [&commit7]);
let commits = vec![
commit0, commit1, commit2, commit3, commit4, commit5, commit6, commit7, commit8, commit9,
];
let repo = tx.commit("a").unwrap();
// Commit ids for reference
insta::assert_snapshot!(
commits.iter().map(|c| format!("{:<2} {}\n", c.description(), c.id())).join(""), @r"
00000000000000000000
1 78f823b31f2c4a77030b
2 1ba216c17ef680561823
3 c2c719328d78654d9f8e
4 d6b40f7dfac149c7181c
5 c682b87d91a8940f71d5
6 456fe15ac6ebfdf56219
7 d2bba8ce1ce80751aab5
8 536f4a045e558c9927a5
9 6ab43bd6d94bdaff491f
");
let commit_ids = commits.iter().map(|c| c.id().clone()).collect_vec();
let visible_heads = vec![
vec![commit_ids[0].clone()],
vec![commit_ids[8].clone(), commit_ids[9].clone()],
];
proptest!(|(expression in arb_expression(commit_ids, visible_heads))| {
verify_optimized(repo.as_ref(), &expression)?;
});
}
#[test]
fn test_weird_merges() {
let settings = stable_settings();
let test_repo = TestRepo::init_with_settings(&settings);
let repo = test_repo.repo;
// 8
// /|\
// 4 5 6 7
// |X| |/
// 1 2 3
// \|/
// 0
let mut tx = repo.start_transaction();
let commit0 = repo.store().root_commit();
let commit1 = write_new_commit(tx.repo_mut(), "1", [&commit0]);
let commit2 = write_new_commit(tx.repo_mut(), "2", [&commit0]);
let commit3 = write_new_commit(tx.repo_mut(), "3", [&commit0]);
let commit4 = write_new_commit(tx.repo_mut(), "4", [&commit1, &commit2]);
let commit5 = write_new_commit(tx.repo_mut(), "5", [&commit1, &commit2]);
let commit6 = write_new_commit(tx.repo_mut(), "6", [&commit3]);
let commit7 = write_new_commit(tx.repo_mut(), "7", [&commit3]);
let commit8 = write_new_commit(tx.repo_mut(), "8", [&commit5, &commit6, &commit7]);
let commits = vec![
commit0, commit1, commit2, commit3, commit4, commit5, commit6, commit7, commit8,
];
let repo = tx.commit("a").unwrap();
// Commit ids for reference
insta::assert_snapshot!(
commits.iter().map(|c| format!("{:<2} {}\n", c.description(), c.id())).join(""), @r"
00000000000000000000
1 78f823b31f2c4a77030b
2 1ba216c17ef680561823
3 83a7b5b8138c9428d837
4 43a3ed8115915cb0ebe0
5 aec384ff4d34c039e4db
6 d80cec48faa50bf2ac56
7 2667f762c099ffcda2f0
8 54feb3e8186bc4450be4
");
let commit_ids = commits.iter().map(|c| c.id().clone()).collect_vec();
let visible_heads = vec![
vec![commit_ids[0].clone()],
vec![commit_ids[4].clone(), commit_ids[8].clone()],
];
proptest!(|(expression in arb_expression(commit_ids, visible_heads))| {
verify_optimized(repo.as_ref(), &expression)?;
});
}
#[test]
fn test_feature_branches() {
let settings = stable_settings();
let test_repo = TestRepo::init_with_settings(&settings);
let repo = test_repo.repo;
// 9
// |\
// 8 \
// |\ \
// | 7 |
// |/ |
// 6 |
// |\ 5
// | | 4
// | | 3
// | 2 |
// | 1 |
// | |/
// |/
// 0
// Fetch branch 2 and 5
let mut tx = repo.start_transaction();
let commit0 = repo.store().root_commit();
let commit1 = write_new_commit(tx.repo_mut(), "1", [&commit0]);
let commit2 = write_new_commit(tx.repo_mut(), "2", [&commit1]);
let commit3 = write_new_commit(tx.repo_mut(), "3", [&commit0]);
let commit4 = write_new_commit(tx.repo_mut(), "4", [&commit3]);
let commit5 = write_new_commit(tx.repo_mut(), "5", [&commit4]);
let repo = tx.commit("a").unwrap();
// Merge branch 2
let mut tx = repo.start_transaction();
let commit6 = write_new_commit(tx.repo_mut(), "6", [&commit0, &commit2]);
let repo = tx.commit("a").unwrap();
// Fetch merged branch 7
let mut tx = repo.start_transaction();
let commit7 = write_new_commit(tx.repo_mut(), "7", [&commit6]);
let commit8 = write_new_commit(tx.repo_mut(), "8", [&commit6, &commit7]);
let repo = tx.commit("a").unwrap();
// Merge branch 5
let mut tx = repo.start_transaction();
let commit9 = write_new_commit(tx.repo_mut(), "9", [&commit8, &commit5]);
let commits = vec![
commit0, commit1, commit2, commit3, commit4, commit5, commit6, commit7, commit8, commit9,
];
let repo = tx.commit("a").unwrap();
// Commit ids for reference
insta::assert_snapshot!(
commits.iter().map(|c| format!("{:<2} {}\n", c.description(), c.id())).join(""), @r"
00000000000000000000
1 78f823b31f2c4a77030b
2 6323cf55a45bcc85315d
3 83a7b5b8138c9428d837
4 93731ec1a14276206ba7
5 c388b47bd72fcfee9e3c
6 da751caa45bda2e3d526
7 45a86fae2b51ec68f8c0
8 9f83496b963cbaf8cb7a
9 998a24ecd56446732f55
");
let commit_ids = commits.iter().map(|c| c.id().clone()).collect_vec();
let visible_heads = vec![
vec![commit_ids[0].clone()],
vec![commit_ids[2].clone(), commit_ids[5].clone()],
vec![commit_ids[5].clone(), commit_ids[6].clone()],
vec![commit_ids[5].clone(), commit_ids[8].clone()],
vec![commit_ids[9].clone()],
];
proptest!(|(expression in arb_expression(commit_ids, visible_heads))| {
verify_optimized(repo.as_ref(), &expression)?;
});
}
#[test]
fn test_rewritten() {
let settings = stable_settings();
let test_repo = TestRepo::init_with_settings(&settings);
let repo = test_repo.repo;
// 5
// |\
// 4 | 3
// | |/
// 1 2
// |/
// 0
let mut tx = repo.start_transaction();
let commit0 = repo.store().root_commit();
let commit1 = write_new_commit(tx.repo_mut(), "1", [&commit0]);
let commit2 = write_new_commit(tx.repo_mut(), "2", [&commit0]);
let commit3 = write_new_commit(tx.repo_mut(), "3", [&commit2]);
let commit4 = write_new_commit(tx.repo_mut(), "4", [&commit1]);
let commit5 = write_new_commit(tx.repo_mut(), "5", [&commit4, &commit2]);
let mut commits = vec![commit0, commit1, commit2, commit3, commit4, commit5];
let repo = tx.commit("a").unwrap();
// Rewrite 2, rebase 3 and 5
let mut tx = repo.start_transaction();
let commit2b = tx
.repo_mut()
.rewrite_commit(&commits[2])
.set_description("2b")
.write()
.unwrap();
commits.push(commit2b);
commits.extend(rebase_descendants(tx.repo_mut()));
let repo = tx.commit("b").unwrap();
// Abandon 4, rebase 5
let mut tx = repo.start_transaction();
tx.repo_mut().record_abandoned_commit(&commits[4]);
commits.extend(rebase_descendants(tx.repo_mut()));
let repo = tx.commit("c").unwrap();
// Commit ids for reference
insta::assert_snapshot!(
commits.iter().map(|c| format!("{:<2} {}\n", c.description(), c.id())).join(""), @r"
00000000000000000000
1 78f823b31f2c4a77030b
2 1ba216c17ef680561823
3 068410d7a4a5b7052c18
4 8ee10ec699f52df8c624
5 32bd65d7134884955150
2b a7d217f53df0908d3f7a
3 c199d1c8c617cf15893f
5 5fc1da61558a03f69a8d
5 cb81e73e341e59553ff6
");
let commit_ids = commits.iter().map(|c| c.id().clone()).collect_vec();
let visible_heads = vec![
vec![commit_ids[0].clone()],
vec![commit_ids[3].clone(), commit_ids[5].clone()],
vec![commit_ids[7].clone(), commit_ids[8].clone()],
vec![commit_ids[7].clone(), commit_ids[9].clone()],
];
proptest!(|(expression in arb_expression(commit_ids, visible_heads))| {
verify_optimized(repo.as_ref(), &expression)?;
});
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_signing.rs | lib/tests/test_signing.rs | use jj_lib::backend::MillisSinceEpoch;
use jj_lib::backend::Signature;
use jj_lib::backend::Timestamp;
use jj_lib::config::ConfigLayer;
use jj_lib::config::ConfigSource;
use jj_lib::repo::Repo as _;
use jj_lib::settings::UserSettings;
use jj_lib::signing::SigStatus;
use jj_lib::signing::SignBehavior;
use jj_lib::signing::Signer;
use jj_lib::signing::Verification;
use jj_lib::test_signing_backend::TestSigningBackend;
use test_case::test_case;
use testutils::TestRepoBackend;
use testutils::TestWorkspace;
use testutils::create_random_commit;
use testutils::write_random_commit;
fn user_settings(behavior: SignBehavior) -> UserSettings {
let mut config = testutils::base_user_config();
config.add_layer(
ConfigLayer::parse(
ConfigSource::User,
&format!(
r#"
signing.key = "impeccable"
signing.behavior = "{}"
"#,
match behavior {
SignBehavior::Drop => "drop",
SignBehavior::Keep => "keep",
SignBehavior::Own => "own",
SignBehavior::Force => "force",
}
),
)
.unwrap(),
);
UserSettings::from_config(config).unwrap()
}
fn someone_else() -> Signature {
Signature {
name: "Someone Else".to_string(),
email: "someone-else@example.com".to_string(),
timestamp: Timestamp {
timestamp: MillisSinceEpoch(0),
tz_offset: 0,
},
}
}
fn good_verification() -> Option<Verification> {
Some(Verification {
status: SigStatus::Good,
key: Some("impeccable".to_owned()),
display: Some("test-display".into()),
})
}
#[test_case(TestRepoBackend::Simple ; "simple backend")]
#[test_case(TestRepoBackend::Git ; "git backend")]
fn manual(backend: TestRepoBackend) {
let settings = user_settings(SignBehavior::Own);
let signer = Signer::new(Some(Box::new(TestSigningBackend)), vec![]);
let test_workspace = TestWorkspace::init_with_backend_and_signer(backend, signer, &settings);
let repo = &test_workspace.repo;
let repo = repo.clone();
let mut tx = repo.start_transaction();
let commit1 = create_random_commit(tx.repo_mut())
.set_sign_behavior(SignBehavior::Own)
.write()
.unwrap();
let commit2 = create_random_commit(tx.repo_mut())
.set_sign_behavior(SignBehavior::Own)
.set_author(someone_else())
.write()
.unwrap();
tx.commit("test").unwrap();
let commit1 = repo.store().get_commit(commit1.id()).unwrap();
assert_eq!(commit1.verification().unwrap(), good_verification());
let commit2 = repo.store().get_commit(commit2.id()).unwrap();
assert_eq!(commit2.verification().unwrap(), None);
}
#[test_case(TestRepoBackend::Git ; "git backend")]
fn keep_on_rewrite(backend: TestRepoBackend) {
let settings = user_settings(SignBehavior::Own);
let signer = Signer::new(Some(Box::new(TestSigningBackend)), vec![]);
let test_workspace = TestWorkspace::init_with_backend_and_signer(backend, signer, &settings);
let repo = &test_workspace.repo;
let repo = repo.clone();
let mut tx = repo.start_transaction();
let commit = create_random_commit(tx.repo_mut())
.set_sign_behavior(SignBehavior::Own)
.write()
.unwrap();
tx.commit("test").unwrap();
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let rewritten = mut_repo.rewrite_commit(&commit).write().unwrap();
let commit = repo.store().get_commit(rewritten.id()).unwrap();
assert_eq!(commit.verification().unwrap(), good_verification());
}
#[test_case(TestRepoBackend::Git ; "git backend")]
fn manual_drop_on_rewrite(backend: TestRepoBackend) {
let settings = user_settings(SignBehavior::Own);
let signer = Signer::new(Some(Box::new(TestSigningBackend)), vec![]);
let test_workspace = TestWorkspace::init_with_backend_and_signer(backend, signer, &settings);
let repo = &test_workspace.repo;
let repo = repo.clone();
let mut tx = repo.start_transaction();
let commit = create_random_commit(tx.repo_mut())
.set_sign_behavior(SignBehavior::Own)
.write()
.unwrap();
tx.commit("test").unwrap();
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let rewritten = mut_repo
.rewrite_commit(&commit)
.set_sign_behavior(SignBehavior::Drop)
.write()
.unwrap();
let commit = repo.store().get_commit(rewritten.id()).unwrap();
assert_eq!(commit.verification().unwrap(), None);
}
#[test_case(TestRepoBackend::Git ; "git backend")]
fn forced(backend: TestRepoBackend) {
let settings = user_settings(SignBehavior::Force);
let signer = Signer::new(Some(Box::new(TestSigningBackend)), vec![]);
let test_workspace = TestWorkspace::init_with_backend_and_signer(backend, signer, &settings);
let repo = &test_workspace.repo;
let repo = repo.clone();
let mut tx = repo.start_transaction();
let commit = create_random_commit(tx.repo_mut())
.set_author(someone_else())
.write()
.unwrap();
tx.commit("test").unwrap();
let commit = repo.store().get_commit(commit.id()).unwrap();
assert_eq!(commit.verification().unwrap(), good_verification());
}
#[test_case(TestRepoBackend::Git ; "git backend")]
fn configured(backend: TestRepoBackend) {
let settings = user_settings(SignBehavior::Own);
let signer = Signer::new(Some(Box::new(TestSigningBackend)), vec![]);
let test_workspace = TestWorkspace::init_with_backend_and_signer(backend, signer, &settings);
let repo = &test_workspace.repo;
let repo = repo.clone();
let mut tx = repo.start_transaction();
let commit = write_random_commit(tx.repo_mut());
tx.commit("test").unwrap();
let commit = repo.store().get_commit(commit.id()).unwrap();
assert_eq!(commit.verification().unwrap(), good_verification());
}
#[test_case(TestRepoBackend::Git ; "git backend")]
fn drop_behavior(backend: TestRepoBackend) {
let settings = user_settings(SignBehavior::Drop);
let signer = Signer::new(Some(Box::new(TestSigningBackend)), vec![]);
let test_workspace = TestWorkspace::init_with_backend_and_signer(backend, signer, &settings);
let repo = &test_workspace.repo;
let repo = repo.clone();
let mut tx = repo.start_transaction();
let commit = create_random_commit(tx.repo_mut())
.set_sign_behavior(SignBehavior::Own)
.write()
.unwrap();
tx.commit("test").unwrap();
let original_commit = repo.store().get_commit(commit.id()).unwrap();
assert_eq!(original_commit.verification().unwrap(), good_verification());
let mut tx = repo.start_transaction();
let mut_repo = tx.repo_mut();
let rewritten = mut_repo.rewrite_commit(&original_commit).write().unwrap();
let rewritten_commit = repo.store().get_commit(rewritten.id()).unwrap();
assert_eq!(rewritten_commit.verification().unwrap(), None);
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_bisect.rs | lib/tests/test_bisect.rs | // Copyright 2025 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use assert_matches::assert_matches;
use jj_lib::backend::CommitId;
use jj_lib::bisect::BisectionResult;
use jj_lib::bisect::Bisector;
use jj_lib::bisect::Evaluation;
use jj_lib::bisect::NextStep;
use jj_lib::repo::Repo;
use jj_lib::revset::ResolvedRevsetExpression;
use testutils::TestRepo;
use testutils::write_random_commit;
use testutils::write_random_commit_with_parents;
fn test_bisection<'a>(
repo: &dyn Repo,
input_range: &Arc<ResolvedRevsetExpression>,
results: impl IntoIterator<Item = (&'a CommitId, Evaluation)>,
) -> BisectionResult {
let mut bisector = Bisector::new(repo, input_range.clone()).unwrap();
let mut iter = results.into_iter().enumerate();
loop {
match bisector.next_step().unwrap() {
NextStep::Evaluate(commit) => {
let (i, (expected_id, result)) =
iter.next().expect("More commits than expected were tested");
assert_eq!(
commit.id(),
expected_id,
"Attempt to test unexpected commit at iteration {i}"
);
bisector.mark(commit.id().clone(), result);
}
NextStep::Done(bisection_result) => {
assert!(iter.next().is_none(), "Finished earlier than expected");
return bisection_result;
}
}
}
}
#[test]
fn test_bisect_empty_input() {
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let input_range = ResolvedRevsetExpression::none();
let expected_tests = [];
let result = test_bisection(repo.as_ref(), &input_range, expected_tests);
assert_matches!(result, BisectionResult::Indeterminate);
}
#[test]
fn test_bisect_linear() {
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
let mut tx = repo.start_transaction();
let root_commit = repo.store().root_commit();
let commit1 = write_random_commit(tx.repo_mut());
let commit2 = write_random_commit_with_parents(tx.repo_mut(), &[&commit1]);
let commit3 = write_random_commit_with_parents(tx.repo_mut(), &[&commit2]);
let commit4 = write_random_commit_with_parents(tx.repo_mut(), &[&commit3]);
let commit5 = write_random_commit_with_parents(tx.repo_mut(), &[&commit4]);
let commit6 = write_random_commit_with_parents(tx.repo_mut(), &[&commit5]);
let commit7 = write_random_commit_with_parents(tx.repo_mut(), &[&commit6]);
let input_range = ResolvedRevsetExpression::commit(commit7.id().clone()).ancestors();
// Root commit is the first bad commit
let expected_tests = [
(commit3.id(), Evaluation::Bad),
(commit1.id(), Evaluation::Bad),
(root_commit.id(), Evaluation::Bad),
];
let result = test_bisection(tx.repo(), &input_range, expected_tests);
assert_eq!(result, BisectionResult::Found(vec![root_commit.clone()]));
// Commit 1 is the first bad commit
let expected_tests = [
(commit3.id(), Evaluation::Bad),
(commit1.id(), Evaluation::Bad),
(root_commit.id(), Evaluation::Good),
];
let result = test_bisection(tx.repo(), &input_range, expected_tests);
assert_eq!(result, BisectionResult::Found(vec![commit1.clone()]));
// Commit 3 is the first bad commit
let expected_tests = [
(commit3.id(), Evaluation::Bad),
(commit1.id(), Evaluation::Good),
(commit2.id(), Evaluation::Good),
];
let result = test_bisection(tx.repo(), &input_range, expected_tests);
assert_eq!(result, BisectionResult::Found(vec![commit3.clone()]));
// Commit 5 is the first bad commit
let expected_tests = [
(commit3.id(), Evaluation::Good),
(commit5.id(), Evaluation::Bad),
(commit4.id(), Evaluation::Good),
];
let result = test_bisection(tx.repo(), &input_range, expected_tests);
assert_eq!(result, BisectionResult::Found(vec![commit5.clone()]));
// Commit 7 is the first bad commit
let expected_tests = [
(commit3.id(), Evaluation::Good),
(commit5.id(), Evaluation::Good),
(commit6.id(), Evaluation::Good),
];
let result = test_bisection(tx.repo(), &input_range, expected_tests);
assert_eq!(result, BisectionResult::Found(vec![commit7.clone()]));
// Commit 2 is the first bad commit but commit 3 is skipped
let expected_tests = [
(commit3.id(), Evaluation::Skip),
(commit2.id(), Evaluation::Bad),
(root_commit.id(), Evaluation::Good),
(commit1.id(), Evaluation::Good),
];
let result = test_bisection(tx.repo(), &input_range, expected_tests);
assert_eq!(result, BisectionResult::Found(vec![commit2.clone()]));
// Commit 4 is the first bad commit but commit 3 is skipped
let expected_tests = [
(commit3.id(), Evaluation::Skip),
(commit2.id(), Evaluation::Good),
(commit5.id(), Evaluation::Bad),
(commit4.id(), Evaluation::Bad),
];
let result = test_bisection(tx.repo(), &input_range, expected_tests);
// TODO: Indicate in the result that we're unsure if commit4 was the first bad
// commit because the commit before it in the set was skipped.
assert_eq!(result, BisectionResult::Found(vec![commit4.clone()]));
// Commit 7 is the first bad commit but commits before 6 were skipped
// TODO: Avoid testing every commit near first skipped commit. Test e.g. commit
// 1 and commit 5 once we see that commit 3 was indeterminate.
let expected_tests = [
(commit3.id(), Evaluation::Skip),
(commit2.id(), Evaluation::Skip),
(commit4.id(), Evaluation::Skip),
(commit1.id(), Evaluation::Skip),
(commit5.id(), Evaluation::Skip),
(root_commit.id(), Evaluation::Skip),
(commit6.id(), Evaluation::Good),
];
let result = test_bisection(tx.repo(), &input_range, expected_tests);
assert_eq!(result, BisectionResult::Found(vec![commit7.clone()]));
// Gaps in the input range are allowed
let input_range = ResolvedRevsetExpression::commits(vec![
commit7.id().clone(),
commit4.id().clone(),
commit2.id().clone(),
commit1.id().clone(),
]);
// Commit 4 is the first bad commit
let expected_tests = [
(commit2.id(), Evaluation::Good),
(commit4.id(), Evaluation::Bad),
];
let result = test_bisection(tx.repo(), &input_range, expected_tests);
assert_eq!(result, BisectionResult::Found(vec![commit4.clone()]));
}
#[test]
fn test_bisect_nonlinear() {
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
// 7
// |\
// 5 6
// | |
// 3 4
// | |
// 1 2
// |/
// 0
let mut tx = repo.start_transaction();
let root_commit = repo.store().root_commit();
let commit1 = write_random_commit(tx.repo_mut());
let commit2 = write_random_commit(tx.repo_mut());
let commit3 = write_random_commit_with_parents(tx.repo_mut(), &[&commit1]);
let commit4 = write_random_commit_with_parents(tx.repo_mut(), &[&commit2]);
let commit5 = write_random_commit_with_parents(tx.repo_mut(), &[&commit3]);
let commit6 = write_random_commit_with_parents(tx.repo_mut(), &[&commit4]);
let commit7 = write_random_commit_with_parents(tx.repo_mut(), &[&commit5, &commit6]);
let input_range = ResolvedRevsetExpression::commit(commit7.id().clone()).ancestors();
// Root commit is the first bad commit
let expected_tests = [
(commit3.id(), Evaluation::Bad),
(root_commit.id(), Evaluation::Bad),
];
let result = test_bisection(tx.repo(), &input_range, expected_tests);
assert_eq!(result, BisectionResult::Found(vec![root_commit.clone()]));
// Commit 3 is the first bad commit
let expected_tests = [
(commit3.id(), Evaluation::Bad),
(root_commit.id(), Evaluation::Good),
(commit1.id(), Evaluation::Good),
];
let result = test_bisection(tx.repo(), &input_range, expected_tests);
assert_eq!(result, BisectionResult::Found(vec![commit3.clone()]));
// Commit 4 is the first bad commit
let expected_tests = [
(commit3.id(), Evaluation::Good),
(commit4.id(), Evaluation::Bad),
(commit2.id(), Evaluation::Good),
];
let result = test_bisection(tx.repo(), &input_range, expected_tests);
assert_eq!(result, BisectionResult::Found(vec![commit4.clone()]));
// Commit 6 is the first bad commit
let expected_tests = [
(commit3.id(), Evaluation::Good),
(commit4.id(), Evaluation::Good),
(commit5.id(), Evaluation::Good),
(commit6.id(), Evaluation::Bad),
];
let result = test_bisection(tx.repo(), &input_range, expected_tests);
assert_eq!(result, BisectionResult::Found(vec![commit6.clone()]));
}
#[test]
fn test_bisect_disjoint_sets() {
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
// 1 2
// |/
// 0
let mut tx = repo.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
let commit2 = write_random_commit(tx.repo_mut());
let input_range =
ResolvedRevsetExpression::commits(vec![commit1.id().clone(), commit2.id().clone()]);
// Both commit 1 and commit 2 are (implicitly) the first bad commits
let expected_tests = [];
let result = test_bisection(tx.repo(), &input_range, expected_tests);
assert_eq!(
result,
BisectionResult::Found(vec![commit2.clone(), commit1.clone()])
);
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_merged_tree.rs | lib/tests/test_merged_tree.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use futures::StreamExt as _;
use itertools::Itertools as _;
use jj_lib::backend::CommitId;
use jj_lib::backend::CopyRecord;
use jj_lib::backend::FileId;
use jj_lib::backend::TreeValue;
use jj_lib::conflict_labels::ConflictLabels;
use jj_lib::copies::CopiesTreeDiffEntryPath;
use jj_lib::copies::CopyOperation;
use jj_lib::copies::CopyRecords;
use jj_lib::files;
use jj_lib::matchers::EverythingMatcher;
use jj_lib::matchers::FilesMatcher;
use jj_lib::matchers::Matcher;
use jj_lib::matchers::PrefixMatcher;
use jj_lib::merge::Diff;
use jj_lib::merge::Merge;
use jj_lib::merge::MergedTreeValue;
use jj_lib::merged_tree::MergedTree;
use jj_lib::merged_tree::MergedTreeBuilder;
use jj_lib::merged_tree::TreeDiffEntry;
use jj_lib::merged_tree::TreeDiffIterator;
use jj_lib::merged_tree::TreeDiffStreamImpl;
use jj_lib::repo::Repo as _;
use jj_lib::repo_path::RepoPath;
use jj_lib::repo_path::RepoPathBuf;
use pollster::FutureExt as _;
use pretty_assertions::assert_eq;
use testutils::TestRepo;
use testutils::assert_tree_eq;
use testutils::create_single_tree;
use testutils::create_tree;
use testutils::repo_path;
use testutils::repo_path_buf;
use testutils::repo_path_component;
fn diff_entry_tuple(diff: TreeDiffEntry) -> (RepoPathBuf, (MergedTreeValue, MergedTreeValue)) {
let values = diff.values.unwrap();
(diff.path, (values.before, values.after))
}
fn diff_stream_equals_iter(tree1: &MergedTree, tree2: &MergedTree, matcher: &dyn Matcher) {
let iter_diff: Vec<_> = TreeDiffIterator::new(tree1, tree2, matcher)
.map(|diff| (diff.path, diff.values.unwrap()))
.collect();
let max_concurrent_reads = 10;
tree1.store().clear_caches();
let stream_diff: Vec<_> = TreeDiffStreamImpl::new(tree1, tree2, matcher, max_concurrent_reads)
.map(|diff| (diff.path, diff.values.unwrap()))
.collect()
.block_on();
assert_eq!(stream_diff, iter_diff);
}
/// Test that a tree built with no changes on top of an add/add conflict gets
/// resolved.
#[test]
fn test_merged_tree_builder_resolves_conflict() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let store = repo.store();
    let path1 = repo_path("dir/file");
    // tree2 and tree3 (the "add" sides) agree on the content while tree1 (the
    // base, i.e. the middle element of the 3-way merge below) differs, so the
    // conflict is trivially resolvable to tree2's content.
    let tree1 = create_single_tree(repo, &[(path1, "foo")]);
    let tree2 = create_single_tree(repo, &[(path1, "bar")]);
    let tree3 = create_single_tree(repo, &[(path1, "bar")]);
    let base_tree = MergedTree::new(
        store.clone(),
        Merge::from_vec(vec![
            tree2.id().clone(),
            tree1.id().clone(),
            tree3.id().clone(),
        ]),
        ConflictLabels::from_vec(vec!["tree 2".into(), "tree 1".into(), "tree 3".into()]),
    );
    // Write the tree without recording any changes; the conflict should still
    // be resolved by the builder.
    let tree_builder = MergedTreeBuilder::new(base_tree);
    let tree = tree_builder.write_tree().unwrap();
    assert_eq!(*tree.tree_ids(), Merge::resolved(tree2.id().clone()));
}
/// Tests `path_value()` and `entries()`/`entries_matching()` on a merged tree
/// containing a resolved file, a content conflict, a modify/delete conflict,
/// and a file/dir conflict.
#[test]
fn test_path_value_and_entries() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Create a MergedTree
    let resolved_file_path = repo_path("dir1/subdir/resolved");
    let resolved_dir_path = &resolved_file_path.parent().unwrap();
    let conflicted_file_path = repo_path("dir2/conflicted");
    let missing_path = repo_path("dir2/missing_file");
    let modify_delete_path = repo_path("dir2/modify_delete");
    let file_dir_conflict_path = repo_path("file_dir");
    let file_dir_conflict_sub_path = repo_path("file_dir/file");
    let tree1 = create_single_tree(
        repo,
        &[
            (resolved_file_path, "unchanged"),
            (conflicted_file_path, "1"),
            (modify_delete_path, "1"),
            (file_dir_conflict_path, "1"),
        ],
    );
    let tree2 = create_single_tree(
        repo,
        &[
            (resolved_file_path, "unchanged"),
            (conflicted_file_path, "2"),
            (modify_delete_path, "2"),
            (file_dir_conflict_path, "2"),
        ],
    );
    let tree3 = create_single_tree(
        repo,
        &[
            (resolved_file_path, "unchanged"),
            (conflicted_file_path, "3"),
            // No modify_delete_path in this tree
            (file_dir_conflict_sub_path, "1"),
        ],
    );
    // 3-sided merge with tree1 as the base (the middle element) and
    // tree2/tree3 as the sides.
    let merged_tree = MergedTree::new(
        repo.store().clone(),
        Merge::from_vec(vec![
            tree2.id().clone(),
            tree1.id().clone(),
            tree3.id().clone(),
        ]),
        ConflictLabels::from_vec(vec!["tree 2".into(), "tree 1".into(), "tree 3".into()]),
    );
    // Get the root tree
    assert_eq!(
        merged_tree.path_value(RepoPath::root()).unwrap(),
        Merge::from_removes_adds(
            vec![Some(TreeValue::Tree(tree1.id().clone()))],
            vec![
                Some(TreeValue::Tree(tree2.id().clone())),
                Some(TreeValue::Tree(tree3.id().clone())),
            ]
        )
    );
    // Get file path without conflict
    assert_eq!(
        merged_tree.path_value(resolved_file_path).unwrap(),
        Merge::resolved(tree1.path_value(resolved_file_path).unwrap()),
    );
    // Get directory path without conflict
    assert_eq!(
        merged_tree.path_value(resolved_dir_path).unwrap(),
        Merge::resolved(tree1.path_value(resolved_dir_path).unwrap()),
    );
    // Get missing path
    assert_eq!(
        merged_tree.path_value(missing_path).unwrap(),
        Merge::absent()
    );
    // Get modify/delete conflict (some None values)
    assert_eq!(
        merged_tree.path_value(modify_delete_path).unwrap(),
        Merge::from_removes_adds(
            vec![tree1.path_value(modify_delete_path).unwrap()],
            vec![tree2.path_value(modify_delete_path).unwrap(), None]
        ),
    );
    // Get file/dir conflict path
    assert_eq!(
        merged_tree.path_value(file_dir_conflict_path).unwrap(),
        Merge::from_removes_adds(
            vec![tree1.path_value(file_dir_conflict_path).unwrap()],
            vec![
                tree2.path_value(file_dir_conflict_path).unwrap(),
                tree3.path_value(file_dir_conflict_path).unwrap()
            ]
        ),
    );
    // Get file inside file/dir conflict
    // There is a conflict in the parent directory, so it is considered to not be a
    // directory in the merged tree, making the file hidden until the directory
    // conflict has been resolved.
    assert_eq!(
        merged_tree.path_value(file_dir_conflict_sub_path).unwrap(),
        Merge::absent(),
    );
    // Test entries()
    let actual_entries = merged_tree
        .entries()
        .map(|(path, result)| (path, result.unwrap()))
        .collect_vec();
    // missing_path, resolved_dir_path, and file_dir_conflict_sub_path should not
    // appear
    let expected_entries = [
        resolved_file_path,
        conflicted_file_path,
        modify_delete_path,
        file_dir_conflict_path,
    ]
    .iter()
    .sorted()
    .map(|&path| (path.to_owned(), merged_tree.path_value(path).unwrap()))
    .collect_vec();
    assert_eq!(actual_entries, expected_entries);
    // entries_matching() should only visit paths selected by the matcher;
    // the hidden file/dir sub-path still doesn't appear.
    let actual_entries = merged_tree
        .entries_matching(&FilesMatcher::new([
            &resolved_file_path,
            &modify_delete_path,
            &file_dir_conflict_sub_path,
        ]))
        .map(|(path, result)| (path, result.unwrap()))
        .collect_vec();
    let expected_entries = [resolved_file_path, modify_delete_path]
        .iter()
        .sorted()
        .map(|&path| (path.to_owned(), merged_tree.path_value(path).unwrap()))
        .collect_vec();
    assert_eq!(actual_entries, expected_entries);
}
/// Resolving a 3-way merge whose conflicts are all trivially resolvable
/// (identical files, non-overlapping hunks, additions on different sides, and
/// a directory emptied by the two sides together) should yield a fully
/// resolved tree.
#[test]
fn test_resolve_success() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let unchanged_path = repo_path("unchanged");
    let trivial_file_path = repo_path("trivial-file");
    let trivial_hunk_path = repo_path("trivial-hunk");
    let both_added_dir_path = repo_path("added-dir");
    let both_added_dir_file1_path = &both_added_dir_path.join(repo_path_component("file1"));
    let both_added_dir_file2_path = &both_added_dir_path.join(repo_path_component("file2"));
    let emptied_dir_path = repo_path("to-become-empty");
    let emptied_dir_file1_path = &emptied_dir_path.join(repo_path_component("file1"));
    let emptied_dir_file2_path = &emptied_dir_path.join(repo_path_component("file2"));
    let base1 = create_single_tree(
        repo,
        &[
            (unchanged_path, "unchanged"),
            (trivial_file_path, "base1"),
            (trivial_hunk_path, "line1\nline2\nline3\n"),
            (emptied_dir_file1_path, "base1"),
            (emptied_dir_file2_path, "base1"),
        ],
    );
    // side1 edits line1, side2 edits line3 of trivial_hunk_path, so the hunks
    // merge cleanly. Each side also deletes one file of the emptied dir.
    let side1 = create_single_tree(
        repo,
        &[
            (unchanged_path, "unchanged"),
            (trivial_file_path, "base1"),
            (trivial_hunk_path, "line1 side1\nline2\nline3\n"),
            (both_added_dir_file1_path, "side1"),
            (emptied_dir_file2_path, "base1"),
        ],
    );
    let side2 = create_single_tree(
        repo,
        &[
            (unchanged_path, "unchanged"),
            (trivial_file_path, "side2"),
            (trivial_hunk_path, "line1\nline2\nline3 side2\n"),
            (both_added_dir_file2_path, "side2"),
            (emptied_dir_file1_path, "base1"),
        ],
    );
    // Note: the emptied dir is gone entirely in the expected result.
    let expected = create_tree(
        repo,
        &[
            (unchanged_path, "unchanged"),
            (trivial_file_path, "side2"),
            (trivial_hunk_path, "line1 side1\nline2\nline3 side2\n"),
            (both_added_dir_file1_path, "side1"),
            (both_added_dir_file2_path, "side2"),
        ],
    );
    let tree = MergedTree::new(
        repo.store().clone(),
        Merge::from_vec(vec![
            side1.id().clone(),
            base1.id().clone(),
            side2.id().clone(),
        ]),
        ConflictLabels::from_vec(vec!["left".into(), "base".into(), "right".into()]),
    );
    let resolved_tree = tree.resolve().block_on().unwrap();
    assert!(resolved_tree.tree_ids().is_resolved());
    assert_tree_eq!(resolved_tree, expected);
}
/// Resolving a merge where the two sides together delete everything should
/// produce the empty tree at the root.
#[test]
fn test_resolve_root_becomes_empty() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let store = repo.store();
    let file_in_dir1 = repo_path("dir1/file");
    let file_in_dir2 = repo_path("dir2/file");
    // The base has both files; each side deletes a different one, so the
    // resolved tree should contain neither.
    let base1 = create_single_tree(repo, &[(file_in_dir1, "base1"), (file_in_dir2, "base1")]);
    let side1 = create_single_tree(repo, &[(file_in_dir2, "base1")]);
    let side2 = create_single_tree(repo, &[(file_in_dir1, "base1")]);
    let tree_ids = Merge::from_vec(vec![
        side1.id().clone(),
        base1.id().clone(),
        side2.id().clone(),
    ]);
    let labels = ConflictLabels::from_vec(vec!["side 1".into(), "base 1".into(), "side 2".into()]);
    let tree = MergedTree::new(store.clone(), tree_ids, labels);
    let resolved = tree.resolve().block_on().unwrap();
    assert_tree_eq!(resolved, store.empty_merged_tree());
}
/// Resolving a 5-way (3-sided) merge should resolve the trivial conflict and
/// simplify the remaining conflict to fewer sides, keeping the matching
/// labels.
#[test]
fn test_resolve_with_conflict() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // The trivial conflict should be resolved but the non-trivial should not (and
    // cannot)
    let trivial_path = repo_path("dir1/trivial");
    let conflict_path = repo_path("dir2/file_conflict");
    // We start with a 3-sided conflict:
    let side1 = create_single_tree(repo, &[(trivial_path, "side1"), (conflict_path, "base")]);
    let base1 = create_single_tree(repo, &[(trivial_path, "base"), (conflict_path, "base")]);
    let side2 = create_single_tree(repo, &[(trivial_path, "base"), (conflict_path, "side2")]);
    let base2 = create_single_tree(repo, &[(trivial_path, "base"), (conflict_path, "base")]);
    let side3 = create_single_tree(repo, &[(trivial_path, "base"), (conflict_path, "side3")]);
    // This should be reduced to a 2-sided conflict after "trivial" is resolved:
    let expected_side1 =
        create_single_tree(repo, &[(trivial_path, "side1"), (conflict_path, "side2")]);
    let expected_base1 =
        create_single_tree(repo, &[(trivial_path, "side1"), (conflict_path, "base")]);
    let expected_side2 =
        create_single_tree(repo, &[(trivial_path, "side1"), (conflict_path, "side3")]);
    let tree = MergedTree::new(
        repo.store().clone(),
        Merge::from_vec(vec![
            side1.id().clone(),
            base1.id().clone(),
            side2.id().clone(),
            base2.id().clone(),
            side3.id().clone(),
        ]),
        ConflictLabels::from_vec(vec![
            "side 1".into(),
            "base 1".into(),
            "side 2".into(),
            "base 2".into(),
            "side 3".into(),
        ]),
    );
    let resolved_tree = tree.resolve().block_on().unwrap();
    // The surviving 2-sided conflict keeps the labels of the sides that were
    // not cancelled out ("side 2"/"base 2"/"side 3").
    assert_tree_eq!(
        resolved_tree,
        MergedTree::new(
            repo.store().clone(),
            Merge::from_vec(vec![
                expected_side1.id().clone(),
                expected_base1.id().clone(),
                expected_side2.id().clone()
            ]),
            ConflictLabels::from_vec(vec!["side 2".into(), "base 2".into(), "side 3".into()]),
        )
    );
}
/// Resolving a conflict where one side's subtree is entirely absent should
/// leave the tree unchanged (the conflict cannot be resolved) and must not
/// materialize an empty subtree on that side.
#[test]
fn test_resolve_with_conflict_containing_empty_subtree() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // Since "dir" in side2 is absent, side2's root tree should be empty as
    // well. If it were added to the root tree, side2.id() would differ.
    let conflict_path = repo_path("dir/file_conflict");
    let base1 = create_single_tree(repo, &[(conflict_path, "base1")]);
    let side1 = create_single_tree(repo, &[(conflict_path, "side1")]);
    let side2 = create_single_tree(repo, &[]);
    let tree = MergedTree::new(
        repo.store().clone(),
        Merge::from_vec(vec![
            side1.id().clone(),
            base1.id().clone(),
            side2.id().clone(),
        ]),
        ConflictLabels::from_vec(vec!["left".into(), "base".into(), "right".into()]),
    );
    // The round-tripped tree must compare equal to the input tree.
    let resolved_tree = tree.clone().resolve().block_on().unwrap();
    assert_tree_eq!(resolved_tree, tree);
}
/// Tests `conflicts()` and `conflicts_matching()` on a 3-way merge covering
/// trivial, hunk-level, file-level, modify/delete, same-add, different-add,
/// and file/dir conflicts, both before and after `resolve()`.
#[test]
fn test_conflict_iterator() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let unchanged_path = repo_path("dir/subdir/unchanged");
    let trivial_path = repo_path("dir/subdir/trivial");
    let trivial_hunk_path = repo_path("dir/non_trivial");
    let file_conflict_path = repo_path("dir/subdir/file_conflict");
    let modify_delete_path = repo_path("dir/subdir/modify_delete");
    let same_add_path = repo_path("dir/subdir/same_add");
    let different_add_path = repo_path("dir/subdir/different_add");
    let dir_file_path = repo_path("dir/subdir/dir_file");
    let added_dir_path = repo_path("dir/new_dir");
    let modify_delete_dir_path = repo_path("dir/modify_delete_dir");
    let base1 = create_single_tree(
        repo,
        &[
            (unchanged_path, "unchanged"),
            (trivial_path, "base"),
            (trivial_hunk_path, "line1\nline2\nline3\n"),
            (file_conflict_path, "base"),
            (modify_delete_path, "base"),
            // no same_add_path
            // no different_add_path
            (dir_file_path, "base"),
            // no added_dir_path
            (
                &modify_delete_dir_path.join(repo_path_component("base")),
                "base",
            ),
        ],
    );
    let side1 = create_single_tree(
        repo,
        &[
            (unchanged_path, "unchanged"),
            (trivial_path, "base"),
            (file_conflict_path, "side1"),
            (trivial_hunk_path, "line1 side1\nline2\nline3\n"),
            (modify_delete_path, "modified"),
            (same_add_path, "same"),
            (different_add_path, "side1"),
            (dir_file_path, "side1"),
            (&added_dir_path.join(repo_path_component("side1")), "side1"),
            (
                &modify_delete_dir_path.join(repo_path_component("side1")),
                "side1",
            ),
        ],
    );
    // Note: side2 turns dir_file_path into a directory (file/dir conflict).
    let side2 = create_single_tree(
        repo,
        &[
            (unchanged_path, "unchanged"),
            (trivial_path, "side2"),
            (file_conflict_path, "side2"),
            (trivial_hunk_path, "line1\nline2\nline3 side2\n"),
            // no modify_delete_path
            (same_add_path, "same"),
            (different_add_path, "side2"),
            (&dir_file_path.join(repo_path_component("dir")), "new"),
            (&added_dir_path.join(repo_path_component("side2")), "side2"),
            // no modify_delete_dir_path
        ],
    );
    let tree = MergedTree::new(
        repo.store().clone(),
        Merge::from_vec(vec![
            side1.id().clone(),
            base1.id().clone(),
            side2.id().clone(),
        ]),
        ConflictLabels::from_vec(vec!["side 1".into(), "base 1".into(), "side 2".into()]),
    );
    let conflicts = tree
        .conflicts()
        .map(|(path, conflict)| (path, conflict.unwrap()))
        .collect_vec();
    // Expected conflict value at `path`: base1 as the remove, side1 and side2
    // as the adds.
    let conflict_at = |path: &RepoPath| {
        Merge::from_removes_adds(
            vec![base1.path_value(path).unwrap()],
            vec![
                side1.path_value(path).unwrap(),
                side2.path_value(path).unwrap(),
            ],
        )
    };
    // We initially also get a conflict in trivial_hunk_path because we had
    // forgotten to resolve conflicts
    assert_eq!(
        conflicts,
        vec![
            (trivial_hunk_path.to_owned(), conflict_at(trivial_hunk_path)),
            (
                different_add_path.to_owned(),
                conflict_at(different_add_path)
            ),
            (dir_file_path.to_owned(), conflict_at(dir_file_path)),
            (
                file_conflict_path.to_owned(),
                conflict_at(file_conflict_path)
            ),
            (
                modify_delete_path.to_owned(),
                conflict_at(modify_delete_path)
            ),
        ]
    );
    // We can filter conflicts using a matcher
    let conflicts = tree
        .conflicts_matching(&PrefixMatcher::new([file_conflict_path, dir_file_path]))
        .map(|(path, conflict)| (path, conflict.unwrap()))
        .collect_vec();
    assert_eq!(
        conflicts,
        vec![
            (dir_file_path.to_owned(), conflict_at(dir_file_path)),
            (
                file_conflict_path.to_owned(),
                conflict_at(file_conflict_path)
            ),
        ]
    );
    // After we resolve conflicts, there are only non-trivial conflicts left
    let tree = tree.resolve().block_on().unwrap();
    let conflicts = tree
        .conflicts()
        .map(|(path, conflict)| (path, conflict.unwrap()))
        .collect_vec();
    assert_eq!(
        conflicts,
        vec![
            (
                different_add_path.to_owned(),
                conflict_at(different_add_path)
            ),
            (dir_file_path.to_owned(), conflict_at(dir_file_path)),
            (
                file_conflict_path.to_owned(),
                conflict_at(file_conflict_path)
            ),
            (
                modify_delete_path.to_owned(),
                conflict_at(modify_delete_path)
            ),
        ]
    );
}
/// `conflicts()` on a 5-way merge should report the full, unsimplified
/// conflict at each path, even where the conflict could be simplified to
/// fewer sides.
#[test]
fn test_conflict_iterator_higher_arity() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let two_sided_path = repo_path("dir/2-sided");
    let three_sided_path = repo_path("dir/3-sided");
    // At two_sided_path, side2 matches base1, so that conflict would simplify
    // to 2 sides; at three_sided_path all three sides differ.
    let base1 = create_single_tree(
        repo,
        &[(two_sided_path, "base1"), (three_sided_path, "base1")],
    );
    let base2 = create_single_tree(
        repo,
        &[(two_sided_path, "base2"), (three_sided_path, "base2")],
    );
    let side1 = create_single_tree(
        repo,
        &[(two_sided_path, "side1"), (three_sided_path, "side1")],
    );
    let side2 = create_single_tree(
        repo,
        &[(two_sided_path, "base1"), (three_sided_path, "side2")],
    );
    let side3 = create_single_tree(
        repo,
        &[(two_sided_path, "side3"), (three_sided_path, "side3")],
    );
    let tree = MergedTree::new(
        repo.store().clone(),
        Merge::from_vec(vec![
            side1.id().clone(),
            base1.id().clone(),
            side2.id().clone(),
            base2.id().clone(),
            side3.id().clone(),
        ]),
        ConflictLabels::from_vec(vec![
            "side 1".into(),
            "base 1".into(),
            "side 2".into(),
            "base 2".into(),
            "side 3".into(),
        ]),
    );
    let conflicts = tree
        .conflicts()
        .map(|(path, conflict)| (path, conflict.unwrap()))
        .collect_vec();
    // The full 3-sided conflict value at `path`.
    let conflict_at = |path: &RepoPath| {
        Merge::from_removes_adds(
            vec![
                base1.path_value(path).unwrap(),
                base2.path_value(path).unwrap(),
            ],
            vec![
                side1.path_value(path).unwrap(),
                side2.path_value(path).unwrap(),
                side3.path_value(path).unwrap(),
            ],
        )
    };
    // Both paths have the full, unsimplified conflict (3-sided)
    assert_eq!(
        conflicts,
        vec![
            (two_sided_path.to_owned(), conflict_at(two_sided_path)),
            (three_sided_path.to_owned(), conflict_at(three_sided_path))
        ]
    );
}
/// Diff two resolved trees.
///
/// Covers a modified, a removed, and an added file; the unchanged file must
/// not appear in the diff.
#[test]
fn test_diff_resolved() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let clean_path = repo_path("dir1/file");
    let modified_path = repo_path("dir2/file");
    let removed_path = repo_path("dir3/file");
    let added_path = repo_path("dir4/file");
    let before = create_single_tree(
        repo,
        &[
            (clean_path, "clean"),
            (modified_path, "before"),
            (removed_path, "before"),
        ],
    );
    let after = create_single_tree(
        repo,
        &[
            (clean_path, "clean"),
            (modified_path, "after"),
            (added_path, "after"),
        ],
    );
    let before_merged = MergedTree::resolved(repo.store().clone(), before.id().clone());
    let after_merged = MergedTree::resolved(repo.store().clone(), after.id().clone());
    let diff: Vec<_> = before_merged
        .diff_stream(&after_merged, &EverythingMatcher)
        .map(diff_entry_tuple)
        .collect()
        .block_on();
    // Diff entries are yielded in path order: dir2, dir3, dir4.
    assert_eq!(diff.len(), 3);
    assert_eq!(
        diff[0].clone(),
        (
            modified_path.to_owned(),
            (
                Merge::resolved(before.path_value(modified_path).unwrap()),
                Merge::resolved(after.path_value(modified_path).unwrap())
            ),
        )
    );
    assert_eq!(
        diff[1].clone(),
        (
            removed_path.to_owned(),
            (
                Merge::resolved(before.path_value(removed_path).unwrap()),
                Merge::absent()
            ),
        )
    );
    assert_eq!(
        diff[2].clone(),
        (
            added_path.to_owned(),
            (
                Merge::absent(),
                Merge::resolved(after.path_value(added_path).unwrap())
            ),
        )
    );
    diff_stream_equals_iter(&before_merged, &after_merged, &EverythingMatcher);
}
/// Builds a `CopyRecords` from `(source, target)` path pairs. The commit and
/// file ids are filled with empty placeholder values since the tests here
/// only care about the paths.
fn create_copy_records(paths: &[(&RepoPath, &RepoPath)]) -> CopyRecords {
    let records = paths.iter().map(|&(source, target)| {
        Ok(CopyRecord {
            source: source.to_owned(),
            target: target.to_owned(),
            target_commit: CommitId::new(vec![]),
            source_commit: CommitId::new(vec![]),
            source_file: FileId::new(vec![]),
        })
    });
    let mut copy_records = CopyRecords::default();
    copy_records.add_records(records).unwrap();
    copy_records
}
/// Diff two resolved trees with copy records: a rename (source removed) and a
/// copy (source still present and modified).
#[test]
fn test_diff_copy_tracing() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let clean_path = repo_path("1/clean/path");
    let modified_path = repo_path("2/modified/path");
    let copied_path = repo_path("3/copied/path");
    let removed_path = repo_path("4/removed/path");
    let added_path = repo_path("5/added/path");
    let before = create_single_tree(
        repo,
        &[
            (clean_path, "clean"),
            (modified_path, "before"),
            (removed_path, "before"),
        ],
    );
    let after = create_single_tree(
        repo,
        &[
            (clean_path, "clean"),
            (modified_path, "after"),
            (copied_path, "after"),
            (added_path, "after"),
        ],
    );
    let before_merged = MergedTree::resolved(repo.store().clone(), before.id().clone());
    let after_merged = MergedTree::resolved(repo.store().clone(), after.id().clone());
    // removed -> added should be reported as a rename (source disappears);
    // modified -> copied as a copy (source remains).
    let copy_records =
        create_copy_records(&[(removed_path, added_path), (modified_path, copied_path)]);
    let diff: Vec<_> = before_merged
        .diff_stream_with_copies(&after_merged, &EverythingMatcher, &copy_records)
        .map(|diff| (diff.path, diff.values.unwrap()))
        .collect()
        .block_on();
    assert_eq!(diff.len(), 3);
    assert_eq!(
        diff[0].clone(),
        (
            CopiesTreeDiffEntryPath {
                source: None,
                target: modified_path.to_owned()
            },
            Diff::new(
                Merge::resolved(before.path_value(modified_path).unwrap()),
                Merge::resolved(after.path_value(modified_path).unwrap())
            ),
        )
    );
    assert_eq!(
        diff[1].clone(),
        (
            CopiesTreeDiffEntryPath {
                source: Some((modified_path.to_owned(), CopyOperation::Copy)),
                target: copied_path.to_owned(),
            },
            Diff::new(
                Merge::resolved(before.path_value(modified_path).unwrap()),
                Merge::resolved(after.path_value(copied_path).unwrap()),
            ),
        )
    );
    assert_eq!(
        diff[2].clone(),
        (
            CopiesTreeDiffEntryPath {
                source: Some((removed_path.to_owned(), CopyOperation::Rename)),
                target: added_path.to_owned(),
            },
            Diff::new(
                Merge::resolved(before.path_value(removed_path).unwrap()),
                Merge::resolved(after.path_value(added_path).unwrap())
            ),
        )
    );
    diff_stream_equals_iter(&before_merged, &after_merged, &EverythingMatcher);
}
/// Copy tracing where renames swap a file with a directory, and a file is
/// renamed into a directory that replaces it at the same path.
#[test]
fn test_diff_copy_tracing_file_and_dir() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    // a -> b (file)
    // b -> a (dir)
    // c -> c/file (file)
    let before = create_tree(
        repo,
        &[
            (repo_path("a"), "content1"),
            (repo_path("b/file"), "content2"),
            (repo_path("c"), "content3"),
        ],
    );
    let after = create_tree(
        repo,
        &[
            (repo_path("a/file"), "content2"),
            (repo_path("b"), "content1"),
            (repo_path("c/file"), "content3"),
        ],
    );
    let copy_records = create_copy_records(&[
        (repo_path("a"), repo_path("b")),
        (repo_path("b/file"), repo_path("a/file")),
        (repo_path("c"), repo_path("c/file")),
    ]);
    let diff: Vec<_> = before
        .diff_stream_with_copies(&after, &EverythingMatcher, &copy_records)
        .map(|diff| (diff.path, diff.values.unwrap()))
        .collect()
        .block_on();
    // All three entries are reported as renames, ordered by target path.
    assert_eq!(diff.len(), 3);
    assert_eq!(
        diff[0],
        (
            CopiesTreeDiffEntryPath {
                source: Some((repo_path_buf("b/file"), CopyOperation::Rename)),
                target: repo_path_buf("a/file"),
            },
            Diff::new(
                before.path_value(repo_path("b/file")).unwrap(),
                after.path_value(repo_path("a/file")).unwrap(),
            ),
        )
    );
    assert_eq!(
        diff[1],
        (
            CopiesTreeDiffEntryPath {
                source: Some((repo_path_buf("a"), CopyOperation::Rename)),
                target: repo_path_buf("b"),
            },
            Diff::new(
                before.path_value(repo_path("a")).unwrap(),
                after.path_value(repo_path("b")).unwrap(),
            ),
        )
    );
    assert_eq!(
        diff[2],
        (
            CopiesTreeDiffEntryPath {
                source: Some((repo_path_buf("c"), CopyOperation::Rename)),
                target: repo_path_buf("c/file"),
            },
            Diff::new(
                before.path_value(repo_path("c")).unwrap(),
                after.path_value(repo_path("c/file")).unwrap(),
            ),
        )
    );
    diff_stream_equals_iter(&before, &after, &EverythingMatcher);
}
/// Diff two conflicted trees
#[test]
fn test_diff_conflicted() {
let test_repo = TestRepo::init();
let repo = &test_repo.repo;
// path1 is a clean (unchanged) conflict
// path2 is a conflict before and different conflict after
// path3 is resolved before and a conflict after
// path4 is missing before and a conflict after
let path1 = repo_path("dir1/file");
let path2 = repo_path("dir2/file");
let path3 = repo_path("dir4/file");
let path4 = repo_path("dir6/file");
let left_base = create_single_tree(
repo,
&[(path1, "clean-base"), (path2, "left-base"), (path3, "left")],
);
let left_side1 = create_single_tree(
repo,
&[
(path1, "clean-side1"),
(path2, "left-side1"),
(path3, "left"),
],
);
let left_side2 = create_single_tree(
repo,
&[
(path1, "clean-side2"),
(path2, "left-side2"),
(path3, "left"),
],
);
let right_base = create_single_tree(
repo,
&[
(path1, "clean-base"),
(path2, "right-base"),
(path3, "right-base"),
(path4, "right-base"),
],
);
let right_side1 = create_single_tree(
repo,
&[
(path1, "clean-side1"),
(path2, "right-side1"),
(path3, "right-side1"),
(path4, "right-side1"),
],
);
let right_side2 = create_single_tree(
repo,
&[
(path1, "clean-side2"),
(path2, "right-side2"),
(path3, "right-side2"),
(path4, "right-side2"),
],
);
let left_merged = MergedTree::new(
repo.store().clone(),
Merge::from_vec(vec![
left_side1.id().clone(),
left_base.id().clone(),
left_side2.id().clone(),
]),
ConflictLabels::from_vec(vec![
"left side 1".into(),
"left base".into(),
"left side 2".into(),
]),
);
let right_merged = MergedTree::new(
repo.store().clone(),
Merge::from_vec(vec![
right_side1.id().clone(),
right_base.id().clone(),
right_side2.id().clone(),
]),
ConflictLabels::from_vec(vec![
"right side 1".into(),
"right base".into(),
"right side 2".into(),
]),
);
// Test the forwards diff
let actual_diff: Vec<_> = left_merged
.diff_stream(&right_merged, &EverythingMatcher)
.map(diff_entry_tuple)
.collect()
.block_on();
let expected_diff = [path2, path3, path4]
.iter()
.map(|&path| {
(
path.to_owned(),
(
left_merged.path_value(path).unwrap(),
right_merged.path_value(path).unwrap(),
),
)
})
.collect_vec();
assert_eq!(actual_diff, expected_diff);
diff_stream_equals_iter(&left_merged, &right_merged, &EverythingMatcher);
// Test the reverse diff
let actual_diff: Vec<_> = right_merged
.diff_stream(&left_merged, &EverythingMatcher)
.map(diff_entry_tuple)
.collect()
.block_on();
let expected_diff = [path2, path3, path4]
.iter()
.map(|&path| {
(
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
// Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use assert_matches::assert_matches;
use futures::StreamExt as _;
use indoc::indoc;
use itertools::Itertools as _;
use jj_lib::backend::BackendError;
use jj_lib::backend::ChangeId;
use jj_lib::backend::MillisSinceEpoch;
use jj_lib::backend::Signature;
use jj_lib::backend::Timestamp;
use jj_lib::config::ConfigLayer;
use jj_lib::config::ConfigSource;
use jj_lib::config::StackedConfig;
use jj_lib::matchers::EverythingMatcher;
use jj_lib::merged_tree::MergedTree;
use jj_lib::repo::Repo as _;
use jj_lib::repo_path::RepoPath;
use jj_lib::repo_path::RepoPathBuf;
use jj_lib::rewrite::RebaseOptions;
use jj_lib::settings::UserSettings;
use pollster::FutureExt as _;
use test_case::test_case;
use testutils::TestRepo;
use testutils::TestRepoBackend;
use testutils::assert_rebased_onto;
use testutils::assert_tree_eq;
use testutils::create_tree;
use testutils::rebase_descendants_with_options_return_map;
use testutils::repo_path;
use testutils::write_random_commit;
use testutils::write_random_commit_with_parents;
/// Returns the base test user config with the `debug.commit-timestamp`
/// setting forced to `timestamp` (an RFC 3339 string) via an extra user-level
/// layer.
fn config_with_commit_timestamp(timestamp: &str) -> StackedConfig {
    let mut overlay = ConfigLayer::empty(ConfigSource::User);
    overlay.set_value("debug.commit-timestamp", timestamp).unwrap();
    let mut config = testutils::base_user_config();
    config.add_layer(overlay);
    config
}
/// Returns the paths that differ between the two trees, in the order the
/// diff stream yields them. Each entry's values are unwrapped so a diff error
/// fails the test; only the path is kept.
fn diff_paths(from_tree: &MergedTree, to_tree: &MergedTree) -> Vec<RepoPathBuf> {
    let stream = from_tree.diff_stream(to_tree, &EverythingMatcher);
    stream
        .map(|entry| {
            entry.values.unwrap();
            entry.path
        })
        .collect()
        .block_on()
}
/// Converts a slice of borrowed repo paths into a vector of owned ones.
fn to_owned_path_vec(paths: &[&RepoPath]) -> Vec<RepoPathBuf> {
    let mut owned = Vec::with_capacity(paths.len());
    for &path in paths {
        owned.push(path.to_owned());
    }
    owned
}
/// Writing an initial commit should preserve the tree, description, author,
/// and committer set on the builder, and record no predecessors.
#[test_case(TestRepoBackend::Simple ; "simple backend")]
#[test_case(TestRepoBackend::Git ; "git backend")]
fn test_initial(backend: TestRepoBackend) {
    let test_repo = TestRepo::init_with_backend(backend);
    let repo = &test_repo.repo;
    let store = repo.store();
    let root_file_path = repo_path("file");
    let dir_file_path = repo_path("dir/file");
    let tree = create_tree(
        repo,
        &[
            (root_file_path, "file contents"),
            (dir_file_path, "dir/file contents"),
        ],
    );
    let mut tx = repo.start_transaction();
    let author_signature = Signature {
        name: "author name".to_string(),
        email: "author email".to_string(),
        timestamp: Timestamp {
            timestamp: MillisSinceEpoch(1000),
            tz_offset: 60,
        },
    };
    let committer_signature = Signature {
        name: "committer name".to_string(),
        email: "committer email".to_string(),
        timestamp: Timestamp {
            timestamp: MillisSinceEpoch(2000),
            tz_offset: -60,
        },
    };
    let change_id = ChangeId::new(vec![100u8; 16]);
    let builder = tx
        .repo_mut()
        .new_commit(vec![store.root_commit_id().clone()], tree.clone())
        .set_change_id(change_id.clone())
        .set_description("description")
        .set_author(author_signature.clone())
        .set_committer(committer_signature.clone());
    // The builder's accessors should reflect the values set above before the
    // commit is written.
    assert_eq!(builder.parents(), &[store.root_commit_id().clone()]);
    assert_eq!(builder.predecessors(), &[]);
    assert_tree_eq!(builder.tree(), tree);
    assert_eq!(builder.change_id(), &change_id);
    assert_eq!(builder.author(), &author_signature);
    assert_eq!(builder.committer(), &committer_signature);
    let commit = builder.write().unwrap();
    let repo = tx.commit("test").unwrap();
    let parents: Vec<_> = commit.parents().try_collect().unwrap();
    assert_eq!(parents, vec![store.root_commit()]);
    assert!(commit.store_commit().predecessors.is_empty());
    assert_eq!(commit.description(), "description");
    assert_eq!(commit.author(), &author_signature);
    assert_eq!(commit.committer(), &committer_signature);
    assert_eq!(
        diff_paths(&store.root_commit().tree(), &commit.tree()),
        to_owned_path_vec(&[dir_file_path, root_file_path]),
    );
    // An initial commit has an empty predecessor list in the operation.
    assert_matches!(
        repo.operation().predecessors_for_commit(commit.id()),
        Some([])
    );
}
/// Rewriting a commit with a new tree should keep the original author, take
/// the committer from the settings active at rewrite time, and record the
/// original commit as a predecessor.
#[test_case(TestRepoBackend::Simple ; "simple backend")]
#[test_case(TestRepoBackend::Git ; "git backend")]
fn test_rewrite(backend: TestRepoBackend) {
    let settings = testutils::user_settings();
    let test_repo = TestRepo::init_with_backend_and_settings(backend, &settings);
    let test_env = &test_repo.env;
    let repo = &test_repo.repo;
    let store = repo.store();
    let root_file_path = repo_path("file");
    let dir_file_path = repo_path("dir/file");
    let initial_tree = create_tree(
        repo,
        &[
            (root_file_path, "file contents"),
            (dir_file_path, "dir/file contents"),
        ],
    );
    let mut tx = repo.start_transaction();
    let initial_commit = tx
        .repo_mut()
        .new_commit(vec![store.root_commit_id().clone()], initial_tree)
        .write()
        .unwrap();
    let repo = tx.commit("test").unwrap();
    // Only dir/file changes in the rewritten tree.
    let rewritten_tree = create_tree(
        &repo,
        &[
            (root_file_path, "file contents"),
            (dir_file_path, "updated dir/file contents"),
        ],
    );
    // Reload the repo with a different user identity for the rewrite.
    let mut config = StackedConfig::with_defaults();
    config.add_layer(
        ConfigLayer::parse(
            ConfigSource::User,
            indoc! {"
                user.name = 'Rewrite User'
                user.email = 'rewrite.user@example.com'
            "},
        )
        .unwrap(),
    );
    let rewrite_settings = UserSettings::from_config(config).unwrap();
    let repo = test_env.load_repo_at_head(&rewrite_settings, test_repo.repo_path());
    let store = repo.store();
    // We have a new store instance, so we need to associate the old tree with the
    // new store instance.
    let (tree_ids, labels) = rewritten_tree.into_tree_ids_and_labels();
    let rewritten_tree = MergedTree::new(store.clone(), tree_ids, labels);
    let initial_commit = store.get_commit(initial_commit.id()).unwrap();
    let mut tx = repo.start_transaction();
    let rewritten_commit = tx
        .repo_mut()
        .rewrite_commit(&initial_commit)
        .set_tree(rewritten_tree)
        .write()
        .unwrap();
    tx.repo_mut().rebase_descendants().unwrap();
    let repo = tx.commit("test").unwrap();
    let parents: Vec<_> = rewritten_commit.parents().try_collect().unwrap();
    assert_eq!(parents, vec![store.root_commit()]);
    assert_eq!(
        rewritten_commit.store_commit().predecessors,
        [initial_commit.id().clone()]
    );
    // The author keeps the identity from the original settings; the committer
    // uses the identity active at rewrite time.
    assert_eq!(rewritten_commit.author().name, settings.user_name());
    assert_eq!(rewritten_commit.author().email, settings.user_email());
    assert_eq!(
        rewritten_commit.committer().name,
        rewrite_settings.user_name()
    );
    assert_eq!(
        rewritten_commit.committer().email,
        rewrite_settings.user_email()
    );
    assert_eq!(
        diff_paths(&store.root_commit().tree(), &rewritten_commit.tree()),
        to_owned_path_vec(&[dir_file_path, root_file_path]),
    );
    assert_eq!(
        diff_paths(&initial_commit.tree(), &rewritten_commit.tree()),
        to_owned_path_vec(&[dir_file_path]),
    );
    // The operation records the rewrite as predecessor; the initial commit
    // itself has no predecessor entry.
    assert_matches!(
        repo.operation().predecessors_for_commit(rewritten_commit.id()),
        Some([id]) if id == initial_commit.id()
    );
    assert_matches!(
        repo.operation()
            .predecessors_for_commit(initial_commit.id()),
        None
    );
}
/// An author field with an empty name/email should get filled in on rewrite.
#[test_case(TestRepoBackend::Simple ; "simple backend")]
#[test_case(TestRepoBackend::Git ; "git backend")]
fn test_rewrite_update_missing_user(backend: TestRepoBackend) {
    // Default config has no user.name/user.email configured.
    let missing_user_settings = UserSettings::from_config(StackedConfig::with_defaults()).unwrap();
    let test_repo = TestRepo::init_with_backend_and_settings(backend, &missing_user_settings);
    let test_env = &test_repo.env;
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    let initial_commit = tx
        .repo_mut()
        .new_commit(
            vec![repo.store().root_commit_id().clone()],
            repo.store().empty_merged_tree(),
        )
        .write()
        .unwrap();
    // Without configured user settings, both signatures are empty.
    assert_eq!(initial_commit.author().name, "");
    assert_eq!(initial_commit.author().email, "");
    assert_eq!(initial_commit.committer().name, "");
    assert_eq!(initial_commit.committer().email, "");
    tx.commit("test").unwrap();
    // Reload the repo with a proper user identity configured.
    let mut config = StackedConfig::with_defaults();
    config.add_layer(
        ConfigLayer::parse(
            ConfigSource::User,
            indoc! {"
                user.name = 'Configured User'
                user.email = 'configured.user@example.com'
            "},
        )
        .unwrap(),
    );
    let settings = UserSettings::from_config(config).unwrap();
    let repo = test_env.load_repo_at_head(&settings, test_repo.repo_path());
    let initial_commit = repo.store().get_commit(initial_commit.id()).unwrap();
    let mut tx = repo.start_transaction();
    let rewritten_commit = tx
        .repo_mut()
        .rewrite_commit(&initial_commit)
        .write()
        .unwrap();
    // The previously-empty author and committer are filled in from the new
    // settings.
    assert_eq!(rewritten_commit.author().name, "Configured User");
    assert_eq!(
        rewritten_commit.author().email,
        "configured.user@example.com"
    );
    assert_eq!(rewritten_commit.committer().name, "Configured User");
    assert_eq!(
        rewritten_commit.committer().email,
        "configured.user@example.com"
    );
}
// Rewriting a discardable commit (here: empty, no description) resets both the
// author and the committer timestamp to the current commit timestamp, while
// rewriting a non-discardable commit keeps the author timestamp and only
// updates the committer timestamp.
#[test_case(TestRepoBackend::Simple ; "simple backend")]
#[test_case(TestRepoBackend::Git ; "git backend")]
fn test_rewrite_resets_author_timestamp(backend: TestRepoBackend) {
    let test_repo = TestRepo::init_with_backend(backend);
    let test_env = &test_repo.env;
    // Create discardable commit
    let initial_timestamp = "2001-02-03T04:05:06+07:00";
    let settings =
        UserSettings::from_config(config_with_commit_timestamp(initial_timestamp)).unwrap();
    let repo = test_env.load_repo_at_head(&settings, test_repo.repo_path());
    let mut tx = repo.start_transaction();
    let initial_commit = tx
        .repo_mut()
        .new_commit(
            vec![repo.store().root_commit_id().clone()],
            repo.store().empty_merged_tree(),
        )
        .write()
        .unwrap();
    tx.commit("test").unwrap();
    // Both signatures carry the configured (fake) commit timestamp.
    let initial_timestamp =
        Timestamp::from_datetime(chrono::DateTime::parse_from_rfc3339(initial_timestamp).unwrap());
    assert_eq!(initial_commit.author().timestamp, initial_timestamp);
    assert_eq!(initial_commit.committer().timestamp, initial_timestamp);
    // Rewrite discardable commit to no longer be discardable
    let new_timestamp_1 = "2002-03-04T05:06:07+08:00";
    let settings =
        UserSettings::from_config(config_with_commit_timestamp(new_timestamp_1)).unwrap();
    let repo = test_env.load_repo_at_head(&settings, test_repo.repo_path());
    let initial_commit = repo.store().get_commit(initial_commit.id()).unwrap();
    let mut tx = repo.start_transaction();
    let rewritten_commit_1 = tx
        .repo_mut()
        .rewrite_commit(&initial_commit)
        .set_description("No longer discardable")
        .write()
        .unwrap();
    tx.repo_mut().rebase_descendants().unwrap();
    tx.commit("test").unwrap();
    // Rewriting a discardable commit resets *both* timestamps to the new time.
    let new_timestamp_1 =
        Timestamp::from_datetime(chrono::DateTime::parse_from_rfc3339(new_timestamp_1).unwrap());
    assert_ne!(new_timestamp_1, initial_timestamp);
    assert_eq!(rewritten_commit_1.author().timestamp, new_timestamp_1);
    assert_eq!(rewritten_commit_1.committer().timestamp, new_timestamp_1);
    assert_eq!(rewritten_commit_1.author(), rewritten_commit_1.committer());
    // Rewrite non-discardable commit
    let new_timestamp_2 = "2003-04-05T06:07:08+09:00";
    let settings =
        UserSettings::from_config(config_with_commit_timestamp(new_timestamp_2)).unwrap();
    let repo = test_env.load_repo_at_head(&settings, test_repo.repo_path());
    let rewritten_commit_1 = repo.store().get_commit(rewritten_commit_1.id()).unwrap();
    let mut tx = repo.start_transaction();
    let rewritten_commit_2 = tx
        .repo_mut()
        .rewrite_commit(&rewritten_commit_1)
        .set_description("New description")
        .write()
        .unwrap();
    tx.repo_mut().rebase_descendants().unwrap();
    tx.commit("test").unwrap();
    // Rewriting a non-discardable commit keeps the author timestamp and only
    // bumps the committer timestamp.
    let new_timestamp_2 =
        Timestamp::from_datetime(chrono::DateTime::parse_from_rfc3339(new_timestamp_2).unwrap());
    assert_ne!(new_timestamp_2, new_timestamp_1);
    assert_eq!(rewritten_commit_2.author().timestamp, new_timestamp_1);
    assert_eq!(rewritten_commit_2.committer().timestamp, new_timestamp_2);
}
// Rewriting a commit such that the result is byte-identical to an existing
// commit: writing the object to the store succeeds (and yields the same
// commit), but recording it in the repo fails, since that would create a cycle
// in the predecessor/parent mappings. The fixed commit timestamp makes the
// rewrite reproduce the original commit exactly.
#[test_case(TestRepoBackend::Simple ; "simple backend")]
#[test_case(TestRepoBackend::Git ; "git backend")]
fn test_rewrite_to_identical_commit(backend: TestRepoBackend) {
    let timestamp = "2001-02-03T04:05:06+07:00";
    let settings = UserSettings::from_config(config_with_commit_timestamp(timestamp)).unwrap();
    let test_repo = TestRepo::init_with_backend_and_settings(backend, &settings);
    let repo = test_repo.repo;
    let store = repo.store();
    let mut tx = repo.start_transaction();
    let commit1 = tx
        .repo_mut()
        .new_commit(
            vec![store.root_commit_id().clone()],
            store.empty_merged_tree(),
        )
        .write()
        .unwrap();
    let repo = tx.commit("test").unwrap();
    // Create commit identical to the original
    let mut tx = repo.start_transaction();
    let mut builder = tx.repo_mut().rewrite_commit(&commit1).detach();
    // Clearing predecessors makes the rewritten commit's content identical to
    // the original.
    builder.set_predecessors(vec![]);
    // Writing to the store should work
    let commit2 = builder.write_hidden().unwrap();
    assert_eq!(commit1, commit2);
    // Writing to the repo shouldn't work, which would create cycle in
    // predecessors/parent mappings
    let result = builder.write(tx.repo_mut());
    assert_matches!(result, Err(BackendError::Other(_)));
    tx.repo_mut().rebase_descendants().unwrap();
    tx.commit("test").unwrap();
    // Create two rewritten commits of the same content and metadata
    let mut tx = repo.start_transaction();
    tx.repo_mut()
        .rewrite_commit(&commit1)
        .set_description("rewritten")
        .write()
        .unwrap();
    // The second rewrite would produce the exact same commit, so it fails for
    // the same reason.
    let result = tx
        .repo_mut()
        .rewrite_commit(&commit1)
        .set_description("rewritten")
        .write();
    assert_matches!(result, Err(BackendError::Other(_)));
    tx.repo_mut().rebase_descendants().unwrap();
    tx.commit("test").unwrap();
}
// Descendant rebasing triggered by the commit builder: creating a new commit
// rebases nothing, rewriting an existing commit rebases its descendants, and
// rewriting with a cleared rewrite source plus a fresh change id rebases
// nothing.
#[test_case(TestRepoBackend::Simple ; "simple backend")]
// NOTE(review): the git-backend case is disabled; reason not visible here —
// confirm whether this is intentional before re-enabling.
// #[test_case(TestRepoBackend::Git ; "git backend")]
fn test_commit_builder_descendants(backend: TestRepoBackend) {
    let test_repo = TestRepo::init_with_backend(backend);
    let repo = &test_repo.repo;
    let store = repo.store().clone();
    // Linear history: commit1 <- commit2 <- commit3.
    let mut tx = repo.start_transaction();
    let commit1 = write_random_commit(tx.repo_mut());
    let commit2 = write_random_commit_with_parents(tx.repo_mut(), &[&commit1]);
    let commit3 = write_random_commit_with_parents(tx.repo_mut(), &[&commit2]);
    let repo = tx.commit("test").unwrap();
    // Test with for_new_commit()
    let mut tx = repo.start_transaction();
    tx.repo_mut()
        .new_commit(
            vec![store.root_commit_id().clone()],
            store.empty_merged_tree(),
        )
        .write()
        .unwrap();
    // A brand-new commit has no descendants to rebase.
    let rebase_map =
        rebase_descendants_with_options_return_map(tx.repo_mut(), &RebaseOptions::default());
    assert_eq!(rebase_map.len(), 0);
    // Test with for_rewrite_from()
    let mut tx = repo.start_transaction();
    let commit4 = tx.repo_mut().rewrite_commit(&commit2).write().unwrap();
    // commit3 (child of the rewritten commit2) gets rebased onto commit4.
    let rebase_map =
        rebase_descendants_with_options_return_map(tx.repo_mut(), &RebaseOptions::default());
    assert_rebased_onto(tx.repo_mut(), &rebase_map, &commit3, &[commit4.id()]);
    assert_eq!(rebase_map.len(), 1);
    // Test with for_rewrite_from() but new change id
    let mut tx = repo.start_transaction();
    tx.repo_mut()
        .rewrite_commit(&commit2)
        .clear_rewrite_source()
        .generate_new_change_id()
        .write()
        .unwrap();
    // With the rewrite source cleared, the new commit is unrelated to commit2,
    // so no descendants are rebased.
    let rebase_map =
        rebase_descendants_with_options_return_map(tx.repo_mut(), &RebaseOptions::default());
    assert!(rebase_map.is_empty());
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_id_prefix.rs | lib/tests/test_id_prefix.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use itertools::Itertools as _;
use jj_lib::backend::ChangeId;
use jj_lib::backend::CommitId;
use jj_lib::backend::MillisSinceEpoch;
use jj_lib::backend::Signature;
use jj_lib::backend::Timestamp;
use jj_lib::config::ConfigLayer;
use jj_lib::config::ConfigSource;
use jj_lib::id_prefix::IdPrefixContext;
use jj_lib::id_prefix::IdPrefixIndex;
use jj_lib::index::ResolvedChangeTargets;
use jj_lib::object_id::HexPrefix;
use jj_lib::object_id::ObjectId as _;
use jj_lib::object_id::PrefixResolution::AmbiguousMatch;
use jj_lib::object_id::PrefixResolution::NoMatch;
use jj_lib::object_id::PrefixResolution::SingleMatch;
use jj_lib::op_store::RefTarget;
use jj_lib::repo::MutableRepo;
use jj_lib::repo::Repo as _;
use jj_lib::revset::RevsetExpression;
use jj_lib::settings::UserSettings;
use testutils::TestRepo;
use testutils::TestRepoBackend;
/// Returns user settings based on the testutils base config, with a fixed
/// `debug.commit-timestamp` so that generated commit/change ids are
/// deterministic across test runs.
fn stable_settings() -> UserSettings {
    // Pin the commit timestamp via an extra user-level config layer.
    let mut timestamp_layer = ConfigLayer::empty(ConfigSource::User);
    timestamp_layer
        .set_value("debug.commit-timestamp", "2001-02-03T04:05:06+07:00")
        .unwrap();
    let mut config = testutils::base_user_config();
    config.add_layer(timestamp_layer);
    UserSettings::from_config(config).unwrap()
}
// Shortest unique prefixes and prefix resolution for commit and change ids,
// with and without a disambiguation revset. The snapshots below pin the
// 3-hex-digit prefixes of the generated ids so the length assertions can be
// checked by eye.
#[test]
fn test_id_prefix() {
    let test_repo = TestRepo::init_with_backend(TestRepoBackend::Git);
    let repo = &test_repo.repo;
    let root_commit_id = repo.store().root_commit_id();
    let root_change_id = repo.store().root_change_id();
    let mut tx = repo.start_transaction();
    // Fixed signature so the generated commit ids are deterministic.
    let mut create_commit = |parent_id: &CommitId| {
        let signature = Signature {
            name: "Some One".to_string(),
            email: "some.one@example.com".to_string(),
            timestamp: Timestamp {
                timestamp: MillisSinceEpoch(0),
                tz_offset: 0,
            },
        };
        tx.repo_mut()
            .new_commit(vec![parent_id.clone()], repo.store().empty_merged_tree())
            .set_author(signature.clone())
            .set_committer(signature)
            .write()
            .unwrap()
    };
    // A linear chain of 26 commits off the root.
    let mut commits = vec![create_commit(root_commit_id)];
    for _ in 0..25 {
        commits.push(create_commit(commits.last().unwrap().id()));
    }
    let repo = tx.commit("test").unwrap();
    // Print the commit IDs and change IDs for reference
    let commit_prefixes = commits
        .iter()
        .enumerate()
        .map(|(i, commit)| format!("{} {}", &commit.id().hex()[..3], i))
        .sorted()
        .join("\n");
    insta::assert_snapshot!(commit_prefixes, @r"
    0c8 9
    18f 7
    19a 10
    37a 13
    3b4 21
    3c0 1
    4ee 16
    51f 4
    56e 14
    711 17
    761 3
    7b1 11
    7c6 24
    7f4 8
    846 23
    8d7 25
    960 15
    a30 12
    b51 19
    b97 22
    b9d 5
    bb4 2
    c3a 18
    c47 0
    d3c 6
    d54 20
    ");
    let change_prefixes = commits
        .iter()
        .enumerate()
        .map(|(i, commit)| format!("{} {}", &commit.change_id().hex()[..3], i))
        .sorted()
        .join("\n");
    insta::assert_snapshot!(change_prefixes, @r"
    026 9
    030 13
    1b5 6
    26b 3
    26c 8
    271 10
    439 2
    44a 17
    4e9 16
    5b2 23
    6c2 19
    781 0
    79f 14
    7d5 24
    86b 20
    871 7
    896 5
    9e4 18
    a2c 1
    a63 22
    b19 11
    b93 4
    bf5 21
    c24 15
    d64 12
    fee 25
    ");
    // Small helpers to cut down on per-assertion boilerplate.
    let prefix = |x| HexPrefix::try_from_hex(x).unwrap();
    let shortest_commit_prefix_len = |index: &IdPrefixIndex, commit_id| {
        index
            .shortest_commit_prefix_len(repo.as_ref(), commit_id)
            .unwrap()
    };
    let resolve_commit_prefix = |index: &IdPrefixIndex, prefix: HexPrefix| {
        index.resolve_commit_prefix(repo.as_ref(), &prefix).unwrap()
    };
    let shortest_change_prefix_len = |index: &IdPrefixIndex, change_id| {
        index
            .shortest_change_prefix_len(repo.as_ref(), change_id)
            .unwrap()
    };
    let resolve_change_prefix = |index: &IdPrefixIndex, prefix: HexPrefix| {
        index
            .resolve_change_prefix(repo.as_ref(), &prefix)
            .unwrap()
            .filter_map(ResolvedChangeTargets::into_visible)
    };
    // Without a disambiguation revset
    // ---------------------------------------------------------------------------------------------
    let context = IdPrefixContext::default();
    let index = context.populate(repo.as_ref()).unwrap();
    // Prefix lengths and resolutions follow the snapshot tables above:
    // e.g. "18f" (commit 7) needs 2 digits because "19a" shares the "1".
    assert_eq!(shortest_commit_prefix_len(&index, commits[7].id()), 2);
    assert_eq!(shortest_commit_prefix_len(&index, commits[16].id()), 1);
    assert_eq!(resolve_commit_prefix(&index, prefix("1")), AmbiguousMatch);
    assert_eq!(
        resolve_commit_prefix(&index, prefix("18")),
        SingleMatch(commits[7].id().clone())
    );
    assert_eq!(resolve_commit_prefix(&index, prefix("10")), NoMatch);
    assert_eq!(resolve_commit_prefix(&index, prefix("180")), NoMatch);
    assert_eq!(
        shortest_change_prefix_len(&index, commits[2].change_id()),
        2
    );
    assert_eq!(
        shortest_change_prefix_len(&index, commits[6].change_id()),
        1
    );
    assert_eq!(resolve_change_prefix(&index, prefix("4")), AmbiguousMatch);
    assert_eq!(
        resolve_change_prefix(&index, prefix("43")),
        SingleMatch(vec![commits[2].id().clone()])
    );
    assert_eq!(resolve_change_prefix(&index, prefix("40")), NoMatch);
    assert_eq!(resolve_change_prefix(&index, prefix("430")), NoMatch);
    // Disambiguate within a revset
    // ---------------------------------------------------------------------------------------------
    let expression =
        RevsetExpression::commits(vec![commits[7].id().clone(), commits[2].id().clone()]);
    let context = context.disambiguate_within(expression);
    let index = context.populate(repo.as_ref()).unwrap();
    // The prefix is now shorter
    assert_eq!(shortest_commit_prefix_len(&index, commits[7].id()), 1);
    // Shorter prefix within the set can be used
    assert_eq!(
        resolve_commit_prefix(&index, prefix("1")),
        SingleMatch(commits[7].id().clone())
    );
    // Can still resolve commits outside the set
    assert_eq!(
        resolve_commit_prefix(&index, prefix("19")),
        SingleMatch(commits[10].id().clone())
    );
    assert_eq!(
        shortest_change_prefix_len(&index, commits[2].change_id()),
        1
    );
    assert_eq!(
        resolve_change_prefix(&index, prefix("4")),
        SingleMatch(vec![commits[2].id().clone()])
    );
    // Single commit in revset. Length 0 is unambiguous, but we pretend 1 digit is
    // needed.
    // ---------------------------------------------------------------------------------------------
    let expression = RevsetExpression::commit(root_commit_id.clone());
    let context = context.disambiguate_within(expression);
    let index = context.populate(repo.as_ref()).unwrap();
    assert_eq!(shortest_commit_prefix_len(&index, root_commit_id), 1);
    assert_eq!(resolve_commit_prefix(&index, prefix("")), AmbiguousMatch);
    assert_eq!(
        resolve_commit_prefix(&index, prefix("0")),
        SingleMatch(root_commit_id.clone())
    );
    assert_eq!(shortest_change_prefix_len(&index, root_change_id), 1);
    assert_eq!(resolve_change_prefix(&index, prefix("")), AmbiguousMatch);
    assert_eq!(
        resolve_change_prefix(&index, prefix("0")),
        SingleMatch(vec![root_commit_id.clone()])
    );
    // Disambiguate within revset that fails to evaluate
    // ---------------------------------------------------------------------------------------------
    let expression = RevsetExpression::symbol("nonexistent".to_string());
    let context = context.disambiguate_within(expression);
    assert!(context.populate(repo.as_ref()).is_err());
}
// Prefix resolution for divergent change ids (two visible commits sharing one
// change id): resolving the change prefix yields both commits, and the
// disambiguation set still finds the divergent sibling even when only one of
// the two commits is in the set (regression test for #2476).
#[test]
fn test_id_prefix_divergent() {
    let test_repo = TestRepo::init_with_backend(TestRepoBackend::Git);
    let repo = &test_repo.repo;
    let root_commit_id = repo.store().root_commit_id();
    let mut tx = repo.start_transaction();
    // Fixed signature for deterministic commit ids; the change id is supplied
    // explicitly per commit.
    let mut create_commit_with_change_id =
        |parent_id: &CommitId, description: &str, change_id: ChangeId| {
            let signature = Signature {
                name: "Some One".to_string(),
                email: "some.one@example.com".to_string(),
                timestamp: Timestamp {
                    timestamp: MillisSinceEpoch(0),
                    tz_offset: 0,
                },
            };
            tx.repo_mut()
                .new_commit(vec![parent_id.clone()], repo.store().empty_merged_tree())
                .set_description(description)
                .set_author(signature.clone())
                .set_committer(signature)
                .set_change_id(change_id)
                .write()
                .unwrap()
        };
    // Both change ids share the "a5" prefix; the second and third commits share
    // the same change id (divergent).
    let first_change_id = ChangeId::from_hex("a5333333333333333333333333333333");
    let second_change_id = ChangeId::from_hex("a5000000000000000000000000000000");
    let first_commit = create_commit_with_change_id(root_commit_id, "first", first_change_id);
    let second_commit =
        create_commit_with_change_id(first_commit.id(), "second", second_change_id.clone());
    let third_commit_divergent_with_second =
        create_commit_with_change_id(first_commit.id(), "third", second_change_id);
    let commits = [
        first_commit.clone(),
        second_commit.clone(),
        third_commit_divergent_with_second.clone(),
    ];
    let repo = tx.commit("test").unwrap();
    // Print the commit IDs and change IDs for reference
    let change_prefixes = commits
        .iter()
        .enumerate()
        .map(|(i, commit)| format!("{} {}", &commit.change_id().hex()[..4], i))
        .join("\n");
    insta::assert_snapshot!(change_prefixes, @r"
    a533 0
    a500 1
    a500 2
    ");
    let commit_prefixes = commits
        .iter()
        .enumerate()
        .map(|(i, commit)| format!("{} {}", &commit.id().hex()[..4], i))
        .join("\n");
    insta::assert_snapshot!(commit_prefixes, @r"
    e2b9 0
    f8d1 1
    c596 2
    ");
    let prefix = |x| HexPrefix::try_from_hex(x).unwrap();
    let shortest_change_prefix_len = |index: &IdPrefixIndex, change_id| {
        index
            .shortest_change_prefix_len(repo.as_ref(), change_id)
            .unwrap()
    };
    let resolve_change_prefix = |index: &IdPrefixIndex, prefix: HexPrefix| {
        index
            .resolve_change_prefix(repo.as_ref(), &prefix)
            .unwrap()
            .filter_map(ResolvedChangeTargets::into_visible)
    };
    // Without a disambiguation revset
    // --------------------------------
    let context = IdPrefixContext::default();
    let index = context.populate(repo.as_ref()).unwrap();
    // All change ids need 3 digits since they share the "a5" prefix.
    assert_eq!(
        shortest_change_prefix_len(&index, commits[0].change_id()),
        3
    );
    assert_eq!(
        shortest_change_prefix_len(&index, commits[1].change_id()),
        3
    );
    assert_eq!(
        shortest_change_prefix_len(&index, commits[2].change_id()),
        3
    );
    assert_eq!(resolve_change_prefix(&index, prefix("a5")), AmbiguousMatch);
    assert_eq!(
        resolve_change_prefix(&index, prefix("a53")),
        SingleMatch(vec![first_commit.id().clone()])
    );
    // A divergent change id resolves to all of its visible commits.
    assert_eq!(
        resolve_change_prefix(&index, prefix("a50")),
        SingleMatch(vec![
            third_commit_divergent_with_second.id().clone(),
            second_commit.id().clone(),
        ])
    );
    // Now, disambiguate within the revset containing only the second commit
    // ----------------------------------------------------------------------
    let expression = RevsetExpression::commits(vec![second_commit.id().clone()]);
    let context = context.disambiguate_within(expression);
    let index = context.populate(repo.as_ref()).unwrap();
    // The prefix is now shorter
    assert_eq!(
        shortest_change_prefix_len(&index, second_commit.change_id()),
        1
    );
    // This tests two issues, both important:
    // - We find both commits with the same change id, even though
    //   `third_commit_divergent_with_second` is not in the short prefix set
    //   (#2476).
    // - The short prefix set still works: we do *not* find the first commit and the
    //   match is not ambiguous, even though the first commit's change id would also
    //   match the prefix.
    assert_eq!(
        resolve_change_prefix(&index, prefix("a")),
        SingleMatch(vec![
            third_commit_divergent_with_second.id().clone(),
            second_commit.id().clone(),
        ])
    );
    // We can still resolve commits outside the set
    assert_eq!(
        resolve_change_prefix(&index, prefix("a53")),
        SingleMatch(vec![first_commit.id().clone()])
    );
    assert_eq!(
        shortest_change_prefix_len(&index, first_commit.change_id()),
        3
    );
}
// Prefix resolution involving a hidden (abandoned) commit: hidden commit ids
// can still be resolved by prefix, but hidden change ids never resolve to a
// visible target.
#[test]
fn test_id_prefix_hidden() {
    let test_repo = TestRepo::init_with_backend(TestRepoBackend::Git);
    let repo = &test_repo.repo;
    let root_commit_id = repo.store().root_commit_id();
    let mut tx = repo.start_transaction();
    // Ten sibling commits off the root; the timestamp varies by `i` so each
    // commit id is distinct but deterministic.
    let mut commits = vec![];
    for i in 0..10 {
        let signature = Signature {
            name: "Some One".to_string(),
            email: "some.one@example.com".to_string(),
            timestamp: Timestamp {
                timestamp: MillisSinceEpoch(i),
                tz_offset: 0,
            },
        };
        let commit = tx
            .repo_mut()
            .new_commit(
                vec![root_commit_id.clone()],
                repo.store().empty_merged_tree(),
            )
            .set_author(signature.clone())
            .set_committer(signature)
            .write()
            .unwrap();
        commits.push(commit);
    }
    // Print the commit IDs and change IDs for reference
    let commit_prefixes = commits
        .iter()
        .enumerate()
        .map(|(i, commit)| format!("{} {}", &commit.id().hex()[..3], i))
        .sorted()
        .join("\n");
    insta::assert_snapshot!(commit_prefixes, @r"
    3ae 6
    64c 5
    84e 2
    906 8
    912 7
    9d1 3
    a6b 1
    c47 0
    d9b 4
    f5f 9
    ");
    let change_prefixes = commits
        .iter()
        .enumerate()
        .map(|(i, commit)| format!("{} {}", &commit.change_id().hex()[..3], i))
        .sorted()
        .join("\n");
    insta::assert_snapshot!(change_prefixes, @r"
    026 9
    1b5 6
    26b 3
    26c 8
    439 2
    781 0
    871 7
    896 5
    a2c 1
    b93 4
    ");
    // Abandon commit 8, making it hidden.
    let hidden_commit = &commits[8];
    tx.repo_mut().record_abandoned_commit(hidden_commit);
    tx.repo_mut().rebase_descendants().unwrap();
    let repo = tx.commit("test").unwrap();
    let prefix = |x: &str| HexPrefix::try_from_hex(x).unwrap();
    let shortest_commit_prefix_len = |index: &IdPrefixIndex, commit_id| {
        index
            .shortest_commit_prefix_len(repo.as_ref(), commit_id)
            .unwrap()
    };
    let resolve_commit_prefix = |index: &IdPrefixIndex, prefix: HexPrefix| {
        index.resolve_commit_prefix(repo.as_ref(), &prefix).unwrap()
    };
    let shortest_change_prefix_len = |index: &IdPrefixIndex, change_id| {
        index
            .shortest_change_prefix_len(repo.as_ref(), change_id)
            .unwrap()
    };
    let resolve_change_prefix = |index: &IdPrefixIndex, prefix: HexPrefix| {
        index
            .resolve_change_prefix(repo.as_ref(), &prefix)
            .unwrap()
            .filter_map(ResolvedChangeTargets::into_visible)
    };
    // Without a disambiguation revset
    // --------------------------------
    let context = IdPrefixContext::default();
    let index = context.populate(repo.as_ref()).unwrap();
    assert_eq!(shortest_commit_prefix_len(&index, hidden_commit.id()), 2);
    assert_eq!(
        shortest_change_prefix_len(&index, hidden_commit.change_id()),
        3
    );
    // The hidden commit's id ("906") shares its first digit with "912"/"9d1",
    // so one digit is ambiguous but two resolve it.
    assert_eq!(
        resolve_commit_prefix(&index, prefix(&hidden_commit.id().hex()[..1])),
        AmbiguousMatch
    );
    assert_eq!(
        resolve_commit_prefix(&index, prefix(&hidden_commit.id().hex()[..2])),
        SingleMatch(hidden_commit.id().clone())
    );
    // The hidden change id never resolves to a visible commit.
    assert_eq!(
        resolve_change_prefix(&index, prefix(&hidden_commit.change_id().hex()[..2])),
        AmbiguousMatch
    );
    assert_eq!(
        resolve_change_prefix(&index, prefix(&hidden_commit.change_id().hex()[..3])),
        NoMatch
    );
    // Disambiguate within hidden
    // --------------------------
    let expression = RevsetExpression::commit(hidden_commit.id().clone());
    let context = context.disambiguate_within(expression);
    let index = context.populate(repo.as_ref()).unwrap();
    assert_eq!(shortest_commit_prefix_len(&index, hidden_commit.id()), 1);
    assert_eq!(
        shortest_change_prefix_len(&index, hidden_commit.change_id()),
        1
    );
    // Short commit id can be resolved even if it's hidden.
    assert_eq!(
        resolve_commit_prefix(&index, prefix(&hidden_commit.id().hex()[..1])),
        SingleMatch(hidden_commit.id().clone())
    );
    // OTOH, hidden change id should never be found. The resolution might be
    // ambiguous if hidden commits were excluded from the disambiguation set.
    // In that case, shortest_change_prefix_len() shouldn't be 1.
    assert_eq!(
        resolve_change_prefix(&index, prefix(&hidden_commit.change_id().hex()[..1])),
        NoMatch
    );
    assert_eq!(
        resolve_change_prefix(&index, prefix(&hidden_commit.change_id().hex()[..2])),
        NoMatch
    );
}
// The shortest id prefix must also not collide with bookmark/tag names: a ref
// whose name equals an id prefix forces a longer prefix, but a ref whose name
// is *longer* than the candidate prefix does not.
#[test]
fn test_id_prefix_shadowed_by_ref() {
    let settings = stable_settings();
    let test_repo = TestRepo::init_with_settings(&settings);
    let repo = &test_repo.repo;
    let root_commit_id = repo.store().root_commit_id();
    let mut tx = repo.start_transaction();
    let commit = tx
        .repo_mut()
        .new_commit(
            vec![root_commit_id.clone()],
            repo.store().empty_merged_tree(),
        )
        .write()
        .unwrap();
    // Pin the ids so the symbol/prefix collisions below are deterministic.
    let commit_id_sym = commit.id().to_string();
    let change_id_sym = commit.change_id().to_string();
    insta::assert_snapshot!(commit_id_sym, @"b06a01f026da65ac5821");
    insta::assert_snapshot!(change_id_sym, @"sryyqqkqmuumyrlruupspprvnulvovzm");
    let context = IdPrefixContext::default();
    let index = context.populate(tx.repo()).unwrap();
    let shortest_commit_prefix_len =
        |repo: &MutableRepo, commit_id| index.shortest_commit_prefix_len(repo, commit_id).unwrap();
    let shortest_change_prefix_len =
        |repo: &MutableRepo, change_id| index.shortest_change_prefix_len(repo, change_id).unwrap();
    // No refs yet: a single digit suffices.
    assert_eq!(shortest_commit_prefix_len(tx.repo(), commit.id()), 1);
    assert_eq!(shortest_change_prefix_len(tx.repo(), commit.change_id()), 1);
    // Longer symbol doesn't count
    let dummy_target = RefTarget::normal(root_commit_id.clone());
    tx.repo_mut()
        .set_local_tag_target(commit_id_sym[..2].as_ref(), dummy_target.clone());
    tx.repo_mut()
        .set_local_tag_target(change_id_sym[..2].as_ref(), dummy_target.clone());
    assert_eq!(shortest_commit_prefix_len(tx.repo(), commit.id()), 1);
    assert_eq!(shortest_change_prefix_len(tx.repo(), commit.change_id()), 1);
    // 1-char conflict with bookmark, 2-char with tag
    tx.repo_mut()
        .set_local_bookmark_target(commit_id_sym[..1].as_ref(), dummy_target.clone());
    tx.repo_mut()
        .set_local_bookmark_target(change_id_sym[..1].as_ref(), dummy_target.clone());
    // 1- and 2-char prefixes are now shadowed, so 3 chars are needed.
    assert_eq!(shortest_commit_prefix_len(tx.repo(), commit.id()), 3);
    assert_eq!(shortest_change_prefix_len(tx.repo(), commit.change_id()), 3);
    // Many-char conflicts
    for n in 3..commit_id_sym.len() {
        tx.repo_mut()
            .set_local_tag_target(commit_id_sym[..n].as_ref(), dummy_target.clone());
    }
    for n in 3..change_id_sym.len() {
        tx.repo_mut()
            .set_local_tag_target(change_id_sym[..n].as_ref(), dummy_target.clone());
    }
    // Every proper prefix is shadowed; only the full id remains usable.
    assert_eq!(
        shortest_commit_prefix_len(tx.repo(), commit.id()),
        commit_id_sym.len()
    );
    assert_eq!(
        shortest_change_prefix_len(tx.repo(), commit.change_id()),
        change_id_sym.len()
    );
    // Full-char conflicts
    tx.repo_mut()
        .set_local_tag_target(commit_id_sym.as_ref(), dummy_target.clone());
    tx.repo_mut()
        .set_local_tag_target(change_id_sym.as_ref(), dummy_target.clone());
    // Even with the full id shadowed, the reported length caps at the full id.
    assert_eq!(
        shortest_commit_prefix_len(tx.repo(), commit.id()),
        commit_id_sym.len()
    );
    assert_eq!(
        shortest_change_prefix_len(tx.repo(), commit.change_id()),
        change_id_sym.len()
    );
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_rewrite_transform.rs | lib/tests/test_rewrite_transform.rs | // Copyright 2024 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use jj_lib::commit::Commit;
use jj_lib::repo::Repo as _;
use jj_lib::rewrite::RewriteRefsOptions;
use maplit::hashmap;
use maplit::hashset;
use testutils::TestRepo;
use testutils::write_random_commit;
use testutils::write_random_commit_with_parents;
// Simulate some `jj sync` command that rebases B:: onto G while abandoning C
// (because it's presumably already in G).
//
// G
// | E
// | D F
// |   |/
// |   C
// |   B
// |/
// A
#[test]
fn test_transform_descendants_sync() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    // Build the graph shown above.
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_c]);
    let commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_d]);
    let commit_f = write_random_commit_with_parents(tx.repo_mut(), &[&commit_c]);
    let commit_g = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    // Visit B and all descendants: redirect the A parent edge to G, abandon C,
    // and rebase everything else, remembering old-id -> new-commit.
    let mut rebased = HashMap::new();
    tx.repo_mut()
        .transform_descendants(vec![commit_b.id().clone()], async |mut rewriter| {
            rewriter.replace_parent(commit_a.id(), [commit_g.id()]);
            if *rewriter.old_commit() == commit_c {
                rewriter.abandon();
            } else {
                let old_commit_id = rewriter.old_commit().id().clone();
                let new_commit = rewriter.rebase().await?.write()?;
                rebased.insert(old_commit_id, new_commit);
            }
            Ok(())
        })
        .unwrap();
    // B, D, E, F were rebased; C was abandoned (its children land on new B).
    assert_eq!(rebased.len(), 4);
    let new_commit_b = rebased.get(commit_b.id()).unwrap();
    let new_commit_d = rebased.get(commit_d.id()).unwrap();
    let new_commit_e = rebased.get(commit_e.id()).unwrap();
    let new_commit_f = rebased.get(commit_f.id()).unwrap();
    assert_eq!(
        *tx.repo().view().heads(),
        hashset! {
            new_commit_e.id().clone(),
            new_commit_f.id().clone(),
        }
    );
    assert_eq!(new_commit_b.parent_ids(), vec![commit_g.id().clone()]);
    assert_eq!(new_commit_d.parent_ids(), vec![new_commit_b.id().clone()]);
    assert_eq!(new_commit_e.parent_ids(), vec![new_commit_d.id().clone()]);
    assert_eq!(new_commit_f.parent_ids(), vec![new_commit_b.id().clone()]);
}
// Transform just the merge commit C, replacing its parent A with B. Since B is
// itself a child of A, the resulting duplicate [B, B] parent list should be
// deduplicated to a single parent B.
//
//   C
//  /|
// B |
// |/
// A
#[test]
fn test_transform_descendants_sync_linearize_merge() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    // Build the merge: C has both A and A's child B as parents.
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a, &commit_b]);
    // Rewrite C (and any descendants), recording old id -> rewritten commit.
    let mut rewritten = HashMap::new();
    tx.repo_mut()
        .transform_descendants(vec![commit_c.id().clone()], async |mut rewriter| {
            // Redirect the A parent edge to B.
            rewriter.replace_parent(commit_a.id(), [commit_b.id()]);
            let source_id = rewriter.old_commit().id().clone();
            let rebased_commit = rewriter.rebase().await?.write()?;
            rewritten.insert(source_id, rebased_commit);
            Ok(())
        })
        .unwrap();
    // Only C itself was visited.
    assert_eq!(rewritten.len(), 1);
    let new_commit_c = rewritten.get(commit_c.id()).unwrap();
    // The rewritten C is the sole head and its parents were deduplicated to B.
    assert_eq!(
        *tx.repo().view().heads(),
        hashset! {
            new_commit_c.id().clone(),
        }
    );
    assert_eq!(new_commit_c.parent_ids(), vec![commit_b.id().clone()]);
}
// Reorder commits B and C by using the `new_parents_map`. Reordering has to be
// done outside of the typical callback since we must ensure that the new
// traversal order of the commits is valid.
//
// G
// | E
// | D F
// |   |/
// |   C
// |   B
// |/
// A
#[test]
fn test_transform_descendants_new_parents_map() {
    let test_repo = TestRepo::init();
    let repo = &test_repo.repo;
    let mut tx = repo.start_transaction();
    // Build the graph shown above.
    let commit_a = write_random_commit(tx.repo_mut());
    let commit_b = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let commit_c = write_random_commit_with_parents(tx.repo_mut(), &[&commit_b]);
    let commit_d = write_random_commit_with_parents(tx.repo_mut(), &[&commit_c]);
    let commit_e = write_random_commit_with_parents(tx.repo_mut(), &[&commit_d]);
    let commit_f = write_random_commit_with_parents(tx.repo_mut(), &[&commit_c]);
    let commit_g = write_random_commit_with_parents(tx.repo_mut(), &[&commit_a]);
    let options = RewriteRefsOptions::default();
    let mut rebased = HashMap::new();
    tx.repo_mut()
        .transform_descendants_with_options(
            vec![commit_b.id().clone()],
            // The map swaps B and C: B will sit on top of C, and C on top of A.
            &hashmap! {
                commit_b.id().clone() => vec![commit_c.id().clone()],
                commit_c.id().clone() => vec![commit_a.id().clone()],
            },
            &options,
            async |mut rewriter| {
                let old_commit_id = rewriter.old_commit().id().clone();
                // For descendants of C (i.e. everything after B was visited),
                // redirect the parent edge from the old C to the rewritten B,
                // so D and F land on the reordered chain.
                if old_commit_id != *commit_b.id()
                    && let Some(new_commit_c) = rebased.get(commit_c.id())
                {
                    let new_commit_b: &Commit = rebased.get(commit_b.id()).unwrap();
                    rewriter.replace_parent(new_commit_c.id(), [new_commit_b.id()]);
                }
                let new_commit = rewriter.rebase().await?.write()?;
                rebased.insert(old_commit_id, new_commit);
                Ok(())
            },
        )
        .unwrap();
    // B, C, D, E, F were all rewritten; G is untouched.
    assert_eq!(rebased.len(), 5);
    let new_commit_b = rebased.get(commit_b.id()).unwrap();
    let new_commit_c = rebased.get(commit_c.id()).unwrap();
    let new_commit_d = rebased.get(commit_d.id()).unwrap();
    let new_commit_e = rebased.get(commit_e.id()).unwrap();
    let new_commit_f = rebased.get(commit_f.id()).unwrap();
    assert_eq!(
        *tx.repo().view().heads(),
        hashset! {
            commit_g.id().clone(),
            new_commit_e.id().clone(),
            new_commit_f.id().clone(),
        }
    );
    // Resulting order: A <- C' <- B' <- {D', F'}, E' on D'.
    assert_eq!(new_commit_c.parent_ids(), vec![commit_a.id().clone()]);
    assert_eq!(new_commit_b.parent_ids(), vec![new_commit_c.id().clone()]);
    assert_eq!(new_commit_d.parent_ids(), vec![new_commit_b.id().clone()]);
    assert_eq!(new_commit_e.parent_ids(), vec![new_commit_d.id().clone()]);
    assert_eq!(new_commit_f.parent_ids(), vec![new_commit_b.id().clone()]);
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/tests/test_git.rs | lib/tests/test_git.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::fs;
use std::io::Write as _;
use std::iter;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::Barrier;
use std::sync::mpsc;
use std::thread;
use assert_matches::assert_matches;
use gix::remote::Direction;
use itertools::Itertools as _;
use jj_lib::backend::BackendError;
use jj_lib::backend::ChangeId;
use jj_lib::backend::CommitId;
use jj_lib::backend::MillisSinceEpoch;
use jj_lib::backend::Signature;
use jj_lib::backend::Timestamp;
use jj_lib::commit::Commit;
use jj_lib::commit_builder::CommitBuilder;
use jj_lib::config::ConfigLayer;
use jj_lib::config::ConfigSource;
use jj_lib::git;
use jj_lib::git::FailedRefExportReason;
use jj_lib::git::FetchTagsOverride;
use jj_lib::git::GitBranchPushTargets;
use jj_lib::git::GitFetch;
use jj_lib::git::GitFetchError;
use jj_lib::git::GitImportError;
use jj_lib::git::GitImportOptions;
use jj_lib::git::GitImportStats;
use jj_lib::git::GitPushError;
use jj_lib::git::GitPushStats;
use jj_lib::git::GitRefKind;
use jj_lib::git::GitRefUpdate;
use jj_lib::git::GitResetHeadError;
use jj_lib::git::GitSettings;
use jj_lib::git::GitSubprocessOptions;
use jj_lib::git::IgnoredRefspec;
use jj_lib::git::IgnoredRefspecs;
use jj_lib::git::expand_default_fetch_refspecs;
use jj_lib::git::expand_fetch_refspecs;
use jj_lib::git_backend::GitBackend;
use jj_lib::hex_util;
use jj_lib::index::ResolvedChangeTargets;
use jj_lib::merge::Merge;
use jj_lib::object_id::ObjectId as _;
use jj_lib::op_store::LocalRemoteRefTarget;
use jj_lib::op_store::RefTarget;
use jj_lib::op_store::RemoteRef;
use jj_lib::op_store::RemoteRefState;
use jj_lib::ref_name::GitRefNameBuf;
use jj_lib::ref_name::RefName;
use jj_lib::ref_name::RemoteName;
use jj_lib::ref_name::RemoteRefSymbol;
use jj_lib::refs::BookmarkPushUpdate;
use jj_lib::repo::MutableRepo;
use jj_lib::repo::ReadonlyRepo;
use jj_lib::repo::Repo as _;
use jj_lib::settings::UserSettings;
use jj_lib::signing::Signer;
use jj_lib::str_util::StringExpression;
use jj_lib::str_util::StringMatcher;
use jj_lib::str_util::StringPattern;
use jj_lib::workspace::Workspace;
use maplit::btreemap;
use maplit::hashset;
use tempfile::TempDir;
use test_case::test_case;
use testutils::TestRepo;
use testutils::TestRepoBackend;
use testutils::base_user_config;
use testutils::commit_transactions;
use testutils::create_random_commit;
use testutils::repo_path;
use testutils::write_random_commit;
use testutils::write_random_commit_with_parents;
fn empty_git_commit(
git_repo: &gix::Repository,
ref_name: &str,
parents: &[gix::ObjectId],
) -> gix::ObjectId {
let empty_tree_id = git_repo.empty_tree().id().detach();
testutils::git::write_commit(
git_repo,
ref_name,
empty_tree_id,
&format!("random commit {}", rand::random::<u32>()),
parents,
)
}
fn jj_id(id: gix::ObjectId) -> CommitId {
CommitId::from_bytes(id.as_bytes())
}
fn git_id(commit: &Commit) -> gix::ObjectId {
gix::ObjectId::from_bytes_or_panic(commit.id().as_bytes())
}
fn remote_symbol<'a, N, M>(name: &'a N, remote: &'a M) -> RemoteRefSymbol<'a>
where
N: AsRef<RefName> + ?Sized,
M: AsRef<RemoteName> + ?Sized,
{
RemoteRefSymbol {
name: name.as_ref(),
remote: remote.as_ref(),
}
}
fn get_git_backend(repo: &Arc<ReadonlyRepo>) -> &GitBackend {
repo.store().backend_impl().unwrap()
}
fn get_git_repo(repo: &Arc<ReadonlyRepo>) -> gix::Repository {
get_git_backend(repo).git_repo()
}
/// Fetches and imports all refs with the default configuration.
fn fetch_import_all(mut_repo: &mut MutableRepo, remote: &RemoteName) -> GitImportStats {
let git_settings = GitSettings::from_settings(mut_repo.base_repo().settings()).unwrap();
let import_options = default_import_options();
let mut fetcher = GitFetch::new(
mut_repo,
git_settings.to_subprocess_options(),
&import_options,
)
.unwrap();
fetch_all_with(&mut fetcher, remote).unwrap();
fetcher.import_refs().unwrap()
}
/// Fetches all refs without importing.
fn fetch_all_with(fetcher: &mut GitFetch, remote: &RemoteName) -> Result<(), GitFetchError> {
fetch_with(fetcher, remote, StringExpression::all())
}
/// Fetches the specified refs without importing.
fn fetch_with(
fetcher: &mut GitFetch,
remote: &RemoteName,
bookmark_expr: StringExpression,
) -> Result<(), GitFetchError> {
let refspecs =
expand_fetch_refspecs(remote, bookmark_expr).expect("ref patterns should be valid");
let callbacks = git::RemoteCallbacks::default();
let depth = None;
let fetch_tags = None;
fetcher.fetch(remote, refspecs, callbacks, depth, fetch_tags)
}
fn push_status_rejected_references(push_stats: GitPushStats) -> Vec<GitRefNameBuf> {
assert!(push_stats.pushed.is_empty());
assert!(push_stats.remote_rejected.is_empty());
push_stats
.rejected
.into_iter()
.map(|(reference, _)| reference)
.collect()
}
#[test]
fn test_import_refs() {
let test_repo = TestRepo::init_with_backend(TestRepoBackend::Git);
let repo = &test_repo.repo;
let git_repo = get_git_repo(repo);
let import_options = default_import_options();
let commit1 = empty_git_commit(&git_repo, "refs/heads/main", &[]);
git_ref(&git_repo, "refs/remotes/origin/main", commit1);
let commit2 = empty_git_commit(&git_repo, "refs/heads/main", &[commit1]);
let commit3 = empty_git_commit(&git_repo, "refs/heads/feature1", &[commit2]);
let commit4 = empty_git_commit(&git_repo, "refs/heads/feature2", &[commit2]);
let commit5 = empty_git_commit(&git_repo, "refs/tags/v1.0", &[commit1]);
let commit6 = empty_git_commit(&git_repo, "refs/remotes/origin/feature3", &[commit1]);
// Should not be imported
empty_git_commit(&git_repo, "refs/notes/x", &[commit2]);
empty_git_commit(&git_repo, "refs/remotes/origin/HEAD", &[commit2]);
testutils::git::set_symbolic_reference(&git_repo, "HEAD", "refs/heads/main");
let mut tx = repo.start_transaction();
git::import_head(tx.repo_mut()).unwrap();
let stats = git::import_refs(tx.repo_mut(), &import_options).unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo = tx.commit("test").unwrap();
let view = repo.view();
assert!(stats.abandoned_commits.is_empty());
let expected_heads = hashset! {
jj_id(commit3),
jj_id(commit4),
jj_id(commit5),
jj_id(commit6),
};
assert_eq!(*view.heads(), expected_heads);
assert_eq!(view.bookmarks().count(), 4);
assert_eq!(
view.get_local_bookmark("main".as_ref()),
&RefTarget::normal(jj_id(commit2))
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("main", "git")),
&RemoteRef {
target: RefTarget::normal(jj_id(commit2)),
state: RemoteRefState::Tracked,
},
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("main", "origin")),
&RemoteRef {
target: RefTarget::normal(jj_id(commit1)),
state: RemoteRefState::New,
},
);
assert_eq!(
view.get_local_bookmark("feature1".as_ref()),
&RefTarget::normal(jj_id(commit3))
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature1", "git")),
&RemoteRef {
target: RefTarget::normal(jj_id(commit3)),
state: RemoteRefState::Tracked,
},
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature1", "origin")),
RemoteRef::absent_ref()
);
assert_eq!(
view.get_local_bookmark("feature2".as_ref()),
&RefTarget::normal(jj_id(commit4))
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature2", "git")),
&RemoteRef {
target: RefTarget::normal(jj_id(commit4)),
state: RemoteRefState::Tracked,
},
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature2", "origin")),
RemoteRef::absent_ref()
);
assert_eq!(
view.get_local_bookmark("feature3".as_ref()),
RefTarget::absent_ref()
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature3", "git")),
RemoteRef::absent_ref()
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature3", "origin")),
&RemoteRef {
target: RefTarget::normal(jj_id(commit6)),
state: RemoteRefState::New,
},
);
assert_eq!(
view.get_local_tag("v1.0".as_ref()),
&RefTarget::normal(jj_id(commit5))
);
assert_eq!(
view.get_remote_tag(remote_symbol("v1.0", "git")),
&RemoteRef {
target: RefTarget::normal(jj_id(commit5)),
state: RemoteRefState::Tracked,
},
);
assert_eq!(view.git_refs().len(), 6);
assert_eq!(
view.get_git_ref("refs/heads/main".as_ref()),
&RefTarget::normal(jj_id(commit2))
);
assert_eq!(
view.get_git_ref("refs/heads/feature1".as_ref()),
&RefTarget::normal(jj_id(commit3))
);
assert_eq!(
view.get_git_ref("refs/heads/feature2".as_ref()),
&RefTarget::normal(jj_id(commit4))
);
assert_eq!(
view.get_git_ref("refs/remotes/origin/main".as_ref()),
&RefTarget::normal(jj_id(commit1))
);
assert_eq!(
view.get_git_ref("refs/remotes/origin/feature3".as_ref()),
&RefTarget::normal(jj_id(commit6))
);
assert_eq!(
view.get_git_ref("refs/tags/v1.0".as_ref()),
&RefTarget::normal(jj_id(commit5))
);
assert_eq!(view.git_head(), &RefTarget::normal(jj_id(commit2)));
}
#[test]
fn test_import_refs_reimport() {
let test_workspace = TestRepo::init_with_backend(TestRepoBackend::Git);
let repo = &test_workspace.repo;
let git_repo = get_git_repo(repo);
let import_options = default_import_options();
let commit1 = empty_git_commit(&git_repo, "refs/heads/main", &[]);
git_ref(&git_repo, "refs/remotes/origin/main", commit1);
let commit2 = empty_git_commit(&git_repo, "refs/heads/main", &[commit1]);
let commit3 = empty_git_commit(&git_repo, "refs/heads/feature1", &[commit2]);
let commit4 = empty_git_commit(&git_repo, "refs/heads/feature2", &[commit2]);
let pgp_key_oid = git_repo.write_blob(b"my PGP key").unwrap().detach();
git_repo
.reference(
"refs/tags/my-gpg-key",
pgp_key_oid,
gix::refs::transaction::PreviousValue::MustNotExist,
"",
)
.unwrap();
let mut tx = repo.start_transaction();
let stats = git::import_refs(tx.repo_mut(), &import_options).unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo = tx.commit("test").unwrap();
assert!(stats.abandoned_commits.is_empty());
let expected_heads = hashset! {
jj_id(commit3),
jj_id(commit4),
};
let view = repo.view();
assert_eq!(*view.heads(), expected_heads);
// Delete feature1 and rewrite feature2
delete_git_ref(&git_repo, "refs/heads/feature1");
delete_git_ref(&git_repo, "refs/heads/feature2");
let commit5 = empty_git_commit(&git_repo, "refs/heads/feature2", &[commit2]);
// Also modify feature2 on the jj side
let mut tx = repo.start_transaction();
let commit6 = create_random_commit(tx.repo_mut())
.set_parents(vec![jj_id(commit2)])
.write()
.unwrap();
tx.repo_mut()
.set_local_bookmark_target("feature2".as_ref(), RefTarget::normal(commit6.id().clone()));
let repo = tx.commit("test").unwrap();
let mut tx = repo.start_transaction();
let stats = git::import_refs(tx.repo_mut(), &import_options).unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo = tx.commit("test").unwrap();
assert_eq!(
// The order is unstable just because we import heads from Git repo.
HashSet::from_iter(stats.abandoned_commits),
hashset! {
jj_id(commit4),
jj_id(commit3),
},
);
let view = repo.view();
let expected_heads = hashset! {
jj_id(commit5),
commit6.id().clone(),
};
assert_eq!(*view.heads(), expected_heads);
assert_eq!(view.bookmarks().count(), 2);
let commit1_target = RefTarget::normal(jj_id(commit1));
let commit2_target = RefTarget::normal(jj_id(commit2));
assert_eq!(
view.get_local_bookmark("main".as_ref()),
&RefTarget::normal(jj_id(commit2))
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("main", "git")),
&RemoteRef {
target: RefTarget::normal(jj_id(commit2)),
state: RemoteRefState::Tracked,
},
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("main", "origin")),
&RemoteRef {
target: commit1_target.clone(),
state: RemoteRefState::New,
},
);
assert_eq!(
view.get_local_bookmark("feature2".as_ref()),
&RefTarget::from_legacy_form([jj_id(commit4)], [commit6.id().clone(), jj_id(commit5)])
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature2", "git")),
&RemoteRef {
target: RefTarget::normal(jj_id(commit5)),
state: RemoteRefState::Tracked,
},
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature2", "origin")),
RemoteRef::absent_ref()
);
assert_eq!(view.local_tags().count(), 0);
assert_eq!(view.git_refs().len(), 3);
assert_eq!(
view.get_git_ref("refs/heads/main".as_ref()),
&commit2_target
);
assert_eq!(
view.get_git_ref("refs/remotes/origin/main".as_ref()),
&commit1_target
);
let commit5_target = RefTarget::normal(jj_id(commit5));
assert_eq!(
view.get_git_ref("refs/heads/feature2".as_ref()),
&commit5_target
);
}
#[test]
fn test_import_refs_reimport_head_removed() {
// Test that re-importing refs doesn't cause a deleted head to come back
let test_repo = TestRepo::init_with_backend(TestRepoBackend::Git);
let repo = &test_repo.repo;
let git_repo = get_git_repo(repo);
let import_options = default_import_options();
let commit = empty_git_commit(&git_repo, "refs/heads/main", &[]);
let mut tx = repo.start_transaction();
git::import_refs(tx.repo_mut(), &import_options).unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let commit_id = jj_id(commit);
// Test the setup
assert!(tx.repo().view().heads().contains(&commit_id));
// Remove the head and re-import
tx.repo_mut().remove_head(&commit_id);
git::import_refs(tx.repo_mut(), &import_options).unwrap();
tx.repo_mut().rebase_descendants().unwrap();
assert!(!tx.repo().view().heads().contains(&commit_id));
}
#[test]
fn test_import_refs_reimport_git_head_does_not_count() {
// Test that if a bookmark is removed, the corresponding commit is abandoned
// no matter if the Git HEAD points to the commit (or a descendant of it.)
let test_repo = TestRepo::init_with_backend(TestRepoBackend::Git);
let repo = &test_repo.repo;
let git_repo = get_git_repo(repo);
let import_options = default_import_options();
let commit = empty_git_commit(&git_repo, "refs/heads/main", &[]);
testutils::git::set_head_to_id(&git_repo, commit);
let mut tx = repo.start_transaction();
git::import_head(tx.repo_mut()).unwrap();
git::import_refs(tx.repo_mut(), &import_options).unwrap();
tx.repo_mut().rebase_descendants().unwrap();
// Delete the bookmark and re-import. The commit should still be there since
// HEAD points to it
git_repo
.find_reference("refs/heads/main")
.unwrap()
.delete()
.unwrap();
git::import_head(tx.repo_mut()).unwrap();
git::import_refs(tx.repo_mut(), &import_options).unwrap();
tx.repo_mut().rebase_descendants().unwrap();
assert!(!tx.repo().view().heads().contains(&jj_id(commit)));
}
#[test]
fn test_import_refs_reimport_git_head_without_ref() {
// Simulate external `git checkout` in colocated workspace, from anonymous
// bookmark.
let test_repo = TestRepo::init_with_backend(TestRepoBackend::Git);
let repo = &test_repo.repo;
let git_repo = get_git_repo(repo);
let import_options = default_import_options();
// First, HEAD points to commit1.
let mut tx = repo.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
let commit2 = write_random_commit(tx.repo_mut());
testutils::git::set_head_to_id(&git_repo, git_id(&commit1));
// Import HEAD.
git::import_head(tx.repo_mut()).unwrap();
git::import_refs(tx.repo_mut(), &import_options).unwrap();
tx.repo_mut().rebase_descendants().unwrap();
assert!(tx.repo().view().heads().contains(commit1.id()));
assert!(tx.repo().view().heads().contains(commit2.id()));
// Move HEAD to commit2 (by e.g. `git checkout` command)
testutils::git::set_head_to_id(&git_repo, git_id(&commit2));
// Reimport HEAD, which doesn't abandon the old HEAD branch because jj thinks it
// would be moved by `git checkout` command. This isn't always true because the
// detached HEAD commit could be rewritten by e.g. `git commit --amend` command,
// but it should be safer than abandoning old checkout branch.
git::import_head(tx.repo_mut()).unwrap();
git::import_refs(tx.repo_mut(), &import_options).unwrap();
tx.repo_mut().rebase_descendants().unwrap();
assert!(tx.repo().view().heads().contains(commit1.id()));
assert!(tx.repo().view().heads().contains(commit2.id()));
}
#[test]
fn test_import_refs_reimport_git_head_with_moved_ref() {
// Simulate external history rewriting in colocated workspace.
let test_repo = TestRepo::init_with_backend(TestRepoBackend::Git);
let repo = &test_repo.repo;
let git_repo = get_git_repo(repo);
let import_options = default_import_options();
// First, both HEAD and main point to commit1.
let mut tx = repo.start_transaction();
let commit1 = write_random_commit(tx.repo_mut());
let commit2 = write_random_commit(tx.repo_mut());
git_repo
.reference(
"refs/heads/main",
git_id(&commit1),
gix::refs::transaction::PreviousValue::Any,
"test",
)
.unwrap();
testutils::git::set_head_to_id(&git_repo, git_id(&commit1));
// Import HEAD and main.
git::import_head(tx.repo_mut()).unwrap();
git::import_refs(tx.repo_mut(), &import_options).unwrap();
tx.repo_mut().rebase_descendants().unwrap();
assert!(tx.repo().view().heads().contains(commit1.id()));
assert!(tx.repo().view().heads().contains(commit2.id()));
// Move both HEAD and main to commit2 (by e.g. `git commit --amend` command)
git_repo
.reference(
"refs/heads/main",
git_id(&commit2),
gix::refs::transaction::PreviousValue::Any,
"test",
)
.unwrap();
testutils::git::set_head_to_id(&git_repo, git_id(&commit2));
// Reimport HEAD and main, which abandons the old main branch.
git::import_head(tx.repo_mut()).unwrap();
git::import_refs(tx.repo_mut(), &import_options).unwrap();
tx.repo_mut().rebase_descendants().unwrap();
assert!(!tx.repo().view().heads().contains(commit1.id()));
assert!(tx.repo().view().heads().contains(commit2.id()));
// Reimport HEAD and main, which abandons the old main bookmark.
git::import_head(tx.repo_mut()).unwrap();
git::import_refs(tx.repo_mut(), &import_options).unwrap();
tx.repo_mut().rebase_descendants().unwrap();
assert!(!tx.repo().view().heads().contains(commit1.id()));
assert!(tx.repo().view().heads().contains(commit2.id()));
}
#[test]
fn test_import_refs_reimport_with_deleted_remote_ref() {
let test_workspace = TestRepo::init_with_backend(TestRepoBackend::Git);
let repo = &test_workspace.repo;
let git_repo = get_git_repo(repo);
let import_options = auto_track_import_options();
let commit_base = empty_git_commit(&git_repo, "refs/heads/main", &[]);
let commit_main = empty_git_commit(&git_repo, "refs/heads/main", &[commit_base]);
let commit_remote_only = empty_git_commit(
&git_repo,
"refs/remotes/origin/feature-remote-only",
&[commit_base],
);
let commit_remote_and_local = empty_git_commit(
&git_repo,
"refs/remotes/origin/feature-remote-and-local",
&[commit_base],
);
git_ref(
&git_repo,
"refs/heads/feature-remote-and-local",
commit_remote_and_local,
);
let mut tx = repo.start_transaction();
git::import_refs(tx.repo_mut(), &import_options).unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo = tx.commit("test").unwrap();
let expected_heads = hashset! {
jj_id(commit_main),
jj_id(commit_remote_only),
jj_id(commit_remote_and_local),
};
let view = repo.view();
assert_eq!(*view.heads(), expected_heads);
assert_eq!(view.bookmarks().count(), 3);
// Even though the git repo does not have a local bookmark for
// `feature-remote-only`, jj creates one. This follows the model explained
// in docs/bookmarks.md.
assert_eq!(
view.get_local_bookmark("feature-remote-only".as_ref()),
&RefTarget::normal(jj_id(commit_remote_only))
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature-remote-only", "git")),
RemoteRef::absent_ref()
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature-remote-only", "origin")),
&RemoteRef {
target: RefTarget::normal(jj_id(commit_remote_only)),
state: RemoteRefState::Tracked,
},
);
assert_eq!(
view.get_local_bookmark("feature-remote-and-local".as_ref()),
&RefTarget::normal(jj_id(commit_remote_and_local))
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature-remote-and-local", "git")),
&RemoteRef {
target: RefTarget::normal(jj_id(commit_remote_and_local)),
state: RemoteRefState::Tracked,
},
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature-remote-and-local", "origin")),
&RemoteRef {
target: RefTarget::normal(jj_id(commit_remote_and_local)),
state: RemoteRefState::Tracked,
},
);
assert!(view.get_local_bookmark("main".as_ref()).is_present()); // bookmark #3 of 3
// Simulate fetching from a remote where feature-remote-only and
// feature-remote-and-local bookmarks were deleted. This leads to the
// following import deleting the corresponding local bookmarks.
delete_git_ref(&git_repo, "refs/remotes/origin/feature-remote-only");
delete_git_ref(&git_repo, "refs/remotes/origin/feature-remote-and-local");
let mut tx = repo.start_transaction();
git::import_refs(tx.repo_mut(), &import_options).unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo = tx.commit("test").unwrap();
let view = repo.view();
// The local bookmarks were indeed deleted
assert_eq!(view.bookmarks().count(), 2);
assert!(view.get_local_bookmark("main".as_ref()).is_present());
assert!(
view.get_local_bookmark("feature-remote-only".as_ref())
.is_absent()
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature-remote-only", "origin")),
RemoteRef::absent_ref()
);
assert!(
view.get_local_bookmark("feature-remote-and-local".as_ref())
.is_absent()
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature-remote-and-local", "git")),
&RemoteRef {
target: RefTarget::normal(jj_id(commit_remote_and_local)),
state: RemoteRefState::Tracked,
},
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature-remote-and-local", "origin")),
RemoteRef::absent_ref()
);
let expected_heads = hashset! {
jj_id(commit_main),
// Neither commit_remote_only nor commit_remote_and_local should be
// listed as a head. commit_remote_only was never affected by #864,
// but commit_remote_and_local was.
};
assert_eq!(*view.heads(), expected_heads);
}
/// This test is nearly identical to the previous one, except the bookmarks are
/// moved sideways instead of being deleted.
#[test]
fn test_import_refs_reimport_with_moved_remote_ref() {
let test_workspace = TestRepo::init_with_backend(TestRepoBackend::Git);
let repo = &test_workspace.repo;
let git_repo = get_git_repo(repo);
let import_options = auto_track_import_options();
let commit_base = empty_git_commit(&git_repo, "refs/heads/main", &[]);
let commit_main = empty_git_commit(&git_repo, "refs/heads/main", &[commit_base]);
let commit_remote_only = empty_git_commit(
&git_repo,
"refs/remotes/origin/feature-remote-only",
&[commit_base],
);
let commit_remote_and_local = empty_git_commit(
&git_repo,
"refs/remotes/origin/feature-remote-and-local",
&[commit_base],
);
git_ref(
&git_repo,
"refs/heads/feature-remote-and-local",
commit_remote_and_local,
);
let mut tx = repo.start_transaction();
git::import_refs(tx.repo_mut(), &import_options).unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo = tx.commit("test").unwrap();
let expected_heads = hashset! {
jj_id(commit_main),
jj_id(dbg!(commit_remote_only)),
jj_id(dbg!(commit_remote_and_local)),
};
let view = repo.view();
assert_eq!(*view.heads(), expected_heads);
assert_eq!(view.bookmarks().count(), 3);
// Even though the git repo does not have a local bookmark for
// `feature-remote-only`, jj creates one. This follows the model explained
// in docs/bookmarks.md.
assert_eq!(
view.get_local_bookmark("feature-remote-only".as_ref()),
&RefTarget::normal(jj_id(commit_remote_only))
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature-remote-only", "git")),
RemoteRef::absent_ref()
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature-remote-only", "origin")),
&RemoteRef {
target: RefTarget::normal(jj_id(commit_remote_only)),
state: RemoteRefState::Tracked,
},
);
assert_eq!(
view.get_local_bookmark("feature-remote-and-local".as_ref()),
&RefTarget::normal(jj_id(commit_remote_and_local))
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature-remote-and-local", "git")),
&RemoteRef {
target: RefTarget::normal(jj_id(commit_remote_and_local)),
state: RemoteRefState::Tracked,
},
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature-remote-and-local", "origin")),
&RemoteRef {
target: RefTarget::normal(jj_id(commit_remote_and_local)),
state: RemoteRefState::Tracked,
},
);
assert!(view.get_local_bookmark("main".as_ref()).is_present()); // bookmark #3 of 3
// Simulate fetching from a remote where feature-remote-only and
// feature-remote-and-local bookmarks were moved. This leads to the
// following import moving the corresponding local bookmarks.
delete_git_ref(&git_repo, "refs/remotes/origin/feature-remote-only");
delete_git_ref(&git_repo, "refs/remotes/origin/feature-remote-and-local");
let new_commit_remote_only = empty_git_commit(
&git_repo,
"refs/remotes/origin/feature-remote-only",
&[commit_base],
);
let new_commit_remote_and_local = empty_git_commit(
&git_repo,
"refs/remotes/origin/feature-remote-and-local",
&[commit_base],
);
let mut tx = repo.start_transaction();
git::import_refs(tx.repo_mut(), &import_options).unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo = tx.commit("test").unwrap();
let view = repo.view();
assert_eq!(view.bookmarks().count(), 3);
// The local bookmarks are moved
assert_eq!(
view.get_local_bookmark("feature-remote-only".as_ref()),
&RefTarget::normal(jj_id(new_commit_remote_only))
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature-remote-only", "git")),
RemoteRef::absent_ref()
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature-remote-only", "origin")),
&RemoteRef {
target: RefTarget::normal(jj_id(new_commit_remote_only)),
state: RemoteRefState::Tracked,
},
);
assert_eq!(
view.get_local_bookmark("feature-remote-and-local".as_ref()),
&RefTarget::normal(jj_id(new_commit_remote_and_local))
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature-remote-and-local", "git")),
&RemoteRef {
target: RefTarget::normal(jj_id(commit_remote_and_local)),
state: RemoteRefState::Tracked,
},
);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature-remote-and-local", "origin")),
&RemoteRef {
target: RefTarget::normal(jj_id(new_commit_remote_and_local)),
state: RemoteRefState::Tracked,
},
);
assert!(view.get_local_bookmark("main".as_ref()).is_present()); // bookmark #3 of 3
let expected_heads = hashset! {
jj_id(commit_main),
jj_id(new_commit_remote_and_local),
jj_id(new_commit_remote_only),
// Neither commit_remote_only nor commit_remote_and_local should be
// listed as a head. commit_remote_only was never affected by #864,
// but commit_remote_and_local was.
};
assert_eq!(*view.heads(), expected_heads);
}
#[test]
fn test_import_refs_reimport_with_moved_untracked_remote_ref() {
let test_workspace = TestRepo::init_with_backend(TestRepoBackend::Git);
let repo = &test_workspace.repo;
let git_repo = get_git_repo(repo);
let import_options = default_import_options();
// The base commit doesn't have a reference.
let remote_ref_name = "refs/remotes/origin/feature";
let commit_base = empty_git_commit(&git_repo, remote_ref_name, &[]);
let commit_remote_t0 = empty_git_commit(&git_repo, remote_ref_name, &[commit_base]);
let mut tx = repo.start_transaction();
git::import_refs(tx.repo_mut(), &import_options).unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo = tx.commit("test").unwrap();
let view = repo.view();
assert_eq!(*view.heads(), hashset! { jj_id(commit_remote_t0) });
assert_eq!(view.local_bookmarks().count(), 0);
assert_eq!(view.all_remote_bookmarks().count(), 1);
assert_eq!(
view.get_remote_bookmark(remote_symbol("feature", "origin")),
&RemoteRef {
target: RefTarget::normal(jj_id(commit_remote_t0)),
state: RemoteRefState::New,
},
);
// Move the reference remotely and fetch the changes.
delete_git_ref(&git_repo, remote_ref_name);
let commit_remote_t1 = empty_git_commit(&git_repo, remote_ref_name, &[commit_base]);
let mut tx = repo.start_transaction();
git::import_refs(tx.repo_mut(), &import_options).unwrap();
tx.repo_mut().rebase_descendants().unwrap();
let repo = tx.commit("test").unwrap();
let view = repo.view();
// commit_remote_t0 should be abandoned, but commit_base shouldn't because
// it's the ancestor of commit_remote_t1.
assert_eq!(*view.heads(), hashset! { jj_id(commit_remote_t1) });
assert_eq!(view.local_bookmarks().count(), 0);
assert_eq!(view.all_remote_bookmarks().count(), 1);
assert_eq!(
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/benches/diff_bench.rs | lib/benches/diff_bench.rs | use criterion::BenchmarkId;
use criterion::Criterion;
use criterion::criterion_group;
use criterion::criterion_main;
use jj_lib::diff;
fn unchanged_lines(count: usize) -> (String, String) {
let mut lines = vec![];
for i in 0..count {
lines.push(format!("left line {i}\n"));
}
(lines.join(""), lines.join(""))
}
fn modified_lines(count: usize) -> (String, String) {
let mut left_lines = vec![];
let mut right_lines = vec![];
for i in 0..count {
left_lines.push(format!("left line {i}\n"));
right_lines.push(format!("right line {i}\n"));
}
(left_lines.join(""), right_lines.join(""))
}
fn reversed_lines(count: usize) -> (String, String) {
let mut left_lines = vec![];
for i in 0..count {
left_lines.push(format!("left line {i}\n"));
}
let mut right_lines = left_lines.clone();
right_lines.reverse();
(left_lines.join(""), right_lines.join(""))
}
fn bench_diff_lines(c: &mut Criterion) {
let mut group = c.benchmark_group("bench_diff_lines");
for count in [1000, 10000] {
let label = format!("{}k", count / 1000);
group.bench_with_input(
BenchmarkId::new("unchanged", &label),
&unchanged_lines(count),
|b, (left, right)| b.iter(|| diff::diff([left, right])),
);
group.bench_with_input(
BenchmarkId::new("modified", &label),
&modified_lines(count),
|b, (left, right)| b.iter(|| diff::diff([left, right])),
);
group.bench_with_input(
BenchmarkId::new("reversed", &label),
&reversed_lines(count),
|b, (left, right)| b.iter(|| diff::diff([left, right])),
);
}
}
fn bench_diff_git_git_read_tree_c(c: &mut Criterion) {
c.bench_function("bench_diff_git_git_read_tree_c", |b| {
b.iter(|| {
diff::diff([
r##"/*
* GIT - The information manager from hell
*
* Copyright (C) Linus Torvalds, 2005
*/
#include "#cache.h"
static int unpack(unsigned char *sha1)
{
void *buffer;
unsigned long size;
char type[20];
buffer = read_sha1_file(sha1, type, &size);
if (!buffer)
usage("unable to read sha1 file");
if (strcmp(type, "tree"))
usage("expected a 'tree' node");
while (size) {
int len = strlen(buffer)+1;
unsigned char *sha1 = buffer + len;
char *path = strchr(buffer, ' ')+1;
unsigned int mode;
if (size < len + 20 || sscanf(buffer, "%o", &mode) != 1)
usage("corrupt 'tree' file");
buffer = sha1 + 20;
size -= len + 20;
printf("%o %s (%s)\n", mode, path, sha1_to_hex(sha1));
}
return 0;
}
int main(int argc, char **argv)
{
int fd;
unsigned char sha1[20];
if (argc != 2)
usage("read-tree <key>");
if (get_sha1_hex(argv[1], sha1) < 0)
usage("read-tree <key>");
sha1_file_directory = getenv(DB_ENVIRONMENT);
if (!sha1_file_directory)
sha1_file_directory = DEFAULT_DB_ENVIRONMENT;
if (unpack(sha1) < 0)
usage("unpack failed");
return 0;
}
"##,
r##"/*
* GIT - The information manager from hell
*
* Copyright (C) Linus Torvalds, 2005
*/
#include "#cache.h"
static void create_directories(const char *path)
{
int len = strlen(path);
char *buf = malloc(len + 1);
const char *slash = path;
while ((slash = strchr(slash+1, '/')) != NULL) {
len = slash - path;
memcpy(buf, path, len);
buf[len] = 0;
mkdir(buf, 0700);
}
}
static int create_file(const char *path)
{
int fd = open(path, O_WRONLY | O_TRUNC | O_CREAT, 0600);
if (fd < 0) {
if (errno == ENOENT) {
create_directories(path);
fd = open(path, O_WRONLY | O_TRUNC | O_CREAT, 0600);
}
}
return fd;
}
static int unpack(unsigned char *sha1)
{
void *buffer;
unsigned long size;
char type[20];
buffer = read_sha1_file(sha1, type, &size);
if (!buffer)
usage("unable to read sha1 file");
if (strcmp(type, "tree"))
usage("expected a 'tree' node");
while (size) {
int len = strlen(buffer)+1;
unsigned char *sha1 = buffer + len;
char *path = strchr(buffer, ' ')+1;
char *data;
unsigned long filesize;
unsigned int mode;
int fd;
if (size < len + 20 || sscanf(buffer, "%o", &mode) != 1)
usage("corrupt 'tree' file");
buffer = sha1 + 20;
size -= len + 20;
data = read_sha1_file(sha1, type, &filesize);
if (!data || strcmp(type, "blob"))
usage("tree file refers to bad file data");
fd = create_file(path);
if (fd < 0)
usage("unable to create file");
if (write(fd, data, filesize) != filesize)
usage("unable to write file");
fchmod(fd, mode);
close(fd);
free(data);
}
return 0;
}
int main(int argc, char **argv)
{
int fd;
unsigned char sha1[20];
if (argc != 2)
usage("read-tree <key>");
if (get_sha1_hex(argv[1], sha1) < 0)
usage("read-tree <key>");
sha1_file_directory = getenv(DB_ENVIRONMENT);
if (!sha1_file_directory)
sha1_file_directory = DEFAULT_DB_ENVIRONMENT;
if (unpack(sha1) < 0)
usage("unpack failed");
return 0;
}
"##,
])
});
});
}
criterion_group!(benches, bench_diff_lines, bench_diff_git_git_read_tree_c,);
criterion_main!(benches);
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.