repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/xtask/src/bump.rs | crates/xtask/src/bump.rs | use std::{cmp::Ordering, path::Path};
use anyhow::{anyhow, Context, Result};
use indoc::indoc;
use semver::{Prerelease, Version};
use crate::{create_commit, BumpVersion};
/// Lists all git tags in the repository and returns the newest semver tag
/// as a string (without the leading `v`).
///
/// Prerelease tags are ordered strictly before all stable tags, so the
/// returned tag is the newest *stable* version whenever one exists.
pub fn get_latest_tag() -> Result<String> {
    // Ask git for every tag in the repository.
    let output = std::process::Command::new("git")
        .args(["tag", "-l"])
        .output()?;
    if !output.status.success() {
        anyhow::bail!(
            "Failed to list tags: {}",
            String::from_utf8_lossy(&output.stderr)
        );
    }

    // Keep only tags that parse as semver, with or without a leading `v`.
    let mut versions = String::from_utf8(output.stdout)?
        .lines()
        .filter_map(|tag| Version::parse(tag.strip_prefix('v').unwrap_or(tag)).ok())
        .collect::<Vec<Version>>();

    // Every prerelease sorts before every stable release; versions compare
    // normally within each class.
    versions.sort_by(|a, b| {
        let (a_is_pre, b_is_pre) = (a.pre != Prerelease::EMPTY, b.pre != Prerelease::EMPTY);
        if a_is_pre == b_is_pre {
            a.cmp(b)
        } else if a_is_pre {
            Ordering::Less
        } else {
            Ordering::Greater
        }
    });

    versions
        .last()
        .map(|version| version.to_string())
        .ok_or_else(|| anyhow!("No tags found"))
}
/// Bumps the workspace version: validates that the latest git tag matches the
/// workspace manifest, then rewrites every versioned file and tags the result.
///
/// Fixes:
/// - `update_crates(¤t_version, …)` was HTML-entity-corrupted text
///   (`&curren;` mojibake for `&current_version`) and did not compile.
/// - The mismatch guard used `&&`, so a tag/manifest disagreement in only
///   the major *or* only the minor component went undetected; `||` is the
///   intended "does not match up" condition.
pub fn run(args: BumpVersion) -> Result<()> {
    let latest_tag = get_latest_tag()?;
    let current_version = Version::parse(&latest_tag)?;

    // Confirm the tag actually resolves to a commit before proceeding.
    let output = std::process::Command::new("git")
        .args(["rev-parse", &format!("v{latest_tag}")])
        .output()?;
    if !output.status.success() {
        anyhow::bail!(
            "Failed to get tag SHA: {}",
            String::from_utf8_lossy(&output.stderr)
        );
    }

    // The workspace manifest must agree with the latest tag in both major
    // and minor components; a mismatch in either means it was hand-edited.
    let workspace_toml_version = Version::parse(&fetch_workspace_version()?)?;
    if current_version.major != workspace_toml_version.major
        || current_version.minor != workspace_toml_version.minor
    {
        eprintln!(
            indoc! {"
            Seems like the workspace Cargo.toml ({}) version does not match up with the latest git tag ({}).
            Please ensure you don't change that yourself, this subcommand will handle this for you.
            "},
            workspace_toml_version, latest_tag
        );
        return Ok(());
    }

    let next_version = args.version;
    println!("Bumping from {current_version} to {next_version}");

    update_crates(&current_version, &next_version)?;
    update_makefile(&next_version)?;
    update_cmake(&next_version)?;
    update_nix(&next_version)?;
    update_npm(&next_version)?;
    update_zig(&next_version)?;
    tag_next_version(&next_version)?;
    Ok(())
}
/// Commits every file touched by the version bump, then creates an annotated
/// `v<version>` tag pointing at that commit.
fn tag_next_version(next_version: &Version) -> Result<()> {
    let commit_sha = create_commit(
        &next_version.to_string(),
        &[
            "Cargo.lock",
            "Cargo.toml",
            "Makefile",
            "build.zig.zon",
            "flake.nix",
            "crates/cli/Cargo.toml",
            "crates/cli/npm/package.json",
            "crates/cli/npm/package-lock.json",
            "crates/config/Cargo.toml",
            "crates/highlight/Cargo.toml",
            "crates/loader/Cargo.toml",
            "crates/tags/Cargo.toml",
            "CMakeLists.txt",
            "lib/Cargo.toml",
            "lib/binding_web/package.json",
            "lib/binding_web/package-lock.json",
        ],
    )?;

    // Create tag
    let tag_name = format!("v{next_version}");
    let output = std::process::Command::new("git")
        .args(["tag", "-a", &tag_name, "-m", &tag_name, &commit_sha])
        .output()?;
    if !output.status.success() {
        anyhow::bail!(
            "Failed to create tag: {}",
            String::from_utf8_lossy(&output.stderr)
        );
    }

    println!("Tagged commit {commit_sha} with tag v{next_version}");
    Ok(())
}
/// Rewrites the `VERSION` assignment in the top-level Makefile, leaving every
/// other line untouched.
fn update_makefile(next_version: &Version) -> Result<()> {
    let original = std::fs::read_to_string("Makefile")?;
    let mut updated = Vec::new();
    for line in original.lines() {
        if line.starts_with("VERSION") {
            updated.push(format!("VERSION := {next_version}"));
        } else {
            updated.push(line.to_string());
        }
    }
    std::fs::write("Makefile", updated.join("\n") + "\n")?;
    Ok(())
}
/// Rewrites the quoted version in CMakeLists.txt lines containing ` VERSION`
/// (e.g. `project(... VERSION "x.y.z")`).
///
/// Fix: the original called `.find('"').unwrap()` / `.rfind('"').unwrap()`,
/// which panics when a matching line carries no quotes, and mangled the line
/// when it contained exactly one quote. Such lines are now left unchanged.
fn update_cmake(next_version: &Version) -> Result<()> {
    let cmake = std::fs::read_to_string("CMakeLists.txt")?;
    let cmake = cmake
        .lines()
        .map(|line| {
            if line.contains(" VERSION") {
                // Only rewrite when the line has a well-formed quoted value.
                if let (Some(start_quote), Some(end_quote)) = (line.find('"'), line.rfind('"')) {
                    if start_quote < end_quote {
                        return format!(
                            "{}{next_version}{}",
                            &line[..=start_quote],
                            &line[end_quote..]
                        );
                    }
                }
                line.to_string()
            } else {
                line.to_string()
            }
        })
        .collect::<Vec<_>>()
        .join("\n")
        + "\n";
    std::fs::write("CMakeLists.txt", cmake)?;
    Ok(())
}
/// Replaces the `version = "...";` assignment in flake.nix with the new
/// version, preserving every other line.
fn update_nix(next_version: &Version) -> Result<()> {
    let contents = std::fs::read_to_string("flake.nix")?;
    let mut rewritten = Vec::new();
    for line in contents.lines() {
        if line.trim_start().starts_with("version =") {
            rewritten.push(format!(" version = \"{next_version}\";"));
        } else {
            rewritten.push(line.to_owned());
        }
    }
    std::fs::write("flake.nix", rewritten.join("\n") + "\n")?;
    Ok(())
}
/// Delegates the workspace-wide crate version bump to `cargo workspaces`,
/// choosing `minor` or `patch` based on the difference between the current
/// and next versions.
fn update_crates(current_version: &Version, next_version: &Version) -> Result<()> {
    let bump_kind = if next_version.minor > current_version.minor {
        "minor"
    } else {
        "patch"
    };
    let status = std::process::Command::new("cargo")
        .args(["workspaces", "version", bump_kind])
        .args([
            "--no-git-commit",
            "--yes",
            "--force",
            "tree-sitter{,-cli,-config,-generate,-loader,-highlight,-tags}",
            "--ignore-changes",
            "crates/language/*",
        ])
        .status()?;
    if status.success() {
        Ok(())
    } else {
        Err(anyhow!("Failed to update crates"))
    }
}
/// Updates the `version` field of each npm project's package.json and
/// refreshes its lockfile via `npm install --package-lock-only`.
/// If npm cannot be launched at all, the remaining work is skipped silently.
fn update_npm(next_version: &Version) -> Result<()> {
    for npm_project in ["lib/binding_web", "crates/cli/npm"] {
        let npm_path = Path::new(npm_project);
        let package_json_path = npm_path.join("package.json");

        // Parse package.json as a generic JSON object.
        let raw = std::fs::read_to_string(&package_json_path)
            .with_context(|| format!("Failed to read {}", package_json_path.display()))?;
        let parsed = serde_json::from_str::<serde_json::Value>(&raw)?;
        let mut package_json = parsed
            .as_object()
            .ok_or_else(|| anyhow!("Invalid package.json"))?
            .clone();

        // Overwrite the version field and write the file back out.
        package_json.insert(
            "version".to_string(),
            serde_json::Value::String(next_version.to_string()),
        );
        std::fs::write(
            package_json_path,
            serde_json::to_string_pretty(&package_json)? + "\n",
        )?;

        let Ok(cmd) = std::process::Command::new("npm")
            .arg("install")
            .arg("--package-lock-only")
            .arg("--ignore-scripts")
            .current_dir(npm_path)
            .output()
        else {
            return Ok(()); // npm is not `executable`, ignore
        };
        if !cmd.status.success() {
            let stderr = String::from_utf8_lossy(&cmd.stderr);
            return Err(anyhow!(
                "Failed to run `npm install` in {}:\n{stderr}",
                npm_path.display()
            ));
        }
    }
    Ok(())
}
/// Rewrites the `.version` field of build.zig.zon in place.
fn update_zig(next_version: &Version) -> Result<()> {
    let contents = std::fs::read_to_string("build.zig.zon")?;
    let mut rewritten = Vec::new();
    for line in contents.lines() {
        if line.starts_with(" .version") {
            rewritten.push(format!(" .version = \"{next_version}\","));
        } else {
            rewritten.push(line.to_string());
        }
    }
    std::fs::write("build.zig.zon", rewritten.join("\n") + "\n")?;
    Ok(())
}
/// read Cargo.toml and get the version
///
/// Scans for the first `version = "..."` line and returns the last
/// quote-delimited segment of that line.
fn fetch_workspace_version() -> Result<String> {
    let manifest = std::fs::read_to_string("Cargo.toml")?;
    for line in manifest.lines() {
        if line.starts_with("version = ") {
            if let Some(version) = line.split_terminator('"').next_back() {
                return Ok(version.to_string());
            }
        }
    }
    Err(anyhow!("No version found in Cargo.toml"))
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/xtask/src/generate.rs | crates/xtask/src/generate.rs | use std::{collections::BTreeSet, ffi::OsStr, fs, path::Path, process::Command, str::FromStr};
use anyhow::{Context, Result};
use bindgen::RustTarget;
use crate::{bail_on_err, GenerateFixtures};
const HEADER_PATH: &str = "lib/include/tree_sitter/api.h";
/// Builds the CLI in release mode, then regenerates every fixture grammar
/// under `test/fixtures/grammars` — either as a native parser or, with
/// `--wasm`, as a Wasm module in `target/release`.
pub fn run_fixtures(args: &GenerateFixtures) -> Result<()> {
    // Build the CLI first so we can invoke it below.
    let output = std::process::Command::new("cargo")
        .args(["build", "--release"])
        .spawn()?
        .wait_with_output()?;
    bail_on_err(&output, "Failed to run cargo build")?;

    let cwd = std::env::current_dir()?;
    let tree_sitter_binary = cwd.join("target").join("release").join("tree-sitter");
    let grammars_dir = cwd.join("test").join("fixtures").join("grammars");

    for grammar_file in find_grammar_files(grammars_dir.to_str().unwrap()).flatten() {
        let grammar_dir = grammar_file.parent().unwrap();
        let grammar_name = grammar_dir.file_name().and_then(OsStr::to_str).unwrap();
        println!(
            "Regenerating {grammar_name} parser{}",
            if args.wasm { " to Wasm" } else { "" }
        );

        let (output, message) = if args.wasm {
            // Compile the grammar to a Wasm module under target/release.
            let output = Command::new(&tree_sitter_binary)
                .args([
                    "build",
                    "--wasm",
                    "-o",
                    &format!("target/release/tree-sitter-{grammar_name}.wasm"),
                    grammar_dir.to_str().unwrap(),
                ])
                .spawn()?
                .wait_with_output()?;
            (
                output,
                format!("Failed to regenerate {grammar_name} parser to wasm"),
            )
        } else {
            // Regenerate the native parser from grammar.json at the latest ABI.
            let output = Command::new(&tree_sitter_binary)
                .arg("generate")
                .arg("src/grammar.json")
                .arg("--abi=latest")
                .current_dir(grammar_dir)
                .spawn()?
                .wait_with_output()?;
            (
                output,
                format!("Failed to regenerate {grammar_name} parser"),
            )
        };
        bail_on_err(&output, &message)?;
    }
    Ok(())
}
pub fn run_bindings() -> Result<()> {
let output = Command::new("cargo")
.args(["metadata", "--format-version", "1"])
.output()
.unwrap();
let metadata = serde_json::from_slice::<serde_json::Value>(&output.stdout).unwrap();
let Some(rust_version) = metadata
.get("packages")
.and_then(|packages| packages.as_array())
.and_then(|packages| {
packages.iter().find_map(|package| {
if package["name"] == "tree-sitter" {
package.get("rust_version").and_then(|v| v.as_str())
} else {
None
}
})
})
else {
panic!("Failed to find tree-sitter package in cargo metadata");
};
let no_copy = [
"TSInput",
"TSLanguage",
"TSLogger",
"TSLookaheadIterator",
"TSParser",
"TSTree",
"TSQuery",
"TSQueryCursor",
"TSQueryCapture",
"TSQueryMatch",
"TSQueryPredicateStep",
];
let bindings = bindgen::Builder::default()
.header(HEADER_PATH)
.layout_tests(false)
.allowlist_type("^TS.*")
.allowlist_function("^ts_.*")
.allowlist_var("^TREE_SITTER.*")
.no_copy(no_copy.join("|"))
.prepend_enum_name(false)
.use_core()
.clang_arg("-D TREE_SITTER_FEATURE_WASM")
.rust_target(RustTarget::from_str(rust_version).unwrap())
.generate()
.expect("Failed to generate bindings");
bindings
.write_to_file("lib/binding_rust/bindings.rs")
.with_context(|| "Failed to write bindings")
}
/// Scans `target/*/tree-sitter-*.wasm` files with `wasm-objdump` and prints
/// the sorted, de-duplicated set of `env.*` imports they require.
pub fn run_wasm_exports() -> Result<()> {
    let mut imports = BTreeSet::new();

    // Collect every `<env.NAME>` import mentioned by one wasm file.
    let mut collect_imports = |path: &str| -> Result<()> {
        let output = Command::new("wasm-objdump")
            .args(["--details", path, "--section", "Import"])
            .output()?;
        bail_on_err(&output, "Failed to run wasm-objdump")?;
        for line in String::from_utf8_lossy(&output.stdout).lines() {
            let name = line.split("<env.").nth(1).and_then(|s| s.split('>').next());
            if let Some(name) = name {
                imports.insert(name.to_string());
            }
        }
        Ok(())
    };

    for entry in fs::read_dir(Path::new("target"))? {
        let Ok(entry) = entry else {
            continue;
        };
        let dir = entry.path();
        if !dir.is_dir() {
            continue;
        }
        for entry in fs::read_dir(&dir)? {
            let Ok(entry) = entry else {
                continue;
            };
            let path = entry.path();
            if path.is_file()
                && path.extension() == Some(OsStr::new("wasm"))
                && path
                    .file_name()
                    .unwrap()
                    .to_str()
                    .unwrap()
                    .starts_with("tree-sitter-")
            {
                collect_imports(path.to_str().unwrap())?;
            }
        }
    }

    for imp in imports {
        println!("{imp}");
    }
    Ok(())
}
/// Recursively yields every `grammar.js` below `dir`, skipping any
/// `node_modules` directory. Panics if `dir` itself cannot be read;
/// unreadable child entries are silently skipped.
fn find_grammar_files(
    dir: &str,
) -> impl Iterator<Item = Result<std::path::PathBuf, std::io::Error>> {
    let entries = fs::read_dir(dir).expect("Failed to read directory");
    entries.filter_map(Result::ok).flat_map(|entry| {
        let path = entry.path();
        let skip_dir = path.to_string_lossy().contains("node_modules");
        let nested: Box<dyn Iterator<Item = _>> = if path.is_dir() && !skip_dir {
            // Recurse into subdirectories.
            Box::new(find_grammar_files(path.to_str().unwrap()))
        } else if path.is_file() && path.file_name() == Some(OsStr::new("grammar.js")) {
            Box::new(std::iter::once(Ok(path)))
        } else {
            Box::new(std::iter::empty())
        };
        nested
    })
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/xtask/src/upgrade_wasmtime.rs | crates/xtask/src/upgrade_wasmtime.rs | use std::process::Command;
use anyhow::{Context, Result};
use semver::Version;
use crate::{create_commit, UpgradeWasmtime};
const WASMTIME_RELEASE_URL: &str = "https://github.com/bytecodealliance/wasmtime/releases/download";
fn update_cargo(version: &Version) -> Result<()> {
let file = std::fs::read_to_string("lib/Cargo.toml")?;
let mut old_lines = file.lines();
let mut new_lines = Vec::with_capacity(old_lines.size_hint().0);
while let Some(line) = old_lines.next() {
new_lines.push(line.to_string());
if line == "[dependencies.wasmtime-c-api]" {
let _ = old_lines.next();
new_lines.push(format!("version = \"{version}\""));
}
}
std::fs::write("lib/Cargo.toml", new_lines.join("\n") + "\n")?;
Command::new("cargo")
.arg("update")
.status()
.map(|_| ())
.with_context(|| "Failed to execute cargo update")
}
fn zig_fetch(lines: &mut Vec<String>, version: &Version, url_suffix: &str) -> Result<()> {
let url = &format!("{WASMTIME_RELEASE_URL}/v{version}/wasmtime-v{version}-{url_suffix}");
println!(" Fetching {url}");
lines.push(format!(" .url = \"{url}\","));
let output = Command::new("zig")
.arg("fetch")
.arg(url)
.output()
.with_context(|| format!("Failed to execute zig fetch {url}"))?;
let hash = String::from_utf8_lossy(&output.stdout);
lines.push(format!(" .hash = \"{}\",", hash.trim_end()));
Ok(())
}
/// Rewrites every per-platform `wasmtime_c_api_*` dependency entry in
/// `build.zig.zon`, re-fetching a fresh `.url`/`.hash` pair for `version`.
fn update_zig(version: &Version) -> Result<()> {
    let file = std::fs::read_to_string("build.zig.zon")?;
    let mut old_lines = file.lines();
    let new_lines = &mut Vec::with_capacity(old_lines.size_hint().0);
    // Expands to one `match` arm per (platform, arch) pair. On a line naming
    // a wasmtime dependency, it consumes (discards) the next two lines — the
    // stale `.url` and `.hash` — and pushes freshly fetched replacements.
    macro_rules! match_wasmtime_zig_dep {
        ($line:ident, {$($platform:literal => [$($arch:literal),*]),*,}) => {
            match $line {
                $($(concat!(" .wasmtime_c_api_", $arch, "_", $platform, " = .{") => {
                    // Skip the old .url and .hash lines.
                    let (_, _) = (old_lines.next(), old_lines.next());
                    // Windows-family releases ship as .zip; all others as .tar.xz.
                    let suffix = if $platform == "windows" || $platform == "mingw" {
                        concat!($arch, "-", $platform, "-c-api.zip")
                    } else {
                        concat!($arch, "-", $platform, "-c-api.tar.xz")
                    };
                    zig_fetch(new_lines, version, suffix)?;
                })*)*
                _ => {}
            }
        };
    }
    while let Some(line) = old_lines.next() {
        new_lines.push(line.to_string());
        match_wasmtime_zig_dep!(line, {
            "android" => ["aarch64", "x86_64"],
            "linux" => ["aarch64", "armv7", "i686", "riscv64gc", "s390x", "x86_64"],
            "macos" => ["aarch64", "x86_64"],
            "mingw" => ["x86_64"],
            "musl" => ["aarch64", "x86_64"],
            "windows" => ["aarch64", "i686", "x86_64"],
        });
    }
    std::fs::write("build.zig.zon", new_lines.join("\n") + "\n")?;
    Ok(())
}
/// Upgrades the vendored wasmtime-c-api dependency for both the Rust and Zig
/// builds, then commits the touched files.
pub fn run(args: &UpgradeWasmtime) -> Result<()> {
    let version = &args.version;
    println!("Upgrading wasmtime for Rust");
    update_cargo(version)?;
    println!("Upgrading wasmtime for Zig");
    update_zig(version)?;
    let message = format!("build(deps): bump wasmtime-c-api to v{version}");
    create_commit(&message, &["lib/Cargo.toml", "Cargo.lock", "build.zig.zon"])?;
    Ok(())
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/xtask/src/clippy.rs | crates/xtask/src/clippy.rs | use std::process::Command;
use anyhow::Result;
use crate::{bail_on_err, Clippy};
/// Runs `cargo clippy` over the workspace (or a single package) with all
/// targets/features in release mode, denying warnings.
///
/// Fix: `--fix` is a cargo-clippy option, but it was appended *after* the
/// `--` separator, so it was forwarded to the compiler invocation instead of
/// cargo. It now precedes `--`.
pub fn run(args: &Clippy) -> Result<()> {
    let mut clippy_command = Command::new("cargo");
    clippy_command.arg("clippy");

    if let Some(package) = args.package.as_ref() {
        clippy_command.args(["--package", package]);
    } else {
        clippy_command.arg("--workspace");
    }

    clippy_command
        .arg("--release")
        .arg("--all-targets")
        .arg("--all-features");

    // Cargo-side option: must come before the `--` separator.
    if args.fix {
        clippy_command.arg("--fix");
    }

    // Everything after `--` is handed to the lint driver.
    clippy_command.arg("--").arg("-D").arg("warnings");

    bail_on_err(
        &clippy_command.spawn()?.wait_with_output()?,
        "Clippy failed",
    )
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/config/src/tree_sitter_config.rs | crates/config/src/tree_sitter_config.rs | #![cfg_attr(not(any(test, doctest)), doc = include_str!("../README.md"))]
use std::{
env, fs,
path::{Path, PathBuf},
};
use etcetera::BaseStrategy as _;
use log::warn;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use thiserror::Error;
pub type ConfigResult<T> = Result<T, ConfigError>;
/// Errors that can occur while locating, reading, or writing the
/// tree-sitter configuration file.
#[derive(Debug, Error)]
pub enum ConfigError {
    /// The file exists but is not valid JSON; carries the offending path
    /// and the underlying parse error.
    #[error("Bad JSON config {0} -- {1}")]
    ConfigRead(String, serde_json::Error),
    /// The user's home directory could not be determined.
    #[error(transparent)]
    HomeDir(#[from] etcetera::HomeDirError),
    /// A filesystem operation failed; [`IoError`] records the path involved.
    #[error(transparent)]
    IO(IoError),
    /// JSON (de)serialization failed outside the initial config read.
    #[error(transparent)]
    Serialization(#[from] serde_json::Error),
}
/// An I/O failure annotated with the path being accessed, when known.
#[derive(Debug, Error)]
pub struct IoError {
    /// The underlying OS error.
    pub error: std::io::Error,
    /// The path involved in the failing operation, if it had one.
    pub path: Option<String>,
}
impl IoError {
    /// Wraps an OS error, recording a lossy string form of `path` when given.
    fn new(error: std::io::Error, path: Option<&Path>) -> Self {
        let path = path.map(|p| p.to_string_lossy().into_owned());
        Self { error, path }
    }
}
impl std::fmt::Display for IoError {
    /// Formats as `<error>` or `<error> (<path>)` when a path is recorded.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self.path {
            Some(ref path) => write!(f, "{} ({path})", self.error),
            None => write!(f, "{}", self.error),
        }
    }
}
/// Holds the contents of tree-sitter's configuration file.
///
/// The file typically lives at `~/.config/tree-sitter/config.json`, but see the [`Config::load`][]
/// method for the full details on where it might be located.
///
/// This type holds the generic JSON content of the configuration file. Individual tree-sitter
/// components will use the [`Config::get`][] method to parse that JSON to extract configuration
/// fields that are specific to that component.
#[derive(Debug)]
pub struct Config {
    /// Path the configuration was loaded from (or will be saved to).
    pub location: PathBuf,
    /// The raw JSON document; components extract their slice via [`Config::get`].
    pub config: Value,
}
impl Config {
    /// Searches the standard locations for an existing `config.json`,
    /// returning `Ok(None)` when none exists.
    ///
    /// Search order: `$TREE_SITTER_DIR/config.json`, the XDG config path,
    /// a legacy macOS `Application Support` location (migrated to the XDG
    /// path when found), then the legacy `~/.tree-sitter` location.
    pub fn find_config_file() -> ConfigResult<Option<PathBuf>> {
        if let Ok(path) = env::var("TREE_SITTER_DIR") {
            let mut path = PathBuf::from(path);
            path.push("config.json");
            // An explicitly configured directory takes precedence; when it
            // holds no config file, no fallback locations are consulted.
            if !path.exists() {
                return Ok(None);
            }
            if path.is_file() {
                return Ok(Some(path));
            }
        }
        let xdg_path = Self::xdg_config_file()?;
        if xdg_path.is_file() {
            return Ok(Some(xdg_path));
        }
        if cfg!(target_os = "macos") {
            let legacy_apple_path = etcetera::base_strategy::Apple::new()?
                .data_dir() // `$HOME/Library/Application Support/`
                .join("tree-sitter")
                .join("config.json");
            // Migrate a config left behind by older releases into the XDG
            // location so future lookups hit the fast path above.
            if legacy_apple_path.is_file() {
                let xdg_dir = xdg_path.parent().unwrap();
                fs::create_dir_all(xdg_dir)
                    .map_err(|e| ConfigError::IO(IoError::new(e, Some(xdg_dir))))?;
                fs::rename(&legacy_apple_path, &xdg_path).map_err(|e| {
                    ConfigError::IO(IoError::new(e, Some(legacy_apple_path.as_path())))
                })?;
                warn!(
                    "Your config.json file has been automatically migrated from \"{}\" to \"{}\"",
                    legacy_apple_path.display(),
                    xdg_path.display()
                );
                return Ok(Some(xdg_path));
            }
        }
        let legacy_path = etcetera::home_dir()?
            .join(".tree-sitter")
            .join("config.json");
        if legacy_path.is_file() {
            return Ok(Some(legacy_path));
        }
        Ok(None)
    }

    /// Returns the XDG-conventional config path
    /// (e.g. `~/.config/tree-sitter/config.json`); the file may not exist.
    fn xdg_config_file() -> ConfigResult<PathBuf> {
        let xdg_path = etcetera::choose_base_strategy()?
            .config_dir()
            .join("tree-sitter")
            .join("config.json");
        Ok(xdg_path)
    }

    /// Locates and loads in the user's configuration file. We search for the configuration file
    /// in the following locations, in order:
    ///
    /// - Location specified by the path parameter if provided
    /// - `$TREE_SITTER_DIR/config.json`, if the `TREE_SITTER_DIR` environment variable is set
    /// - `tree-sitter/config.json` in your default user configuration directory, as determined by
    ///   [`etcetera::choose_base_strategy`](https://docs.rs/etcetera/*/etcetera/#basestrategy)
    /// - `$HOME/.tree-sitter/config.json` as a fallback from where tree-sitter _used_ to store
    ///   its configuration
    pub fn load(path: Option<PathBuf>) -> ConfigResult<Self> {
        let location = if let Some(path) = path {
            path
        } else if let Some(path) = Self::find_config_file()? {
            path
        } else {
            // No config file anywhere: fall back to an empty in-memory config.
            return Self::initial();
        };
        let content = fs::read_to_string(&location)
            .map_err(|e| ConfigError::IO(IoError::new(e, Some(location.as_path()))))?;
        let config = serde_json::from_str(&content)
            .map_err(|e| ConfigError::ConfigRead(location.to_string_lossy().to_string(), e))?;
        Ok(Self { location, config })
    }

    /// Creates an empty initial configuration file. You can then use the [`Config::add`][] method
    /// to add the component-specific configuration types for any components that want to add
    /// content to the default file, and then use [`Config::save`][] to write the configuration to
    /// disk.
    ///
    /// (Note that this is typically only done by the `tree-sitter init-config` command.)
    pub fn initial() -> ConfigResult<Self> {
        let location = if let Ok(path) = env::var("TREE_SITTER_DIR") {
            let mut path = PathBuf::from(path);
            path.push("config.json");
            path
        } else {
            Self::xdg_config_file()?
        };
        // Start from an empty JSON object; components add their sections.
        let config = serde_json::json!({});
        Ok(Self { location, config })
    }

    /// Saves this configuration to the file that it was originally loaded from.
    pub fn save(&self) -> ConfigResult<()> {
        let json = serde_json::to_string_pretty(&self.config)?;
        let config_dir = self.location.parent().unwrap();
        fs::create_dir_all(config_dir)
            .map_err(|e| ConfigError::IO(IoError::new(e, Some(config_dir))))?;
        fs::write(&self.location, json)
            .map_err(|e| ConfigError::IO(IoError::new(e, Some(self.location.as_path()))))?;
        Ok(())
    }

    /// Parses a component-specific configuration from the configuration file. The type `C` must
    /// be [deserializable](https://docs.rs/serde/*/serde/trait.Deserialize.html) from a JSON
    /// object, and must only include the fields relevant to that component.
    pub fn get<C>(&self) -> ConfigResult<C>
    where
        C: for<'de> Deserialize<'de>,
    {
        let config = serde_json::from_value(self.config.clone())?;
        Ok(config)
    }

    /// Adds a component-specific configuration to the configuration file. The type `C` must be
    /// [serializable](https://docs.rs/serde/*/serde/trait.Serialize.html) into a JSON object, and
    /// must only include the fields relevant to that component.
    pub fn add<C>(&mut self, config: C) -> ConfigResult<()>
    where
        C: Serialize,
    {
        let mut config = serde_json::to_value(&config)?;
        // Merge the component's top-level keys into the shared JSON object.
        self.config
            .as_object_mut()
            .unwrap()
            .append(config.as_object_mut().unwrap());
        Ok(())
    }
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/build.rs | crates/cli/build.rs | use std::{
env,
path::{Path, PathBuf},
process::Command,
time::SystemTime,
};
/// Cargo build script for the tree-sitter CLI: embeds build metadata
/// (git SHA, timestamp), declares custom cfg flags, and on ELF platforms
/// exports the allocator override symbols from the executable.
fn main() {
    // Embed the current git commit (when building from a checkout) as BUILD_SHA.
    if let Some(git_sha) = read_git_sha() {
        println!("cargo:rustc-env=BUILD_SHA={git_sha}");
    }
    // Declare the custom cfg flags so `--check-cfg` does not warn about them.
    println!("cargo:rustc-check-cfg=cfg(sanitizing)");
    println!("cargo:rustc-check-cfg=cfg(TREE_SITTER_EMBED_WASM_BINDING)");
    // Only embed the web playground when its generated assets exist on disk.
    if web_playground_files_present() {
        println!("cargo:rustc-cfg=TREE_SITTER_EMBED_WASM_BINDING");
    }
    // Record the build timestamp as fractional seconds since the Unix epoch.
    let build_time = SystemTime::now()
        .duration_since(SystemTime::UNIX_EPOCH)
        .unwrap()
        .as_secs_f64();
    println!("cargo:rustc-env=BUILD_TIME={build_time}");
    // On ELF platforms, write a linker dynamic-list file so the
    // `ts_current_*` allocator symbols stay exported from the binary
    // (presumably for dynamically loaded parsers to resolve — see lib).
    #[cfg(any(
        target_os = "linux",
        target_os = "android",
        target_os = "freebsd",
        target_os = "openbsd",
        target_os = "netbsd",
        target_os = "dragonfly",
    ))]
    {
        let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()).join("dynamic-symbols.txt");
        std::fs::write(
            &out_dir,
            "{
ts_current_malloc;
ts_current_calloc;
ts_current_realloc;
ts_current_free;
};",
        )
        .unwrap();
        println!(
            "cargo:rustc-link-arg=-Wl,--dynamic-list={}",
            out_dir.display()
        );
    }
}
/// Returns true only when every generated web-playground asset exists on
/// disk (paths are relative to this crate's directory during the build).
fn web_playground_files_present() -> bool {
    [
        "../../docs/src/assets/js/playground.js",
        "../../lib/binding_web/web-tree-sitter.js",
        "../../lib/binding_web/web-tree-sitter.wasm",
    ]
    .into_iter()
    .all(|p| Path::new(p).exists())
}
/// Returns the git HEAD SHA when building from a repository checkout, or
/// `None` when the parent directory has no `.git` or git fails/is missing.
///
/// Fix: `git rev-parse` output ends with a newline which the original kept,
/// so the BUILD_SHA env value carried a trailing `\n`; it is now trimmed.
/// Also replaces `map_or(None, …)` with the idiomatic `.ok()?` chain.
fn read_git_sha() -> Option<String> {
    let crate_path = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap());
    // Only attempt git when the repository root (parent of this crate) exists.
    if !crate_path.parent().is_some_and(|p| p.join(".git").exists()) {
        return None;
    }
    let output = Command::new("git")
        .args(["rev-parse", "HEAD"])
        .current_dir(crate_path)
        .output()
        .ok()?;
    if !output.status.success() {
        return None;
    }
    Some(String::from_utf8_lossy(&output.stdout).trim().to_string())
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/test.rs | crates/cli/src/test.rs | use std::{
collections::BTreeMap,
ffi::OsStr,
fmt::Display as _,
fs,
io::{self, Write},
path::{Path, PathBuf},
str,
sync::LazyLock,
time::Duration,
};
use anstyle::AnsiColor;
use anyhow::{anyhow, Context, Result};
use clap::ValueEnum;
use indoc::indoc;
use regex::{
bytes::{Regex as ByteRegex, RegexBuilder as ByteRegexBuilder},
Regex,
};
use schemars::{JsonSchema, Schema, SchemaGenerator};
use serde::Serialize;
use similar::{ChangeTag, TextDiff};
use tree_sitter::{format_sexp, Language, LogType, Parser, Query, Tree};
use walkdir::WalkDir;
use super::util;
use crate::{
logger::paint,
parse::{
render_cst, ParseDebugType, ParseFileOptions, ParseOutput, ParseStats, ParseTheme, Stats,
},
};
// Matches a corpus test header: a run of `===` (3+), the test name plus any
// attribute-marker lines, then a closing run of `===`. Captures optional
// suffixes so custom header decorations can be round-tripped.
static HEADER_REGEX: LazyLock<ByteRegex> = LazyLock::new(|| {
    ByteRegexBuilder::new(
        r"^(?x)
(?P<equals>(?:=+){3,})
(?P<suffix1>[^=\r\n][^\r\n]*)?
\r?\n
(?P<test_name_and_markers>(?:([^=\r\n]|\s+:)[^\r\n]*\r?\n)+)
===+
(?P<suffix2>[^=\r\n][^\r\n]*)?\r?\n",
    )
    .multi_line(true)
    .build()
    .unwrap()
});
// Matches the `---` divider (3+ hyphens) separating input from expected output.
static DIVIDER_REGEX: LazyLock<ByteRegex> = LazyLock::new(|| {
    ByteRegexBuilder::new(r"^(?P<hyphens>(?:-+){3,})(?P<suffix>[^-\r\n][^\r\n]*)?\r?\n")
        .multi_line(true)
        .build()
        .unwrap()
});
// Matches whole lines that are `;`-style comments.
static COMMENT_REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"(?m)^\s*;.*$").unwrap());
// Matches any run of whitespace (used for normalization).
static WHITESPACE_REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"\s+").unwrap());
// Matches a ` field: (` fragment inside an S-expression.
static SEXP_FIELD_REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new(r" \w+: \(").unwrap());
// Matches a `[row, column]` point annotation, with surrounding whitespace.
static POINT_REGEX: LazyLock<Regex> =
    LazyLock::new(|| Regex::new(r"\s*\[\s*\d+\s*,\s*\d+\s*\]\s*").unwrap());
/// A node in the parsed corpus test tree: either a group (directory/file of
/// tests) or a single example with its input and expected output.
#[derive(Debug, PartialEq, Eq)]
pub enum TestEntry {
    Group {
        name: String,
        /// Nested groups and examples, in source order.
        children: Vec<Self>,
        /// The corpus file this group came from, when it maps to one file.
        file_path: Option<PathBuf>,
    },
    Example {
        name: String,
        /// Raw source bytes to parse.
        input: Vec<u8>,
        /// Expected output (S-expression text).
        output: String,
        // Delimiter lengths are kept so `--update` can rewrite the file
        // with the same `===`/`---` widths it was written with.
        header_delim_len: usize,
        divider_delim_len: usize,
        /// Whether the expected output contains `field:` annotations.
        has_fields: bool,
        /// The raw attribute text from the header, as written.
        attributes_str: String,
        /// Parsed form of the header attributes.
        attributes: TestAttributes,
        file_name: Option<String>,
    },
}
/// Attributes parsed from a test header (e.g. `:skip`, `:error`),
/// controlling how a single example is run.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct TestAttributes {
    /// Skip this example entirely.
    pub skip: bool,
    /// Whether the example applies to the current platform.
    pub platform: bool,
    /// Abort the whole run if this example fails.
    pub fail_fast: bool,
    /// The example is expected to produce a parse error.
    pub error: bool,
    /// Compare against the full CST rendering rather than the S-expression.
    pub cst: bool,
    /// Languages the example should run under (empty name = default).
    pub languages: Vec<Box<str>>,
}
impl Default for TestEntry {
    /// An empty, unnamed group with no children and no backing file.
    fn default() -> Self {
        let name = String::default();
        let children = Vec::default();
        Self::Group {
            name,
            children,
            file_path: None,
        }
    }
}
impl Default for TestAttributes {
    /// Defaults: not skipped, runs on this platform, no fail-fast, not an
    /// error/CST test, and a single unnamed (default) language.
    fn default() -> Self {
        Self {
            skip: false,
            platform: true,
            fail_fast: false,
            error: false,
            cst: false,
            languages: vec![String::new().into_boxed_str()],
        }
    }
}
/// How much per-test parse statistics to show in the summary.
#[derive(ValueEnum, Default, Debug, Copy, Clone, PartialEq, Eq, Serialize)]
pub enum TestStats {
    /// Show stats for every test.
    All,
    /// Show only outlier tests plus the totals (the default).
    #[default]
    OutliersAndTotal,
    /// Show only the totals.
    TotalOnly,
}
/// Options for one `tree-sitter test` invocation, assembled from the CLI.
pub struct TestOptions<'a> {
    /// Root path of the grammar under test.
    pub path: PathBuf,
    /// Enable parser debug logging.
    pub debug: bool,
    /// Produce debug graphs while parsing.
    pub debug_graph: bool,
    /// Only run tests whose name matches this pattern.
    pub include: Option<Regex>,
    /// Skip tests whose name matches this pattern.
    pub exclude: Option<Regex>,
    /// Restrict the run to a single corpus file.
    pub file_name: Option<String>,
    /// Rewrite expected outputs in place instead of comparing.
    pub update: bool,
    /// Open the log output after the run (see the CLI `--open-log` flag).
    pub open_log: bool,
    /// Available languages, keyed by name.
    pub languages: BTreeMap<&'a str, &'a Language>,
    /// Colorize terminal output.
    pub color: bool,
    /// Include field names when printing S-expressions.
    pub show_fields: bool,
    /// Print only the summary overview, not per-test details.
    pub overview_only: bool,
}
/// A stateful object used to collect results from running a grammar's test suite
#[derive(Debug, Default, Serialize, JsonSchema)]
pub struct TestSummary {
    // Parse test results and associated data.
    // Serialized as a flat array of the root group's entries.
    #[schemars(schema_with = "schema_as_array")]
    #[serde(serialize_with = "serialize_as_array")]
    pub parse_results: TestResultHierarchy,
    pub parse_failures: Vec<TestFailure>,
    pub parse_stats: Stats,
    // True when at least one parse test errored (not serialized).
    #[schemars(skip)]
    #[serde(skip)]
    pub has_parse_errors: bool,
    // How much of the parse statistics to display (not serialized).
    #[schemars(skip)]
    #[serde(skip)]
    pub parse_stat_display: TestStats,
    // Other test results (highlights, tags, queries).
    #[schemars(schema_with = "schema_as_array")]
    #[serde(serialize_with = "serialize_as_array")]
    pub highlight_results: TestResultHierarchy,
    #[schemars(schema_with = "schema_as_array")]
    #[serde(serialize_with = "serialize_as_array")]
    pub tag_results: TestResultHierarchy,
    #[schemars(schema_with = "schema_as_array")]
    #[serde(serialize_with = "serialize_as_array")]
    pub query_results: TestResultHierarchy,
    // Data used during construction: the running test counter.
    #[schemars(skip)]
    #[serde(skip)]
    pub test_num: usize,
    // Options passed in from the CLI which control how the summary is displayed
    #[schemars(skip)]
    #[serde(skip)]
    pub color: bool,
    #[schemars(skip)]
    #[serde(skip)]
    pub overview_only: bool,
    #[schemars(skip)]
    #[serde(skip)]
    pub update: bool,
    #[schemars(skip)]
    #[serde(skip)]
    pub json: bool,
}
impl TestSummary {
    /// Builds an empty summary configured with the given display options;
    /// test numbering starts at 1.
    #[must_use]
    pub fn new(
        color: bool,
        stat_display: TestStats,
        parse_update: bool,
        overview_only: bool,
        json_summary: bool,
    ) -> Self {
        let mut summary = Self::default();
        summary.color = color;
        summary.parse_stat_display = stat_display;
        summary.update = parse_update;
        summary.overview_only = overview_only;
        summary.json = json_summary;
        summary.test_num = 1;
        summary
    }
}
/// A tree of test results built in DFS order; see the `impl` for the
/// construction protocol.
#[derive(Debug, Default, JsonSchema)]
pub struct TestResultHierarchy {
    // Top-level groups and cases.
    root_group: Vec<TestResult>,
    // Child indices of the group currently being traversed during
    // construction (a path from the root to the active group).
    traversal_idxs: Vec<usize>,
}
/// Serializes a [`TestResultHierarchy`] as just its root-level entries,
/// hiding the internal traversal bookkeeping.
fn serialize_as_array<S>(results: &TestResultHierarchy, serializer: S) -> Result<S::Ok, S::Error>
where
    S: serde::Serializer,
{
    results.root_group.serialize(serializer)
}
/// JSON-schema counterpart of [`serialize_as_array`]: the hierarchy appears
/// as an array of [`TestResult`].
fn schema_as_array(gen: &mut SchemaGenerator) -> Schema {
    gen.subschema_for::<Vec<TestResult>>()
}
/// Stores arbitrarily nested parent test groups and child cases. Supports creation
/// in DFS traversal order
impl TestResultHierarchy {
    /// Signifies the start of a new group's traversal during construction.
    fn push_traversal(&mut self, idx: usize) {
        self.traversal_idxs.push(idx);
    }
    /// Signifies the end of the current group's traversal during construction.
    /// Must be paired with a prior call to [`TestResultHierarchy::add_group`].
    pub fn pop_traversal(&mut self) {
        self.traversal_idxs.pop();
    }
    /// Adds a new group as a child of the current group. Caller is responsible
    /// for calling [`TestResultHierarchy::pop_traversal`] once the group is done
    /// being traversed.
    pub fn add_group(&mut self, group_name: &str) {
        // The new group's index within its parent, recorded *before* pushing.
        let new_group_idx = self.curr_group_len();
        self.push(TestResult {
            name: group_name.to_string(),
            info: TestInfo::Group {
                children: Vec::new(),
            },
        });
        self.push_traversal(new_group_idx);
    }
    /// Adds a new test example as a child of the current group.
    /// Asserts that `test_case.info` is not [`TestInfo::Group`].
    pub fn add_case(&mut self, test_case: TestResult) {
        assert!(!matches!(test_case.info, TestInfo::Group { .. }));
        self.push(test_case);
    }
    /// Adds a new `TestResult` to the current group.
    fn push(&mut self, result: TestResult) {
        // If there are no traversal steps, we're adding to the root
        if self.traversal_idxs.is_empty() {
            self.root_group.push(result);
            return;
        }
        // Walk the recorded index path down to the active group.
        #[allow(clippy::manual_let_else)]
        let mut curr_group = match self.root_group[self.traversal_idxs[0]].info {
            TestInfo::Group { ref mut children } => children,
            _ => unreachable!(),
        };
        for idx in self.traversal_idxs.iter().skip(1) {
            curr_group = match curr_group[*idx].info {
                TestInfo::Group { ref mut children } => children,
                _ => unreachable!(),
            };
        }
        curr_group.push(result);
    }
    /// Number of children already in the active group (immutable twin of the
    /// traversal walk in [`Self::push`]).
    fn curr_group_len(&self) -> usize {
        if self.traversal_idxs.is_empty() {
            return self.root_group.len();
        }
        #[allow(clippy::manual_let_else)]
        let mut curr_group = match self.root_group[self.traversal_idxs[0]].info {
            TestInfo::Group { ref children } => children,
            _ => unreachable!(),
        };
        for idx in self.traversal_idxs.iter().skip(1) {
            curr_group = match curr_group[*idx].info {
                TestInfo::Group { ref children } => children,
                _ => unreachable!(),
            };
        }
        curr_group.len()
    }
    /// Pre-order DFS over all results, yielding each with its nesting depth
    /// (root entries are depth 0).
    #[allow(clippy::iter_without_into_iter)]
    #[must_use]
    pub fn iter(&self) -> TestResultIterWithDepth<'_> {
        let mut stack = Vec::with_capacity(self.root_group.len());
        for child in self.root_group.iter().rev() {
            stack.push((0, child));
        }
        TestResultIterWithDepth { stack }
    }
}
}
/// Depth-tracking DFS iterator over a [`TestResultHierarchy`];
/// see [`TestResultHierarchy::iter`].
pub struct TestResultIterWithDepth<'a> {
    // Explicit DFS stack of (depth, node); children are pushed in reverse
    // so they pop in source order.
    stack: Vec<(usize, &'a TestResult)>,
}
impl<'a> Iterator for TestResultIterWithDepth<'a> {
    type Item = (usize, &'a TestResult);

    /// Pops the next result; when it is a group, queues its children one
    /// level deeper (reversed so they pop in order), giving pre-order DFS.
    fn next(&mut self) -> Option<Self::Item> {
        let (depth, result) = self.stack.pop()?;
        if let TestInfo::Group { children } = &result.info {
            self.stack
                .extend(children.iter().rev().map(|child| (depth + 1, child)));
        }
        Some((depth, result))
    }
}
/// A single named node in the result tree; `info` is flattened into the
/// serialized object alongside `name`.
#[derive(Debug, Serialize, JsonSchema)]
pub struct TestResult {
    pub name: String,
    #[schemars(flatten)]
    #[serde(flatten)]
    pub info: TestInfo,
}
/// Payload of a [`TestResult`]: a group of children, a parse test, or an
/// assertion (highlight/tag/query) test. Serialized untagged, so JSON output
/// contains only the variant's fields.
#[derive(Debug, Serialize, JsonSchema)]
#[schemars(untagged)]
#[serde(untagged)]
pub enum TestInfo {
    Group {
        children: Vec<TestResult>,
    },
    ParseTest {
        outcome: TestOutcome,
        // True parse rate, adjusted parse rate
        // (only the true rate is serialized; see `serialize_parse_rates`).
        #[schemars(schema_with = "parse_rate_schema")]
        #[serde(serialize_with = "serialize_parse_rates")]
        parse_rate: Option<(f64, f64)>,
        test_num: usize,
    },
    AssertionTest {
        outcome: TestOutcome,
        test_num: usize,
    },
}
/// Serializes only the true (first) parse rate; the adjusted rate is an
/// internal statistic and is dropped from output.
fn serialize_parse_rates<S>(
    parse_rate: &Option<(f64, f64)>,
    serializer: S,
) -> Result<S::Ok, S::Error>
where
    S: serde::Serializer,
{
    match parse_rate.as_ref().map(|(first, _)| first) {
        Some(rate) => serializer.serialize_some(rate),
        None => serializer.serialize_none(),
    }
}
/// Schema override for `parse_rate`: advertised as an optional float because
/// only the true parse rate is serialized (see `serialize_parse_rates`).
///
/// The parameter is named `generator` rather than `gen`, since `gen` is a
/// reserved keyword in Rust 2024.
fn parse_rate_schema(generator: &mut SchemaGenerator) -> Schema {
    generator.subschema_for::<Option<f64>>()
}
/// Result of running one test.
#[derive(Debug, Clone, Eq, PartialEq, Serialize, JsonSchema)]
pub enum TestOutcome {
    // Parse outcomes
    Passed,
    Failed,
    /// Output was rewritten in place (`--update`).
    Updated,
    /// Test carried a skip attribute.
    Skipped,
    /// Test excluded on this platform.
    Platform,
    // Highlight/Tag/Query outcomes
    AssertionPassed { assertion_count: usize },
    AssertionFailed { error: String },
}
impl TestSummary {
    /// Renders the parse-test section of the summary: one line per entry with
    /// a colored pass/fail glyph and optional parse-rate statistics, followed
    /// by either the list of updated tests (`--update`) or detailed diffs of
    /// the failures.
    fn fmt_parse_results(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Accumulate (number of rated parse tests, sum of adjusted rates);
        // groups and tests without a recorded rate are skipped.
        let (count, total_adj_parse_time) = self
            .parse_results
            .iter()
            .filter_map(|(_, result)| match result.info {
                TestInfo::Group { .. } => None,
                TestInfo::ParseTest { parse_rate, .. } => parse_rate,
                _ => unreachable!(),
            })
            .fold((0usize, 0.0f64), |(count, rate_accum), (_, adj_rate)| {
                (count + 1, rate_accum + adj_rate)
            });
        // NOTE(review): when `count` is 0 this is NaN; the later `<`
        // comparison is then always false so no slow-parse warning fires —
        // presumably intended, but worth confirming.
        let avg = total_adj_parse_time / count as f64;
        let std_dev = {
            // Population variance of the adjusted parse rates.
            let variance = self
                .parse_results
                .iter()
                .filter_map(|(_, result)| match result.info {
                    TestInfo::Group { .. } => None,
                    TestInfo::ParseTest { parse_rate, .. } => parse_rate,
                    _ => unreachable!(),
                })
                .map(|(_, rate_i)| (rate_i - avg).powi(2))
                .sum::<f64>()
                / count as f64;
            variance.sqrt()
        };
        // One line per entry, indented by nesting depth.
        for (depth, entry) in self.parse_results.iter() {
            write!(f, "{}", " ".repeat(depth + 1))?;
            match &entry.info {
                TestInfo::Group { .. } => writeln!(f, "{}:", entry.name)?,
                TestInfo::ParseTest {
                    outcome,
                    parse_rate,
                    test_num,
                } => {
                    let (color, result_char) = match outcome {
                        TestOutcome::Passed => (AnsiColor::Green, "✓"),
                        TestOutcome::Failed => (AnsiColor::Red, "✗"),
                        TestOutcome::Updated => (AnsiColor::Blue, "✓"),
                        TestOutcome::Skipped => (AnsiColor::Yellow, "⌀"),
                        TestOutcome::Platform => (AnsiColor::Magenta, "⌀"),
                        _ => unreachable!(),
                    };
                    // Optional per-test rate display, plus a warning when the
                    // adjusted rate is a statistical outlier.
                    let stat_display = match (self.parse_stat_display, parse_rate) {
                        (TestStats::TotalOnly, _) | (_, None) => String::new(),
                        (display, Some((true_rate, adj_rate))) => {
                            let mut stats = if display == TestStats::All {
                                format!(" ({true_rate:.3} bytes/ms)")
                            } else {
                                String::new()
                            };
                            // 3 standard deviations below the mean, aka the "Empirical Rule"
                            if *adj_rate < 3.0f64.mul_add(-std_dev, avg) {
                                stats += &paint(
                                    self.color.then_some(AnsiColor::Yellow),
                                    &format!(
                                        " -- Warning: Slow parse rate ({true_rate:.3} bytes/ms)"
                                    ),
                                );
                            }
                            stats
                        }
                    };
                    writeln!(
                        f,
                        "{test_num:>3}. {result_char} {}{stat_display}",
                        paint(self.color.then_some(color), &entry.name),
                    )?;
                }
                // Assertion tests are rendered by `Display for TestSummary`,
                // never stored in `parse_results`.
                TestInfo::AssertionTest { .. } => unreachable!(),
            }
        }
        // Parse failure info
        if !self.parse_failures.is_empty() && self.update && !self.has_parse_errors {
            // Update mode with no hard errors: list the rewritten tests.
            writeln!(
                f,
                "\n{} update{}:\n",
                self.parse_failures.len(),
                if self.parse_failures.len() == 1 {
                    ""
                } else {
                    "s"
                }
            )?;
            for (i, TestFailure { name, .. }) in self.parse_failures.iter().enumerate() {
                writeln!(f, " {}. {name}", i + 1)?;
            }
        } else if !self.parse_failures.is_empty() && !self.overview_only {
            if !self.has_parse_errors {
                writeln!(
                    f,
                    "\n{} failure{}:",
                    self.parse_failures.len(),
                    if self.parse_failures.len() == 1 {
                        ""
                    } else {
                        "s"
                    }
                )?;
            }
            // Color legend for the diffs below.
            if self.color {
                DiffKey.fmt(f)?;
            }
            for (
                i,
                TestFailure {
                    name,
                    actual,
                    expected,
                    is_cst,
                },
            ) in self.parse_failures.iter().enumerate()
            {
                if expected == "NO ERROR" {
                    // `:error` test that parsed cleanly: show what was produced.
                    writeln!(f, "\n {}. {name}:\n", i + 1)?;
                    writeln!(f, " Expected an ERROR node, but got:")?;
                    let actual = if *is_cst {
                        actual
                    } else {
                        &format_sexp(actual, 2)
                    };
                    writeln!(
                        f,
                        " {}",
                        paint(self.color.then_some(AnsiColor::Red), actual)
                    )?;
                } else {
                    writeln!(f, "\n {}. {name}:", i + 1)?;
                    // CST output is pre-formatted; s-expressions are pretty-
                    // printed before diffing.
                    if *is_cst {
                        writeln!(
                            f,
                            "{}",
                            TestDiff::new(actual, expected).with_color(self.color)
                        )?;
                    } else {
                        writeln!(
                            f,
                            "{}",
                            TestDiff::new(&format_sexp(actual, 2), &format_sexp(expected, 2))
                                .with_color(self.color,)
                        )?;
                    }
                }
            }
        } else {
            writeln!(f)?;
        }
        Ok(())
    }
}
impl std::fmt::Display for TestSummary {
    /// Renders the full summary: parse results first, then one section per
    /// non-empty assertion hierarchy (highlighting, tags, queries).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.fmt_parse_results(f)?;
        // Shared renderer for the three assertion hierarchies; captures `f`.
        let mut render_assertion_results =
            |name: &str, results: &TestResultHierarchy| -> std::fmt::Result {
                writeln!(f, "{name}:")?;
                for (depth, entry) in results.iter() {
                    write!(f, "{}", " ".repeat(depth + 2))?;
                    match &entry.info {
                        TestInfo::Group { .. } => writeln!(f, "{}", entry.name)?,
                        TestInfo::AssertionTest { outcome, test_num } => match outcome {
                            TestOutcome::AssertionPassed { assertion_count } => writeln!(
                                f,
                                "{:>3}. ✓ {} ({assertion_count} assertions)",
                                test_num,
                                paint(self.color.then_some(AnsiColor::Green), &entry.name)
                            )?,
                            TestOutcome::AssertionFailed { error } => {
                                writeln!(
                                    f,
                                    "{:>3}. ✗ {}",
                                    test_num,
                                    paint(self.color.then_some(AnsiColor::Red), &entry.name)
                                )?;
                                // Error details on a continuation line.
                                writeln!(f, "{} {error}", " ".repeat(depth + 1))?;
                            }
                            // Parse outcomes never appear in assertion trees.
                            _ => unreachable!(),
                        },
                        TestInfo::ParseTest { .. } => unreachable!(),
                    }
                }
                Ok(())
            };
        if !self.highlight_results.root_group.is_empty() {
            render_assertion_results("syntax highlighting", &self.highlight_results)?;
        }
        if !self.tag_results.root_group.is_empty() {
            render_assertion_results("tags", &self.tag_results)?;
        }
        if !self.query_results.root_group.is_empty() {
            render_assertion_results("queries", &self.query_results)?;
        }
        Ok(())
    }
}
/// Parses the corpus at `opts.path` and runs every test in it, recording
/// outcomes into `test_summary`.
///
/// Debug modes: `--debug-graph` logs dot graphs to `log.html`; `--debug`
/// streams lexer/parser log messages to stderr.
///
/// # Errors
///
/// Returns an error when the corpus cannot be parsed, when any test fails
/// (an empty-message error, since details were already printed), or when
/// `--update` cannot fix tests containing unexpected `ERROR`/`MISSING` nodes.
pub fn run_tests_at_path(
    parser: &mut Parser,
    opts: &TestOptions,
    test_summary: &mut TestSummary,
) -> Result<()> {
    let test_entry = parse_tests(&opts.path)?;
    // Kept alive for the duration of the run so graph logging stays active.
    let mut _log_session = None;
    if opts.debug_graph {
        _log_session = Some(util::log_graphs(parser, "log.html", opts.open_log)?);
    } else if opts.debug {
        parser.set_logger(Some(Box::new(|log_type, message| {
            // Indent lexer messages to visually separate them from parser ones.
            if log_type == LogType::Lex {
                io::stderr().write_all(b" ").unwrap();
            }
            writeln!(&mut io::stderr(), "{message}").unwrap();
        })));
    }
    let mut corrected_entries = Vec::new();
    run_tests(
        parser,
        test_entry,
        opts,
        test_summary,
        &mut corrected_entries,
        true,
    )?;
    parser.stop_printing_dot_graphs();
    if test_summary.parse_failures.is_empty() || (opts.update && !test_summary.has_parse_errors) {
        Ok(())
    } else if opts.update && test_summary.has_parse_errors {
        Err(anyhow!(indoc! {"
            Some tests failed to parse with unexpected `ERROR` or `MISSING` nodes, as shown above, and cannot be updated automatically.
            Either fix the grammar or manually update the tests if this is expected."}))
    } else {
        // Failures were already printed; signal failure without extra text.
        Err(anyhow!(""))
    }
}
/// Compiles every `.scm` query file under `path` against `language` so that
/// query syntax errors surface with the offending file name.
///
/// Succeeds silently when `path` does not exist.
pub fn check_queries_at_path(language: &Language, path: &Path) -> Result<()> {
    if path.exists() {
        for entry in WalkDir::new(path)
            .into_iter()
            .filter_map(std::result::Result::ok)
            .filter(|e| {
                e.file_type().is_file()
                    && e.path().extension().and_then(OsStr::to_str) == Some("scm")
                    // NOTE(review): `Path::starts_with` compares whole path
                    // components against the literal ".", which is effectively
                    // never true for WalkDir entries rooted at `path` — if the
                    // intent is to skip hidden files, this should inspect the
                    // file name instead. Confirm intent before changing.
                    && !e.path().starts_with(".")
            })
        {
            let filepath = entry.file_name().to_str().unwrap_or("");
            let content = fs::read_to_string(entry.path())
                .with_context(|| format!("Error reading query file {filepath:?}"))?;
            Query::new(language, &content)
                .with_context(|| format!("Error in query file {filepath:?}"))?;
        }
    }
    Ok(())
}
/// Zero-sized marker whose `Display` impl renders the color legend shown
/// above colored test diffs.
pub struct DiffKey;
impl std::fmt::Display for DiffKey {
    /// Writes the legend explaining diff colors: plain for correct lines,
    /// green for expected-only lines, red for unexpected ones.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let expected = paint(Some(AnsiColor::Green), "expected");
        let unexpected = paint(Some(AnsiColor::Red), "unexpected");
        write!(f, "\ncorrect / {expected} / {unexpected}")
    }
}
impl DiffKey {
    /// Writes [`DiffKey`] to stdout
    pub fn print() {
        // `DiffKey` is a unit struct, so `Self` is also its value.
        println!("{}", Self);
    }
}
/// A line-based diff between an actual and an expected test output,
/// rendered by its `Display` impl (optionally colored).
pub struct TestDiff<'a> {
    pub actual: &'a str,
    pub expected: &'a str,
    // Whether to render insertions/deletions in color (true) or with
    // `+`/`-` prefixes (false).
    pub color: bool,
}
impl<'a> TestDiff<'a> {
    /// Builds a diff between `actual` and `expected`; color defaults to on.
    #[must_use]
    pub const fn new(actual: &'a str, expected: &'a str) -> Self {
        Self {
            actual,
            expected,
            color: true,
        }
    }
    /// Builder-style toggle for colored rendering.
    #[must_use]
    pub const fn with_color(mut self, color: bool) -> Self {
        self.color = color;
        self
    }
}
impl std::fmt::Display for TestDiff<'_> {
    /// Renders a line diff of `actual` vs `expected`: equal lines verbatim
    /// (indented when uncolored), insertions green or `+`-prefixed,
    /// deletions red or `-`-prefixed.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let diff = TextDiff::from_lines(self.actual, self.expected);
        for diff in diff.iter_all_changes() {
            match diff.tag() {
                ChangeTag::Equal => {
                    if self.color {
                        write!(f, "{diff}")?;
                    } else {
                        // Indent so equal lines align with +/- prefixed ones.
                        write!(f, " {diff}")?;
                    }
                }
                ChangeTag::Insert => {
                    if self.color {
                        // `as_str` is Some here: the diff was built from &str
                        // input, so every change is backed by string data.
                        write!(
                            f,
                            "{}",
                            paint(Some(AnsiColor::Green), diff.as_str().unwrap())
                        )?;
                    } else {
                        write!(f, "+{diff}")?;
                    }
                    // Ensure the final change still ends with a newline.
                    if diff.missing_newline() {
                        writeln!(f)?;
                    }
                }
                ChangeTag::Delete => {
                    if self.color {
                        write!(f, "{}", paint(Some(AnsiColor::Red), diff.as_str().unwrap()))?;
                    } else {
                        write!(f, "-{diff}")?;
                    }
                    if diff.missing_newline() {
                        writeln!(f)?;
                    }
                }
            }
        }
        Ok(())
    }
}
/// Record of one failed (or, under `--update`, rewritten) parse test.
#[derive(Debug, Serialize, JsonSchema)]
pub struct TestFailure {
    name: String,
    actual: String,
    // For `:error` tests this holds the sentinel "NO ERROR".
    expected: String,
    // True when outputs are CST renderings rather than s-expressions.
    is_cst: bool,
}
impl TestFailure {
    /// Builds a failure record, accepting any string-convertible inputs.
    fn new<T, U, V>(name: T, actual: U, expected: V, is_cst: bool) -> Self
    where
        T: Into<String>,
        U: Into<String>,
        V: Into<String>,
    {
        let (name, actual, expected) = (name.into(), actual.into(), expected.into());
        Self {
            name,
            actual,
            expected,
            is_cst,
        }
    }
}
/// A corrected corpus entry queued for writing back during `--update`,
/// preserving the original header/divider delimiter lengths and attributes.
struct TestCorrection {
    name: String,
    input: String,
    output: String,
    attributes_str: String,
    // Length of the `===`-style header delimiter in the source corpus.
    header_delim_len: usize,
    // Length of the `---`-style divider delimiter in the source corpus.
    divider_delim_len: usize,
}
impl TestCorrection {
    /// Builds a correction record, accepting any string-convertible inputs.
    fn new<T, U, V, W>(
        name: T,
        input: U,
        output: V,
        attributes_str: W,
        header_delim_len: usize,
        divider_delim_len: usize,
    ) -> Self
    where
        T: Into<String>,
        U: Into<String>,
        V: Into<String>,
        W: Into<String>,
    {
        Self {
            name: name.into(),
            input: input.into(),
            output: output.into(),
            attributes_str: attributes_str.into(),
            header_delim_len,
            divider_delim_len,
        }
    }
}
/// This will return false if we want to "fail fast". It will bail and not parse any more tests.
fn run_tests(
parser: &mut Parser,
test_entry: TestEntry,
opts: &TestOptions,
test_summary: &mut TestSummary,
corrected_entries: &mut Vec<TestCorrection>,
is_root: bool,
) -> Result<bool> {
match test_entry {
TestEntry::Example {
name,
input,
output,
header_delim_len,
divider_delim_len,
has_fields,
attributes_str,
attributes,
..
} => {
if attributes.skip {
test_summary.parse_results.add_case(TestResult {
name: name.clone(),
info: TestInfo::ParseTest {
outcome: TestOutcome::Skipped,
parse_rate: None,
test_num: test_summary.test_num,
},
});
test_summary.test_num += 1;
return Ok(true);
}
if !attributes.platform {
test_summary.parse_results.add_case(TestResult {
name: name.clone(),
info: TestInfo::ParseTest {
outcome: TestOutcome::Platform,
parse_rate: None,
test_num: test_summary.test_num,
},
});
test_summary.test_num += 1;
return Ok(true);
}
for (i, language_name) in attributes.languages.iter().enumerate() {
if !language_name.is_empty() {
let language = opts
.languages
.get(language_name.as_ref())
.ok_or_else(|| anyhow!("Language not found: {language_name}"))?;
parser.set_language(language)?;
}
let start = std::time::Instant::now();
let tree = parser.parse(&input, None).unwrap();
let parse_rate = {
let parse_time = start.elapsed();
let true_parse_rate = tree.root_node().byte_range().len() as f64
/ (parse_time.as_nanos() as f64 / 1_000_000.0);
let adj_parse_rate = adjusted_parse_rate(&tree, parse_time);
test_summary.parse_stats.total_parses += 1;
test_summary.parse_stats.total_duration += parse_time;
test_summary.parse_stats.total_bytes += tree.root_node().byte_range().len();
Some((true_parse_rate, adj_parse_rate))
};
if attributes.error {
if tree.root_node().has_error() {
test_summary.parse_results.add_case(TestResult {
name: name.clone(),
info: TestInfo::ParseTest {
outcome: TestOutcome::Passed,
parse_rate,
test_num: test_summary.test_num,
},
});
test_summary.parse_stats.successful_parses += 1;
if opts.update {
let input = String::from_utf8(input.clone()).unwrap();
let output = if attributes.cst {
output.clone()
} else {
format_sexp(&output, 0)
};
corrected_entries.push(TestCorrection::new(
&name,
input,
output,
&attributes_str,
header_delim_len,
divider_delim_len,
));
}
} else {
if opts.update {
let input = String::from_utf8(input.clone()).unwrap();
// Keep the original `expected` output if the actual output has no error
let output = if attributes.cst {
output.clone()
} else {
format_sexp(&output, 0)
};
corrected_entries.push(TestCorrection::new(
&name,
input,
output,
&attributes_str,
header_delim_len,
divider_delim_len,
));
}
test_summary.parse_results.add_case(TestResult {
name: name.clone(),
info: TestInfo::ParseTest {
outcome: TestOutcome::Failed,
parse_rate,
test_num: test_summary.test_num,
},
});
let actual = if attributes.cst {
render_test_cst(&input, &tree)?
} else {
tree.root_node().to_sexp()
};
test_summary.parse_failures.push(TestFailure::new(
&name,
actual,
"NO ERROR",
attributes.cst,
));
}
if attributes.fail_fast {
return Ok(false);
}
} else {
let mut actual = if attributes.cst {
render_test_cst(&input, &tree)?
} else {
tree.root_node().to_sexp()
};
if !(attributes.cst || opts.show_fields || has_fields) {
actual = strip_sexp_fields(&actual);
}
if actual == output {
test_summary.parse_results.add_case(TestResult {
name: name.clone(),
info: TestInfo::ParseTest {
outcome: TestOutcome::Passed,
parse_rate,
test_num: test_summary.test_num,
},
});
test_summary.parse_stats.successful_parses += 1;
if opts.update {
let input = String::from_utf8(input.clone()).unwrap();
let output = if attributes.cst {
actual
} else {
format_sexp(&output, 0)
};
corrected_entries.push(TestCorrection::new(
&name,
input,
output,
&attributes_str,
header_delim_len,
divider_delim_len,
));
}
} else {
if opts.update {
let input = String::from_utf8(input.clone()).unwrap();
let (expected_output, actual_output) = if attributes.cst {
(output.clone(), actual.clone())
} else {
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | true |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/highlight.rs | crates/cli/src/highlight.rs | use std::{
collections::{BTreeMap, HashSet},
fmt::Write,
fs,
io::{self, Write as _},
path::{self, Path, PathBuf},
str,
sync::{atomic::AtomicUsize, Arc},
time::Instant,
};
use ansi_colours::{ansi256_from_rgb, rgb_from_ansi256};
use anstyle::{Ansi256Color, AnsiColor, Color, Effects, RgbColor};
use anyhow::Result;
use log::{info, warn};
use serde::{ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer};
use serde_json::{json, Value};
use tree_sitter_highlight::{HighlightConfiguration, HighlightEvent, Highlighter, HtmlRenderer};
use tree_sitter_loader::Loader;
/// Opening HTML (doctype, title, and base CSS) emitted before the
/// per-capture style rules when rendering highlights as HTML.
pub const HTML_HEAD_HEADER: &str = "
<!doctype HTML>
<head>
<title>Tree-sitter Highlighting</title>
<style>
body {
font-family: monospace
}
.line-number {
user-select: none;
text-align: right;
color: rgba(27,31,35,.3);
padding: 0 10px;
}
.line {
white-space: pre;
}
</style>";
/// Closes the head and opens the body; emitted after the style rules.
pub const HTML_BODY_HEADER: &str = "
</head>
<body>
";
/// Closes the HTML document after the highlighted table.
pub const HTML_FOOTER: &str = "
</body>
";
/// A resolved highlight style: the terminal form plus an optional CSS
/// equivalent used by the HTML renderer.
#[derive(Debug, Default)]
pub struct Style {
    /// Style applied when rendering to a terminal.
    pub ansi: anstyle::Style,
    /// CSS declarations for the HTML renderer; `None` when no styling applies.
    pub css: Option<String>,
}
/// A highlight theme: parallel vectors where `highlight_names[i]` is styled
/// by `styles[i]`.
#[derive(Debug)]
pub struct Theme {
    pub styles: Vec<Style>,
    pub highlight_names: Vec<String>,
}
/// On-disk wrapper around a [`Theme`]; a missing `theme` key yields the
/// default theme.
#[derive(Default, Deserialize, Serialize)]
pub struct ThemeConfig {
    #[serde(default)]
    pub theme: Theme,
}
impl Theme {
    /// Loads a theme from a JSON file.
    ///
    /// NOTE(review): a file that exists but contains malformed JSON silently
    /// falls back to the default theme via `unwrap_or_default` — confirm this
    /// is intended rather than surfacing the parse error.
    pub fn load(path: &path::Path) -> io::Result<Self> {
        let json = fs::read_to_string(path)?;
        Ok(serde_json::from_str(&json).unwrap_or_default())
    }
    /// Style used for text outside any highlight capture (no color/effects).
    #[must_use]
    pub fn default_style(&self) -> Style {
        Style::default()
    }
}
impl<'de> Deserialize<'de> for Theme {
    /// Deserializes a flat `capture name -> style value` map. Input that is
    /// not such a map yields an empty theme rather than an error.
    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let Ok(colors) = BTreeMap::<String, Value>::deserialize(deserializer) else {
            return Ok(Self {
                styles: Vec::new(),
                highlight_names: Vec::new(),
            });
        };
        let mut theme = Self {
            styles: Vec::with_capacity(colors.len()),
            highlight_names: Vec::with_capacity(colors.len()),
        };
        // BTreeMap iteration keeps names and styles in a stable sorted order.
        for (name, style_value) in colors {
            let mut style = Style::default();
            parse_style(&mut style, style_value);
            theme.highlight_names.push(name);
            theme.styles.push(style);
        }
        Ok(theme)
    }
}
impl Serialize for Theme {
    /// Serializes back to the flat map form accepted by `Deserialize`: a bare
    /// color value when the style has no effects, an object with `color` /
    /// `bold` / `italic` / `underline` keys otherwise, or `null` for an
    /// unstyled capture.
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut map = serializer.serialize_map(Some(self.styles.len()))?;
        for (name, style) in self.highlight_names.iter().zip(&self.styles) {
            let style = &style.ansi;
            // Map the foreground color back to its JSON representation.
            let color = style.get_fg_color().map(|color| match color {
                Color::Ansi(color) => match color {
                    AnsiColor::Black => json!("black"),
                    AnsiColor::Blue => json!("blue"),
                    AnsiColor::Cyan => json!("cyan"),
                    AnsiColor::Green => json!("green"),
                    AnsiColor::Magenta => json!("purple"),
                    AnsiColor::Red => json!("red"),
                    AnsiColor::White => json!("white"),
                    AnsiColor::Yellow => json!("yellow"),
                    // Bright variants are never produced by `parse_color`.
                    _ => unreachable!(),
                },
                Color::Ansi256(Ansi256Color(n)) => json!(n),
                Color::Rgb(RgbColor(r, g, b)) => json!(format!("#{r:x?}{g:x?}{b:x?}")),
            });
            let effects = style.get_effects();
            if effects.contains(Effects::BOLD)
                || effects.contains(Effects::ITALIC)
                || effects.contains(Effects::UNDERLINE)
            {
                // Any effect forces the object form.
                let mut style_json = BTreeMap::new();
                if let Some(color) = color {
                    style_json.insert("color", color);
                }
                if effects.contains(Effects::BOLD) {
                    style_json.insert("bold", Value::Bool(true));
                }
                if effects.contains(Effects::ITALIC) {
                    style_json.insert("italic", Value::Bool(true));
                }
                if effects.contains(Effects::UNDERLINE) {
                    style_json.insert("underline", Value::Bool(true));
                }
                map.serialize_entry(&name, &style_json)?;
            } else if let Some(color) = color {
                map.serialize_entry(&name, &color)?;
            } else {
                map.serialize_entry(&name, &Value::Null)?;
            }
        }
        map.end()
    }
}
impl Default for Theme {
    /// Built-in theme used when no theme file is configured; values are
    /// 256-color palette indices or objects with effect flags.
    fn default() -> Self {
        serde_json::from_value(json!({
            "attribute": {"color": 124, "italic": true},
            "comment": {"color": 245, "italic": true},
            "constant": 94,
            "constant.builtin": {"color": 94, "bold": true},
            "constructor": 136,
            "embedded": null,
            "function": 26,
            "function.builtin": {"color": 26, "bold": true},
            "keyword": 56,
            "module": 136,
            "number": {"color": 94, "bold": true},
            "operator": {"color": 239, "bold": true},
            "property": 124,
            "property.builtin": {"color": 124, "bold": true},
            "punctuation": 239,
            "punctuation.bracket": 239,
            "punctuation.delimiter": 239,
            "punctuation.special": 239,
            "string": 28,
            "string.special": 30,
            "tag": 18,
            "type": 23,
            "type.builtin": {"color": 23, "bold": true},
            "variable": 252,
            "variable.builtin": {"color": 252, "bold": true},
            "variable.parameter": {"color": 252, "underline": true}
        }))
        // Theme's Deserialize is infallible for valid JSON, so this is safe.
        .unwrap()
    }
}
/// Applies a JSON theme value to `style`: either an object with `color` /
/// `bold` / `italic` / `underline` keys, or a bare color value.
///
/// After parsing, RGB colors are downgraded to the nearest 256-color palette
/// entry unless `$COLORTERM` advertises truecolor support.
fn parse_style(style: &mut Style, json: Value) {
    match json {
        Value::Object(entries) => {
            for (property_name, value) in entries {
                // Effect flags only apply when explicitly set to `true`.
                let enabled = value == Value::Bool(true);
                match property_name.as_str() {
                    "bold" if enabled => style.ansi = style.ansi.bold(),
                    "italic" if enabled => style.ansi = style.ansi.italic(),
                    "underline" if enabled => style.ansi = style.ansi.underline(),
                    "color" => {
                        if let Some(color) = parse_color(value) {
                            style.ansi = style.ansi.fg_color(Some(color));
                        }
                    }
                    // Unknown keys (and falsy effect flags) are ignored.
                    _ => {}
                }
            }
            style.css = Some(style_to_css(style.ansi));
        }
        other => {
            if let Some(color) = parse_color(other) {
                style.ansi = style.ansi.fg_color(Some(color));
                style.css = Some(style_to_css(style.ansi));
            } else {
                style.css = None;
            }
        }
    }
    // Terminals without truecolor support get the nearest palette color;
    // the CSS form keeps the exact RGB value.
    if let Some(Color::Rgb(RgbColor(red, green, blue))) = style.ansi.get_fg_color() {
        if !terminal_supports_truecolor() {
            let ansi256 = Color::Ansi256(Ansi256Color(ansi256_from_rgb((red, green, blue))));
            style.ansi = style.ansi.fg_color(Some(ansi256));
        }
    }
}
/// Converts a JSON theme value into a terminal color: a 256-color palette
/// index (number), one of the eight named ANSI colors, or a `#rrggbb` hex
/// string. Anything else yields `None`.
fn parse_color(json: Value) -> Option<Color> {
    match json {
        // NOTE(review): `as u8` silently truncates indices above 255 —
        // presumably theme files never contain such values; confirm.
        Value::Number(n) => n.as_u64().map(|n| Color::Ansi256(Ansi256Color(n as u8))),
        Value::String(s) => match s.to_lowercase().as_str() {
            "black" => Some(Color::Ansi(AnsiColor::Black)),
            "blue" => Some(Color::Ansi(AnsiColor::Blue)),
            "cyan" => Some(Color::Ansi(AnsiColor::Cyan)),
            "green" => Some(Color::Ansi(AnsiColor::Green)),
            "purple" => Some(Color::Ansi(AnsiColor::Magenta)),
            "red" => Some(Color::Ansi(AnsiColor::Red)),
            "white" => Some(Color::Ansi(AnsiColor::White)),
            "yellow" => Some(Color::Ansi(AnsiColor::Yellow)),
            // `map` replaces the previous manual `if let … Some … else None`.
            s => hex_string_to_rgb(s)
                .map(|(red, green, blue)| Color::Rgb(RgbColor(red, green, blue))),
        },
        _ => None,
    }
}
/// Parses a `#rrggbb` hex color (case-insensitive) into an RGB triple.
///
/// Like the original, characters after the first six hex digits are ignored.
/// Unlike the original, malformed input can never panic: the old code sliced
/// the string by byte index (`&s[1..3]` …), which panics when a slice
/// boundary falls inside a multi-byte UTF-8 character; `str::get` returns
/// `None` in that case instead.
fn hex_string_to_rgb(s: &str) -> Option<(u8, u8, u8)> {
    let digits = s.strip_prefix('#')?;
    // `get` is None for out-of-range or non-char-boundary ranges, and
    // `from_str_radix` rejects anything that is not ASCII hex.
    let red = u8::from_str_radix(digits.get(0..2)?, 16).ok()?;
    let green = u8::from_str_radix(digits.get(2..4)?, 16).ok()?;
    let blue = u8::from_str_radix(digits.get(4..6)?, 16).ok()?;
    Some((red, green, blue))
}
/// Renders `style` as CSS declarations: effect properties first, then the
/// foreground color (emitted without a trailing semicolon; the HTML writer
/// appends one).
fn style_to_css(style: anstyle::Style) -> String {
    let mut css = String::new();
    let effects = style.get_effects();
    for (effect, declaration) in [
        (Effects::UNDERLINE, "text-decoration: underline;"),
        (Effects::BOLD, "font-weight: bold;"),
        (Effects::ITALIC, "font-style: italic;"),
    ] {
        if effects.contains(effect) {
            css.push_str(declaration);
        }
    }
    if let Some(color) = style.get_fg_color() {
        write_color(&mut css, color);
    }
    css
}
/// Appends a CSS `color: …` declaration for `color` to `buffer`.
fn write_color(buffer: &mut String, color: Color) {
    match color {
        // The eight base ANSI colors map to CSS color keywords.
        Color::Ansi(color) => match color {
            AnsiColor::Black => write!(buffer, "color: black").unwrap(),
            AnsiColor::Red => write!(buffer, "color: red").unwrap(),
            AnsiColor::Green => write!(buffer, "color: green").unwrap(),
            AnsiColor::Yellow => write!(buffer, "color: yellow").unwrap(),
            AnsiColor::Blue => write!(buffer, "color: blue").unwrap(),
            AnsiColor::Magenta => write!(buffer, "color: purple").unwrap(),
            AnsiColor::Cyan => write!(buffer, "color: cyan").unwrap(),
            AnsiColor::White => write!(buffer, "color: white").unwrap(),
            // Bright ANSI variants are never produced by `parse_color`,
            // so themed styles cannot reach this arm.
            _ => unreachable!(),
        },
        // 256-color palette entries are expanded to their RGB value.
        Color::Ansi256(Ansi256Color(n)) => {
            let (r, g, b) = rgb_from_ansi256(n);
            write!(buffer, "color: #{r:02x}{g:02x}{b:02x}").unwrap();
        }
        Color::Rgb(RgbColor(r, g, b)) => write!(buffer, "color: #{r:02x}{g:02x}{b:02x}").unwrap(),
    }
}
/// Reports whether `$COLORTERM` advertises 24-bit color support.
fn terminal_supports_truecolor() -> bool {
    matches!(
        std::env::var("COLORTERM").as_deref(),
        Ok("truecolor") | Ok("24bit")
    )
}
/// Options controlling the `highlight` command.
pub struct HighlightOptions {
    pub theme: Theme,
    /// Verify capture names against a standard list instead of just rendering.
    pub check: bool,
    /// Optional file listing the allowed capture names for `check`.
    pub captures_path: Option<PathBuf>,
    /// Emit `style='…'` attributes instead of class names in HTML output.
    pub inline_styles: bool,
    /// Render HTML rather than ANSI terminal output.
    pub html: bool,
    /// Suppress headers/footers and per-file names.
    pub quiet: bool,
    pub print_time: bool,
    /// Shared flag polled by the highlighter to cancel long runs.
    pub cancellation_flag: Arc<AtomicUsize>,
}
/// Highlights the file at `path` with `config`, writing either HTML or
/// ANSI-styled text to stdout according to `opts`.
///
/// When `opts.check` is set, first reports any capture names that do not
/// conform to the standard list (optionally read from `opts.captures_path`).
///
/// # Errors
///
/// Returns an error for unreadable input, a failed highlight run, or a
/// failed write to stdout.
pub fn highlight(
    loader: &Loader,
    path: &Path,
    name: &str,
    config: &HighlightConfiguration,
    print_name: bool,
    opts: &HighlightOptions,
) -> Result<()> {
    if opts.check {
        let names = if let Some(path) = opts.captures_path.as_deref() {
            // The captures file lists one name per line; blank lines and
            // `;` comments are skipped, and inline `;` comments trimmed.
            let file = fs::read_to_string(path)?;
            let capture_names = file
                .lines()
                .filter_map(|line| {
                    if line.trim().is_empty() || line.trim().starts_with(';') {
                        return None;
                    }
                    line.split(';').next().map(|s| s.trim().trim_matches('"'))
                })
                .collect::<HashSet<_>>();
            config.nonconformant_capture_names(&capture_names)
        } else {
            config.nonconformant_capture_names(&HashSet::new())
        };
        if names.is_empty() {
            info!("All highlight captures conform to standards.");
        } else {
            warn!(
                "Non-standard highlight {} detected:\n* {}",
                if names.len() > 1 {
                    "captures"
                } else {
                    "capture"
                },
                names.join("\n* ")
            );
        }
    }
    let source = fs::read(path)?;
    // Lock stdout once for the whole render instead of per write.
    let stdout = io::stdout();
    let mut stdout = stdout.lock();
    let time = Instant::now();
    let mut highlighter = Highlighter::new();
    let events =
        highlighter.highlight(config, &source, Some(&opts.cancellation_flag), |string| {
            loader.highlight_config_for_injection_string(string)
        })?;
    let theme = &opts.theme;
    if !opts.quiet && print_name {
        writeln!(&mut stdout, "{name}")?;
    }
    if opts.html {
        if !opts.quiet {
            // Emit the document head with one CSS rule per styled capture.
            writeln!(&mut stdout, "{HTML_HEAD_HEADER}")?;
            writeln!(&mut stdout, " <style>")?;
            let names = theme.highlight_names.iter();
            let styles = theme.styles.iter();
            for (name, style) in names.zip(styles) {
                if let Some(css) = &style.css {
                    writeln!(&mut stdout, " .{name} {{ {css}; }}")?;
                }
            }
            writeln!(&mut stdout, " </style>")?;
            writeln!(&mut stdout, "{HTML_BODY_HEADER}")?;
        }
        let mut renderer = HtmlRenderer::new();
        renderer.render(events, &source, &move |highlight, output| {
            if opts.inline_styles {
                // Inline form: style='…' with the capture's CSS (or empty).
                output.extend(b"style='");
                output.extend(
                    theme.styles[highlight.0]
                        .css
                        .as_ref()
                        .map_or_else(|| "".as_bytes(), |css_style| css_style.as_bytes()),
                );
                output.extend(b"'");
            } else {
                // Class form: dotted capture names become space-separated
                // class lists (e.g. `function.builtin` -> `function builtin`).
                output.extend(b"class='");
                let mut parts = theme.highlight_names[highlight.0].split('.').peekable();
                while let Some(part) = parts.next() {
                    output.extend(part.as_bytes());
                    if parts.peek().is_some() {
                        output.extend(b" ");
                    }
                }
                output.extend(b"'");
            }
        })?;
        if !opts.quiet {
            // Line-numbered table of the rendered lines, then the footer.
            writeln!(&mut stdout, "<table>")?;
            for (i, line) in renderer.lines().enumerate() {
                writeln!(
                    &mut stdout,
                    "<tr><td class=line-number>{}</td><td class=line>{line}</td></tr>",
                    i + 1,
                )?;
            }
            writeln!(&mut stdout, "</table>")?;
            writeln!(&mut stdout, "{HTML_FOOTER}")?;
        }
    } else {
        // Terminal rendering: track nested highlights on a stack and style
        // each source span with the innermost active style.
        let mut style_stack = vec![theme.default_style().ansi];
        for event in events {
            match event? {
                HighlightEvent::HighlightStart(highlight) => {
                    style_stack.push(theme.styles[highlight.0].ansi);
                }
                HighlightEvent::HighlightEnd => {
                    style_stack.pop();
                }
                HighlightEvent::Source { start, end } => {
                    let style = style_stack.last().unwrap();
                    // `{style}` emits the enable sequence, `{style:#}` the reset.
                    write!(&mut stdout, "{style}").unwrap();
                    stdout.write_all(&source[start..end])?;
                    write!(&mut stdout, "{style:#}").unwrap();
                }
            }
        }
    }
    if opts.print_time {
        info!("Time: {}ms", time.elapsed().as_millis());
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    use std::env;
    use super::*;
    const JUNGLE_GREEN: &str = "#26A69A";
    const DARK_CYAN: &str = "#00AF87";
    // NOTE(review): this test mutates the process-wide COLORTERM variable;
    // it would race with any other test reading that variable in parallel.
    #[test]
    fn test_parse_style() {
        // Remember the caller's COLORTERM so it can be restored at the end.
        let original_environment_variable = env::var("COLORTERM");
        let mut style = Style::default();
        assert_eq!(style.ansi.get_fg_color(), None);
        assert_eq!(style.css, None);
        // darkcyan is an ANSI color and is preserved
        env::set_var("COLORTERM", "");
        parse_style(&mut style, Value::String(DARK_CYAN.to_string()));
        assert_eq!(
            style.ansi.get_fg_color(),
            Some(Color::Ansi256(Ansi256Color(36)))
        );
        assert_eq!(style.css, Some("color: #00af87".to_string()));
        // junglegreen is not an ANSI color and is preserved when the terminal supports it
        env::set_var("COLORTERM", "truecolor");
        parse_style(&mut style, Value::String(JUNGLE_GREEN.to_string()));
        assert_eq!(
            style.ansi.get_fg_color(),
            Some(Color::Rgb(RgbColor(38, 166, 154)))
        );
        assert_eq!(style.css, Some("color: #26a69a".to_string()));
        // junglegreen gets approximated as cadetblue when the terminal does not support it
        env::set_var("COLORTERM", "");
        parse_style(&mut style, Value::String(JUNGLE_GREEN.to_string()));
        assert_eq!(
            style.ansi.get_fg_color(),
            Some(Color::Ansi256(Ansi256Color(72)))
        );
        assert_eq!(style.css, Some("color: #26a69a".to_string()));
        // Restore the environment for subsequent tests.
        if let Ok(environment_variable) = original_environment_variable {
            env::set_var("COLORTERM", environment_variable);
        } else {
            env::remove_var("COLORTERM");
        }
    }
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/version.rs | crates/cli/src/version.rs | use std::{fs, path::PathBuf, process::Command};
use clap::ValueEnum;
use log::{info, warn};
use regex::Regex;
use semver::Version as SemverVersion;
use std::cmp::Ordering;
use tree_sitter_loader::TreeSitterJSON;
/// Which semver component to increment; defaults to a patch bump.
#[derive(Clone, Copy, Default, ValueEnum)]
pub enum BumpLevel {
    #[default]
    Patch,
    Minor,
    Major,
}
/// The `version` subcommand: prints, sets, or bumps the grammar version
/// across every manifest in `current_dir`.
pub struct Version {
    /// Explicit version to set; mutually exclusive with `bump` (the CLI
    /// enforces this — `run` treats both-set as unreachable).
    pub version: Option<SemverVersion>,
    pub current_dir: PathBuf,
    /// Requested bump level, when no explicit version was given.
    pub bump: Option<BumpLevel>,
}
/// Top-level error for the `version` subcommand.
#[derive(thiserror::Error, Debug)]
pub enum VersionError {
    #[error(transparent)]
    Json(#[from] serde_json::Error),
    #[error(transparent)]
    Io(#[from] std::io::Error),
    /// One or more manifest updates failed; all failures are collected.
    #[error("Failed to update one or more files:\n\n{0}")]
    Update(UpdateErrors),
}
/// Collection of per-file update failures, displayed one per paragraph.
#[derive(thiserror::Error, Debug)]
pub struct UpdateErrors(Vec<UpdateError>);
impl std::fmt::Display for UpdateErrors {
    /// Writes each collected error followed by a blank line.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.0
            .iter()
            .try_for_each(|error| writeln!(f, "{error}\n"))
    }
}
/// A single failed manifest update: either file I/O on the manifest itself,
/// or a helper command (cargo/npm) that exited unsuccessfully.
#[derive(thiserror::Error, Debug)]
pub enum UpdateError {
    #[error("Failed to update {1}:\n{0}")]
    Io(std::io::Error, PathBuf),
    #[error("Failed to run `{0}`:\n{1}")]
    Command(&'static str, String),
}
impl Version {
/// Creates the subcommand state. `version` and `bump` are mutually
/// exclusive; the CLI guarantees at most one is set.
#[must_use]
pub const fn new(
    version: Option<SemverVersion>,
    current_dir: PathBuf,
    bump: Option<BumpLevel>,
) -> Self {
    Self {
        version,
        current_dir,
        bump,
    }
}
/// Resolves the target version (explicit, bumped from the current one, or
/// merely printed) and propagates it to every manifest in the repository.
///
/// # Errors
///
/// Returns [`VersionError::Update`] collecting every file that failed to
/// update; JSON/I/O errors while reading `tree-sitter.json` are returned
/// directly.
pub fn run(mut self) -> Result<(), VersionError> {
    let tree_sitter_json = self.current_dir.join("tree-sitter.json");
    let tree_sitter_json =
        serde_json::from_str::<TreeSitterJSON>(&fs::read_to_string(tree_sitter_json)?)?;
    let current_version = tree_sitter_json.metadata.version;
    self.version = match (self.version.is_some(), self.bump) {
        // Nothing requested: just report the current version.
        (false, None) => {
            info!("Current version: {current_version}");
            return Ok(());
        }
        (true, None) => self.version,
        // Derive the next version from the requested bump level.
        (false, Some(bump)) => {
            let mut v = current_version.clone();
            match bump {
                BumpLevel::Patch => v.patch += 1,
                BumpLevel::Minor => {
                    v.minor += 1;
                    v.patch = 0;
                }
                BumpLevel::Major => {
                    v.major += 1;
                    v.minor = 0;
                    v.patch = 0;
                }
            }
            Some(v)
        }
        // The CLI rejects passing both an explicit version and a bump.
        (true, Some(_)) => unreachable!(),
    };
    let new_version = self.version.as_ref().unwrap();
    match new_version.cmp(&current_version) {
        Ordering::Less => {
            warn!("New version is lower than current!");
            warn!("Reverting version {current_version} to {new_version}");
        }
        Ordering::Greater => {
            info!("Bumping version {current_version} to {new_version}");
        }
        Ordering::Equal => {
            info!("Keeping version {current_version}");
        }
    }
    let is_multigrammar = tree_sitter_json.grammars.len() > 1;
    let mut errors = Vec::new();
    // Helper to push errors into the errors vector, returns true if an error was pushed
    let mut push_err = |result: Result<(), UpdateError>| -> bool {
        if let Err(e) = result {
            errors.push(e);
            return true;
        }
        false
    };
    push_err(self.update_treesitter_json());
    // Only regenerate Cargo.lock when Cargo.toml was updated successfully.
    // (`push_err` returns true on *failure*, so the lockfile step must run
    // when it returns false; the previous `.then(..)` chaining had this
    // inverted and refreshed the lockfile only after a failed update.)
    if !push_err(self.update_cargo_toml()) {
        push_err(self.update_cargo_lock());
    }
    // Likewise, only refresh package-lock.json when package.json succeeded.
    if !push_err(self.update_package_json()) {
        push_err(self.update_package_lock_json());
    }
    push_err(self.update_makefile(is_multigrammar));
    push_err(self.update_cmakelists_txt());
    push_err(self.update_pyproject_toml());
    push_err(self.update_zig_zon());
    if errors.is_empty() {
        Ok(())
    } else {
        Err(VersionError::Update(UpdateErrors(errors)))
    }
}
fn update_file_with<F>(&self, path: &PathBuf, update_fn: F) -> Result<(), UpdateError>
where
F: Fn(&str) -> String,
{
let content = fs::read_to_string(path).map_err(|e| UpdateError::Io(e, path.clone()))?;
let updated_content = update_fn(&content);
fs::write(path, updated_content).map_err(|e| UpdateError::Io(e, path.clone()))
}
/// Rewrites every `"version":` line in `tree-sitter.json` to the new
/// version, preserving surrounding formatting.
fn update_treesitter_json(&self) -> Result<(), UpdateError> {
    let json_path = self.current_dir.join("tree-sitter.json");
    self.update_file_with(&json_path, |content| {
        content
            .lines()
            .map(|line| {
                if line.contains("\"version\":") {
                    // Locate the value's quotes after the `"version":` key.
                    let prefix_index =
                        line.find("\"version\":").unwrap() + "\"version\":".len();
                    // Index of the first character *inside* the value quotes.
                    let start_quote =
                        line[prefix_index..].find('"').unwrap() + prefix_index + 1;
                    // Absolute index of the closing quote.
                    let end_quote =
                        line[start_quote + 1..].find('"').unwrap() + start_quote + 1;
                    // Splice the new version between the quotes, keeping
                    // everything before and after intact.
                    format!(
                        "{}{}{}",
                        &line[..start_quote],
                        self.version.as_ref().unwrap(),
                        &line[end_quote..]
                    )
                } else {
                    line.to_string()
                }
            })
            .collect::<Vec<_>>()
            .join("\n")
            + "\n"
    })
}
/// Rewrites the `version = "…"` line of `Cargo.toml`, if the file exists.
///
/// NOTE(review): this matches any line *starting* with `version =`, which
/// assumes the package version is the only such line at column 0 — confirm
/// no other table in the grammar's Cargo.toml begins a line that way.
fn update_cargo_toml(&self) -> Result<(), UpdateError> {
    let cargo_toml_path = self.current_dir.join("Cargo.toml");
    if !cargo_toml_path.exists() {
        return Ok(());
    }
    self.update_file_with(&cargo_toml_path, |content| {
        content
            .lines()
            .map(|line| {
                if line.starts_with("version =") {
                    format!("version = \"{}\"", self.version.as_ref().unwrap())
                } else {
                    line.to_string()
                }
            })
            .collect::<Vec<_>>()
            .join("\n")
            + "\n"
    })?;
    Ok(())
}
/// Regenerates `Cargo.lock` (offline) so it picks up the new version.
///
/// Quietly succeeds when there is no lockfile or when `cargo` itself cannot
/// be spawned; a cargo run that exits unsuccessfully is reported.
fn update_cargo_lock(&self) -> Result<(), UpdateError> {
    if !self.current_dir.join("Cargo.lock").exists() {
        return Ok(());
    }
    let Ok(output) = Command::new("cargo")
        .arg("generate-lockfile")
        .arg("--offline")
        .current_dir(&self.current_dir)
        .output()
    else {
        return Ok(()); // cargo is not `executable`, ignore
    };
    if output.status.success() {
        Ok(())
    } else {
        Err(UpdateError::Command(
            "cargo generate-lockfile",
            String::from_utf8_lossy(&output.stderr).to_string(),
        ))
    }
}
/// Rewrites every `"version":` line in `package.json` to the new version,
/// if the file exists, preserving surrounding formatting.
fn update_package_json(&self) -> Result<(), UpdateError> {
    let package_json_path = self.current_dir.join("package.json");
    if !package_json_path.exists() {
        return Ok(());
    }
    self.update_file_with(&package_json_path, |content| {
        content
            .lines()
            .map(|line| {
                if line.contains("\"version\":") {
                    // Same quote-splicing scheme as `update_treesitter_json`:
                    // find the value's quotes and replace what's between them.
                    let prefix_index =
                        line.find("\"version\":").unwrap() + "\"version\":".len();
                    let start_quote =
                        line[prefix_index..].find('"').unwrap() + prefix_index + 1;
                    let end_quote =
                        line[start_quote + 1..].find('"').unwrap() + start_quote + 1;
                    format!(
                        "{}{}{}",
                        &line[..start_quote],
                        self.version.as_ref().unwrap(),
                        &line[end_quote..]
                    )
                } else {
                    line.to_string()
                }
            })
            .collect::<Vec<_>>()
            .join("\n")
            + "\n"
    })?;
    Ok(())
}
fn update_package_lock_json(&self) -> Result<(), UpdateError> {
if self.current_dir.join("package-lock.json").exists() {
let Ok(cmd) = Command::new("npm")
.arg("install")
.arg("--package-lock-only")
.current_dir(&self.current_dir)
.output()
else {
return Ok(()); // npm is not `executable`, ignore
};
if !cmd.status.success() {
let stderr = String::from_utf8_lossy(&cmd.stderr);
return Err(UpdateError::Command("npm install", stderr.to_string()));
}
}
Ok(())
}
fn update_makefile(&self, is_multigrammar: bool) -> Result<(), UpdateError> {
let makefile_path = if is_multigrammar {
self.current_dir.join("common").join("common.mak")
} else {
self.current_dir.join("Makefile")
};
self.update_file_with(&makefile_path, |content| {
content
.lines()
.map(|line| {
if line.starts_with("VERSION") {
format!("VERSION := {}", self.version.as_ref().unwrap())
} else {
line.to_string()
}
})
.collect::<Vec<_>>()
.join("\n")
+ "\n"
})?;
Ok(())
}
fn update_cmakelists_txt(&self) -> Result<(), UpdateError> {
let cmake_lists_path = self.current_dir.join("CMakeLists.txt");
if !cmake_lists_path.exists() {
return Ok(());
}
self.update_file_with(&cmake_lists_path, |content| {
let re = Regex::new(r#"(\s*VERSION\s+)"[0-9]+\.[0-9]+\.[0-9]+""#)
.expect("Failed to compile regex");
re.replace(
content,
format!(r#"$1"{}""#, self.version.as_ref().unwrap()),
)
.to_string()
})?;
Ok(())
}
fn update_pyproject_toml(&self) -> Result<(), UpdateError> {
let pyproject_toml_path = self.current_dir.join("pyproject.toml");
if !pyproject_toml_path.exists() {
return Ok(());
}
self.update_file_with(&pyproject_toml_path, |content| {
content
.lines()
.map(|line| {
if line.starts_with("version =") {
format!("version = \"{}\"", self.version.as_ref().unwrap())
} else {
line.to_string()
}
})
.collect::<Vec<_>>()
.join("\n")
+ "\n"
})?;
Ok(())
}
    /// Rewrite the `.version = "..."` entry in `build.zig.zon`, if one exists.
    ///
    /// Uses the same quote-splicing strategy as the JSON updaters, but keys
    /// off the Zig field syntax `.version =` (after stripping leading
    /// whitespace) instead of a JSON key.
    fn update_zig_zon(&self) -> Result<(), UpdateError> {
        let zig_zon_path = self.current_dir.join("build.zig.zon");
        if !zig_zon_path.exists() {
            return Ok(());
        }
        self.update_file_with(&zig_zon_path, |content| {
            let zig_version_prefix = ".version =";
            content
                .lines()
                .map(|line| {
                    if line
                        .trim_start_matches(|c: char| c.is_ascii_whitespace())
                        .starts_with(zig_version_prefix)
                    {
                        // Index just past the `.version =` prefix on the original line.
                        let prefix_index =
                            line.find(zig_version_prefix).unwrap() + zig_version_prefix.len();
                        // First character of the value (one past its opening quote).
                        let start_quote =
                            line[prefix_index..].find('"').unwrap() + prefix_index + 1;
                        // Index of the value's closing quote.
                        let end_quote =
                            line[start_quote + 1..].find('"').unwrap() + start_quote + 1;
                        format!(
                            "{}{}{}",
                            &line[..start_quote],
                            self.version.as_ref().unwrap(),
                            &line[end_quote..]
                        )
                    } else {
                        line.to_string()
                    }
                })
                .collect::<Vec<_>>()
                .join("\n")
                + "\n"
        })?;
        Ok(())
    }
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/parse.rs | crates/cli/src/parse.rs | use std::{
fmt, fs,
io::{self, Write},
ops::ControlFlow,
path::{Path, PathBuf},
sync::atomic::{AtomicUsize, Ordering},
time::{Duration, Instant},
};
use anstyle::{AnsiColor, Color, RgbColor};
use anyhow::{anyhow, Context, Result};
use clap::ValueEnum;
use log::info;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use tree_sitter::{
ffi, InputEdit, Language, LogType, ParseOptions, ParseState, Parser, Point, Range, Tree,
TreeCursor,
};
use crate::{fuzz::edits::Edit, logger::paint, util};
/// Aggregate statistics across all files parsed in one CLI invocation.
#[derive(Debug, Default, Serialize, JsonSchema)]
pub struct Stats {
    // Number of files that parsed without errors.
    pub successful_parses: usize,
    // Number of files parsed in total.
    pub total_parses: usize,
    // Combined size of all parsed sources, in bytes.
    pub total_bytes: usize,
    // Wall-clock parsing time summed over all files.
    pub total_duration: Duration,
}
impl fmt::Display for Stats {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let duration_us = self.total_duration.as_micros();
        // Percentage of parses that succeeded, or "N/A" when nothing ran.
        let success_rate = if self.total_parses == 0 {
            "N/A".to_string()
        } else {
            format!(
                "{:.2}%",
                self.successful_parses as f64 / self.total_parses as f64 * 100.0,
            )
        };
        // Average throughput; guard both "no parses" and "zero elapsed time".
        let duration_str = if self.total_parses == 0 {
            "N/A".to_string()
        } else if duration_us == 0 {
            "0 bytes/ms".to_string()
        } else {
            format!(
                "{} bytes/ms",
                self.total_bytes as u128 * 1_000 / duration_us
            )
        };
        writeln!(
            f,
            "Total parses: {}; successful parses: {}; failed parses: {}; success percentage: {success_rate}; average speed: {duration_str}",
            self.total_parses,
            self.successful_parses,
            self.total_parses - self.successful_parses,
        )
    }
}
/// Sets the color used in the output of `tree-sitter parse --cst`
///
/// A `None` entry means that element is printed without any color.
#[derive(Debug, Copy, Clone)]
pub struct ParseTheme {
    /// The color of node kinds
    pub node_kind: Option<Color>,
    /// The color of text associated with a node
    pub node_text: Option<Color>,
    /// The color of node fields
    pub field: Option<Color>,
    /// The color of the range information for unnamed nodes
    pub row_color: Option<Color>,
    /// The color of the range information for named nodes
    pub row_color_named: Option<Color>,
    /// The color of extra nodes
    pub extra: Option<Color>,
    /// The color of ERROR nodes
    pub error: Option<Color>,
    /// The color of MISSING nodes and their associated text
    pub missing: Option<Color>,
    /// The color of newline characters
    pub line_feed: Option<Color>,
    /// The color of backticks
    pub backtick: Option<Color>,
    /// The color of literals
    pub literal: Option<Color>,
}
impl ParseTheme {
    // Custom RGB colors referenced by the default theme below.
    const GRAY: Color = Color::Rgb(RgbColor(118, 118, 118));
    const LIGHT_GRAY: Color = Color::Rgb(RgbColor(166, 172, 181));
    const ORANGE: Color = Color::Rgb(RgbColor(255, 153, 51));
    const YELLOW: Color = Color::Rgb(RgbColor(219, 219, 173));
    const GREEN: Color = Color::Rgb(RgbColor(101, 192, 67));
    /// A theme with every color disabled, for plain (uncolored) output.
    #[must_use]
    pub const fn empty() -> Self {
        Self {
            node_kind: None,
            node_text: None,
            field: None,
            row_color: None,
            row_color_named: None,
            extra: None,
            error: None,
            missing: None,
            line_feed: None,
            backtick: None,
            literal: None,
        }
    }
}
impl Default for ParseTheme {
    /// The CLI's default color scheme for `--cst` output.
    fn default() -> Self {
        Self {
            node_kind: Some(AnsiColor::BrightCyan.into()),
            node_text: Some(Self::GRAY),
            field: Some(AnsiColor::Blue.into()),
            row_color: Some(AnsiColor::White.into()),
            row_color_named: Some(AnsiColor::BrightCyan.into()),
            extra: Some(AnsiColor::BrightMagenta.into()),
            error: Some(AnsiColor::Red.into()),
            missing: Some(Self::ORANGE),
            line_feed: Some(Self::LIGHT_GRAY),
            backtick: Some(Self::GREEN),
            literal: Some(Self::YELLOW),
        }
    }
}
/// A serializable RGB triple (red, green, blue) used in the CLI config file.
#[derive(Debug, Copy, Clone, Deserialize, Serialize)]
pub struct Rgb(pub u8, pub u8, pub u8);
/// Field-for-field conversion into anstyle's RGB color type.
impl From<Rgb> for RgbColor {
    fn from(val: Rgb) -> Self {
        Self(val.0, val.1, val.2)
    }
}
/// The `parse`-related section of the CLI's configuration file.
#[derive(Debug, Copy, Clone, Default, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct Config {
    // Optional user-supplied CST color overrides.
    pub parse_theme: Option<ParseThemeRaw>,
}
/// User-facing parse theme: each entry is an optional RGB override that
/// falls back to the default theme when unset (see `From<ParseThemeRaw>`).
#[derive(Debug, Copy, Clone, Default, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct ParseThemeRaw {
    pub node_kind: Option<Rgb>,
    pub node_text: Option<Rgb>,
    pub field: Option<Rgb>,
    pub row_color: Option<Rgb>,
    pub row_color_named: Option<Rgb>,
    pub extra: Option<Rgb>,
    pub error: Option<Rgb>,
    pub missing: Option<Rgb>,
    pub line_feed: Option<Rgb>,
    pub backtick: Option<Rgb>,
    pub literal: Option<Rgb>,
}
impl From<ParseThemeRaw> for ParseTheme {
    /// Overlay a user-supplied raw theme on top of the default theme:
    /// any color left unset in the raw config falls back to the default.
    fn from(value: ParseThemeRaw) -> Self {
        let defaults = Self::default();
        let pick = |raw: Option<Rgb>, fallback: Option<Color>| match raw {
            Some(rgb) => Some(Color::Rgb(rgb.into())),
            None => fallback,
        };
        Self {
            node_kind: pick(value.node_kind, defaults.node_kind),
            node_text: pick(value.node_text, defaults.node_text),
            field: pick(value.field, defaults.field),
            row_color: pick(value.row_color, defaults.row_color),
            row_color_named: pick(value.row_color_named, defaults.row_color_named),
            extra: pick(value.extra, defaults.extra),
            error: pick(value.error, defaults.error),
            missing: pick(value.missing, defaults.missing),
            line_feed: pick(value.line_feed, defaults.line_feed),
            backtick: pick(value.backtick, defaults.backtick),
            literal: pick(value.literal, defaults.literal),
        }
    }
}
/// Output format used by the `parse` subcommand.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum ParseOutput {
    // S-expression tree output.
    Normal,
    // No tree output; only timing/error summaries are printed.
    Quiet,
    // XML tree output (wrapped in a `<sources>` document).
    Xml,
    // Colorized concrete-syntax-tree output.
    Cst,
    // Render the tree as a graph written to `log.html`.
    Dot,
}
/// A position in a multi-line text document, in terms of rows and columns.
///
/// Rows and columns are zero-based.
///
/// This serves as a serializable wrapper for `Point`
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize)]
pub struct ParsePoint {
    pub row: usize,
    pub column: usize,
}
/// Field-for-field conversion from tree-sitter's `Point`.
impl From<Point> for ParsePoint {
    fn from(value: Point) -> Self {
        Self {
            row: value.row,
            column: value.column,
        }
    }
}
/// The result of parsing a single file, recorded for reporting.
#[derive(Serialize, Default, Debug, Clone)]
pub struct ParseSummary {
    // Path of the parsed file.
    pub file: PathBuf,
    // Whether the parse completed without errors.
    pub successful: bool,
    // Root node start position; `None` when the parse produced no tree.
    pub start: Option<ParsePoint>,
    // Root node end position; `None` when the parse produced no tree.
    pub end: Option<ParsePoint>,
    // Time taken to parse; `None` when the parse produced no tree.
    pub duration: Option<Duration>,
    // Size of the source file, in bytes.
    pub bytes: Option<usize>,
}
impl ParseSummary {
    /// Create an empty summary for `path`, marked unsuccessful (the
    /// `Default` for `bool`) until the parse proves otherwise.
    #[must_use]
    pub fn new(path: &Path) -> Self {
        Self {
            file: path.to_path_buf(),
            ..Default::default()
        }
    }
}
/// Accumulated results for a whole `parse` invocation.
#[derive(Serialize, Debug)]
pub struct ParseStats {
    // One summary per file parsed so far.
    pub parse_summaries: Vec<ParseSummary>,
    // Totals across all files.
    pub cumulative_stats: Stats,
    // Total number of sources that will be parsed in this invocation.
    pub source_count: usize,
}
impl Default for ParseStats {
    /// Hand-written (rather than derived) so `source_count` starts at 1:
    /// there is always at least one source to parse.
    fn default() -> Self {
        Self {
            parse_summaries: Vec::new(),
            cumulative_stats: Stats::default(),
            source_count: 1,
        }
    }
}
/// How much parser debug logging to emit (the `--debug` flag).
#[derive(Serialize, ValueEnum, Debug, Copy, Clone, Default, Eq, PartialEq)]
pub enum ParseDebugType {
    // No debug logging.
    #[default]
    Quiet,
    // Plain debug logging to stderr.
    Normal,
    // Colorized logging, colored by the parse version reported in messages.
    Pretty,
}
/// Per-invocation options threaded through `parse_file_at_path`.
pub struct ParseFileOptions<'a> {
    // Raw `--edit` flag strings, applied after the initial parse.
    pub edits: &'a [&'a str],
    // Which tree representation to print.
    pub output: ParseOutput,
    // Accumulator for per-file summaries and totals.
    pub stats: &'a mut ParseStats,
    // Print timing info for each file.
    pub print_time: bool,
    // Maximum parse time in microseconds; 0 disables the timeout.
    pub timeout: u64,
    // Debug logging level.
    pub debug: ParseDebugType,
    // Render parse graphs to `log.html`.
    pub debug_graph: bool,
    // When set and nonzero, cancels an in-progress parse.
    pub cancellation_flag: Option<&'a AtomicUsize>,
    // Forced input encoding (`ffi::TSInputEncoding*` value); `None` = detect.
    pub encoding: Option<u32>,
    // Open the generated log file after writing it.
    pub open_log: bool,
    // Omit `[row, col]` range annotations from tree output.
    pub no_ranges: bool,
    // Colors used by `--cst` output.
    pub parse_theme: &'a ParseTheme,
}
/// Compact outcome of parsing one file.
// NOTE(review): not constructed anywhere in this chunk; field meanings
// mirror `ParseSummary` -- confirm against callers elsewhere in the crate.
#[derive(Copy, Clone)]
pub struct ParseResult {
    pub successful: bool,
    pub bytes: usize,
    pub duration: Option<Duration>,
}
/// Parse the file at `path` with `language`, render the tree and/or timing
/// information according to `opts`, and push a `ParseSummary` onto
/// `opts.stats`.
///
/// `name` is the display name for the file; `max_path_length` aligns the
/// timing columns when multiple files are parsed.
///
/// # Errors
/// Returns an error if the language cannot be set, the file cannot be read,
/// or writing any of the output fails.
pub fn parse_file_at_path(
    parser: &mut Parser,
    language: &Language,
    path: &Path,
    name: &str,
    max_path_length: usize,
    opts: &mut ParseFileOptions,
) -> Result<()> {
    let mut _log_session = None;
    parser.set_language(language)?;
    let mut source_code = fs::read(path).with_context(|| format!("Error reading {name:?}"))?;
    // Render an HTML graph if `--debug-graph` was passed
    if opts.debug_graph {
        _log_session = Some(util::log_graphs(parser, "log.html", opts.open_log)?);
    }
    // Log to stderr if `--debug` was passed
    else if opts.debug != ParseDebugType::Quiet {
        let mut curr_version: usize = 0;
        let use_color = std::env::var("NO_COLOR").map_or(true, |v| v != "1");
        let debug = opts.debug;
        parser.set_logger(Some(Box::new(move |log_type, message| {
            if debug == ParseDebugType::Normal {
                // Lexer messages are indented under their parser message.
                if log_type == LogType::Lex {
                    write!(&mut io::stderr(), "  ").unwrap();
                }
                writeln!(&mut io::stderr(), "{message}").unwrap();
            } else {
                // "Pretty" mode: pick a color based on the parse version
                // reported in `process version:N,...` messages.
                let colors = &[
                    AnsiColor::White,
                    AnsiColor::Red,
                    AnsiColor::Blue,
                    AnsiColor::Green,
                    AnsiColor::Cyan,
                    AnsiColor::Yellow,
                ];
                if message.starts_with("process version:") {
                    let comma_idx = message.find(',').unwrap();
                    curr_version = message["process version:".len()..comma_idx]
                        .parse()
                        .unwrap();
                }
                let color = if use_color {
                    Some(colors[curr_version])
                } else {
                    None
                };
                let mut out = if log_type == LogType::Lex {
                    "  ".to_string()
                } else {
                    String::new()
                };
                out += &paint(color, message);
                writeln!(&mut io::stderr(), "{out}").unwrap();
            }
        })));
    }
    let parse_time = Instant::now();
    #[inline(always)]
    fn is_utf16_le_bom(bom_bytes: &[u8]) -> bool {
        bom_bytes == [0xFF, 0xFE]
    }
    #[inline(always)]
    fn is_utf16_be_bom(bom_bytes: &[u8]) -> bool {
        bom_bytes == [0xFE, 0xFF]
    }
    // Infer a UTF-16 encoding from a byte-order mark when the user did not
    // request an encoding explicitly.
    let encoding = match opts.encoding {
        None if source_code.len() >= 2 => {
            if is_utf16_le_bom(&source_code[0..2]) {
                Some(ffi::TSInputEncodingUTF16LE)
            } else if is_utf16_be_bom(&source_code[0..2]) {
                Some(ffi::TSInputEncodingUTF16BE)
            } else {
                None
            }
        }
        _ => opts.encoding,
    };
    // If the `--cancel` flag was passed, then cancel the parse
    // when the user types a newline.
    //
    // Additionally, if the `--time` flag was passed, end the parse
    // after the specified number of microseconds.
    let start_time = Instant::now();
    let progress_callback = &mut |_: &ParseState| {
        if let Some(cancellation_flag) = opts.cancellation_flag {
            if cancellation_flag.load(Ordering::SeqCst) != 0 {
                return ControlFlow::Break(());
            }
        }
        if opts.timeout > 0 && start_time.elapsed().as_micros() > opts.timeout as u128 {
            return ControlFlow::Break(());
        }
        ControlFlow::Continue(())
    };
    let parse_opts = ParseOptions::new().progress_callback(progress_callback);
    // Decode and parse according to the detected/requested encoding.
    let tree = match encoding {
        Some(encoding) if encoding == ffi::TSInputEncodingUTF16LE => {
            let source_code_utf16 = source_code
                .chunks_exact(2)
                .map(|chunk| u16::from_le_bytes([chunk[0], chunk[1]]))
                .collect::<Vec<_>>();
            parser.parse_utf16_le_with_options(
                &mut |i, _| {
                    if i < source_code_utf16.len() {
                        &source_code_utf16[i..]
                    } else {
                        &[]
                    }
                },
                None,
                Some(parse_opts),
            )
        }
        Some(encoding) if encoding == ffi::TSInputEncodingUTF16BE => {
            let source_code_utf16 = source_code
                .chunks_exact(2)
                .map(|chunk| u16::from_be_bytes([chunk[0], chunk[1]]))
                .collect::<Vec<_>>();
            parser.parse_utf16_be_with_options(
                &mut |i, _| {
                    if i < source_code_utf16.len() {
                        &source_code_utf16[i..]
                    } else {
                        &[]
                    }
                },
                None,
                Some(parse_opts),
            )
        }
        _ => parser.parse_with_options(
            &mut |i, _| {
                if i < source_code.len() {
                    &source_code[i..]
                } else {
                    &[]
                }
            },
            None,
            Some(parse_opts),
        ),
    };
    let parse_duration = parse_time.elapsed();
    let stdout = io::stdout();
    let mut stdout = stdout.lock();
    // A `None` tree means the parse was cancelled or timed out; that case is
    // handled after this block.
    if let Some(mut tree) = tree {
        if opts.debug_graph && !opts.edits.is_empty() {
            info!("BEFORE:\n{}", String::from_utf8_lossy(&source_code));
        }
        // Apply any `--edit` flags, reparsing incrementally after each one.
        let edit_time = Instant::now();
        for (i, edit) in opts.edits.iter().enumerate() {
            let edit = parse_edit_flag(&source_code, edit)?;
            perform_edit(&mut tree, &mut source_code, &edit)?;
            tree = parser.parse(&source_code, Some(&tree)).unwrap();
            if opts.debug_graph {
                info!("AFTER {i}:\n{}", String::from_utf8_lossy(&source_code));
            }
        }
        let edit_duration = edit_time.elapsed();
        parser.stop_printing_dot_graphs();
        let parse_duration_ms = parse_duration.as_micros() as f64 / 1e3;
        let edit_duration_ms = edit_duration.as_micros() as f64 / 1e3;
        let mut cursor = tree.walk();
        // S-expression output: iterative pre-order walk tracking whether the
        // current node's children were already visited.
        if opts.output == ParseOutput::Normal {
            let mut needs_newline = false;
            let mut indent_level = 0;
            let mut did_visit_children = false;
            loop {
                let node = cursor.node();
                let is_named = node.is_named();
                if did_visit_children {
                    if is_named {
                        stdout.write_all(b")")?;
                        needs_newline = true;
                    }
                    if cursor.goto_next_sibling() {
                        did_visit_children = false;
                    } else if cursor.goto_parent() {
                        did_visit_children = true;
                        indent_level -= 1;
                    } else {
                        break;
                    }
                } else {
                    if is_named {
                        if needs_newline {
                            stdout.write_all(b"\n")?;
                        }
                        for _ in 0..indent_level {
                            stdout.write_all(b"  ")?;
                        }
                        let start = node.start_position();
                        let end = node.end_position();
                        if let Some(field_name) = cursor.field_name() {
                            write!(&mut stdout, "{field_name}: ")?;
                        }
                        write!(&mut stdout, "({}", node.kind())?;
                        if !opts.no_ranges {
                            write!(
                                &mut stdout,
                                " [{}, {}] - [{}, {}]",
                                start.row, start.column, end.row, end.column
                            )?;
                        }
                        needs_newline = true;
                    }
                    if cursor.goto_first_child() {
                        did_visit_children = false;
                        indent_level += 1;
                    } else {
                        did_visit_children = true;
                    }
                }
            }
            cursor.reset(tree.root_node());
            println!();
        }
        if opts.output == ParseOutput::Cst {
            render_cst(&source_code, &tree, &mut cursor, opts, &mut stdout)?;
        }
        // XML output: same walk shape, emitting open/close tags and escaped
        // leaf text, wrapped in a shared `<sources>` document across files.
        if opts.output == ParseOutput::Xml {
            let mut needs_newline = false;
            let mut indent_level = 2;
            let mut did_visit_children = false;
            let mut had_named_children = false;
            let mut tags = Vec::<&str>::new();
            // If we're parsing the first file, write the header
            if opts.stats.parse_summaries.is_empty() {
                writeln!(&mut stdout, "<?xml version=\"1.0\"?>")?;
                writeln!(&mut stdout, "<sources>")?;
            }
            writeln!(&mut stdout, "  <source name=\"{}\">", path.display())?;
            loop {
                let node = cursor.node();
                let is_named = node.is_named();
                if did_visit_children {
                    if is_named {
                        let tag = tags.pop();
                        if had_named_children {
                            for _ in 0..indent_level {
                                stdout.write_all(b"  ")?;
                            }
                        }
                        write!(&mut stdout, "</{}>", tag.expect("there is a tag"))?;
                        // we only write a line in the case where it's the last sibling
                        if let Some(parent) = node.parent() {
                            if parent.child(parent.child_count() as u32 - 1).unwrap() == node {
                                stdout.write_all(b"\n")?;
                            }
                        }
                        needs_newline = true;
                    }
                    if cursor.goto_next_sibling() {
                        did_visit_children = false;
                        had_named_children = false;
                    } else if cursor.goto_parent() {
                        did_visit_children = true;
                        had_named_children = is_named;
                        indent_level -= 1;
                        if !is_named && needs_newline {
                            stdout.write_all(b"\n")?;
                            for _ in 0..indent_level {
                                stdout.write_all(b"  ")?;
                            }
                        }
                    } else {
                        break;
                    }
                } else {
                    if is_named {
                        if needs_newline {
                            stdout.write_all(b"\n")?;
                        }
                        for _ in 0..indent_level {
                            stdout.write_all(b"  ")?;
                        }
                        write!(&mut stdout, "<{}", node.kind())?;
                        if let Some(field_name) = cursor.field_name() {
                            write!(&mut stdout, " field=\"{field_name}\"")?;
                        }
                        let start = node.start_position();
                        let end = node.end_position();
                        write!(&mut stdout, " srow=\"{}\"", start.row)?;
                        write!(&mut stdout, " scol=\"{}\"", start.column)?;
                        write!(&mut stdout, " erow=\"{}\"", end.row)?;
                        write!(&mut stdout, " ecol=\"{}\"", end.column)?;
                        write!(&mut stdout, ">")?;
                        tags.push(node.kind());
                        needs_newline = true;
                    }
                    if cursor.goto_first_child() {
                        did_visit_children = false;
                        had_named_children = false;
                        indent_level += 1;
                    } else {
                        did_visit_children = true;
                        let start = node.start_byte();
                        let end = node.end_byte();
                        let value =
                            std::str::from_utf8(&source_code[start..end]).expect("has a string");
                        if !is_named && needs_newline {
                            stdout.write_all(b"\n")?;
                            for _ in 0..indent_level {
                                stdout.write_all(b"  ")?;
                            }
                        }
                        write!(&mut stdout, "{}", html_escape::encode_text(value))?;
                    }
                }
            }
            writeln!(&mut stdout)?;
            writeln!(&mut stdout, "  </source>")?;
            // If we parsed the last file, write the closing tag for the `sources` header
            if opts.stats.parse_summaries.len() == opts.stats.source_count - 1 {
                writeln!(&mut stdout, "</sources>")?;
            }
            cursor.reset(tree.root_node());
        }
        if opts.output == ParseOutput::Dot {
            util::print_tree_graph(&tree, "log.html", opts.open_log).unwrap();
        }
        // Find the first visible ERROR/MISSING node, if any.
        let mut first_error = None;
        let mut earliest_node_with_error = None;
        'outer: loop {
            let node = cursor.node();
            if node.has_error() {
                if earliest_node_with_error.is_none() {
                    earliest_node_with_error = Some(node);
                }
                if node.is_error() || node.is_missing() {
                    first_error = Some(node);
                    break;
                }
                // If there's no more children, even though some outer node has an error,
                // then that means that the first error is hidden, but the later error could be
                // visible. So, we walk back up to the child of the first node with an error,
                // and then check its siblings for errors.
                if !cursor.goto_first_child() {
                    let earliest = earliest_node_with_error.unwrap();
                    while cursor.goto_parent() {
                        if cursor.node().parent().is_some_and(|p| p == earliest) {
                            while cursor.goto_next_sibling() {
                                let sibling = cursor.node();
                                if sibling.is_error() || sibling.is_missing() {
                                    first_error = Some(sibling);
                                    break 'outer;
                                }
                                if sibling.has_error() && cursor.goto_first_child() {
                                    continue 'outer;
                                }
                            }
                            break;
                        }
                    }
                    break;
                }
            } else if !cursor.goto_next_sibling() {
                break;
            }
        }
        // Print the timing/error line when requested or when an error exists.
        if first_error.is_some() || opts.print_time {
            let path = path.to_string_lossy();
            write!(
                &mut stdout,
                "{:width$}\tParse: {parse_duration_ms:>7.2} ms\t{:>6} bytes/ms",
                name,
                (source_code.len() as u128 * 1_000_000) / parse_duration.as_nanos(),
                width = max_path_length
            )?;
            if let Some(node) = first_error {
                let node_kind = node.kind();
                let mut node_text = String::with_capacity(node_kind.len());
                for c in node_kind.chars() {
                    if let Some(escaped) = escape_invisible(c) {
                        node_text += escaped;
                    } else {
                        node_text.push(c);
                    }
                }
                write!(&mut stdout, "\t(")?;
                if node.is_missing() {
                    if node.is_named() {
                        write!(&mut stdout, "MISSING {node_text}")?;
                    } else {
                        write!(&mut stdout, "MISSING \"{node_text}\"")?;
                    }
                } else {
                    write!(&mut stdout, "{node_text}")?;
                }
                let start = node.start_position();
                let end = node.end_position();
                write!(
                    &mut stdout,
                    " [{}, {}] - [{}, {}])",
                    start.row, start.column, end.row, end.column
                )?;
            }
            if !opts.edits.is_empty() {
                write!(
                    &mut stdout,
                    "\n{:width$}\tEdit: {edit_duration_ms:>7.2} ms",
                    " ".repeat(path.len()),
                    width = max_path_length,
                )?;
            }
            writeln!(&mut stdout)?;
        }
        opts.stats.parse_summaries.push(ParseSummary {
            file: path.to_path_buf(),
            successful: first_error.is_none(),
            start: Some(tree.root_node().start_position().into()),
            end: Some(tree.root_node().end_position().into()),
            duration: Some(parse_duration),
            bytes: Some(source_code.len()),
        });
        return Ok(());
    }
    // The parse returned no tree: it was cancelled or timed out.
    parser.stop_printing_dot_graphs();
    if opts.print_time {
        let duration = parse_time.elapsed();
        let duration_ms = duration.as_micros() as f64 / 1e3;
        writeln!(
            &mut stdout,
            "{:width$}\tParse: {duration_ms:>7.2} ms\t(timed out)",
            path.to_str().unwrap(),
            width = max_path_length
        )?;
    }
    opts.stats.parse_summaries.push(ParseSummary {
        file: path.to_path_buf(),
        successful: false,
        start: None,
        end: None,
        duration: None,
        bytes: Some(source_code.len()),
    });
    Ok(())
}
/// Map an invisible/control character to its escaped textual form, or
/// `None` when the character needs no escaping.
const fn escape_invisible(c: char) -> Option<&'static str> {
    match c {
        '\n' => Some("\\n"),
        '\r' => Some("\\r"),
        '\t' => Some("\\t"),
        '\0' => Some("\\0"),
        '\\' => Some("\\\\"),
        '\x0b' => Some("\\v"),
        '\x0c' => Some("\\f"),
        _ => None,
    }
}
/// Map a CST quoting delimiter (backtick or double quote) to its escaped
/// form, or `None` for any other character.
const fn escape_delimiter(c: char) -> Option<&'static str> {
    match c {
        '`' => Some("\\`"),
        '\"' => Some("\\\""),
        _ => None,
    }
}
/// Walk the whole tree with `cursor`, rendering each node via
/// `cst_render_node`, then restore the cursor to the root.
///
/// `total_width` is the padding budget for the `row:col` range column.
///
/// # Errors
/// Returns an error if writing to `out` fails.
pub fn render_cst<'a, 'b: 'a>(
    source_code: &[u8],
    tree: &'b Tree,
    cursor: &mut TreeCursor<'a>,
    opts: &ParseFileOptions,
    out: &mut impl Write,
) -> Result<()> {
    let lossy_source_code = String::from_utf8_lossy(source_code);
    // NOTE(review): the closure receives (row index, line text); the width is
    // derived from digit counts of the row number and the line's length --
    // confirm this matches the intended row:col label widths.
    let total_width = lossy_source_code
        .lines()
        .enumerate()
        .map(|(row, col)| (row as f64).log10() as usize + (col.len() as f64).log10() as usize + 1)
        .max()
        .unwrap_or(1);
    // Reserve one extra indent level for the range column unless disabled.
    let mut indent_level = usize::from(!opts.no_ranges);
    let mut did_visit_children = false;
    // Tracks whether the cursor is currently inside an erroneous subtree.
    let mut in_error = false;
    loop {
        if did_visit_children {
            if cursor.goto_next_sibling() {
                did_visit_children = false;
            } else if cursor.goto_parent() {
                did_visit_children = true;
                indent_level -= 1;
                if !cursor.node().has_error() {
                    in_error = false;
                }
            } else {
                break;
            }
        } else {
            cst_render_node(
                opts,
                cursor,
                source_code,
                out,
                total_width,
                indent_level,
                in_error,
            )?;
            if cursor.goto_first_child() {
                did_visit_children = false;
                indent_level += 1;
                if cursor.node().has_error() {
                    in_error = true;
                }
            } else {
                did_visit_children = true;
            }
        }
    }
    cursor.reset(tree.root_node());
    Ok(())
}
/// Render node source text with invisible characters and quoting
/// delimiters escaped for display.
fn render_node_text(source: &str) -> String {
    let mut rendered = String::with_capacity(source.len());
    for c in source.chars() {
        // Invisible-character escapes take precedence over delimiter escapes.
        if let Some(escaped) = escape_invisible(c).or_else(|| escape_delimiter(c)) {
            rendered.push_str(escaped);
        } else {
            rendered.push(c);
        }
    }
    rendered
}
/// Write a node's source text to `out` for CST rendering.
///
/// Named nodes are quoted with backticks in the theme's backtick color;
/// unnamed nodes are quoted with `"` in the node's own color. Multi-line
/// named text is printed one source line at a time, each prefixed with an
/// adjusted range column (unless `--no-ranges`) and indented under the node.
///
/// `text_info` is `(total_width, indent_level)` for range padding/indenting.
fn write_node_text(
    opts: &ParseFileOptions,
    out: &mut impl Write,
    cursor: &TreeCursor,
    is_named: bool,
    source: &str,
    color: Option<impl Into<Color> + Copy>,
    text_info: (usize, usize),
) -> Result<()> {
    let (total_width, indent_level) = text_info;
    let (quote, quote_color) = if is_named {
        ('`', opts.parse_theme.backtick)
    } else {
        ('\"', color.map(|c| c.into()))
    };
    if !is_named {
        write!(
            out,
            "{}{}{}",
            paint(quote_color, &String::from(quote)),
            paint(color, &render_node_text(source)),
            paint(quote_color, &String::from(quote)),
        )?;
    } else {
        let multiline = source.contains('\n');
        for (i, line) in source.split_inclusive('\n').enumerate() {
            if line.is_empty() {
                break;
            }
            let mut node_range = cursor.node().range();
            // For each line of text, adjust the row by shifting it down `i` rows,
            // and adjust the column by setting it to the length of *this* line.
            node_range.start_point.row += i;
            node_range.end_point.row = node_range.start_point.row;
            node_range.end_point.column = line.len()
                + if i == 0 {
                    node_range.start_point.column
                } else {
                    0
                };
            let formatted_line = render_line_feed(line, opts);
            write!(
                out,
                "{}{}{}{}{}{}",
                if multiline { "\n" } else { " " },
                if multiline && !opts.no_ranges {
                    render_node_range(opts, cursor, is_named, true, total_width, node_range)
                } else {
                    String::new()
                },
                if multiline {
                    " ".repeat(indent_level + 1)
                } else {
                    String::new()
                },
                paint(quote_color, &String::from(quote)),
                paint(color, &render_node_text(&formatted_line)),
                paint(quote_color, &String::from(quote)),
            )?;
        }
    }
    Ok(())
}
/// Colorize line terminators in `source` with the theme's line-feed color.
/// On Windows the full `\r\n` sequence is painted; elsewhere just `\n`.
fn render_line_feed(source: &str, opts: &ParseFileOptions) -> String {
    let terminator = if cfg!(windows) { "\r\n" } else { "\n" };
    source.replace(terminator, &paint(opts.parse_theme.line_feed, terminator))
}
/// Format the padded `row:col - row:col` range prefix for a CST line,
/// painted with the theme's row color. Named, single-line nodes without a
/// field name get the distinct "named" row color.
fn render_node_range(
    opts: &ParseFileOptions,
    cursor: &TreeCursor,
    is_named: bool,
    is_multiline: bool,
    total_width: usize,
    range: Range,
) -> String {
    let has_field_name = cursor.field_name().is_some();
    let range_color = if is_named && !is_multiline && !has_field_name {
        opts.parse_theme.row_color_named
    } else {
        opts.parse_theme.row_color
    };
    // Pad by the digit counts of the row/column numbers so the range columns
    // line up. Note: `log10` of 0 is -inf, which saturates to 0 when cast to
    // usize, so row/column 0 contributes no width.
    let remaining_width_start = (total_width
        - (range.start_point.row as f64).log10() as usize
        - (range.start_point.column as f64).log10() as usize)
        .max(1);
    let remaining_width_end = (total_width
        - (range.end_point.row as f64).log10() as usize
        - (range.end_point.column as f64).log10() as usize)
        .max(1);
    paint(
        range_color,
        &format!(
            "{}:{}{:remaining_width_start$}- {}:{}{:remaining_width_end$}",
            range.start_point.row,
            range.start_point.column,
            ' ',
            range.end_point.row,
            range.end_point.column,
            ' ',
        ),
    )
}
fn cst_render_node(
opts: &ParseFileOptions,
cursor: &mut TreeCursor,
source_code: &[u8],
out: &mut impl Write,
total_width: usize,
indent_level: usize,
in_error: bool,
) -> Result<()> {
let node = cursor.node();
let is_named = node.is_named();
if !opts.no_ranges {
write!(
out,
"{}",
render_node_range(opts, cursor, is_named, false, total_width, node.range())
)?;
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | true |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests.rs | crates/cli/src/tests.rs | mod async_boundary_test;
mod corpus_test;
mod detect_language;
mod helpers;
mod highlight_test;
mod language_test;
mod node_test;
mod parser_test;
mod pathological_test;
mod query_test;
mod tags_test;
mod test_highlight_test;
mod test_tags_test;
mod text_provider_test;
mod tree_test;
#[cfg(feature = "wasm")]
mod wasm_language_test;
use tree_sitter_generate::GenerateResult;
pub use crate::fuzz::{
allocations,
edits::{get_random_edit, invert_edit},
random::Rand,
ITERATION_COUNT,
};
pub use helpers::fixtures::get_language;
/// This is a simple wrapper around [`tree_sitter_generate::generate_parser_for_grammar`], because
/// our tests do not need to pass in a version number, only the grammar JSON.
fn generate_parser(grammar_json: &str) -> GenerateResult<(String, String)> {
    // `(0, 0, 0)` is a placeholder version; these tests only care about the
    // generated parser sources, not the grammar's real version number.
    tree_sitter_generate::generate_parser_for_grammar(grammar_json, Some((0, 0, 0)))
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tree_sitter_cli.rs | crates/cli/src/tree_sitter_cli.rs | #![cfg_attr(not(any(test, doctest)), doc = include_str!("../README.md"))]
pub mod fuzz;
pub mod highlight;
pub mod init;
pub mod input;
pub mod logger;
pub mod parse;
pub mod playground;
pub mod query;
pub mod query_testing;
pub mod tags;
pub mod test;
pub mod test_highlight;
pub mod test_tags;
pub mod util;
pub mod version;
pub mod wasm;
#[cfg(test)]
mod tests;
#[cfg(doctest)]
mod tests;
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/fuzz.rs | crates/cli/src/fuzz.rs | use std::{
collections::HashMap,
env, fs,
path::{Path, PathBuf},
sync::LazyLock,
};
use log::{error, info};
use rand::Rng;
use regex::Regex;
use tree_sitter::{Language, Parser};
pub mod allocations;
pub mod corpus_test;
pub mod edits;
pub mod random;
pub mod scope_sequence;
use crate::{
fuzz::{
corpus_test::{
check_changed_ranges, check_consistent_sizes, get_parser, set_included_ranges,
},
edits::{get_random_edit, invert_edit},
random::Rand,
},
parse::perform_edit,
test::{parse_tests, strip_sexp_fields, DiffKey, TestDiff, TestEntry},
};
/// Whether parser logging is enabled (`TREE_SITTER_LOG`).
pub static LOG_ENABLED: LazyLock<bool> = LazyLock::new(|| env::var("TREE_SITTER_LOG").is_ok());
/// Whether parse-graph logging is enabled (`TREE_SITTER_LOG_GRAPHS`).
pub static LOG_GRAPH_ENABLED: LazyLock<bool> =
    LazyLock::new(|| env::var("TREE_SITTER_LOG_GRAPHS").is_ok());
/// Optional language-name filter (`TREE_SITTER_LANGUAGE`).
pub static LANGUAGE_FILTER: LazyLock<Option<String>> =
    LazyLock::new(|| env::var("TREE_SITTER_LANGUAGE").ok());
/// Optional regex of example names to include (`TREE_SITTER_EXAMPLE_INCLUDE`).
pub static EXAMPLE_INCLUDE: LazyLock<Option<Regex>> =
    LazyLock::new(|| regex_env_var("TREE_SITTER_EXAMPLE_INCLUDE"));
/// Optional regex of example names to exclude (`TREE_SITTER_EXAMPLE_EXCLUDE`).
pub static EXAMPLE_EXCLUDE: LazyLock<Option<Regex>> =
    LazyLock::new(|| regex_env_var("TREE_SITTER_EXAMPLE_EXCLUDE"));
/// Starting fuzz seed (`TREE_SITTER_SEED`, or a fresh random seed).
pub static START_SEED: LazyLock<usize> = LazyLock::new(new_seed);
/// Number of random edits per fuzz trial (`TREE_SITTER_EDITS`, default 3).
pub static EDIT_COUNT: LazyLock<usize> =
    LazyLock::new(|| int_env_var("TREE_SITTER_EDITS").unwrap_or(3));
/// Number of fuzz trials per test (`TREE_SITTER_ITERATIONS`, default 10).
pub static ITERATION_COUNT: LazyLock<usize> =
    LazyLock::new(|| int_env_var("TREE_SITTER_ITERATIONS").unwrap_or(10));
/// Read the environment variable `name` and parse it as a `usize`,
/// returning `None` when it is unset or not a valid integer.
fn int_env_var(name: &'static str) -> Option<usize> {
    match env::var(name) {
        Ok(value) => value.parse().ok(),
        Err(_) => None,
    }
}
/// Read the environment variable `name` and compile it as a regex,
/// returning `None` when it is unset or fails to compile.
fn regex_env_var(name: &'static str) -> Option<Regex> {
    let pattern = env::var(name).ok()?;
    Regex::new(&pattern).ok()
}
/// Resolve the starting fuzz seed: `TREE_SITTER_SEED` when set, otherwise a
/// freshly generated random seed (logged so failing runs can be reproduced).
#[must_use]
pub fn new_seed() -> usize {
    int_env_var("TREE_SITTER_SEED").unwrap_or_else(|| {
        let mut rng = rand::thread_rng();
        let seed = rng.gen::<usize>();
        info!("Seed: {seed}");
        seed
    })
}
/// Options controlling a corpus fuzzing run.
pub struct FuzzOptions {
    // Test names to skip entirely.
    pub skipped: Option<Vec<String>>,
    // Subdirectory of the grammar dir containing `test/corpus`.
    pub subdir: Option<PathBuf>,
    // Number of random edits to apply per trial.
    pub edits: usize,
    // Number of trials to run per test.
    pub iterations: usize,
    // Only run tests whose name matches this regex.
    pub include: Option<Regex>,
    // Skip tests whose name matches this regex.
    pub exclude: Option<Regex>,
    // Log parse graphs to `log.html`.
    pub log_graphs: bool,
    // Enable parser logging.
    pub log: bool,
}
pub fn fuzz_language_corpus(
language: &Language,
language_name: &str,
start_seed: usize,
grammar_dir: &Path,
options: &mut FuzzOptions,
) {
fn retain(entry: &mut TestEntry, language_name: &str) -> bool {
match entry {
TestEntry::Example { attributes, .. } => {
attributes.languages[0].is_empty()
|| attributes
.languages
.iter()
.any(|lang| lang.as_ref() == language_name)
}
TestEntry::Group {
ref mut children, ..
} => {
children.retain_mut(|child| retain(child, language_name));
!children.is_empty()
}
}
}
let subdir = options.subdir.take().unwrap_or_default();
let corpus_dir = grammar_dir.join(subdir).join("test").join("corpus");
if !corpus_dir.exists() || !corpus_dir.is_dir() {
error!("No corpus directory found, ensure that you have a `test/corpus` directory in your grammar directory with at least one test file.");
return;
}
if std::fs::read_dir(&corpus_dir).unwrap().count() == 0 {
error!("No corpus files found in `test/corpus`, ensure that you have at least one test file in your corpus directory.");
return;
}
let mut main_tests = parse_tests(&corpus_dir).unwrap();
match main_tests {
TestEntry::Group {
ref mut children, ..
} => {
children.retain_mut(|child| retain(child, language_name));
}
TestEntry::Example { .. } => unreachable!(),
}
let tests = flatten_tests(
main_tests,
options.include.as_ref(),
options.exclude.as_ref(),
);
let get_test_name = |test: &FlattenedTest| format!("{language_name} - {}", test.name);
let mut skipped = options
.skipped
.take()
.unwrap_or_default()
.into_iter()
.chain(tests.iter().filter(|x| x.skip).map(get_test_name))
.map(|x| (x, 0))
.collect::<HashMap<String, usize>>();
let mut failure_count = 0;
let log_seed = env::var("TREE_SITTER_LOG_SEED").is_ok();
let dump_edits = env::var("TREE_SITTER_DUMP_EDITS").is_ok();
if log_seed {
info!(" start seed: {start_seed}");
}
println!();
for (test_index, test) in tests.iter().enumerate() {
let test_name = get_test_name(test);
if let Some(counter) = skipped.get_mut(test_name.as_str()) {
println!(" {test_index}. {test_name} - SKIPPED");
*counter += 1;
continue;
}
println!(" {test_index}. {test_name}");
let passed = allocations::record_checked(|| {
let mut log_session = None;
let mut parser = get_parser(&mut log_session, "log.html");
parser.set_language(language).unwrap();
set_included_ranges(&mut parser, &test.input, test.template_delimiters);
let tree = parser.parse(&test.input, None).unwrap();
if test.error {
return true;
}
let mut actual_output = tree.root_node().to_sexp();
if !test.has_fields {
actual_output = strip_sexp_fields(&actual_output);
}
if actual_output != test.output {
println!("Incorrect initial parse for {test_name}");
DiffKey::print();
println!("{}", TestDiff::new(&actual_output, &test.output));
println!();
return false;
}
true
})
.unwrap_or_else(|e| {
error!("{e}");
false
});
if !passed {
failure_count += 1;
continue;
}
let mut parser = Parser::new();
parser.set_language(language).unwrap();
let tree = parser.parse(&test.input, None).unwrap();
drop(parser);
for trial in 0..options.iterations {
let seed = start_seed + trial;
let passed = allocations::record_checked(|| {
let mut rand = Rand::new(seed);
let mut log_session = None;
let mut parser = get_parser(&mut log_session, "log.html");
parser.set_language(language).unwrap();
let mut tree = tree.clone();
let mut input = test.input.clone();
if options.log_graphs {
info!("{}\n", String::from_utf8_lossy(&input));
}
// Perform a random series of edits and reparse.
let edit_count = rand.unsigned(*EDIT_COUNT);
let mut undo_stack = Vec::with_capacity(edit_count);
for _ in 0..=edit_count {
let edit = get_random_edit(&mut rand, &input);
undo_stack.push(invert_edit(&input, &edit));
perform_edit(&mut tree, &mut input, &edit).unwrap();
}
if log_seed {
info!(" {test_index}.{trial:<2} seed: {seed}");
}
if dump_edits {
fs::create_dir_all("fuzz").unwrap();
fs::write(
Path::new("fuzz")
.join(format!("edit.{seed}.{test_index}.{trial} {test_name}")),
&input,
)
.unwrap();
}
if options.log_graphs {
info!("{}\n", String::from_utf8_lossy(&input));
}
set_included_ranges(&mut parser, &input, test.template_delimiters);
let mut tree2 = parser.parse(&input, Some(&tree)).unwrap();
// Check that the new tree is consistent.
check_consistent_sizes(&tree2, &input);
if let Err(message) = check_changed_ranges(&tree, &tree2, &input) {
error!("\nUnexpected scope change in seed {seed} with start seed {start_seed}\n{message}\n\n",);
return false;
}
// Undo all of the edits and re-parse again.
while let Some(edit) = undo_stack.pop() {
perform_edit(&mut tree2, &mut input, &edit).unwrap();
}
if options.log_graphs {
info!("{}\n", String::from_utf8_lossy(&input));
}
set_included_ranges(&mut parser, &test.input, test.template_delimiters);
let tree3 = parser.parse(&input, Some(&tree2)).unwrap();
// Verify that the final tree matches the expectation from the corpus.
let mut actual_output = tree3.root_node().to_sexp();
if !test.has_fields {
actual_output = strip_sexp_fields(&actual_output);
}
if actual_output != test.output && !test.error {
println!("Incorrect parse for {test_name} - seed {seed}");
DiffKey::print();
println!("{}", TestDiff::new(&actual_output, &test.output));
println!();
return false;
}
// Check that the edited tree is consistent.
check_consistent_sizes(&tree3, &input);
if let Err(message) = check_changed_ranges(&tree2, &tree3, &input) {
error!("Unexpected scope change in seed {seed} with start seed {start_seed}\n{message}\n\n");
return false;
}
true
}).unwrap_or_else(|e| {
error!("{e}");
false
});
if !passed {
failure_count += 1;
break;
}
}
}
if failure_count != 0 {
info!("{failure_count} {language_name} corpus tests failed fuzzing");
}
skipped.retain(|_, v| *v == 0);
if !skipped.is_empty() {
info!("Non matchable skip definitions:");
for k in skipped.keys() {
info!(" {k}");
}
panic!("Non matchable skip definitions need to be removed");
}
}
pub struct FlattenedTest {
pub name: String,
pub input: Vec<u8>,
pub output: String,
pub languages: Vec<Box<str>>,
pub error: bool,
pub skip: bool,
pub has_fields: bool,
pub template_delimiters: Option<(&'static str, &'static str)>,
}
#[must_use]
pub fn flatten_tests(
test: TestEntry,
include: Option<&Regex>,
exclude: Option<&Regex>,
) -> Vec<FlattenedTest> {
fn helper(
test: TestEntry,
include: Option<&Regex>,
exclude: Option<&Regex>,
is_root: bool,
prefix: &str,
result: &mut Vec<FlattenedTest>,
) {
match test {
TestEntry::Example {
mut name,
input,
output,
has_fields,
attributes,
..
} => {
if !prefix.is_empty() {
name.insert_str(0, " - ");
name.insert_str(0, prefix);
}
if let Some(include) = include {
if !include.is_match(&name) {
return;
}
} else if let Some(exclude) = exclude {
if exclude.is_match(&name) {
return;
}
}
result.push(FlattenedTest {
name,
input,
output,
has_fields,
languages: attributes.languages,
error: attributes.error,
skip: attributes.skip,
template_delimiters: None,
});
}
TestEntry::Group {
mut name, children, ..
} => {
if !is_root && !prefix.is_empty() {
name.insert_str(0, " - ");
name.insert_str(0, prefix);
}
for child in children {
helper(child, include, exclude, false, &name, result);
}
}
}
}
let mut result = Vec::new();
helper(test, include, exclude, true, "", &mut result);
result
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/logger.rs | crates/cli/src/logger.rs | use std::io::Write;
use anstyle::{AnsiColor, Color, Style};
use log::{Level, LevelFilter, Log, Metadata, Record};
pub fn paint(color: Option<impl Into<Color>>, text: &str) -> String {
let style = Style::new().fg_color(color.map(Into::into));
format!("{style}{text}{style:#}")
}
struct Logger;
impl Log for Logger {
fn enabled(&self, _: &Metadata) -> bool {
true
}
fn log(&self, record: &Record) {
match record.level() {
Level::Error => eprintln!(
"{} {}",
paint(Some(AnsiColor::Red), "Error:"),
record.args()
),
Level::Warn => eprintln!(
"{} {}",
paint(Some(AnsiColor::Yellow), "Warning:"),
record.args()
),
Level::Info | Level::Debug => eprintln!("{}", record.args()),
Level::Trace => eprintln!(
"[{}] {}",
record
.module_path()
.unwrap_or_default()
.trim_start_matches("rust_tree_sitter_cli::"),
record.args()
),
}
}
fn flush(&self) {
let mut stderr = std::io::stderr().lock();
let _ = stderr.flush();
}
}
pub fn init() {
log::set_boxed_logger(Box::new(Logger {})).unwrap();
log::set_max_level(LevelFilter::Info);
}
pub fn enable_debug() {
log::set_max_level(LevelFilter::Debug);
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/test_tags.rs | crates/cli/src/test_tags.rs | use std::{fs, path::Path};
use anyhow::{anyhow, Result};
use tree_sitter_loader::{Config, Loader};
use tree_sitter_tags::{TagsConfiguration, TagsContext};
use crate::{
query_testing::{parse_position_comments, to_utf8_point, Assertion, Utf8Point},
test::{TestInfo, TestOutcome, TestResult, TestSummary},
util,
};
#[derive(Debug)]
pub struct Failure {
row: usize,
column: usize,
expected_tag: String,
actual_tags: Vec<String>,
}
impl std::error::Error for Failure {}
impl std::fmt::Display for Failure {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(
f,
"Failure - row: {}, column: {}, expected tag: '{}', actual tag: ",
self.row, self.column, self.expected_tag
)?;
if self.actual_tags.is_empty() {
write!(f, "none.")?;
} else {
for (i, actual_tag) in self.actual_tags.iter().enumerate() {
if i > 0 {
write!(f, ", ")?;
}
write!(f, "'{actual_tag}'")?;
}
}
Ok(())
}
}
pub fn test_tags(
loader: &Loader,
loader_config: &Config,
tags_context: &mut TagsContext,
directory: &Path,
test_summary: &mut TestSummary,
) -> Result<()> {
let mut failed = false;
for tag_test_file in fs::read_dir(directory)? {
let tag_test_file = tag_test_file?;
let test_file_path = tag_test_file.path();
let test_file_name = tag_test_file.file_name();
if test_file_path.is_dir() && test_file_path.read_dir()?.next().is_some() {
test_summary
.tag_results
.add_group(test_file_name.to_string_lossy().as_ref());
if test_tags(
loader,
loader_config,
tags_context,
&test_file_path,
test_summary,
)
.is_err()
{
failed = true;
}
test_summary.tag_results.pop_traversal();
} else {
let (language, language_config) = loader
.language_configuration_for_file_name(&test_file_path)?
.ok_or_else(|| {
anyhow!(
"{}",
util::lang_not_found_for_path(test_file_path.as_path(), loader_config)
)
})?;
let tags_config = language_config
.tags_config(language)?
.ok_or_else(|| anyhow!("No tags config found for {}", test_file_path.display()))?;
match test_tag(
tags_context,
tags_config,
fs::read(&test_file_path)?.as_slice(),
) {
Ok(assertion_count) => {
test_summary.tag_results.add_case(TestResult {
name: test_file_name.to_string_lossy().to_string(),
info: TestInfo::AssertionTest {
outcome: TestOutcome::AssertionPassed { assertion_count },
test_num: test_summary.test_num,
},
});
}
Err(e) => {
test_summary.tag_results.add_case(TestResult {
name: test_file_name.to_string_lossy().to_string(),
info: TestInfo::AssertionTest {
outcome: TestOutcome::AssertionFailed {
error: e.to_string(),
},
test_num: test_summary.test_num,
},
});
failed = true;
}
}
test_summary.test_num += 1;
}
}
if failed {
Err(anyhow!(""))
} else {
Ok(())
}
}
pub fn test_tag(
tags_context: &mut TagsContext,
tags_config: &TagsConfiguration,
source: &[u8],
) -> Result<usize> {
let tags = get_tag_positions(tags_context, tags_config, source)?;
let assertions = parse_position_comments(tags_context.parser(), &tags_config.language, source)?;
// Iterate through all of the assertions, checking against the actual tags.
let mut i = 0;
let mut actual_tags = Vec::<&String>::new();
for Assertion {
position,
length,
negative,
expected_capture_name: expected_tag,
} in &assertions
{
let mut passed = false;
let mut end_column = position.column + length - 1;
'tag_loop: while let Some(tag) = tags.get(i) {
if tag.1 <= *position {
i += 1;
continue;
}
// Iterate through all of the tags that start at or before this assertion's
// position, looking for one that matches the assertion
let mut j = i;
while let (false, Some(tag)) = (passed, tags.get(j)) {
end_column = position.column + length - 1;
if tag.0.column > end_column {
break 'tag_loop;
}
let tag_name = &tag.2;
if (*tag_name == *expected_tag) == *negative {
actual_tags.push(tag_name);
} else {
passed = true;
break 'tag_loop;
}
j += 1;
if tag == tags.last().unwrap() {
break 'tag_loop;
}
}
}
if !passed {
return Err(Failure {
row: position.row,
column: end_column,
expected_tag: expected_tag.clone(),
actual_tags: actual_tags.into_iter().cloned().collect(),
}
.into());
}
}
Ok(assertions.len())
}
pub fn get_tag_positions(
tags_context: &mut TagsContext,
tags_config: &TagsConfiguration,
source: &[u8],
) -> Result<Vec<(Utf8Point, Utf8Point, String)>> {
let (tags_iter, _has_error) = tags_context.generate_tags(tags_config, source, None)?;
let tag_positions = tags_iter
.filter_map(std::result::Result::ok)
.map(|tag| {
let tag_postfix = tags_config.syntax_type_name(tag.syntax_type_id).to_string();
let tag_name = if tag.is_definition {
format!("definition.{tag_postfix}")
} else {
format!("reference.{tag_postfix}")
};
(
to_utf8_point(tag.span.start, source),
to_utf8_point(tag.span.end, source),
tag_name,
)
})
.collect();
Ok(tag_positions)
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/playground.rs | crates/cli/src/playground.rs | use std::{
borrow::Cow,
env, fs,
net::TcpListener,
path::{Path, PathBuf},
str::{self, FromStr as _},
};
use anyhow::{anyhow, Context, Result};
use log::{error, info};
use tiny_http::{Header, Response, Server};
use super::wasm;
macro_rules! optional_resource {
($name:tt, $path:tt) => {
#[cfg(TREE_SITTER_EMBED_WASM_BINDING)]
fn $name(tree_sitter_dir: Option<&Path>) -> Cow<'static, [u8]> {
if let Some(tree_sitter_dir) = tree_sitter_dir {
Cow::Owned(fs::read(tree_sitter_dir.join($path)).unwrap())
} else {
Cow::Borrowed(include_bytes!(concat!("../../../", $path)))
}
}
#[cfg(not(TREE_SITTER_EMBED_WASM_BINDING))]
fn $name(tree_sitter_dir: Option<&Path>) -> Cow<'static, [u8]> {
if let Some(tree_sitter_dir) = tree_sitter_dir {
Cow::Owned(fs::read(tree_sitter_dir.join($path)).unwrap())
} else {
Cow::Borrowed(&[])
}
}
};
}
optional_resource!(get_playground_js, "docs/src/assets/js/playground.js");
optional_resource!(get_lib_js, "lib/binding_web/web-tree-sitter.js");
optional_resource!(get_lib_wasm, "lib/binding_web/web-tree-sitter.wasm");
fn get_main_html(tree_sitter_dir: Option<&Path>) -> Cow<'static, [u8]> {
tree_sitter_dir.map_or(
Cow::Borrowed(include_bytes!("playground.html")),
|tree_sitter_dir| {
Cow::Owned(fs::read(tree_sitter_dir.join("crates/cli/src/playground.html")).unwrap())
},
)
}
pub fn export(grammar_path: &Path, export_path: &Path) -> Result<()> {
let (grammar_name, language_wasm) = wasm::load_language_wasm_file(grammar_path)?;
fs::create_dir_all(export_path).with_context(|| {
format!(
"Failed to create export directory: {}",
export_path.display()
)
})?;
let tree_sitter_dir = env::var("TREE_SITTER_BASE_DIR").map(PathBuf::from).ok();
let playground_js = get_playground_js(tree_sitter_dir.as_deref());
let lib_js = get_lib_js(tree_sitter_dir.as_deref());
let lib_wasm = get_lib_wasm(tree_sitter_dir.as_deref());
let has_local_playground_js = !playground_js.is_empty();
let has_local_lib_js = !lib_js.is_empty();
let has_local_lib_wasm = !lib_wasm.is_empty();
let mut main_html = str::from_utf8(&get_main_html(tree_sitter_dir.as_deref()))
.unwrap()
.replace("THE_LANGUAGE_NAME", &grammar_name);
if !has_local_playground_js {
main_html = main_html.replace(
r#"<script type="module" src="playground.js"></script>"#,
r#"<script type="module" src="https://tree-sitter.github.io/tree-sitter/assets/js/playground.js"></script>"#
);
}
if !has_local_lib_js {
main_html = main_html.replace(
"import * as TreeSitter from './web-tree-sitter.js';",
"import * as TreeSitter from 'https://tree-sitter.github.io/web-tree-sitter.js';",
);
}
fs::write(export_path.join("index.html"), main_html.as_bytes())
.with_context(|| "Failed to write index.html")?;
fs::write(export_path.join("tree-sitter-parser.wasm"), language_wasm)
.with_context(|| "Failed to write parser wasm file")?;
if has_local_playground_js {
fs::write(export_path.join("playground.js"), playground_js)
.with_context(|| "Failed to write playground.js")?;
}
if has_local_lib_js {
fs::write(export_path.join("web-tree-sitter.js"), lib_js)
.with_context(|| "Failed to write web-tree-sitter.js")?;
}
if has_local_lib_wasm {
fs::write(export_path.join("web-tree-sitter.wasm"), lib_wasm)
.with_context(|| "Failed to write web-tree-sitter.wasm")?;
}
println!(
"Exported playground to {}",
export_path.canonicalize()?.display()
);
Ok(())
}
pub fn serve(grammar_path: &Path, open_in_browser: bool) -> Result<()> {
let server = get_server()?;
let (grammar_name, language_wasm) = wasm::load_language_wasm_file(grammar_path)?;
let url = format!("http://{}", server.server_addr());
info!("Started playground on: {url}");
if open_in_browser && webbrowser::open(&url).is_err() {
error!("Failed to open '{url}' in a web browser");
}
let tree_sitter_dir = env::var("TREE_SITTER_BASE_DIR").map(PathBuf::from).ok();
let main_html = str::from_utf8(&get_main_html(tree_sitter_dir.as_deref()))
.unwrap()
.replace("THE_LANGUAGE_NAME", &grammar_name)
.into_bytes();
let playground_js = get_playground_js(tree_sitter_dir.as_deref());
let lib_js = get_lib_js(tree_sitter_dir.as_deref());
let lib_wasm = get_lib_wasm(tree_sitter_dir.as_deref());
let html_header = Header::from_str("Content-Type: text/html").unwrap();
let js_header = Header::from_str("Content-Type: application/javascript").unwrap();
let wasm_header = Header::from_str("Content-Type: application/wasm").unwrap();
for request in server.incoming_requests() {
let res = match request.url() {
"/" => response(&main_html, &html_header),
"/tree-sitter-parser.wasm" => response(&language_wasm, &wasm_header),
"/playground.js" => {
if playground_js.is_empty() {
redirect("https://tree-sitter.github.io/tree-sitter/assets/js/playground.js")
} else {
response(&playground_js, &js_header)
}
}
"/web-tree-sitter.js" => {
if lib_js.is_empty() {
redirect("https://tree-sitter.github.io/web-tree-sitter.js")
} else {
response(&lib_js, &js_header)
}
}
"/web-tree-sitter.wasm" => {
if lib_wasm.is_empty() {
redirect("https://tree-sitter.github.io/web-tree-sitter.wasm")
} else {
response(&lib_wasm, &wasm_header)
}
}
_ => response(b"Not found", &html_header).with_status_code(404),
};
request
.respond(res)
.with_context(|| "Failed to write HTTP response")?;
}
Ok(())
}
fn redirect(url: &str) -> Response<&[u8]> {
Response::empty(302)
.with_data("".as_bytes(), Some(0))
.with_header(Header::from_bytes("Location", url.as_bytes()).unwrap())
}
fn response<'a>(data: &'a [u8], header: &Header) -> Response<&'a [u8]> {
Response::empty(200)
.with_data(data, Some(data.len()))
.with_header(header.clone())
}
fn get_server() -> Result<Server> {
let addr = env::var("TREE_SITTER_PLAYGROUND_ADDR").unwrap_or_else(|_| "127.0.0.1".to_owned());
let port = env::var("TREE_SITTER_PLAYGROUND_PORT")
.map(|v| {
v.parse::<u16>()
.with_context(|| "Invalid port specification")
})
.ok();
let listener = match port {
Some(port) => {
bind_to(&addr, port?).with_context(|| "Failed to bind to the specified port")?
}
None => get_listener_on_available_port(&addr)
.with_context(|| "Failed to find a free port to bind to it")?,
};
let server =
Server::from_listener(listener, None).map_err(|_| anyhow!("Failed to start web server"))?;
Ok(server)
}
fn get_listener_on_available_port(addr: &str) -> Option<TcpListener> {
(8000..12000).find_map(|port| bind_to(addr, port))
}
fn bind_to(addr: &str, port: u16) -> Option<TcpListener> {
TcpListener::bind(format!("{addr}:{port}")).ok()
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/util.rs | crates/cli/src/util.rs | use std::{
path::{Path, PathBuf},
process::{Child, ChildStdin, Command, Stdio},
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
};
use anyhow::{anyhow, Context, Result};
use indoc::indoc;
use log::error;
use tree_sitter::{Parser, Tree};
use tree_sitter_config::Config;
use tree_sitter_loader::Config as LoaderConfig;
const HTML_HEADER: &[u8] = b"
<!DOCTYPE html>
<style>
svg { width: 100%; }
</style>
";
#[must_use]
pub fn lang_not_found_for_path(path: &Path, loader_config: &LoaderConfig) -> String {
let path = path.display();
format!(
indoc! {"
No language found for path `{}`
If a language should be associated with this file extension, please ensure the path to `{}` is inside one of the following directories as specified by your 'config.json':\n\n{}\n
If the directory that contains the relevant grammar for `{}` is not listed above, please add the directory to the list of directories in your config file, {}
"},
path,
path,
loader_config
.parser_directories
.iter()
.enumerate()
.map(|(i, d)| format!(" {}. {}", i + 1, d.display()))
.collect::<Vec<_>>()
.join(" \n"),
path,
if let Ok(Some(config_path)) = Config::find_config_file() {
format!("located at {}", config_path.display())
} else {
String::from("which you need to create by running `tree-sitter init-config`")
}
)
}
#[must_use]
pub fn cancel_on_signal() -> Arc<AtomicUsize> {
let result = Arc::new(AtomicUsize::new(0));
ctrlc::set_handler({
let flag = result.clone();
move || {
flag.store(1, Ordering::Relaxed);
}
})
.expect("Error setting Ctrl-C handler");
result
}
pub struct LogSession {
path: PathBuf,
dot_process: Option<Child>,
dot_process_stdin: Option<ChildStdin>,
open_log: bool,
}
pub fn print_tree_graph(tree: &Tree, path: &str, quiet: bool) -> Result<()> {
let session = LogSession::new(path, quiet)?;
tree.print_dot_graph(session.dot_process_stdin.as_ref().unwrap());
Ok(())
}
pub fn log_graphs(parser: &mut Parser, path: &str, open_log: bool) -> Result<LogSession> {
let session = LogSession::new(path, open_log)?;
parser.print_dot_graphs(session.dot_process_stdin.as_ref().unwrap());
Ok(session)
}
impl LogSession {
fn new(path: &str, open_log: bool) -> Result<Self> {
use std::io::Write;
let mut dot_file = std::fs::File::create(path)?;
dot_file.write_all(HTML_HEADER)?;
let mut dot_process = Command::new("dot")
.arg("-Tsvg")
.stdin(Stdio::piped())
.stdout(dot_file)
.spawn()
.with_context(|| {
"Failed to run the `dot` command. Check that graphviz is installed."
})?;
let dot_stdin = dot_process
.stdin
.take()
.ok_or_else(|| anyhow!("Failed to open stdin for `dot` process."))?;
Ok(Self {
path: PathBuf::from(path),
dot_process: Some(dot_process),
dot_process_stdin: Some(dot_stdin),
open_log,
})
}
}
impl Drop for LogSession {
fn drop(&mut self) {
use std::fs;
drop(self.dot_process_stdin.take().unwrap());
let output = self.dot_process.take().unwrap().wait_with_output().unwrap();
if output.status.success() {
if self.open_log && fs::metadata(&self.path).unwrap().len() > HTML_HEADER.len() as u64 {
webbrowser::open(&self.path.to_string_lossy()).unwrap();
}
} else {
error!(
"Dot failed: {} {}",
String::from_utf8_lossy(&output.stdout),
String::from_utf8_lossy(&output.stderr)
);
}
}
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/init.rs | crates/cli/src/init.rs | use std::{
fs,
path::{Path, PathBuf},
str::{self, FromStr},
};
use anyhow::{anyhow, Context, Result};
use crc32fast::hash as crc32;
use heck::{ToKebabCase, ToShoutySnakeCase, ToSnakeCase, ToUpperCamelCase};
use indoc::{formatdoc, indoc};
use log::info;
use rand::{thread_rng, Rng};
use semver::Version;
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};
use tree_sitter_generate::write_file;
use tree_sitter_loader::{
Author, Bindings, Grammar, Links, Metadata, PathsJSON, TreeSitterJSON,
DEFAULT_HIGHLIGHTS_QUERY_FILE_NAME, DEFAULT_INJECTIONS_QUERY_FILE_NAME,
DEFAULT_LOCALS_QUERY_FILE_NAME, DEFAULT_TAGS_QUERY_FILE_NAME,
};
const CLI_VERSION: &str = env!("CARGO_PKG_VERSION");
const CLI_VERSION_PLACEHOLDER: &str = "CLI_VERSION";
const ABI_VERSION_MAX: usize = tree_sitter::LANGUAGE_VERSION;
const ABI_VERSION_MAX_PLACEHOLDER: &str = "ABI_VERSION_MAX";
const PARSER_NAME_PLACEHOLDER: &str = "PARSER_NAME";
const CAMEL_PARSER_NAME_PLACEHOLDER: &str = "CAMEL_PARSER_NAME";
const TITLE_PARSER_NAME_PLACEHOLDER: &str = "TITLE_PARSER_NAME";
const UPPER_PARSER_NAME_PLACEHOLDER: &str = "UPPER_PARSER_NAME";
const LOWER_PARSER_NAME_PLACEHOLDER: &str = "LOWER_PARSER_NAME";
const KEBAB_PARSER_NAME_PLACEHOLDER: &str = "KEBAB_PARSER_NAME";
const PARSER_CLASS_NAME_PLACEHOLDER: &str = "PARSER_CLASS_NAME";
const PARSER_DESCRIPTION_PLACEHOLDER: &str = "PARSER_DESCRIPTION";
const PARSER_LICENSE_PLACEHOLDER: &str = "PARSER_LICENSE";
const PARSER_NS_PLACEHOLDER: &str = "PARSER_NS";
const PARSER_NS_CLEANED_PLACEHOLDER: &str = "PARSER_NS_CLEANED";
const PARSER_URL_PLACEHOLDER: &str = "PARSER_URL";
const PARSER_URL_STRIPPED_PLACEHOLDER: &str = "PARSER_URL_STRIPPED";
const PARSER_VERSION_PLACEHOLDER: &str = "PARSER_VERSION";
const PARSER_FINGERPRINT_PLACEHOLDER: &str = "PARSER_FINGERPRINT";
const AUTHOR_NAME_PLACEHOLDER: &str = "PARSER_AUTHOR_NAME";
const AUTHOR_EMAIL_PLACEHOLDER: &str = "PARSER_AUTHOR_EMAIL";
const AUTHOR_URL_PLACEHOLDER: &str = "PARSER_AUTHOR_URL";
const AUTHOR_BLOCK_JS: &str = "\n \"author\": {";
const AUTHOR_NAME_PLACEHOLDER_JS: &str = "\n \"name\": \"PARSER_AUTHOR_NAME\",";
const AUTHOR_EMAIL_PLACEHOLDER_JS: &str = ",\n \"email\": \"PARSER_AUTHOR_EMAIL\"";
const AUTHOR_URL_PLACEHOLDER_JS: &str = ",\n \"url\": \"PARSER_AUTHOR_URL\"";
const AUTHOR_BLOCK_PY: &str = "\nauthors = [{";
const AUTHOR_NAME_PLACEHOLDER_PY: &str = "name = \"PARSER_AUTHOR_NAME\"";
const AUTHOR_EMAIL_PLACEHOLDER_PY: &str = ", email = \"PARSER_AUTHOR_EMAIL\"";
const AUTHOR_BLOCK_RS: &str = "\nauthors = [";
const AUTHOR_NAME_PLACEHOLDER_RS: &str = "PARSER_AUTHOR_NAME";
const AUTHOR_EMAIL_PLACEHOLDER_RS: &str = " PARSER_AUTHOR_EMAIL";
const AUTHOR_BLOCK_JAVA: &str = "\n <developer>";
const AUTHOR_NAME_PLACEHOLDER_JAVA: &str = "\n <name>PARSER_AUTHOR_NAME</name>";
const AUTHOR_EMAIL_PLACEHOLDER_JAVA: &str = "\n <email>PARSER_AUTHOR_EMAIL</email>";
const AUTHOR_URL_PLACEHOLDER_JAVA: &str = "\n <url>PARSER_AUTHOR_URL</url>";
const AUTHOR_BLOCK_GRAMMAR: &str = "\n * @author ";
const AUTHOR_NAME_PLACEHOLDER_GRAMMAR: &str = "PARSER_AUTHOR_NAME";
const AUTHOR_EMAIL_PLACEHOLDER_GRAMMAR: &str = " PARSER_AUTHOR_EMAIL";
const FUNDING_URL_PLACEHOLDER: &str = "FUNDING_URL";
const HIGHLIGHTS_QUERY_PATH_PLACEHOLDER: &str = "HIGHLIGHTS_QUERY_PATH";
const INJECTIONS_QUERY_PATH_PLACEHOLDER: &str = "INJECTIONS_QUERY_PATH";
const LOCALS_QUERY_PATH_PLACEHOLDER: &str = "LOCALS_QUERY_PATH";
const TAGS_QUERY_PATH_PLACEHOLDER: &str = "TAGS_QUERY_PATH";
const GRAMMAR_JS_TEMPLATE: &str = include_str!("./templates/grammar.js");
const PACKAGE_JSON_TEMPLATE: &str = include_str!("./templates/package.json");
const GITIGNORE_TEMPLATE: &str = include_str!("./templates/gitignore");
const GITATTRIBUTES_TEMPLATE: &str = include_str!("./templates/gitattributes");
const EDITORCONFIG_TEMPLATE: &str = include_str!("./templates/.editorconfig");
const RUST_BINDING_VERSION: &str = env!("CARGO_PKG_VERSION");
const RUST_BINDING_VERSION_PLACEHOLDER: &str = "RUST_BINDING_VERSION";
const LIB_RS_TEMPLATE: &str = include_str!("./templates/lib.rs");
const BUILD_RS_TEMPLATE: &str = include_str!("./templates/build.rs");
const CARGO_TOML_TEMPLATE: &str = include_str!("./templates/_cargo.toml");
const INDEX_JS_TEMPLATE: &str = include_str!("./templates/index.js");
const INDEX_D_TS_TEMPLATE: &str = include_str!("./templates/index.d.ts");
const JS_BINDING_CC_TEMPLATE: &str = include_str!("./templates/js-binding.cc");
const BINDING_GYP_TEMPLATE: &str = include_str!("./templates/binding.gyp");
const BINDING_TEST_JS_TEMPLATE: &str = include_str!("./templates/binding_test.js");
const MAKEFILE_TEMPLATE: &str = include_str!("./templates/makefile");
const CMAKELISTS_TXT_TEMPLATE: &str = include_str!("./templates/cmakelists.cmake");
const PARSER_NAME_H_TEMPLATE: &str = include_str!("./templates/PARSER_NAME.h");
const PARSER_NAME_PC_IN_TEMPLATE: &str = include_str!("./templates/PARSER_NAME.pc.in");
const GO_MOD_TEMPLATE: &str = include_str!("./templates/go.mod");
const BINDING_GO_TEMPLATE: &str = include_str!("./templates/binding.go");
const BINDING_TEST_GO_TEMPLATE: &str = include_str!("./templates/binding_test.go");
const SETUP_PY_TEMPLATE: &str = include_str!("./templates/setup.py");
const INIT_PY_TEMPLATE: &str = include_str!("./templates/__init__.py");
const INIT_PYI_TEMPLATE: &str = include_str!("./templates/__init__.pyi");
const PYPROJECT_TOML_TEMPLATE: &str = include_str!("./templates/pyproject.toml");
const PY_BINDING_C_TEMPLATE: &str = include_str!("./templates/py-binding.c");
const TEST_BINDING_PY_TEMPLATE: &str = include_str!("./templates/test_binding.py");
const PACKAGE_SWIFT_TEMPLATE: &str = include_str!("./templates/package.swift");
const TESTS_SWIFT_TEMPLATE: &str = include_str!("./templates/tests.swift");
const POM_XML_TEMPLATE: &str = include_str!("./templates/pom.xml");
const BINDING_JAVA_TEMPLATE: &str = include_str!("./templates/binding.java");
const TEST_JAVA_TEMPLATE: &str = include_str!("./templates/test.java");
const BUILD_ZIG_TEMPLATE: &str = include_str!("./templates/build.zig");
const BUILD_ZIG_ZON_TEMPLATE: &str = include_str!("./templates/build.zig.zon");
const ROOT_ZIG_TEMPLATE: &str = include_str!("./templates/root.zig");
const TEST_ZIG_TEMPLATE: &str = include_str!("./templates/test.zig");
pub const TREE_SITTER_JSON_SCHEMA: &str =
"https://tree-sitter.github.io/tree-sitter/assets/schemas/config.schema.json";
#[derive(Serialize, Deserialize, Clone)]
pub struct JsonConfigOpts {
pub name: String,
pub camelcase: String,
pub title: String,
pub description: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub repository: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub funding: Option<String>,
pub scope: String,
pub file_types: Vec<String>,
pub version: Version,
pub license: String,
pub author: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub email: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub url: Option<String>,
pub namespace: Option<String>,
pub bindings: Bindings,
}
impl JsonConfigOpts {
#[must_use]
pub fn to_tree_sitter_json(self) -> TreeSitterJSON {
TreeSitterJSON {
schema: Some(TREE_SITTER_JSON_SCHEMA.to_string()),
grammars: vec![Grammar {
name: self.name.clone(),
camelcase: Some(self.camelcase),
title: Some(self.title),
scope: self.scope,
path: None,
external_files: PathsJSON::Empty,
file_types: Some(self.file_types),
highlights: PathsJSON::Empty,
injections: PathsJSON::Empty,
locals: PathsJSON::Empty,
tags: PathsJSON::Empty,
injection_regex: Some(format!("^{}$", self.name)),
first_line_regex: None,
content_regex: None,
class_name: Some(format!("TreeSitter{}", self.name.to_upper_camel_case())),
}],
metadata: Metadata {
version: self.version,
license: Some(self.license),
description: Some(self.description),
authors: Some(vec![Author {
name: self.author,
email: self.email,
url: self.url,
}]),
links: Some(Links {
repository: self.repository.unwrap_or_else(|| {
format!("https://github.com/tree-sitter/tree-sitter-{}", self.name)
}),
funding: self.funding,
}),
namespace: self.namespace,
},
bindings: self.bindings,
}
}
}
impl Default for JsonConfigOpts {
fn default() -> Self {
Self {
name: String::new(),
camelcase: String::new(),
title: String::new(),
description: String::new(),
repository: None,
funding: None,
scope: String::new(),
file_types: vec![],
version: Version::from_str("0.1.0").unwrap(),
license: String::new(),
author: String::new(),
email: None,
url: None,
namespace: None,
bindings: Bindings::default(),
}
}
}
struct GenerateOpts<'a> {
author_name: Option<&'a str>,
author_email: Option<&'a str>,
author_url: Option<&'a str>,
license: Option<&'a str>,
description: Option<&'a str>,
repository: Option<&'a str>,
funding: Option<&'a str>,
version: &'a Version,
camel_parser_name: &'a str,
title_parser_name: &'a str,
class_name: &'a str,
highlights_query_path: &'a str,
injections_query_path: &'a str,
locals_query_path: &'a str,
tags_query_path: &'a str,
namespace: Option<&'a str>,
}
pub fn generate_grammar_files(
repo_path: &Path,
language_name: &str,
allow_update: bool,
opts: Option<&JsonConfigOpts>,
) -> Result<()> {
let dashed_language_name = language_name.to_kebab_case();
let tree_sitter_config = missing_path_else(
repo_path.join("tree-sitter.json"),
true,
|path| {
// invariant: opts is always Some when `tree-sitter.json` doesn't exist
let Some(opts) = opts else { unreachable!() };
let tree_sitter_json = opts.clone().to_tree_sitter_json();
write_file(path, serde_json::to_string_pretty(&tree_sitter_json)?)?;
Ok(())
},
|path| {
// updating the config, if needed
if let Some(opts) = opts {
let tree_sitter_json = opts.clone().to_tree_sitter_json();
write_file(path, serde_json::to_string_pretty(&tree_sitter_json)?)?;
}
Ok(())
},
)?;
let tree_sitter_config = serde_json::from_str::<TreeSitterJSON>(
&fs::read_to_string(tree_sitter_config.as_path())
.with_context(|| "Failed to read tree-sitter.json")?,
)?;
let authors = tree_sitter_config.metadata.authors.as_ref();
let camel_name = tree_sitter_config.grammars[0]
.camelcase
.clone()
.unwrap_or_else(|| language_name.to_upper_camel_case());
let title_name = tree_sitter_config.grammars[0]
.title
.clone()
.unwrap_or_else(|| language_name.to_upper_camel_case());
let class_name = tree_sitter_config.grammars[0]
.class_name
.clone()
.unwrap_or_else(|| format!("TreeSitter{}", language_name.to_upper_camel_case()));
let default_highlights_path = Path::new("queries").join(DEFAULT_HIGHLIGHTS_QUERY_FILE_NAME);
let default_injections_path = Path::new("queries").join(DEFAULT_INJECTIONS_QUERY_FILE_NAME);
let default_locals_path = Path::new("queries").join(DEFAULT_LOCALS_QUERY_FILE_NAME);
let default_tags_path = Path::new("queries").join(DEFAULT_TAGS_QUERY_FILE_NAME);
let generate_opts = GenerateOpts {
author_name: authors
.map(|a| a.first().map(|a| a.name.as_str()))
.unwrap_or_default(),
author_email: authors
.map(|a| a.first().and_then(|a| a.email.as_deref()))
.unwrap_or_default(),
author_url: authors
.map(|a| a.first().and_then(|a| a.url.as_deref()))
.unwrap_or_default(),
license: tree_sitter_config.metadata.license.as_deref(),
description: tree_sitter_config.metadata.description.as_deref(),
repository: tree_sitter_config
.metadata
.links
.as_ref()
.map(|l| l.repository.as_str()),
funding: tree_sitter_config
.metadata
.links
.as_ref()
.and_then(|l| l.funding.as_deref()),
version: &tree_sitter_config.metadata.version,
camel_parser_name: &camel_name,
title_parser_name: &title_name,
class_name: &class_name,
highlights_query_path: tree_sitter_config.grammars[0]
.highlights
.to_variable_value(&default_highlights_path),
injections_query_path: tree_sitter_config.grammars[0]
.injections
.to_variable_value(&default_injections_path),
locals_query_path: tree_sitter_config.grammars[0]
.locals
.to_variable_value(&default_locals_path),
tags_query_path: tree_sitter_config.grammars[0]
.tags
.to_variable_value(&default_tags_path),
namespace: tree_sitter_config.metadata.namespace.as_deref(),
};
// Create package.json
missing_path_else(
repo_path.join("package.json"),
allow_update,
|path| {
generate_file(
path,
PACKAGE_JSON_TEMPLATE,
dashed_language_name.as_str(),
&generate_opts,
)
},
|path| {
let mut contents = fs::read_to_string(path)?
.replace(
r#""node-addon-api": "^8.3.1""#,
r#""node-addon-api": "^8.5.0""#,
)
.replace(
indoc! {r#"
"prebuildify": "^6.0.1",
"tree-sitter-cli":"#},
indoc! {r#"
"prebuildify": "^6.0.1",
"tree-sitter": "^0.25.0",
"tree-sitter-cli":"#},
);
if !contents.contains("module") {
info!("Migrating package.json to ESM");
contents = contents.replace(
r#""repository":"#,
indoc! {r#"
"type": "module",
"repository":"#},
);
}
write_file(path, contents)?;
Ok(())
},
)?;
// Do not create a grammar.js file in a repo with multiple language configs
if !tree_sitter_config.has_multiple_language_configs() {
missing_path_else(
repo_path.join("grammar.js"),
allow_update,
|path| generate_file(path, GRAMMAR_JS_TEMPLATE, language_name, &generate_opts),
|path| {
let mut contents = fs::read_to_string(path)?;
if contents.contains("module.exports") {
info!("Migrating grammars.js to ESM");
contents = contents.replace("module.exports =", "export default");
write_file(path, contents)?;
}
Ok(())
},
)?;
}
// Write .gitignore file
missing_path_else(
repo_path.join(".gitignore"),
allow_update,
|path| generate_file(path, GITIGNORE_TEMPLATE, language_name, &generate_opts),
|path| {
let mut contents = fs::read_to_string(path)?;
if !contents.contains("Zig artifacts") {
info!("Adding zig entries to .gitignore");
contents.push('\n');
contents.push_str(indoc! {"
# Zig artifacts
.zig-cache/
zig-cache/
zig-out/
"});
}
Ok(())
},
)?;
// Write .gitattributes file
missing_path_else(
repo_path.join(".gitattributes"),
allow_update,
|path| generate_file(path, GITATTRIBUTES_TEMPLATE, language_name, &generate_opts),
|path| {
let mut contents = fs::read_to_string(path)?;
let c_bindings_entry = "bindings/c/* ";
if contents.contains(c_bindings_entry) {
info!("Updating c bindings entry in .gitattributes");
contents = contents.replace(c_bindings_entry, "bindings/c/** ");
}
if !contents.contains("Zig bindings") {
info!("Adding zig entries to .gitattributes");
contents.push('\n');
contents.push_str(indoc! {"
# Zig bindings
build.zig linguist-generated
build.zig.zon linguist-generated
"});
}
write_file(path, contents)?;
Ok(())
},
)?;
// Write .editorconfig file
missing_path(repo_path.join(".editorconfig"), |path| {
generate_file(path, EDITORCONFIG_TEMPLATE, language_name, &generate_opts)
})?;
let bindings_dir = repo_path.join("bindings");
// Generate Rust bindings
if tree_sitter_config.bindings.rust {
missing_path(bindings_dir.join("rust"), create_dir)?.apply(|path| {
missing_path_else(path.join("lib.rs"), allow_update, |path| {
generate_file(path, LIB_RS_TEMPLATE, language_name, &generate_opts)
}, |path| {
let mut contents = fs::read_to_string(path)?;
if !contents.contains("#[cfg(with_highlights_query)]") {
info!("Updating query constants in bindings/rust/lib.rs");
let replacement = indoc! {r#"
#[cfg(with_highlights_query)]
/// The syntax highlighting query for this grammar.
pub const HIGHLIGHTS_QUERY: &str = include_str!("../../HIGHLIGHTS_QUERY_PATH");
#[cfg(with_injections_query)]
/// The language injection query for this grammar.
pub const INJECTIONS_QUERY: &str = include_str!("../../INJECTIONS_QUERY_PATH");
#[cfg(with_locals_query)]
/// The local variable query for this grammar.
pub const LOCALS_QUERY: &str = include_str!("../../LOCALS_QUERY_PATH");
#[cfg(with_tags_query)]
/// The symbol tagging query for this grammar.
pub const TAGS_QUERY: &str = include_str!("../../TAGS_QUERY_PATH");
"#}
.replace("HIGHLIGHTS_QUERY_PATH", generate_opts.highlights_query_path)
.replace("INJECTIONS_QUERY_PATH", generate_opts.injections_query_path)
.replace("LOCALS_QUERY_PATH", generate_opts.locals_query_path)
.replace("TAGS_QUERY_PATH", generate_opts.tags_query_path);
contents = contents
.replace(
indoc! {r#"
// NOTE: uncomment these to include any queries that this grammar contains:
// pub const HIGHLIGHTS_QUERY: &str = include_str!("../../queries/highlights.scm");
// pub const INJECTIONS_QUERY: &str = include_str!("../../queries/injections.scm");
// pub const LOCALS_QUERY: &str = include_str!("../../queries/locals.scm");
// pub const TAGS_QUERY: &str = include_str!("../../queries/tags.scm");
"#},
&replacement,
);
}
write_file(path, contents)?;
Ok(())
})?;
missing_path_else(
path.join("build.rs"),
allow_update,
|path| generate_file(path, BUILD_RS_TEMPLATE, language_name, &generate_opts),
|path| {
let mut contents = fs::read_to_string(path)?;
if !contents.contains("wasm32-unknown-unknown") {
info!("Adding wasm32-unknown-unknown target to bindings/rust/build.rs");
let replacement = indoc!{r#"
c_config.flag("-utf-8");
if std::env::var("TARGET").unwrap() == "wasm32-unknown-unknown" {
let Ok(wasm_headers) = std::env::var("DEP_TREE_SITTER_LANGUAGE_WASM_HEADERS") else {
panic!("Environment variable DEP_TREE_SITTER_LANGUAGE_WASM_HEADERS must be set by the language crate");
};
let Ok(wasm_src) =
std::env::var("DEP_TREE_SITTER_LANGUAGE_WASM_SRC").map(std::path::PathBuf::from)
else {
panic!("Environment variable DEP_TREE_SITTER_LANGUAGE_WASM_SRC must be set by the language crate");
};
c_config.include(&wasm_headers);
c_config.files([
wasm_src.join("stdio.c"),
wasm_src.join("stdlib.c"),
wasm_src.join("string.c"),
]);
}
"#}
.lines()
.map(|line| if line.is_empty() { line.to_string() } else { format!(" {line}") })
.collect::<Vec<_>>()
.join("\n");
contents = contents.replace(r#" c_config.flag("-utf-8");"#, &replacement);
}
// Introduce configuration variables for dynamic query inclusion
if !contents.contains("with_highlights_query") {
info!("Adding support for dynamic query inclusion to bindings/rust/build.rs");
let replaced = indoc! {r#"
c_config.compile("tree-sitter-KEBAB_PARSER_NAME");
}"#}
.replace("KEBAB_PARSER_NAME", &language_name.to_kebab_case());
let replacement = indoc! {r#"
c_config.compile("tree-sitter-KEBAB_PARSER_NAME");
println!("cargo:rustc-check-cfg=cfg(with_highlights_query)");
if !"HIGHLIGHTS_QUERY_PATH".is_empty() && std::path::Path::new("HIGHLIGHTS_QUERY_PATH").exists() {
println!("cargo:rustc-cfg=with_highlights_query");
}
println!("cargo:rustc-check-cfg=cfg(with_injections_query)");
if !"INJECTIONS_QUERY_PATH".is_empty() && std::path::Path::new("INJECTIONS_QUERY_PATH").exists() {
println!("cargo:rustc-cfg=with_injections_query");
}
println!("cargo:rustc-check-cfg=cfg(with_locals_query)");
if !"LOCALS_QUERY_PATH".is_empty() && std::path::Path::new("LOCALS_QUERY_PATH").exists() {
println!("cargo:rustc-cfg=with_locals_query");
}
println!("cargo:rustc-check-cfg=cfg(with_tags_query)");
if !"TAGS_QUERY_PATH".is_empty() && std::path::Path::new("TAGS_QUERY_PATH").exists() {
println!("cargo:rustc-cfg=with_tags_query");
}
}"#}
.replace("KEBAB_PARSER_NAME", &language_name.to_kebab_case())
.replace("HIGHLIGHTS_QUERY_PATH", generate_opts.highlights_query_path)
.replace("INJECTIONS_QUERY_PATH", generate_opts.injections_query_path)
.replace("LOCALS_QUERY_PATH", generate_opts.locals_query_path)
.replace("TAGS_QUERY_PATH", generate_opts.tags_query_path);
contents = contents.replace(
&replaced,
&replacement,
);
}
write_file(path, contents)?;
Ok(())
},
)?;
missing_path_else(
repo_path.join("Cargo.toml"),
allow_update,
|path| {
generate_file(
path,
CARGO_TOML_TEMPLATE,
dashed_language_name.as_str(),
&generate_opts,
)
},
|path| {
let contents = fs::read_to_string(path)?;
if contents.contains("\"LICENSE\"") {
info!("Adding LICENSE entry to bindings/rust/Cargo.toml");
write_file(path, contents.replace("\"LICENSE\"", "\"/LICENSE\""))?;
}
Ok(())
},
)?;
Ok(())
})?;
}
// Generate Node bindings
if tree_sitter_config.bindings.node {
missing_path(bindings_dir.join("node"), create_dir)?.apply(|path| {
missing_path_else(
path.join("index.js"),
allow_update,
|path| generate_file(path, INDEX_JS_TEMPLATE, language_name, &generate_opts),
|path| {
let contents = fs::read_to_string(path)?;
if !contents.contains("Object.defineProperty") {
info!("Replacing index.js");
generate_file(path, INDEX_JS_TEMPLATE, language_name, &generate_opts)?;
}
Ok(())
},
)?;
missing_path_else(
path.join("index.d.ts"),
allow_update,
|path| generate_file(path, INDEX_D_TS_TEMPLATE, language_name, &generate_opts),
|path| {
let contents = fs::read_to_string(path)?;
if !contents.contains("export default binding") {
info!("Replacing index.d.ts");
generate_file(path, INDEX_D_TS_TEMPLATE, language_name, &generate_opts)?;
}
Ok(())
},
)?;
missing_path_else(
path.join("binding_test.js"),
allow_update,
|path| {
generate_file(
path,
BINDING_TEST_JS_TEMPLATE,
language_name,
&generate_opts,
)
},
|path| {
let contents = fs::read_to_string(path)?;
if !contents.contains("import") {
info!("Replacing binding_test.js");
generate_file(
path,
BINDING_TEST_JS_TEMPLATE,
language_name,
&generate_opts,
)?;
}
Ok(())
},
)?;
missing_path(path.join("binding.cc"), |path| {
generate_file(path, JS_BINDING_CC_TEMPLATE, language_name, &generate_opts)
})?;
missing_path_else(
repo_path.join("binding.gyp"),
allow_update,
|path| generate_file(path, BINDING_GYP_TEMPLATE, language_name, &generate_opts),
|path| {
let contents = fs::read_to_string(path)?;
if contents.contains("fs.exists(") {
info!("Replacing `fs.exists` calls in binding.gyp");
write_file(path, contents.replace("fs.exists(", "fs.existsSync("))?;
}
Ok(())
},
)?;
Ok(())
})?;
}
// Generate C bindings
if tree_sitter_config.bindings.c {
let kebab_case_name = language_name.to_kebab_case();
missing_path(bindings_dir.join("c"), create_dir)?.apply(|path| {
let header_name = format!("tree-sitter-{kebab_case_name}.h");
let old_file = &path.join(&header_name);
if allow_update && fs::exists(old_file).unwrap_or(false) {
info!("Removing bindings/c/{header_name}");
fs::remove_file(old_file)?;
}
missing_path(path.join("tree_sitter"), create_dir)?.apply(|include_path| {
missing_path(
include_path.join(&header_name),
|path| {
generate_file(path, PARSER_NAME_H_TEMPLATE, language_name, &generate_opts)
},
)?;
Ok(())
})?;
missing_path(
path.join(format!("tree-sitter-{kebab_case_name}.pc.in")),
|path| {
generate_file(
path,
PARSER_NAME_PC_IN_TEMPLATE,
language_name,
&generate_opts,
)
},
)?;
missing_path_else(
repo_path.join("Makefile"),
allow_update,
|path| {
generate_file(path, MAKEFILE_TEMPLATE, language_name, &generate_opts)
},
|path| {
let mut contents = fs::read_to_string(path)?;
if !contents.contains("cd '$(DESTDIR)$(LIBDIR)' && ln -sf") {
info!("Replacing Makefile");
generate_file(path, MAKEFILE_TEMPLATE, language_name, &generate_opts)?;
} else {
let replaced = indoc! {r"
$(PARSER): $(SRC_DIR)/grammar.json
$(TS) generate $^
"};
if contents.contains(replaced) {
info!("Adding --no-parser target to Makefile");
contents = contents
.replace(
replaced,
indoc! {r"
$(SRC_DIR)/grammar.json: grammar.js
$(TS) generate --no-parser $^
$(PARSER): $(SRC_DIR)/grammar.json
$(TS) generate $^
"}
);
}
write_file(path, contents)?;
}
Ok(())
},
)?;
missing_path_else(
repo_path.join("CMakeLists.txt"),
allow_update,
|path| generate_file(path, CMAKELISTS_TXT_TEMPLATE, language_name, &generate_opts),
|path| {
let contents = fs::read_to_string(path)?;
let replaced_contents = contents
.replace("add_custom_target(test", "add_custom_target(ts-test")
.replace(
&formatdoc! {r#"
install(FILES bindings/c/tree-sitter-{language_name}.h
DESTINATION "${{CMAKE_INSTALL_INCLUDEDIR}}/tree_sitter")
"#},
indoc! {r#"
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | true |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tags.rs | crates/cli/src/tags.rs | use std::{
fs,
io::{self, Write},
path::Path,
str,
sync::{atomic::AtomicUsize, Arc},
time::Instant,
};
use anyhow::Result;
use tree_sitter_tags::{TagsConfiguration, TagsContext};
// Options controlling `generate_tags` output and cancellation.
pub struct TagsOptions {
    // Language scope selector (e.g. `source.rust`); not read inside
    // `generate_tags` itself — presumably consumed by the caller when
    // choosing a language. TODO confirm.
    pub scope: Option<String>,
    // Suppress per-tag output lines.
    pub quiet: bool,
    // Print the elapsed tagging time after processing a file.
    pub print_time: bool,
    // Shared flag handed to the tagger so a long-running job can be cancelled.
    pub cancellation_flag: Arc<AtomicUsize>,
}
/// Tag the file at `path` with `config` and print one line per tag to stdout.
///
/// When `indent` is set, `name` is printed as a header first and every tag
/// line is prefixed with a tab. Errors from reading the file or from the
/// tagger are propagated; `opts.quiet` suppresses the per-tag output but the
/// tags are still iterated (so tagging errors still surface).
pub fn generate_tags(
    path: &Path,
    name: &str,
    config: &TagsConfiguration,
    indent: bool,
    opts: &TagsOptions,
) -> Result<()> {
    let mut ctx = TagsContext::new();
    let mut out = io::stdout().lock();

    // Header + per-line prefix when nesting output under the file name.
    let indent_str = if indent {
        if !opts.quiet {
            writeln!(&mut out, "{name}")?;
        }
        "\t"
    } else {
        ""
    };

    let source = fs::read(path)?;
    let timer = Instant::now();

    let (tags, _) = ctx.generate_tags(config, &source, Some(&opts.cancellation_flag))?;
    for entry in tags {
        // Propagate tagging errors even in quiet mode.
        let entry = entry?;
        if opts.quiet {
            continue;
        }
        write!(
            &mut out,
            "{indent_str}{:<10}\t | {:<8}\t{} {} - {} `{}`",
            str::from_utf8(&source[entry.name_range]).unwrap_or(""),
            &config.syntax_type_name(entry.syntax_type_id),
            if entry.is_definition { "def" } else { "ref" },
            entry.span.start,
            entry.span.end,
            str::from_utf8(&source[entry.line_range]).unwrap_or(""),
        )?;
        if let Some(docs) = entry.docs {
            // Truncate long doc strings; `get` returns None (printed as "")
            // if byte 120 is not a char boundary.
            if docs.len() <= 120 {
                write!(&mut out, "\t{:?}", &docs)?;
            } else {
                write!(&mut out, "\t{:?}...", docs.get(0..120).unwrap_or(""))?;
            }
        }
        writeln!(&mut out)?;
    }

    if opts.print_time {
        writeln!(&mut out, "{indent_str}time: {}ms", timer.elapsed().as_millis())?;
    }
    Ok(())
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/query.rs | crates/cli/src/query.rs | use std::{
fs,
io::{self, Write},
ops::Range,
path::Path,
time::Instant,
};
use anyhow::{Context, Result};
use log::warn;
use streaming_iterator::StreamingIterator;
use tree_sitter::{Language, Parser, Point, Query, QueryCursor};
use crate::{
query_testing::{self, to_utf8_point},
test::{TestInfo, TestOutcome, TestResult, TestSummary},
};
// Options for `query_file_at_path`.
#[derive(Default)]
pub struct QueryFileOptions {
    // Report captures in source order instead of grouped per match.
    pub ordered_captures: bool,
    // Restrict query execution to a byte range or a row/column (point) range.
    pub byte_range: Option<Range<usize>>,
    pub point_range: Option<Range<Point>>,
    // Like the above, but only matches fully contained in the range are kept.
    pub containing_byte_range: Option<Range<usize>>,
    pub containing_point_range: Option<Range<Point>>,
    // Suppress per-match/per-capture output.
    pub quiet: bool,
    // Print the elapsed query time at the end.
    pub print_time: bool,
    // Input came from stdin: skip the file-name header, label results "stdin".
    pub stdin: bool,
}
/// Run the query in `query_path` against the source file at `path` and print
/// each match/capture to stdout (prefixed by `name` unless reading stdin).
///
/// When `test_summary` is `Some`, printing is suppressed and the collected
/// captures are instead checked against the file's inline assertions via
/// `query_testing::assert_expected_captures`; a pass/fail case is recorded in
/// the summary either way, and the first failure is returned as an error.
///
/// # Errors
///
/// Fails if the query or source file cannot be read, if the query does not
/// compile, or if capture assertions fail in test mode.
pub fn query_file_at_path(
    language: &Language,
    path: &Path,
    name: &str,
    query_path: &Path,
    opts: &QueryFileOptions,
    test_summary: Option<&mut TestSummary>,
) -> Result<()> {
    let stdout = io::stdout();
    let mut stdout = stdout.lock();
    let query_source = fs::read_to_string(query_path)
        .with_context(|| format!("Error reading query file {}", query_path.display()))?;
    let query = Query::new(language, &query_source).with_context(|| "Query compilation failed")?;
    let mut query_cursor = QueryCursor::new();
    // Restrict the cursor to any byte/point range the caller supplied.
    if let Some(ref range) = opts.byte_range {
        query_cursor.set_byte_range(range.clone());
    }
    if let Some(ref range) = opts.point_range {
        query_cursor.set_point_range(range.clone());
    }
    // "Containing" variants keep only matches fully enclosed by the range.
    if let Some(ref range) = opts.containing_byte_range {
        query_cursor.set_containing_byte_range(range.clone());
    }
    if let Some(ref range) = opts.containing_point_range {
        query_cursor.set_containing_point_range(range.clone());
    }
    let mut parser = Parser::new();
    parser.set_language(language)?;
    // Captures accumulated for assertion checking (test mode only).
    let mut results = Vec::new();
    let should_test = test_summary.is_some();
    if !should_test && !opts.stdin {
        writeln!(&mut stdout, "{name}")?;
    }
    let source_code =
        fs::read(path).with_context(|| format!("Error reading source file {}", path.display()))?;
    let tree = parser.parse(&source_code, None).unwrap();
    let start = Instant::now();
    if opts.ordered_captures {
        // Stream captures in source order rather than grouped by match.
        let mut captures = query_cursor.captures(&query, tree.root_node(), source_code.as_slice());
        while let Some((mat, capture_index)) = captures.next() {
            let capture = mat.captures[*capture_index];
            let capture_name = &query.capture_names()[capture.index as usize];
            if !opts.quiet && !should_test {
                writeln!(
                    &mut stdout,
                    "  pattern: {:>2}, capture: {} - {capture_name}, start: {}, end: {}, text: `{}`",
                    mat.pattern_index,
                    capture.index,
                    capture.node.start_position(),
                    capture.node.end_position(),
                    capture.node.utf8_text(&source_code).unwrap_or("")
                )?;
            }
            if should_test {
                results.push(query_testing::CaptureInfo {
                    name: (*capture_name).to_string(),
                    start: to_utf8_point(capture.node.start_position(), source_code.as_slice()),
                    end: to_utf8_point(capture.node.end_position(), source_code.as_slice()),
                });
            }
        }
    } else {
        let mut matches = query_cursor.matches(&query, tree.root_node(), source_code.as_slice());
        while let Some(m) = matches.next() {
            if !opts.quiet && !should_test {
                writeln!(&mut stdout, "  pattern: {}", m.pattern_index)?;
            }
            for capture in m.captures {
                let start = capture.node.start_position();
                let end = capture.node.end_position();
                let capture_name = &query.capture_names()[capture.index as usize];
                if !opts.quiet && !should_test {
                    // Only inline the captured text when it fits on one line.
                    if end.row == start.row {
                        writeln!(
                            &mut stdout,
                            "    capture: {} - {capture_name}, start: {start}, end: {end}, text: `{}`",
                            capture.index,
                            capture.node.utf8_text(&source_code).unwrap_or("")
                        )?;
                    } else {
                        writeln!(
                            &mut stdout,
                            "    capture: {capture_name}, start: {start}, end: {end}",
                        )?;
                    }
                }
                if should_test {
                    results.push(query_testing::CaptureInfo {
                        name: (*capture_name).to_string(),
                        start: to_utf8_point(capture.node.start_position(), source_code.as_slice()),
                        end: to_utf8_point(capture.node.end_position(), source_code.as_slice()),
                    });
                }
            }
        }
    }
    if query_cursor.did_exceed_match_limit() {
        warn!("Query exceeded maximum number of in-progress captures!");
    }
    if should_test {
        let path_name = if opts.stdin {
            "stdin"
        } else {
            Path::new(&path).file_name().unwrap().to_str().unwrap()
        };
        // Invariant: `test_summary` will always be `Some` when `should_test` is true
        let test_summary = test_summary.unwrap();
        match query_testing::assert_expected_captures(&results, path, &mut parser, language) {
            Ok(assertion_count) => {
                test_summary.query_results.add_case(TestResult {
                    name: path_name.to_string(),
                    info: TestInfo::AssertionTest {
                        outcome: TestOutcome::AssertionPassed { assertion_count },
                        test_num: test_summary.test_num,
                    },
                });
            }
            Err(e) => {
                // Record the failure in the summary before propagating it.
                test_summary.query_results.add_case(TestResult {
                    name: path_name.to_string(),
                    info: TestInfo::AssertionTest {
                        outcome: TestOutcome::AssertionFailed {
                            error: e.to_string(),
                        },
                        test_num: test_summary.test_num,
                    },
                });
                return Err(e);
            }
        }
    }
    if opts.print_time {
        writeln!(&mut stdout, "{:?}", start.elapsed())?;
    }
    Ok(())
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/main.rs | crates/cli/src/main.rs | use std::{
collections::HashSet,
env, fs,
path::{Path, PathBuf},
};
use anstyle::{AnsiColor, Color, Style};
use anyhow::{anyhow, Context, Result};
use clap::{crate_authors, Args, Command, FromArgMatches as _, Subcommand, ValueEnum};
use clap_complete::generate;
use dialoguer::{theme::ColorfulTheme, Confirm, FuzzySelect, Input, MultiSelect};
use heck::ToUpperCamelCase;
use log::{error, info, warn};
use regex::Regex;
use semver::Version as SemverVersion;
use tree_sitter::{ffi, Parser, Point};
use tree_sitter_cli::{
fuzz::{
fuzz_language_corpus, FuzzOptions, EDIT_COUNT, ITERATION_COUNT, LOG_ENABLED,
LOG_GRAPH_ENABLED, START_SEED,
},
highlight::{self, HighlightOptions},
init::{generate_grammar_files, JsonConfigOpts, TREE_SITTER_JSON_SCHEMA},
input::{get_input, get_tmp_source_file, CliInput},
logger,
parse::{self, ParseDebugType, ParseFileOptions, ParseOutput, ParseTheme},
playground,
query::{self, QueryFileOptions},
tags::{self, TagsOptions},
test::{self, TestOptions, TestStats, TestSummary},
test_highlight, test_tags, util,
version::{self, BumpLevel},
wasm,
};
use tree_sitter_config::Config;
use tree_sitter_generate::OptLevel;
use tree_sitter_highlight::Highlighter;
use tree_sitter_loader::{self as loader, Bindings, TreeSitterJSON};
use tree_sitter_tags::TagsContext;
// Crate version baked in from Cargo.toml at compile time.
const BUILD_VERSION: &str = env!("CARGO_PKG_VERSION");
// Git commit SHA; `None` unless BUILD_SHA was set in the build environment.
const BUILD_SHA: Option<&'static str> = option_env!("BUILD_SHA");
// ABI version `tree-sitter generate` emits unless overridden via --abi.
const DEFAULT_GENERATE_ABI_VERSION: usize = 15;
// Top-level CLI subcommands. NOTE: the `///` comments on variants double as
// clap's per-command help text, so they are user-facing strings.
#[derive(Subcommand)]
#[command(about="Generates and tests parsers", author=crate_authors!("\n"), styles=get_styles())]
enum Commands {
    /// Generate a default config file
    InitConfig(InitConfig),
    /// Initialize a grammar repository
    Init(Init),
    /// Generate a parser
    Generate(Generate),
    /// Compile a parser
    Build(Build),
    /// Parse files
    Parse(Parse),
    /// Run a parser's tests
    Test(Test),
    /// Display or increment the version of a grammar
    Version(Version),
    /// Fuzz a parser
    Fuzz(Fuzz),
    /// Search files using a syntax tree query
    Query(Query),
    /// Highlight a file
    Highlight(Highlight),
    /// Generate a list of tags
    Tags(Tags),
    /// Start local playground for a parser in the browser
    Playground(Playground),
    /// Print info about all known language parsers
    DumpLanguages(DumpLanguages),
    /// Generate shell completions
    Complete(Complete),
}
// `tree-sitter init-config` takes no arguments; the unit struct only exists
// so the subcommand appears in the derive-based CLI.
#[derive(Args)]
struct InitConfig;
// Arguments for `tree-sitter init` (alias `i`): scaffold a grammar repository.
#[derive(Args)]
#[command(alias = "i")]
struct Init {
    /// Update outdated files
    #[arg(long, short)]
    pub update: bool,
    /// The path to the tree-sitter grammar directory
    #[arg(long, short = 'p')]
    pub grammar_path: Option<PathBuf>,
}
// Arguments for `tree-sitter generate` (aliases `gen`, `g`): produce parser
// sources from a grammar file. Several flags are deprecated in favor of the
// `build` subcommand but kept for backward compatibility.
#[derive(Args)]
#[command(alias = "gen", alias = "g")]
struct Generate {
    /// The path to the grammar file
    #[arg(index = 1)]
    pub grammar_path: Option<PathBuf>,
    /// Show debug log during generation
    #[arg(long, short)]
    pub log: bool,
    #[arg(
        long = "abi",
        value_name = "VERSION",
        env = "TREE_SITTER_ABI_VERSION",
        help = format!(concat!(
            "Select the language ABI version to generate (default {}).\n",
            "Use --abi=latest to generate the newest supported version ({}).",
            ),
            DEFAULT_GENERATE_ABI_VERSION,
            tree_sitter::LANGUAGE_VERSION,
        )
    )]
    pub abi_version: Option<String>,
    /// Only generate `grammar.json` and `node-types.json`
    #[arg(long)]
    pub no_parser: bool,
    /// Deprecated: use the `build` command
    #[arg(long, short = 'b')]
    pub build: bool,
    /// Deprecated: use the `build` command
    #[arg(long, short = '0')]
    pub debug_build: bool,
    /// Deprecated: use the `build` command
    #[arg(long, value_name = "PATH")]
    pub libdir: Option<PathBuf>,
    /// The path to output the generated source files
    #[arg(long, short, value_name = "DIRECTORY")]
    pub output: Option<PathBuf>,
    /// Produce a report of the states for the given rule, use `-` to report every rule
    #[arg(long, conflicts_with = "json", conflicts_with = "json_summary")]
    pub report_states_for_rule: Option<String>,
    /// Deprecated: use --json-summary
    #[arg(
        long,
        conflicts_with = "json_summary",
        conflicts_with = "report_states_for_rule"
    )]
    pub json: bool,
    /// Report conflicts in a JSON format
    #[arg(
        long,
        conflicts_with = "json",
        conflicts_with = "report_states_for_rule"
    )]
    pub json_summary: bool,
    // Two cfg-gated declarations of the same field: the qjs-rt build adds the
    // `native` QuickJS option to the help text; exactly one is compiled in.
    /// The name or path of the JavaScript runtime to use for generating parsers
    #[cfg(not(feature = "qjs-rt"))]
    #[arg(
        long,
        value_name = "EXECUTABLE",
        env = "TREE_SITTER_JS_RUNTIME",
        default_value = "node"
    )]
    pub js_runtime: Option<String>,
    #[cfg(feature = "qjs-rt")]
    #[arg(
        long,
        value_name = "EXECUTABLE",
        env = "TREE_SITTER_JS_RUNTIME",
        default_value = "node"
    )]
    /// The name or path of the JavaScript runtime to use for generating parsers, specify `native`
    /// to use the native `QuickJS` runtime
    pub js_runtime: Option<String>,
    /// Disable optimizations when generating the parser. Currently, this only affects
    /// the merging of compatible parse states.
    #[arg(long)]
    pub disable_optimizations: bool,
}
// Arguments for `tree-sitter build` (alias `b`): compile a generated parser
// to a dynamic library or a Wasm module.
#[derive(Args)]
#[command(alias = "b")]
struct Build {
    /// Build a Wasm module instead of a dynamic library
    #[arg(short, long)]
    pub wasm: bool,
    /// The path to output the compiled file
    #[arg(short, long)]
    pub output: Option<PathBuf>,
    /// The path to the grammar directory
    #[arg(index = 1, num_args = 1)]
    pub path: Option<PathBuf>,
    /// Make the parser reuse the same allocator as the library
    #[arg(long)]
    pub reuse_allocator: bool,
    /// Compile a parser in debug mode
    #[arg(long, short = '0')]
    pub debug: bool,
}
// Arguments for `tree-sitter parse` (alias `p`): parse source files and print
// the syntax tree in one of several formats.
#[derive(Args)]
#[command(alias = "p")]
struct Parse {
    /// The path to a file with paths to source file(s)
    #[arg(long = "paths")]
    pub paths_file: Option<PathBuf>,
    /// The source file(s) to use
    #[arg(num_args=1..)]
    pub paths: Option<Vec<PathBuf>>,
    /// The path to the tree-sitter grammar directory, implies --rebuild
    #[arg(long, short = 'p', conflicts_with = "rebuild")]
    pub grammar_path: Option<PathBuf>,
    /// The path to the parser's dynamic library
    #[arg(long, short = 'l')]
    pub lib_path: Option<PathBuf>,
    /// If `--lib-path` is used, the name of the language used to extract the
    /// library's language function
    #[arg(long)]
    pub lang_name: Option<String>,
    /// Select a language by the scope instead of a file extension
    #[arg(long)]
    pub scope: Option<String>,
    // Nested Option distinguishes "flag absent" from "flag given with no
    // value" from "flag given with a value".
    /// Show parsing debug log
    #[arg(long, short = 'd')] // TODO: Rework once clap adds `default_missing_value_t`
    #[allow(clippy::option_option)]
    pub debug: Option<Option<ParseDebugType>>,
    /// Compile a parser in debug mode
    #[arg(long, short = '0')]
    pub debug_build: bool,
    /// Produce the log.html file with debug graphs
    #[arg(long, short = 'D')]
    pub debug_graph: bool,
    /// Compile parsers to Wasm instead of native dynamic libraries
    #[arg(long, hide = cfg!(not(feature = "wasm")))]
    pub wasm: bool,
    /// Output the parse data with graphviz dot
    #[arg(long = "dot")]
    pub output_dot: bool,
    /// Output the parse data in XML format
    #[arg(long = "xml", short = 'x')]
    pub output_xml: bool,
    /// Output the parse data in a pretty-printed CST format
    #[arg(long = "cst", short = 'c')]
    pub output_cst: bool,
    /// Show parsing statistic
    #[arg(long, short, conflicts_with = "json", conflicts_with = "json_summary")]
    pub stat: bool,
    /// Interrupt the parsing process by timeout (µs)
    #[arg(long)]
    pub timeout: Option<u64>,
    /// Measure execution time
    #[arg(long, short)]
    pub time: bool,
    /// Suppress main output
    #[arg(long, short)]
    pub quiet: bool,
    #[allow(clippy::doc_markdown)]
    /// Apply edits in the format: \"row,col|position delcount insert_text\", can be supplied
    /// multiple times
    #[arg(
        long,
        num_args = 1..,
    )]
    pub edits: Option<Vec<String>>,
    /// The encoding of the input files
    #[arg(long)]
    pub encoding: Option<Encoding>,
    /// Open `log.html` in the default browser, if `--debug-graph` is supplied
    #[arg(long)]
    pub open_log: bool,
    /// Deprecated: use --json-summary
    #[arg(long, conflicts_with = "json_summary", conflicts_with = "stat")]
    pub json: bool,
    /// Output parsing results in a JSON format
    #[arg(long, short = 'j', conflicts_with = "json", conflicts_with = "stat")]
    pub json_summary: bool,
    /// The path to an alternative config.json file
    #[arg(long)]
    pub config_path: Option<PathBuf>,
    /// Parse the contents of a specific test
    #[arg(long, short = 'n')]
    #[clap(conflicts_with = "paths", conflicts_with = "paths_file")]
    pub test_number: Option<u32>,
    /// Force rebuild the parser
    #[arg(short, long)]
    pub rebuild: bool,
    /// Omit ranges in the output
    #[arg(long)]
    pub no_ranges: bool,
}
// Input text encodings accepted by `parse --encoding` (UTF-8 or UTF-16 in
// either byte order). `//` comments only: `///` docs would alter clap's
// generated value help.
#[derive(ValueEnum, Clone)]
pub enum Encoding {
    Utf8,
    Utf16LE,
    Utf16BE,
}
// Arguments for `tree-sitter test` (alias `t`): run a grammar's corpus tests.
#[derive(Args)]
#[command(alias = "t")]
struct Test {
    /// Only run corpus test cases whose name matches the given regex
    #[arg(long, short)]
    pub include: Option<Regex>,
    /// Only run corpus test cases whose name does not match the given regex
    #[arg(long, short)]
    pub exclude: Option<Regex>,
    /// Only run corpus test cases from a given filename
    #[arg(long)]
    pub file_name: Option<String>,
    /// The path to the tree-sitter grammar directory, implies --rebuild
    #[arg(long, short = 'p', conflicts_with = "rebuild")]
    pub grammar_path: Option<PathBuf>,
    /// The path to the parser's dynamic library
    #[arg(long, short = 'l')]
    pub lib_path: Option<PathBuf>,
    /// If `--lib-path` is used, the name of the language used to extract the
    /// library's language function
    #[arg(long)]
    pub lang_name: Option<String>,
    /// Update all syntax trees in corpus files with current parser output
    #[arg(long, short)]
    pub update: bool,
    /// Show parsing debug log
    #[arg(long, short = 'd')]
    pub debug: bool,
    /// Compile a parser in debug mode
    #[arg(long, short = '0')]
    pub debug_build: bool,
    /// Produce the log.html file with debug graphs
    #[arg(long, short = 'D')]
    pub debug_graph: bool,
    /// Compile parsers to Wasm instead of native dynamic libraries
    #[arg(long, hide = cfg!(not(feature = "wasm")))]
    pub wasm: bool,
    /// Open `log.html` in the default browser, if `--debug-graph` is supplied
    #[arg(long)]
    pub open_log: bool,
    /// The path to an alternative config.json file
    #[arg(long)]
    pub config_path: Option<PathBuf>,
    /// Force showing fields in test diffs
    #[arg(long)]
    pub show_fields: bool,
    /// Show parsing statistics
    #[arg(long)]
    pub stat: Option<TestStats>,
    /// Force rebuild the parser
    #[arg(short, long)]
    pub rebuild: bool,
    /// Show only the pass-fail overview tree
    #[arg(long)]
    pub overview_only: bool,
    /// Output the test summary in a JSON format
    #[arg(long)]
    pub json_summary: bool,
}
// Arguments for `tree-sitter version` (alias `publish`): show or bump the
// grammar's version. An explicit version and `--bump` are mutually exclusive.
#[derive(Args)]
#[command(alias = "publish")]
/// Display or increment the version of a grammar
struct Version {
    /// The version to bump to
    #[arg(
        conflicts_with = "bump",
        long_help = "\
            The version to bump to\n\
            \n\
            Examples:\n \
            tree-sitter version: display the current version\n \
            tree-sitter version <version>: bump to specified version\n \
            tree-sitter version --bump <level>: automatic bump"
    )]
    pub version: Option<SemverVersion>,
    /// The path to the tree-sitter grammar directory
    #[arg(long, short = 'p')]
    pub grammar_path: Option<PathBuf>,
    /// Automatically bump from the current version
    #[arg(long, value_enum, conflicts_with = "version")]
    pub bump: Option<BumpLevel>,
}
// Arguments for `tree-sitter fuzz` (alias `f`): repeatedly edit and re-parse
// corpus examples to find parser crashes or inconsistencies.
#[derive(Args)]
#[command(alias = "f")]
struct Fuzz {
    /// List of test names to skip
    #[arg(long, short)]
    pub skip: Option<Vec<String>>,
    /// Subdirectory to the language
    #[arg(long)]
    pub subdir: Option<PathBuf>,
    /// The path to the tree-sitter grammar directory, implies --rebuild
    #[arg(long, short = 'p', conflicts_with = "rebuild")]
    pub grammar_path: Option<PathBuf>,
    /// The path to the parser's dynamic library
    #[arg(long)]
    pub lib_path: Option<PathBuf>,
    /// If `--lib-path` is used, the name of the language used to extract the
    /// library's language function
    #[arg(long)]
    pub lang_name: Option<String>,
    /// Maximum number of edits to perform per fuzz test
    #[arg(long)]
    pub edits: Option<usize>,
    /// Number of fuzzing iterations to run per test
    #[arg(long)]
    pub iterations: Option<usize>,
    /// Only fuzz corpus test cases whose name matches the given regex
    #[arg(long, short)]
    pub include: Option<Regex>,
    /// Only fuzz corpus test cases whose name does not match the given regex
    #[arg(long, short)]
    pub exclude: Option<Regex>,
    /// Enable logging of graphs and input
    #[arg(long)]
    pub log_graphs: bool,
    /// Enable parser logging
    #[arg(long, short)]
    pub log: bool,
    /// Force rebuild the parser
    #[arg(short, long)]
    pub rebuild: bool,
}
// CLI arguments for `tree-sitter query` (aliased as `q`): run a query file
// against source files. `///` docs are clap help text.
#[derive(Args)]
#[command(alias = "q")]
struct Query {
    /// Path to a file with queries
    // First positional argument.
    #[arg(index = 1, required = true)]
    query_path: PathBuf,
    /// The path to the tree-sitter grammar directory, implies --rebuild
    #[arg(long, short = 'p', conflicts_with = "rebuild")]
    pub grammar_path: Option<PathBuf>,
    /// The path to the parser's dynamic library
    #[arg(long, short = 'l')]
    pub lib_path: Option<PathBuf>,
    /// If `--lib-path` is used, the name of the language used to extract the
    /// library's language function
    #[arg(long)]
    pub lang_name: Option<String>,
    /// Measure execution time
    #[arg(long, short)]
    pub time: bool,
    /// Suppress main output
    #[arg(long, short)]
    pub quiet: bool,
    /// The path to a file with paths to source file(s)
    #[arg(long = "paths")]
    pub paths_file: Option<PathBuf>,
    /// The source file(s) to use
    // Remaining positional arguments after the query path.
    #[arg(index = 2, num_args=1..)]
    pub paths: Option<Vec<PathBuf>>,
    /// The range of byte offsets in which the query will be executed
    #[arg(long)]
    pub byte_range: Option<String>,
    /// The range of rows in which the query will be executed
    #[arg(long)]
    pub row_range: Option<String>,
    /// The range of byte offsets in which the query will be executed. Only the matches that are fully contained within the provided
    /// byte range will be returned.
    #[arg(long)]
    pub containing_byte_range: Option<String>,
    /// The range of rows in which the query will be executed. Only the matches that are fully contained within the provided row range
    /// will be returned.
    #[arg(long)]
    pub containing_row_range: Option<String>,
    /// Select a language by the scope instead of a file extension
    #[arg(long)]
    pub scope: Option<String>,
    /// Order by captures instead of matches
    #[arg(long, short)]
    pub captures: bool,
    /// Whether to run query tests or not
    #[arg(long)]
    pub test: bool,
    /// The path to an alternative config.json file
    #[arg(long)]
    pub config_path: Option<PathBuf>,
    /// Query the contents of a specific test
    // A corpus test number is an alternative input source, hence the conflicts.
    #[arg(long, short = 'n')]
    #[clap(conflicts_with = "paths", conflicts_with = "paths_file")]
    pub test_number: Option<u32>,
    /// Force rebuild the parser
    #[arg(short, long)]
    pub rebuild: bool,
}
// CLI arguments for `tree-sitter highlight` (aliased as `hi`).
// `///` docs are clap help text.
#[derive(Args)]
#[command(alias = "hi")]
struct Highlight {
    /// Generate highlighting as an HTML document
    #[arg(long, short = 'H')]
    pub html: bool,
    /// When generating HTML, use css classes rather than inline styles
    #[arg(long)]
    pub css_classes: bool,
    /// Check that highlighting captures conform strictly to standards
    #[arg(long)]
    pub check: bool,
    /// The path to a file with captures
    #[arg(long)]
    pub captures_path: Option<PathBuf>,
    /// The paths to files with queries
    #[arg(long, num_args = 1..)]
    pub query_paths: Option<Vec<PathBuf>>,
    /// Select a language by the scope instead of a file extension
    #[arg(long)]
    pub scope: Option<String>,
    /// Measure execution time
    #[arg(long, short)]
    pub time: bool,
    /// Suppress main output
    #[arg(long, short)]
    pub quiet: bool,
    /// The path to a file with paths to source file(s)
    #[arg(long = "paths")]
    pub paths_file: Option<PathBuf>,
    /// The source file(s) to use
    #[arg(num_args = 1..)]
    pub paths: Option<Vec<PathBuf>>,
    /// The path to the tree-sitter grammar directory, implies --rebuild
    #[arg(long, short = 'p', conflicts_with = "rebuild")]
    pub grammar_path: Option<PathBuf>,
    /// The path to an alternative config.json file
    #[arg(long)]
    pub config_path: Option<PathBuf>,
    /// Highlight the contents of a specific test
    // Alternative input source, mutually exclusive with file inputs.
    #[arg(long, short = 'n')]
    #[clap(conflicts_with = "paths", conflicts_with = "paths_file")]
    pub test_number: Option<u32>,
    /// Force rebuild the parser
    #[arg(short, long)]
    pub rebuild: bool,
}
// CLI arguments for `tree-sitter tags`: generate code-navigation tags.
// `///` docs are clap help text.
#[derive(Args)]
struct Tags {
    /// Select a language by the scope instead of a file extension
    #[arg(long)]
    pub scope: Option<String>,
    /// Measure execution time
    #[arg(long, short)]
    pub time: bool,
    /// Suppress main output
    #[arg(long, short)]
    pub quiet: bool,
    /// The path to a file with paths to source file(s)
    #[arg(long = "paths")]
    pub paths_file: Option<PathBuf>,
    /// The source file(s) to use
    #[arg(num_args = 1..)]
    pub paths: Option<Vec<PathBuf>>,
    /// The path to the tree-sitter grammar directory, implies --rebuild
    #[arg(long, short = 'p', conflicts_with = "rebuild")]
    pub grammar_path: Option<PathBuf>,
    /// The path to an alternative config.json file
    #[arg(long)]
    pub config_path: Option<PathBuf>,
    /// Generate tags from the contents of a specific test
    // Alternative input source, mutually exclusive with file inputs.
    #[arg(long, short = 'n')]
    #[clap(conflicts_with = "paths", conflicts_with = "paths_file")]
    pub test_number: Option<u32>,
    /// Force rebuild the parser
    #[arg(short, long)]
    pub rebuild: bool,
}
// CLI arguments for `tree-sitter playground` (aliases: `play`, `pg`, `web-ui`).
// `///` docs are clap help text.
#[derive(Args)]
#[command(alias = "play", alias = "pg", alias = "web-ui")]
struct Playground {
    /// Don't open in default browser
    #[arg(long, short)]
    pub quiet: bool,
    /// Path to the directory containing the grammar and Wasm files
    #[arg(long)]
    pub grammar_path: Option<PathBuf>,
    /// Export playground files to specified directory instead of serving them
    #[arg(long, short)]
    pub export: Option<PathBuf>,
}
// CLI arguments for `tree-sitter dump-languages` (aliased as `langs`).
#[derive(Args)]
#[command(alias = "langs")]
struct DumpLanguages {
    /// The path to an alternative config.json file
    #[arg(long)]
    pub config_path: Option<PathBuf>,
}
// CLI arguments for `tree-sitter complete` (aliased as `comp`):
// emit shell completion scripts.
#[derive(Args)]
#[command(alias = "comp")]
struct Complete {
    /// The shell to generate completions for
    #[arg(long, short, value_enum)]
    pub shell: Shell,
}
// Shells supported by the `complete` subcommand. Variant names map to the
// clap `ValueEnum` CLI values; adding a variant here requires handling it
// where completions are generated.
#[derive(ValueEnum, Clone)]
pub enum Shell {
    Bash,
    Elvish,
    Fish,
    PowerShell,
    Zsh,
    Nushell,
}
/// Complete `action` if the wasm feature is enabled, otherwise return an error
macro_rules! checked_wasm {
    ($action:block) => {
        // When the `wasm` cargo feature is compiled in, just run the block.
        #[cfg(feature = "wasm")]
        {
            $action
        }
        // Otherwise surface a clear error; the trailing `?` propagates it
        // from the enclosing function (which must return a Result).
        #[cfg(not(feature = "wasm"))]
        {
            Err(anyhow!("--wasm flag specified, but this build of tree-sitter-cli does not include the wasm feature"))?;
        }
    };
}
impl InitConfig {
    /// Create the user's initial configuration file.
    ///
    /// Refuses to run if a config file already exists, so existing user
    /// customizations are never silently overwritten.
    fn run() -> Result<()> {
        if let Ok(Some(config_path)) = Config::find_config_file() {
            return Err(anyhow!(
                "Remove your existing config file first: {}",
                config_path.to_string_lossy()
            ));
        }
        // Seed the config with loader defaults and the default highlight theme.
        let mut config = Config::initial()?;
        config.add(tree_sitter_loader::Config::initial())?;
        config.add(tree_sitter_cli::highlight::ThemeConfig::default())?;
        config.save()?;
        info!(
            "Saved initial configuration to {}",
            config.location.display()
        );
        Ok(())
    }
}
impl Init {
    /// Initialize (or migrate) a grammar repository in `current_dir`.
    ///
    /// If no `tree-sitter.json` exists, interactively gathers a full
    /// configuration from the user; otherwise re-serializes the existing
    /// file (optionally writing it back when `self.update` is set) and
    /// then generates/updates the grammar scaffolding files.
    fn run(self, current_dir: &Path) -> Result<()> {
        let configure_json = !current_dir.join("tree-sitter.json").exists();
        let (language_name, json_config_opts) = if configure_json {
            let mut opts = JsonConfigOpts::default();
            // One prompt closure per configuration field; each returns the
            // value entered by the user (dialoguer `interact*` results).
            let name = || {
                Input::<String>::with_theme(&ColorfulTheme::default())
                    .with_prompt("Parser name")
                    .validate_with(|input: &String| {
                        if input.chars().all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '_') {
                            Ok(())
                        } else {
                            Err("The name must be lowercase and contain only letters, digits, and underscores")
                        }
                    })
                    .interact_text()
            };
            let camelcase_name = |name: &str| {
                Input::<String>::with_theme(&ColorfulTheme::default())
                    .with_prompt("CamelCase name")
                    .default(name.to_upper_camel_case())
                    .validate_with(|input: &String| {
                        if input
                            .chars()
                            .all(|c| c.is_ascii_alphabetic() || c.is_ascii_digit() || c == '_')
                        {
                            Ok(())
                        } else {
                            Err("The name must contain only letters, digits, and underscores")
                        }
                    })
                    .interact_text()
            };
            let title = |name: &str| {
                Input::<String>::with_theme(&ColorfulTheme::default())
                    .with_prompt("Title (human-readable name)")
                    .default(name.to_upper_camel_case())
                    .interact_text()
            };
            let description = |name: &str| {
                Input::<String>::with_theme(&ColorfulTheme::default())
                    .with_prompt("Description")
                    .default(format!(
                        "{} grammar for tree-sitter",
                        name.to_upper_camel_case()
                    ))
                    .show_default(false)
                    .allow_empty(true)
                    .interact_text()
            };
            let repository = |name: &str| {
                Input::<String>::with_theme(&ColorfulTheme::default())
                    .with_prompt("Repository URL")
                    .allow_empty(true)
                    .default(format!("https://github.com/tree-sitter/tree-sitter-{name}"))
                    .show_default(false)
                    .interact_text()
            };
            let funding = || {
                Input::<String>::with_theme(&ColorfulTheme::default())
                    .with_prompt("Funding URL")
                    .allow_empty(true)
                    .interact_text()
                    .map(|e| Some(e.trim().to_string()))
            };
            let scope = |name: &str| {
                Input::<String>::with_theme(&ColorfulTheme::default())
                    .with_prompt("TextMate scope")
                    .default(format!("source.{name}"))
                    .validate_with(|input: &String| {
                        if input.starts_with("source.") || input.starts_with("text.") {
                            Ok(())
                        } else {
                            Err("The scope must start with 'source.' or 'text.'")
                        }
                    })
                    .interact_text()
            };
            let file_types = |name: &str| {
                Input::<String>::with_theme(&ColorfulTheme::default())
                    .with_prompt("File types (space-separated)")
                    .default(name.to_string())
                    .interact_text()
                    .map(|ft| {
                        // Deduplicate the space-separated extensions via a set.
                        let mut set = HashSet::new();
                        for ext in ft.split(' ') {
                            let ext = ext.trim();
                            if !ext.is_empty() {
                                set.insert(ext.to_string());
                            }
                        }
                        set.into_iter().collect::<Vec<_>>()
                    })
            };
            let initial_version = || {
                Input::<SemverVersion>::with_theme(&ColorfulTheme::default())
                    .with_prompt("Version")
                    .default(SemverVersion::new(0, 1, 0))
                    .interact_text()
            };
            let license = || {
                Input::<String>::with_theme(&ColorfulTheme::default())
                    .with_prompt("License")
                    .default("MIT".to_string())
                    .allow_empty(true)
                    .interact()
            };
            let author = || {
                Input::<String>::with_theme(&ColorfulTheme::default())
                    .with_prompt("Author name")
                    .interact_text()
            };
            let email = || {
                Input::<String>::with_theme(&ColorfulTheme::default())
                    .with_prompt("Author email")
                    .allow_empty(true)
                    .interact_text()
                    .map(|e| (!e.trim().is_empty()).then_some(e))
            };
            let url = || {
                Input::<String>::with_theme(&ColorfulTheme::default())
                    .with_prompt("Author URL")
                    .allow_empty(true)
                    .interact_text()
                    .map(|e| Some(e.trim().to_string()))
            };
            let namespace = || {
                Input::<String>::with_theme(&ColorfulTheme::default())
                    .with_prompt("Package namespace")
                    .default("io.github.tree-sitter".to_string())
                    .allow_empty(true)
                    .interact()
            };
            let bindings = || {
                let languages = Bindings::default().languages();
                let enabled = MultiSelect::new()
                    .with_prompt("Bindings")
                    .items_checked(&languages)
                    .interact()?
                    .into_iter()
                    .map(|i| languages[i].0);
                let out = Bindings::with_enabled_languages(enabled)
                    .expect("unexpected unsupported language");
                anyhow::Ok(out)
            };
            // Menu entries for the edit loop; "exit" must remain last because
            // the initial pass below deliberately skips it.
            let choices = [
                "name",
                "camelcase",
                "title",
                "description",
                "repository",
                "funding",
                "scope",
                "file_types",
                "version",
                "license",
                "author",
                "email",
                "url",
                "namespace",
                "bindings",
                "exit",
            ];
            // Dispatch a menu choice to the matching prompt closure. Note the
            // "exit" arm expands to `break`, which targets whichever loop
            // encloses the macro's expansion site.
            macro_rules! set_choice {
                ($choice:expr) => {
                    match $choice {
                        "name" => opts.name = name()?,
                        "camelcase" => opts.camelcase = camelcase_name(&opts.name)?,
                        "title" => opts.title = title(&opts.name)?,
                        "description" => opts.description = description(&opts.name)?,
                        "repository" => opts.repository = Some(repository(&opts.name)?),
                        "funding" => opts.funding = funding()?,
                        "scope" => opts.scope = scope(&opts.name)?,
                        "file_types" => opts.file_types = file_types(&opts.name)?,
                        "version" => opts.version = initial_version()?,
                        "license" => opts.license = license()?,
                        "author" => opts.author = author()?,
                        "email" => opts.email = email()?,
                        "url" => opts.url = url()?,
                        "namespace" => opts.namespace = Some(namespace()?),
                        "bindings" => opts.bindings = bindings()?,
                        "exit" => break,
                        _ => unreachable!(),
                    }
                };
            }
            // Initial configuration
            for choice in choices.iter().take(choices.len() - 1) {
                set_choice!(*choice);
            }
            // Loop for editing the configuration
            loop {
                info!(
                    "Your current configuration:\n{}",
                    serde_json::to_string_pretty(&opts)?
                );
                if Confirm::with_theme(&ColorfulTheme::default())
                    .with_prompt("Does the config above look correct?")
                    .interact()?
                {
                    break;
                }
                let idx = FuzzySelect::with_theme(&ColorfulTheme::default())
                    .with_prompt("Which field would you like to change?")
                    .items(&choices)
                    .interact()?;
                set_choice!(choices[idx]);
            }
            (opts.name.clone(), Some(opts))
        } else {
            let old_config = fs::read_to_string(current_dir.join("tree-sitter.json"))
                .with_context(|| "Failed to read tree-sitter.json")?;
            let mut json = serde_json::from_str::<TreeSitterJSON>(&old_config)?;
            if json.schema.is_none() {
                json.schema = Some(TREE_SITTER_JSON_SCHEMA.to_string());
            }
            let new_config = format!("{}\n", serde_json::to_string_pretty(&json)?);
            // Write the re-serialized config back, as newly added optional boolean fields
            // will be included with explicit `false`s rather than implict `null`s
            if self.update && !old_config.trim().eq(new_config.trim()) {
                info!("Updating tree-sitter.json");
                fs::write(
                    current_dir.join("tree-sitter.json"),
                    serde_json::to_string_pretty(&json)?,
                )
                .with_context(|| "Failed to write tree-sitter.json")?;
            }
            // The first grammar entry names the language being initialized.
            (json.grammars.swap_remove(0).name, None)
        };
        generate_grammar_files(
            current_dir,
            &language_name,
            self.update,
            json_config_opts.as_ref(),
        )?;
        Ok(())
    }
}
impl Generate {
fn run(self, mut loader: loader::Loader, current_dir: &Path) -> Result<()> {
if self.log {
logger::enable_debug();
}
let abi_version =
self.abi_version
.as_ref()
.map_or(DEFAULT_GENERATE_ABI_VERSION, |version| {
if version == "latest" {
tree_sitter::LANGUAGE_VERSION
} else {
version.parse().expect("invalid abi version flag")
}
});
let json_summary = if self.json {
warn!("--json is deprecated, use --json-summary instead");
true
} else {
self.json_summary
};
if let Err(err) = tree_sitter_generate::generate_parser_in_directory(
current_dir,
self.output.as_deref(),
self.grammar_path.as_deref(),
abi_version,
self.report_states_for_rule.as_deref(),
self.js_runtime.as_deref(),
!self.no_parser,
if self.disable_optimizations {
OptLevel::empty()
} else {
OptLevel::default()
},
) {
if json_summary {
eprintln!("{}", serde_json::to_string_pretty(&err)?);
// Exit early to prevent errors from being printed a second time in the caller
std::process::exit(1);
} else {
// Removes extra context associated with the error
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | true |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/wasm.rs | crates/cli/src/wasm.rs | use std::{
fs,
path::{Path, PathBuf},
};
use anyhow::{anyhow, Context, Result};
use tree_sitter::wasm_stdlib_symbols;
use tree_sitter_generate::{load_grammar_file, parse_grammar::GrammarJSON};
use tree_sitter_loader::Loader;
use wasmparser::Parser;
/// Load the compiled Wasm parser for the grammar in `language_dir`.
///
/// Returns the grammar name together with the raw bytes of the
/// `tree-sitter-<name>.wasm` file found in that directory.
///
/// # Errors
///
/// Returns an error if the grammar name cannot be determined from
/// `src/grammar.json`, or if the Wasm file is missing/unreadable
/// (e.g. `tree-sitter build --wasm` has not been run yet).
pub fn load_language_wasm_file(language_dir: &Path) -> Result<(String, Vec<u8>)> {
    // Propagate the failure with `?` instead of `.unwrap()`: this function
    // already returns `Result`, so a missing or malformed grammar.json should
    // surface as an error to the caller rather than a panic.
    let grammar_name = get_grammar_name(language_dir).context("Failed to get Wasm filename")?;
    let wasm_filename = format!("tree-sitter-{grammar_name}.wasm");
    let contents = fs::read(language_dir.join(&wasm_filename)).with_context(|| {
        format!("Failed to read {wasm_filename}. Run `tree-sitter build --wasm` first.")
    })?;
    Ok((grammar_name, contents))
}
/// Read the grammar's name out of `src/grammar.json` beneath `language_dir`.
///
/// # Errors
///
/// Fails when the file cannot be read or is not valid grammar JSON; both
/// error messages include the offending path.
pub fn get_grammar_name(language_dir: &Path) -> Result<String> {
    let grammar_json_path = language_dir.join("src").join("grammar.json");
    let json_text = fs::read_to_string(&grammar_json_path).with_context(|| {
        format!(
            "Failed to read grammar file {}",
            grammar_json_path.display()
        )
    })?;
    let parsed = serde_json::from_str::<GrammarJSON>(&json_text).with_context(|| {
        format!(
            "Failed to parse grammar file {}",
            grammar_json_path.display()
        )
    })?;
    Ok(parsed.name)
}
/// Compile the grammar in `language_dir` to a Wasm parser.
///
/// The result is written to `output_file` when given, otherwise to
/// `tree-sitter-<name>.wasm` inside `output_dir`. After compilation, the
/// module's import section is scanned so that an external scanner using
/// symbols unavailable to Wasm parsers fails loudly with the full list.
///
/// # Errors
///
/// Fails if the grammar name cannot be determined, if compilation fails,
/// or if the compiled module imports unavailable symbols.
pub fn compile_language_to_wasm(
    loader: &Loader,
    language_dir: &Path,
    output_dir: &Path,
    output_file: Option<PathBuf>,
) -> Result<()> {
    // Prefer the generated src/grammar.json; fall back to evaluating
    // grammar.js when the parser hasn't been generated yet.
    let grammar_name = get_grammar_name(language_dir)
        .or_else(|_| load_grammar_file(&language_dir.join("grammar.js"), None))?;
    let output_filename =
        output_file.unwrap_or_else(|| output_dir.join(format!("tree-sitter-{grammar_name}.wasm")));
    let src_path = language_dir.join("src");
    let scanner_path = loader.get_scanner_path(&src_path);
    loader.compile_parser_to_wasm(
        &grammar_name,
        &src_path,
        scanner_path
            .as_ref()
            .and_then(|p| Some(Path::new(p.file_name()?))),
        &output_filename,
    )?;
    // Exit with an error if the external scanner uses symbols from the
    // C or C++ standard libraries that aren't available to Wasm parsers.
    let stdlib_symbols = wasm_stdlib_symbols().collect::<Vec<_>>();
    // Symbols supplied by the dynamic-linking (dylink.0) convention.
    // (The previous version listed "__table_base" twice; the duplicate
    // was harmless but redundant and has been removed.)
    let dylink_symbols = [
        "__indirect_function_table",
        "__memory_base",
        "__stack_pointer",
        "__table_base",
        "memory",
    ];
    // Host functions that the runtime itself provides.
    let builtin_symbols = [
        "__assert_fail",
        "__cxa_atexit",
        "abort",
        "emscripten_notify_memory_growth",
        "tree_sitter_debug_message",
        "proc_exit",
    ];
    // Collect every import that none of the allow-lists covers.
    let mut missing_symbols = Vec::new();
    let wasm_bytes = fs::read(&output_filename)?;
    let parser = Parser::new(0);
    for payload in parser.parse_all(&wasm_bytes) {
        if let wasmparser::Payload::ImportSection(imports) = payload? {
            for import in imports {
                let import = import?.name;
                if !builtin_symbols.contains(&import)
                    && !stdlib_symbols.contains(&import)
                    && !dylink_symbols.contains(&import)
                {
                    missing_symbols.push(import);
                }
            }
        }
    }
    if missing_symbols.is_empty() {
        Ok(())
    } else {
        // Report every unavailable import at once so the scanner author can
        // fix them in a single pass.
        Err(anyhow!(
            concat!(
                "This external scanner uses a symbol that isn't available to Wasm parsers.\n",
                "\n",
                "Missing symbols:\n",
                " {}\n",
                "\n",
                "Available symbols:\n",
                " {}",
            ),
            missing_symbols.join("\n "),
            stdlib_symbols.join("\n ")
        ))
    }
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/query_testing.rs | crates/cli/src/query_testing.rs | use std::{fs, path::Path, sync::LazyLock};
use anyhow::{anyhow, Result};
use bstr::{BStr, ByteSlice};
use regex::Regex;
use tree_sitter::{Language, Parser, Point};
// Matches a capture name in an assertion comment: one or more word
// characters, underscores, hyphens, or dots (e.g. `function.builtin`).
static CAPTURE_NAME_REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new("[\\w_\\-.]+").unwrap());
/// A zero-based row/column position whose column counts grapheme clusters,
/// unlike `tree_sitter::Point`, whose column is a byte offset.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct Utf8Point {
    pub row: usize,
    pub column: usize,
}
impl std::fmt::Display for Utf8Point {
    /// Renders the point as `(row, column)`.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let Self { row, column } = self;
        write!(f, "({row}, {column})")
    }
}
impl Utf8Point {
    /// Construct a point at the given zero-based `row` and grapheme `column`.
    #[must_use]
    pub const fn new(row: usize, column: usize) -> Self {
        Self { row, column }
    }
}
/// Convert a byte-column `tree_sitter::Point` into a grapheme-column
/// `Utf8Point` by walking the grapheme clusters of the point's line.
#[must_use]
pub fn to_utf8_point(point: Point, source: &[u8]) -> Utf8Point {
    // Column 0 is the same in both representations.
    if point.column == 0 {
        return Utf8Point::new(point.row, 0);
    }
    let bstr = BStr::new(source);
    // NOTE(review): panics if `point.row` is past the last line of `source`;
    // callers appear to only pass points obtained from parsing this same
    // source — confirm before reusing elsewhere.
    let line = bstr.lines_with_terminator().nth(point.row).unwrap();
    let mut utf8_column = 0;
    // Count grapheme clusters until the byte offset reaches `point.column`.
    for (_, grapheme_end, _) in line.grapheme_indices() {
        utf8_column += 1;
        if grapheme_end >= point.column {
            break;
        }
    }
    Utf8Point {
        row: point.row,
        column: utf8_column,
    }
}
/// A capture produced by running a query: its name and the UTF-8
/// start/end positions of the captured node.
#[derive(Debug, Eq, PartialEq)]
pub struct CaptureInfo {
    pub name: String,
    pub start: Utf8Point,
    pub end: Utf8Point,
}
/// One assertion parsed from a positional comment (`^`/`<-` markers).
#[derive(Debug, PartialEq, Eq)]
pub struct Assertion {
    // Position of the first asserted column.
    pub position: Utf8Point,
    // Number of columns spanned (e.g. `^^^` has length 3).
    pub length: usize,
    // True when the comment used `!` to assert the capture is absent.
    pub negative: bool,
    pub expected_capture_name: String,
}
impl Assertion {
    /// Construct an assertion spanning `length` columns starting at
    /// (`row`, `col`); `negative` inverts the expectation.
    #[must_use]
    pub const fn new(
        row: usize,
        col: usize,
        length: usize,
        negative: bool,
        expected_capture_name: String,
    ) -> Self {
        Self {
            position: Utf8Point::new(row, col),
            length,
            negative,
            expected_capture_name,
        }
    }
}
/// Parse the given source code, finding all of the comments that contain
/// highlighting assertions. Return a vector of (position, expected highlight name)
/// pairs.
///
/// Assertion comments use `^` (possibly repeated) pointing at a column on a
/// previous line, or `<-` pointing at the comment's own start column; an
/// optional `!` before the capture name makes the assertion negative.
pub fn parse_position_comments(
    parser: &mut Parser,
    language: &Language,
    source: &[u8],
) -> Result<Vec<Assertion>> {
    let mut result = Vec::new();
    let mut assertion_ranges = Vec::new();
    // Parse the code.
    parser.set_included_ranges(&[]).unwrap();
    parser.set_language(language).unwrap();
    let tree = parser.parse(source, None).unwrap();
    // Walk the tree, finding comment nodes that contain assertions.
    let mut ascending = false;
    let mut cursor = tree.root_node().walk();
    loop {
        if ascending {
            let node = cursor.node();
            // Find every comment node.
            if node.kind().to_lowercase().contains("comment") {
                if let Ok(text) = node.utf8_text(source) {
                    let mut position = node.start_position();
                    // Row 0 can't hold an assertion: there is no line above it.
                    if position.row > 0 {
                        // Find the arrow character ("^" or "<-") in the comment. A left arrow
                        // refers to the column where the comment node starts. An up arrow refers
                        // to its own column.
                        let mut has_left_caret = false;
                        let mut has_arrow = false;
                        let mut negative = false;
                        let mut arrow_end = 0;
                        let mut arrow_count = 1;
                        for (i, c) in text.char_indices() {
                            arrow_end = i + 1;
                            if c == '-' && has_left_caret {
                                has_arrow = true;
                                break;
                            }
                            if c == '^' {
                                has_arrow = true;
                                position.column += i;
                                // Continue counting remaining arrows and update their end column
                                for (_, c) in text[arrow_end..].char_indices() {
                                    if c != '^' {
                                        arrow_end += arrow_count - 1;
                                        break;
                                    }
                                    arrow_count += 1;
                                }
                                break;
                            }
                            has_left_caret = c == '<';
                        }
                        // find any ! after arrows but before capture name
                        if has_arrow {
                            for (i, c) in text[arrow_end..].char_indices() {
                                if c == '!' {
                                    negative = true;
                                    arrow_end += i + 1;
                                    break;
                                } else if !c.is_whitespace() {
                                    break;
                                }
                            }
                        }
                        // If the comment node contains an arrow and a highlight name, record the
                        // highlight name and the position.
                        if let (true, Some(mat)) =
                            (has_arrow, CAPTURE_NAME_REGEX.find(&text[arrow_end..]))
                        {
                            assertion_ranges.push((node.start_position(), node.end_position()));
                            result.push(Assertion {
                                position: to_utf8_point(position, source),
                                length: arrow_count,
                                negative,
                                expected_capture_name: mat.as_str().to_string(),
                            });
                        }
                    }
                }
            }
            // Continue walking the tree.
            if cursor.goto_next_sibling() {
                ascending = false;
            } else if !cursor.goto_parent() {
                break;
            }
        } else if !cursor.goto_first_child() {
            ascending = true;
        }
    }
    // Adjust the row number in each assertion's position to refer to the line of
    // code *above* the assertion. There can be multiple lines of assertion comments and empty
    // lines, so the positions may have to be decremented by more than one row.
    let mut i = 0;
    let lines = source.lines_with_terminator().collect::<Vec<_>>();
    for assertion in &mut result {
        let original_position = assertion.position;
        loop {
            let on_assertion_line = assertion_ranges[i..]
                .iter()
                .any(|(start, _)| start.row == assertion.position.row);
            let on_empty_line = lines[assertion.position.row].len() <= assertion.position.column;
            if on_assertion_line || on_empty_line {
                if assertion.position.row > 0 {
                    assertion.position.row -= 1;
                } else {
                    return Err(anyhow!(
                        "Error: could not find a line that corresponds to the assertion `{}` located at {original_position}",
                        assertion.expected_capture_name
                    ));
                }
            } else {
                // Skip past assertion ranges that can no longer be relevant,
                // keeping the scan linear overall.
                while i < assertion_ranges.len()
                    && assertion_ranges[i].0.row < assertion.position.row
                {
                    i += 1;
                }
                break;
            }
        }
    }
    // The assertions can end up out of order due to the line adjustments.
    result.sort_unstable_by_key(|a| a.position);
    Ok(result)
}
/// Verify that the capture `infos` produced for the file at `path` satisfy
/// the position assertions embedded in that file's comments.
///
/// Returns the number of assertions checked, or the first failure.
pub fn assert_expected_captures(
    infos: &[CaptureInfo],
    path: &Path,
    parser: &mut Parser,
    language: &Language,
) -> Result<usize> {
    let contents = fs::read_to_string(path)?;
    let pairs = parse_position_comments(parser, language, contents.as_bytes())?;
    for assertion in &pairs {
        // Find a capture whose span contains the asserted position/length.
        if let Some(found) = &infos.iter().find(|p| {
            assertion.position >= p.start
                && (assertion.position.row < p.end.row
                    || assertion.position.column + assertion.length - 1 < p.end.column)
        }) {
            // NOTE(review): captures literally named "name" are exempt from
            // the comparison — presumably intentional; confirm upstream.
            if assertion.expected_capture_name != found.name && found.name != "name" {
                return Err(anyhow!(
                    "Assertion failed: at {}, found {}, expected {}",
                    found.start,
                    found.name,
                    assertion.expected_capture_name,
                ));
            }
        } else {
            return Err(anyhow!(
                "Assertion failed: could not match {} at row {}, column {}",
                assertion.expected_capture_name,
                assertion.position.row,
                assertion.position.column + assertion.length - 1,
            ));
        }
    }
    Ok(pairs.len())
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/test_highlight.rs | crates/cli/src/test_highlight.rs | use std::{fs, path::Path};
use anyhow::{anyhow, Result};
use tree_sitter::Point;
use tree_sitter_highlight::{Highlight, HighlightConfiguration, HighlightEvent, Highlighter};
use tree_sitter_loader::{Config, Loader};
use crate::{
query_testing::{parse_position_comments, to_utf8_point, Assertion, Utf8Point},
test::{TestInfo, TestOutcome, TestResult, TestSummary},
util,
};
/// A failed highlight assertion: where it occurred, which capture the test
/// expected, and which captures the highlighter actually produced there.
#[derive(Debug)]
pub struct Failure {
    row: usize,
    column: usize,
    expected_highlight: String,
    actual_highlights: Vec<String>,
}
impl std::error::Error for Failure {}
impl std::fmt::Display for Failure {
    /// Format the failure as a single human-readable sentence listing the
    /// highlights that were actually present at the asserted position.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(
            f,
            "Failure - row: {}, column: {}, expected highlight '{}', actual highlights: ",
            self.row, self.column, self.expected_highlight
        )?;
        match self.actual_highlights.split_first() {
            None => write!(f, "none.")?,
            Some((first, rest)) => {
                write!(f, "'{first}'")?;
                for highlight in rest {
                    write!(f, ", '{highlight}'")?;
                }
            }
        }
        Ok(())
    }
}
/// Recursively run all highlight assertion tests under `directory`,
/// recording one result per test file in `test_summary`.
///
/// # Errors
///
/// Returns an error (with an empty message — the details were already
/// recorded in the summary) if any test in the directory tree failed.
pub fn test_highlights(
    loader: &Loader,
    loader_config: &Config,
    highlighter: &mut Highlighter,
    directory: &Path,
    test_summary: &mut TestSummary,
) -> Result<()> {
    let mut failed = false;
    for highlight_test_file in fs::read_dir(directory)? {
        let highlight_test_file = highlight_test_file?;
        let test_file_path = highlight_test_file.path();
        let test_file_name = highlight_test_file.file_name();
        // Non-empty subdirectories become nested test groups.
        if test_file_path.is_dir() && test_file_path.read_dir()?.next().is_some() {
            test_summary
                .highlight_results
                .add_group(test_file_name.to_string_lossy().as_ref());
            if test_highlights(
                loader,
                loader_config,
                highlighter,
                &test_file_path,
                test_summary,
            )
            .is_err()
            {
                failed = true;
            }
            test_summary.highlight_results.pop_traversal();
        } else {
            // Resolve the language and its highlight query from the file name.
            let (language, language_config) = loader
                .language_configuration_for_file_name(&test_file_path)?
                .ok_or_else(|| {
                    anyhow!(
                        "{}",
                        util::lang_not_found_for_path(test_file_path.as_path(), loader_config)
                    )
                })?;
            let highlight_config = language_config
                .highlight_config(language, None)?
                .ok_or_else(|| {
                    anyhow!(
                        "No highlighting config found for {}",
                        test_file_path.display()
                    )
                })?;
            match test_highlight(
                loader,
                highlighter,
                highlight_config,
                fs::read(&test_file_path)?.as_slice(),
            ) {
                Ok(assertion_count) => {
                    test_summary.highlight_results.add_case(TestResult {
                        name: test_file_name.to_string_lossy().to_string(),
                        info: TestInfo::AssertionTest {
                            outcome: TestOutcome::AssertionPassed { assertion_count },
                            test_num: test_summary.test_num,
                        },
                    });
                }
                Err(e) => {
                    test_summary.highlight_results.add_case(TestResult {
                        name: test_file_name.to_string_lossy().to_string(),
                        info: TestInfo::AssertionTest {
                            outcome: TestOutcome::AssertionFailed {
                                error: e.to_string(),
                            },
                            test_num: test_summary.test_num,
                        },
                    });
                    failed = true;
                }
            }
            test_summary.test_num += 1;
        }
    }
    if failed {
        // The caller only needs the failure signal; details are in the summary.
        Err(anyhow!(""))
    } else {
        Ok(())
    }
}
/// Check every highlighting assertion against the actual highlight spans.
///
/// Both `assertions` and `highlights` are ordered by position, which lets
/// this scan advance through `highlights` monotonically. Returns the number
/// of assertions on success, or a `Failure` describing the first assertion
/// no highlight satisfied.
pub fn iterate_assertions(
    assertions: &[Assertion],
    highlights: &[(Utf8Point, Utf8Point, Highlight)],
    highlight_names: &[String],
) -> Result<usize> {
    // Iterate through all of the highlighting assertions, checking each one against the
    // actual highlights.
    let mut i = 0;
    let mut actual_highlights = Vec::new();
    for Assertion {
        position,
        length,
        negative,
        expected_capture_name: expected_highlight,
    } in assertions
    {
        let mut passed = false;
        // Assertions may span several columns (e.g. `^^^`); this is the last one.
        let mut end_column = position.column + length - 1;
        actual_highlights.clear();
        // The assertions are ordered by position, so skip past all of the highlights that
        // end at or before this assertion's position.
        'highlight_loop: while let Some(highlight) = highlights.get(i) {
            if highlight.1 <= *position {
                i += 1;
                continue;
            }
            // Iterate through all of the highlights that start at or before this assertion's
            // position, looking for one that matches the assertion.
            let mut j = i;
            while let (false, Some(highlight)) = (passed, highlights.get(j)) {
                end_column = position.column + length - 1;
                if highlight.0.row >= position.row && highlight.0.column > end_column {
                    break 'highlight_loop;
                }
                // If the highlight matches the assertion, or if the highlight doesn't
                // match the assertion but it's negative, this test passes. Otherwise,
                // add this highlight to the list of actual highlights that span the
                // assertion's position, in order to generate an error message in the event
                // of a failure.
                let highlight_name = &highlight_names[(highlight.2).0];
                if (*highlight_name == *expected_highlight) == *negative {
                    actual_highlights.push(highlight_name);
                } else {
                    passed = true;
                    break 'highlight_loop;
                }
                j += 1;
            }
        }
        if !passed {
            return Err(Failure {
                row: position.row,
                column: end_column,
                expected_highlight: expected_highlight.clone(),
                actual_highlights: actual_highlights.into_iter().cloned().collect(),
            }
            .into());
        }
    }
    Ok(assertions.len())
}
/// Run the highlight assertions embedded in `source` against the actual
/// highlighting produced by `highlight_config`.
///
/// Returns the number of assertions checked, or an error describing the
/// first failing assertion.
pub fn test_highlight(
    loader: &Loader,
    highlighter: &mut Highlighter,
    highlight_config: &HighlightConfiguration,
    source: &[u8],
) -> Result<usize> {
    // Highlight the file, and parse out all of the highlighting assertions.
    let highlight_names = loader.highlight_names();
    let highlights = get_highlight_positions(loader, highlighter, highlight_config, source)?;
    let assertions =
        parse_position_comments(highlighter.parser(), &highlight_config.language, source)?;
    iterate_assertions(&assertions, &highlights, &highlight_names)
}
/// Run the highlighter over `source` and collect each highlighted span as a
/// `(start, end, highlight)` triple with UTF-8 (grapheme) positions.
pub fn get_highlight_positions(
    loader: &Loader,
    highlighter: &mut Highlighter,
    highlight_config: &HighlightConfiguration,
    source: &[u8],
) -> Result<Vec<(Utf8Point, Utf8Point, Highlight)>> {
    // Running row/column/byte cursor into `source`, advanced lazily as
    // highlight events arrive.
    let mut row = 0;
    let mut column = 0;
    let mut byte_offset = 0;
    let mut was_newline = false;
    let mut result = Vec::new();
    // Stack of currently-open highlights; the innermost one wins.
    let mut highlight_stack = Vec::new();
    let source = String::from_utf8_lossy(source);
    let mut char_indices = source.char_indices();
    for event in highlighter.highlight(highlight_config, source.as_bytes(), None, |string| {
        loader.highlight_config_for_injection_string(string)
    })? {
        match event? {
            HighlightEvent::HighlightStart(h) => highlight_stack.push(h),
            HighlightEvent::HighlightEnd => {
                highlight_stack.pop();
            }
            HighlightEvent::Source { start, end } => {
                // Advance the cursor to `end`, remembering the point at which
                // the span's `start` byte was reached.
                let mut start_position = Point::new(row, column);
                while byte_offset < end {
                    if byte_offset <= start {
                        start_position = Point::new(row, column);
                    }
                    if let Some((i, c)) = char_indices.next() {
                        if was_newline {
                            row += 1;
                            column = 0;
                        } else {
                            column += i - byte_offset;
                        }
                        was_newline = c == '\n';
                        byte_offset = i;
                    } else {
                        break;
                    }
                }
                // Record only spans that fall inside at least one highlight.
                if let Some(highlight) = highlight_stack.last() {
                    let utf8_start_position = to_utf8_point(start_position, source.as_bytes());
                    let utf8_end_position =
                        to_utf8_point(Point::new(row, column), source.as_bytes());
                    result.push((utf8_start_position, utf8_end_position, *highlight));
                }
            }
        }
    }
    Ok(result)
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/input.rs | crates/cli/src/input.rs | use std::{
fs,
io::{Read, Write},
path::{Path, PathBuf},
sync::{
atomic::{AtomicUsize, Ordering},
mpsc, Arc,
},
};
use anyhow::{anyhow, bail, Context, Result};
use glob::glob;
use crate::test::{parse_tests, TestEntry};
/// The resolved input source for a CLI invocation.
pub enum CliInput {
    // Explicit file paths (from arguments, globs, or a paths file).
    Paths(Vec<PathBuf>),
    // The contents of a single corpus test, selected by number.
    Test {
        name: String,
        contents: Vec<u8>,
        languages: Vec<Box<str>>,
    },
    // Raw bytes read from standard input.
    Stdin(Vec<u8>),
}
/// Resolve the CLI's input source, trying in priority order: a paths file,
/// a corpus test number, explicit paths/globs, and finally stdin.
///
/// Reading stdin happens on a worker thread so the `cancellation_flag`
/// (set to 1 by the ctrl-c handler) stays responsive.
pub fn get_input(
    paths_file: Option<&Path>,
    paths: Option<Vec<PathBuf>>,
    test_number: Option<u32>,
    cancellation_flag: &Arc<AtomicUsize>,
) -> Result<CliInput> {
    // 1) A file listing one input path per line.
    if let Some(paths_file) = paths_file {
        return Ok(CliInput::Paths(
            fs::read_to_string(paths_file)
                .with_context(|| format!("Failed to read paths file {}", paths_file.display()))?
                .trim()
                .lines()
                .map(PathBuf::from)
                .collect::<Vec<_>>(),
        ));
    }
    // 2) The contents of corpus test `test_number` (1-based on the CLI).
    if let Some(test_number) = test_number {
        let current_dir = std::env::current_dir().unwrap();
        let test_dir = current_dir.join("test").join("corpus");
        if !test_dir.exists() {
            return Err(anyhow!(
                "Test corpus directory not found in current directory, see https://tree-sitter.github.io/tree-sitter/creating-parsers/5-writing-tests"
            ));
        }
        let test_entry = parse_tests(&test_dir)?;
        let mut test_num = 0;
        // `max(1) - 1` converts the 1-based CLI argument to a 0-based index
        // without underflowing on 0.
        let Some((name, contents, languages)) =
            get_test_info(&test_entry, test_number.max(1) - 1, &mut test_num)
        else {
            return Err(anyhow!("Failed to fetch contents of test #{test_number}"));
        };
        return Ok(CliInput::Test {
            name,
            contents,
            languages,
        });
    }
    // 3) Explicit paths or glob patterns; a leading `!` excludes matches
    //    that an earlier pattern added.
    if let Some(paths) = paths {
        let mut result = Vec::new();
        let mut incorporate_path = |path: PathBuf, positive| {
            if positive {
                result.push(path);
            } else if let Some(index) = result.iter().position(|p| *p == path) {
                result.remove(index);
            }
        };
        for mut path in paths {
            let mut positive = true;
            if path.starts_with("!") {
                positive = false;
                path = path.strip_prefix("!").unwrap().to_path_buf();
            }
            if path.exists() {
                incorporate_path(path, positive);
            } else {
                // Not an existing file — treat the argument as a glob pattern.
                let Some(path_str) = path.to_str() else {
                    bail!("Invalid path: {}", path.display());
                };
                let paths = glob(path_str)
                    .with_context(|| format!("Invalid glob pattern {}", path.display()))?;
                for path in paths {
                    incorporate_path(path?, positive);
                }
            }
        }
        if result.is_empty() {
            return Err(anyhow!(
                "No files were found at or matched by the provided pathname/glob"
            ));
        }
        return Ok(CliInput::Paths(result));
    }
    // 4) Fall back to stdin, read on a worker thread.
    let reader_flag = cancellation_flag.clone();
    let (tx, rx) = mpsc::channel();
    // Spawn a thread to read from stdin, until ctrl-c or EOF is received
    std::thread::spawn(move || {
        let mut input = Vec::new();
        let stdin = std::io::stdin();
        let mut handle = stdin.lock();
        // Read in chunks, so we can check the ctrl-c flag
        loop {
            if reader_flag.load(Ordering::Relaxed) == 1 {
                break;
            }
            let mut buffer = [0; 1024];
            match handle.read(&mut buffer) {
                Ok(0) | Err(_) => break,
                Ok(n) => input.extend_from_slice(&buffer[..n]),
            }
        }
        // Signal to the main thread that we're done
        tx.send(input).ok();
    });
    // Poll rather than block so the cancellation flag is re-checked regularly.
    loop {
        // If we've received a ctrl-c signal, exit
        if cancellation_flag.load(Ordering::Relaxed) == 1 {
            bail!("\n");
        }
        // If we're done receiving input from stdin, return it
        if let Ok(input) = rx.try_recv() {
            return Ok(CliInput::Stdin(input));
        }
        std::thread::sleep(std::time::Duration::from_millis(50));
    }
}
#[allow(clippy::type_complexity)]
pub fn get_test_info(
test_entry: &TestEntry,
target_test: u32,
test_num: &mut u32,
) -> Option<(String, Vec<u8>, Vec<Box<str>>)> {
match test_entry {
TestEntry::Example {
name,
input,
attributes,
..
} => {
if *test_num == target_test {
return Some((name.clone(), input.clone(), attributes.languages.clone()));
}
*test_num += 1;
}
TestEntry::Group { children, .. } => {
for child in children {
if let Some((name, input, languages)) = get_test_info(child, target_test, test_num)
{
return Some((name, input, languages));
}
}
}
}
None
}
/// Writes `contents` to a temporary file and returns the path to that file.
pub fn get_tmp_source_file(contents: &[u8]) -> Result<PathBuf> {
let parse_path = std::env::temp_dir().join(".tree-sitter-temp");
let mut parse_file = std::fs::File::create(&parse_path)?;
parse_file.write_all(contents)?;
Ok(parse_path)
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests/corpus_test.rs | crates/cli/src/tests/corpus_test.rs | use std::{collections::HashMap, env, fs};
use anyhow::Context;
use tree_sitter::Parser;
use tree_sitter_proc_macro::test_with_seed;
use crate::{
fuzz::{
corpus_test::{
check_changed_ranges, check_consistent_sizes, get_parser, set_included_ranges,
},
edits::{get_random_edit, invert_edit},
flatten_tests, new_seed,
random::Rand,
EDIT_COUNT, EXAMPLE_EXCLUDE, EXAMPLE_INCLUDE, ITERATION_COUNT, LANGUAGE_FILTER,
LOG_GRAPH_ENABLED, START_SEED,
},
parse::perform_edit,
test::{parse_tests, strip_sexp_fields, DiffKey, TestDiff},
tests::{
allocations,
helpers::fixtures::{fixtures_dir, get_language, get_test_language, SCRATCH_BASE_DIR},
},
};
#[test_with_seed(retry=10, seed=*START_SEED, seed_fn=new_seed)]
fn test_corpus_for_bash_language(seed: usize) {
test_language_corpus(
"bash",
seed,
Some(&[
// Fragile tests where edit customization changes
// lead to significant parse tree structure changes.
"bash - corpus - commands - Nested Heredocs",
"bash - corpus - commands - Quoted Heredocs",
"bash - corpus - commands - Heredocs with weird characters",
]),
None,
);
}
#[test_with_seed(retry=10, seed=*START_SEED, seed_fn=new_seed)]
fn test_corpus_for_c_language(seed: usize) {
test_language_corpus("c", seed, None, None);
}
#[test_with_seed(retry=10, seed=*START_SEED, seed_fn=new_seed)]
fn test_corpus_for_cpp_language(seed: usize) {
test_language_corpus("cpp", seed, None, None);
}
#[test_with_seed(retry=10, seed=*START_SEED, seed_fn=new_seed)]
fn test_corpus_for_embedded_template_language(seed: usize) {
test_language_corpus("embedded-template", seed, None, None);
}
#[test_with_seed(retry=10, seed=*START_SEED, seed_fn=new_seed)]
fn test_corpus_for_go_language(seed: usize) {
test_language_corpus("go", seed, None, None);
}
#[test_with_seed(retry=10, seed=*START_SEED, seed_fn=new_seed)]
fn test_corpus_for_html_language(seed: usize) {
test_language_corpus("html", seed, None, None);
}
#[test_with_seed(retry=10, seed=*START_SEED, seed_fn=new_seed)]
fn test_corpus_for_java_language(seed: usize) {
test_language_corpus(
"java",
seed,
Some(&["java - corpus - expressions - switch with unnamed pattern variable"]),
None,
);
}
#[test_with_seed(retry=10, seed=*START_SEED, seed_fn=new_seed)]
fn test_corpus_for_javascript_language(seed: usize) {
test_language_corpus("javascript", seed, None, None);
}
#[test_with_seed(retry=10, seed=*START_SEED, seed_fn=new_seed)]
fn test_corpus_for_json_language(seed: usize) {
test_language_corpus("json", seed, None, None);
}
#[test_with_seed(retry=10, seed=*START_SEED, seed_fn=new_seed)]
fn test_corpus_for_php_language(seed: usize) {
test_language_corpus("php", seed, None, Some("php"));
}
#[test_with_seed(retry=10, seed=*START_SEED, seed_fn=new_seed)]
fn test_corpus_for_python_language(seed: usize) {
test_language_corpus("python", seed, None, None);
}
#[test_with_seed(retry=10, seed=*START_SEED, seed_fn=new_seed)]
fn test_corpus_for_ruby_language(seed: usize) {
test_language_corpus("ruby", seed, None, None);
}
#[test_with_seed(retry=10, seed=*START_SEED, seed_fn=new_seed)]
fn test_corpus_for_rust_language(seed: usize) {
test_language_corpus("rust", seed, None, None);
}
#[test_with_seed(retry=10, seed=*START_SEED, seed_fn=new_seed)]
fn test_corpus_for_typescript_language(seed: usize) {
test_language_corpus("typescript", seed, None, Some("typescript"));
}
#[test_with_seed(retry=10, seed=*START_SEED, seed_fn=new_seed)]
fn test_corpus_for_tsx_language(seed: usize) {
test_language_corpus("typescript", seed, None, Some("tsx"));
}
pub fn test_language_corpus(
language_name: &str,
start_seed: usize,
skipped: Option<&[&str]>,
language_dir: Option<&str>,
) {
if let Some(filter) = LANGUAGE_FILTER.as_ref() {
if language_name != filter {
return;
}
}
let language_dir = language_dir.unwrap_or_default();
let grammars_dir = fixtures_dir().join("grammars");
let error_corpus_dir = fixtures_dir().join("error_corpus");
let template_corpus_dir = fixtures_dir().join("template_corpus");
let corpus_dir = grammars_dir.join(language_name).join("test").join("corpus");
println!("Testing {language_name} corpus @ {}", corpus_dir.display());
let error_corpus_file = error_corpus_dir.join(format!("{language_name}_errors.txt"));
let template_corpus_file = template_corpus_dir.join(format!("{language_name}_templates.txt"));
let main_tests = parse_tests(&corpus_dir).unwrap();
let error_tests = parse_tests(&error_corpus_file).unwrap_or_default();
let template_tests = parse_tests(&template_corpus_file).unwrap_or_default();
let mut tests = flatten_tests(
main_tests,
EXAMPLE_INCLUDE.as_ref(),
EXAMPLE_EXCLUDE.as_ref(),
);
tests.extend(flatten_tests(
error_tests,
EXAMPLE_INCLUDE.as_ref(),
EXAMPLE_EXCLUDE.as_ref(),
));
tests.extend(
flatten_tests(
template_tests,
EXAMPLE_INCLUDE.as_ref(),
EXAMPLE_EXCLUDE.as_ref(),
)
.into_iter()
.map(|mut t| {
t.template_delimiters = Some(("<%", "%>"));
t
}),
);
tests.retain(|t| t.languages[0].is_empty() || t.languages.contains(&Box::from(language_dir)));
let mut skipped = skipped.map(|x| x.iter().map(|x| (*x, 0)).collect::<HashMap<&str, usize>>());
let language_path = if language_dir.is_empty() {
language_name.to_string()
} else {
format!("{language_name}/{language_dir}")
};
let language = get_language(&language_path);
let mut failure_count = 0;
let log_seed = env::var("TREE_SITTER_LOG_SEED").is_ok();
let dump_edits = env::var("TREE_SITTER_DUMP_EDITS").is_ok();
if log_seed {
println!(" start seed: {start_seed}");
}
println!();
for (test_index, test) in tests.iter().enumerate() {
let test_name = format!("{language_name} - {}", test.name);
if let Some(skipped) = skipped.as_mut() {
if let Some(counter) = skipped.get_mut(test_name.as_str()) {
println!(" {test_index}. {test_name} - SKIPPED");
*counter += 1;
continue;
}
}
println!(" {test_index}. {test_name}");
let passed = allocations::record(|| {
let mut log_session = None;
let mut parser = get_parser(&mut log_session, "log.html");
parser.set_language(&language).unwrap();
set_included_ranges(&mut parser, &test.input, test.template_delimiters);
let tree = parser.parse(&test.input, None).unwrap();
let mut actual_output = tree.root_node().to_sexp();
if !test.has_fields {
actual_output = strip_sexp_fields(&actual_output);
}
if actual_output != test.output {
println!("Incorrect initial parse for {test_name}");
DiffKey::print();
println!("{}", TestDiff::new(&actual_output, &test.output));
println!();
return false;
}
true
});
if !passed {
failure_count += 1;
continue;
}
let mut parser = Parser::new();
parser.set_language(&language).unwrap();
let tree = parser.parse(&test.input, None).unwrap();
drop(parser);
for trial in 0..*ITERATION_COUNT {
let seed = start_seed + trial;
let passed = allocations::record(|| {
let mut rand = Rand::new(seed);
let mut log_session = None;
let mut parser = get_parser(&mut log_session, "log.html");
parser.set_language(&language).unwrap();
let mut tree = tree.clone();
let mut input = test.input.clone();
if *LOG_GRAPH_ENABLED {
eprintln!("{}\n", String::from_utf8_lossy(&input));
}
// Perform a random series of edits and reparse.
let edit_count = rand.unsigned(*EDIT_COUNT);
let mut undo_stack = Vec::with_capacity(edit_count);
for _ in 0..=edit_count {
let edit = get_random_edit(&mut rand, &input);
undo_stack.push(invert_edit(&input, &edit));
perform_edit(&mut tree, &mut input, &edit).unwrap();
}
if log_seed {
println!(" {test_index}.{trial:<2} seed: {seed}");
}
if dump_edits {
fs::write(
SCRATCH_BASE_DIR
.join(format!("edit.{seed}.{test_index}.{trial} {test_name}")),
&input,
)
.unwrap();
}
if *LOG_GRAPH_ENABLED {
eprintln!("{}\n", String::from_utf8_lossy(&input));
}
set_included_ranges(&mut parser, &input, test.template_delimiters);
let mut tree2 = parser.parse(&input, Some(&tree)).unwrap();
// Check that the new tree is consistent.
check_consistent_sizes(&tree2, &input);
if let Err(message) = check_changed_ranges(&tree, &tree2, &input) {
println!("\nUnexpected scope change in seed {seed} with start seed {start_seed}\n{message}\n\n",);
return false;
}
// Undo all of the edits and re-parse again.
while let Some(edit) = undo_stack.pop() {
perform_edit(&mut tree2, &mut input, &edit).unwrap();
}
if *LOG_GRAPH_ENABLED {
eprintln!("{}\n", String::from_utf8_lossy(&input));
}
set_included_ranges(&mut parser, &test.input, test.template_delimiters);
let tree3 = parser.parse(&input, Some(&tree2)).unwrap();
// Verify that the final tree matches the expectation from the corpus.
let mut actual_output = tree3.root_node().to_sexp();
if !test.has_fields {
actual_output = strip_sexp_fields(&actual_output);
}
if actual_output != test.output {
println!("Incorrect parse for {test_name} - seed {seed}");
DiffKey::print();
println!("{}", TestDiff::new(&actual_output, &test.output));
println!();
return false;
}
// Check that the edited tree is consistent.
check_consistent_sizes(&tree3, &input);
if let Err(message) = check_changed_ranges(&tree2, &tree3, &input) {
println!("Unexpected scope change in seed {seed} with start seed {start_seed}\n{message}\n\n");
return false;
}
true
});
if !passed {
failure_count += 1;
break;
}
}
}
assert!(
failure_count == 0,
"{failure_count} {language_name} corpus tests failed"
);
if let Some(skipped) = skipped.as_mut() {
skipped.retain(|_, v| *v == 0);
if !skipped.is_empty() {
println!("Non matchable skip definitions:");
for k in skipped.keys() {
println!(" {k}");
}
panic!("Non matchable skip definitions needs to be removed");
}
}
}
#[test]
fn test_feature_corpus_files() {
let test_grammars_dir = fixtures_dir().join("test_grammars");
let mut failure_count = 0;
for entry in fs::read_dir(test_grammars_dir).unwrap() {
let entry = entry.unwrap();
if !entry.metadata().unwrap().is_dir() {
continue;
}
let language_name = entry.file_name();
let language_name = language_name.to_str().unwrap();
if let Some(filter) = LANGUAGE_FILTER.as_ref() {
if language_name != filter {
continue;
}
}
let test_path = entry.path();
let mut grammar_path = test_path.join("grammar.js");
if !grammar_path.exists() {
grammar_path = test_path.join("grammar.json");
}
let error_message_path = test_path.join("expected_error.txt");
let grammar_json = tree_sitter_generate::load_grammar_file(&grammar_path, None)
.with_context(|| {
format!(
"Could not load grammar file for test language '{language_name}' at {}",
grammar_path.display()
)
})
.unwrap();
let generate_result =
tree_sitter_generate::generate_parser_for_grammar(&grammar_json, Some((0, 0, 0)));
if error_message_path.exists() {
if EXAMPLE_INCLUDE.is_some() || EXAMPLE_EXCLUDE.is_some() {
continue;
}
eprintln!("test language: {language_name:?}");
let expected_message = fs::read_to_string(&error_message_path)
.unwrap()
.replace("\r\n", "\n");
if let Err(e) = generate_result {
let actual_message = e.to_string().replace("\r\n", "\n");
if expected_message != actual_message {
eprintln!(
"Unexpected error message.\n\nExpected:\n\n`{expected_message}`\nActual:\n\n`{actual_message}`\n",
);
failure_count += 1;
}
} else {
eprintln!("Expected error message but got none for test grammar '{language_name}'",);
failure_count += 1;
}
} else {
if let Err(e) = &generate_result {
eprintln!("Unexpected error for test grammar '{language_name}':\n{e}",);
failure_count += 1;
continue;
}
let corpus_path = test_path.join("corpus.txt");
let c_code = generate_result.unwrap().1;
let language = get_test_language(language_name, &c_code, Some(&test_path));
let test = parse_tests(&corpus_path).unwrap();
let tests = flatten_tests(test, EXAMPLE_INCLUDE.as_ref(), EXAMPLE_EXCLUDE.as_ref());
if !tests.is_empty() {
eprintln!("test language: {language_name:?}");
}
for test in tests {
eprintln!(" example: {:?}", test.name);
let passed = allocations::record(|| {
let mut log_session = None;
let mut parser = get_parser(&mut log_session, "log.html");
parser.set_language(&language).unwrap();
let tree = parser.parse(&test.input, None).unwrap();
let mut actual_output = tree.root_node().to_sexp();
if !test.has_fields {
actual_output = strip_sexp_fields(&actual_output);
}
if actual_output == test.output {
true
} else {
DiffKey::print();
print!("{}", TestDiff::new(&actual_output, &test.output));
println!();
false
}
});
if !passed {
failure_count += 1;
}
}
}
}
assert!(failure_count == 0, "{failure_count} corpus tests failed");
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests/node_test.rs | crates/cli/src/tests/node_test.rs | use tree_sitter::{InputEdit, Node, Parser, Point, Tree};
use tree_sitter_generate::load_grammar_file;
use super::{
get_random_edit,
helpers::fixtures::{fixtures_dir, get_language, get_test_language},
Rand,
};
use crate::{
parse::perform_edit,
tests::{generate_parser, helpers::fixtures::get_test_fixture_language},
};
const JSON_EXAMPLE: &str = r#"
[
123,
false,
{
"x": null
}
]
"#;
const GRAMMAR_WITH_ALIASES_AND_EXTRAS: &str = r#"{
"name": "aliases_and_extras",
"extras": [
{"type": "PATTERN", "value": "\\s+"},
{"type": "SYMBOL", "name": "comment"}
],
"rules": {
"a": {
"type": "SEQ",
"members": [
{"type": "SYMBOL", "name": "b"},
{
"type": "ALIAS",
"value": "B",
"named": true,
"content": {"type": "SYMBOL", "name": "b"}
},
{
"type": "ALIAS",
"value": "C",
"named": true,
"content": {"type": "SYMBOL", "name": "_c"}
}
]
},
"b": {"type": "STRING", "value": "b"},
"_c": {"type": "STRING", "value": "c"},
"comment": {"type": "STRING", "value": "..."}
}
}"#;
#[test]
fn test_node_child() {
let tree = parse_json_example();
let array_node = tree.root_node().child(0).unwrap();
assert_eq!(array_node.kind(), "array");
assert_eq!(array_node.named_child_count(), 3);
assert_eq!(array_node.start_byte(), JSON_EXAMPLE.find('[').unwrap());
assert_eq!(array_node.end_byte(), JSON_EXAMPLE.find(']').unwrap() + 1);
assert_eq!(array_node.start_position(), Point::new(2, 0));
assert_eq!(array_node.end_position(), Point::new(8, 1));
assert_eq!(array_node.child_count(), 7);
let left_bracket_node = array_node.child(0).unwrap();
let number_node = array_node.child(1).unwrap();
let comma_node1 = array_node.child(2).unwrap();
let false_node = array_node.child(3).unwrap();
let comma_node2 = array_node.child(4).unwrap();
let object_node = array_node.child(5).unwrap();
let right_bracket_node = array_node.child(6).unwrap();
assert_eq!(left_bracket_node.kind(), "[");
assert_eq!(number_node.kind(), "number");
assert_eq!(comma_node1.kind(), ",");
assert_eq!(false_node.kind(), "false");
assert_eq!(comma_node2.kind(), ",");
assert_eq!(object_node.kind(), "object");
assert_eq!(right_bracket_node.kind(), "]");
assert!(!left_bracket_node.is_named());
assert!(number_node.is_named());
assert!(!comma_node1.is_named());
assert!(false_node.is_named());
assert!(!comma_node2.is_named());
assert!(object_node.is_named());
assert!(!right_bracket_node.is_named());
assert_eq!(number_node.start_byte(), JSON_EXAMPLE.find("123").unwrap());
assert_eq!(
number_node.end_byte(),
JSON_EXAMPLE.find("123").unwrap() + 3
);
assert_eq!(number_node.start_position(), Point::new(3, 2));
assert_eq!(number_node.end_position(), Point::new(3, 5));
assert_eq!(false_node.start_byte(), JSON_EXAMPLE.find("false").unwrap());
assert_eq!(
false_node.end_byte(),
JSON_EXAMPLE.find("false").unwrap() + 5
);
assert_eq!(false_node.start_position(), Point::new(4, 2));
assert_eq!(false_node.end_position(), Point::new(4, 7));
assert_eq!(object_node.start_byte(), JSON_EXAMPLE.find('{').unwrap());
assert_eq!(object_node.start_position(), Point::new(5, 2));
assert_eq!(object_node.end_position(), Point::new(7, 3));
assert_eq!(object_node.child_count(), 3);
let left_brace_node = object_node.child(0).unwrap();
let pair_node = object_node.child(1).unwrap();
let right_brace_node = object_node.child(2).unwrap();
assert_eq!(left_brace_node.kind(), "{");
assert_eq!(pair_node.kind(), "pair");
assert_eq!(right_brace_node.kind(), "}");
assert!(!left_brace_node.is_named());
assert!(pair_node.is_named());
assert!(!right_brace_node.is_named());
assert_eq!(pair_node.start_byte(), JSON_EXAMPLE.find("\"x\"").unwrap());
assert_eq!(pair_node.end_byte(), JSON_EXAMPLE.find("null").unwrap() + 4);
assert_eq!(pair_node.start_position(), Point::new(6, 4));
assert_eq!(pair_node.end_position(), Point::new(6, 13));
assert_eq!(pair_node.child_count(), 3);
let string_node = pair_node.child(0).unwrap();
let colon_node = pair_node.child(1).unwrap();
let null_node = pair_node.child(2).unwrap();
assert_eq!(string_node.kind(), "string");
assert_eq!(colon_node.kind(), ":");
assert_eq!(null_node.kind(), "null");
assert!(string_node.is_named());
assert!(!colon_node.is_named());
assert!(null_node.is_named());
assert_eq!(
string_node.start_byte(),
JSON_EXAMPLE.find("\"x\"").unwrap()
);
assert_eq!(
string_node.end_byte(),
JSON_EXAMPLE.find("\"x\"").unwrap() + 3
);
assert_eq!(string_node.start_position(), Point::new(6, 4));
assert_eq!(string_node.end_position(), Point::new(6, 7));
assert_eq!(null_node.start_byte(), JSON_EXAMPLE.find("null").unwrap());
assert_eq!(null_node.end_byte(), JSON_EXAMPLE.find("null").unwrap() + 4);
assert_eq!(null_node.start_position(), Point::new(6, 9));
assert_eq!(null_node.end_position(), Point::new(6, 13));
assert_eq!(string_node.parent().unwrap(), pair_node);
assert_eq!(null_node.parent().unwrap(), pair_node);
assert_eq!(pair_node.parent().unwrap(), object_node);
assert_eq!(number_node.parent().unwrap(), array_node);
assert_eq!(false_node.parent().unwrap(), array_node);
assert_eq!(object_node.parent().unwrap(), array_node);
assert_eq!(array_node.parent().unwrap(), tree.root_node());
assert_eq!(tree.root_node().parent(), None);
assert_eq!(
tree.root_node().child_with_descendant(null_node).unwrap(),
array_node
);
assert_eq!(
array_node.child_with_descendant(null_node).unwrap(),
object_node
);
assert_eq!(
object_node.child_with_descendant(null_node).unwrap(),
pair_node
);
assert_eq!(
pair_node.child_with_descendant(null_node).unwrap(),
null_node
);
assert_eq!(null_node.child_with_descendant(null_node), None);
}
#[test]
fn test_node_children() {
let tree = parse_json_example();
let mut cursor = tree.walk();
let array_node = tree.root_node().child(0).unwrap();
assert_eq!(
array_node
.children(&mut cursor)
.map(|n| n.kind())
.collect::<Vec<_>>(),
&["[", "number", ",", "false", ",", "object", "]",]
);
assert_eq!(
array_node
.named_children(&mut cursor)
.map(|n| n.kind())
.collect::<Vec<_>>(),
&["number", "false", "object"]
);
let object_node = array_node
.named_children(&mut cursor)
.find(|n| n.kind() == "object")
.unwrap();
assert_eq!(
object_node
.children(&mut cursor)
.map(|n| n.kind())
.collect::<Vec<_>>(),
&["{", "pair", "}",]
);
}
#[test]
fn test_node_children_by_field_name() {
let mut parser = Parser::new();
parser.set_language(&get_language("python")).unwrap();
let source = "
if one:
a()
elif two:
b()
elif three:
c()
elif four:
d()
";
let tree = parser.parse(source, None).unwrap();
let node = tree.root_node().child(0).unwrap();
assert_eq!(node.kind(), "if_statement");
let mut cursor = tree.walk();
let alternatives = node.children_by_field_name("alternative", &mut cursor);
let alternative_texts =
alternatives.map(|n| &source[n.child_by_field_name("condition").unwrap().byte_range()]);
assert_eq!(
alternative_texts.collect::<Vec<_>>(),
&["two", "three", "four",]
);
}
#[test]
fn test_node_parent_of_child_by_field_name() {
let mut parser = Parser::new();
parser.set_language(&get_language("javascript")).unwrap();
let tree = parser.parse("foo(a().b[0].c.d.e())", None).unwrap();
let call_node = tree
.root_node()
.named_child(0)
.unwrap()
.named_child(0)
.unwrap();
assert_eq!(call_node.kind(), "call_expression");
// Regression test - when a field points to a hidden node (in this case, `_expression`)
// the hidden node should not be added to the node parent cache.
assert_eq!(
call_node.child_by_field_name("function").unwrap().parent(),
Some(call_node)
);
}
#[test]
fn test_parent_of_zero_width_node() {
let code = "def dupa(foo):";
let mut parser = Parser::new();
parser.set_language(&get_language("python")).unwrap();
let tree = parser.parse(code, None).unwrap();
let root = tree.root_node();
let function_definition = root.child(0).unwrap();
let block = function_definition.child(4).unwrap();
let block_parent = block.parent().unwrap();
assert_eq!(block.to_string(), "(block)");
assert_eq!(block_parent.kind(), "function_definition");
assert_eq!(block_parent.to_string(), "(function_definition name: (identifier) parameters: (parameters (identifier)) body: (block))");
assert_eq!(
root.child_with_descendant(block).unwrap(),
function_definition
);
assert_eq!(
function_definition.child_with_descendant(block).unwrap(),
block
);
assert_eq!(block.child_with_descendant(block), None);
let code = "<script></script>";
parser.set_language(&get_language("html")).unwrap();
let tree = parser.parse(code, None).unwrap();
let root = tree.root_node();
let script_element = root.child(0).unwrap();
let raw_text = script_element.child(1).unwrap();
let parent = raw_text.parent().unwrap();
assert_eq!(parent, script_element);
}
#[test]
fn test_next_sibling_of_zero_width_node() {
let mut parser = Parser::new();
let language = get_test_fixture_language("next_sibling_from_zwt");
parser.set_language(&language).unwrap();
let tree = parser.parse("abdef", None).unwrap();
let root_node = tree.root_node();
let missing_c = root_node.child(2).unwrap();
assert!(missing_c.is_missing());
assert_eq!(missing_c.kind(), "c");
let node_d = root_node.child(3).unwrap();
assert_eq!(missing_c.next_sibling().unwrap(), node_d);
let prev_sibling = node_d.prev_sibling().unwrap();
assert_eq!(prev_sibling, missing_c);
}
#[test]
fn test_first_child_for_offset() {
let mut parser = Parser::new();
parser.set_language(&get_language("javascript")).unwrap();
let tree = parser.parse("x10 + 100", None).unwrap();
let sum_node = tree.root_node().child(0).unwrap().child(0).unwrap();
assert_eq!(
sum_node.first_child_for_byte(0).unwrap().kind(),
"identifier"
);
assert_eq!(
sum_node.first_child_for_byte(1).unwrap().kind(),
"identifier"
);
assert_eq!(sum_node.first_child_for_byte(3).unwrap().kind(), "+");
assert_eq!(sum_node.first_child_for_byte(5).unwrap().kind(), "number");
}
#[test]
fn test_first_named_child_for_offset() {
let mut parser = Parser::new();
parser.set_language(&get_language("javascript")).unwrap();
let tree = parser.parse("x10 + 100", None).unwrap();
let sum_node = tree.root_node().child(0).unwrap().child(0).unwrap();
assert_eq!(
sum_node.first_named_child_for_byte(0).unwrap().kind(),
"identifier"
);
assert_eq!(
sum_node.first_named_child_for_byte(1).unwrap().kind(),
"identifier"
);
assert_eq!(
sum_node.first_named_child_for_byte(3).unwrap().kind(),
"number"
);
}
#[test]
fn test_node_field_name_for_child() {
let mut parser = Parser::new();
parser.set_language(&get_language("c")).unwrap();
let tree = parser
.parse("int w = x + /* y is special! */ y;", None)
.unwrap();
let translation_unit_node = tree.root_node();
let declaration_node = translation_unit_node.named_child(0).unwrap();
let binary_expression_node = declaration_node
.child_by_field_name("declarator")
.unwrap()
.child_by_field_name("value")
.unwrap();
// -------------------
// left: (identifier) 0
// operator: "+" 1 <--- (not a named child)
// (comment) 2 <--- (is an extra)
// right: (identifier) 3
// -------------------
assert_eq!(binary_expression_node.field_name_for_child(0), Some("left"));
assert_eq!(
binary_expression_node.field_name_for_child(1),
Some("operator")
);
// The comment should not have a field name, as it's just an extra
assert_eq!(binary_expression_node.field_name_for_child(2), None);
assert_eq!(
binary_expression_node.field_name_for_child(3),
Some("right")
);
// Negative test - Not a valid child index
assert_eq!(binary_expression_node.field_name_for_child(4), None);
}
#[test]
fn test_node_field_name_for_named_child() {
let mut parser = Parser::new();
parser.set_language(&get_language("c")).unwrap();
let tree = parser
.parse("int w = x + /* y is special! */ y;", None)
.unwrap();
let translation_unit_node = tree.root_node();
let declaration_node = translation_unit_node.named_child(0).unwrap();
let binary_expression_node = declaration_node
.child_by_field_name("declarator")
.unwrap()
.child_by_field_name("value")
.unwrap();
// -------------------
// left: (identifier) 0
// operator: "+" _ <--- (not a named child)
// (comment) 1 <--- (is an extra)
// right: (identifier) 2
// -------------------
assert_eq!(
binary_expression_node.field_name_for_named_child(0),
Some("left")
);
// The comment should not have a field name, as it's just an extra
assert_eq!(binary_expression_node.field_name_for_named_child(1), None);
// The operator is not a named child, so the named child at index 2 is the right child
assert_eq!(
binary_expression_node.field_name_for_named_child(2),
Some("right")
);
// Negative test - Not a valid child index
assert_eq!(binary_expression_node.field_name_for_named_child(3), None);
}
#[test]
fn test_node_child_by_field_name_with_extra_hidden_children() {
let mut parser = Parser::new();
parser.set_language(&get_language("python")).unwrap();
// In the Python grammar, some fields are applied to `suite` nodes,
// which consist of an invisible `indent` token followed by a block.
// Check that when searching for a child with a field name, we don't
//
let tree = parser.parse("while a:\n pass", None).unwrap();
let while_node = tree.root_node().child(0).unwrap();
assert_eq!(while_node.kind(), "while_statement");
assert_eq!(
while_node.child_by_field_name("body").unwrap(),
while_node.child(3).unwrap(),
);
}
#[test]
fn test_node_named_child() {
let tree = parse_json_example();
let array_node = tree.root_node().child(0).unwrap();
let number_node = array_node.named_child(0).unwrap();
let false_node = array_node.named_child(1).unwrap();
let object_node = array_node.named_child(2).unwrap();
assert_eq!(number_node.kind(), "number");
assert_eq!(number_node.start_byte(), JSON_EXAMPLE.find("123").unwrap());
assert_eq!(
number_node.end_byte(),
JSON_EXAMPLE.find("123").unwrap() + 3
);
assert_eq!(number_node.start_position(), Point::new(3, 2));
assert_eq!(number_node.end_position(), Point::new(3, 5));
assert_eq!(false_node.kind(), "false");
assert_eq!(false_node.start_byte(), JSON_EXAMPLE.find("false").unwrap());
assert_eq!(
false_node.end_byte(),
JSON_EXAMPLE.find("false").unwrap() + 5
);
assert_eq!(false_node.start_position(), Point::new(4, 2));
assert_eq!(false_node.end_position(), Point::new(4, 7));
assert_eq!(object_node.kind(), "object");
assert_eq!(object_node.start_byte(), JSON_EXAMPLE.find('{').unwrap());
assert_eq!(object_node.start_position(), Point::new(5, 2));
assert_eq!(object_node.end_position(), Point::new(7, 3));
assert_eq!(object_node.named_child_count(), 1);
let pair_node = object_node.named_child(0).unwrap();
assert_eq!(pair_node.kind(), "pair");
assert_eq!(pair_node.start_byte(), JSON_EXAMPLE.find("\"x\"").unwrap());
assert_eq!(pair_node.end_byte(), JSON_EXAMPLE.find("null").unwrap() + 4);
assert_eq!(pair_node.start_position(), Point::new(6, 4));
assert_eq!(pair_node.end_position(), Point::new(6, 13));
let string_node = pair_node.named_child(0).unwrap();
let null_node = pair_node.named_child(1).unwrap();
assert_eq!(string_node.kind(), "string");
assert_eq!(null_node.kind(), "null");
assert_eq!(
string_node.start_byte(),
JSON_EXAMPLE.find("\"x\"").unwrap()
);
assert_eq!(
string_node.end_byte(),
JSON_EXAMPLE.find("\"x\"").unwrap() + 3
);
assert_eq!(string_node.start_position(), Point::new(6, 4));
assert_eq!(string_node.end_position(), Point::new(6, 7));
assert_eq!(null_node.start_byte(), JSON_EXAMPLE.find("null").unwrap());
assert_eq!(null_node.end_byte(), JSON_EXAMPLE.find("null").unwrap() + 4);
assert_eq!(null_node.start_position(), Point::new(6, 9));
assert_eq!(null_node.end_position(), Point::new(6, 13));
assert_eq!(string_node.parent().unwrap(), pair_node);
assert_eq!(null_node.parent().unwrap(), pair_node);
assert_eq!(pair_node.parent().unwrap(), object_node);
assert_eq!(number_node.parent().unwrap(), array_node);
assert_eq!(false_node.parent().unwrap(), array_node);
assert_eq!(object_node.parent().unwrap(), array_node);
assert_eq!(array_node.parent().unwrap(), tree.root_node());
assert_eq!(tree.root_node().parent(), None);
assert_eq!(
tree.root_node().child_with_descendant(null_node).unwrap(),
array_node
);
assert_eq!(
array_node.child_with_descendant(null_node).unwrap(),
object_node
);
assert_eq!(
object_node.child_with_descendant(null_node).unwrap(),
pair_node
);
assert_eq!(
pair_node.child_with_descendant(null_node).unwrap(),
null_node
);
assert_eq!(null_node.child_with_descendant(null_node), None);
}
#[test]
fn test_node_named_child_with_aliases_and_extras() {
let (parser_name, parser_code) = generate_parser(GRAMMAR_WITH_ALIASES_AND_EXTRAS).unwrap();
let mut parser = Parser::new();
parser
.set_language(&get_test_language(&parser_name, &parser_code, None))
.unwrap();
let tree = parser.parse("b ... b ... c", None).unwrap();
let root = tree.root_node();
assert_eq!(root.to_sexp(), "(a (b) (comment) (B) (comment) (C))");
assert_eq!(root.named_child_count(), 5);
assert_eq!(root.named_child(0).unwrap().kind(), "b");
assert_eq!(root.named_child(1).unwrap().kind(), "comment");
assert_eq!(root.named_child(2).unwrap().kind(), "B");
assert_eq!(root.named_child(3).unwrap().kind(), "comment");
assert_eq!(root.named_child(4).unwrap().kind(), "C");
}
#[test]
fn test_node_descendant_count() {
    // The root's descendant_count must equal the total number of nodes in
    // the tree, and goto_descendant(i) must land on the i-th node whether
    // the cursor walks forward or backward through the indices.
    let tree = parse_json_example();
    let root = tree.root_node();
    let all_nodes = get_all_nodes(&tree);
    assert_eq!(root.descendant_count(), all_nodes.len());

    let mut cursor = root.walk();
    for i in 0..all_nodes.len() {
        cursor.goto_descendant(i);
        assert_eq!(cursor.node(), all_nodes[i], "index {i}");
    }
    for i in (0..all_nodes.len()).rev() {
        cursor.goto_descendant(i);
        assert_eq!(cursor.node(), all_nodes[i], "rev index {i}");
    }
}
#[test]
fn test_descendant_count_single_node_tree() {
    // A template file with no directives parses to a root plus a single
    // leaf: two nodes total, at depths 0 and 1 respectively.
    let mut parser = Parser::new();
    parser
        .set_language(&get_language("embedded-template"))
        .unwrap();
    let tree = parser.parse("hello", None).unwrap();

    let nodes = get_all_nodes(&tree);
    assert_eq!(nodes.len(), 2);
    assert_eq!(tree.root_node().descendant_count(), 2);

    // goto_descendant(i) must reach node i at the expected depth.
    let mut cursor = tree.root_node().walk();
    for (i, expected_depth) in [(0, 0), (1, 1)] {
        cursor.goto_descendant(i);
        assert_eq!(cursor.depth(), expected_depth);
        assert_eq!(cursor.node(), nodes[i]);
    }
}
#[test]
fn test_node_descendant_for_range() {
    // Exercises (named_)descendant_for_byte_range / _for_point_range on the
    // shared JSON example: exact leaf matches, ranges straddling a leaf
    // boundary, multi-leaf spans, zero-width tokens, and reversed ranges.
    let tree = parse_json_example();
    let array_node = tree.root_node();
    // Leaf node exactly matches the given bounds - byte query
    let colon_index = JSON_EXAMPLE.find(':').unwrap();
    let colon_node = array_node
        .descendant_for_byte_range(colon_index, colon_index + 1)
        .unwrap();
    assert_eq!(colon_node.kind(), ":");
    assert_eq!(colon_node.start_byte(), colon_index);
    assert_eq!(colon_node.end_byte(), colon_index + 1);
    assert_eq!(colon_node.start_position(), Point::new(6, 7));
    assert_eq!(colon_node.end_position(), Point::new(6, 8));
    // Leaf node exactly matches the given bounds - point query
    let colon_node = array_node
        .descendant_for_point_range(Point::new(6, 7), Point::new(6, 8))
        .unwrap();
    assert_eq!(colon_node.kind(), ":");
    assert_eq!(colon_node.start_byte(), colon_index);
    assert_eq!(colon_node.end_byte(), colon_index + 1);
    assert_eq!(colon_node.start_position(), Point::new(6, 7));
    assert_eq!(colon_node.end_position(), Point::new(6, 8));
    // The given point is between two adjacent leaf nodes - byte query
    let colon_index = JSON_EXAMPLE.find(':').unwrap();
    let colon_node = array_node
        .descendant_for_byte_range(colon_index, colon_index)
        .unwrap();
    assert_eq!(colon_node.kind(), ":");
    assert_eq!(colon_node.start_byte(), colon_index);
    assert_eq!(colon_node.end_byte(), colon_index + 1);
    assert_eq!(colon_node.start_position(), Point::new(6, 7));
    assert_eq!(colon_node.end_position(), Point::new(6, 8));
    // The given point is between two adjacent leaf nodes - point query
    let colon_node = array_node
        .descendant_for_point_range(Point::new(6, 7), Point::new(6, 7))
        .unwrap();
    assert_eq!(colon_node.kind(), ":");
    assert_eq!(colon_node.start_byte(), colon_index);
    assert_eq!(colon_node.end_byte(), colon_index + 1);
    assert_eq!(colon_node.start_position(), Point::new(6, 7));
    assert_eq!(colon_node.end_position(), Point::new(6, 8));
    // Leaf node starts at the lower bound, ends after the upper bound - byte query
    let string_index = JSON_EXAMPLE.find("\"x\"").unwrap();
    let string_node = array_node
        .descendant_for_byte_range(string_index, string_index + 2)
        .unwrap();
    assert_eq!(string_node.kind(), "string");
    assert_eq!(string_node.start_byte(), string_index);
    assert_eq!(string_node.end_byte(), string_index + 3);
    assert_eq!(string_node.start_position(), Point::new(6, 4));
    assert_eq!(string_node.end_position(), Point::new(6, 7));
    // Leaf node starts at the lower bound, ends after the upper bound - point query
    let string_node = array_node
        .descendant_for_point_range(Point::new(6, 4), Point::new(6, 6))
        .unwrap();
    assert_eq!(string_node.kind(), "string");
    assert_eq!(string_node.start_byte(), string_index);
    assert_eq!(string_node.end_byte(), string_index + 3);
    assert_eq!(string_node.start_position(), Point::new(6, 4));
    assert_eq!(string_node.end_position(), Point::new(6, 7));
    // Leaf node starts before the lower bound, ends at the upper bound - byte query
    let null_index = JSON_EXAMPLE.find("null").unwrap();
    let null_node = array_node
        .descendant_for_byte_range(null_index + 1, null_index + 4)
        .unwrap();
    assert_eq!(null_node.kind(), "null");
    assert_eq!(null_node.start_byte(), null_index);
    assert_eq!(null_node.end_byte(), null_index + 4);
    assert_eq!(null_node.start_position(), Point::new(6, 9));
    assert_eq!(null_node.end_position(), Point::new(6, 13));
    // Leaf node starts before the lower bound, ends at the upper bound - point query
    let null_node = array_node
        .descendant_for_point_range(Point::new(6, 11), Point::new(6, 13))
        .unwrap();
    assert_eq!(null_node.kind(), "null");
    assert_eq!(null_node.start_byte(), null_index);
    assert_eq!(null_node.end_byte(), null_index + 4);
    assert_eq!(null_node.start_position(), Point::new(6, 9));
    assert_eq!(null_node.end_position(), Point::new(6, 13));
    // The bounds span multiple leaf nodes - return the smallest node that does span it.
    let pair_node = array_node
        .descendant_for_byte_range(string_index + 2, string_index + 4)
        .unwrap();
    assert_eq!(pair_node.kind(), "pair");
    assert_eq!(pair_node.start_byte(), string_index);
    assert_eq!(pair_node.end_byte(), string_index + 9);
    assert_eq!(pair_node.start_position(), Point::new(6, 4));
    assert_eq!(pair_node.end_position(), Point::new(6, 13));
    assert_eq!(colon_node.parent(), Some(pair_node));
    // no leaf spans the given range - return the smallest node that does span it.
    let pair_node = array_node
        .named_descendant_for_point_range(Point::new(6, 6), Point::new(6, 8))
        .unwrap();
    assert_eq!(pair_node.kind(), "pair");
    assert_eq!(pair_node.start_byte(), string_index);
    assert_eq!(pair_node.end_byte(), string_index + 9);
    assert_eq!(pair_node.start_position(), Point::new(6, 4));
    assert_eq!(pair_node.end_position(), Point::new(6, 13));
    // Zero-width token
    {
        // The empty raw_text between <script> and </script> sits entirely at
        // byte 8; both point and byte queries must still find it.
        let code = "<script></script>";
        let mut parser = Parser::new();
        parser.set_language(&get_language("html")).unwrap();
        let tree = parser.parse(code, None).unwrap();
        let root = tree.root_node();
        let child = root
            .named_descendant_for_point_range(Point::new(0, 8), Point::new(0, 8))
            .unwrap();
        assert_eq!(child.kind(), "raw_text");
        let child2 = root.named_descendant_for_byte_range(8, 8).unwrap();
        assert_eq!(child2.kind(), "raw_text");
        assert_eq!(child, child2);
    }
    // Negative test, start > end
    assert_eq!(array_node.descendant_for_byte_range(1, 0), None);
    assert_eq!(
        array_node.descendant_for_point_range(Point::new(6, 8), Point::new(6, 7)),
        None
    );
}
#[test]
fn test_node_edit() {
    // Applying an InputEdit to a saved Node (Node::edit) must keep its
    // kind/byte/position bookkeeping in sync with what a fresh traversal of
    // the edited tree reports.
    let mut code = JSON_EXAMPLE.as_bytes().to_vec();
    let mut tree = parse_json_example();
    let mut rand = Rand::new(0); // fixed seed keeps the test deterministic
    for _ in 0..10 {
        let mut nodes_before = get_all_nodes(&tree);
        let edit = get_random_edit(&mut rand, &code);
        let mut tree2 = tree.clone();
        let edit = perform_edit(&mut tree2, &mut code, &edit).unwrap();
        // Update the stale nodes in place...
        for node in &mut nodes_before {
            node.edit(&edit);
        }
        // ...then compare them against fresh nodes from the edited tree.
        let nodes_after = get_all_nodes(&tree2);
        for (i, node) in nodes_before.into_iter().enumerate() {
            assert_eq!(
                (node.kind(), node.start_byte(), node.start_position()),
                (
                    nodes_after[i].kind(),
                    nodes_after[i].start_byte(),
                    nodes_after[i].start_position()
                ),
            );
        }
        tree = tree2;
    }
}
#[test]
fn test_root_node_with_offset() {
    // root_node_with_offset shifts every reported byte offset by 6 and every
    // point by (2, 2) without reparsing; children and cursor traversals must
    // inherit the shifted coordinates.
    let mut parser = Parser::new();
    parser.set_language(&get_language("javascript")).unwrap();
    let tree = parser.parse(" if (a) b", None).unwrap();
    let node = tree.root_node_with_offset(6, Point::new(2, 2));
    assert_eq!(node.byte_range(), 8..16);
    assert_eq!(node.start_position(), Point::new(2, 4));
    assert_eq!(node.end_position(), Point::new(2, 12));
    // Child lookup through the offset root stays in the shifted frame.
    let child = node.child(0).unwrap().child(2).unwrap();
    assert_eq!(child.kind(), "expression_statement");
    assert_eq!(child.byte_range(), 15..16);
    assert_eq!(child.start_position(), Point::new(2, 11));
    assert_eq!(child.end_position(), Point::new(2, 12));
    // A cursor created from the offset root also reports shifted positions.
    let mut cursor = node.walk();
    cursor.goto_first_child();
    cursor.goto_first_child();
    cursor.goto_next_sibling();
    let child = cursor.node();
    assert_eq!(child.kind(), "parenthesized_expression");
    assert_eq!(child.byte_range(), 11..14);
    assert_eq!(child.start_position(), Point::new(2, 7));
    assert_eq!(child.end_position(), Point::new(2, 10));
}
#[test]
fn test_node_is_extra() {
    // A comment inside a call expression is an "extra" node; the program
    // root itself is not.
    let mut parser = Parser::new();
    parser.set_language(&get_language("javascript")).unwrap();
    let tree = parser.parse("foo(/* hi */);", None).unwrap();

    let root = tree.root_node();
    assert_eq!(root.kind(), "program");
    assert!(!root.is_extra());

    let comment = root.descendant_for_byte_range(7, 7).unwrap();
    assert_eq!(comment.kind(), "comment");
    assert!(comment.is_extra());
}
#[test]
fn test_node_is_error() {
    // An unterminated call expression yields an ERROR node under the root,
    // and the root reports has_error().
    let mut parser = Parser::new();
    parser.set_language(&get_language("javascript")).unwrap();
    let tree = parser.parse("foo(", None).unwrap();

    let root = tree.root_node();
    assert_eq!(root.kind(), "program");
    assert!(root.has_error());

    let error_node = root.child(0).unwrap();
    assert_eq!(error_node.kind(), "ERROR");
    assert!(error_node.is_error());
}
#[test]
fn test_edit_point() {
    // A 5-byte insertion at byte 5 on row 0: positions at or past the edit
    // start shift right by the inserted length; earlier ones are untouched.
    let edit = InputEdit {
        start_byte: 5,
        old_end_byte: 5,
        new_end_byte: 10,
        start_position: Point::new(0, 5),
        old_end_position: Point::new(0, 5),
        new_end_position: Point::new(0, 10),
    };

    // Run edit_point on a (column, byte) pair and return the adjusted pair.
    let apply = |column: usize, byte: usize| {
        let mut point = Point::new(0, column);
        let mut byte = byte;
        edit.edit_point(&mut point, &mut byte);
        (point, byte)
    };

    // Point after edit
    assert_eq!(apply(8, 8), (Point::new(0, 13), 13));
    // Point before edit
    assert_eq!(apply(2, 2), (Point::new(0, 2), 2));
    // Point at edit start
    assert_eq!(apply(5, 5), (Point::new(0, 10), 10));
}
#[test]
fn test_edit_range() {
    use tree_sitter::{InputEdit, Point, Range};

    // Replace bytes 10..15 (rows 1..1) with a larger region ending at the
    // start of row 2: net growth of 5 bytes and 1 row.
    let edit = InputEdit {
        start_byte: 10,
        old_end_byte: 15,
        new_end_byte: 20,
        start_position: Point::new(1, 0),
        old_end_position: Point::new(1, 5),
        new_end_position: Point::new(2, 0),
    };

    // Apply edit_range to a copy of the range and return the result.
    let edited = |mut range: Range| {
        edit.edit_range(&mut range);
        range
    };

    // Range after edit: both endpoints shift by the growth.
    assert_eq!(
        edited(Range {
            start_byte: 20,
            end_byte: 25,
            start_point: Point::new(2, 0),
            end_point: Point::new(2, 5),
        }),
        Range {
            start_byte: 25,
            end_byte: 30,
            start_point: Point::new(3, 0),
            end_point: Point::new(3, 5),
        }
    );

    // Range before edit: untouched.
    assert_eq!(
        edited(Range {
            start_byte: 5,
            end_byte: 8,
            start_point: Point::new(0, 5),
            end_point: Point::new(0, 8),
        }),
        Range {
            start_byte: 5,
            end_byte: 8,
            start_point: Point::new(0, 5),
            end_point: Point::new(0, 8),
        }
    );

    // Range overlapping the edit: the end is clamped to the edit start.
    assert_eq!(
        edited(Range {
            start_byte: 8,
            end_byte: 12,
            start_point: Point::new(0, 8),
            end_point: Point::new(1, 2),
        }),
        Range {
            start_byte: 8,
            end_byte: 10,
            start_point: Point::new(0, 8),
            end_point: Point::new(1, 0),
        }
    );
}
#[test]
fn test_node_sexp() {
    // Anonymous tokens render as quoted strings in s-expressions, while
    // named nodes use their bare kind name.
    let mut parser = Parser::new();
    parser.set_language(&get_language("javascript")).unwrap();
    let tree = parser.parse("if (a) b", None).unwrap();
    let root = tree.root_node();

    // (byte offset, expected kind, expected s-expression)
    let cases = [
        (0, "if", "(\"if\")"),
        (3, "(", "(\"(\")"),
        (4, "identifier", "(identifier)"),
    ];
    for (byte, kind, sexp) in cases {
        let node = root.descendant_for_byte_range(byte, byte).unwrap();
        assert_eq!(node.kind(), kind);
        assert_eq!(node.to_sexp(), sexp);
    }
}
#[test]
fn test_node_field_names() {
let (parser_name, parser_code) = generate_parser(
r#"
{
"name": "test_grammar_with_fields",
"extras": [
{"type": "PATTERN", "value": "\\s+"}
],
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | true |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests/query_test.rs | crates/cli/src/tests/query_test.rs | use std::{env, fmt::Write, ops::ControlFlow, sync::LazyLock};
use indoc::indoc;
use rand::{prelude::StdRng, SeedableRng};
use streaming_iterator::StreamingIterator;
use tree_sitter::{
CaptureQuantifier, InputEdit, Language, Node, Parser, Point, Query, QueryCursor,
QueryCursorOptions, QueryError, QueryErrorKind, QueryPredicate, QueryPredicateArg,
QueryProperty, Range,
};
use tree_sitter_generate::load_grammar_file;
use unindent::Unindent;
use super::helpers::{
allocations,
fixtures::{get_language, get_test_language},
query_helpers::{assert_query_matches, Match, Pattern},
};
use crate::tests::{
generate_parser,
helpers::{
fixtures::get_test_fixture_language,
query_helpers::{collect_captures, collect_matches},
},
ITERATION_COUNT,
};
static EXAMPLE_FILTER: LazyLock<Option<String>> =
LazyLock::new(|| env::var("TREE_SITTER_TEST_EXAMPLE_FILTER").ok());
#[test]
fn test_query_errors_on_invalid_syntax() {
    // Every syntactically malformed query must be rejected, and the error
    // message must echo the offending line with a caret marking the column
    // where parsing failed. Valid queries at the top sanity-check the API.
    allocations::record(|| {
        let language = get_language("javascript");
        assert!(Query::new(&language, "(if_statement)").is_ok());
        assert!(Query::new(
            &language,
            "(if_statement condition:(parenthesized_expression (identifier)))"
        )
        .is_ok());
        // Mismatched parens
        assert_eq!(
            Query::new(&language, "(if_statement").unwrap_err().message,
            [
                "(if_statement", //
                " ^",
            ]
            .join("\n")
        );
        assert_eq!(
            Query::new(&language, "; comment 1\n; comment 2\n (if_statement))")
                .unwrap_err()
                .message,
            [
                " (if_statement))", //
                " ^",
            ]
            .join("\n")
        );
        // Return an error at the *beginning* of a bare identifier not followed by a colon.
        // If there's a colon but no pattern, return an error at the end of the colon.
        assert_eq!(
            Query::new(&language, "(if_statement identifier)")
                .unwrap_err()
                .message,
            [
                "(if_statement identifier)", //
                " ^",
            ]
            .join("\n")
        );
        assert_eq!(
            Query::new(&language, "(if_statement condition:)")
                .unwrap_err()
                .message,
            [
                "(if_statement condition:)", //
                " ^",
            ]
            .join("\n")
        );
        // Return an error at the beginning of an unterminated string.
        assert_eq!(
            Query::new(&language, r#"(identifier) "h "#)
                .unwrap_err()
                .message,
            [
                r#"(identifier) "h "#, //
                r" ^",
            ]
            .join("\n")
        );
        // Empty tree pattern
        assert_eq!(
            Query::new(&language, r"((identifier) ()")
                .unwrap_err()
                .message,
            [
                "((identifier) ()", //
                " ^",
            ]
            .join("\n")
        );
        // Empty alternation
        assert_eq!(
            Query::new(&language, r"((identifier) [])")
                .unwrap_err()
                .message,
            [
                "((identifier) [])", //
                " ^",
            ]
            .join("\n")
        );
        // Unclosed sibling expression with predicate
        assert_eq!(
            Query::new(&language, r"((identifier) (#a?)")
                .unwrap_err()
                .message,
            [
                "((identifier) (#a?)", //
                " ^",
            ]
            .join("\n")
        );
        // Predicate not ending in `?` or `!`
        assert_eq!(
            Query::new(&language, r"((identifier) (#a))")
                .unwrap_err()
                .message,
            [
                "((identifier) (#a))", //
                " ^",
            ]
            .join("\n")
        );
        // Unclosed predicate
        assert_eq!(
            Query::new(&language, r"((identifier) @x (#eq? @x a")
                .unwrap_err()
                .message,
            [
                r"((identifier) @x (#eq? @x a",
                r" ^",
            ]
            .join("\n")
        );
        // Need at least one child node for a child anchor
        assert_eq!(
            Query::new(&language, r"(statement_block .)")
                .unwrap_err()
                .message,
            [
                //
                r"(statement_block .)",
                r" ^"
            ]
            .join("\n")
        );
        // Need a field name after a negated field operator
        assert_eq!(
            Query::new(&language, r"(statement_block ! (if_statement))")
                .unwrap_err()
                .message,
            [
                r"(statement_block ! (if_statement))",
                r" ^"
            ]
            .join("\n")
        );
        // Unclosed alternation within a tree
        // tree-sitter/tree-sitter/issues/968
        assert_eq!(
            Query::new(&get_language("c"), r#"(parameter_list [ ")" @foo)"#)
                .unwrap_err()
                .message,
            [
                r#"(parameter_list [ ")" @foo)"#,
                r" ^"
            ]
            .join("\n")
        );
        // Unclosed tree within an alternation
        // tree-sitter/tree-sitter/issues/1436
        assert_eq!(
            Query::new(
                &get_language("python"),
                r"[(unary_operator (_) @operand) (not_operator (_) @operand]"
            )
            .unwrap_err()
            .message,
            [
                r"[(unary_operator (_) @operand) (not_operator (_) @operand]",
                r" ^"
            ]
            .join("\n")
        );
        // MISSING keyword with full pattern
        assert_eq!(
            Query::new(
                &get_language("c"),
                r"(MISSING (function_declarator (identifier))) "
            )
            .unwrap_err()
            .message,
            [
                r"(MISSING (function_declarator (identifier))) ",
                r" ^",
            ]
            .join("\n")
        );
        // MISSING keyword with multiple identifiers
        assert_eq!(
            Query::new(
                &get_language("c"),
                r"(MISSING function_declarator function_declarator) "
            )
            .unwrap_err()
            .message,
            [
                r"(MISSING function_declarator function_declarator) ",
                r" ^",
            ]
            .join("\n")
        );
        // A supertype (`/`) expression must be written without spaces.
        assert_eq!(
            Query::new(&language, "(statement / export_statement)").unwrap_err(),
            QueryError {
                row: 0,
                offset: 11,
                column: 11,
                kind: QueryErrorKind::Syntax,
                message: [
                    "(statement / export_statement)", //
                    " ^"
                ]
                .join("\n")
            }
        );
    });
}
#[test]
fn test_query_errors_on_invalid_symbols() {
    // Unknown node kinds produce a NodeType error and unknown field names a
    // Field error; in both cases the offset/column point at the bad name and
    // the message echoes it.
    allocations::record(|| {
        let language = get_language("javascript");
        // Unknown anonymous tokens (including ones with escapes).
        assert_eq!(
            Query::new(&language, "\">>>>\"").unwrap_err(),
            QueryError {
                row: 0,
                offset: 1,
                column: 1,
                kind: QueryErrorKind::NodeType,
                message: "\">>>>\"".to_string()
            }
        );
        assert_eq!(
            Query::new(&language, "\"te\\\"st\"").unwrap_err(),
            QueryError {
                row: 0,
                offset: 1,
                column: 1,
                kind: QueryErrorKind::NodeType,
                message: "\"te\\\"st\"".to_string()
            }
        );
        assert_eq!(
            Query::new(&language, "\"\\\\\" @cap").unwrap_err(),
            QueryError {
                row: 0,
                offset: 1,
                column: 1,
                kind: QueryErrorKind::NodeType,
                message: "\"\\\\\"".to_string()
            }
        );
        // Unknown named node kinds (typos of real kinds).
        assert_eq!(
            Query::new(&language, "(clas)").unwrap_err(),
            QueryError {
                row: 0,
                offset: 1,
                column: 1,
                kind: QueryErrorKind::NodeType,
                message: "\"clas\"".to_string()
            }
        );
        assert_eq!(
            Query::new(&language, "(if_statement (arrayyyyy))").unwrap_err(),
            QueryError {
                row: 0,
                offset: 15,
                column: 15,
                kind: QueryErrorKind::NodeType,
                message: "\"arrayyyyy\"".to_string()
            },
        );
        assert_eq!(
            Query::new(&language, "(if_statement condition: (non_existent3))").unwrap_err(),
            QueryError {
                row: 0,
                offset: 26,
                column: 26,
                kind: QueryErrorKind::NodeType,
                message: "\"non_existent3\"".to_string()
            },
        );
        // Unknown field names (typos of real fields).
        assert_eq!(
            Query::new(&language, "(if_statement condit: (identifier))").unwrap_err(),
            QueryError {
                row: 0,
                offset: 14,
                column: 14,
                kind: QueryErrorKind::Field,
                message: "\"condit\"".to_string()
            },
        );
        assert_eq!(
            Query::new(&language, "(if_statement conditioning: (identifier))").unwrap_err(),
            QueryError {
                row: 0,
                offset: 14,
                column: 14,
                kind: QueryErrorKind::Field,
                message: "\"conditioning\"".to_string()
            }
        );
        assert_eq!(
            Query::new(&language, "(if_statement !alternativ)").unwrap_err(),
            QueryError {
                row: 0,
                offset: 15,
                column: 15,
                kind: QueryErrorKind::Field,
                message: "\"alternativ\"".to_string()
            }
        );
        assert_eq!(
            Query::new(&language, "(if_statement !alternatives)").unwrap_err(),
            QueryError {
                row: 0,
                offset: 15,
                column: 15,
                kind: QueryErrorKind::Field,
                message: "\"alternatives\"".to_string()
            }
        );
        assert_eq!(
            Query::new(&language, "fakefield: (identifier)").unwrap_err(),
            QueryError {
                row: 0,
                offset: 0,
                column: 0,
                kind: QueryErrorKind::Field,
                message: "\"fakefield\"".to_string()
            }
        );
    });
}
#[test]
fn test_query_errors_on_invalid_predicates() {
    // Malformed predicates: a capture in operator position is a syntax
    // error, #eq? enforces its arity, and a reference to an undeclared
    // capture name is a Capture error.
    allocations::record(|| {
        let language = get_language("javascript");
        assert_eq!(
            Query::new(&language, "((identifier) @id (@id))").unwrap_err(),
            QueryError {
                kind: QueryErrorKind::Syntax,
                row: 0,
                column: 19,
                offset: 19,
                message: [
                    "((identifier) @id (@id))", //
                    " ^"
                ]
                .join("\n")
            }
        );
        assert_eq!(
            Query::new(&language, "((identifier) @id (#eq? @id))").unwrap_err(),
            QueryError {
                kind: QueryErrorKind::Predicate,
                row: 0,
                column: 0,
                offset: 0,
                message: "Wrong number of arguments to #eq? predicate. Expected 2, got 1."
                    .to_string()
            }
        );
        assert_eq!(
            Query::new(&language, "((identifier) @id (#eq? @id @ok))").unwrap_err(),
            QueryError {
                kind: QueryErrorKind::Capture,
                row: 0,
                column: 29,
                offset: 29,
                message: "\"ok\"".to_string(),
            }
        );
    });
}
#[test]
fn test_query_errors_on_impossible_patterns() {
    // Patterns that can never match any tree (duplicate fields, field values
    // of the wrong kind, children a node cannot have, invalid supertype
    // combinations) are rejected with a Structure error; the structurally
    // valid variants written alongside them must parse successfully.
    let js_lang = get_language("javascript");
    let rb_lang = get_language("ruby");
    allocations::record(|| {
        // The same field cannot be specified twice on one node.
        assert_eq!(
            Query::new(
                &js_lang,
                "(binary_expression left: (expression (identifier)) left: (expression (identifier)))"
            ),
            Err(QueryError {
                kind: QueryErrorKind::Structure,
                row: 0,
                offset: 37,
                column: 37,
                message: [
                    "(binary_expression left: (expression (identifier)) left: (expression (identifier)))",
                    " ^",
                ]
                .join("\n"),
            })
        );
        Query::new(
            &js_lang,
            "(function_declaration name: (identifier) (statement_block))",
        )
        .unwrap();
        // A field whose value kind is impossible for that field.
        assert_eq!(
            Query::new(&js_lang, "(function_declaration name: (statement_block))"),
            Err(QueryError {
                kind: QueryErrorKind::Structure,
                row: 0,
                offset: 22,
                column: 22,
                message: [
                    "(function_declaration name: (statement_block))",
                    " ^",
                ]
                .join("\n")
            })
        );
        Query::new(&rb_lang, "(call receiver:(call))").unwrap();
        assert_eq!(
            Query::new(&rb_lang, "(call receiver:(binary))"),
            Err(QueryError {
                kind: QueryErrorKind::Structure,
                row: 0,
                offset: 6,
                column: 6,
                message: [
                    "(call receiver:(binary))", //
                    " ^",
                ]
                .join("\n")
            })
        );
        // Inside an alternation, each branch is checked independently.
        Query::new(
            &js_lang,
            "[
                (function_expression (identifier))
                (function_declaration (identifier))
                (generator_function_declaration (identifier))
            ]",
        )
        .unwrap();
        assert_eq!(
            Query::new(
                &js_lang,
                "[
                    (function_expression (identifier))
                    (function_declaration (object))
                    (generator_function_declaration (identifier))
                ]",
            ),
            Err(QueryError {
                kind: QueryErrorKind::Structure,
                row: 2,
                offset: 99,
                column: 42,
                message: [
                    " (function_declaration (object))", //
                    " ^",
                ]
                .join("\n")
            })
        );
        // Leaf tokens cannot have children.
        assert_eq!(
            Query::new(&js_lang, "(identifier (identifier))",),
            Err(QueryError {
                kind: QueryErrorKind::Structure,
                row: 0,
                offset: 12,
                column: 12,
                message: [
                    "(identifier (identifier))", //
                    " ^",
                ]
                .join("\n")
            })
        );
        assert_eq!(
            Query::new(&js_lang, "(true (true))",),
            Err(QueryError {
                kind: QueryErrorKind::Structure,
                row: 0,
                offset: 6,
                column: 6,
                message: [
                    "(true (true))", //
                    " ^",
                ]
                .join("\n")
            })
        );
        // A hidden node kind (expression) is fine nested under a visible
        // wrapper, but not directly as a field value.
        Query::new(
            &js_lang,
            "(if_statement
            condition: (parenthesized_expression (expression) @cond))",
        )
        .unwrap();
        assert_eq!(
            Query::new(&js_lang, "(if_statement condition: (expression))"),
            Err(QueryError {
                kind: QueryErrorKind::Structure,
                row: 0,
                offset: 14,
                column: 14,
                message: [
                    "(if_statement condition: (expression))", //
                    " ^",
                ]
                .join("\n")
            })
        );
        // Supertype syntax: the left side must actually be a supertype of
        // the right side.
        assert_eq!(
            Query::new(&js_lang, "(identifier/identifier)").unwrap_err(),
            QueryError {
                row: 0,
                offset: 0,
                column: 0,
                kind: QueryErrorKind::Structure,
                message: [
                    "(identifier/identifier)", //
                    "^"
                ]
                .join("\n")
            }
        );
        // These checks require subtype metadata, which only exists in newer
        // ABI versions.
        if js_lang.abi_version() >= 15 {
            assert_eq!(
                Query::new(&js_lang, "(statement/identifier)").unwrap_err(),
                QueryError {
                    row: 0,
                    offset: 0,
                    column: 0,
                    kind: QueryErrorKind::Structure,
                    message: [
                        "(statement/identifier)", //
                        "^"
                    ]
                    .join("\n")
                }
            );
            assert_eq!(
                Query::new(&js_lang, "(statement/pattern)").unwrap_err(),
                QueryError {
                    row: 0,
                    offset: 0,
                    column: 0,
                    kind: QueryErrorKind::Structure,
                    message: [
                        "(statement/pattern)", //
                        "^"
                    ]
                    .join("\n")
                }
            );
        }
    });
}
#[test]
fn test_query_verifies_possible_patterns_with_aliased_parent_nodes() {
    // Structural verification must account for node aliases: in Ruby a
    // destructured_parameter may contain an identifier, but a string child
    // is impossible and must be rejected.
    allocations::record(|| {
        let language = get_language("ruby");
        Query::new(&language, "(destructured_parameter (identifier))").unwrap();
        assert_eq!(
            Query::new(&language, "(destructured_parameter (string))",),
            Err(QueryError {
                kind: QueryErrorKind::Structure,
                row: 0,
                offset: 24,
                column: 24,
                message: [
                    "(destructured_parameter (string))", //
                    " ^",
                ]
                .join("\n")
            })
        );
    });
}
#[test]
fn test_query_matches_with_simple_pattern() {
    // A single-pattern query matches every function declaration in the
    // source, capturing each function's name in document order.
    allocations::record(|| {
        let language = get_language("javascript");
        let query = Query::new(
            &language,
            "(function_declaration name: (identifier) @fn-name)",
        )
        .unwrap();

        let source = "function one() { two(); function three() {} }";
        let expected = [
            (0, vec![("fn-name", "one")]),
            (0, vec![("fn-name", "three")]),
        ];
        assert_query_matches(&language, &query, source, &expected);
    });
}
#[test]
fn test_query_matches_with_multiple_on_same_root() {
    // One pattern with two captures on the same root: each method in the
    // class yields its own match, and every match carries both the class
    // name and that method's name.
    allocations::record(|| {
        let language = get_language("javascript");
        let query = Query::new(
            &language,
            "(class_declaration
            name: (identifier) @the-class-name
            (class_body
                (method_definition
                    name: (property_identifier) @the-method-name)))",
        )
        .unwrap();
        assert_query_matches(
            &language,
            &query,
            "
            class Person {
                // the constructor
                constructor(name) { this.name = name; }
                // the getter
                getFullName() { return this.name; }
            }
            ",
            &[
                (
                    0,
                    vec![
                        ("the-class-name", "Person"),
                        ("the-method-name", "constructor"),
                    ],
                ),
                (
                    0,
                    vec![
                        ("the-class-name", "Person"),
                        ("the-method-name", "getFullName"),
                    ],
                ),
            ],
        );
    });
}
#[test]
fn test_query_matches_with_multiple_patterns_different_roots() {
    // Two patterns rooted at different node kinds are matched independently;
    // the pattern index in each result identifies which one matched.
    allocations::record(|| {
        let language = get_language("javascript");
        let query = Query::new(
            &language,
            "
            (function_declaration name:(identifier) @fn-def)
            (call_expression function:(identifier) @fn-ref)
            ",
        )
        .unwrap();
        assert_query_matches(
            &language,
            &query,
            "
            function f1() {
                f2(f3());
            }
            ",
            &[
                (0, vec![("fn-def", "f1")]),
                (1, vec![("fn-ref", "f2")]),
                (1, vec![("fn-ref", "f3")]),
            ],
        );
    });
}
#[test]
fn test_query_matches_with_multiple_patterns_same_root() {
    // Two patterns rooted at the same node kind (pair): results come back in
    // source order, not pattern order — the arrow function `b` (pattern 1)
    // precedes the function expression `d` (pattern 0).
    allocations::record(|| {
        let language = get_language("javascript");
        let query = Query::new(
            &language,
            "
            (pair
                key: (property_identifier) @method-def
                value: (function_expression))
            (pair
                key: (property_identifier) @method-def
                value: (arrow_function))
            ",
        )
        .unwrap();
        assert_query_matches(
            &language,
            &query,
            "
            a = {
                b: () => { return c; },
                d: function() { return d; }
            };
            ",
            &[
                (1, vec![("method-def", "b")]),
                (0, vec![("method-def", "d")]),
            ],
        );
    });
}
#[test]
fn test_query_matches_with_nesting_and_no_fields() {
    // Without field names or anchors, two sibling captures match every
    // ordered pair of identifiers within each inner array — results are
    // combinatorial. Single-element arrays produce no match.
    allocations::record(|| {
        let language = get_language("javascript");
        let query = Query::new(
            &language,
            "
            (array
                (array
                    (identifier) @x1
                    (identifier) @x2))
            ",
        )
        .unwrap();
        assert_query_matches(
            &language,
            &query,
            "
            [[a]];
            [[c, d], [e, f, g, h]];
            [[h], [i]];
            ",
            &[
                (0, vec![("x1", "c"), ("x2", "d")]),
                (0, vec![("x1", "e"), ("x2", "f")]),
                (0, vec![("x1", "e"), ("x2", "g")]),
                (0, vec![("x1", "f"), ("x2", "g")]),
                (0, vec![("x1", "e"), ("x2", "h")]),
                (0, vec![("x1", "f"), ("x2", "h")]),
                (0, vec![("x1", "g"), ("x2", "h")]),
            ],
        );
    });
}
#[test]
fn test_query_matches_with_many_results() {
    // Repeating the same line many times must produce one match per
    // repetition, in order, without tripping any allocation checks.
    allocations::record(|| {
        let language = get_language("javascript");
        let query = Query::new(&language, "(array (identifier) @element)").unwrap();

        let repetitions = 50;
        let source = "[hello];\n".repeat(repetitions);
        let expected = vec![(0, vec![("element", "hello")]); repetitions];
        assert_query_matches(&language, &query, &source, &expected);
    });
}
#[test]
fn test_query_matches_with_many_overlapping_results() {
    // Three patterns all match on every link of a deeply nested method
    // chain; the expected list is the (method, function, constant) triple
    // cycled once per chain link.
    allocations::record(|| {
        let language = get_language("javascript");
        let query = Query::new(
            &language,
            r#"
            (call_expression
                function: (member_expression
                    property: (property_identifier) @method))
            (call_expression
                function: (identifier) @function)
            ((identifier) @constant
                (#match? @constant "[A-Z\\d_]+"))
            "#,
        )
        .unwrap();
        let count = 1024;
        // Deeply nested chained function calls:
        // a
        //    .foo(bar(BAZ))
        //    .foo(bar(BAZ))
        //    .foo(bar(BAZ))
        //    ...
        let source = format!("a{}", "\n  .foo(bar(BAZ))".repeat(count));
        assert_query_matches(
            &language,
            &query,
            &source,
            &[
                (0, vec![("method", "foo")]),
                (1, vec![("function", "bar")]),
                (2, vec![("constant", "BAZ")]),
            ]
            .iter()
            .cloned()
            .cycle()
            .take(3 * count)
            .collect::<Vec<_>>(),
        );
    });
}
#[test]
fn test_query_matches_capturing_error_nodes() {
    // ERROR nodes can be matched and captured just like ordinary named
    // nodes, including captures on their children.
    allocations::record(|| {
        let language = get_language("javascript");
        let pattern = "
        (ERROR (identifier) @the-error-identifier) @the-error
        ";
        let query = Query::new(&language, pattern).unwrap();

        let source = "function a(b,, c, d :e:) {}";
        assert_query_matches(
            &language,
            &query,
            source,
            &[(0, vec![("the-error", ":e:"), ("the-error-identifier", "e")])],
        );
    });
}
#[test]
fn test_query_matches_capturing_missing_nodes() {
    // MISSING patterns match zero-width nodes inserted during error
    // recovery; they can be constrained to a specific anonymous token, to a
    // named kind, or left unconstrained to match any missing node.
    allocations::record(|| {
        let language = get_language("javascript");
        let query = Query::new(
            &language,
            r#"
            (MISSING
                ; Comments should be valid
            ) @missing
            (MISSING
                ; Comments should be valid
                ";"
                ; Comments should be valid
            ) @missing-semicolon
            "#,
        )
        .unwrap();
        // Missing anonymous nodes
        assert_query_matches(
            &language,
            &query,
            "
            x = function(a) { b; } function(c) { d; }
            //                    ^ MISSING semicolon here
            ",
            &[
                (0, vec![("missing", "")]),
                (1, vec![("missing-semicolon", "")]),
            ],
        );
        let language = get_language("c");
        let query = Query::new(
            &language,
            "(MISSING field_identifier) @missing-field-ident
            (MISSING identifier) @missing-ident
            (MISSING) @missing-anything",
        )
        .unwrap();
        // Missing named nodes
        assert_query_matches(
            &language,
            &query,
            "
            int main() {
                if (a.) {
                //    ^ MISSING field_identifier here
                    b();
                    c();
                    if (*) d();
                    //   ^ MISSING identifier here
                }
            }
            ",
            &[
                (0, vec![("missing-field-ident", "")]),
                (2, vec![("missing-anything", "")]),
                (1, vec![("missing-ident", "")]),
                (2, vec![("missing-anything", "")]),
            ],
        );
    });
}
#[test]
fn test_query_matches_with_extra_children() {
    // Extra nodes (Ruby comments, heredoc bodies) can be matched as children
    // of the node they are attached to; the heredoc body only counts as a
    // child of the argument_list it actually appears in.
    allocations::record(|| {
        let language = get_language("ruby");
        let query = Query::new(
            &language,
            "
            (program(comment) @top_level_comment)
            (argument_list (heredoc_body) @heredoc_in_args)
            ",
        )
        .unwrap();
        assert_query_matches(
            &language,
            &query,
            "
            # top-level
            puts(
                # not-top-level
                <<-IN_ARGS, bar.baz
 HELLO
 IN_ARGS
            )
            puts <<-NOT_IN_ARGS
 NO
 NOT_IN_ARGS
            ",
            &[
                (0, vec![("top_level_comment", "# top-level")]),
                (
                    1,
                    vec![(
                        "heredoc_in_args",
                        "\n HELLO\n IN_ARGS",
                    )],
                ),
            ],
        );
    });
}
#[test]
fn test_query_matches_with_named_wildcard() {
    // `(_)` matches any *named* node, while a bare `_` in a field position
    // also matches anonymous tokens (here the `+` and `-` operators).
    allocations::record(|| {
        let language = get_language("javascript");
        let query = Query::new(
            &language,
            "
            (return_statement (_) @the-return-value)
            (binary_expression operator: _ @the-operator)
            ",
        )
        .unwrap();
        let source = "return a + b - c;";
        let mut parser = Parser::new();
        parser.set_language(&language).unwrap();
        let tree = parser.parse(source, None).unwrap();
        let mut cursor = QueryCursor::new();
        let matches = cursor.matches(&query, tree.root_node(), source.as_bytes());
        assert_eq!(
            collect_matches(matches, &query, source),
            &[
                (0, vec![("the-return-value", "a + b - c")]),
                (1, vec![("the-operator", "+")]),
                (1, vec![("the-operator", "-")]),
            ]
        );
    });
}
#[test]
fn test_query_matches_with_wildcard_at_the_root() {
    // A wildcard at the pattern root constrains only the children: any
    // parent whose children satisfy the pattern matches.
    allocations::record(|| {
        let language = get_language("javascript");
        let query = Query::new(
            &language,
            "
            (_
                (comment) @doc
                .
                (function_declaration
                    name: (identifier) @name))
            ",
        )
        .unwrap();
        // Only the comment immediately preceding the function declaration
        // satisfies the anchor.
        assert_query_matches(
            &language,
            &query,
            "/* one */ var x; /* two */ function y() {} /* three */ class Z {}",
            &[(0, vec![("doc", "/* two */"), ("name", "y")])],
        );
        let query = Query::new(
            &language,
            "
            (_ (string) @a)
            (_ (number) @b)
            (_ (true) @c)
            (_ (false) @d)
            ",
        )
        .unwrap();
        // Pattern 1 (number) has no match: the source contains no number.
        assert_query_matches(
            &language,
            &query,
            "['hi', x(true), {y: false}]",
            &[
                (0, vec![("a", "'hi'")]),
                (2, vec![("c", "true")]),
                (3, vec![("d", "false")]),
            ],
        );
    });
}
#[test]
fn test_query_matches_with_wildcard_within_wildcard() {
    // Nested wildcards: every (parent, child) pair of nodes in the tree
    // produces a match, including extras like the comment.
    allocations::record(|| {
        let language = get_language("javascript");
        let query = Query::new(
            &language,
            "
            (_ (_) @child) @parent
            ",
        )
        .unwrap();
        assert_query_matches(
            &language,
            &query,
            "/* a */ b; c;",
            &[
                (0, vec![("parent", "/* a */ b; c;"), ("child", "/* a */")]),
                (0, vec![("parent", "/* a */ b; c;"), ("child", "b;")]),
                (0, vec![("parent", "b;"), ("child", "b")]),
                (0, vec![("parent", "/* a */ b; c;"), ("child", "c;")]),
                (0, vec![("parent", "c;"), ("child", "c")]),
            ],
        );
    });
}
#[test]
fn test_query_matches_with_immediate_siblings() {
allocations::record(|| {
let language = get_language("python");
// The immediate child operator '.' can be used in three similar ways:
// 1. Before the first child node in a pattern, it means that there cannot be any named
// siblings before that child node.
// 2. After the last child node in a pattern, it means that there cannot be any named
// sibling after that child node.
// 3. Between two child nodes in a pattern, it specifies that there cannot be any named
//    siblings between those two child nodes.
let query = Query::new(
&language,
"
(dotted_name
(identifier) @parent
.
(identifier) @child)
(dotted_name
(identifier) @last-child
.)
(list
.
(_) @first-element)
",
)
.unwrap();
assert_query_matches(
&language,
&query,
"import a.b.c.d; return [w, [1, y], z]",
&[
(0, vec![("parent", "a"), ("child", "b")]),
(0, vec![("parent", "b"), ("child", "c")]),
(0, vec![("parent", "c"), ("child", "d")]),
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | true |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests/pathological_test.rs | crates/cli/src/tests/pathological_test.rs | use tree_sitter::Parser;
use super::helpers::{allocations, fixtures::get_language};
#[test]
fn test_pathological_example_1() {
    // A dense, highly ambiguous input that previously exhibited pathological
    // behavior; parsing must still complete (run under the allocation
    // recorder used by these tests).
    let source = r#"*ss<s"ss<sqXqss<s._<s<sq<(qqX<sqss<s.ss<sqsssq<(qss<qssqXqss<s._<s<sq<(qqX<sqss<s.ss<sqsssq<(qss<sqss<sqss<s._<s<sq>(qqX<sqss<s.ss<sqsssq<(qss<sq&=ss<s<sqss<s._<s<sq<(qqX<sqss<s.ss<sqs"#;
    allocations::record(|| {
        let mut parser = Parser::new();
        parser.set_language(&get_language("cpp")).unwrap();
        parser.parse(source, None).unwrap();
    });
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests/highlight_test.rs | crates/cli/src/tests/highlight_test.rs | use std::{
ffi::CString,
fs,
os::raw::c_char,
ptr, slice, str,
sync::{
atomic::{AtomicUsize, Ordering},
LazyLock,
},
};
use tree_sitter_highlight::{
c, Error, Highlight, HighlightConfiguration, HighlightEvent, Highlighter, HtmlRenderer,
};
use super::helpers::fixtures::{get_highlight_config, get_language, get_language_queries_path};
// Lazily-built highlight configurations for the fixture languages used by the
// tests below. Each one is constructed on first use from the language's query
// files plus the shared `HIGHLIGHT_NAMES` list.
static JS_HIGHLIGHT: LazyLock<HighlightConfiguration> =
    LazyLock::new(|| get_highlight_config("javascript", Some("injections.scm"), &HIGHLIGHT_NAMES));
// JSDoc has no injections query of its own.
static JSDOC_HIGHLIGHT: LazyLock<HighlightConfiguration> =
    LazyLock::new(|| get_highlight_config("jsdoc", None, &HIGHLIGHT_NAMES));
static HTML_HIGHLIGHT: LazyLock<HighlightConfiguration> =
    LazyLock::new(|| get_highlight_config("html", Some("injections.scm"), &HIGHLIGHT_NAMES));
// The embedded-template (EJS) config uses an alternate injections query file.
static EJS_HIGHLIGHT: LazyLock<HighlightConfiguration> = LazyLock::new(|| {
    get_highlight_config(
        "embedded-template",
        Some("injections-ejs.scm"),
        &HIGHLIGHT_NAMES,
    )
});
static RUST_HIGHLIGHT: LazyLock<HighlightConfiguration> =
    LazyLock::new(|| get_highlight_config("rust", Some("injections.scm"), &HIGHLIGHT_NAMES));
// The recognized capture names for these tests. Indexes into this list
// correspond to the `Highlight` values produced by the highlighter (see
// `to_token_vector` and `to_html` below).
static HIGHLIGHT_NAMES: LazyLock<Vec<String>> = LazyLock::new(|| {
    [
        "attribute",
        "boolean",
        "carriage-return",
        "comment",
        "constant",
        "constant.builtin",
        "constructor",
        "embedded",
        "function",
        "function.builtin",
        "keyword",
        "module",
        "number",
        "operator",
        "property",
        "property.builtin",
        "punctuation",
        "punctuation.bracket",
        "punctuation.delimiter",
        "punctuation.special",
        "string",
        "string.special",
        "tag",
        "type",
        "type.builtin",
        "variable",
        "variable.builtin",
        "variable.parameter",
    ]
    .iter()
    .copied()
    .map(String::from)
    .collect()
});
// Precomputed `class=<name>` HTML attribute strings, one per entry in
// `HIGHLIGHT_NAMES`, used by the HTML-rendering tests.
static HTML_ATTRS: LazyLock<Vec<String>> = LazyLock::new(|| {
    let mut attrs = Vec::with_capacity(HIGHLIGHT_NAMES.len());
    for name in HIGHLIGHT_NAMES.iter() {
        attrs.push(format!("class={name}"));
    }
    attrs
});
#[test]
fn test_highlighting_javascript() {
    // Plain JavaScript with no injections: each token should carry exactly
    // the highlight names listed beside it (an empty vec = plain text).
    let source = "const a = function(b) { return b + c; }";
    assert_eq!(
        &to_token_vector(source, &JS_HIGHLIGHT).unwrap(),
        &[vec![
            ("const", vec!["keyword"]),
            (" ", vec![]),
            ("a", vec!["function"]),
            (" ", vec![]),
            ("=", vec!["operator"]),
            (" ", vec![]),
            ("function", vec!["keyword"]),
            ("(", vec!["punctuation.bracket"]),
            ("b", vec!["variable"]),
            (")", vec!["punctuation.bracket"]),
            (" ", vec![]),
            ("{", vec!["punctuation.bracket"]),
            (" ", vec![]),
            ("return", vec!["keyword"]),
            (" ", vec![]),
            ("b", vec!["variable"]),
            (" ", vec![]),
            ("+", vec!["operator"]),
            (" ", vec![]),
            ("c", vec!["variable"]),
            (";", vec!["punctuation.delimiter"]),
            (" ", vec![]),
            ("}", vec!["punctuation.bracket"]),
        ]]
    );
}
#[test]
fn test_highlighting_injected_html_in_javascript() {
    // A tagged template literal: its HTML content is highlighted via language
    // injection, and the `${...}` interpolation is highlighted as embedded
    // JavaScript nested inside the string.
    let source = "const s = html `<div>${a < b}</div>`;";
    assert_eq!(
        &to_token_vector(source, &JS_HIGHLIGHT).unwrap(),
        &[vec![
            ("const", vec!["keyword"]),
            (" ", vec![]),
            ("s", vec!["variable"]),
            (" ", vec![]),
            ("=", vec!["operator"]),
            (" ", vec![]),
            ("html", vec!["function"]),
            (" ", vec![]),
            ("`", vec!["string"]),
            ("<", vec!["string", "punctuation.bracket"]),
            ("div", vec!["string", "tag"]),
            (">", vec!["string", "punctuation.bracket"]),
            ("${", vec!["string", "embedded", "punctuation.special"]),
            ("a", vec!["string", "embedded", "variable"]),
            (" ", vec!["string", "embedded"]),
            ("<", vec!["string", "embedded", "operator"]),
            (" ", vec!["string", "embedded"]),
            ("b", vec!["string", "embedded", "variable"]),
            ("}", vec!["string", "embedded", "punctuation.special"]),
            ("</", vec!["string", "punctuation.bracket"]),
            ("div", vec!["string", "tag"]),
            (">", vec!["string", "punctuation.bracket"]),
            ("`", vec!["string"]),
            (";", vec!["punctuation.delimiter"]),
        ]]
    );
}
#[test]
fn test_highlighting_injected_javascript_in_html_mini() {
    // A single-line `<script>` element: its body is re-highlighted as
    // JavaScript through the HTML injections query.
    let source = "<script>const x = new Thing();</script>";
    assert_eq!(
        &to_token_vector(source, &HTML_HIGHLIGHT).unwrap(),
        &[vec![
            ("<", vec!["punctuation.bracket"]),
            ("script", vec!["tag"]),
            (">", vec!["punctuation.bracket"]),
            ("const", vec!["keyword"]),
            (" ", vec![]),
            ("x", vec!["variable"]),
            (" ", vec![]),
            ("=", vec!["operator"]),
            (" ", vec![]),
            ("new", vec!["keyword"]),
            (" ", vec![]),
            ("Thing", vec!["constructor"]),
            ("(", vec!["punctuation.bracket"]),
            (")", vec!["punctuation.bracket"]),
            (";", vec!["punctuation.delimiter"]),
            ("</", vec!["punctuation.bracket"]),
            ("script", vec!["tag"]),
            (">", vec!["punctuation.bracket"]),
        ],]
    );
}
#[test]
fn test_highlighting_injected_javascript_in_html() {
let source = [
"<body>",
" <script>",
" const x = new Thing();",
" </script>",
"</body>",
]
.join("\n");
assert_eq!(
&to_token_vector(&source, &HTML_HIGHLIGHT).unwrap(),
&[
vec![
("<", vec!["punctuation.bracket"]),
("body", vec!["tag"]),
(">", vec!["punctuation.bracket"]),
],
vec![
(" ", vec![]),
("<", vec!["punctuation.bracket"]),
("script", vec!["tag"]),
(">", vec!["punctuation.bracket"]),
],
vec![
(" ", vec![]),
("const", vec!["keyword"]),
(" ", vec![]),
("x", vec!["variable"]),
(" ", vec![]),
("=", vec!["operator"]),
(" ", vec![]),
("new", vec!["keyword"]),
(" ", vec![]),
("Thing", vec!["constructor"]),
("(", vec!["punctuation.bracket"]),
(")", vec!["punctuation.bracket"]),
(";", vec!["punctuation.delimiter"]),
],
vec![
(" ", vec![]),
("</", vec!["punctuation.bracket"]),
("script", vec!["tag"]),
(">", vec!["punctuation.bracket"]),
],
vec![
("</", vec!["punctuation.bracket"]),
("body", vec!["tag"]),
(">", vec!["punctuation.bracket"]),
],
]
);
}
#[test]
fn test_highlighting_multiline_nodes_to_html() {
let source = [
"const SOMETHING = `",
" one ${",
" two()",
" } three",
"`",
"",
]
.join("\n");
assert_eq!(
&to_html(&source, &JS_HIGHLIGHT).unwrap(),
&[
"<span class=keyword>const</span> <span class=constant>SOMETHING</span> <span class=operator>=</span> <span class=string>`</span>\n".to_string(),
"<span class=string> one <span class=embedded><span class=punctuation.special>${</span></span></span>\n".to_string(),
"<span class=string><span class=embedded> <span class=function>two</span><span class=punctuation.bracket>(</span><span class=punctuation.bracket>)</span></span></span>\n".to_string(),
"<span class=string><span class=embedded> <span class=punctuation.special>}</span></span> three</span>\n".to_string(),
"<span class=string>`</span>\n".to_string(),
]
);
}
#[test]
fn test_highlighting_with_local_variable_tracking() {
let source = [
"module.exports = function a(b) {",
" const module = c;",
" console.log(module, b);",
"}",
]
.join("\n");
assert_eq!(
&to_token_vector(&source, &JS_HIGHLIGHT).unwrap(),
&[
vec![
("module", vec!["variable.builtin"]),
(".", vec!["punctuation.delimiter"]),
("exports", vec!["function"]),
(" ", vec![]),
("=", vec!["operator"]),
(" ", vec![]),
("function", vec!["keyword"]),
(" ", vec![]),
("a", vec!["function"]),
("(", vec!["punctuation.bracket"]),
("b", vec!["variable"]),
(")", vec!["punctuation.bracket"]),
(" ", vec![]),
("{", vec!["punctuation.bracket"])
],
vec![
(" ", vec![]),
("const", vec!["keyword"]),
(" ", vec![]),
("module", vec!["variable"]),
(" ", vec![]),
("=", vec!["operator"]),
(" ", vec![]),
("c", vec!["variable"]),
(";", vec!["punctuation.delimiter"])
],
vec![
(" ", vec![]),
("console", vec!["variable.builtin"]),
(".", vec!["punctuation.delimiter"]),
("log", vec!["function"]),
("(", vec!["punctuation.bracket"]),
// Not a builtin, because `module` was defined as a variable above.
("module", vec!["variable"]),
(",", vec!["punctuation.delimiter"]),
(" ", vec![]),
// A parameter, because `b` was defined as a parameter above.
("b", vec!["variable"]),
(")", vec!["punctuation.bracket"]),
(";", vec!["punctuation.delimiter"]),
],
vec![("}", vec!["punctuation.bracket"])]
],
);
}
#[test]
fn test_highlighting_empty_lines() {
let source = [
"class A {",
"",
" b(c) {",
"",
" d(e)",
"",
" }",
"",
"}",
]
.join("\n");
assert_eq!(
&to_html(&source, &JS_HIGHLIGHT).unwrap(),
&[
"<span class=keyword>class</span> <span class=constructor>A</span> <span class=punctuation.bracket>{</span>\n".to_string(),
"\n".to_string(),
" <span class=function>b</span><span class=punctuation.bracket>(</span><span class=variable>c</span><span class=punctuation.bracket>)</span> <span class=punctuation.bracket>{</span>\n".to_string(),
"\n".to_string(),
" <span class=function>d</span><span class=punctuation.bracket>(</span><span class=variable>e</span><span class=punctuation.bracket>)</span>\n".to_string(),
"\n".to_string(),
" <span class=punctuation.bracket>}</span>\n".to_string(),
"\n".to_string(),
"<span class=punctuation.bracket>}</span>\n".to_string(),
]
);
}
#[test]
fn test_highlighting_carriage_returns() {
let source = "a = \"a\rb\"\r\nb\r";
assert_eq!(
&to_html(source, &JS_HIGHLIGHT).unwrap(),
&[
"<span class=variable>a</span> <span class=operator>=</span> <span class=string>"a<span class=carriage-return></span><span class=variable>b</span>"</span>\n",
"<span class=variable>b</span><span class=carriage-return></span>\n",
],
);
}
#[test]
fn test_highlighting_ejs_with_html_and_javascript() {
    // An EJS template mixing literal HTML, an EJS directive (`<% ... %>`)
    // whose body is highlighted as JavaScript, and a `<script>` element.
    let source = "<div><% foo() %></div><script> bar() </script>";
    assert_eq!(
        &to_token_vector(source, &EJS_HIGHLIGHT).unwrap(),
        &[[
            ("<", vec!["punctuation.bracket"]),
            ("div", vec!["tag"]),
            (">", vec!["punctuation.bracket"]),
            ("<%", vec!["keyword"]),
            (" ", vec![]),
            ("foo", vec!["function"]),
            ("(", vec!["punctuation.bracket"]),
            (")", vec!["punctuation.bracket"]),
            (" ", vec![]),
            ("%>", vec!["keyword"]),
            ("</", vec!["punctuation.bracket"]),
            ("div", vec!["tag"]),
            (">", vec!["punctuation.bracket"]),
            ("<", vec!["punctuation.bracket"]),
            ("script", vec!["tag"]),
            (">", vec!["punctuation.bracket"]),
            (" ", vec![]),
            ("bar", vec!["function"]),
            ("(", vec!["punctuation.bracket"]),
            (")", vec!["punctuation.bracket"]),
            (" ", vec![]),
            ("</", vec!["punctuation.bracket"]),
            ("script", vec!["tag"]),
            (">", vec!["punctuation.bracket"]),
        ]],
    );
}
#[test]
fn test_highlighting_javascript_with_jsdoc() {
    // Regression test: the middle comment has no highlights. This should not prevent
    // later injections (the JSDoc comments) from highlighting properly.
    let source = "a /* @see a */ b; /* nothing */ c; /* @see b */";
    assert_eq!(
        &to_token_vector(source, &JS_HIGHLIGHT).unwrap(),
        &[[
            ("a", vec!["variable"]),
            (" ", vec![]),
            ("/* ", vec!["comment"]),
            ("@see", vec!["comment", "keyword"]),
            (" a */", vec!["comment"]),
            (" ", vec![]),
            ("b", vec!["variable"]),
            (";", vec!["punctuation.delimiter"]),
            (" ", vec![]),
            ("/* nothing */", vec!["comment"]),
            (" ", vec![]),
            ("c", vec!["variable"]),
            (";", vec!["punctuation.delimiter"]),
            (" ", vec![]),
            ("/* ", vec!["comment"]),
            ("@see", vec!["comment", "keyword"]),
            (" b */", vec!["comment"])
        ]],
    );
}
#[test]
fn test_highlighting_with_content_children_included() {
let source = ["assert!(", " a.b.c() < D::e::<F>()", ");"].join("\n");
assert_eq!(
&to_token_vector(&source, &RUST_HIGHLIGHT).unwrap(),
&[
vec![
("assert", vec!["function"]),
("!", vec!["function"]),
("(", vec!["punctuation.bracket"]),
],
vec![
(" a", vec![]),
(".", vec!["punctuation.delimiter"]),
("b", vec!["property"]),
(".", vec!["punctuation.delimiter"]),
("c", vec!["function"]),
("(", vec!["punctuation.bracket"]),
(")", vec!["punctuation.bracket"]),
(" < ", vec![]),
("D", vec!["type"]),
("::", vec!["punctuation.delimiter"]),
("e", vec!["function"]),
("::", vec!["punctuation.delimiter"]),
("<", vec!["punctuation.bracket"]),
("F", vec!["type"]),
(">", vec!["punctuation.bracket"]),
("(", vec!["punctuation.bracket"]),
(")", vec!["punctuation.bracket"]),
],
vec![
(")", vec!["punctuation.bracket"]),
(";", vec!["punctuation.delimiter"]),
]
],
);
}
#[test]
fn test_highlighting_cancellation() {
    // An HTML document with a large injected JavaScript document:
    let mut source = "<script>\n".to_string();
    for _ in 0..500 {
        source += "function a() { console.log('hi'); }\n";
    }
    source += "</script>\n";
    // Cancel the highlighting before parsing the injected document.
    // The injection callback runs when the injected (JS) document is about to
    // be processed, so setting the flag here cancels at that point.
    let cancellation_flag = AtomicUsize::new(0);
    let injection_callback = |name: &str| {
        cancellation_flag.store(1, Ordering::SeqCst);
        test_language_for_injection_string(name)
    };
    // The initial `highlight` call, which eagerly parses the outer document, should not fail.
    let mut highlighter = Highlighter::new();
    let mut events = highlighter
        .highlight(
            &HTML_HIGHLIGHT,
            source.as_bytes(),
            Some(&cancellation_flag),
            injection_callback,
        )
        .unwrap();
    // Iterating the scopes should not panic. It should return an error once the
    // cancellation is detected.
    let found_cancellation_error = events.any(|event| match event {
        Ok(_) => false,
        Err(Error::Cancelled) => true,
        Err(Error::InvalidLanguage | Error::Unknown) => {
            unreachable!("Unexpected error type while iterating events")
        }
    });
    assert!(
        found_cancellation_error,
        "Expected a cancellation error while iterating events"
    );
}
#[test]
fn test_highlighting_via_c_api() {
let highlights = [
"class=tag\0",
"class=function\0",
"class=string\0",
"class=keyword\0",
];
let highlight_names = highlights
.iter()
.map(|h| h["class=".len()..].as_ptr().cast::<c_char>())
.collect::<Vec<_>>();
let highlight_attrs = highlights
.iter()
.map(|h| h.as_bytes().as_ptr().cast::<c_char>())
.collect::<Vec<_>>();
let highlighter = unsafe {
c::ts_highlighter_new(
std::ptr::addr_of!(highlight_names[0]),
std::ptr::addr_of!(highlight_attrs[0]),
highlights.len() as u32,
)
};
let source_code = c_string("<script>\nconst a = b('c');\nc.d();\n</script>");
let js_scope = c_string("source.js");
let js_injection_regex = c_string("^javascript");
let language = get_language("javascript");
let lang_name = c_string("javascript");
let queries = get_language_queries_path("javascript");
let highlights_query = fs::read_to_string(queries.join("highlights.scm")).unwrap();
let injections_query = fs::read_to_string(queries.join("injections.scm")).unwrap();
let locals_query = fs::read_to_string(queries.join("locals.scm")).unwrap();
unsafe {
c::ts_highlighter_add_language(
highlighter,
lang_name.as_ptr(),
js_scope.as_ptr(),
js_injection_regex.as_ptr(),
language,
highlights_query.as_ptr().cast::<c_char>(),
injections_query.as_ptr().cast::<c_char>(),
locals_query.as_ptr().cast::<c_char>(),
highlights_query.len() as u32,
injections_query.len() as u32,
locals_query.len() as u32,
);
}
let html_scope = c_string("text.html.basic");
let html_injection_regex = c_string("^html");
let language = get_language("html");
let lang_name = c_string("html");
let queries = get_language_queries_path("html");
let highlights_query = fs::read_to_string(queries.join("highlights.scm")).unwrap();
let injections_query = fs::read_to_string(queries.join("injections.scm")).unwrap();
unsafe {
c::ts_highlighter_add_language(
highlighter,
lang_name.as_ptr(),
html_scope.as_ptr(),
html_injection_regex.as_ptr(),
language,
highlights_query.as_ptr().cast::<c_char>(),
injections_query.as_ptr().cast::<c_char>(),
ptr::null(),
highlights_query.len() as u32,
injections_query.len() as u32,
0,
);
}
let buffer = c::ts_highlight_buffer_new();
unsafe {
c::ts_highlighter_highlight(
highlighter,
html_scope.as_ptr(),
source_code.as_ptr(),
source_code.as_bytes().len() as u32,
buffer,
ptr::null_mut(),
);
}
let output_bytes = unsafe { c::ts_highlight_buffer_content(buffer) };
let output_line_offsets = unsafe { c::ts_highlight_buffer_line_offsets(buffer) };
let output_len = unsafe { c::ts_highlight_buffer_len(buffer) };
let output_line_count = unsafe { c::ts_highlight_buffer_line_count(buffer) };
let output_bytes = unsafe { slice::from_raw_parts(output_bytes, output_len as usize) };
let output_line_offsets =
unsafe { slice::from_raw_parts(output_line_offsets, output_line_count as usize) };
let mut lines = Vec::with_capacity(output_line_count as usize);
for i in 0..(output_line_count as usize) {
let line_start = output_line_offsets[i] as usize;
let line_end = output_line_offsets
.get(i + 1)
.map_or(output_bytes.len(), |x| *x as usize);
lines.push(str::from_utf8(&output_bytes[line_start..line_end]).unwrap());
}
assert_eq!(
lines,
vec![
"<<span class=tag>script</span>>\n",
"<span class=keyword>const</span> a = <span class=function>b</span>(<span class=string>'c'</span>);\n",
"c.<span class=function>d</span>();\n",
"</<span class=tag>script</span>>\n",
]
);
unsafe {
c::ts_highlighter_delete(highlighter);
c::ts_highlight_buffer_delete(buffer);
}
}
#[test]
fn test_highlighting_with_all_captures_applied() {
let source = "fn main(a: u32, b: u32) -> { let c = a + b; }";
let language = get_language("rust");
let highlights_query = indoc::indoc! {"
[
\"fn\"
\"let\"
] @keyword
(identifier) @variable
(function_item name: (identifier) @function)
(parameter pattern: (identifier) @variable.parameter)
(primitive_type) @type.builtin
\"=\" @operator
[ \"->\" \":\" \";\" ] @punctuation.delimiter
[ \"{\" \"}\" \"(\" \")\" ] @punctuation.bracket
"};
let mut rust_highlight_reverse =
HighlightConfiguration::new(language, "rust", highlights_query, "", "").unwrap();
rust_highlight_reverse.configure(&HIGHLIGHT_NAMES);
assert_eq!(
&to_token_vector(source, &rust_highlight_reverse).unwrap(),
&[[
("fn", vec!["keyword"]),
(" ", vec![]),
("main", vec!["function"]),
("(", vec!["punctuation.bracket"]),
("a", vec!["variable.parameter"]),
(":", vec!["punctuation.delimiter"]),
(" ", vec![]),
("u32", vec!["type.builtin"]),
(", ", vec![]),
("b", vec!["variable.parameter"]),
(":", vec!["punctuation.delimiter"]),
(" ", vec![]),
("u32", vec!["type.builtin"]),
(")", vec!["punctuation.bracket"]),
(" ", vec![]),
("->", vec!["punctuation.delimiter"]),
(" ", vec![]),
("{", vec!["punctuation.bracket"]),
(" ", vec![]),
("let", vec!["keyword"]),
(" ", vec![]),
("c", vec!["variable"]),
(" ", vec![]),
("=", vec!["operator"]),
(" ", vec![]),
("a", vec!["variable"]),
(" + ", vec![]),
("b", vec!["variable"]),
(";", vec!["punctuation.delimiter"]),
(" ", vec![]),
("}", vec!["punctuation.bracket"])
]],
);
}
#[test]
fn test_decode_utf8_lossy() {
    use tree_sitter::LossyUtf8;
    // Each invalid byte becomes a U+FFFD replacement character; valid runs
    // before, between, and after the invalid bytes are yielded unchanged.
    let cases: [(&[u8], &[&str]); 4] = [
        (b"hi", &["hi"]),
        (b"hi\xc0\xc1bye", &["hi", "\u{fffd}", "\u{fffd}", "bye"]),
        (b"\xc0\xc1bye", &["\u{fffd}", "\u{fffd}", "bye"]),
        (b"hello\xc0\xc1", &["hello", "\u{fffd}", "\u{fffd}"]),
    ];
    for (input, expected) in cases {
        assert_eq!(LossyUtf8::new(input).collect::<Vec<_>>(), expected);
    }
}
/// Converts a string slice into an owned, NUL-terminated C string.
///
/// `CString::new` accepts anything convertible into `Vec<u8>` (including
/// `&str`), so the previous intermediate `as_bytes().to_vec()` copy is
/// unnecessary.
///
/// # Panics
///
/// Panics if `s` contains an interior NUL byte.
fn c_string(s: &str) -> CString {
    CString::new(s).unwrap()
}
/// Maps an injection language string to the matching test highlight
/// configuration, or `None` for languages these tests don't know about.
fn test_language_for_injection_string<'a>(string: &str) -> Option<&'a HighlightConfiguration> {
    match string {
        "javascript" => Some(&JS_HIGHLIGHT),
        "html" => Some(&HTML_HIGHLIGHT),
        "rust" => Some(&RUST_HIGHLIGHT),
        "jsdoc" => Some(&JSDOC_HIGHLIGHT),
        _ => None,
    }
}
/// Highlights `src` with `language_config` and renders the result to HTML,
/// returning one `String` per output line. Injections are resolved through
/// `test_language_for_injection_string`.
fn to_html<'a>(
    src: &'a str,
    language_config: &'a HighlightConfiguration,
) -> Result<Vec<String>, Error> {
    let src = src.as_bytes();
    let mut renderer = HtmlRenderer::new();
    let mut highlighter = Highlighter::new();
    let events = highlighter.highlight(
        language_config,
        src,
        None,
        &test_language_for_injection_string,
    )?;
    // Render carriage returns with the dedicated "carriage-return" highlight,
    // when that name is present in the configured highlight list.
    renderer.set_carriage_return_highlight(
        HIGHLIGHT_NAMES
            .iter()
            .position(|s| s == "carriage-return")
            .map(Highlight),
    );
    renderer
        .render(events, src, &|highlight, output| {
            // Emit the precomputed `class=<name>` attribute for this highlight.
            output.extend(HTML_ATTRS[highlight.0].as_bytes());
        })
        .unwrap();
    Ok(renderer
        .lines()
        .map(std::string::ToString::to_string)
        .collect())
}
/// Highlights `src` and flattens the event stream into, per output line, a
/// vector of `(token_text, active_highlight_names)` pairs for easy assertions.
#[allow(clippy::type_complexity)]
fn to_token_vector<'a>(
    src: &'a str,
    language_config: &'a HighlightConfiguration,
) -> Result<Vec<Vec<(&'a str, Vec<&'static str>)>>, Error> {
    let src = src.as_bytes();
    let mut highlighter = Highlighter::new();
    let mut lines = Vec::new();
    // Stack of the highlight names currently in effect.
    let mut highlights = Vec::new();
    let mut line = Vec::new();
    let events = highlighter.highlight(
        language_config,
        src,
        None,
        &test_language_for_injection_string,
    )?;
    for event in events {
        match event? {
            HighlightEvent::HighlightStart(s) => highlights.push(HIGHLIGHT_NAMES[s.0].as_str()),
            HighlightEvent::HighlightEnd => {
                highlights.pop();
            }
            HighlightEvent::Source { start, end } => {
                let s = str::from_utf8(&src[start..end]).unwrap();
                // A source span may contain newlines; split so each fragment
                // is attributed to the line it appears on. Empty fragments
                // (and trailing '\r') are dropped.
                for (i, l) in s.split('\n').enumerate() {
                    let l = l.trim_end_matches('\r');
                    if i > 0 {
                        lines.push(std::mem::take(&mut line));
                    }
                    if !l.is_empty() {
                        line.push((l, highlights.clone()));
                    }
                }
            }
        }
    }
    if !line.is_empty() {
        lines.push(line);
    }
    Ok(lines)
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests/helpers.rs | crates/cli/src/tests/helpers.rs | pub use crate::fuzz::allocations;
pub mod edits;
pub(super) mod fixtures;
pub(super) mod query_helpers;
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests/parser_test.rs | crates/cli/src/tests/parser_test.rs | use std::{
ops::ControlFlow,
sync::{
atomic::{AtomicUsize, Ordering},
mpsc,
},
thread,
time::{self, Duration},
};
use tree_sitter::{
Decode, IncludedRangesError, InputEdit, LogType, ParseOptions, ParseState, Parser, Point, Range,
};
use tree_sitter_generate::load_grammar_file;
use tree_sitter_proc_macro::retry;
use super::helpers::{
allocations,
edits::ReadRecorder,
fixtures::{get_language, get_test_language},
};
use crate::{
fuzz::edits::Edit,
parse::perform_edit,
tests::{
generate_parser,
helpers::fixtures::{fixtures_dir, get_test_fixture_language},
invert_edit,
},
};
#[test]
fn test_parsing_simple_string() {
    let mut parser = Parser::new();
    parser.set_language(&get_language("rust")).unwrap();
    let tree = parser
        .parse(
            "
        struct Stuff {}
        fn main() {}
    ",
            None,
        )
        .unwrap();
    let root_node = tree.root_node();
    assert_eq!(root_node.kind(), "source_file");
    // The whole-tree s-expression should show both top-level items, each with
    // its named fields.
    assert_eq!(
        root_node.to_sexp(),
        concat!(
            "(source_file ",
            "(struct_item name: (type_identifier) body: (field_declaration_list)) ",
            "(function_item name: (identifier) parameters: (parameters) body: (block)))"
        )
    );
    let struct_node = root_node.child(0).unwrap();
    assert_eq!(struct_node.kind(), "struct_item");
}
#[test]
fn test_parsing_with_logging() {
    let mut parser = Parser::new();
    parser.set_language(&get_language("rust")).unwrap();
    // Record every (log type, message) pair the parser emits during a parse.
    let mut messages = Vec::new();
    parser.set_logger(Some(Box::new(|log_type, message| {
        messages.push((log_type, message.to_string()));
    })));
    parser
        .parse(
            "
        struct Stuff {}
        fn main() {}
    ",
            None,
        )
        .unwrap();
    assert!(messages.contains(&(
        LogType::Parse,
        "reduce sym:struct_item, child_count:3".to_string()
    )));
    assert!(messages.contains(&(LogType::Lex, "skip character:' '".to_string())));
    // Log output reports rows 0-indexed, so at least one message must mention
    // row 0. (Replaces a manual flag-setting loop with `Iterator::any`.)
    assert!(messages.iter().any(|(_, m)| m.contains("row:0")));
}
#[test]
fn test_parsing_with_debug_graph_enabled() {
    use std::io::{BufRead, BufReader, Seek};
    // The dot-graph debug log is expected to never contain a zero-indexed
    // row, which would show up as "position: 0,".
    let has_zero_indexed_row = |s: &str| s.contains("position: 0,");
    let mut parser = Parser::new();
    parser.set_language(&get_language("javascript")).unwrap();
    // Direct the parser's dot-graph output into a temporary file, parse, then
    // rewind and scan every emitted line.
    let mut debug_graph_file = tempfile::tempfile().unwrap();
    parser.print_dot_graphs(&debug_graph_file);
    parser.parse("const zero = 0", None).unwrap();
    debug_graph_file.rewind().unwrap();
    let log_reader = BufReader::new(debug_graph_file)
        .lines()
        .map(|l| l.expect("Failed to read line from graph log"));
    for line in log_reader {
        assert!(
            !has_zero_indexed_row(&line),
            "Graph log output includes zero-indexed row: {line}",
        );
    }
}
#[test]
fn test_parsing_with_custom_utf8_input() {
let mut parser = Parser::new();
parser.set_language(&get_language("rust")).unwrap();
let lines = &["pub fn foo() {", " 1", "}"];
let tree = parser
.parse_with_options(
&mut |_, position| {
let row = position.row;
let column = position.column;
if row < lines.len() {
if column < lines[row].len() {
&lines[row].as_bytes()[column..]
} else {
b"\n"
}
} else {
&[]
}
},
None,
None,
)
.unwrap();
let root = tree.root_node();
assert_eq!(
root.to_sexp(),
concat!(
"(source_file ",
"(function_item ",
"(visibility_modifier) ",
"name: (identifier) ",
"parameters: (parameters) ",
"body: (block (integer_literal))))"
)
);
assert_eq!(root.kind(), "source_file");
assert!(!root.has_error());
assert_eq!(root.child(0).unwrap().kind(), "function_item");
}
#[test]
fn test_parsing_with_custom_utf16le_input() {
let mut parser = Parser::new();
parser.set_language(&get_language("rust")).unwrap();
let lines = ["pub fn foo() {", " 1", "}"]
.iter()
.map(|s| s.encode_utf16().map(u16::to_le).collect::<Vec<_>>())
.collect::<Vec<_>>();
let newline = [('\n' as u16).to_le()];
let tree = parser
.parse_utf16_le_with_options(
&mut |_, position| {
let row = position.row;
let column = position.column;
if row < lines.len() {
if column < lines[row].len() {
&lines[row][column..]
} else {
&newline
}
} else {
&[]
}
},
None,
None,
)
.unwrap();
let root = tree.root_node();
assert_eq!(
root.to_sexp(),
"(source_file (function_item (visibility_modifier) name: (identifier) parameters: (parameters) body: (block (integer_literal))))"
);
assert_eq!(root.kind(), "source_file");
assert!(!root.has_error());
assert_eq!(root.child(0).unwrap().kind(), "function_item");
}
#[test]
fn test_parsing_with_custom_utf16_be_input() {
let mut parser = Parser::new();
parser.set_language(&get_language("rust")).unwrap();
let lines: Vec<Vec<u16>> = ["pub fn foo() {", " 1", "}"]
.iter()
.map(|s| s.encode_utf16().collect::<Vec<_>>())
.map(|v| v.iter().map(|u| u.to_be()).collect())
.collect();
let newline = [('\n' as u16).to_be()];
let tree = parser
.parse_utf16_be_with_options(
&mut |_, position| {
let row = position.row;
let column = position.column;
if row < lines.len() {
if column < lines[row].len() {
&lines[row][column..]
} else {
&newline
}
} else {
&[]
}
},
None,
None,
)
.unwrap();
let root = tree.root_node();
assert_eq!(
root.to_sexp(),
"(source_file (function_item (visibility_modifier) name: (identifier) parameters: (parameters) body: (block (integer_literal))))"
);
assert_eq!(root.kind(), "source_file");
assert!(!root.has_error());
assert_eq!(root.child(0).unwrap().kind(), "function_item");
}
#[test]
fn test_parsing_with_callback_returning_owned_strings() {
    let mut parser = Parser::new();
    parser.set_language(&get_language("rust")).unwrap();
    let text = b"pub fn foo() { 1 }";
    // The read callback returns an owned `String` (the remainder of the
    // input) on each call, rather than a borrowed slice.
    let tree = parser
        .parse_with_options(
            &mut |i, _| String::from_utf8(text[i..].to_vec()).unwrap(),
            None,
            None,
        )
        .unwrap();
    let root = tree.root_node();
    assert_eq!(
        root.to_sexp(),
        "(source_file (function_item (visibility_modifier) name: (identifier) parameters: (parameters) body: (block (integer_literal))))"
    );
}
#[test]
fn test_parsing_text_with_byte_order_mark() {
let mut parser = Parser::new();
parser.set_language(&get_language("rust")).unwrap();
// Parse UTF16 text with a BOM
let tree = parser
.parse_utf16_le(
"\u{FEFF}fn a() {}"
.encode_utf16()
.map(u16::to_le)
.collect::<Vec<_>>(),
None,
)
.unwrap();
assert_eq!(
tree.root_node().to_sexp(),
"(source_file (function_item name: (identifier) parameters: (parameters) body: (block)))"
);
assert_eq!(tree.root_node().start_byte(), 2);
// Parse UTF8 text with a BOM
let mut tree = parser.parse("\u{FEFF}fn a() {}", None).unwrap();
assert_eq!(
tree.root_node().to_sexp(),
"(source_file (function_item name: (identifier) parameters: (parameters) body: (block)))"
);
assert_eq!(tree.root_node().start_byte(), 3);
// Edit the text, inserting a character before the BOM. The BOM is now an error.
tree.edit(&InputEdit {
start_byte: 0,
old_end_byte: 0,
new_end_byte: 1,
start_position: Point::new(0, 0),
old_end_position: Point::new(0, 0),
new_end_position: Point::new(0, 1),
});
let mut tree = parser.parse(" \u{FEFF}fn a() {}", Some(&tree)).unwrap();
assert_eq!(
tree.root_node().to_sexp(),
"(source_file (ERROR (UNEXPECTED 65279)) (function_item name: (identifier) parameters: (parameters) body: (block)))"
);
assert_eq!(tree.root_node().start_byte(), 1);
// Edit the text again, putting the BOM back at the beginning.
tree.edit(&InputEdit {
start_byte: 0,
old_end_byte: 1,
new_end_byte: 0,
start_position: Point::new(0, 0),
old_end_position: Point::new(0, 1),
new_end_position: Point::new(0, 0),
});
let tree = parser.parse("\u{FEFF}fn a() {}", Some(&tree)).unwrap();
assert_eq!(
tree.root_node().to_sexp(),
"(source_file (function_item name: (identifier) parameters: (parameters) body: (block)))"
);
assert_eq!(tree.root_node().start_byte(), 3);
}
#[test]
fn test_parsing_invalid_chars_at_eof() {
    let mut parser = Parser::new();
    parser.set_language(&get_language("json")).unwrap();
    // 0xDF is the lead byte of a two-byte UTF-8 sequence that gets cut off by
    // end-of-input, so the lexer should report it as INVALID.
    let tree = parser.parse(b"\xdf", None).unwrap();
    assert_eq!(
        tree.root_node().to_sexp(),
        "(document (ERROR (UNEXPECTED INVALID)))"
    );
}
#[test]
fn test_parsing_unexpected_null_characters_within_source() {
    let mut parser = Parser::new();
    parser.set_language(&get_language("javascript")).unwrap();
    // A NUL byte mid-source should surface as an ERROR node rather than
    // terminating the parse.
    let tree = parser.parse(b"var \0 something;", None).unwrap();
    assert_eq!(
        tree.root_node().to_sexp(),
        "(program (variable_declaration (ERROR (UNEXPECTED '\\0')) (variable_declarator name: (identifier))))"
    );
}
#[test]
fn test_parsing_ends_when_input_callback_returns_empty() {
    let mut parser = Parser::new();
    parser.set_language(&get_language("javascript")).unwrap();
    let mut i = 0;
    let source = b"abcdefghijklmnoqrs";
    // The callback serves the input in 3-byte chunks, but returns an empty
    // slice once the requested offset reaches 6 — which the parser treats as
    // end-of-input.
    let tree = parser
        .parse_with_options(
            &mut |offset, _| {
                i += 1;
                if offset >= 6 {
                    b""
                } else {
                    &source[offset..usize::min(source.len(), offset + 3)]
                }
            },
            None,
            None,
        )
        .unwrap();
    // The resulting tree must end where the callback stopped supplying bytes.
    assert_eq!(tree.root_node().end_byte(), 6);
}
// Incremental parsing
#[test]
fn test_parsing_after_editing_beginning_of_code() {
let mut parser = Parser::new();
parser.set_language(&get_language("javascript")).unwrap();
let mut code = b"123 + 456 * (10 + x);".to_vec();
let mut tree = parser.parse(&code, None).unwrap();
assert_eq!(
tree.root_node().to_sexp(),
concat!(
"(program (expression_statement (binary_expression ",
"left: (number) ",
"right: (binary_expression left: (number) right: (parenthesized_expression ",
"(binary_expression left: (number) right: (identifier)))))))",
)
);
perform_edit(
&mut tree,
&mut code,
&Edit {
position: 3,
deleted_length: 0,
inserted_text: b" || 5".to_vec(),
},
)
.unwrap();
let mut recorder = ReadRecorder::new(&code);
let tree = parser
.parse_with_options(&mut |i, _| recorder.read(i), Some(&tree), None)
.unwrap();
assert_eq!(
tree.root_node().to_sexp(),
concat!(
"(program (expression_statement (binary_expression ",
"left: (number) ",
"right: (binary_expression ",
"left: (number) ",
"right: (binary_expression ",
"left: (number) ",
"right: (parenthesized_expression (binary_expression left: (number) right: (identifier))))))))",
)
);
assert_eq!(recorder.strings_read(), vec!["123 || 5 "]);
}
#[test]
fn test_parsing_after_editing_end_of_code() {
let mut parser = Parser::new();
parser.set_language(&get_language("javascript")).unwrap();
let mut code = b"x * (100 + abc);".to_vec();
let mut tree = parser.parse(&code, None).unwrap();
assert_eq!(
tree.root_node().to_sexp(),
concat!(
"(program (expression_statement (binary_expression ",
"left: (identifier) ",
"right: (parenthesized_expression (binary_expression left: (number) right: (identifier))))))",
)
);
let position = code.len() - 2;
perform_edit(
&mut tree,
&mut code,
&Edit {
position,
deleted_length: 0,
inserted_text: b".d".to_vec(),
},
)
.unwrap();
let mut recorder = ReadRecorder::new(&code);
let tree = parser
.parse_with_options(&mut |i, _| recorder.read(i), Some(&tree), None)
.unwrap();
assert_eq!(
tree.root_node().to_sexp(),
concat!(
"(program (expression_statement (binary_expression ",
"left: (identifier) ",
"right: (parenthesized_expression (binary_expression ",
"left: (number) ",
"right: (member_expression ",
"object: (identifier) ",
"property: (property_identifier)))))))"
)
);
assert_eq!(recorder.strings_read(), vec![" * ", "abc.d)",]);
}
#[test]
fn test_parsing_empty_file_with_reused_tree() {
    // Parse an empty input and a whitespace-only input, each twice — the
    // second time reusing the tree from the first parse.
    let mut parser = Parser::new();
    parser.set_language(&get_language("rust")).unwrap();
    for source in ["", "\n "] {
        let tree = parser.parse(source, None);
        parser.parse(source, tree.as_ref());
    }
}
#[test]
fn test_parsing_after_editing_tree_that_depends_on_column_values() {
// The "uses_current_column" fixture grammar produces layout-sensitive
// trees (do-blocks scoped by column). An edit that shifts columns must
// cause the dependent region to be re-read and re-parsed.
let mut parser = Parser::new();
parser
.set_language(&get_test_fixture_language("uses_current_column"))
.unwrap();
let mut code = b"
a = b
c = do d
e + f
g
h + i
"
.to_vec();
let mut tree = parser.parse(&code, None).unwrap();
assert_eq!(
tree.root_node().to_sexp(),
concat!(
"(block ",
"(binary_expression (identifier) (identifier)) ",
"(binary_expression (identifier) (do_expression (block (identifier) (binary_expression (identifier) (identifier)) (identifier)))) ",
"(binary_expression (identifier) (identifier)))",
)
);
// Insert "1234" into the identifier `c`, shifting everything after it on
// that line four columns to the right.
perform_edit(
&mut tree,
&mut code,
&Edit {
position: 8,
deleted_length: 0,
inserted_text: b"1234".to_vec(),
},
)
.unwrap();
assert_eq!(
code,
b"
a = b
c1234 = do d
e + f
g
h + i
"
);
let mut recorder = ReadRecorder::new(&code);
let tree = parser
.parse_with_options(&mut |i, _| recorder.read(i), Some(&tree), None)
.unwrap();
// The column shift changes which lines belong to the do-block, so the
// tree's shape changes even though only one line was edited.
assert_eq!(
tree.root_node().to_sexp(),
concat!(
"(block ",
"(binary_expression (identifier) (identifier)) ",
"(binary_expression (identifier) (do_expression (block (identifier)))) ",
"(binary_expression (identifier) (identifier)) ",
"(identifier) ",
"(binary_expression (identifier) (identifier)))",
)
);
// The edited line and the column-dependent lines after it were re-read;
// the first and last lines were reused from the old tree.
assert_eq!(
recorder.strings_read(),
vec!["\nc1234 = do d\n e + f\n g\n"]
);
}
#[test]
fn test_parsing_after_editing_tree_that_depends_on_column_position() {
// The "depends_on_column" fixture grammar yields a different node kind
// depending on the column where `x` appears (odd_column vs even_column).
let mut parser = Parser::new();
parser
.set_language(&get_test_fixture_language("depends_on_column"))
.unwrap();
let mut code = b"\n x".to_vec();
let mut tree = parser.parse(&code, None).unwrap();
assert_eq!(tree.root_node().to_sexp(), "(x_is_at (odd_column))");
// Insert one space before `x`, shifting it to the next column; the node
// kind must flip from odd_column to even_column.
perform_edit(
&mut tree,
&mut code,
&Edit {
position: 1,
deleted_length: 0,
inserted_text: b" ".to_vec(),
},
)
.unwrap();
assert_eq!(code, b"\n x");
let mut recorder = ReadRecorder::new(&code);
let mut tree = parser
.parse_with_options(&mut |i, _| recorder.read(i), Some(&tree), None)
.unwrap();
assert_eq!(tree.root_node().to_sexp(), "(x_is_at (even_column))",);
assert_eq!(recorder.strings_read(), vec!["\n x"]);
// Insert a newline before the indentation; `x` keeps its column, so the
// node kind must stay even_column after the incremental re-parse.
perform_edit(
&mut tree,
&mut code,
&Edit {
position: 1,
deleted_length: 0,
inserted_text: b"\n".to_vec(),
},
)
.unwrap();
assert_eq!(code, b"\n\n x");
let mut recorder = ReadRecorder::new(&code);
let tree = parser
.parse_with_options(&mut |i, _| recorder.read(i), Some(&tree), None)
.unwrap();
assert_eq!(tree.root_node().to_sexp(), "(x_is_at (even_column))",);
assert_eq!(recorder.strings_read(), vec!["\n\n x"]);
}
#[test]
fn test_parsing_after_detecting_error_in_the_middle_of_a_string_token() {
// Regression test: deleting and then restoring the tail of a string token
// must allow the incremental parser to recover the original tree exactly.
let mut parser = Parser::new();
parser.set_language(&get_language("python")).unwrap();
let mut source = b"a = b, 'c, d'".to_vec();
let tree = parser.parse(&source, None).unwrap();
assert_eq!(
tree.root_node().to_sexp(),
"(module (expression_statement (assignment left: (identifier) right: (expression_list (identifier) (string (string_start) (string_content) (string_end))))))"
);
// Delete a suffix of the source code, starting in the middle of the string
// literal, after some whitespace. With this deletion, the remaining string
// content: "c, " looks like two valid python tokens: an identifier and a comma.
// When this edit is undone, in order to correctly recover the original tree,
// the parser needs to remember that before matching the `c` as an identifier,
// it looked ahead several bytes, trying to find the closing quotation mark in
// order to match the "string content" node.
let edit_ix = std::str::from_utf8(&source).unwrap().find("d'").unwrap();
let edit = Edit {
position: edit_ix,
deleted_length: source.len() - edit_ix,
inserted_text: Vec::new(),
};
let undo = invert_edit(&source, &edit);
// Apply the deletion: the truncated string now yields a parse error.
let mut tree2 = tree.clone();
perform_edit(&mut tree2, &mut source, &edit).unwrap();
tree2 = parser.parse(&source, Some(&tree2)).unwrap();
assert!(tree2.root_node().has_error());
// Undo it: the incremental re-parse must reproduce the original tree.
let mut tree3 = tree2.clone();
perform_edit(&mut tree3, &mut source, &undo).unwrap();
tree3 = parser.parse(&source, Some(&tree3)).unwrap();
assert_eq!(tree3.root_node().to_sexp(), tree.root_node().to_sexp(),);
}
// Thread safety
#[test]
fn test_parsing_on_multiple_threads() {
// Trees can be cloned and re-parsed concurrently: each thread gets its own
// Parser plus a clone of the shared tree, edits it, and re-parses.
// Parse this source file so that each thread has a non-trivial amount of
// work to do.
let this_file_source = include_str!("parser_test.rs");
let mut parser = Parser::new();
parser.set_language(&get_language("rust")).unwrap();
let tree = parser.parse(this_file_source, None).unwrap();
let mut parse_threads = Vec::new();
for thread_id in 1..5 {
let mut tree_clone = tree.clone();
parse_threads.push(thread::spawn(move || {
// For each thread, prepend a different number of declarations to the
// source code.
let mut prepend_line_count = 0;
let mut prepended_source = String::new();
for _ in 0..thread_id {
prepend_line_count += 2;
prepended_source += "struct X {}\n\n";
}
// Inform the cloned tree about the insertion at the very start of
// the file so the re-parse can reuse the rest of the old tree.
tree_clone.edit(&InputEdit {
start_byte: 0,
old_end_byte: 0,
new_end_byte: prepended_source.len(),
start_position: Point::new(0, 0),
old_end_position: Point::new(0, 0),
new_end_position: Point::new(prepend_line_count, 0),
});
prepended_source += this_file_source;
// Reparse using the old tree as a starting point.
let mut parser = Parser::new();
parser.set_language(&get_language("rust")).unwrap();
parser.parse(&prepended_source, Some(&tree_clone)).unwrap()
}));
}
// Check that the trees have the expected relationship to one another.
let trees = parse_threads
.into_iter()
.map(|thread| thread.join().unwrap());
// Thread N prepended N `struct X {}` items, so its root has N extra children.
let child_count_differences = trees
.map(|t| t.root_node().child_count() - tree.root_node().child_count())
.collect::<Vec<_>>();
assert_eq!(child_count_differences, &[1, 2, 3, 4]);
}
#[test]
fn test_parsing_cancelled_by_another_thread() {
// A parse in flight can be cancelled from another thread: the progress
// callback polls a shared atomic flag and returns `Break` once it is set.
let cancellation_flag = std::sync::Arc::new(AtomicUsize::new(0));
let flag = cancellation_flag.clone();
let callback = &mut |_: &ParseState| {
if cancellation_flag.load(Ordering::SeqCst) != 0 {
ControlFlow::Break(())
} else {
ControlFlow::Continue(())
}
};
let mut parser = Parser::new();
parser.set_language(&get_language("javascript")).unwrap();
// Long input - parsing succeeds
// (the flag is still 0, so the callback never cancels).
let tree = parser.parse_with_options(
&mut |offset, _| {
if offset == 0 {
" [".as_bytes()
} else if offset >= 20000 {
"".as_bytes()
} else {
"0,".as_bytes()
}
},
None,
Some(ParseOptions::new().progress_callback(callback)),
);
assert!(tree.is_some());
// Set the flag from a second thread after a short delay.
let cancel_thread = thread::spawn(move || {
thread::sleep(time::Duration::from_millis(100));
flag.store(1, Ordering::SeqCst);
});
// Infinite input
// This input callback never returns an empty chunk, so only cancellation
// can end the parse; the per-chunk sleep keeps it slow enough for the
// cancelling thread to win the race.
let tree = parser.parse_with_options(
&mut |offset, _| {
thread::yield_now();
thread::sleep(time::Duration::from_millis(10));
if offset == 0 {
b" ["
} else {
b"0,"
}
},
None,
Some(ParseOptions::new().progress_callback(callback)),
);
// Parsing returns None because it was cancelled.
cancel_thread.join().unwrap();
assert!(tree.is_none());
}
// Timeouts
#[test]
#[retry(10)]
fn test_parsing_with_a_timeout() {
// A progress callback that returns `Break` acts as a timeout. The parser
// keeps its partial state, so a later call resumes where it stopped.
// (#[retry] because the wall-clock bounds below are inherently flaky.)
let mut parser = Parser::new();
parser.set_language(&get_language("json")).unwrap();
// Parse an infinitely-long array, but pause after 1ms of processing.
let start_time = time::Instant::now();
let tree = parser.parse_with_options(
&mut |offset, _| {
if offset == 0 {
b" ["
} else {
b",0"
}
},
None,
Some(ParseOptions::new().progress_callback(&mut |_| {
if start_time.elapsed().as_micros() > 1000 {
ControlFlow::Break(())
} else {
ControlFlow::Continue(())
}
})),
);
assert!(tree.is_none());
assert!(start_time.elapsed().as_micros() < 2000);
// Continue parsing, but pause after 1 ms of processing.
let start_time = time::Instant::now();
let tree = parser.parse_with_options(
&mut |offset, _| {
if offset == 0 {
b" ["
} else {
b",0"
}
},
None,
Some(ParseOptions::new().progress_callback(&mut |_| {
if start_time.elapsed().as_micros() > 5000 {
ControlFlow::Break(())
} else {
ControlFlow::Continue(())
}
})),
);
assert!(tree.is_none());
assert!(start_time.elapsed().as_micros() > 100);
assert!(start_time.elapsed().as_micros() < 10000);
// Finish parsing
// This time the input closes the array at offset 5000 and there is no
// progress callback, so the resumed parse runs to completion.
let tree = parser
.parse_with_options(
&mut |offset, _| match offset {
5001.. => "".as_bytes(),
5000 => "]".as_bytes(),
_ => ",0".as_bytes(),
},
None,
None,
)
.unwrap();
assert_eq!(tree.root_node().child(0).unwrap().kind(), "array");
}
#[test]
#[retry(10)]
fn test_parsing_with_a_timeout_and_a_reset() {
// After a cancelled parse the parser keeps its partial state: a following
// `parse` call resumes rather than restarting, unless `reset` is called.
let mut parser = Parser::new();
parser.set_language(&get_language("json")).unwrap();
let start_time = time::Instant::now();
let code = "[\"ok\", 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]";
// Cancel almost immediately (after 5us), leaving an unfinished parse that
// has already consumed the `"ok"` prefix.
let tree = parser.parse_with_options(
&mut |offset, _| {
if offset >= code.len() {
&[]
} else {
&code.as_bytes()[offset..]
}
},
None,
Some(ParseOptions::new().progress_callback(&mut |_| {
if start_time.elapsed().as_micros() > 5 {
ControlFlow::Break(())
} else {
ControlFlow::Continue(())
}
})),
);
assert!(tree.is_none());
// Without calling reset, the parser continues from where it left off, so
// it does not see the changes to the beginning of the source code.
let tree = parser.parse(
"[null, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]",
None,
).unwrap();
// The first element is still the string from the cancelled parse.
assert_eq!(
tree.root_node()
.named_child(0)
.unwrap()
.named_child(0)
.unwrap()
.kind(),
"string"
);
let start_time = time::Instant::now();
let code = "[\"ok\", 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]";
let tree = parser.parse_with_options(
&mut |offset, _| {
if offset >= code.len() {
&[]
} else {
&code.as_bytes()[offset..]
}
},
None,
Some(ParseOptions::new().progress_callback(&mut |_| {
if start_time.elapsed().as_micros() > 5 {
ControlFlow::Break(())
} else {
ControlFlow::Continue(())
}
})),
);
assert!(tree.is_none());
// By calling reset, we force the parser to start over from scratch so
// that it sees the changes to the beginning of the source code.
parser.reset();
let tree = parser.parse(
"[null, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]",
None,
).unwrap();
assert_eq!(
tree.root_node()
.named_child(0)
.unwrap()
.named_child(0)
.unwrap()
.kind(),
"null"
);
}
#[test]
#[retry(10)]
fn test_parsing_with_a_timeout_and_implicit_reset() {
// Switching the parser's language must implicitly reset an unfinished
// parse. Wrapped in allocations::record to also verify nothing leaks.
allocations::record(|| {
let mut parser = Parser::new();
parser.set_language(&get_language("javascript")).unwrap();
let code = "[\"ok\", 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]";
let start_time = time::Instant::now();
// Cancel after 5us, leaving a partial parse in the parser.
let tree = parser.parse_with_options(
&mut |offset, _| {
if offset >= code.len() {
&[]
} else {
&code.as_bytes()[offset..]
}
},
None,
Some(ParseOptions::new().progress_callback(&mut |_| {
if start_time.elapsed().as_micros() > 5 {
ControlFlow::Break(())
} else {
ControlFlow::Continue(())
}
})),
);
assert!(tree.is_none());
// Changing the parser's language implicitly resets, discarding
// the previous partial parse.
parser.set_language(&get_language("json")).unwrap();
let tree = parser.parse(
"[null, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]",
None,
).unwrap();
// Parsed from scratch, so the changed first element is visible.
assert_eq!(
tree.root_node()
.named_child(0)
.unwrap()
.named_child(0)
.unwrap()
.kind(),
"null"
);
});
}
#[test]
#[retry(10)]
fn test_parsing_with_timeout_and_no_completion() {
// Dropping a parser that still holds an unfinished (cancelled) parse must
// release all of its allocations (verified by allocations::record).
allocations::record(|| {
let mut parser = Parser::new();
parser.set_language(&get_language("javascript")).unwrap();
let code = "[\"ok\", 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]";
let start_time = time::Instant::now();
// Cancel after 5us so the parse is left incomplete.
let tree = parser.parse_with_options(
&mut |offset, _| {
if offset >= code.len() {
&[]
} else {
&code.as_bytes()[offset..]
}
},
None,
Some(ParseOptions::new().progress_callback(&mut |_| {
if start_time.elapsed().as_micros() > 5 {
ControlFlow::Break(())
} else {
ControlFlow::Continue(())
}
})),
);
assert!(tree.is_none());
// drop the parser when it has an unfinished parse
});
}
#[test]
fn test_parsing_with_timeout_during_balancing() {
allocations::record(|| {
let mut parser = Parser::new();
parser.set_language(&get_language("javascript")).unwrap();
let function_count = 100;
let code = "function() {}\n".repeat(function_count);
let mut current_byte_offset = 0;
let mut in_balancing = false;
let tree = parser.parse_with_options(
&mut |offset, _| {
if offset >= code.len() {
&[]
} else {
&code.as_bytes()[offset..]
}
},
None,
Some(ParseOptions::new().progress_callback(&mut |state| {
// The parser will call the progress_callback during parsing, and at the very end
// during tree-balancing. For very large trees, this balancing act can take quite
// some time, so we want to verify that timing out during this operation is
// possible.
//
// We verify this by checking the current byte offset, as this number will *not* be
// updated during tree balancing. If we see the same offset twice, we know that we
// are in the balancing phase.
if state.current_byte_offset() != current_byte_offset {
current_byte_offset = state.current_byte_offset();
ControlFlow::Continue(())
} else {
in_balancing = true;
ControlFlow::Break(())
}
})),
);
assert!(tree.is_none());
assert!(in_balancing);
// This should not cause an assertion failure.
parser.reset();
let tree = parser.parse_with_options(
&mut |offset, _| {
if offset >= code.len() {
&[]
} else {
&code.as_bytes()[offset..]
}
},
None,
Some(ParseOptions::new().progress_callback(&mut |state| {
if state.current_byte_offset() != current_byte_offset {
current_byte_offset = state.current_byte_offset();
ControlFlow::Continue(())
} else {
in_balancing = true;
ControlFlow::Break(())
}
})),
);
assert!(tree.is_none());
assert!(in_balancing);
// If we resume parsing (implying we didn't call `parser.reset()`), we should be able to
// finish parsing the tree, continuing from where we left off.
let tree = parser
.parse_with_options(
&mut |offset, _| {
if offset >= code.len() {
&[]
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | true |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests/tree_test.rs | crates/cli/src/tests/tree_test.rs | use std::str;
use tree_sitter::{InputEdit, Parser, Point, Range, Tree};
use super::helpers::fixtures::get_language;
use crate::{
fuzz::edits::Edit,
parse::perform_edit,
tests::{helpers::fixtures::get_test_fixture_language, invert_edit},
};
#[test]
fn test_tree_edit() {
// Exercises Tree::edit position bookkeeping: each case applies one
// InputEdit to a fresh clone and checks how node padding/content ranges
// and the has_changes flags are adjusted (no re-parse is performed).
let mut parser = Parser::new();
parser.set_language(&get_language("javascript")).unwrap();
let tree = parser.parse(" abc !== def", None).unwrap();
assert_eq!(
tree.root_node().to_sexp(),
"(program (expression_statement (binary_expression left: (identifier) right: (identifier))))"
);
// edit entirely within the tree's padding:
// resize the padding of the tree and its leftmost descendants.
{
let mut tree = tree.clone();
tree.edit(&InputEdit {
start_byte: 1,
old_end_byte: 1,
new_end_byte: 2,
start_position: Point::new(0, 1),
old_end_position: Point::new(0, 1),
new_end_position: Point::new(0, 2),
});
let expr = tree.root_node().child(0).unwrap().child(0).unwrap();
let child1 = expr.child(0).unwrap();
let child2 = expr.child(1).unwrap();
assert!(expr.has_changes());
assert_eq!(expr.start_byte(), 3);
assert_eq!(expr.end_byte(), 16);
assert!(child1.has_changes());
assert_eq!(child1.start_byte(), 3);
assert_eq!(child1.end_byte(), 6);
// Siblings after the leftmost path are untouched.
assert!(!child2.has_changes());
assert_eq!(child2.start_byte(), 8);
assert_eq!(child2.end_byte(), 11);
}
// edit starting in the tree's padding but extending into its content:
// shrink the content to compensate for the expanded padding.
{
let mut tree = tree.clone();
// NOTE(review): old_end_position (0, 5) disagrees with old_end_byte 4
// (all on row 0) — confirm the intended end point of this edit.
tree.edit(&InputEdit {
start_byte: 1,
old_end_byte: 4,
new_end_byte: 5,
start_position: Point::new(0, 1),
old_end_position: Point::new(0, 5),
new_end_position: Point::new(0, 5),
});
let expr = tree.root_node().child(0).unwrap().child(0).unwrap();
let child1 = expr.child(0).unwrap();
let child2 = expr.child(1).unwrap();
assert!(expr.has_changes());
assert_eq!(expr.start_byte(), 5);
assert_eq!(expr.end_byte(), 16);
assert!(child1.has_changes());
assert_eq!(child1.start_byte(), 5);
assert_eq!(child1.end_byte(), 6);
assert!(!child2.has_changes());
assert_eq!(child2.start_byte(), 8);
assert_eq!(child2.end_byte(), 11);
}
// insertion at the edge of a tree's padding:
// expand the tree's padding.
{
let mut tree = tree.clone();
tree.edit(&InputEdit {
start_byte: 2,
old_end_byte: 2,
new_end_byte: 4,
start_position: Point::new(0, 2),
old_end_position: Point::new(0, 2),
new_end_position: Point::new(0, 4),
});
let expr = tree.root_node().child(0).unwrap().child(0).unwrap();
let child1 = expr.child(0).unwrap();
let child2 = expr.child(1).unwrap();
assert!(expr.has_changes());
assert_eq!(expr.byte_range(), 4..17);
assert!(child1.has_changes());
assert_eq!(child1.byte_range(), 4..7);
assert!(!child2.has_changes());
assert_eq!(child2.byte_range(), 9..12);
}
// replacement starting at the edge of the tree's padding:
// resize the content and not the padding.
{
let mut tree = tree.clone();
// NOTE(review): this edit and its assertions are byte-for-byte identical
// to the previous "insertion" case, although the comment describes a
// replacement (which would need old_end_byte > start_byte) — confirm
// the intended edit values.
tree.edit(&InputEdit {
start_byte: 2,
old_end_byte: 2,
new_end_byte: 4,
start_position: Point::new(0, 2),
old_end_position: Point::new(0, 2),
new_end_position: Point::new(0, 4),
});
let expr = tree.root_node().child(0).unwrap().child(0).unwrap();
let child1 = expr.child(0).unwrap();
let child2 = expr.child(1).unwrap();
assert!(expr.has_changes());
assert_eq!(expr.byte_range(), 4..17);
assert!(child1.has_changes());
assert_eq!(child1.byte_range(), 4..7);
assert!(!child2.has_changes());
assert_eq!(child2.byte_range(), 9..12);
}
// deletion that spans more than one child node:
// shrink subsequent child nodes.
{
let mut tree = tree.clone();
tree.edit(&InputEdit {
start_byte: 1,
old_end_byte: 11,
new_end_byte: 4,
start_position: Point::new(0, 1),
old_end_position: Point::new(0, 11),
new_end_position: Point::new(0, 4),
});
let expr = tree.root_node().child(0).unwrap().child(0).unwrap();
let child1 = expr.child(0).unwrap();
let child2 = expr.child(1).unwrap();
let child3 = expr.child(2).unwrap();
assert!(expr.has_changes());
assert_eq!(expr.byte_range(), 4..8);
// The first two children are fully consumed by the deletion and
// collapse to empty ranges.
assert!(child1.has_changes());
assert_eq!(child1.byte_range(), 4..4);
assert!(child2.has_changes());
assert_eq!(child2.byte_range(), 4..4);
assert!(child3.has_changes());
assert_eq!(child3.byte_range(), 5..8);
}
// insertion at the end of the tree:
// extend the tree's content.
{
let mut tree = tree.clone();
tree.edit(&InputEdit {
start_byte: 15,
old_end_byte: 15,
new_end_byte: 16,
start_position: Point::new(0, 15),
old_end_position: Point::new(0, 15),
new_end_position: Point::new(0, 16),
});
let expr = tree.root_node().child(0).unwrap().child(0).unwrap();
let child1 = expr.child(0).unwrap();
let child2 = expr.child(1).unwrap();
let child3 = expr.child(2).unwrap();
assert!(expr.has_changes());
assert_eq!(expr.byte_range(), 2..16);
// Only the last child (touching the edit) is marked changed.
assert!(!child1.has_changes());
assert_eq!(child1.byte_range(), 2..5);
assert!(!child2.has_changes());
assert_eq!(child2.byte_range(), 7..10);
assert!(child3.has_changes());
assert_eq!(child3.byte_range(), 12..16);
}
// replacement that starts within a token and extends beyond the end of the tree:
// resize the token and empty out any subsequent child nodes.
{
let mut tree = tree.clone();
tree.edit(&InputEdit {
start_byte: 3,
old_end_byte: 90,
new_end_byte: 4,
start_position: Point::new(0, 3),
old_end_position: Point::new(0, 90),
new_end_position: Point::new(0, 4),
});
let expr = tree.root_node().child(0).unwrap().child(0).unwrap();
let child1 = expr.child(0).unwrap();
let child2 = expr.child(1).unwrap();
let child3 = expr.child(2).unwrap();
assert_eq!(expr.byte_range(), 2..4);
assert!(expr.has_changes());
assert_eq!(child1.byte_range(), 2..4);
assert!(child1.has_changes());
assert_eq!(child2.byte_range(), 4..4);
assert!(child2.has_changes());
assert_eq!(child3.byte_range(), 4..4);
assert!(child3.has_changes());
}
// replacement that starts in whitespace and extends beyond the end of the tree:
// shift the token's start position and empty out its content.
{
// Final case takes ownership of `tree` (no clone needed afterwards).
let mut tree = tree;
tree.edit(&InputEdit {
start_byte: 6,
old_end_byte: 90,
new_end_byte: 8,
start_position: Point::new(0, 6),
old_end_position: Point::new(0, 90),
new_end_position: Point::new(0, 8),
});
let expr = tree.root_node().child(0).unwrap().child(0).unwrap();
let child1 = expr.child(0).unwrap();
let child2 = expr.child(1).unwrap();
let child3 = expr.child(2).unwrap();
assert_eq!(expr.byte_range(), 2..8);
assert!(expr.has_changes());
assert_eq!(child1.byte_range(), 2..5);
assert!(!child1.has_changes());
assert_eq!(child2.byte_range(), 8..8);
assert!(child2.has_changes());
assert_eq!(child3.byte_range(), 8..8);
assert!(child3.has_changes());
}
}
#[test]
fn test_tree_edit_with_included_ranges() {
// A tree parsed with included ranges must adjust those ranges when
// edited: ranges inside a deletion collapse, later ranges shift left.
let mut parser = Parser::new();
parser.set_language(&get_language("html")).unwrap();
let source = "<div><% if a %><span>a</span><% else %><span>b</span><% end %></div>";
// Parse only the HTML fragments, skipping the <% ... %> template tags.
let ranges = [0..5, 15..29, 39..53, 62..68];
parser
.set_included_ranges(
&ranges
.iter()
.map(|range| Range {
start_byte: range.start,
end_byte: range.end,
start_point: Point::new(0, range.start),
end_point: Point::new(0, range.end),
})
.collect::<Vec<_>>(),
)
.unwrap();
let mut tree = parser.parse(source, None).unwrap();
// Delete bytes 29..53 (the `<% else %><span>b</span>` region).
tree.edit(&InputEdit {
start_byte: 29,
old_end_byte: 53,
new_end_byte: 29,
start_position: Point::new(0, 29),
old_end_position: Point::new(0, 53),
new_end_position: Point::new(0, 29),
});
// The first two ranges precede the edit and are unchanged; the deleted
// range collapses to empty at byte 29; the last range shifts left by the
// 24 deleted bytes (62..68 -> 38..44).
assert_eq!(
tree.included_ranges(),
&[
Range {
start_byte: 0,
end_byte: 5,
start_point: Point::new(0, 0),
end_point: Point::new(0, 5),
},
Range {
start_byte: 15,
end_byte: 29,
start_point: Point::new(0, 15),
end_point: Point::new(0, 29),
},
Range {
start_byte: 29,
end_byte: 29,
start_point: Point::new(0, 29),
end_point: Point::new(0, 29),
},
Range {
start_byte: 38,
end_byte: 44,
start_point: Point::new(0, 38),
end_point: Point::new(0, 44),
}
]
);
}
#[test]
fn test_tree_cursor() {
// Basic TreeCursor navigation: first/last child, next/previous sibling,
// plus `reset_to` copying another cursor's position.
let mut parser = Parser::new();
parser.set_language(&get_language("rust")).unwrap();
let tree = parser
.parse(
"
struct Stuff {
a: A,
b: Option<B>,
}
",
None,
)
.unwrap();
let mut cursor = tree.walk();
assert_eq!(cursor.node().kind(), "source_file");
assert!(cursor.goto_first_child());
assert_eq!(cursor.node().kind(), "struct_item");
assert!(cursor.goto_first_child());
assert_eq!(cursor.node().kind(), "struct");
assert!(!cursor.node().is_named());
assert!(cursor.goto_next_sibling());
assert_eq!(cursor.node().kind(), "type_identifier");
assert!(cursor.node().is_named());
assert!(cursor.goto_next_sibling());
assert_eq!(cursor.node().kind(), "field_declaration_list");
assert!(cursor.node().is_named());
// Jump to the closing brace, then walk backwards through the list,
// alternating named field declarations and anonymous punctuation.
assert!(cursor.goto_last_child());
assert_eq!(cursor.node().kind(), "}");
assert!(!cursor.node().is_named());
assert_eq!(cursor.node().start_position(), Point { row: 4, column: 16 });
assert!(cursor.goto_previous_sibling());
assert_eq!(cursor.node().kind(), ",");
assert!(!cursor.node().is_named());
assert_eq!(cursor.node().start_position(), Point { row: 3, column: 32 });
assert!(cursor.goto_previous_sibling());
assert_eq!(cursor.node().kind(), "field_declaration");
assert!(cursor.node().is_named());
assert_eq!(cursor.node().start_position(), Point { row: 3, column: 20 });
assert!(cursor.goto_previous_sibling());
assert_eq!(cursor.node().kind(), ",");
assert!(!cursor.node().is_named());
assert_eq!(cursor.node().start_position(), Point { row: 2, column: 24 });
assert!(cursor.goto_previous_sibling());
assert_eq!(cursor.node().kind(), "field_declaration");
assert!(cursor.node().is_named());
assert_eq!(cursor.node().start_position(), Point { row: 2, column: 20 });
assert!(cursor.goto_previous_sibling());
assert_eq!(cursor.node().kind(), "{");
assert!(!cursor.node().is_named());
assert_eq!(cursor.node().start_position(), Point { row: 1, column: 29 });
// A second cursor reset to the first one starts at the same node and can
// ascend independently.
let mut copy = tree.walk();
copy.reset_to(&cursor);
assert_eq!(copy.node().kind(), "{");
assert!(!copy.node().is_named());
assert!(copy.goto_parent());
assert_eq!(copy.node().kind(), "field_declaration_list");
assert!(copy.node().is_named());
assert!(copy.goto_parent());
assert_eq!(copy.node().kind(), "struct_item");
}
#[test]
fn test_tree_cursor_previous_sibling_with_aliases() {
    // Sibling navigation must behave symmetrically even when root-level
    // nodes are aliased by the grammar ("aliases_in_root" fixture).
    let mut parser = Parser::new();
    parser
        .set_language(&get_test_fixture_language("aliases_in_root"))
        .unwrap();
    let text = "# comment\n# \nfoo foo";
    let tree = parser.parse(text, None).unwrap();

    let mut walker = tree.walk();
    assert_eq!(walker.node().kind(), "document");
    walker.goto_first_child();
    assert_eq!(walker.node().kind(), "comment");

    // Walk forward to the aliased `bar` node...
    for expected in ["comment", "bar"] {
        assert!(walker.goto_next_sibling());
        assert_eq!(walker.node().kind(), expected);
    }
    // ...then back over both comments...
    for expected in ["comment", "comment"] {
        assert!(walker.goto_previous_sibling());
        assert_eq!(walker.node().kind(), expected);
    }
    // ...and forward again to confirm the cursor state survived.
    for expected in ["comment", "bar"] {
        assert!(walker.goto_next_sibling());
        assert_eq!(walker.node().kind(), expected);
    }
}
#[test]
fn test_tree_cursor_previous_sibling() {
    // `goto_previous_sibling` steps backward through the root's children
    // and reports failure once the first child has been reached.
    let mut parser = Parser::new();
    parser.set_language(&get_language("rust")).unwrap();
    let text = "
// Hi there
// This is fun!
// Another one!
";
    let tree = parser.parse(text, None).unwrap();

    let mut cursor = tree.walk();
    assert_eq!(cursor.node().kind(), "source_file");
    assert!(cursor.goto_last_child());

    // Visit the line comments from last to first.
    let expected_texts = ["// Another one!", "// This is fun!", "// Hi there"];
    for (i, expected) in expected_texts.iter().enumerate() {
        if i > 0 {
            assert!(cursor.goto_previous_sibling());
        }
        assert_eq!(cursor.node().kind(), "line_comment");
        assert_eq!(cursor.node().utf8_text(text.as_bytes()).unwrap(), *expected);
    }

    // Nothing precedes the first comment.
    assert!(!cursor.goto_previous_sibling());
}
#[test]
fn test_tree_cursor_fields() {
    // `TreeCursor::field_name` must report the field of the current node,
    // while interleaved extras (comments) carry no field name.
    let mut parser = Parser::new();
    parser.set_language(&get_language("javascript")).unwrap();
    let tree = parser
        .parse("function /*1*/ bar /*2*/ () {}", None)
        .unwrap();

    let mut cursor = tree.walk();
    assert_eq!(cursor.node().kind(), "program");
    cursor.goto_first_child();
    assert_eq!(cursor.node().kind(), "function_declaration");
    assert_eq!(cursor.field_name(), None);
    cursor.goto_first_child();

    // Expected (kind, field name) for each successive sibling.
    let expectations = [
        ("function", None),
        ("comment", None),
        ("identifier", Some("name")),
        ("comment", None),
        ("formal_parameters", Some("parameters")),
    ];
    for (i, (kind, field)) in expectations.iter().enumerate() {
        if i > 0 {
            cursor.goto_next_sibling();
        }
        assert_eq!(cursor.node().kind(), *kind);
        assert_eq!(cursor.field_name(), *field);
    }
}
#[test]
fn test_tree_cursor_child_for_point() {
// `goto_first_child_for_point` descends to the first child containing or
// following the given point, returning its child index, or None when the
// point lies at/after the current node's end.
let mut parser = Parser::new();
parser.set_language(&get_language("javascript")).unwrap();
let source = &"
[
one,
{
two: tree
},
four, five, six
];"[1..];
let tree = parser.parse(source, None).unwrap();
let mut c = tree.walk();
assert_eq!(c.node().kind(), "program");
// Points past the end of the program yield None and leave the cursor put.
assert_eq!(c.goto_first_child_for_point(Point::new(7, 0)), None);
assert_eq!(c.goto_first_child_for_point(Point::new(6, 7)), None);
assert_eq!(c.node().kind(), "program");
// descend to expression statement
assert_eq!(c.goto_first_child_for_point(Point::new(6, 5)), Some(0));
assert_eq!(c.node().kind(), "expression_statement");
// step into ';' and back up
assert_eq!(c.goto_first_child_for_point(Point::new(7, 0)), None);
assert_eq!(c.goto_first_child_for_point(Point::new(6, 6)), None);
assert_eq!(c.goto_first_child_for_point(Point::new(6, 5)), Some(1));
assert_eq!(
(c.node().kind(), c.node().start_position()),
(";", Point::new(6, 5))
);
assert!(c.goto_parent());
// descend into array
assert_eq!(c.goto_first_child_for_point(Point::new(6, 4)), Some(0));
assert_eq!(
(c.node().kind(), c.node().start_position()),
("array", Point::new(0, 4))
);
// step into '[' and back up
assert_eq!(c.goto_first_child_for_point(Point::new(0, 4)), Some(0));
assert_eq!(
(c.node().kind(), c.node().start_position()),
("[", Point::new(0, 4))
);
assert!(c.goto_parent());
// step into identifier 'one' and back up
// (a point before the identifier still selects it — first child at or
// after the point).
assert_eq!(c.goto_first_child_for_point(Point::new(1, 0)), Some(1));
assert_eq!(
(c.node().kind(), c.node().start_position()),
("identifier", Point::new(1, 8))
);
assert!(c.goto_parent());
assert_eq!(c.goto_first_child_for_point(Point::new(1, 10)), Some(1));
assert_eq!(
(c.node().kind(), c.node().start_position()),
("identifier", Point::new(1, 8))
);
assert!(c.goto_parent());
// step into first ',' and back up
assert_eq!(c.goto_first_child_for_point(Point::new(1, 11)), Some(2));
assert_eq!(
(c.node().kind(), c.node().start_position()),
(",", Point::new(1, 11))
);
assert!(c.goto_parent());
// step into identifier 'four' and back up
assert_eq!(c.goto_first_child_for_point(Point::new(5, 0)), Some(5));
assert_eq!(
(c.node().kind(), c.node().start_position()),
("identifier", Point::new(5, 8))
);
assert!(c.goto_parent());
assert_eq!(c.goto_first_child_for_point(Point::new(5, 0)), Some(5));
assert_eq!(
(c.node().kind(), c.node().start_position()),
("identifier", Point::new(5, 8))
);
assert!(c.goto_parent());
// step into ']' and back up
assert_eq!(c.goto_first_child_for_point(Point::new(6, 0)), Some(10));
assert_eq!(
(c.node().kind(), c.node().start_position()),
("]", Point::new(6, 4))
);
assert!(c.goto_parent());
assert_eq!(c.goto_first_child_for_point(Point::new(6, 0)), Some(10));
assert_eq!(
(c.node().kind(), c.node().start_position()),
("]", Point::new(6, 4))
);
assert!(c.goto_parent());
// descend into object
assert_eq!(c.goto_first_child_for_point(Point::new(2, 0)), Some(3));
assert_eq!(
(c.node().kind(), c.node().start_position()),
("object", Point::new(2, 8))
);
}
#[test]
fn test_tree_node_equality() {
    // Two Node handles obtained separately from the same tree compare
    // equal when they denote the same node, unequal otherwise.
    let mut parser = Parser::new();
    parser.set_language(&get_language("rust")).unwrap();
    let tree = parser.parse("struct A {}", None).unwrap();

    let root_a = tree.root_node();
    let root_b = tree.root_node();
    assert_eq!(root_a, root_b);

    let child_a = root_a.child(0).unwrap();
    let child_b = root_b.child(0).unwrap();
    assert_eq!(child_a, child_b);
    assert_ne!(child_a, root_b);
}
#[test]
fn test_get_changed_ranges() {
    // Each sub-case below clones the original tree/source, applies an edit
    // (and then its inverse), and checks which ranges `changed_ranges`
    // reports between the old and re-parsed trees.
    let source_code = b"{a: null};\n".to_vec();
    let mut parser = Parser::new();
    parser.set_language(&get_language("javascript")).unwrap();
    let tree = parser.parse(&source_code, None).unwrap();
    assert_eq!(
        tree.root_node().to_sexp(),
        "(program (expression_statement (object (pair key: (property_identifier) value: (null)))))"
    );

    // Updating one token
    {
        let mut tree = tree.clone();
        let mut source_code = source_code.clone();

        // Replace `null` with `nothing` - that token has changed syntax
        let edit = Edit {
            position: index_of(&source_code, "ull"),
            deleted_length: 3,
            inserted_text: b"othing".to_vec(),
        };
        let inverse_edit = invert_edit(&source_code, &edit);
        let ranges = get_changed_ranges(&mut parser, &mut tree, &mut source_code, &edit);
        assert_eq!(ranges, vec![range_of(&source_code, "nothing")]);

        // Replace `nothing` with `null` - that token has changed syntax
        let ranges = get_changed_ranges(&mut parser, &mut tree, &mut source_code, &inverse_edit);
        assert_eq!(ranges, vec![range_of(&source_code, "null")]);
    }

    // Changing only leading whitespace
    {
        let mut tree = tree.clone();
        let mut source_code = source_code.clone();

        // Insert leading newline - no changed ranges
        let edit = Edit {
            position: 0,
            deleted_length: 0,
            inserted_text: b"\n".to_vec(),
        };
        let inverse_edit = invert_edit(&source_code, &edit);
        let ranges = get_changed_ranges(&mut parser, &mut tree, &mut source_code, &edit);
        assert_eq!(ranges, vec![]);

        // Remove leading newline - no changed ranges
        let ranges = get_changed_ranges(&mut parser, &mut tree, &mut source_code, &inverse_edit);
        assert_eq!(ranges, vec![]);
    }

    // Inserting elements
    {
        let mut tree = tree.clone();
        let mut source_code = source_code.clone();

        // Insert a key-value pair before the `}` - those tokens are changed
        let edit1 = Edit {
            position: index_of(&source_code, "}"),
            deleted_length: 0,
            inserted_text: b", b: false".to_vec(),
        };
        let inverse_edit1 = invert_edit(&source_code, &edit1);
        let ranges = get_changed_ranges(&mut parser, &mut tree, &mut source_code, &edit1);
        assert_eq!(ranges, vec![range_of(&source_code, ", b: false")]);

        let edit2 = Edit {
            position: index_of(&source_code, ", b"),
            deleted_length: 0,
            inserted_text: b", c: 1".to_vec(),
        };
        let inverse_edit2 = invert_edit(&source_code, &edit2);
        let ranges = get_changed_ranges(&mut parser, &mut tree, &mut source_code, &edit2);
        assert_eq!(ranges, vec![range_of(&source_code, ", c: 1")]);

        // Remove the middle pair
        let ranges = get_changed_ranges(&mut parser, &mut tree, &mut source_code, &inverse_edit2);
        assert_eq!(ranges, vec![]);

        // Remove the second pair
        let ranges = get_changed_ranges(&mut parser, &mut tree, &mut source_code, &inverse_edit1);
        assert_eq!(ranges, vec![]);
    }

    // Wrapping elements in larger expressions
    {
        let mut tree = tree;
        let mut source_code = source_code.clone();

        // Replace `null` with the binary expression `b === null`
        let edit1 = Edit {
            position: index_of(&source_code, "null"),
            deleted_length: 0,
            inserted_text: b"b === ".to_vec(),
        };
        let inverse_edit1 = invert_edit(&source_code, &edit1);
        let ranges = get_changed_ranges(&mut parser, &mut tree, &mut source_code, &edit1);
        assert_eq!(ranges, vec![range_of(&source_code, "b === null")]);

        // Undo
        let ranges = get_changed_ranges(&mut parser, &mut tree, &mut source_code, &inverse_edit1);
        assert_eq!(ranges, vec![range_of(&source_code, "null")]);
    }
}

#[test]
fn test_consistency_with_mid_codepoint_edit() {
    // The heredoc delimiter contains multi-byte UTF-8 characters; byte
    // offset 17 lands inside one of them, so this exercises edits that do
    // not fall on a codepoint boundary. After applying the edit and its
    // inverse, an incremental reparse must match the original tree.
    let mut parser = Parser::new();
    parser.set_language(&get_language("php/php")).unwrap();
    let mut source_code =
        b"\n<?php\n\n<<<'\xE5\xAD\x97\xE6\xBC\xA2'\n T\n\xE5\xAD\x97\xE6\xBC\xA2;".to_vec();
    let mut tree = parser.parse(&source_code, None).unwrap();
    let edit = Edit {
        position: 17,
        deleted_length: 0,
        inserted_text: vec![46], // a single '.' byte
    };
    perform_edit(&mut tree, &mut source_code, &edit).unwrap();
    let mut tree2 = parser.parse(&source_code, Some(&tree)).unwrap();
    let inverted = invert_edit(&source_code, &edit);
    perform_edit(&mut tree2, &mut source_code, &inverted).unwrap();
    let tree3 = parser.parse(&source_code, Some(&tree2)).unwrap();
    assert_eq!(tree3.root_node().to_sexp(), tree.root_node().to_sexp());
}

#[test]
fn test_tree_cursor_on_aliased_root_with_extra_child() {
    // The comment inside `C/* hi */::<D>::E` is an extra node; the cursor
    // must still visit it as a sibling under the aliased `generic_type`.
    let source = r"
fn main() {
C/* hi */::<D>::E;
}
";
    let mut parser = Parser::new();
    parser.set_language(&get_language("rust")).unwrap();
    let tree = parser.parse(source, None).unwrap();
    let function = tree.root_node().child(0).unwrap();
    let block = function.child(3).unwrap();
    let expression_statement = block.child(1).unwrap();
    let scoped_identifier = expression_statement.child(0).unwrap();
    let generic_type = scoped_identifier.child(0).unwrap();
    assert_eq!(generic_type.kind(), "generic_type");
    let mut cursor = generic_type.walk();
    assert!(cursor.goto_first_child());
    assert_eq!(cursor.node().kind(), "type_identifier");
    assert!(cursor.goto_next_sibling());
    assert_eq!(cursor.node().kind(), "block_comment");
}
/// Returns the byte offset of the first occurrence of `substring` in
/// `text`. Panics if `text` is not valid UTF-8 or the substring is
/// absent — both are programmer errors in these tests.
fn index_of(text: &[u8], substring: &str) -> usize {
    let haystack = str::from_utf8(text).unwrap();
    haystack.find(substring).unwrap()
}
/// Builds a `Range` covering the first occurrence of `substring`,
/// assuming the match lies entirely on row 0 — which holds for every
/// source string these tests pass in.
fn range_of(text: &[u8], substring: &str) -> Range {
    let start = index_of(text, substring);
    let end = start + substring.len();
    Range {
        start_byte: start,
        end_byte: end,
        start_point: Point::new(0, start),
        end_point: Point::new(0, end),
    }
}
/// Applies `edit` to both `tree` and `source_code` in place, reparses
/// incrementally, and returns the ranges whose syntax differs between
/// the old and new trees. `tree` is replaced with the fresh parse so
/// successive calls can be chained.
fn get_changed_ranges(
    parser: &mut Parser,
    tree: &mut Tree,
    source_code: &mut Vec<u8>,
    edit: &Edit,
) -> Vec<Range> {
    perform_edit(tree, source_code, edit).unwrap();
    let new_tree = parser.parse(source_code, Some(tree)).unwrap();
    let result = tree.changed_ranges(&new_tree).collect();
    *tree = new_tree;
    result
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests/tags_test.rs | crates/cli/src/tests/tags_test.rs | use std::{
ffi::{CStr, CString},
fs, ptr, slice, str,
sync::atomic::{AtomicUsize, Ordering},
};
use tree_sitter::Point;
use tree_sitter_tags::{c_lib as c, Error, TagsConfiguration, TagsContext};
use super::helpers::{
allocations,
fixtures::{get_language, get_language_queries_path},
};
// Tag query for Python: function and class definitions (with a leading
// docstring captured as @doc and stripped of quotes/whitespace) plus
// plain and attribute-call references.
const PYTHON_TAG_QUERY: &str = r#"
(
(function_definition
name: (identifier) @name
body: (block . (expression_statement (string) @doc))) @definition.function
(#strip! @doc "(^['\"\\s]*)|(['\"\\s]*$)")
)
(function_definition
name: (identifier) @name) @definition.function
(
(class_definition
name: (identifier) @name
body: (block
. (expression_statement (string) @doc))) @definition.class
(#strip! @doc "(^['\"\\s]*)|(['\"\\s]*$)")
)
(class_definition
name: (identifier) @name) @definition.class
(call
function: (identifier) @name) @reference.call
(call
function: (attribute
attribute: (identifier) @name)) @reference.call
"#;

// Tag query for JavaScript: classes, methods, and functions, each with
// adjacent comments selected as docs, plus call references.
const JS_TAG_QUERY: &str = r#"
(
(comment)* @doc .
(class_declaration
name: (identifier) @name) @definition.class
(#select-adjacent! @doc @definition.class)
(#strip! @doc "(^[/\\*\\s]*)|([/\\*\\s]*$)")
)
(
(comment)* @doc .
(method_definition
name: (property_identifier) @name) @definition.method
(#select-adjacent! @doc @definition.method)
(#strip! @doc "(^[/\\*\\s]*)|([/\\*\\s]*$)")
)
(
(comment)* @doc .
(function_declaration
name: (identifier) @name) @definition.function
(#select-adjacent! @doc @definition.function)
(#strip! @doc "(^[/\\*\\s]*)|([/\\*\\s]*$)")
)
(call_expression
function: (identifier) @name) @reference.call
"#;

// Tag query for Ruby: method definitions and call references; bare
// identifiers only count as calls when the locals query (`#is-not?
// local`) says they are not in scope.
const RUBY_TAG_QUERY: &str = r"
(method
name: (_) @name) @definition.method
(call
method: (identifier) @name) @reference.call
(setter (identifier) @ignore)
((identifier) @name @reference.call
(#is-not? local))
";
#[test]
fn test_tags_python() {
    // Class and function definitions are tagged together with their
    // docstrings; the call inside the method is tagged as a reference.
    let language = get_language("python");
    let tags_config = TagsConfiguration::new(language, PYTHON_TAG_QUERY, "").unwrap();
    let mut tag_context = TagsContext::new();

    let source = br#"
class Customer:
"""
Data about a customer
"""
def age(self):
'''
Get the customer's age
'''
compute_age(self.id)
}
"#;

    let tags = tag_context
        .generate_tags(&tags_config, source, None)
        .unwrap()
        .0
        .collect::<Result<Vec<_>, _>>()
        .unwrap();

    assert_eq!(
        tags.iter()
            .map(|t| (
                substr(source, &t.name_range),
                tags_config.syntax_type_name(t.syntax_type_id)
            ))
            .collect::<Vec<_>>(),
        &[
            ("Customer", "class"),
            ("age", "function"),
            ("compute_age", "call"),
        ]
    );

    assert_eq!(substr(source, &tags[0].line_range), "class Customer:");
    assert_eq!(substr(source, &tags[1].line_range), "def age(self):");
    // The `#strip!` predicate removes the quotes/whitespace around docs.
    assert_eq!(tags[0].docs.as_ref().unwrap(), "Data about a customer");
    assert_eq!(tags[1].docs.as_ref().unwrap(), "Get the customer's age");
}

#[test]
fn test_tags_javascript() {
    // Adjacent line comments are merged into one doc string; detached
    // comments (like the lone `// ok`) are not attached to a definition.
    let language = get_language("javascript");
    let tags_config = TagsConfiguration::new(language, JS_TAG_QUERY, "").unwrap();
    let source = br"
// hi
// Data about a customer.
// bla bla bla
class Customer {
/*
* Get the customer's age
*/
getAge() {
}
}
// ok
class Agent {
}
";
    let mut tag_context = TagsContext::new();
    let tags = tag_context
        .generate_tags(&tags_config, source, None)
        .unwrap()
        .0
        .collect::<Result<Vec<_>, _>>()
        .unwrap();

    assert_eq!(
        tags.iter()
            .map(|t| (
                substr(source, &t.name_range),
                t.span.clone(),
                tags_config.syntax_type_name(t.syntax_type_id)
            ))
            .collect::<Vec<_>>(),
        &[
            ("Customer", Point::new(5, 10)..Point::new(5, 18), "class",),
            ("getAge", Point::new(9, 8)..Point::new(9, 14), "method",),
            ("Agent", Point::new(15, 10)..Point::new(15, 15), "class",)
        ]
    );
    assert_eq!(
        tags[0].docs.as_ref().unwrap(),
        "Data about a customer.\nbla bla bla"
    );
    assert_eq!(tags[1].docs.as_ref().unwrap(), "Get the customer's age");
    assert_eq!(tags[2].docs, None);
}

#[test]
fn test_tags_columns_measured_in_utf16_code_units() {
    // The emoji string before the call is multi-byte in UTF-8 but two
    // code units per heart in UTF-16; byte and UTF-16 columns must differ.
    let language = get_language("python");
    let tags_config = TagsConfiguration::new(language, PYTHON_TAG_QUERY, "").unwrap();
    let mut tag_context = TagsContext::new();

    let source = r#""❤️❤️❤️".hello_α_ω()"#.as_bytes();

    let tag = tag_context
        .generate_tags(&tags_config, source, None)
        .unwrap()
        .0
        .next()
        .unwrap()
        .unwrap();

    assert_eq!(substr(source, &tag.name_range), "hello_α_ω");
    assert_eq!(tag.span, Point::new(0, 21)..Point::new(0, 32));
    assert_eq!(tag.utf16_column_range, 9..18);
}

#[test]
fn test_tags_ruby() {
    // Uses the real Ruby locals query: identifiers that resolve to a
    // local variable (per the comments in the source below) must NOT be
    // reported as call references.
    let language = get_language("ruby");
    let locals_query =
        fs::read_to_string(get_language_queries_path("ruby").join("locals.scm")).unwrap();
    let tags_config = TagsConfiguration::new(language, RUBY_TAG_QUERY, &locals_query).unwrap();
    let source = strip_whitespace(
        8,
        "
b = 1
def foo=()
c = 1
# a is a method because it is not in scope
# b is a method because `b` doesn't capture variables from its containing scope
bar a, b, c
[1, 2, 3].each do |a|
# a is a parameter
# b is a method
# c is a variable, because the block captures variables from its containing scope.
baz a, b, c
end
end",
    );

    let mut tag_context = TagsContext::new();
    let tags = tag_context
        .generate_tags(&tags_config, source.as_bytes(), None)
        .unwrap()
        .0
        .collect::<Result<Vec<_>, _>>()
        .unwrap();

    assert_eq!(
        tags.iter()
            .map(|t| (
                substr(source.as_bytes(), &t.name_range),
                tags_config.syntax_type_name(t.syntax_type_id),
                (t.span.start.row, t.span.start.column),
            ))
            .collect::<Vec<_>>(),
        &[
            ("foo=", "method", (2, 4)),
            ("bar", "call", (7, 4)),
            ("a", "call", (7, 8)),
            ("b", "call", (7, 11)),
            ("each", "call", (9, 14)),
            ("baz", "call", (13, 8)),
            ("b", "call", (13, 15),),
        ]
    );
}

#[test]
fn test_tags_cancellation() {
    allocations::record(|| {
        // Large javascript document
        let source = "/* hi */ class A { /* ok */ b() {} }\n".repeat(500);

        let cancellation_flag = AtomicUsize::new(0);
        let language = get_language("javascript");
        let tags_config = TagsConfiguration::new(language, JS_TAG_QUERY, "").unwrap();

        let mut tag_context = TagsContext::new();
        let tags = tag_context
            .generate_tags(&tags_config, source.as_bytes(), Some(&cancellation_flag))
            .unwrap();

        // Set the flag mid-iteration; the iterator must then yield
        // `Error::Cancelled` instead of running to completion.
        let found_cancellation_error = tags.0.enumerate().any(|(i, tag)| {
            if i == 150 {
                cancellation_flag.store(1, Ordering::SeqCst);
            }
            match tag {
                Ok(_) => false,
                Err(Error::Cancelled) => true,
                Err(e) => {
                    unreachable!("Unexpected error type while iterating tags: {e}")
                }
            }
        });
        assert!(
            found_cancellation_error,
            "Expected to halt tagging with a cancellation error"
        );
    });
}
#[test]
fn test_invalid_capture() {
    // `@method` is not a capture name the tags schema accepts, so building
    // the configuration must fail with `InvalidCapture` naming it.
    let err = TagsConfiguration::new(get_language("python"), "(identifier) @method", "")
        .expect_err("expected InvalidCapture error");
    assert_eq!(err, Error::InvalidCapture("method".to_string()));
}
#[test]
fn test_tags_with_parse_error() {
    // A syntax error must set the `failed` flag while still yielding tags
    // for the parts of the file that parsed cleanly.
    let language = get_language("python");
    let tags_config = TagsConfiguration::new(language, PYTHON_TAG_QUERY, "").unwrap();
    let mut tag_context = TagsContext::new();

    let source = br"
class Fine: pass
class Bad
";

    let (tags, failed) = tag_context
        .generate_tags(&tags_config, source, None)
        .unwrap();

    let newtags = tags.collect::<Result<Vec<_>, _>>().unwrap();

    assert!(failed, "syntax error should have been detected");
    assert_eq!(
        newtags
            .iter()
            .map(|t| (
                substr(source, &t.name_range),
                tags_config.syntax_type_name(t.syntax_type_id)
            ))
            .collect::<Vec<_>>(),
        &[("Fine", "class"),]
    );
}

#[test]
fn test_tags_via_c_api() {
    // Exercises the C FFI surface end to end: register a language, tag a
    // buffer, then read tags, docs, and syntax-kind names back out of the
    // raw buffers. `allocations::record` checks nothing leaks.
    allocations::record(|| {
        let tagger = c::ts_tagger_new();
        let buffer = c::ts_tags_buffer_new();
        let scope_name = "source.js";
        let language = get_language("javascript");

        let source_code = strip_whitespace(
            12,
            "
var a = 1;
// one
// two
// three
function b() {
}
// four
// five
class C extends D {
}
b(a);",
        );

        let c_scope_name = CString::new(scope_name).unwrap();
        let result = unsafe {
            c::ts_tagger_add_language(
                tagger,
                c_scope_name.as_ptr(),
                language,
                JS_TAG_QUERY.as_ptr(),
                ptr::null(),
                JS_TAG_QUERY.len() as u32,
                0,
            )
        };
        assert_eq!(result, c::TSTagsError::Ok);

        let result = unsafe {
            c::ts_tagger_tag(
                tagger,
                c_scope_name.as_ptr(),
                source_code.as_ptr(),
                source_code.len() as u32,
                buffer,
                ptr::null(),
            )
        };
        assert_eq!(result, c::TSTagsError::Ok);
        // The buffer exposes raw arrays; rebuild slices over them.
        let tags = unsafe {
            slice::from_raw_parts(
                c::ts_tags_buffer_tags(buffer),
                c::ts_tags_buffer_tags_len(buffer) as usize,
            )
        };
        let docs = str::from_utf8(unsafe {
            slice::from_raw_parts(
                c::ts_tags_buffer_docs(buffer).cast::<u8>(),
                c::ts_tags_buffer_docs_len(buffer) as usize,
            )
        })
        .unwrap();
        let syntax_types = unsafe {
            let mut len = 0;
            let ptr = c::ts_tagger_syntax_kinds_for_scope_name(
                tagger,
                c_scope_name.as_ptr(),
                &raw mut len,
            );
            slice::from_raw_parts(ptr, len as usize)
                .iter()
                .map(|i| CStr::from_ptr(*i).to_str().unwrap())
                .collect::<Vec<_>>()
        };

        assert_eq!(
            tags.iter()
                .map(|tag| (
                    syntax_types[tag.syntax_type_id as usize],
                    &source_code[tag.name_start_byte as usize..tag.name_end_byte as usize],
                    &source_code[tag.line_start_byte as usize..tag.line_end_byte as usize],
                    &docs[tag.docs_start_byte as usize..tag.docs_end_byte as usize],
                ))
                .collect::<Vec<_>>(),
            &[
                ("function", "b", "function b() {", "one\ntwo\nthree"),
                ("class", "C", "class C extends D {", "four\nfive"),
                ("call", "b", "b(a);", "")
            ]
        );

        unsafe {
            c::ts_tags_buffer_delete(buffer);
            c::ts_tagger_delete(tagger);
        }
    });
}
/// Slices `range` out of `source` and returns it as UTF-8 text,
/// panicking if the bytes are not valid UTF-8.
fn substr<'a>(source: &'a [u8], range: &std::ops::Range<usize>) -> &'a str {
    let bytes = &source[range.start..range.end];
    std::str::from_utf8(bytes).unwrap()
}
/// Drops the first (blank) line of a raw string literal and removes up
/// to `indent` leading bytes from every remaining line, joining the
/// result with newlines. Lines shorter than `indent` become empty.
fn strip_whitespace(indent: usize, s: &str) -> String {
    let mut out = String::new();
    for (i, line) in s.lines().skip(1).enumerate() {
        if i > 0 {
            out.push('\n');
        }
        out.push_str(&line[line.len().min(indent)..]);
    }
    out
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests/detect_language.rs | crates/cli/src/tests/detect_language.rs | use std::{fs, path::Path};
use tree_sitter_loader::Loader;
use crate::tests::helpers::fixtures::scratch_dir;
#[test]
fn detect_language_by_first_line_regex() {
    // A grammar with `first-line-regex` should match files whose first
    // line fits the pattern, and a plain file-type match elsewhere should
    // take precedence over the regex.
    let strace_dir = tree_sitter_dir(
        r#"{
"grammars": [
{
"name": "strace",
"path": ".",
"scope": "source.strace",
"file-types": [
"strace"
],
"first-line-regex": "[0-9:.]* *execve"
}
],
"metadata": {
"version": "0.0.1"
}
}
"#,
        "strace",
    );
    let mut loader = Loader::with_parser_lib_path(scratch_dir().to_path_buf());
    let config = loader
        .find_language_configurations_at_path(strace_dir.path(), false)
        .unwrap();

    // this is just to validate that we can read the tree-sitter.json correctly
    assert_eq!(config[0].scope.as_ref().unwrap(), "source.strace");

    let file_name = strace_dir.path().join("strace.log");
    fs::write(&file_name, "execve\nworld").unwrap();
    assert_eq!(
        get_lang_scope(&loader, &file_name),
        Some("source.strace".into())
    );

    let file_name = strace_dir.path().join("strace.log");
    fs::write(&file_name, "447845 execve\nworld").unwrap();
    assert_eq!(
        get_lang_scope(&loader, &file_name),
        Some("source.strace".into())
    );

    // The regex only applies to the first line, and empty files match
    // nothing.
    let file_name = strace_dir.path().join("strace.log");
    fs::write(&file_name, "hello\nexecve").unwrap();
    assert!(get_lang_scope(&loader, &file_name).is_none());

    let file_name = strace_dir.path().join("strace.log");
    fs::write(&file_name, "").unwrap();
    assert!(get_lang_scope(&loader, &file_name).is_none());

    let dummy_dir = tree_sitter_dir(
        r#"{
"grammars": [
{
"name": "dummy",
"scope": "source.dummy",
"path": ".",
"file-types": [
"dummy"
]
}
],
"metadata": {
"version": "0.0.1"
}
}
"#,
        "dummy",
    );

    // file-type takes precedence over first-line-regex
    loader
        .find_language_configurations_at_path(dummy_dir.path(), false)
        .unwrap();
    let file_name = dummy_dir.path().join("strace.dummy");
    fs::write(&file_name, "execve").unwrap();
    assert_eq!(
        get_lang_scope(&loader, &file_name),
        Some("source.dummy".into())
    );
}

#[test]
fn detect_language_by_double_barrel_file_extension() {
    // A file-type with an embedded dot ("blade.php") must match the full
    // compound extension, not just the final ".php".
    let blade_dir = tree_sitter_dir(
        r#"{
"grammars": [
{
"name": "blade",
"path": ".",
"scope": "source.blade",
"file-types": [
"blade.php"
]
}
],
"metadata": {
"version": "0.0.1"
}
}
"#,
        "blade",
    );
    let mut loader = Loader::with_parser_lib_path(scratch_dir().to_path_buf());
    let config = loader
        .find_language_configurations_at_path(blade_dir.path(), false)
        .unwrap();

    // this is just to validate that we can read the tree-sitter.json correctly
    assert_eq!(config[0].scope.as_ref().unwrap(), "source.blade");

    let file_name = blade_dir.path().join("foo.blade.php");
    fs::write(&file_name, "").unwrap();
    assert_eq!(
        get_lang_scope(&loader, &file_name),
        Some("source.blade".into())
    );
}

#[test]
fn detect_language_without_filename() {
    // A file-type that is itself a dotfile name (".gitignore") must match
    // files that consist only of that name.
    let gitignore_dir = tree_sitter_dir(
        r#"{
"grammars": [
{
"name": "gitignore",
"path": ".",
"scope": "source.gitignore",
"file-types": [
".gitignore"
]
}
],
"metadata": {
"version": "0.0.1"
}
}
"#,
        "gitignore",
    );
    let mut loader = Loader::with_parser_lib_path(scratch_dir().to_path_buf());
    let config = loader
        .find_language_configurations_at_path(gitignore_dir.path(), false)
        .unwrap();

    // this is just to validate that we can read the tree-sitter.json correctly
    assert_eq!(config[0].scope.as_ref().unwrap(), "source.gitignore");

    let file_name = gitignore_dir.path().join(".gitignore");
    fs::write(&file_name, "").unwrap();
    assert_eq!(
        get_lang_scope(&loader, &file_name),
        Some("source.gitignore".into())
    );
}

#[test]
fn detect_language_without_file_extension() {
    // A bare file name with no extension ("ssh_config") must also be
    // matchable as a file-type.
    let ssh_config_dir = tree_sitter_dir(
        r#"{
"grammars": [
{
"name": "ssh_config",
"path": ".",
"scope": "source.ssh_config",
"file-types": [
"ssh_config"
]
}
],
"metadata": {
"version": "0.0.1"
}
}
"#,
        "ssh_config",
    );
    let mut loader = Loader::with_parser_lib_path(scratch_dir().to_path_buf());
    let config = loader
        .find_language_configurations_at_path(ssh_config_dir.path(), false)
        .unwrap();

    // this is just to validate that we can read the tree-sitter.json correctly
    assert_eq!(config[0].scope.as_ref().unwrap(), "source.ssh_config");

    let file_name = ssh_config_dir.path().join("ssh_config");
    fs::write(&file_name, "").unwrap();
    assert_eq!(
        get_lang_scope(&loader, &file_name),
        Some("source.ssh_config".into())
    );
}

/// Creates a temporary grammar directory containing the given
/// `tree-sitter.json`, a minimal `grammar.json`, and a stub `parser.c`
/// exporting `tree_sitter_<name>`, so the loader can treat it as a real
/// grammar repo. The `TempDir` keeps the directory alive for the caller.
fn tree_sitter_dir(tree_sitter_json: &str, name: &str) -> tempfile::TempDir {
    let temp_dir = tempfile::tempdir().unwrap();
    fs::write(temp_dir.path().join("tree-sitter.json"), tree_sitter_json).unwrap();
    fs::create_dir_all(temp_dir.path().join("src/tree_sitter")).unwrap();
    fs::write(
        temp_dir.path().join("src/grammar.json"),
        format!(r#"{{"name":"{name}"}}"#),
    )
    .unwrap();
    // Stub parser: exports the expected symbol but does nothing.
    fs::write(
        temp_dir.path().join("src/parser.c"),
        format!(
            r#"
#include "tree_sitter/parser.h"
#ifdef _WIN32
#define TS_PUBLIC __declspec(dllexport)
#else
#define TS_PUBLIC __attribute__((visibility("default")))
#endif
TS_PUBLIC const TSLanguage *tree_sitter_{name}() {{}}
"#
        ),
    )
    .unwrap();
    fs::write(
        temp_dir.path().join("src/tree_sitter/parser.h"),
        include_str!("../../../../lib/src/parser.h"),
    )
    .unwrap();
    temp_dir
}
// If we manage to get the language scope, it means we correctly detected the file-type
fn get_lang_scope(loader: &Loader, file_name: &Path) -> Option<String> {
    // First try detection by file name / extension; only when that
    // succeeds with no match do we fall back to the first-line regex.
    match loader.language_configuration_for_file_name(file_name) {
        Ok(Some((_, config))) => config.scope.clone(),
        Ok(None) => match loader.language_configuration_for_first_line_regex(file_name) {
            Ok(Some((_, config))) => config.scope.clone(),
            _ => None,
        },
        Err(_) => None,
    }
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests/test_tags_test.rs | crates/cli/src/tests/test_tags_test.rs | use tree_sitter::Parser;
use tree_sitter_tags::TagsContext;
use super::helpers::fixtures::{get_language, get_tags_config};
use crate::{
query_testing::{parse_position_comments, Assertion, Utf8Point},
test_tags::get_tag_positions,
};
#[test]
fn test_tags_test_with_basic_test() {
    // Checks both halves of the tag-testing pipeline: the position-comment
    // parser (including negated `!` assertions) and the tag positions the
    // tagger actually produces for the same source.
    let language = get_language("python");
    let config = get_tags_config("python");
    let source = [
        "# hi",
        "def abc(d):",
        " # <- definition.function",
        " e = fgh(d)",
        " # ^ reference.call",
        " return d(e)",
        " # ^ reference.call",
        " # ^ !variable.parameter",
        "",
    ]
    .join("\n");
    let assertions =
        parse_position_comments(&mut Parser::new(), &language, source.as_bytes()).unwrap();

    assert_eq!(
        assertions,
        &[
            Assertion::new(1, 4, 1, false, String::from("definition.function")),
            Assertion::new(3, 9, 1, false, String::from("reference.call")),
            Assertion::new(5, 11, 1, false, String::from("reference.call")),
            Assertion::new(5, 13, 1, true, String::from("variable.parameter")),
        ]
    );

    let mut tags_context = TagsContext::new();
    let tag_positions = get_tag_positions(&mut tags_context, &config, source.as_bytes()).unwrap();
    assert_eq!(
        tag_positions,
        &[
            (
                Utf8Point::new(1, 4),
                Utf8Point::new(1, 7),
                "definition.function".to_string()
            ),
            (
                Utf8Point::new(3, 8),
                Utf8Point::new(3, 11),
                "reference.call".to_string()
            ),
            (
                Utf8Point::new(5, 11),
                Utf8Point::new(5, 12),
                "reference.call".to_string()
            ),
        ]
    );
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests/async_boundary_test.rs | crates/cli/src/tests/async_boundary_test.rs | use std::{
future::Future,
pin::Pin,
ptr,
task::{Context, Poll, RawWaker, RawWakerVTable, Waker},
};
use tree_sitter::Parser;
use super::helpers::fixtures::get_language;
#[test]
fn test_node_across_async_boundaries() {
    // `Node` values captured by an async block (by value and by reference)
    // must remain usable after the future is suspended and resumed.
    let mut parser = Parser::new();
    let language = get_language("bash");
    parser.set_language(&language).unwrap();
    let tree = parser.parse("#", None).unwrap();
    let root = tree.root_node();
    let (result, yields) = simple_async_executor(async {
        let root_ref = &root;

        // Test node captured by value
        let fut_by_value = async {
            yield_once().await;
            root.child(0).unwrap().kind()
        };

        // Test node captured by reference
        let fut_by_ref = async {
            yield_once().await;
            root_ref.child(0).unwrap().kind()
        };

        let result1 = fut_by_value.await;
        let result2 = fut_by_ref.await;
        assert_eq!(result1, result2);
        result1
    });
    assert_eq!(result, "comment");
    // One pending poll per `yield_once` awaited.
    assert_eq!(yields, 2);
}

#[test]
fn test_cursor_across_async_boundaries() {
    // A `TreeCursor` must survive suspension points, both used directly
    // and via a mutable reference moved into a nested future.
    let mut parser = Parser::new();
    let language = get_language("c");
    parser.set_language(&language).unwrap();
    let tree = parser.parse("#", None).unwrap();
    let mut cursor = tree.walk();

    let ((), yields) = simple_async_executor(async {
        cursor.goto_first_child();

        // Test cursor usage across yield point
        yield_once().await;
        cursor.goto_first_child();

        // Test cursor in async block
        let cursor_ref = &mut cursor;
        let fut = async {
            yield_once().await;
            cursor_ref.goto_first_child();
        };
        fut.await;
    });
    assert_eq!(yields, 2);
}

#[test]
fn test_node_and_cursor_together() {
    // A node and a cursor from the same tree, captured by one future,
    // must both stay valid across suspensions.
    let mut parser = Parser::new();
    let language = get_language("javascript");
    parser.set_language(&language).unwrap();
    let tree = parser.parse("#", None).unwrap();
    let root = tree.root_node();
    let mut cursor = tree.walk();

    let ((), yields) = simple_async_executor(async {
        cursor.goto_first_child();
        let fut = async {
            yield_once().await;
            let _ = root.to_sexp();
            cursor.goto_first_child();
        };
        yield_once().await;
        fut.await;
    });
    assert_eq!(yields, 2);
}
/// Polls `future` to completion on the current thread using a no-op
/// waker, returning its output together with the number of times it
/// reported `Poll::Pending`.
fn simple_async_executor<F>(future: F) -> (F::Output, u32)
where
    F: Future,
{
    let waker = noop_waker();
    let mut context = Context::from_waker(&waker);
    let mut pinned = Box::pin(future);
    let mut pending_polls = 0;
    let output = loop {
        if let Poll::Ready(value) = pinned.as_mut().poll(&mut context) {
            break value;
        }
        pending_polls += 1;
    };
    (output, pending_polls)
}
/// A future that returns `Poll::Pending` exactly once (waking itself so
/// the executor polls again) and completes on the second poll.
async fn yield_once() {
    struct Yielder {
        done: bool,
    }
    impl Future for Yielder {
        type Output = ();
        fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
            if !self.done {
                self.done = true;
                // Re-schedule ourselves so we get polled a second time.
                cx.waker().wake_by_ref();
                return Poll::Pending;
            }
            Poll::Ready(())
        }
    }
    Yielder { done: false }.await;
}
/// Builds a `Waker` whose clone/wake/drop operations are all no-ops,
/// suitable for polling futures synchronously without a real executor.
const fn noop_waker() -> Waker {
    const RAW: RawWaker = RawWaker::new(ptr::null(), &VTABLE);
    const VTABLE: RawWakerVTable = RawWakerVTable::new(
        // clone: hand back another identical no-op raw waker
        |_| RAW,
        // wake: nothing to schedule
        |_| {},
        // wake_by_ref: nothing to schedule
        |_| {},
        // drop: nothing was allocated
        |_| {},
    );
    // SAFETY: the vtable functions trivially uphold the `RawWaker`
    // contract — none of them ever dereferences the (null) data pointer.
    unsafe { Waker::from_raw(RAW) }
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests/text_provider_test.rs | crates/cli/src/tests/text_provider_test.rs | use std::{iter, sync::Arc};
use streaming_iterator::StreamingIterator;
use tree_sitter::{Language, Node, Parser, Point, Query, QueryCursor, TextProvider, Tree};
use crate::tests::helpers::fixtures::get_language;
/// Parses `text` with the C grammar, returning the tree and language.
fn parse_text(text: impl AsRef<[u8]>) -> (Tree, Language) {
    let language = get_language("c");
    let mut parser = Parser::new();
    parser.set_language(&language).unwrap();
    (parser.parse(text, None).unwrap(), language)
}

/// Parses via a chunk-producing callback with the C grammar, asserting
/// that the result starts with a `comment` node (all callers feed
/// `// comment`), and returns the tree and language.
fn parse_text_with<T, F>(callback: &mut F) -> (Tree, Language)
where
    T: AsRef<[u8]>,
    F: FnMut(usize, Point) -> T,
{
    let language = get_language("c");
    let mut parser = Parser::new();
    parser.set_language(&language).unwrap();
    let tree = parser.parse_with_options(callback, None, None).unwrap();
    assert_eq!("comment", tree.root_node().child(0).unwrap().kind());
    (tree, language)
}

/// Runs a query with an `#eq?` text predicate over `tree` using the
/// given `TextProvider`, asserting that the predicate matched a comment
/// node — i.e. that the provider supplied the expected text.
fn tree_query<I: AsRef<[u8]>>(tree: &Tree, text: impl TextProvider<I>, language: &Language) {
    let query = Query::new(language, "((comment) @c (#eq? @c \"// comment\"))").unwrap();
    let mut cursor = QueryCursor::new();
    let mut captures = cursor.captures(&query, tree.root_node(), text);

    let (match_, idx) = captures.next().unwrap();
    let capture = match_.captures[*idx];
    assert_eq!(capture.index as usize, *idx);
    assert_eq!("comment", capture.node.kind());
}

/// Parses `parser_text` then queries the tree via `text_provider`.
fn check_parsing<I: AsRef<[u8]>>(
    parser_text: impl AsRef<[u8]>,
    text_provider: impl TextProvider<I>,
) {
    let (tree, language) = parse_text(parser_text);
    tree_query(&tree, text_provider, &language);
}

/// Parses via `parser_callback` then queries the tree via `text_provider`.
fn check_parsing_callback<T, F, I: AsRef<[u8]>>(
    parser_callback: &mut F,
    text_provider: impl TextProvider<I>,
) where
    T: AsRef<[u8]>,
    F: FnMut(usize, Point) -> T,
{
    let (tree, language) = parse_text_with(parser_callback);
    tree_query(&tree, text_provider, &language);
}
// The tests below all check that the `TextProvider` trait accepts the
// various owned/borrowed text container shapes callers might hold.

#[test]
fn test_text_provider_for_str_slice() {
    let text: &str = "// comment";

    check_parsing(text, text.as_bytes());
    check_parsing(text.as_bytes(), text.as_bytes());
}

#[test]
fn test_text_provider_for_string() {
    let text: String = "// comment".to_owned();

    check_parsing(text.clone(), text.as_bytes());
    check_parsing(text.as_bytes(), text.as_bytes());
    check_parsing(<_ as AsRef<[u8]>>::as_ref(&text), text.as_bytes());
}

#[test]
fn test_text_provider_for_box_of_str_slice() {
    let text = "// comment".to_owned().into_boxed_str();

    check_parsing(text.as_bytes(), text.as_bytes());
    check_parsing(<_ as AsRef<str>>::as_ref(&text), text.as_bytes());
    check_parsing(text.as_ref(), text.as_ref().as_bytes());
    check_parsing(text.as_ref(), text.as_bytes());
}

#[test]
fn test_text_provider_for_box_of_bytes_slice() {
    let text = "// comment".to_owned().into_boxed_str().into_boxed_bytes();

    check_parsing(text.as_ref(), text.as_ref());
    check_parsing(text.as_ref(), &*text);
    check_parsing(&*text, &*text);
}

#[test]
fn test_text_provider_for_vec_of_bytes() {
    let text = "// comment".to_owned().into_bytes();

    check_parsing(&*text, &*text);
}

#[test]
fn test_text_provider_for_arc_of_bytes_slice() {
    let text: Arc<[u8]> = Arc::from("// comment".to_owned().into_bytes());

    check_parsing(&*text, &*text);
    check_parsing(text.as_ref(), text.as_ref());
    check_parsing(text.clone(), text.as_ref());
}

#[test]
fn test_text_provider_for_vec_utf16_text() {
    // Round-trip: parse UTF-16LE input, then read the node text back as
    // UTF-16 code units.
    let source_text = "你好".encode_utf16().collect::<Vec<_>>();
    let language = get_language("c");
    let mut parser = Parser::new();
    parser.set_language(&language).unwrap();
    let tree = parser.parse_utf16_le(&source_text, None).unwrap();
    let tree_text = tree.root_node().utf16_text(&source_text);
    assert_eq!(source_text, tree_text);
}

// The remaining tests drive the parser through a chunk callback and the
// query through closure-based text providers yielding various types.

#[test]
fn test_text_provider_callback_with_str_slice() {
    let text: &str = "// comment";

    check_parsing(text, |_node: Node<'_>| iter::once(text));
    check_parsing_callback(
        &mut |offset, _point| {
            (offset < text.len())
                .then_some(text.as_bytes())
                .unwrap_or_default()
        },
        |_node: Node<'_>| iter::once(text),
    );
}

#[test]
fn test_text_provider_callback_with_owned_string_slice() {
    let text: &str = "// comment";

    check_parsing_callback(
        &mut |offset, _point| {
            (offset < text.len())
                .then_some(text.as_bytes())
                .unwrap_or_default()
        },
        |_node: Node<'_>| {
            let slice: String = text.to_owned();
            iter::once(slice)
        },
    );
}

#[test]
fn test_text_provider_callback_with_owned_bytes_vec_slice() {
    let text: &str = "// comment";

    check_parsing_callback(
        &mut |offset, _point| {
            (offset < text.len())
                .then_some(text.as_bytes())
                .unwrap_or_default()
        },
        |_node: Node<'_>| {
            let slice = text.to_owned().into_bytes();
            iter::once(slice)
        },
    );
}

#[test]
fn test_text_provider_callback_with_owned_arc_of_bytes_slice() {
    let text: &str = "// comment";

    check_parsing_callback(
        &mut |offset, _point| {
            (offset < text.len())
                .then_some(text.as_bytes())
                .unwrap_or_default()
        },
        |_node: Node<'_>| {
            let slice: Arc<[u8]> = text.to_owned().into_bytes().into();
            iter::once(slice)
        },
    );
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests/wasm_language_test.rs | crates/cli/src/tests/wasm_language_test.rs | use std::fs;
use streaming_iterator::StreamingIterator;
use tree_sitter::{Parser, Query, QueryCursor, WasmError, WasmErrorKind, WasmStore};
use crate::tests::helpers::{
allocations,
fixtures::{get_test_fixture_language_wasm, ENGINE, WASM_DIR},
};
#[test]
fn test_wasm_stdlib_symbols() {
    let symbols = tree_sitter::wasm_stdlib_symbols().collect::<Vec<_>>();

    // The exported symbol list must already be in sorted order.
    let mut sorted = symbols.clone();
    sorted.sort_unstable();
    assert_eq!(symbols, sorted, "symbols aren't sorted");

    // Spot-check the allocator / memory intrinsics.
    for required in ["malloc", "free", "memset", "memcpy"] {
        assert!(symbols.contains(&required));
    }
}
// Each test below loads a pre-built grammar .wasm into a `WasmStore`,
// hands the store to a parser, and checks a small parse. The
// `allocations::record` wrapper verifies that nothing leaks.

#[test]
fn test_load_wasm_ruby_language() {
    allocations::record(|| {
        let mut store = WasmStore::new(&ENGINE).unwrap();
        let mut parser = Parser::new();
        let wasm = fs::read(WASM_DIR.join("tree-sitter-ruby.wasm")).unwrap();
        let language = store.load_language("ruby", &wasm).unwrap();
        parser.set_wasm_store(store).unwrap();
        parser.set_language(&language).unwrap();
        let tree = parser.parse("class A; end", None).unwrap();
        assert_eq!(
            tree.root_node().to_sexp(),
            "(program (class name: (constant)))"
        );
    });
}

#[test]
fn test_load_wasm_html_language() {
    allocations::record(|| {
        let mut store = WasmStore::new(&ENGINE).unwrap();
        let mut parser = Parser::new();
        let wasm = fs::read(WASM_DIR.join("tree-sitter-html.wasm")).unwrap();
        let language = store.load_language("html", &wasm).unwrap();
        parser.set_wasm_store(store).unwrap();
        parser.set_language(&language).unwrap();
        let tree = parser
            .parse("<div><span></span><p></p></div>", None)
            .unwrap();
        assert_eq!(
            tree.root_node().to_sexp(),
            "(document (element (start_tag (tag_name)) (element (start_tag (tag_name)) (end_tag (tag_name))) (element (start_tag (tag_name)) (end_tag (tag_name))) (end_tag (tag_name))))"
        );
    });
}

#[test]
fn test_load_wasm_rust_language() {
    allocations::record(|| {
        let mut store = WasmStore::new(&ENGINE).unwrap();
        let mut parser = Parser::new();
        let wasm = fs::read(WASM_DIR.join("tree-sitter-rust.wasm")).unwrap();
        let language = store.load_language("rust", &wasm).unwrap();
        parser.set_wasm_store(store).unwrap();
        parser.set_language(&language).unwrap();
        let tree = parser.parse("fn main() {}", None).unwrap();
        assert_eq!(tree.root_node().to_sexp(), "(source_file (function_item name: (identifier) parameters: (parameters) body: (block)))");
    });
}

#[test]
fn test_load_wasm_javascript_language() {
    allocations::record(|| {
        let mut store = WasmStore::new(&ENGINE).unwrap();
        let mut parser = Parser::new();
        let wasm = fs::read(WASM_DIR.join("tree-sitter-javascript.wasm")).unwrap();
        let language = store.load_language("javascript", &wasm).unwrap();
        parser.set_wasm_store(store).unwrap();
        parser.set_language(&language).unwrap();
        let tree = parser.parse("const a = b\nconst c = d", None).unwrap();
        assert_eq!(tree.root_node().to_sexp(), "(program (lexical_declaration (variable_declarator name: (identifier) value: (identifier))) (lexical_declaration (variable_declarator name: (identifier) value: (identifier))))");
    });
}

#[test]
fn test_load_wasm_python_language() {
    allocations::record(|| {
        let mut store = WasmStore::new(&ENGINE).unwrap();
        let mut parser = Parser::new();
        let wasm = fs::read(WASM_DIR.join("tree-sitter-python.wasm")).unwrap();
        let language = store.load_language("python", &wasm).unwrap();
        parser.set_wasm_store(store).unwrap();
        parser.set_language(&language).unwrap();
        let tree = parser.parse("a = b\nc = d", None).unwrap();
        assert_eq!(tree.root_node().to_sexp(), "(module (expression_statement (assignment left: (identifier) right: (identifier))) (expression_statement (assignment left: (identifier) right: (identifier))))");
    });
}

#[test]
fn test_load_fixture_language_wasm() {
    // Loads a grammar built from the test fixtures (one that defines
    // external tokens) rather than a published grammar.
    allocations::record(|| {
        let store = WasmStore::new(&ENGINE).unwrap();
        let mut parser = Parser::new();
        let language = get_test_fixture_language_wasm("epsilon_external_tokens");
        parser.set_wasm_store(store).unwrap();
        parser.set_language(&language).unwrap();
        let tree = parser.parse("hello", None).unwrap();
        assert_eq!(tree.root_node().to_sexp(), "(document (zero_width))");
    });
}
#[test]
fn test_load_multiple_wasm_languages() {
allocations::record(|| {
let mut store = WasmStore::new(&ENGINE).unwrap();
let mut parser = Parser::new();
let wasm_cpp = fs::read(WASM_DIR.join("tree-sitter-cpp.wasm")).unwrap();
let wasm_rs = fs::read(WASM_DIR.join("tree-sitter-rust.wasm")).unwrap();
let wasm_rb = fs::read(WASM_DIR.join("tree-sitter-ruby.wasm")).unwrap();
let wasm_typescript = fs::read(WASM_DIR.join("tree-sitter-typescript.wasm")).unwrap();
let language_rust = store.load_language("rust", &wasm_rs).unwrap();
let language_cpp = store.load_language("cpp", &wasm_cpp).unwrap();
let language_ruby = store.load_language("ruby", &wasm_rb).unwrap();
let language_typescript = store.load_language("typescript", &wasm_typescript).unwrap();
parser.set_wasm_store(store).unwrap();
let mut parser2 = Parser::new();
parser2
.set_wasm_store(WasmStore::new(&ENGINE).unwrap())
.unwrap();
let mut query_cursor = QueryCursor::new();
// First, parse with the store that originally loaded the languages.
// Then parse with a new parser and Wasm store, so that the languages
// are added one-by-one, in between parses.
for mut parser in [parser, parser2] {
for _ in 0..2 {
let query_rust = Query::new(&language_rust, "(const_item) @foo").unwrap();
let query_typescript =
Query::new(&language_typescript, "(class_declaration) @foo").unwrap();
parser.set_language(&language_cpp).unwrap();
let tree = parser.parse("A<B> c = d();", None).unwrap();
assert_eq!(
tree.root_node().to_sexp(),
"(translation_unit (declaration type: (template_type name: (type_identifier) arguments: (template_argument_list (type_descriptor type: (type_identifier)))) declarator: (init_declarator declarator: (identifier) value: (call_expression function: (identifier) arguments: (argument_list)))))"
);
parser.set_language(&language_rust).unwrap();
let source = "const A: B = c();";
let tree = parser.parse(source, None).unwrap();
assert_eq!(
tree.root_node().to_sexp(),
"(source_file (const_item name: (identifier) type: (type_identifier) value: (call_expression function: (identifier) arguments: (arguments))))"
);
assert_eq!(
query_cursor
.matches(&query_rust, tree.root_node(), source.as_bytes())
.count(),
1
);
parser.set_language(&language_ruby).unwrap();
let tree = parser.parse("class A; end", None).unwrap();
assert_eq!(
tree.root_node().to_sexp(),
"(program (class name: (constant)))"
);
parser.set_language(&language_typescript).unwrap();
let tree = parser.parse("class A {}", None).unwrap();
assert_eq!(
tree.root_node().to_sexp(),
"(program (class_declaration name: (type_identifier) body: (class_body)))"
);
assert_eq!(
query_cursor
.matches(&query_typescript, tree.root_node(), source.as_bytes())
.count(),
1
);
}
}
});
}
#[test]
fn test_load_and_reload_wasm_language() {
allocations::record(|| {
let mut store = WasmStore::new(&ENGINE).unwrap();
let wasm_rust = fs::read(WASM_DIR.join("tree-sitter-rust.wasm")).unwrap();
let wasm_typescript = fs::read(WASM_DIR.join("tree-sitter-typescript.wasm")).unwrap();
let language_rust = store.load_language("rust", &wasm_rust).unwrap();
let language_typescript = store.load_language("typescript", &wasm_typescript).unwrap();
assert_eq!(store.language_count(), 2);
// When a language is dropped, stores can release their instances of that language.
drop(language_rust);
assert_eq!(store.language_count(), 1);
let language_rust = store.load_language("rust", &wasm_rust).unwrap();
assert_eq!(store.language_count(), 2);
drop(language_rust);
drop(language_typescript);
assert_eq!(store.language_count(), 0);
});
}
#[test]
fn test_reset_wasm_store() {
allocations::record(|| {
let mut language_store = WasmStore::new(&ENGINE).unwrap();
let wasm = fs::read(WASM_DIR.join("tree-sitter-rust.wasm")).unwrap();
let language = language_store.load_language("rust", &wasm).unwrap();
let mut parser = Parser::new();
let parser_store = WasmStore::new(&ENGINE).unwrap();
parser.set_wasm_store(parser_store).unwrap();
parser.set_language(&language).unwrap();
let tree = parser.parse("fn main() {}", None).unwrap();
assert_eq!(tree.root_node().to_sexp(), "(source_file (function_item name: (identifier) parameters: (parameters) body: (block)))");
let parser_store = WasmStore::new(&ENGINE).unwrap();
parser.set_wasm_store(parser_store).unwrap();
let tree = parser.parse("fn main() {}", None).unwrap();
assert_eq!(tree.root_node().to_sexp(), "(source_file (function_item name: (identifier) parameters: (parameters) body: (block)))");
});
}
#[test]
fn test_load_wasm_errors() {
allocations::record(|| {
let mut store = WasmStore::new(&ENGINE).unwrap();
let wasm = fs::read(WASM_DIR.join("tree-sitter-rust.wasm")).unwrap();
let bad_wasm = &wasm[1..];
assert_eq!(
store.load_language("rust", bad_wasm).unwrap_err(),
WasmError {
kind: WasmErrorKind::Parse,
message: "failed to parse dylink section of Wasm module".into(),
}
);
assert_eq!(
store.load_language("not_rust", &wasm).unwrap_err(),
WasmError {
kind: WasmErrorKind::Instantiate,
message: "module did not contain language function: tree_sitter_not_rust".into(),
}
);
let mut bad_wasm = wasm.clone();
bad_wasm[300..500].iter_mut().for_each(|b| *b = 0);
assert_eq!(
store.load_language("rust", &bad_wasm).unwrap_err().kind,
WasmErrorKind::Compile,
);
});
}
#[test]
fn test_wasm_oom() {
allocations::record(|| {
let mut store = WasmStore::new(&ENGINE).unwrap();
let mut parser = Parser::new();
let wasm = fs::read(WASM_DIR.join("tree-sitter-html.wasm")).unwrap();
let language = store.load_language("html", &wasm).unwrap();
parser.set_wasm_store(store).unwrap();
parser.set_language(&language).unwrap();
let tag_name = "a-b".repeat(2 * 1024 * 1024);
let code = format!("<{tag_name}>hello world</{tag_name}>");
assert!(parser.parse(&code, None).is_none());
let tag_name = "a-b".repeat(20);
let code = format!("<{tag_name}>hello world</{tag_name}>");
parser.set_language(&language).unwrap();
let tree = parser.parse(&code, None).unwrap();
assert_eq!(
tree.root_node().to_sexp(),
"(document (element (start_tag (tag_name)) (text) (end_tag (tag_name))))"
);
});
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests/test_highlight_test.rs | crates/cli/src/tests/test_highlight_test.rs | use tree_sitter::Parser;
use tree_sitter_highlight::{Highlight, Highlighter};
use super::helpers::fixtures::{get_highlight_config, get_language, test_loader};
use crate::{
query_testing::{parse_position_comments, Assertion, Utf8Point},
test_highlight::get_highlight_positions,
};
#[test]
fn test_highlight_test_with_basic_test() {
let language = get_language("javascript");
let config = get_highlight_config(
"javascript",
Some("injections.scm"),
&[
"function".to_string(),
"variable".to_string(),
"keyword".to_string(),
],
);
let source = [
"// hi",
"var abc = function(d) {",
" // ^ function",
" // ^^^ keyword",
" return d + e;",
" // ^ variable",
" // ^ !variable",
"};",
"var y̆y̆y̆y̆ = function() {}",
" // ^ function",
" // ^ keyword",
]
.join("\n");
let assertions =
parse_position_comments(&mut Parser::new(), &language, source.as_bytes()).unwrap();
assert_eq!(
assertions,
&[
Assertion::new(1, 5, 1, false, String::from("function")),
Assertion::new(1, 11, 3, false, String::from("keyword")),
Assertion::new(4, 9, 1, false, String::from("variable")),
Assertion::new(4, 11, 1, true, String::from("variable")),
Assertion::new(8, 5, 1, false, String::from("function")),
Assertion::new(8, 11, 1, false, String::from("keyword")),
]
);
let mut highlighter = Highlighter::new();
let highlight_positions =
get_highlight_positions(test_loader(), &mut highlighter, &config, source.as_bytes())
.unwrap();
assert_eq!(
highlight_positions,
&[
(Utf8Point::new(1, 0), Utf8Point::new(1, 3), Highlight(2)), // "var"
(Utf8Point::new(1, 4), Utf8Point::new(1, 7), Highlight(0)), // "abc"
(Utf8Point::new(1, 10), Utf8Point::new(1, 18), Highlight(2)), // "function"
(Utf8Point::new(1, 19), Utf8Point::new(1, 20), Highlight(1)), // "d"
(Utf8Point::new(4, 2), Utf8Point::new(4, 8), Highlight(2)), // "return"
(Utf8Point::new(4, 9), Utf8Point::new(4, 10), Highlight(1)), // "d"
(Utf8Point::new(4, 13), Utf8Point::new(4, 14), Highlight(1)), // "e"
(Utf8Point::new(8, 0), Utf8Point::new(8, 3), Highlight(2)), // "var"
(Utf8Point::new(8, 4), Utf8Point::new(8, 8), Highlight(0)), // "y̆y̆y̆y̆"
(Utf8Point::new(8, 11), Utf8Point::new(8, 19), Highlight(2)), // "function"
]
);
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests/language_test.rs | crates/cli/src/tests/language_test.rs | use tree_sitter::{self, Parser};
use super::helpers::fixtures::get_language;
#[test]
fn test_lookahead_iterator() {
let mut parser = Parser::new();
let language = get_language("rust");
parser.set_language(&language).unwrap();
let tree = parser.parse("struct Stuff {}", None).unwrap();
let mut cursor = tree.walk();
assert!(cursor.goto_first_child()); // struct
assert!(cursor.goto_first_child()); // struct keyword
let next_state = cursor.node().next_parse_state();
assert_ne!(next_state, 0);
assert_eq!(
next_state,
language.next_state(cursor.node().parse_state(), cursor.node().grammar_id())
);
assert!((next_state as usize) < language.parse_state_count());
assert!(cursor.goto_next_sibling()); // type_identifier
assert_eq!(next_state, cursor.node().parse_state());
assert_eq!(cursor.node().grammar_name(), "identifier");
assert_ne!(cursor.node().grammar_id(), cursor.node().kind_id());
let expected_symbols = ["//", "/*", "identifier", "line_comment", "block_comment"];
let mut lookahead = language.lookahead_iterator(next_state).unwrap();
assert_eq!(*lookahead.language(), language);
assert!(lookahead.iter_names().eq(expected_symbols));
lookahead.reset_state(next_state);
assert!(lookahead.iter_names().eq(expected_symbols));
lookahead.reset(&language, next_state);
assert!(lookahead
.map(|s| language.node_kind_for_id(s).unwrap())
.eq(expected_symbols));
}
#[test]
fn test_lookahead_iterator_modifiable_only_by_mut() {
let mut parser = Parser::new();
let language = get_language("rust");
parser.set_language(&language).unwrap();
let tree = parser.parse("struct Stuff {}", None).unwrap();
let mut cursor = tree.walk();
assert!(cursor.goto_first_child()); // struct
assert!(cursor.goto_first_child()); // struct keyword
let next_state = cursor.node().next_parse_state();
assert_ne!(next_state, 0);
let mut lookahead = language.lookahead_iterator(next_state).unwrap();
let _ = lookahead.next();
let mut names = lookahead.iter_names();
let _ = names.next();
}
#[test]
fn test_symbol_metadata_checks() {
let language = get_language("rust");
for i in 0..language.node_kind_count() {
let sym = i as u16;
let name = language.node_kind_for_id(sym).unwrap();
match name {
"_type"
| "_expression"
| "_pattern"
| "_literal"
| "_literal_pattern"
| "_declaration_statement" => assert!(language.node_kind_is_supertype(sym)),
"_raw_string_literal_start"
| "_raw_string_literal_end"
| "_line_doc_comment"
| "_error_sentinel" => assert!(!language.node_kind_is_supertype(sym)),
"enum_item" | "struct_item" | "type_item" => {
assert!(language.node_kind_is_named(sym));
}
"=>" | "[" | "]" | "(" | ")" | "{" | "}" => {
assert!(language.node_kind_is_visible(sym));
}
_ => {}
}
}
}
#[test]
fn test_supertypes() {
let language = get_language("rust");
let supertypes = language.supertypes();
if language.abi_version() < 15 {
return;
}
assert_eq!(supertypes.len(), 5);
assert_eq!(
supertypes
.iter()
.filter_map(|&s| language.node_kind_for_id(s))
.map(|s| s.to_string())
.collect::<Vec<String>>(),
vec![
"_expression",
"_literal",
"_literal_pattern",
"_pattern",
"_type"
]
);
for &supertype in supertypes {
let mut subtypes = language
.subtypes_for_supertype(supertype)
.iter()
.filter_map(|symbol| language.node_kind_for_id(*symbol))
.collect::<Vec<&str>>();
subtypes.sort_unstable();
subtypes.dedup();
match language.node_kind_for_id(supertype) {
Some("_literal") => {
assert_eq!(
subtypes,
&[
"boolean_literal",
"char_literal",
"float_literal",
"integer_literal",
"raw_string_literal",
"string_literal"
]
);
}
Some("_pattern") => {
assert_eq!(
subtypes,
&[
"_",
"_literal_pattern",
"captured_pattern",
"const_block",
"generic_pattern",
"identifier",
"macro_invocation",
"mut_pattern",
"or_pattern",
"range_pattern",
"ref_pattern",
"reference_pattern",
"remaining_field_pattern",
"scoped_identifier",
"slice_pattern",
"struct_pattern",
"tuple_pattern",
"tuple_struct_pattern",
]
);
}
Some("_type") => {
assert_eq!(
subtypes,
&[
"abstract_type",
"array_type",
"bounded_type",
"dynamic_type",
"function_type",
"generic_type",
"macro_invocation",
"metavariable",
"never_type",
"pointer_type",
"primitive_type",
"reference_type",
"removed_trait_bound",
"scoped_type_identifier",
"tuple_type",
"type_identifier",
"unit_type"
]
);
}
_ => {}
}
}
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests/helpers/query_helpers.rs | crates/cli/src/tests/helpers/query_helpers.rs | use std::{cmp::Ordering, fmt::Write, ops::Range};
use rand::prelude::Rng;
use streaming_iterator::{IntoStreamingIterator, StreamingIterator};
use tree_sitter::{
Language, Node, Parser, Point, Query, QueryCapture, QueryCursor, QueryMatch, Tree, TreeCursor,
};
#[derive(Debug)]
pub struct Pattern {
kind: Option<&'static str>,
named: bool,
field: Option<&'static str>,
capture: Option<String>,
children: Vec<Self>,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Match<'a, 'tree> {
pub captures: Vec<(&'a str, Node<'tree>)>,
pub last_node: Option<Node<'tree>>,
}
const CAPTURE_NAMES: &[&str] = &[
"one", "two", "three", "four", "five", "six", "seven", "eight",
];
impl Pattern {
pub fn random_pattern_in_tree(tree: &Tree, rng: &mut impl Rng) -> (Self, Range<Point>) {
let mut cursor = tree.walk();
// Descend to the node at a random byte offset and depth.
let mut max_depth = 0;
let byte_offset = rng.gen_range(0..cursor.node().end_byte());
while cursor.goto_first_child_for_byte(byte_offset).is_some() {
max_depth += 1;
}
let depth = rng.gen_range(0..=max_depth);
for _ in 0..depth {
cursor.goto_parent();
}
// Build a pattern that matches that node.
// Sometimes include subsequent siblings of the node.
let pattern_start = cursor.node().start_position();
let mut roots = vec![Self::random_pattern_for_node(&mut cursor, rng)];
while roots.len() < 5 && cursor.goto_next_sibling() {
if rng.gen_bool(0.2) {
roots.push(Self::random_pattern_for_node(&mut cursor, rng));
}
}
let pattern_end = cursor.node().end_position();
let mut pattern = Self {
kind: None,
named: true,
field: None,
capture: None,
children: roots,
};
if pattern.children.len() == 1 ||
// In a parenthesized list of sibling patterns, the first
// sibling can't be an anonymous `_` wildcard.
(pattern.children[0].kind == Some("_") && !pattern.children[0].named)
{
pattern = pattern.children.pop().unwrap();
}
// In a parenthesized list of sibling patterns, the first
// sibling can't have a field name.
else {
pattern.children[0].field = None;
}
(pattern, pattern_start..pattern_end)
}
fn random_pattern_for_node(cursor: &mut TreeCursor, rng: &mut impl Rng) -> Self {
let node = cursor.node();
// Sometimes specify the node's type, sometimes use a wildcard.
let (kind, named) = if rng.gen_bool(0.9) {
(Some(node.kind()), node.is_named())
} else {
(Some("_"), node.is_named() && rng.gen_bool(0.8))
};
// Sometimes specify the node's field.
let field = if rng.gen_bool(0.75) {
cursor.field_name()
} else {
None
};
// Sometimes capture the node.
let capture = if rng.gen_bool(0.7) {
Some(CAPTURE_NAMES[rng.gen_range(0..CAPTURE_NAMES.len())].to_string())
} else {
None
};
// Walk the children and include child patterns for some of them.
let mut children = Vec::new();
if named && cursor.goto_first_child() {
let max_children = rng.gen_range(0..4);
while cursor.goto_next_sibling() {
if rng.gen_bool(0.6) {
let child_ast = Self::random_pattern_for_node(cursor, rng);
children.push(child_ast);
if children.len() >= max_children {
break;
}
}
}
cursor.goto_parent();
}
Self {
kind,
named,
field,
capture,
children,
}
}
fn write_to_string(&self, string: &mut String, indent: usize) {
if let Some(field) = self.field {
write!(string, "{field}: ").unwrap();
}
if self.named {
string.push('(');
let mut has_contents = if let Some(kind) = &self.kind {
write!(string, "{kind}").unwrap();
true
} else {
false
};
for child in &self.children {
let indent = indent + 2;
if has_contents {
string.push('\n');
string.push_str(&" ".repeat(indent));
}
child.write_to_string(string, indent);
has_contents = true;
}
string.push(')');
} else if self.kind == Some("_") {
string.push('_');
} else {
write!(string, "\"{}\"", self.kind.unwrap().replace('\"', "\\\"")).unwrap();
}
if let Some(capture) = &self.capture {
write!(string, " @{capture}").unwrap();
}
}
pub fn matches_in_tree<'tree>(&self, tree: &'tree Tree) -> Vec<Match<'_, 'tree>> {
let mut matches = Vec::new();
// Compute the matches naively: walk the tree and
// retry the entire pattern for each node.
let mut cursor = tree.walk();
let mut ascending = false;
loop {
if ascending {
if cursor.goto_next_sibling() {
ascending = false;
} else if !cursor.goto_parent() {
break;
}
} else {
let matches_here = self.match_node(&mut cursor);
matches.extend_from_slice(&matches_here);
if !cursor.goto_first_child() {
ascending = true;
}
}
}
matches.sort_unstable();
for m in &mut matches {
m.last_node = None;
}
matches.dedup();
matches
}
pub fn match_node<'tree>(&self, cursor: &mut TreeCursor<'tree>) -> Vec<Match<'_, 'tree>> {
let node = cursor.node();
// If a kind is specified, check that it matches the node.
if let Some(kind) = self.kind {
if kind == "_" {
if self.named && !node.is_named() {
return Vec::new();
}
} else if kind != node.kind() || self.named != node.is_named() {
return Vec::new();
}
}
// If a field is specified, check that it matches the node.
if let Some(field) = self.field {
if cursor.field_name() != Some(field) {
return Vec::new();
}
}
// Create a match for the current node.
let mat = Match {
captures: self
.capture
.as_ref()
.map_or_else(Vec::new, |name| vec![(name.as_str(), node)]),
last_node: Some(node),
};
// If there are no child patterns to match, then return this single match.
if self.children.is_empty() {
return vec![mat];
}
// Find every matching combination of child patterns and child nodes.
let mut finished_matches = Vec::<Match>::new();
if cursor.goto_first_child() {
let mut match_states = vec![(0, mat)];
loop {
let mut new_match_states = Vec::new();
for (pattern_index, mat) in &match_states {
let child_pattern = &self.children[*pattern_index];
let child_matches = child_pattern.match_node(cursor);
for child_match in child_matches {
let mut combined_match = mat.clone();
combined_match.last_node = child_match.last_node;
combined_match
.captures
.extend_from_slice(&child_match.captures);
if pattern_index + 1 < self.children.len() {
new_match_states.push((*pattern_index + 1, combined_match));
} else {
let mut existing = false;
for existing_match in &mut finished_matches {
if existing_match.captures == combined_match.captures {
if child_pattern.capture.is_some() {
existing_match.last_node = combined_match.last_node;
}
existing = true;
}
}
if !existing {
finished_matches.push(combined_match);
}
}
}
}
match_states.extend_from_slice(&new_match_states);
if !cursor.goto_next_sibling() {
break;
}
}
cursor.goto_parent();
}
finished_matches
}
}
impl std::fmt::Display for Pattern {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut result = String::new();
self.write_to_string(&mut result, 0);
write!(f, "{result}")
}
}
impl PartialOrd for Match<'_, '_> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for Match<'_, '_> {
// Tree-sitter returns matches in the order that they terminate
// during a depth-first walk of the tree. If multiple matches
// terminate on the same node, those matches are produced in the
// order that their captures were discovered.
fn cmp(&self, other: &Self) -> Ordering {
if let Some((last_node_a, last_node_b)) = self.last_node.zip(other.last_node) {
let cmp = compare_depth_first(last_node_a, last_node_b);
if cmp.is_ne() {
return cmp;
}
}
for (a, b) in self.captures.iter().zip(other.captures.iter()) {
let cmp = compare_depth_first(a.1, b.1);
if !cmp.is_eq() {
return cmp;
}
}
self.captures.len().cmp(&other.captures.len())
}
}
fn compare_depth_first(a: Node, b: Node) -> Ordering {
let a = a.byte_range();
let b = b.byte_range();
a.start.cmp(&b.start).then_with(|| b.end.cmp(&a.end))
}
pub fn assert_query_matches(
language: &Language,
query: &Query,
source: &str,
expected: &[(usize, Vec<(&str, &str)>)],
) {
let mut parser = Parser::new();
parser.set_language(language).unwrap();
let tree = parser.parse(source, None).unwrap();
let mut cursor = QueryCursor::new();
let matches = cursor.matches(query, tree.root_node(), source.as_bytes());
pretty_assertions::assert_eq!(expected, collect_matches(matches, query, source));
pretty_assertions::assert_eq!(false, cursor.did_exceed_match_limit());
}
pub fn collect_matches<'a>(
mut matches: impl StreamingIterator<Item = QueryMatch<'a, 'a>>,
query: &'a Query,
source: &'a str,
) -> Vec<(usize, Vec<(&'a str, &'a str)>)> {
let mut result = Vec::new();
while let Some(m) = matches.next() {
result.push((
m.pattern_index,
format_captures(m.captures.iter().into_streaming_iter_ref(), query, source),
));
}
result
}
pub fn collect_captures<'a>(
captures: impl StreamingIterator<Item = (QueryMatch<'a, 'a>, usize)>,
query: &'a Query,
source: &'a str,
) -> Vec<(&'a str, &'a str)> {
format_captures(captures.map(|(m, i)| m.captures[*i]), query, source)
}
fn format_captures<'a>(
mut captures: impl StreamingIterator<Item = QueryCapture<'a>>,
query: &'a Query,
source: &'a str,
) -> Vec<(&'a str, &'a str)> {
let mut result = Vec::new();
while let Some(capture) = captures.next() {
result.push((
query.capture_names()[capture.index as usize],
capture.node.utf8_text(source.as_bytes()).unwrap(),
));
}
result
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests/helpers/edits.rs | crates/cli/src/tests/helpers/edits.rs | use std::{ops::Range, str};
#[derive(Debug)]
pub struct ReadRecorder<'a> {
content: &'a [u8],
indices_read: Vec<usize>,
}
impl<'a> ReadRecorder<'a> {
#[must_use]
pub const fn new(content: &'a [u8]) -> Self {
Self {
content,
indices_read: Vec::new(),
}
}
pub fn read(&mut self, offset: usize) -> &'a [u8] {
if offset < self.content.len() {
if let Err(i) = self.indices_read.binary_search(&offset) {
self.indices_read.insert(i, offset);
}
&self.content[offset..(offset + 1)]
} else {
&[]
}
}
pub fn strings_read(&self) -> Vec<&'a str> {
let mut result = Vec::new();
let mut last_range = Option::<Range<usize>>::None;
for index in &self.indices_read {
if let Some(ref mut range) = &mut last_range {
if range.end == *index {
range.end += 1;
} else {
result.push(str::from_utf8(&self.content[range.clone()]).unwrap());
last_range = None;
}
} else {
last_range = Some(*index..(*index + 1));
}
}
if let Some(range) = last_range {
result.push(str::from_utf8(&self.content[range]).unwrap());
}
result
}
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests/helpers/fixtures.rs | crates/cli/src/tests/helpers/fixtures.rs | use std::{
env, fs,
path::{Path, PathBuf},
sync::LazyLock,
};
use anyhow::Context;
use tree_sitter::Language;
use tree_sitter_generate::{load_grammar_file, ALLOC_HEADER, ARRAY_HEADER};
use tree_sitter_highlight::HighlightConfiguration;
use tree_sitter_loader::{CompileConfig, Loader};
use tree_sitter_tags::TagsConfiguration;
use crate::tests::generate_parser;
include!("./dirs.rs");
static TEST_LOADER: LazyLock<Loader> = LazyLock::new(|| {
let mut loader = Loader::with_parser_lib_path(SCRATCH_DIR.clone());
if env::var("TREE_SITTER_GRAMMAR_DEBUG").is_ok() {
loader.debug_build(true);
}
loader
});
#[cfg(feature = "wasm")]
pub static ENGINE: LazyLock<tree_sitter::wasmtime::Engine> = LazyLock::new(Default::default);
pub fn test_loader() -> &'static Loader {
&TEST_LOADER
}
pub fn fixtures_dir() -> &'static Path {
&FIXTURES_DIR
}
pub fn scratch_dir() -> &'static Path {
&SCRATCH_DIR
}
pub fn get_language(name: &str) -> Language {
let src_dir = GRAMMARS_DIR.join(name).join("src");
let mut config = CompileConfig::new(&src_dir, None, None);
config.header_paths.push(&HEADER_DIR);
TEST_LOADER.load_language_at_path(config).unwrap()
}
pub fn get_test_fixture_language(name: &str) -> Language {
get_test_fixture_language_internal(name, false)
}
#[cfg(feature = "wasm")]
pub fn get_test_fixture_language_wasm(name: &str) -> Language {
get_test_fixture_language_internal(name, true)
}
fn get_test_fixture_language_internal(name: &str, wasm: bool) -> Language {
let grammar_dir_path = fixtures_dir().join("test_grammars").join(name);
let grammar_json = load_grammar_file(&grammar_dir_path.join("grammar.js"), None).unwrap();
let (parser_name, parser_code) = generate_parser(&grammar_json).unwrap();
get_test_language_internal(&parser_name, &parser_code, Some(&grammar_dir_path), wasm)
}
pub fn get_language_queries_path(language_name: &str) -> PathBuf {
GRAMMARS_DIR.join(language_name).join("queries")
}
pub fn get_highlight_config(
language_name: &str,
injection_query_filename: Option<&str>,
highlight_names: &[String],
) -> HighlightConfiguration {
let language = get_language(language_name);
let queries_path = get_language_queries_path(language_name);
let highlights_query = fs::read_to_string(queries_path.join("highlights.scm")).unwrap();
let injections_query =
injection_query_filename.map_or_else(String::new, |injection_query_filename| {
fs::read_to_string(queries_path.join(injection_query_filename)).unwrap()
});
let locals_query = fs::read_to_string(queries_path.join("locals.scm")).unwrap_or_default();
let mut result = HighlightConfiguration::new(
language,
language_name,
&highlights_query,
&injections_query,
&locals_query,
)
.unwrap();
result.configure(highlight_names);
result
}
pub fn get_tags_config(language_name: &str) -> TagsConfiguration {
let language = get_language(language_name);
let queries_path = get_language_queries_path(language_name);
let tags_query = fs::read_to_string(queries_path.join("tags.scm")).unwrap();
let locals_query = fs::read_to_string(queries_path.join("locals.scm")).unwrap_or_default();
TagsConfiguration::new(language, &tags_query, &locals_query).unwrap()
}
pub fn get_test_language(name: &str, parser_code: &str, path: Option<&Path>) -> Language {
get_test_language_internal(name, parser_code, path, false)
}
fn get_test_language_internal(
name: &str,
parser_code: &str,
path: Option<&Path>,
wasm: bool,
) -> Language {
let src_dir = scratch_dir().join("src").join(name);
fs::create_dir_all(&src_dir).unwrap();
let parser_path = src_dir.join("parser.c");
if !fs::read_to_string(&parser_path).is_ok_and(|content| content == parser_code) {
fs::write(&parser_path, parser_code).unwrap();
}
let scanner_path = if let Some(path) = path {
let scanner_path = path.join("scanner.c");
if scanner_path.exists() {
let scanner_code = fs::read_to_string(&scanner_path).unwrap();
let scanner_copy_path = src_dir.join("scanner.c");
if !fs::read_to_string(&scanner_copy_path).is_ok_and(|content| content == scanner_code)
{
fs::write(&scanner_copy_path, scanner_code).unwrap();
}
Some(scanner_copy_path)
} else {
None
}
} else {
None
};
let header_path = src_dir.join("tree_sitter");
fs::create_dir_all(&header_path).unwrap();
for (file, content) in [
("alloc.h", ALLOC_HEADER),
("array.h", ARRAY_HEADER),
("parser.h", tree_sitter::PARSER_HEADER),
] {
let file = header_path.join(file);
fs::write(&file, content)
.with_context(|| format!("Failed to write {:?}", file.file_name().unwrap()))
.unwrap();
}
let paths_to_check = if let Some(scanner_path) = &scanner_path {
vec![parser_path, scanner_path.clone()]
} else {
vec![parser_path]
};
let mut config = CompileConfig::new(&src_dir, Some(&paths_to_check), None);
config.header_paths = vec![&HEADER_DIR];
config.name = name.to_string();
if wasm {
#[cfg(feature = "wasm")]
{
let mut loader = Loader::with_parser_lib_path(SCRATCH_DIR.clone());
loader.use_wasm(&ENGINE);
if env::var("TREE_SITTER_GRAMMAR_DEBUG").is_ok() {
loader.debug_build(true);
}
loader.load_language_at_path_with_name(config).unwrap()
}
#[cfg(not(feature = "wasm"))]
{
unimplemented!("Wasm feature is not enabled")
}
} else {
TEST_LOADER.load_language_at_path_with_name(config).unwrap()
}
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests/helpers/dirs.rs | crates/cli/src/tests/helpers/dirs.rs | pub static ROOT_DIR: LazyLock<PathBuf> = LazyLock::new(|| {
PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.parent()
.unwrap()
.parent()
.unwrap()
.to_owned()
});
pub static FIXTURES_DIR: LazyLock<PathBuf> =
LazyLock::new(|| ROOT_DIR.join("test").join("fixtures"));
pub static HEADER_DIR: LazyLock<PathBuf> = LazyLock::new(|| ROOT_DIR.join("lib").join("include"));
pub static GRAMMARS_DIR: LazyLock<PathBuf> =
LazyLock::new(|| ROOT_DIR.join("test").join("fixtures").join("grammars"));
pub static SCRATCH_BASE_DIR: LazyLock<PathBuf> = LazyLock::new(|| {
let result = ROOT_DIR.join("target").join("scratch");
fs::create_dir_all(&result).unwrap();
result
});
#[cfg(feature = "wasm")]
pub static WASM_DIR: LazyLock<PathBuf> = LazyLock::new(|| ROOT_DIR.join("target").join("release"));
/// Per-target scratch directory: `target/scratch/<arch>-<os>-<vendor>-<env>-<endian>`.
///
/// Keying the directory on the full target description keeps artifacts built
/// for different toolchains/targets from clobbering each other. The directory
/// is created on first access.
pub static SCRATCH_DIR: LazyLock<PathBuf> = LazyLock::new(|| {
    // Target facets, per
    // https://doc.rust-lang.org/reference/conditional-compilation.html
    // `cfg!` evaluates at compile time, so exactly one candidate (or none)
    // is true for a given build; the first true candidate wins.
    let pick = |candidates: &[(bool, &'static str)]| {
        candidates
            .iter()
            .find_map(|&(matched, label)| matched.then_some(label))
            .unwrap_or("unknown")
    };
    let vendor = pick(&[
        (cfg!(target_vendor = "apple"), "apple"),
        (cfg!(target_vendor = "fortanix"), "fortanix"),
        (cfg!(target_vendor = "pc"), "pc"),
    ]);
    let env = pick(&[
        (cfg!(target_env = "gnu"), "gnu"),
        (cfg!(target_env = "msvc"), "msvc"),
        (cfg!(target_env = "musl"), "musl"),
        (cfg!(target_env = "sgx"), "sgx"),
    ]);
    let endian = pick(&[
        (cfg!(target_endian = "little"), "little"),
        (cfg!(target_endian = "big"), "big"),
    ]);
    let machine = format!(
        "{}-{}-{vendor}-{env}-{endian}",
        std::env::consts::ARCH,
        std::env::consts::OS
    );
    let dir = SCRATCH_BASE_DIR.join(machine);
    fs::create_dir_all(&dir).unwrap();
    dir
});
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/tests/proc_macro/src/lib.rs | crates/cli/src/tests/proc_macro/src/lib.rs | use proc_macro::TokenStream;
use proc_macro2::Span;
use quote::quote;
use syn::{
parse::{Parse, ParseStream},
parse_macro_input, Error, Expr, Ident, ItemFn, LitInt, Token,
};
/// Attribute macro that reruns a flaky test before letting it fail.
///
/// `#[retry(N)]` wraps the annotated function in an outer function of the
/// same name. The original definition is re-emitted inside the wrapper, so
/// the `#name()` call resolves to it. Each attempt runs under
/// `std::panic::catch_unwind`; the first success returns early, and the
/// panic payload of the last of the `N + 1` attempts is re-raised so the
/// failure surfaces with its original message.
#[proc_macro_attribute]
pub fn retry(args: TokenStream, input: TokenStream) -> TokenStream {
    // `args` is the retry-count literal; `input` is the annotated function.
    let count = parse_macro_input!(args as LitInt);
    let input = parse_macro_input!(input as ItemFn);
    let attrs = &input.attrs;
    let name = &input.sig.ident;
    TokenStream::from(quote! {
        // NOTE(review): `#(#attrs),*` interposes commas between forwarded
        // attributes; `#(#attrs)*` looks intended. Harmless for zero or one
        // attribute — confirm for multi-attribute uses.
        #(#attrs),*
        fn #name() {
            #input
            for i in 0..=#count {
                let result = std::panic::catch_unwind(|| {
                    #name();
                });
                if result.is_ok() {
                    return;
                }
                if i == #count {
                    std::panic::resume_unwind(result.unwrap_err());
                }
            }
        }
    })
}
/// Attribute macro that defines a `#[test]` driving the function with a seed.
///
/// `#[test_with_seed(retry = N, seed = EXPR, seed_fn = PATH)]` wraps the
/// annotated `fn name(seed)` in a `#[test]` wrapper of the same name. The
/// wrapper calls the inner function with `seed`, retrying up to `N` extra
/// times on panic; when `seed_fn` is given, each retry draws a fresh seed
/// from it. `retry` defaults to `0`; `seed` is required; `seed_fn` is
/// optional.
#[proc_macro_attribute]
pub fn test_with_seed(args: TokenStream, input: TokenStream) -> TokenStream {
    // Parsed form of the comma-separated `key = value` argument list.
    struct Args {
        retry: LitInt,
        seed: Expr,
        seed_fn: Option<Ident>,
    }
    impl Parse for Args {
        fn parse(input: ParseStream) -> syn::Result<Self> {
            let mut retry = None;
            let mut seed = None;
            let mut seed_fn = None;
            // Accept the three known keys in any order; reject unknown keys.
            while !input.is_empty() {
                let name = input.parse::<Ident>()?;
                match name.to_string().as_str() {
                    "retry" => {
                        input.parse::<Token![=]>()?;
                        retry.replace(input.parse()?);
                    }
                    "seed" => {
                        input.parse::<Token![=]>()?;
                        seed.replace(input.parse()?);
                    }
                    "seed_fn" => {
                        input.parse::<Token![=]>()?;
                        seed_fn.replace(input.parse()?);
                    }
                    x => {
                        return Err(Error::new(
                            name.span(),
                            format!("Unsupported parameter `{x}`"),
                        ))
                    }
                }
                if !input.is_empty() {
                    input.parse::<Token![,]>()?;
                }
            }
            // `retry` is optional and defaults to zero retries.
            if retry.is_none() {
                retry.replace(LitInt::new("0", Span::mixed_site()));
            }
            Ok(Self {
                retry: retry.expect("`retry` parameter is required"),
                seed: seed.expect("`seed` parameter is required"),
                seed_fn,
            })
        }
    }
    let Args {
        retry,
        seed,
        seed_fn,
    } = parse_macro_input!(args as Args);
    // Zero-or-one-element iterator: lets `#( ... )*` below expand the
    // reseeding statement only when a `seed_fn` was supplied.
    let seed_fn = seed_fn.iter();
    let func = parse_macro_input!(input as ItemFn);
    let attrs = &func.attrs;
    let name = &func.sig.ident;
    TokenStream::from(quote! {
        #[test]
        #(#attrs),*
        fn #name() {
            #func
            let mut seed = #seed;
            for i in 0..=#retry {
                let result = std::panic::catch_unwind(|| {
                    #name(seed);
                });
                if result.is_ok() {
                    return;
                }
                if i == #retry {
                    std::panic::resume_unwind(result.unwrap_err());
                }
                #(
                    seed = #seed_fn();
                )*
                if i < #retry {
                    println!("\nRetry {}/{} with a new seed {}", i + 1, #retry, seed);
                }
            }
        }
    })
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/fuzz/corpus_test.rs | crates/cli/src/fuzz/corpus_test.rs | use tree_sitter::{LogType, Node, Parser, Point, Range, Tree};
use super::{scope_sequence::ScopeSequence, LOG_ENABLED, LOG_GRAPH_ENABLED};
use crate::util;
/// Walk every node of `tree` and assert that its size/position bookkeeping
/// is internally consistent with the text `input`.
///
/// Per node (recursively): start <= end in both bytes and points; byte
/// offsets agree with (row, column) points via a newline-offset table built
/// from `input`; children appear in order without overlapping and stay
/// inside the parent; the named-child count matches the children flagged as
/// named; and a parent reports changes whenever any child does. Panics on
/// the first violation.
pub fn check_consistent_sizes(tree: &Tree, input: &[u8]) {
    // `line_offsets[row]` is the byte offset at which `row` begins.
    fn check(node: Node, line_offsets: &[usize]) {
        let start_byte = node.start_byte();
        let end_byte = node.end_byte();
        let start_point = node.start_position();
        let end_point = node.end_position();
        assert!(start_byte <= end_byte);
        assert!(start_point <= end_point);
        // Byte offsets and point coordinates must describe the same place.
        assert_eq!(
            start_byte,
            line_offsets[start_point.row] + start_point.column
        );
        assert_eq!(end_byte, line_offsets[end_point.row] + end_point.column);
        let mut last_child_end_byte = start_byte;
        let mut last_child_end_point = start_point;
        let mut some_child_has_changes = false;
        let mut actual_named_child_count = 0;
        for i in 0..node.child_count() {
            let child = node.child(i as u32).unwrap();
            // Children must be ordered and non-overlapping.
            assert!(child.start_byte() >= last_child_end_byte);
            assert!(child.start_position() >= last_child_end_point);
            check(child, line_offsets);
            if child.has_changes() {
                some_child_has_changes = true;
            }
            if child.is_named() {
                actual_named_child_count += 1;
            }
            last_child_end_byte = child.end_byte();
            last_child_end_point = child.end_position();
        }
        assert_eq!(actual_named_child_count, node.named_child_count());
        // The last child must not extend past its parent.
        if node.child_count() > 0 {
            assert!(end_byte >= last_child_end_byte);
            assert!(end_point >= last_child_end_point);
        }
        // Change flags must propagate upward.
        if some_child_has_changes {
            assert!(node.has_changes());
        }
    }
    // Build the byte offset of the start of every line of `input`.
    let mut line_offsets = vec![0];
    for (i, c) in input.iter().enumerate() {
        if *c == b'\n' {
            line_offsets.push(i + 1);
        }
    }
    check(tree.root_node(), &line_offsets);
}
/// Verify that `old_tree.changed_ranges(new_tree)` accounts for every real
/// scope change between the two trees.
///
/// Builds a per-byte scope sequence for each tree, then checks that (a) no
/// reported changed range extends past the extent of both trees, and (b)
/// every byte whose scope stack differs lies inside some reported range
/// (delegated to `ScopeSequence::check_changes`). Returns a descriptive
/// error message on the first violation.
pub fn check_changed_ranges(old_tree: &Tree, new_tree: &Tree, input: &[u8]) -> Result<(), String> {
    let changed_ranges = old_tree.changed_ranges(new_tree).collect::<Vec<_>>();
    let old_scope_sequence = ScopeSequence::new(old_tree);
    let new_scope_sequence = ScopeSequence::new(new_tree);
    // The union of the two root extents bounds where changes may legally end.
    let old_range = old_tree.root_node().range();
    let new_range = new_tree.root_node().range();
    let byte_range =
        old_range.start_byte.min(new_range.start_byte)..old_range.end_byte.max(new_range.end_byte);
    let point_range = old_range.start_point.min(new_range.start_point)
        ..old_range.end_point.max(new_range.end_point);
    for range in &changed_ranges {
        if range.end_byte > byte_range.end || range.end_point > point_range.end {
            return Err(format!(
                "changed range extends outside of the old and new trees {range:?}",
            ));
        }
    }
    old_scope_sequence.check_changes(&new_scope_sequence, input, &changed_ranges)
}
/// Configure `parser`'s included ranges from markers embedded in `input`.
///
/// With `delimiters == Some((start, end))`, every span of `input` strictly
/// between a `start` marker and the following `end` marker (or the end of
/// the input when no closing marker follows) becomes an included range;
/// text outside the markers is excluded from parsing. With `None`, the
/// parser is reset to parse the entire input.
///
/// Fix: the delimiter search previously used `windows(2)`, hard-coding a
/// 2-byte marker length, so delimiters of any other length never matched.
/// The window length now follows the actual marker lengths (identical
/// behavior for the existing 2-byte delimiters).
pub fn set_included_ranges(parser: &mut Parser, input: &[u8], delimiters: Option<(&str, &str)>) {
    if let Some((start, end)) = delimiters {
        let mut ranges = Vec::new();
        // Guard: `slice::windows` panics when the window size is zero, and
        // an empty marker can never match anyway (matches the old
        // behavior, which produced no ranges for empty markers).
        if !start.is_empty() && !end.is_empty() {
            let mut ix = 0;
            while ix < input.len() {
                // Find the next opening marker; stop when there isn't one.
                let Some(mut start_ix) = input[ix..]
                    .windows(start.len())
                    .position(|win| win == start.as_bytes())
                else {
                    break;
                };
                // The included range begins just past the opening marker.
                start_ix += ix + start.len();
                // Find the matching closing marker, or run to end of input.
                let end_ix = input[start_ix..]
                    .windows(end.len())
                    .position(|win| win == end.as_bytes())
                    .map_or(input.len(), |ix| start_ix + ix);
                ix = end_ix;
                ranges.push(Range {
                    start_byte: start_ix,
                    end_byte: end_ix,
                    start_point: point_for_offset(input, start_ix),
                    end_point: point_for_offset(input, end_ix),
                });
            }
        }
        parser.set_included_ranges(&ranges).unwrap();
    } else {
        // No delimiters: parse the whole input.
        parser.set_included_ranges(&[]).unwrap();
    }
}
fn point_for_offset(text: &[u8], offset: usize) -> Point {
let mut point = Point::default();
for byte in &text[..offset] {
if *byte == b'\n' {
point.row += 1;
point.column = 0;
} else {
point.column += 1;
}
}
point
}
/// Construct a `Parser` wired up for this fuzz run's logging flags.
///
/// With `LOG_ENABLED` set, log messages are echoed to stderr, with lexer
/// messages indented for readability. With `LOG_GRAPH_ENABLED` set, a
/// graph-logging session targeting `log_filename` is created via
/// `util::log_graphs` and stored in `session` so it outlives this call.
pub fn get_parser(session: &mut Option<util::LogSession>, log_filename: &str) -> Parser {
    let mut parser = Parser::new();
    if *LOG_ENABLED {
        parser.set_logger(Some(Box::new(|log_type, msg| {
            // Indent lexer output under the surrounding parse messages.
            if log_type == LogType::Lex {
                eprintln!("  {msg}");
            } else {
                eprintln!("{msg}");
            }
        })));
    }
    if *LOG_GRAPH_ENABLED {
        *session = Some(util::log_graphs(&mut parser, log_filename, false).unwrap());
    }
    parser
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/fuzz/random.rs | crates/cli/src/fuzz/random.rs | use rand::{
distributions::Alphanumeric,
prelude::{Rng, SeedableRng, StdRng},
};
// Single-character operator tokens that `Rand::words` may emit in place of
// an alphanumeric word.
const OPERATORS: &[char] = &[
    '+', '-', '<', '>', '(', ')', '*', '/', '&', '|', '!', ',', '.', '%',
];

/// A small seeded PRNG wrapper used to generate reproducible fuzz inputs.
pub struct Rand(StdRng);

impl Rand {
    /// Create a generator from a fixed seed, so runs are reproducible.
    #[must_use]
    pub fn new(seed: usize) -> Self {
        Self(StdRng::seed_from_u64(seed as u64))
    }

    /// Uniformly pick an integer in the inclusive range `0..=max`.
    pub fn unsigned(&mut self, max: usize) -> usize {
        self.0.gen_range(0..=max)
    }

    /// Generate up to `max_count` pseudo-words separated by spaces (or, one
    /// time in six, newlines). Each word is either a single operator
    /// character (one time in four) or 0–8 random alphanumeric bytes.
    pub fn words(&mut self, max_count: usize) -> Vec<u8> {
        let word_count = self.unsigned(max_count);
        let mut result = Vec::with_capacity(2 * word_count);
        for i in 0..word_count {
            if i > 0 {
                // Separator: newline one time in six, otherwise a space.
                if self.unsigned(5) == 0 {
                    result.push(b'\n');
                } else {
                    result.push(b' ');
                }
            }
            if self.unsigned(3) == 0 {
                // Emit a single operator character.
                let index = self.unsigned(OPERATORS.len() - 1);
                result.push(OPERATORS[index] as u8);
            } else {
                // Emit a short (possibly empty) alphanumeric word.
                for _ in 0..self.unsigned(8) {
                    result.push(self.0.sample(Alphanumeric));
                }
            }
        }
        result
    }
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/fuzz/scope_sequence.rs | crates/cli/src/fuzz/scope_sequence.rs | use tree_sitter::{Point, Range, Tree};
/// For every byte of a source text, the stack of node kinds ("scopes") that
/// enclose that byte in a syntax tree.
#[derive(Debug)]
pub struct ScopeSequence(Vec<ScopeStack>);

/// The node kinds enclosing one byte, outermost first.
type ScopeStack = Vec<&'static str>;

impl ScopeSequence {
    /// Build the per-byte scope sequence by walking `tree` depth-first with
    /// a cursor.
    #[must_use]
    pub fn new(tree: &Tree) -> Self {
        let mut result = Self(Vec::new());
        let mut scope_stack = Vec::new();
        let mut cursor = tree.walk();
        let mut visited_children = false;
        loop {
            let node = cursor.node();
            // Fill bytes between the previous position and this node's
            // start with the current (enclosing) scope stack.
            for _ in result.0.len()..node.start_byte() {
                result.0.push(scope_stack.clone());
            }
            if visited_children {
                // On the way back up: fill the node's remaining bytes, then
                // pop its kind off the stack.
                for _ in result.0.len()..node.end_byte() {
                    result.0.push(scope_stack.clone());
                }
                scope_stack.pop();
                if cursor.goto_next_sibling() {
                    visited_children = false;
                } else if !cursor.goto_parent() {
                    break;
                }
            } else {
                // On the way down: push this node's kind and descend.
                scope_stack.push(cursor.node().kind());
                if !cursor.goto_first_child() {
                    visited_children = true;
                }
            }
        }
        result
    }

    /// Compare two scope sequences byte-by-byte, requiring every difference
    /// (at bytes other than CR/LF) to lie inside one of
    /// `known_changed_ranges`. Returns a diagnostic message describing the
    /// first byte that changed scope without being reported.
    pub fn check_changes(
        &self,
        other: &Self,
        text: &[u8],
        known_changed_ranges: &[Range],
    ) -> Result<(), String> {
        let mut position = Point { row: 0, column: 0 };
        for i in 0..(self.0.len().max(other.0.len())) {
            let stack = &self.0.get(i);
            let other_stack = &other.0.get(i);
            // NOTE(review): `text[i]` is indexed up to the longer of the
            // two sequences — this assumes `text` covers both trees'
            // extents; confirm against the call sites.
            if *stack != *other_stack && ![b'\r', b'\n'].contains(&text[i]) {
                let containing_range = known_changed_ranges
                    .iter()
                    .find(|range| range.start_point <= position && position < range.end_point);
                if containing_range.is_none() {
                    // Reconstruct the offending line for the error report.
                    let line = &text[(i - position.column)..]
                        .split(|c| *c == b'\n')
                        .next()
                        .unwrap();
                    return Err(format!(
                        concat!(
                            "Position: {}\n",
                            "Byte offset: {}\n",
                            "Line: {}\n",
                            "{}^\n",
                            "Old scopes: {:?}\n",
                            "New scopes: {:?}\n",
                            "Invalidated ranges: {:?}",
                        ),
                        position,
                        i,
                        String::from_utf8_lossy(line),
                        String::from(" ").repeat(position.column + "Line: ".len()),
                        stack,
                        other_stack,
                        known_changed_ranges,
                    ));
                }
            }
            // Advance the (row, column) tracking for byte `i`.
            if text[i] == b'\n' {
                position.row += 1;
                position.column = 0;
            } else {
                position.column += 1;
            }
        }
        Ok(())
    }
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/fuzz/edits.rs | crates/cli/src/fuzz/edits.rs | use super::random::Rand;
/// A single text mutation: at byte `position`, delete `deleted_length`
/// bytes and insert `inserted_text` in their place.
#[derive(Debug)]
pub struct Edit {
    pub position: usize,
    pub deleted_length: usize,
    pub inserted_text: Vec<u8>,
}

/// Build the edit that undoes `edit`, given `input` — the text as it was
/// *before* `edit` was applied.
///
/// The inverse deletes whatever `edit` inserted and restores the bytes
/// `edit` removed.
#[must_use]
pub fn invert_edit(input: &[u8], edit: &Edit) -> Edit {
    let start = edit.position;
    let end = start + edit.deleted_length;
    Edit {
        position: start,
        deleted_length: edit.inserted_text.len(),
        inserted_text: input[start..end].to_vec(),
    }
}
/// Generate a random edit against `input`, weighted across four shapes:
/// append at the end (2/11), truncate at the end (3/11), insert at a random
/// position (3/11), and replace a random span (3/11).
pub fn get_random_edit(rand: &mut Rand, input: &[u8]) -> Edit {
    // `unsigned(10)` yields 0..=10; the arms partition that range exactly
    // as the original probability buckets.
    match rand.unsigned(10) {
        // Append generated text at the end of the input.
        0 | 1 => Edit {
            position: input.len(),
            deleted_length: 0,
            inserted_text: rand.words(3),
        },
        // Delete up to 30 bytes (bounded by the input length) from the end.
        2..=4 => {
            let deleted_length = rand.unsigned(30).min(input.len());
            Edit {
                position: input.len() - deleted_length,
                deleted_length,
                inserted_text: vec![],
            }
        }
        // Insert a few words at a random position.
        5..=7 => {
            let position = rand.unsigned(input.len());
            let word_count = 1 + rand.unsigned(3);
            Edit {
                position,
                deleted_length: 0,
                inserted_text: rand.words(word_count),
            }
        }
        // Replace a random span with a few words.
        _ => {
            let position = rand.unsigned(input.len());
            let deleted_length = rand.unsigned(input.len() - position);
            let word_count = 1 + rand.unsigned(3);
            Edit {
                position,
                deleted_length,
                inserted_text: rand.words(word_count),
            }
        }
    }
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/fuzz/allocations.rs | crates/cli/src/fuzz/allocations.rs | use std::{
collections::HashMap,
os::raw::c_void,
sync::{
atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst},
Mutex,
},
};
/// Install the recording allocator into the tree-sitter C library before
/// `main` runs (via `ctor`), so every allocation the library makes flows
/// through the bookkeeping wrappers defined below.
#[ctor::ctor]
unsafe fn initialize_allocation_recording() {
    tree_sitter::set_allocator(
        Some(ts_record_malloc),
        Some(ts_record_calloc),
        Some(ts_record_realloc),
        Some(ts_record_free),
    );
}
/// A raw allocated pointer, used purely as a hash-map key.
#[derive(Debug, PartialEq, Eq, Hash)]
struct Allocation(*const c_void);

// SAFETY: the wrapped pointer is only compared and hashed as an opaque key,
// never dereferenced, so sharing it across threads is harmless.
unsafe impl Send for Allocation {}
unsafe impl Sync for Allocation {}

/// Bookkeeping for allocations made while recording is enabled.
#[derive(Default)]
struct AllocationRecorder {
    // Whether allocations on this thread are currently being tracked.
    enabled: AtomicBool,
    // Sequence number handed to the next recorded allocation.
    allocation_count: AtomicUsize,
    // Live allocations, keyed by pointer, mapped to their sequence number.
    outstanding_allocations: Mutex<HashMap<Allocation, usize>>,
}

thread_local! {
    // One recorder per thread, so concurrently running tests don't see
    // each other's allocations.
    static RECORDER: AllocationRecorder = AllocationRecorder::default();
}

// The system allocator, called directly by the recording wrappers below.
extern "C" {
    fn malloc(size: usize) -> *mut c_void;
    fn calloc(count: usize, size: usize) -> *mut c_void;
    fn realloc(ptr: *mut c_void, size: usize) -> *mut c_void;
    fn free(ptr: *mut c_void);
}
/// Run `f` with allocation recording enabled, panicking if it leaks.
pub fn record<T>(f: impl FnOnce() -> T) -> T {
    record_checked(f).unwrap()
}

/// Run `f` with allocation recording enabled on the current thread.
///
/// Returns `f`'s value, or an error listing the sequence numbers of
/// allocations made during the call that were never freed.
pub fn record_checked<T>(f: impl FnOnce() -> T) -> Result<T, String> {
    // Reset the recorder's state and switch tracking on.
    RECORDER.with(|recorder| {
        recorder.enabled.store(true, SeqCst);
        recorder.allocation_count.store(0, SeqCst);
        recorder.outstanding_allocations.lock().unwrap().clear();
    });
    let value = f();
    // Switch tracking off and drain whatever was never freed.
    let outstanding_allocation_indices = RECORDER.with(|recorder| {
        recorder.enabled.store(false, SeqCst);
        recorder.allocation_count.store(0, SeqCst);
        recorder
            .outstanding_allocations
            .lock()
            .unwrap()
            .drain()
            .map(|e| e.1)
            .collect::<Vec<_>>()
    });
    if !outstanding_allocation_indices.is_empty() {
        return Err(format!(
            "Leaked allocation indices: {outstanding_allocation_indices:?}",
        ));
    }
    Ok(value)
}
/// Note a new allocation, if recording is enabled on this thread.
fn record_alloc(ptr: *mut c_void) {
    RECORDER.with(|recorder| {
        if recorder.enabled.load(SeqCst) {
            // Tag the pointer with a monotonically increasing index so
            // leaks can be reported by allocation order.
            let count = recorder.allocation_count.fetch_add(1, SeqCst);
            recorder
                .outstanding_allocations
                .lock()
                .unwrap()
                .insert(Allocation(ptr), count);
        }
    });
}

/// Forget a freed allocation, if recording is enabled on this thread.
fn record_dealloc(ptr: *mut c_void) {
    RECORDER.with(|recorder| {
        if recorder.enabled.load(SeqCst) {
            recorder
                .outstanding_allocations
                .lock()
                .unwrap()
                .remove(&Allocation(ptr));
        }
    });
}
/// Allocate with the system `malloc`, recording the returned pointer.
///
/// # Safety
///
/// The caller must ensure that the returned pointer is eventually
/// freed by calling `ts_record_free`.
#[must_use]
pub unsafe extern "C" fn ts_record_malloc(size: usize) -> *mut c_void {
    let result = malloc(size);
    record_alloc(result);
    result
}

/// Allocate with the system `calloc`, recording the returned pointer.
///
/// # Safety
///
/// The caller must ensure that the returned pointer is eventually
/// freed by calling `ts_record_free`.
#[must_use]
pub unsafe extern "C" fn ts_record_calloc(count: usize, size: usize) -> *mut c_void {
    let result = calloc(count, size);
    record_alloc(result);
    result
}

/// Resize with the system `realloc`, keeping the recorded books consistent.
///
/// # Safety
///
/// The caller must ensure that the returned pointer is eventually
/// freed by calling `ts_record_free`.
#[must_use]
pub unsafe extern "C" fn ts_record_realloc(ptr: *mut c_void, size: usize) -> *mut c_void {
    let result = realloc(ptr, size);
    if ptr.is_null() {
        // `realloc(NULL, n)` behaves like `malloc(n)`: a fresh allocation.
        record_alloc(result);
    } else if !core::ptr::eq(ptr, result) {
        // The block moved: retire the old address and track the new one.
        record_dealloc(ptr);
        record_alloc(result);
    }
    result
}

/// Release with the system `free`, forgetting the recorded pointer.
///
/// # Safety
///
/// The caller must ensure that `ptr` was allocated by a previous call
/// to `ts_record_malloc`, `ts_record_calloc`, or `ts_record_realloc`.
pub unsafe extern "C" fn ts_record_free(ptr: *mut c_void) {
    record_dealloc(ptr);
    free(ptr);
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/templates/lib.rs | crates/cli/src/templates/lib.rs | //! This crate provides TITLE_PARSER_NAME language support for the [tree-sitter] parsing library.
//!
//! Typically, you will use the [`LANGUAGE`] constant to add this language to a
//! tree-sitter [`Parser`], and then use the parser to parse some code:
//!
//! ```
//! let code = r#"
//! "#;
//! let mut parser = tree_sitter::Parser::new();
//! let language = tree_sitter_PARSER_NAME::LANGUAGE;
//! parser
//! .set_language(&language.into())
//! .expect("Error loading TITLE_PARSER_NAME parser");
//! let tree = parser.parse(code, None).unwrap();
//! assert!(!tree.root_node().has_error());
//! ```
//!
//! [`Parser`]: https://docs.rs/tree-sitter/RUST_BINDING_VERSION/tree_sitter/struct.Parser.html
//! [tree-sitter]: https://tree-sitter.github.io/
use tree_sitter_language::LanguageFn;
// The `tree_sitter_PARSER_NAME` symbol is defined in the generated
// `src/parser.c`, compiled and linked by this crate's build script.
extern "C" {
    fn tree_sitter_PARSER_NAME() -> *const ();
}

/// The tree-sitter [`LanguageFn`] for this grammar.
pub const LANGUAGE: LanguageFn = unsafe { LanguageFn::from_raw(tree_sitter_PARSER_NAME) };

/// The content of the [`node-types.json`] file for this grammar.
///
/// [`node-types.json`]: https://tree-sitter.github.io/tree-sitter/using-parsers/6-static-node-types
pub const NODE_TYPES: &str = include_str!("../../src/node-types.json");

// Each query constant below is compiled in only when the build script found
// the corresponding query file (via the `with_*_query` cfgs it emits).
#[cfg(with_highlights_query)]
/// The syntax highlighting query for this grammar.
pub const HIGHLIGHTS_QUERY: &str = include_str!("../../HIGHLIGHTS_QUERY_PATH");

#[cfg(with_injections_query)]
/// The language injection query for this grammar.
pub const INJECTIONS_QUERY: &str = include_str!("../../INJECTIONS_QUERY_PATH");

#[cfg(with_locals_query)]
/// The local variable query for this grammar.
pub const LOCALS_QUERY: &str = include_str!("../../LOCALS_QUERY_PATH");

#[cfg(with_tags_query)]
/// The symbol tagging query for this grammar.
pub const TAGS_QUERY: &str = include_str!("../../TAGS_QUERY_PATH");

#[cfg(test)]
mod tests {
    // Smoke test: the generated grammar loads into a parser.
    #[test]
    fn test_can_load_grammar() {
        let mut parser = tree_sitter::Parser::new();
        parser
            .set_language(&super::LANGUAGE.into())
            .expect("Error loading TITLE_PARSER_NAME parser");
    }
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/src/templates/build.rs | crates/cli/src/templates/build.rs | fn main() {
let src_dir = std::path::Path::new("src");
let mut c_config = cc::Build::new();
c_config.std("c11").include(src_dir);
#[cfg(target_env = "msvc")]
c_config.flag("-utf-8");
if std::env::var("TARGET").unwrap() == "wasm32-unknown-unknown" {
let Ok(wasm_headers) = std::env::var("DEP_TREE_SITTER_LANGUAGE_WASM_HEADERS") else {
panic!("Environment variable DEP_TREE_SITTER_LANGUAGE_WASM_HEADERS must be set by the language crate");
};
let Ok(wasm_src) =
std::env::var("DEP_TREE_SITTER_LANGUAGE_WASM_SRC").map(std::path::PathBuf::from)
else {
panic!("Environment variable DEP_TREE_SITTER_LANGUAGE_WASM_SRC must be set by the language crate");
};
c_config.include(&wasm_headers);
c_config.files([
wasm_src.join("stdio.c"),
wasm_src.join("stdlib.c"),
wasm_src.join("string.c"),
]);
}
let parser_path = src_dir.join("parser.c");
c_config.file(&parser_path);
println!("cargo:rerun-if-changed={}", parser_path.to_str().unwrap());
let scanner_path = src_dir.join("scanner.c");
if scanner_path.exists() {
c_config.file(&scanner_path);
println!("cargo:rerun-if-changed={}", scanner_path.to_str().unwrap());
}
c_config.compile("tree-sitter-KEBAB_PARSER_NAME");
println!("cargo:rustc-check-cfg=cfg(with_highlights_query)");
if !"HIGHLIGHTS_QUERY_PATH".is_empty() && std::path::Path::new("HIGHLIGHTS_QUERY_PATH").exists() {
println!("cargo:rustc-cfg=with_highlights_query");
}
println!("cargo:rustc-check-cfg=cfg(with_injections_query)");
if !"INJECTIONS_QUERY_PATH".is_empty() && std::path::Path::new("INJECTIONS_QUERY_PATH").exists() {
println!("cargo:rustc-cfg=with_injections_query");
}
println!("cargo:rustc-check-cfg=cfg(with_locals_query)");
if !"LOCALS_QUERY_PATH".is_empty() && std::path::Path::new("LOCALS_QUERY_PATH").exists() {
println!("cargo:rustc-cfg=with_locals_query");
}
println!("cargo:rustc-check-cfg=cfg(with_tags_query)");
if !"TAGS_QUERY_PATH".is_empty() && std::path::Path::new("TAGS_QUERY_PATH").exists() {
println!("cargo:rustc-cfg=with_tags_query");
}
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/cli/benches/benchmark.rs | crates/cli/benches/benchmark.rs | use std::{
collections::BTreeMap,
env, fs,
path::{Path, PathBuf},
str,
sync::LazyLock,
time::Instant,
};
use anyhow::Context;
use log::info;
use tree_sitter::{Language, Parser, Query};
use tree_sitter_loader::{CompileConfig, Loader};
include!("../src/tests/helpers/dirs.rs");
static LANGUAGE_FILTER: LazyLock<Option<String>> =
LazyLock::new(|| env::var("TREE_SITTER_BENCHMARK_LANGUAGE_FILTER").ok());
static EXAMPLE_FILTER: LazyLock<Option<String>> =
LazyLock::new(|| env::var("TREE_SITTER_BENCHMARK_EXAMPLE_FILTER").ok());
static REPETITION_COUNT: LazyLock<usize> = LazyLock::new(|| {
env::var("TREE_SITTER_BENCHMARK_REPETITION_COUNT")
.map(|s| s.parse::<usize>().unwrap())
.unwrap_or(5)
});
static TEST_LOADER: LazyLock<Loader> =
LazyLock::new(|| Loader::with_parser_lib_path(SCRATCH_DIR.clone()));
#[allow(clippy::type_complexity)]
static EXAMPLE_AND_QUERY_PATHS_BY_LANGUAGE_DIR: LazyLock<
BTreeMap<PathBuf, (Vec<PathBuf>, Vec<PathBuf>)>,
> = LazyLock::new(|| {
fn process_dir(result: &mut BTreeMap<PathBuf, (Vec<PathBuf>, Vec<PathBuf>)>, dir: &Path) {
if dir.join("grammar.js").exists() {
let relative_path = dir.strip_prefix(GRAMMARS_DIR.as_path()).unwrap();
let (example_paths, query_paths) = result.entry(relative_path.to_owned()).or_default();
if let Ok(example_files) = fs::read_dir(dir.join("examples")) {
example_paths.extend(example_files.filter_map(|p| {
let p = p.unwrap().path();
if p.is_file() {
Some(p)
} else {
None
}
}));
}
if let Ok(query_files) = fs::read_dir(dir.join("queries")) {
query_paths.extend(query_files.filter_map(|p| {
let p = p.unwrap().path();
if p.is_file() {
Some(p)
} else {
None
}
}));
}
} else {
for entry in fs::read_dir(dir).unwrap() {
let entry = entry.unwrap().path();
if entry.is_dir() {
process_dir(result, &entry);
}
}
}
}
let mut result = BTreeMap::new();
process_dir(&mut result, &GRAMMARS_DIR);
result
});
fn main() {
tree_sitter_cli::logger::init();
let max_path_length = EXAMPLE_AND_QUERY_PATHS_BY_LANGUAGE_DIR
.values()
.flat_map(|(e, q)| {
e.iter()
.chain(q.iter())
.map(|s| s.file_name().unwrap().to_str().unwrap().len())
})
.max()
.unwrap_or(0);
info!("Benchmarking with {} repetitions", *REPETITION_COUNT);
let mut parser = Parser::new();
let mut all_normal_speeds = Vec::new();
let mut all_error_speeds = Vec::new();
for (language_path, (example_paths, query_paths)) in
EXAMPLE_AND_QUERY_PATHS_BY_LANGUAGE_DIR.iter()
{
let language_name = language_path.file_name().unwrap().to_str().unwrap();
if let Some(filter) = LANGUAGE_FILTER.as_ref() {
if language_name != filter.as_str() {
continue;
}
}
info!("\nLanguage: {language_name}");
let language = get_language(language_path);
parser.set_language(&language).unwrap();
info!(" Constructing Queries");
for path in query_paths {
if let Some(filter) = EXAMPLE_FILTER.as_ref() {
if !path.to_str().unwrap().contains(filter.as_str()) {
continue;
}
}
parse(path, max_path_length, |source| {
Query::new(&language, str::from_utf8(source).unwrap())
.with_context(|| format!("Query file path: {}", path.display()))
.expect("Failed to parse query");
});
}
info!(" Parsing Valid Code:");
let mut normal_speeds = Vec::new();
for example_path in example_paths {
if let Some(filter) = EXAMPLE_FILTER.as_ref() {
if !example_path.to_str().unwrap().contains(filter.as_str()) {
continue;
}
}
normal_speeds.push(parse(example_path, max_path_length, |code| {
parser.parse(code, None).expect("Failed to parse");
}));
}
info!(" Parsing Invalid Code (mismatched languages):");
let mut error_speeds = Vec::new();
for (other_language_path, (example_paths, _)) in
EXAMPLE_AND_QUERY_PATHS_BY_LANGUAGE_DIR.iter()
{
if other_language_path != language_path {
for example_path in example_paths {
if let Some(filter) = EXAMPLE_FILTER.as_ref() {
if !example_path.to_str().unwrap().contains(filter.as_str()) {
continue;
}
}
error_speeds.push(parse(example_path, max_path_length, |code| {
parser.parse(code, None).expect("Failed to parse");
}));
}
}
}
if let Some((average_normal, worst_normal)) = aggregate(&normal_speeds) {
info!(" Average Speed (normal): {average_normal} bytes/ms");
info!(" Worst Speed (normal): {worst_normal} bytes/ms");
}
if let Some((average_error, worst_error)) = aggregate(&error_speeds) {
info!(" Average Speed (errors): {average_error} bytes/ms");
info!(" Worst Speed (errors): {worst_error} bytes/ms");
}
all_normal_speeds.extend(normal_speeds);
all_error_speeds.extend(error_speeds);
}
info!("\n Overall");
if let Some((average_normal, worst_normal)) = aggregate(&all_normal_speeds) {
info!(" Average Speed (normal): {average_normal} bytes/ms");
info!(" Worst Speed (normal): {worst_normal} bytes/ms");
}
if let Some((average_error, worst_error)) = aggregate(&all_error_speeds) {
info!(" Average Speed (errors): {average_error} bytes/ms");
info!(" Worst Speed (errors): {worst_error} bytes/ms");
}
info!("");
}
fn aggregate(speeds: &[usize]) -> Option<(usize, usize)> {
if speeds.is_empty() {
return None;
}
let mut total = 0;
let mut max = usize::MAX;
for speed in speeds.iter().copied() {
total += speed;
if speed < max {
max = speed;
}
}
Some((total / speeds.len(), max))
}
fn parse(path: &Path, max_path_length: usize, mut action: impl FnMut(&[u8])) -> usize {
let source_code = fs::read(path)
.with_context(|| format!("Failed to read {}", path.display()))
.unwrap();
let time = Instant::now();
for _ in 0..*REPETITION_COUNT {
action(&source_code);
}
let duration = time.elapsed() / (*REPETITION_COUNT as u32);
let duration_ns = duration.as_nanos();
let speed = ((source_code.len() as u128) * 1_000_000) / duration_ns;
info!(
" {:max_path_length$}\ttime {:>7.2} ms\t\tspeed {speed:>6} bytes/ms",
path.file_name().unwrap().to_str().unwrap(),
(duration_ns as f64) / 1e6,
);
speed as usize
}
fn get_language(path: &Path) -> Language {
let src_path = GRAMMARS_DIR.join(path).join("src");
TEST_LOADER
.load_language_at_path(CompileConfig::new(&src_path, None, None))
.with_context(|| format!("Failed to load language at path {}", src_path.display()))
.unwrap()
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/highlight/src/highlight.rs | crates/highlight/src/highlight.rs | #![cfg_attr(not(any(test, doctest)), doc = include_str!("../README.md"))]
pub mod c_lib;
use core::slice;
use std::{
collections::HashSet,
iter,
marker::PhantomData,
mem::{self, MaybeUninit},
ops::{self, ControlFlow},
str,
sync::{
atomic::{AtomicUsize, Ordering},
LazyLock,
},
};
pub use c_lib as c;
use streaming_iterator::StreamingIterator;
use thiserror::Error;
use tree_sitter::{
ffi, Language, LossyUtf8, Node, ParseOptions, Parser, Point, Query, QueryCapture,
QueryCaptures, QueryCursor, QueryError, QueryMatch, Range, TextProvider, Tree,
};
// Tuning constants. NOTE(review): names suggest the cancellation flag is
// polled every `CANCELLATION_CHECK_INTERVAL` iterations and that the two
// `BUFFER_*` values pre-size the HTML renderer's buffers — confirm at the
// use sites further down this file.
const CANCELLATION_CHECK_INTERVAL: usize = 100;
const BUFFER_HTML_RESERVE_CAPACITY: usize = 10 * 1024;
const BUFFER_LINES_RESERVE_CAPACITY: usize = 1000;
/// The set of capture names that tree-sitter treats as standard highlight
/// captures.
static STANDARD_CAPTURE_NAMES: LazyLock<HashSet<&'static str>> = LazyLock::new(|| {
    // A plain array collects directly into the set, avoiding the
    // intermediate heap `Vec` that `vec![...]` allocated (clippy:
    // `useless_vec`).
    [
        "attribute",
        "boolean",
        "carriage-return",
        "comment",
        "comment.documentation",
        "constant",
        "constant.builtin",
        "constructor",
        "constructor.builtin",
        "embedded",
        "error",
        "escape",
        "function",
        "function.builtin",
        "keyword",
        "markup",
        "markup.bold",
        "markup.heading",
        "markup.italic",
        "markup.link",
        "markup.link.url",
        "markup.list",
        "markup.list.checked",
        "markup.list.numbered",
        "markup.list.unchecked",
        "markup.list.unnumbered",
        "markup.quote",
        "markup.raw",
        "markup.raw.block",
        "markup.raw.inline",
        "markup.strikethrough",
        "module",
        "number",
        "operator",
        "property",
        "property.builtin",
        "punctuation",
        "punctuation.bracket",
        "punctuation.delimiter",
        "punctuation.special",
        "string",
        "string.escape",
        "string.regexp",
        "string.special",
        "string.special.symbol",
        "tag",
        "type",
        "type.builtin",
        "variable",
        "variable.builtin",
        "variable.member",
        "variable.parameter",
    ]
    .into_iter()
    .collect()
});
/// Indicates which highlight should be applied to a region of source code.
///
/// The wrapped index refers to a position in the caller's list of
/// recognized highlight names.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct Highlight(pub usize);

/// Represents the reason why syntax highlighting failed.
#[derive(Debug, Error, PartialEq, Eq)]
pub enum Error {
    #[error("Cancelled")]
    Cancelled,
    #[error("Invalid language")]
    InvalidLanguage,
    #[error("Unknown error")]
    Unknown,
}

/// Represents a single step in rendering a syntax-highlighted document.
#[derive(Copy, Clone, Debug)]
pub enum HighlightEvent {
    /// Emit the source text between byte offsets `start` and `end`.
    Source { start: usize, end: usize },
    /// Begin a region with the given highlight.
    HighlightStart(Highlight),
    /// End the most recently started highlight region.
    HighlightEnd,
}

/// Contains the data needed to highlight code written in a particular language.
///
/// This struct is immutable and can be shared between threads.
pub struct HighlightConfiguration {
    pub language: Language,
    pub language_name: String,
    pub query: Query,
    // NOTE(review): the private fields below are filled in by a constructor
    // outside this chunk. Names suggest: pattern-index boundaries for the
    // locals/highlights sections of the query, the resolved highlight per
    // capture index, and the capture indices of the special `injection.*` /
    // `local.*` captures when present — confirm against the constructor.
    combined_injections_query: Option<Query>,
    locals_pattern_index: usize,
    highlights_pattern_index: usize,
    highlight_indices: Vec<Option<Highlight>>,
    non_local_variable_patterns: Vec<bool>,
    injection_content_capture_index: Option<u32>,
    injection_language_capture_index: Option<u32>,
    local_scope_capture_index: Option<u32>,
    local_def_capture_index: Option<u32>,
    local_ref_capture_index: Option<u32>,
}
/// Performs syntax highlighting, recognizing a given list of highlight names.
///
/// For the best performance `Highlighter` values should be reused between
/// syntax highlighting calls. A separate highlighter is needed for each thread that
/// is performing highlighting.
pub struct Highlighter {
    pub parser: Parser,
    // NOTE(review): appears to be a pool of query cursors reused across
    // highlighting runs — confirm at the use sites later in this file.
    cursors: Vec<QueryCursor>,
}

/// Converts a general-purpose syntax highlighting iterator into a sequence of lines of HTML.
pub struct HtmlRenderer {
    pub html: Vec<u8>,
    pub line_offsets: Vec<u32>,
    carriage_return_highlight: Option<Highlight>,
    // The offset in `self.html` of the last carriage return.
    last_carriage_return: Option<usize>,
}

/// One tracked local definition: its name, the byte range of its value, and
/// the highlight chosen for it.
#[derive(Debug)]
struct LocalDef<'a> {
    name: &'a str,
    value_range: ops::Range<usize>,
    highlight: Option<Highlight>,
}

/// A lexical scope: the byte range it covers, the definitions it holds, and
/// whether it inherits definitions from enclosing scopes.
#[derive(Debug)]
struct LocalScope<'a> {
    inherits: bool,
    range: ops::Range<usize>,
    local_defs: Vec<LocalDef<'a>>,
}

// Stateful driver of a highlighting run; the `Iterator` impl that consumes
// this state lives later in the file.
struct HighlightIter<'a, F>
where
    F: FnMut(&str) -> Option<&'a HighlightConfiguration> + 'a,
{
    source: &'a [u8],
    language_name: &'a str,
    byte_offset: usize,
    highlighter: &'a mut Highlighter,
    // Maps an injected language name to its configuration, if available.
    injection_callback: F,
    cancellation_flag: Option<&'a AtomicUsize>,
    // One layer per (possibly injected) language being highlighted.
    layers: Vec<HighlightIterLayer<'a>>,
    iter_count: usize,
    next_event: Option<HighlightEvent>,
    last_highlight_range: Option<(usize, usize, usize)>,
}

// Per-language-layer state: the parsed tree (kept alive so captures borrow
// valid data), the capture iterator, and local-scope tracking stacks.
struct HighlightIterLayer<'a> {
    _tree: Tree,
    cursor: QueryCursor,
    captures: iter::Peekable<_QueryCaptures<'a, 'a, &'a [u8], &'a [u8]>>,
    config: &'a HighlightConfiguration,
    highlight_end_stack: Vec<usize>,
    scope_stack: Vec<LocalScope<'a>>,
    ranges: Vec<Range>,
    depth: usize,
}
// NOTE(review): this appears to duplicate tree-sitter's internal
// `QueryCaptures` iterator so that this crate can drive the raw query
// cursor itself. The `Iterator` impl below transmutes `_QueryMatch` into
// `QueryMatch`, which requires the layouts to stay field-for-field
// identical with the upstream types — confirm when bumping tree-sitter.
pub struct _QueryCaptures<'query, 'tree: 'query, T: TextProvider<I>, I: AsRef<[u8]>> {
    ptr: *mut ffi::TSQueryCursor,
    query: &'query Query,
    text_provider: T,
    // Scratch buffers used when evaluating text predicates.
    buffer1: Vec<u8>,
    buffer2: Vec<u8>,
    _current_match: Option<(QueryMatch<'query, 'tree>, usize)>,
    _options: Option<*mut ffi::TSQueryCursorOptions>,
    _phantom: PhantomData<(&'tree (), I)>,
}

// Layout mirror of `tree_sitter::QueryMatch` (see the transmute below).
struct _QueryMatch<'cursor, 'tree> {
    pub _pattern_index: usize,
    pub _captures: &'cursor [QueryCapture<'tree>],
    _id: u32,
    _cursor: *mut ffi::TSQueryCursor,
}
impl<'tree> _QueryMatch<'_, 'tree> {
    /// Build a match wrapper from the raw FFI match record.
    fn new(m: &ffi::TSQueryMatch, cursor: *mut ffi::TSQueryCursor) -> Self {
        _QueryMatch {
            _cursor: cursor,
            _id: m.id,
            _pattern_index: m.pattern_index as usize,
            // A capture slice is only built when captures exist, so a
            // slice is never constructed from a potentially null/dangling
            // pointer; otherwise an empty slice is substituted.
            _captures: (m.capture_count > 0)
                .then(|| unsafe {
                    slice::from_raw_parts(
                        m.captures.cast::<QueryCapture<'tree>>(),
                        m.capture_count as usize,
                    )
                })
                .unwrap_or_default(),
        }
    }
}
// Yields `(match, capture_index)` pairs from the raw query cursor, skipping
// matches whose text predicates fail.
impl<'query, 'tree: 'query, T: TextProvider<I>, I: AsRef<[u8]>> Iterator
    for _QueryCaptures<'query, 'tree, T, I>
{
    type Item = (QueryMatch<'query, 'tree>, usize);
    fn next(&mut self) -> Option<Self::Item> {
        unsafe {
            loop {
                let mut capture_index = 0u32;
                let mut m = MaybeUninit::<ffi::TSQueryMatch>::uninit();
                // `ts_query_cursor_next_capture` returns false when exhausted;
                // on success it initializes `m` and `capture_index`.
                if ffi::ts_query_cursor_next_capture(
                    self.ptr,
                    m.as_mut_ptr(),
                    core::ptr::addr_of_mut!(capture_index),
                ) {
                    // Transmute the layout-mirror match into the public type so
                    // its predicate/remove methods can be used.
                    let result = std::mem::transmute::<_QueryMatch, QueryMatch>(_QueryMatch::new(
                        &m.assume_init(),
                        self.ptr,
                    ));
                    if result.satisfies_text_predicates(
                        self.query,
                        &mut self.buffer1,
                        &mut self.buffer2,
                        &mut self.text_provider,
                    ) {
                        return Some((result, capture_index as usize));
                    }
                    // Predicates failed: drop this match from the cursor and
                    // continue with the next capture.
                    result.remove();
                } else {
                    return None;
                }
            }
        }
    }
}
impl Default for Highlighter {
fn default() -> Self {
Self::new()
}
}
impl Highlighter {
    /// Creates a highlighter with a fresh parser and an empty cursor pool.
    #[must_use]
    pub fn new() -> Self {
        Self {
            parser: Parser::new(),
            cursors: Vec::new(),
        }
    }
    /// Returns a mutable reference to the underlying parser.
    pub const fn parser(&mut self) -> &mut Parser {
        &mut self.parser
    }
    /// Iterate over the highlighted regions for a given slice of source code.
    ///
    /// `injection_callback` resolves injected language names to their
    /// configurations; returning `None` skips that injection.
    ///
    /// Errors are propagated from layer construction: `Error::InvalidLanguage`
    /// if the language cannot be assigned to the parser, `Error::Cancelled` if
    /// `cancellation_flag` became non-zero during parsing.
    pub fn highlight<'a>(
        &'a mut self,
        config: &'a HighlightConfiguration,
        source: &'a [u8],
        cancellation_flag: Option<&'a AtomicUsize>,
        mut injection_callback: impl FnMut(&str) -> Option<&'a HighlightConfiguration> + 'a,
    ) -> Result<impl Iterator<Item = Result<HighlightEvent, Error>> + 'a, Error> {
        // The root layer covers the entire document; injections discovered
        // while building it are added as additional layers.
        let layers = HighlightIterLayer::new(
            source,
            None,
            self,
            cancellation_flag,
            &mut injection_callback,
            config,
            0,
            vec![Range {
                start_byte: 0,
                end_byte: usize::MAX,
                start_point: Point::new(0, 0),
                end_point: Point::new(usize::MAX, usize::MAX),
            }],
        )?;
        // At least the root layer must exist for the iterator to make progress.
        assert_ne!(layers.len(), 0);
        let mut result = HighlightIter {
            source,
            language_name: &config.language_name,
            byte_offset: 0,
            injection_callback,
            cancellation_flag,
            highlighter: self,
            iter_count: 0,
            layers,
            next_event: None,
            last_highlight_range: None,
        };
        // Put the layer with the earliest boundary first before iteration begins.
        result.sort_layers();
        Ok(result)
    }
}
impl HighlightConfiguration {
    /// Creates a `HighlightConfiguration` for a given `Language` and set of highlighting
    /// queries.
    ///
    /// # Parameters
    ///
    /// * `language` - The Tree-sitter `Language` that should be used for parsing.
    /// * `highlights_query` - A string containing tree patterns for syntax highlighting. This
    ///   should be non-empty, otherwise no syntax highlights will be added.
    /// * `injection_query` - A string containing tree patterns for injecting other languages into
    ///   the document. This can be empty if no injections are desired.
    /// * `locals_query` - A string containing tree patterns for tracking local variable
    ///   definitions and references. This can be empty if local variable tracking is not needed.
    ///
    /// Returns a `HighlightConfiguration` that can then be used with the `highlight` method.
    ///
    /// # Errors
    ///
    /// Returns a [`QueryError`] if any of the query strings fail to compile for `language`.
    pub fn new(
        language: Language,
        name: impl Into<String>,
        highlights_query: &str,
        injection_query: &str,
        locals_query: &str,
    ) -> Result<Self, QueryError> {
        // Concatenate the query strings, keeping track of the start offset of each section.
        let mut query_source = String::with_capacity(
            injection_query.len() + locals_query.len() + highlights_query.len(),
        );
        query_source.push_str(injection_query);
        let locals_query_offset = injection_query.len();
        query_source.push_str(locals_query);
        let highlights_query_offset = injection_query.len() + locals_query.len();
        query_source.push_str(highlights_query);
        // Construct a single query from the concatenated source, then record the
        // range of pattern indices that belong to each individual section by
        // comparing each pattern's start offset to the section boundaries.
        let mut query = Query::new(&language, &query_source)?;
        let mut locals_pattern_index = 0;
        let mut highlights_pattern_index = 0;
        for i in 0..query.pattern_count() {
            let pattern_offset = query.start_byte_for_pattern(i);
            // FIX: the original code re-tested `pattern_offset < highlights_query_offset`
            // in a redundant nested `if`; the duplicate check is removed here.
            if pattern_offset < highlights_query_offset {
                highlights_pattern_index += 1;
                if pattern_offset < locals_query_offset {
                    locals_pattern_index += 1;
                }
            }
        }
        // Construct a separate query just for dealing with the 'combined injections'.
        // Each injection pattern lives in exactly one of the two queries: combined
        // patterns are disabled in the main query, all others in the combined query.
        let mut combined_injections_query = Query::new(&language, injection_query)?;
        let mut has_combined_queries = false;
        for pattern_index in 0..locals_pattern_index {
            let settings = query.property_settings(pattern_index);
            if settings.iter().any(|s| &*s.key == "injection.combined") {
                has_combined_queries = true;
                query.disable_pattern(pattern_index);
            } else {
                combined_injections_query.disable_pattern(pattern_index);
            }
        }
        // Keep the combined query only if at least one pattern actually uses it.
        let combined_injections_query = has_combined_queries.then_some(combined_injections_query);
        // Find all of the highlighting patterns that are disabled for nodes that
        // have been identified as local variables.
        let non_local_variable_patterns = (0..query.pattern_count())
            .map(|i| {
                query
                    .property_predicates(i)
                    .iter()
                    .any(|(prop, positive)| !*positive && prop.key.as_ref() == "local")
            })
            .collect();
        // Store the numeric ids for all of the special captures.
        let mut injection_content_capture_index = None;
        let mut injection_language_capture_index = None;
        let mut local_def_capture_index = None;
        let mut local_def_value_capture_index = None;
        let mut local_ref_capture_index = None;
        let mut local_scope_capture_index = None;
        for (i, name) in query.capture_names().iter().enumerate() {
            let i = Some(i as u32);
            match *name {
                "injection.content" => injection_content_capture_index = i,
                "injection.language" => injection_language_capture_index = i,
                "local.definition" => local_def_capture_index = i,
                "local.definition-value" => local_def_value_capture_index = i,
                "local.reference" => local_ref_capture_index = i,
                "local.scope" => local_scope_capture_index = i,
                _ => {}
            }
        }
        // All captures start out unmapped; `configure` fills these in.
        let highlight_indices = vec![None; query.capture_names().len()];
        Ok(Self {
            language,
            language_name: name.into(),
            query,
            combined_injections_query,
            locals_pattern_index,
            highlights_pattern_index,
            highlight_indices,
            non_local_variable_patterns,
            injection_content_capture_index,
            injection_language_capture_index,
            local_def_capture_index,
            local_def_value_capture_index,
            local_ref_capture_index,
            local_scope_capture_index,
        })
    }
    /// Get a slice containing all of the highlight names used in the configuration.
    #[must_use]
    pub const fn names(&self) -> &[&str] {
        self.query.capture_names()
    }
    /// Set the list of recognized highlight names.
    ///
    /// Tree-sitter syntax-highlighting queries specify highlights in the form of dot-separated
    /// highlight names like `punctuation.bracket` and `function.method.builtin`. Consumers of
    /// these queries can choose to recognize highlights with different levels of specificity.
    /// For example, the string `function.builtin` will match against `function.method.builtin`
    /// and `function.builtin.constructor`, but will not match `function.method`.
    ///
    /// When highlighting, results are returned as `Highlight` values, which contain the index
    /// of the matched highlight this list of highlight names.
    pub fn configure(&mut self, recognized_names: &[impl AsRef<str>]) {
        let mut capture_parts = Vec::new();
        self.highlight_indices.clear();
        self.highlight_indices
            .extend(self.query.capture_names().iter().map(move |capture_name| {
                capture_parts.clear();
                capture_parts.extend(capture_name.split('.'));
                // Pick the recognized name that matches the capture with the
                // most dot-separated parts (highest specificity).
                let mut best_index = None;
                let mut best_match_len = 0;
                for (i, recognized_name) in recognized_names.iter().enumerate() {
                    let mut len = 0;
                    let mut matches = true;
                    for part in recognized_name.as_ref().split('.') {
                        len += 1;
                        if !capture_parts.contains(&part) {
                            matches = false;
                            break;
                        }
                    }
                    if matches && len > best_match_len {
                        best_index = Some(i);
                        best_match_len = len;
                    }
                }
                best_index.map(Highlight)
            }));
    }
    // Return the list of this configuration's capture names that are neither present in the
    // list of predefined 'canonical' names nor start with an underscore (denoting 'private'
    // captures used as part of capture internals).
    #[must_use]
    pub fn nonconformant_capture_names(&self, capture_names: &HashSet<&str>) -> Vec<&str> {
        // An empty set means "use the standard canonical names".
        let capture_names = if capture_names.is_empty() {
            &*STANDARD_CAPTURE_NAMES
        } else {
            capture_names
        };
        self.names()
            .iter()
            .filter(|&n| !(n.starts_with('_') || capture_names.contains(n)))
            .copied()
            .collect()
    }
}
impl<'a> HighlightIterLayer<'a> {
    /// Create a new 'layer' of highlighting for this document.
    ///
    /// In the event that the new layer contains "combined injections" (injections where multiple
    /// disjoint ranges are parsed as one syntax tree), these will be eagerly processed and
    /// added to the returned vector.
    ///
    /// Returns `Error::InvalidLanguage` if a layer's language cannot be assigned to the
    /// parser, and `Error::Cancelled` if parsing is interrupted via `cancellation_flag`.
    #[allow(clippy::too_many_arguments)]
    fn new<F: FnMut(&str) -> Option<&'a HighlightConfiguration> + 'a>(
        source: &'a [u8],
        parent_name: Option<&str>,
        highlighter: &mut Highlighter,
        cancellation_flag: Option<&'a AtomicUsize>,
        injection_callback: &mut F,
        mut config: &'a HighlightConfiguration,
        mut depth: usize,
        mut ranges: Vec<Range>,
    ) -> Result<Vec<Self>, Error> {
        let mut result = Vec::with_capacity(1);
        // Work queue of pending injected layers: (config, depth, ranges).
        let mut queue = Vec::new();
        loop {
            // NOTE(review): if the included ranges are rejected, this layer is
            // silently skipped and only previously queued injections are processed.
            if highlighter.parser.set_included_ranges(&ranges).is_ok() {
                highlighter
                    .parser
                    .set_language(&config.language)
                    .map_err(|_| Error::InvalidLanguage)?;
                let tree = highlighter
                    .parser
                    .parse_with_options(
                        // Serve the remaining source starting at byte `i`.
                        &mut |i, _| {
                            if i < source.len() {
                                &source[i..]
                            } else {
                                &[]
                            }
                        },
                        None,
                        // Abort parsing as soon as the cancellation flag is set.
                        Some(ParseOptions::new().progress_callback(&mut |_| {
                            if let Some(cancellation_flag) = cancellation_flag {
                                if cancellation_flag.load(Ordering::SeqCst) != 0 {
                                    ControlFlow::Break(())
                                } else {
                                    ControlFlow::Continue(())
                                }
                            } else {
                                ControlFlow::Continue(())
                            }
                        })),
                    )
                    .ok_or(Error::Cancelled)?;
                // Reuse a pooled cursor when available.
                let mut cursor = highlighter.cursors.pop().unwrap_or_default();
                // Process combined injections.
                if let Some(combined_injections_query) = &config.combined_injections_query {
                    // Per pattern: (language name, content nodes, include_children flag).
                    let mut injections_by_pattern_index =
                        vec![(None, Vec::new(), false); combined_injections_query.pattern_count()];
                    let mut matches =
                        cursor.matches(combined_injections_query, tree.root_node(), source);
                    while let Some(mat) = matches.next() {
                        let entry = &mut injections_by_pattern_index[mat.pattern_index];
                        let (language_name, content_node, include_children) = injection_for_match(
                            config,
                            parent_name,
                            combined_injections_query,
                            mat,
                            source,
                        );
                        if language_name.is_some() {
                            entry.0 = language_name;
                        }
                        if let Some(content_node) = content_node {
                            entry.1.push(content_node);
                        }
                        entry.2 = include_children;
                    }
                    // Queue one nested layer per pattern that produced both a
                    // language name and at least one content node.
                    for (lang_name, content_nodes, includes_children) in injections_by_pattern_index
                    {
                        if let (Some(lang_name), false) = (lang_name, content_nodes.is_empty()) {
                            if let Some(next_config) = (injection_callback)(lang_name) {
                                let ranges = Self::intersect_ranges(
                                    &ranges,
                                    &content_nodes,
                                    includes_children,
                                );
                                if !ranges.is_empty() {
                                    queue.push((next_config, depth + 1, ranges));
                                }
                            }
                        }
                    }
                }
                // The `captures` iterator borrows the `Tree` and the `QueryCursor`, which
                // prevents them from being moved. But both of these values are really just
                // pointers, so it's actually ok to move them.
                let tree_ref = unsafe { mem::transmute::<&Tree, &'static Tree>(&tree) };
                let cursor_ref = unsafe {
                    mem::transmute::<&mut QueryCursor, &'static mut QueryCursor>(&mut cursor)
                };
                // Transmute into the local layout-mirror type so the capture
                // stream's internals (buffers, predicates) are accessible here.
                let captures = unsafe {
                    std::mem::transmute::<QueryCaptures<_, _>, _QueryCaptures<_, _>>(
                        cursor_ref.captures(&config.query, tree_ref.root_node(), source),
                    )
                }
                .peekable();
                result.push(HighlightIterLayer {
                    highlight_end_stack: Vec::new(),
                    // The root scope covers the whole document and does not inherit.
                    scope_stack: vec![LocalScope {
                        inherits: false,
                        range: 0..usize::MAX,
                        local_defs: Vec::new(),
                    }],
                    cursor,
                    depth,
                    _tree: tree,
                    captures,
                    config,
                    ranges,
                });
            }
            if queue.is_empty() {
                break;
            }
            // Continue with the next queued injection layer.
            let (next_config, next_depth, next_ranges) = queue.remove(0);
            config = next_config;
            depth = next_depth;
            ranges = next_ranges;
        }
        Ok(result)
    }
    // Compute the ranges that should be included when parsing an injection.
    // This takes into account three things:
    // * `parent_ranges` - The ranges must all fall within the *current* layer's ranges.
    // * `nodes` - Every injection takes place within a set of nodes. The injection ranges are the
    //   ranges of those nodes.
    // * `includes_children` - For some injections, the content nodes' children should be excluded
    //   from the nested document, so that only the content nodes' *own* content is reparsed. For
    //   other injections, the content nodes' entire ranges should be reparsed, including the ranges
    //   of their children.
    fn intersect_ranges(
        parent_ranges: &[Range],
        nodes: &[Node],
        includes_children: bool,
    ) -> Vec<Range> {
        let mut cursor = nodes[0].walk();
        let mut result = Vec::new();
        let mut parent_range_iter = parent_ranges.iter();
        let mut parent_range = parent_range_iter
            .next()
            .expect("Layers should only be constructed with non-empty ranges vectors");
        for node in nodes {
            // Candidate ranges are the gaps between consecutive "excluded"
            // ranges: the node's children (unless children are included) plus a
            // sentinel range after the node's end.
            let mut preceding_range = Range {
                start_byte: 0,
                start_point: Point::new(0, 0),
                end_byte: node.start_byte(),
                end_point: node.start_position(),
            };
            let following_range = Range {
                start_byte: node.end_byte(),
                start_point: node.end_position(),
                end_byte: usize::MAX,
                end_point: Point::new(usize::MAX, usize::MAX),
            };
            for excluded_range in node
                .children(&mut cursor)
                .filter_map(|child| {
                    if includes_children {
                        None
                    } else {
                        Some(child.range())
                    }
                })
                .chain(std::iter::once(following_range))
            {
                // The candidate range spans from the previous excluded range's
                // end to this excluded range's start.
                let mut range = Range {
                    start_byte: preceding_range.end_byte,
                    start_point: preceding_range.end_point,
                    end_byte: excluded_range.start_byte,
                    end_point: excluded_range.start_point,
                };
                preceding_range = excluded_range;
                if range.end_byte < parent_range.start_byte {
                    continue;
                }
                // Clip the candidate range against each overlapping parent range,
                // emitting one output range per intersection.
                while parent_range.start_byte <= range.end_byte {
                    if parent_range.end_byte > range.start_byte {
                        if range.start_byte < parent_range.start_byte {
                            range.start_byte = parent_range.start_byte;
                            range.start_point = parent_range.start_point;
                        }
                        if parent_range.end_byte < range.end_byte {
                            if range.start_byte < parent_range.end_byte {
                                result.push(Range {
                                    start_byte: range.start_byte,
                                    start_point: range.start_point,
                                    end_byte: parent_range.end_byte,
                                    end_point: parent_range.end_point,
                                });
                            }
                            range.start_byte = parent_range.end_byte;
                            range.start_point = parent_range.end_point;
                        } else {
                            if range.start_byte < range.end_byte {
                                result.push(range);
                            }
                            break;
                        }
                    }
                    if let Some(next_range) = parent_range_iter.next() {
                        parent_range = next_range;
                    } else {
                        // No more parent ranges: nothing further can intersect.
                        return result;
                    }
                }
            }
        }
        result
    }
    // First, sort scope boundaries by their byte offset in the document. At a
    // given position, emit scope endings before scope beginnings. Finally, emit
    // scope boundaries from deeper layers first.
    fn sort_key(&mut self) -> Option<(usize, bool, isize)> {
        // Depth is negated so that deeper layers compare as smaller (earlier).
        let depth = -(self.depth as isize);
        let next_start = self
            .captures
            .peek()
            .map(|(m, i)| m.captures[*i].node.start_byte());
        let next_end = self.highlight_end_stack.last().copied();
        match (next_start, next_end) {
            (Some(start), Some(end)) => {
                if start < end {
                    Some((start, true, depth))
                } else {
                    Some((end, false, depth))
                }
            }
            (Some(i), None) => Some((i, true, depth)),
            (None, Some(j)) => Some((j, false, depth)),
            // Layer is exhausted.
            _ => None,
        }
    }
}
impl<'a, F> HighlightIter<'a, F>
where
    F: FnMut(&str) -> Option<&'a HighlightConfiguration> + 'a,
{
    // Emits `event` at `offset`. If un-emitted source text precedes `offset`,
    // a `Source` event is returned first and `event` is stashed in
    // `next_event` for the following call.
    fn emit_event(
        &mut self,
        offset: usize,
        event: Option<HighlightEvent>,
    ) -> Option<Result<HighlightEvent, Error>> {
        let result;
        if self.byte_offset < offset {
            result = Some(Ok(HighlightEvent::Source {
                start: self.byte_offset,
                end: offset,
            }));
            self.byte_offset = offset;
            self.next_event = event;
        } else {
            result = event.map(Ok);
        }
        // Re-establish the layer ordering before the next event is produced.
        self.sort_layers();
        result
    }
    // Moves the layer with the smallest sort key to the front of `layers`,
    // discarding exhausted layers (and recycling their cursors) along the way.
    fn sort_layers(&mut self) {
        while !self.layers.is_empty() {
            if let Some(sort_key) = self.layers[0].sort_key() {
                // Find the run of layers that should precede the current front.
                let mut i = 0;
                while i + 1 < self.layers.len() {
                    if let Some(next_offset) = self.layers[i + 1].sort_key() {
                        if next_offset < sort_key {
                            i += 1;
                            continue;
                        }
                    }
                    break;
                }
                if i > 0 {
                    // Rotate so the earliest layer ends up at index 0.
                    self.layers[0..=i].rotate_left(1);
                }
                break;
            }
            // Front layer is exhausted: drop it and return its cursor to the pool.
            let layer = self.layers.remove(0);
            self.highlighter.cursors.push(layer.cursor);
        }
    }
    // Inserts `layer` at its sorted position (never index 0, which holds the
    // layer currently being processed), pruning exhausted layers it passes.
    fn insert_layer(&mut self, mut layer: HighlightIterLayer<'a>) {
        if let Some(sort_key) = layer.sort_key() {
            let mut i = 1;
            while i < self.layers.len() {
                if let Some(sort_key_i) = self.layers[i].sort_key() {
                    if sort_key_i > sort_key {
                        self.layers.insert(i, layer);
                        return;
                    }
                    i += 1;
                } else {
                    // Exhausted layer encountered during the scan: remove it.
                    self.layers.remove(i);
                }
            }
            self.layers.push(layer);
        }
    }
}
impl<'a, F> Iterator for HighlightIter<'a, F>
where
F: FnMut(&str) -> Option<&'a HighlightConfiguration> + 'a,
{
type Item = Result<HighlightEvent, Error>;
fn next(&mut self) -> Option<Self::Item> {
'main: loop {
// If we've already determined the next highlight boundary, just return it.
if let Some(e) = self.next_event.take() {
return Some(Ok(e));
}
// Periodically check for cancellation, returning `Cancelled` error if the
// cancellation flag was flipped.
if let Some(cancellation_flag) = self.cancellation_flag {
self.iter_count += 1;
if self.iter_count >= CANCELLATION_CHECK_INTERVAL {
self.iter_count = 0;
if cancellation_flag.load(Ordering::Relaxed) != 0 {
return Some(Err(Error::Cancelled));
}
}
}
// If none of the layers have any more highlight boundaries, terminate.
if self.layers.is_empty() {
return if self.byte_offset < self.source.len() {
let result = Some(Ok(HighlightEvent::Source {
start: self.byte_offset,
end: self.source.len(),
}));
self.byte_offset = self.source.len();
result
} else {
None
};
}
// Get the next capture from whichever layer has the earliest highlight boundary.
let range;
let layer = &mut self.layers[0];
if let Some((next_match, capture_index)) = layer.captures.peek() {
let next_capture = next_match.captures[*capture_index];
range = next_capture.node.byte_range();
// If any previous highlight ends before this node starts, then before
// processing this capture, emit the source code up until the end of the
// previous highlight, and an end event for that highlight.
if let Some(end_byte) = layer.highlight_end_stack.last().copied() {
if end_byte <= range.start {
layer.highlight_end_stack.pop();
return self.emit_event(end_byte, Some(HighlightEvent::HighlightEnd));
}
}
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | true |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/highlight/src/c_lib.rs | crates/highlight/src/c_lib.rs | use std::{
collections::HashMap, ffi::CStr, fmt, os::raw::c_char, process::abort, slice, str,
sync::atomic::AtomicUsize,
};
use regex::Regex;
use tree_sitter::Language;
use super::{Error, Highlight, HighlightConfiguration, Highlighter, HtmlRenderer};
/// C-facing highlighter registry: maps scope names to language configurations
/// and holds the HTML attribute strings used when rendering.
pub struct TSHighlighter {
    /// Registered languages, keyed by scope name, each with an optional
    /// injection-matching regex.
    pub languages: HashMap<String, (Option<Regex>, HighlightConfiguration)>,
    /// HTML attribute bytes emitted for each highlight index.
    pub attribute_strings: Vec<&'static [u8]>,
    /// The recognized highlight names, in index order.
    pub highlight_names: Vec<String>,
    /// Index of the special "carriage-return" highlight name, if present.
    pub carriage_return_index: Option<usize>,
}
/// C-facing output buffer pairing a reusable highlighter with an HTML renderer.
pub struct TSHighlightBuffer {
    // Performs the parsing and highlight iteration.
    highlighter: Highlighter,
    // Accumulates the rendered HTML and line offsets.
    renderer: HtmlRenderer,
}
/// Status codes returned to C callers by the highlighter FFI functions.
#[repr(C)]
pub enum ErrorCode {
    /// The operation completed successfully.
    Ok,
    /// No language is registered for the requested scope name.
    UnknownScope,
    /// Highlighting was cancelled or failed for an unknown reason.
    Timeout,
    /// The language could not be assigned to the parser.
    InvalidLanguage,
    /// A string argument was not valid UTF-8.
    InvalidUtf8,
    /// The injection regex failed to compile.
    InvalidRegex,
    /// One of the query strings failed to compile.
    InvalidQuery,
    /// The language name string was not valid UTF-8.
    InvalidLanguageName,
}
/// Create a new [`TSHighlighter`] instance.
///
/// # Safety
///
/// The caller must ensure that the `highlight_names` and `attribute_strings` arrays are valid for
/// the lifetime of the returned [`TSHighlighter`] instance, are non-null, and each contain at
/// least `highlight_count` entries.
#[no_mangle]
pub unsafe extern "C" fn ts_highlighter_new(
    highlight_names: *const *const c_char,
    attribute_strings: *const *const c_char,
    highlight_count: u32,
) -> *mut TSHighlighter {
    let highlight_names = slice::from_raw_parts(highlight_names, highlight_count as usize);
    let attribute_strings = slice::from_raw_parts(attribute_strings, highlight_count as usize);
    // Names are copied out lossily (invalid UTF-8 is replaced, not rejected).
    let highlight_names = highlight_names
        .iter()
        .map(|s| CStr::from_ptr(*s).to_string_lossy().to_string())
        .collect::<Vec<_>>();
    // Attribute strings are kept as raw byte slices borrowed from the caller.
    let attribute_strings = attribute_strings
        .iter()
        .map(|s| CStr::from_ptr(*s).to_bytes())
        .collect();
    // The "carriage-return" name, when present, is rendered specially.
    let carriage_return_index = highlight_names.iter().position(|s| s == "carriage-return");
    Box::into_raw(Box::new(TSHighlighter {
        languages: HashMap::new(),
        attribute_strings,
        highlight_names,
        carriage_return_index,
    }))
}
/// Add a language to a [`TSHighlighter`] instance.
///
/// Returns an [`ErrorCode`] indicating whether the language was added successfully or not.
///
/// # Safety
///
/// `this` must be non-null and must be a valid pointer to a [`TSHighlighter`] instance
/// created by [`ts_highlighter_new`].
///
/// The caller must ensure that any `*const c_char` (C-style string) parameters are valid for the
/// lifetime of the [`TSHighlighter`] instance, and are non-null.
#[no_mangle]
pub unsafe extern "C" fn ts_highlighter_add_language(
    this: *mut TSHighlighter,
    language_name: *const c_char,
    scope_name: *const c_char,
    injection_regex: *const c_char,
    language: Language,
    highlight_query: *const c_char,
    injection_query: *const c_char,
    locals_query: *const c_char,
    highlight_query_len: u32,
    injection_query_len: u32,
    locals_query_len: u32,
) -> ErrorCode {
    // Inner closure lets `?` short-circuit with an `ErrorCode` on any failure.
    let f = move || {
        let this = unwrap_mut_ptr(this);
        let scope_name = CStr::from_ptr(scope_name);
        let scope_name = scope_name
            .to_str()
            .or(Err(ErrorCode::InvalidUtf8))?
            .to_string();
        // A null regex pointer means "no injection matching" for this language.
        let injection_regex = if injection_regex.is_null() {
            None
        } else {
            let pattern = CStr::from_ptr(injection_regex);
            let pattern = pattern.to_str().or(Err(ErrorCode::InvalidUtf8))?;
            Some(Regex::new(pattern).or(Err(ErrorCode::InvalidRegex))?)
        };
        // Query strings are length-prefixed buffers, not NUL-terminated.
        let highlight_query =
            slice::from_raw_parts(highlight_query.cast::<u8>(), highlight_query_len as usize);
        let highlight_query = str::from_utf8(highlight_query).or(Err(ErrorCode::InvalidUtf8))?;
        // Injection and locals queries are optional; a zero length means empty.
        let injection_query = if injection_query_len > 0 {
            let query =
                slice::from_raw_parts(injection_query.cast::<u8>(), injection_query_len as usize);
            str::from_utf8(query).or(Err(ErrorCode::InvalidUtf8))?
        } else {
            ""
        };
        let locals_query = if locals_query_len > 0 {
            let query = slice::from_raw_parts(locals_query.cast::<u8>(), locals_query_len as usize);
            str::from_utf8(query).or(Err(ErrorCode::InvalidUtf8))?
        } else {
            ""
        };
        let lang = CStr::from_ptr(language_name)
            .to_str()
            .or(Err(ErrorCode::InvalidLanguageName))?;
        let mut config = HighlightConfiguration::new(
            language,
            lang,
            highlight_query,
            injection_query,
            locals_query,
        )
        .or(Err(ErrorCode::InvalidQuery))?;
        // Map the query's capture names onto this highlighter's name list.
        config.configure(this.highlight_names.as_slice());
        this.languages.insert(scope_name, (injection_regex, config));
        Ok(())
    };
    match f() {
        Ok(()) => ErrorCode::Ok,
        Err(e) => e,
    }
}
/// Allocates a fresh [`TSHighlightBuffer`] and returns ownership to the caller.
#[no_mangle]
pub extern "C" fn ts_highlight_buffer_new() -> *mut TSHighlightBuffer {
    let buffer = TSHighlightBuffer {
        highlighter: Highlighter::new(),
        renderer: HtmlRenderer::new(),
    };
    Box::into_raw(Box::new(buffer))
}
/// Frees a [`TSHighlighter`] previously created by [`ts_highlighter_new`].
///
/// # Safety
///
/// `this` must be non-null and must be a valid pointer to a [`TSHighlighter`] instance
/// created by [`ts_highlighter_new`].
///
/// It cannot be used after this function is called.
#[no_mangle]
pub unsafe extern "C" fn ts_highlighter_delete(this: *mut TSHighlighter) {
    // Reconstructing the box transfers ownership back; dropping it frees everything.
    let owned = Box::from_raw(this);
    drop(owned);
}
/// Frees a [`TSHighlightBuffer`] previously created by [`ts_highlight_buffer_new`].
///
/// # Safety
///
/// `this` must be non-null and must be a valid pointer to a [`TSHighlightBuffer`] instance
/// created by [`ts_highlight_buffer_new`]
///
/// It cannot be used after this function is called.
#[no_mangle]
pub unsafe extern "C" fn ts_highlight_buffer_delete(this: *mut TSHighlightBuffer) {
    // Reconstructing the box transfers ownership back; dropping it frees everything.
    let owned = Box::from_raw(this);
    drop(owned);
}
/// Returns a pointer to the rendered HTML bytes of a [`TSHighlightBuffer`].
///
/// # Safety
///
/// `this` must be non-null and must be a valid pointer to a [`TSHighlightBuffer`] instance
/// created by [`ts_highlight_buffer_new`].
///
/// The returned pointer, a C-style string, must not outlive the [`TSHighlightBuffer`] instance,
/// else the data will point to garbage.
///
/// To get the length of the HTML content, use [`ts_highlight_buffer_len`].
#[no_mangle]
pub unsafe extern "C" fn ts_highlight_buffer_content(this: *const TSHighlightBuffer) -> *const u8 {
    unwrap_ptr(this).renderer.html.as_ptr()
}
/// Returns a pointer to the per-line byte offsets of a [`TSHighlightBuffer`].
///
/// # Safety
///
/// `this` must be non-null and must be a valid pointer to a [`TSHighlightBuffer`] instance
/// created by [`ts_highlight_buffer_new`].
///
/// The returned pointer, a C-style array of [`u32`]s, must not outlive the [`TSHighlightBuffer`]
/// instance, else the data will point to garbage.
///
/// To get the length of the array, use [`ts_highlight_buffer_line_count`].
#[no_mangle]
pub unsafe extern "C" fn ts_highlight_buffer_line_offsets(
    this: *const TSHighlightBuffer,
) -> *const u32 {
    unwrap_ptr(this).renderer.line_offsets.as_ptr()
}
/// Returns the length in bytes of the HTML content of a [`TSHighlightBuffer`].
///
/// # Safety
///
/// `this` must be non-null and must be a valid pointer to a [`TSHighlightBuffer`] instance
/// created by [`ts_highlight_buffer_new`].
#[no_mangle]
pub unsafe extern "C" fn ts_highlight_buffer_len(this: *const TSHighlightBuffer) -> u32 {
    unwrap_ptr(this).renderer.html.len() as u32
}
/// Returns the number of rendered lines in a [`TSHighlightBuffer`].
///
/// # Safety
///
/// `this` must be non-null and must be a valid pointer to a [`TSHighlightBuffer`] instance
/// created by [`ts_highlight_buffer_new`].
#[no_mangle]
pub unsafe extern "C" fn ts_highlight_buffer_line_count(this: *const TSHighlightBuffer) -> u32 {
    unwrap_ptr(this).renderer.line_offsets.len() as u32
}
/// Highlight a string of source code.
///
/// # Safety
///
/// The caller must ensure that `scope_name`, `source_code`, `output`, and `cancellation_flag` are
/// valid for the lifetime of the [`TSHighlighter`] instance, and are non-null.
///
/// `this` must be a non-null pointer to a [`TSHighlighter`] instance created by
/// [`ts_highlighter_new`]
#[no_mangle]
pub unsafe extern "C" fn ts_highlighter_highlight(
    this: *const TSHighlighter,
    scope_name: *const c_char,
    source_code: *const c_char,
    source_code_len: u32,
    output: *mut TSHighlightBuffer,
    cancellation_flag: *const AtomicUsize,
) -> ErrorCode {
    let this = unwrap_ptr(this);
    let output = unwrap_mut_ptr(output);
    // A non-UTF-8 scope name aborts the process via `unwrap`.
    let scope_name = unwrap(CStr::from_ptr(scope_name).to_str());
    let source_code = slice::from_raw_parts(source_code.cast::<u8>(), source_code_len as usize);
    // A null cancellation flag becomes `None` (no cancellation checks).
    let cancellation_flag = cancellation_flag.as_ref();
    this.highlight(source_code, scope_name, output, cancellation_flag)
}
impl TSHighlighter {
    /// Highlights `source_code` using the language registered for `scope_name`,
    /// rendering the result as HTML into `output`.
    ///
    /// Returns `ErrorCode::UnknownScope` when no language is registered under
    /// `scope_name`, `ErrorCode::Timeout` when highlighting was cancelled or
    /// failed, and `ErrorCode::Ok` on success.
    fn highlight(
        &self,
        source_code: &[u8],
        scope_name: &str,
        output: &mut TSHighlightBuffer,
        cancellation_flag: Option<&AtomicUsize>,
    ) -> ErrorCode {
        // FIX: replaced the `is_none()` check followed by `unwrap()` with a
        // single `let…else` lookup.
        let Some((_, configuration)) = self.languages.get(scope_name) else {
            return ErrorCode::UnknownScope;
        };
        let languages = &self.languages;
        let highlights = output.highlighter.highlight(
            configuration,
            source_code,
            cancellation_flag,
            // Resolve injected languages by matching each registered regex
            // against the injection string.
            move |injection_string| {
                languages.values().find_map(|(injection_regex, config)| {
                    injection_regex.as_ref().and_then(|regex| {
                        if regex.is_match(injection_string) {
                            Some(config)
                        } else {
                            None
                        }
                    })
                })
            },
        );
        match highlights {
            Ok(highlights) => {
                output.renderer.reset();
                output
                    .renderer
                    .set_carriage_return_highlight(self.carriage_return_index.map(Highlight));
                // Each highlight index is rendered with its registered HTML
                // attribute bytes.
                let result = output.renderer.render(highlights, source_code, &|s, out| {
                    out.extend(self.attribute_strings[s.0]);
                });
                match result {
                    Err(Error::Cancelled | Error::Unknown) => ErrorCode::Timeout,
                    Err(Error::InvalidLanguage) => ErrorCode::InvalidLanguage,
                    Ok(()) => ErrorCode::Ok,
                }
            }
            Err(_) => ErrorCode::Timeout,
        }
    }
}
// Converts a raw const pointer into a reference, aborting the process with a
// diagnostic message if the pointer is null.
unsafe fn unwrap_ptr<'a, T>(result: *const T) -> &'a T {
    match result.as_ref() {
        Some(value) => value,
        None => {
            eprintln!("{}:{} - pointer must not be null", file!(), line!());
            abort();
        }
    }
}
// Converts a raw mutable pointer into a mutable reference, aborting the
// process with a diagnostic message if the pointer is null.
unsafe fn unwrap_mut_ptr<'a, T>(result: *mut T) -> &'a mut T {
    match result.as_mut() {
        Some(value) => value,
        None => {
            eprintln!("{}:{} - pointer must not be null", file!(), line!());
            abort();
        }
    }
}
// Unwraps a `Result`, printing the error and aborting the process on failure.
fn unwrap<T, E: fmt::Display>(result: Result<T, E>) -> T {
    match result {
        Ok(value) => value,
        Err(error) => {
            eprintln!("tree-sitter highlight error: {error}");
            abort();
        }
    }
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/language/build.rs | crates/language/build.rs | fn main() {
    if std::env::var("TARGET")
        .unwrap_or_default()
        .starts_with("wasm32-unknown")
    {
        // Only relevant when targeting bare wasm32: publish the locations of
        // the bundled wasm headers and sources as cargo metadata for
        // dependent crates' build scripts.
        let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap();
        let wasm_headers = std::path::Path::new(&manifest_dir).join("wasm/include");
        let wasm_src = std::path::Path::new(&manifest_dir).join("wasm/src");
        println!("cargo::metadata=wasm-headers={}", wasm_headers.display());
        println!("cargo::metadata=wasm-src={}", wasm_src.display());
    }
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/crates/language/src/language.rs | crates/language/src/language.rs | #![no_std]
/// `LanguageFn` wraps a C function that returns a pointer to a tree-sitter grammar.
#[repr(transparent)]
#[derive(Clone, Copy)]
pub struct LanguageFn(unsafe extern "C" fn() -> *const ());
impl LanguageFn {
    /// Wraps a raw grammar function in a [`LanguageFn`].
    ///
    /// # Safety
    ///
    /// Only call this with language functions generated from grammars
    /// by the Tree-sitter CLI.
    pub const unsafe fn from_raw(f: unsafe extern "C" fn() -> *const ()) -> Self {
        LanguageFn(f)
    }
    /// Unwraps this [`LanguageFn`] back into the raw grammar function.
    #[must_use]
    pub const fn into_raw(self) -> unsafe extern "C" fn() -> *const () {
        let LanguageFn(f) = self;
        f
    }
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/lib/binding_rust/bindings.rs | lib/binding_rust/bindings.rs | /* automatically generated by rust-bindgen 0.72.1 */
pub const TREE_SITTER_LANGUAGE_VERSION: u32 = 15;
pub const TREE_SITTER_MIN_COMPATIBLE_LANGUAGE_VERSION: u32 = 13;
pub type TSStateId = u16;
pub type TSSymbol = u16;
pub type TSFieldId = u16;
#[repr(C)]
#[derive(Debug)]
pub struct TSLanguage {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug)]
pub struct TSParser {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug)]
pub struct TSTree {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug)]
pub struct TSQuery {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug)]
pub struct TSQueryCursor {
_unused: [u8; 0],
}
#[repr(C)]
#[derive(Debug)]
pub struct TSLookaheadIterator {
_unused: [u8; 0],
}
pub type TSDecodeFunction = ::core::option::Option<
unsafe extern "C" fn(string: *const u8, length: u32, code_point: *mut i32) -> u32,
>;
pub const TSInputEncodingUTF8: TSInputEncoding = 0;
pub const TSInputEncodingUTF16LE: TSInputEncoding = 1;
pub const TSInputEncodingUTF16BE: TSInputEncoding = 2;
pub const TSInputEncodingCustom: TSInputEncoding = 3;
pub type TSInputEncoding = ::core::ffi::c_uint;
pub const TSSymbolTypeRegular: TSSymbolType = 0;
pub const TSSymbolTypeAnonymous: TSSymbolType = 1;
pub const TSSymbolTypeSupertype: TSSymbolType = 2;
pub const TSSymbolTypeAuxiliary: TSSymbolType = 3;
pub type TSSymbolType = ::core::ffi::c_uint;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct TSPoint {
pub row: u32,
pub column: u32,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct TSRange {
pub start_point: TSPoint,
pub end_point: TSPoint,
pub start_byte: u32,
pub end_byte: u32,
}
#[repr(C)]
#[derive(Debug)]
pub struct TSInput {
pub payload: *mut ::core::ffi::c_void,
pub read: ::core::option::Option<
unsafe extern "C" fn(
payload: *mut ::core::ffi::c_void,
byte_index: u32,
position: TSPoint,
bytes_read: *mut u32,
) -> *const ::core::ffi::c_char,
>,
pub encoding: TSInputEncoding,
pub decode: TSDecodeFunction,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct TSParseState {
pub payload: *mut ::core::ffi::c_void,
pub current_byte_offset: u32,
pub has_error: bool,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct TSParseOptions {
pub payload: *mut ::core::ffi::c_void,
pub progress_callback:
::core::option::Option<unsafe extern "C" fn(state: *mut TSParseState) -> bool>,
}
pub const TSLogTypeParse: TSLogType = 0;
pub const TSLogTypeLex: TSLogType = 1;
pub type TSLogType = ::core::ffi::c_uint;
#[repr(C)]
#[derive(Debug)]
pub struct TSLogger {
pub payload: *mut ::core::ffi::c_void,
pub log: ::core::option::Option<
unsafe extern "C" fn(
payload: *mut ::core::ffi::c_void,
log_type: TSLogType,
buffer: *const ::core::ffi::c_char,
),
>,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct TSInputEdit {
pub start_byte: u32,
pub old_end_byte: u32,
pub new_end_byte: u32,
pub start_point: TSPoint,
pub old_end_point: TSPoint,
pub new_end_point: TSPoint,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct TSNode {
pub context: [u32; 4usize],
pub id: *const ::core::ffi::c_void,
pub tree: *const TSTree,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct TSTreeCursor {
pub tree: *const ::core::ffi::c_void,
pub id: *const ::core::ffi::c_void,
pub context: [u32; 3usize],
}
#[repr(C)]
#[derive(Debug)]
pub struct TSQueryCapture {
pub node: TSNode,
pub index: u32,
}
pub const TSQuantifierZero: TSQuantifier = 0;
pub const TSQuantifierZeroOrOne: TSQuantifier = 1;
pub const TSQuantifierZeroOrMore: TSQuantifier = 2;
pub const TSQuantifierOne: TSQuantifier = 3;
pub const TSQuantifierOneOrMore: TSQuantifier = 4;
pub type TSQuantifier = ::core::ffi::c_uint;
#[repr(C)]
#[derive(Debug)]
pub struct TSQueryMatch {
pub id: u32,
pub pattern_index: u16,
pub capture_count: u16,
pub captures: *const TSQueryCapture,
}
pub const TSQueryPredicateStepTypeDone: TSQueryPredicateStepType = 0;
pub const TSQueryPredicateStepTypeCapture: TSQueryPredicateStepType = 1;
pub const TSQueryPredicateStepTypeString: TSQueryPredicateStepType = 2;
pub type TSQueryPredicateStepType = ::core::ffi::c_uint;
#[repr(C)]
#[derive(Debug)]
pub struct TSQueryPredicateStep {
pub type_: TSQueryPredicateStepType,
pub value_id: u32,
}
pub const TSQueryErrorNone: TSQueryError = 0;
pub const TSQueryErrorSyntax: TSQueryError = 1;
pub const TSQueryErrorNodeType: TSQueryError = 2;
pub const TSQueryErrorField: TSQueryError = 3;
pub const TSQueryErrorCapture: TSQueryError = 4;
pub const TSQueryErrorStructure: TSQueryError = 5;
pub const TSQueryErrorLanguage: TSQueryError = 6;
pub type TSQueryError = ::core::ffi::c_uint;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct TSQueryCursorState {
pub payload: *mut ::core::ffi::c_void,
pub current_byte_offset: u32,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct TSQueryCursorOptions {
pub payload: *mut ::core::ffi::c_void,
pub progress_callback:
::core::option::Option<unsafe extern "C" fn(state: *mut TSQueryCursorState) -> bool>,
}
#[doc = " The metadata associated with a language.\n\n Currently, this metadata can be used to check the [Semantic Version](https://semver.org/)\n of the language. This version information should be used to signal if a given parser might\n be incompatible with existing queries when upgrading between major versions, or minor versions\n if it's in zerover."]
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct TSLanguageMetadata {
pub major_version: u8,
pub minor_version: u8,
pub patch_version: u8,
}
extern "C" {
#[doc = " Create a new parser."]
pub fn ts_parser_new() -> *mut TSParser;
}
extern "C" {
#[doc = " Delete the parser, freeing all of the memory that it used."]
pub fn ts_parser_delete(self_: *mut TSParser);
}
extern "C" {
#[doc = " Get the parser's current language."]
pub fn ts_parser_language(self_: *const TSParser) -> *const TSLanguage;
}
extern "C" {
#[doc = " Set the language that the parser should use for parsing.\n\n Returns a boolean indicating whether or not the language was successfully\n assigned. True means assignment succeeded. False means there was a version\n mismatch: the language was generated with an incompatible version of the\n Tree-sitter CLI. Check the language's ABI version using [`ts_language_abi_version`]\n and compare it to this library's [`TREE_SITTER_LANGUAGE_VERSION`] and\n [`TREE_SITTER_MIN_COMPATIBLE_LANGUAGE_VERSION`] constants."]
pub fn ts_parser_set_language(self_: *mut TSParser, language: *const TSLanguage) -> bool;
}
extern "C" {
#[doc = " Set the ranges of text that the parser should include when parsing.\n\n By default, the parser will always include entire documents. This function\n allows you to parse only a *portion* of a document but still return a syntax\n tree whose ranges match up with the document as a whole. You can also pass\n multiple disjoint ranges.\n\n The second and third parameters specify the location and length of an array\n of ranges. The parser does *not* take ownership of these ranges; it copies\n the data, so it doesn't matter how these ranges are allocated.\n\n If `count` is zero, then the entire document will be parsed. Otherwise,\n the given ranges must be ordered from earliest to latest in the document,\n and they must not overlap. That is, the following must hold for all:\n\n `i < count - 1`: `ranges[i].end_byte <= ranges[i + 1].start_byte`\n\n If this requirement is not satisfied, the operation will fail, the ranges\n will not be assigned, and this function will return `false`. On success,\n this function returns `true`"]
pub fn ts_parser_set_included_ranges(
self_: *mut TSParser,
ranges: *const TSRange,
count: u32,
) -> bool;
}
extern "C" {
#[doc = " Get the ranges of text that the parser will include when parsing.\n\n The returned pointer is owned by the parser. The caller should not free it\n or write to it. The length of the array will be written to the given\n `count` pointer."]
pub fn ts_parser_included_ranges(self_: *const TSParser, count: *mut u32) -> *const TSRange;
}
extern "C" {
#[doc = " Use the parser to parse some source code and create a syntax tree.\n\n If you are parsing this document for the first time, pass `NULL` for the\n `old_tree` parameter. Otherwise, if you have already parsed an earlier\n version of this document and the document has since been edited, pass the\n previous syntax tree so that the unchanged parts of it can be reused.\n This will save time and memory. For this to work correctly, you must have\n already edited the old syntax tree using the [`ts_tree_edit`] function in a\n way that exactly matches the source code changes.\n\n The [`TSInput`] parameter lets you specify how to read the text. It has the\n following three fields:\n 1. [`read`]: A function to retrieve a chunk of text at a given byte offset\n and (row, column) position. The function should return a pointer to the\n text and write its length to the [`bytes_read`] pointer. The parser does\n not take ownership of this buffer; it just borrows it until it has\n finished reading it. The function should write a zero value to the\n [`bytes_read`] pointer to indicate the end of the document.\n 2. [`payload`]: An arbitrary pointer that will be passed to each invocation\n of the [`read`] function.\n 3. [`encoding`]: An indication of how the text is encoded. Either\n `TSInputEncodingUTF8` or `TSInputEncodingUTF16`.\n\n This function returns a syntax tree on success, and `NULL` on failure. There\n are four possible reasons for failure:\n 1. The parser does not have a language assigned. Check for this using the\n[`ts_parser_language`] function.\n 2. Parsing was cancelled due to the progress callback returning true. This callback\n is passed in [`ts_parser_parse_with_options`] inside the [`TSParseOptions`] struct.\n\n [`read`]: TSInput::read\n [`payload`]: TSInput::payload\n [`encoding`]: TSInput::encoding\n [`bytes_read`]: TSInput::read"]
pub fn ts_parser_parse(
self_: *mut TSParser,
old_tree: *const TSTree,
input: TSInput,
) -> *mut TSTree;
}
extern "C" {
#[doc = " Use the parser to parse some source code and create a syntax tree, with some options.\n\n See [`ts_parser_parse`] for more details.\n\n See [`TSParseOptions`] for more details on the options."]
pub fn ts_parser_parse_with_options(
self_: *mut TSParser,
old_tree: *const TSTree,
input: TSInput,
parse_options: TSParseOptions,
) -> *mut TSTree;
}
extern "C" {
#[doc = " Use the parser to parse some source code stored in one contiguous buffer.\n The first two parameters are the same as in the [`ts_parser_parse`] function\n above. The second two parameters indicate the location of the buffer and its\n length in bytes."]
pub fn ts_parser_parse_string(
self_: *mut TSParser,
old_tree: *const TSTree,
string: *const ::core::ffi::c_char,
length: u32,
) -> *mut TSTree;
}
extern "C" {
#[doc = " Use the parser to parse some source code stored in one contiguous buffer with\n a given encoding. The first four parameters work the same as in the\n [`ts_parser_parse_string`] method above. The final parameter indicates whether\n the text is encoded as UTF8 or UTF16."]
pub fn ts_parser_parse_string_encoding(
self_: *mut TSParser,
old_tree: *const TSTree,
string: *const ::core::ffi::c_char,
length: u32,
encoding: TSInputEncoding,
) -> *mut TSTree;
}
extern "C" {
#[doc = " Instruct the parser to start the next parse from the beginning.\n\n If the parser previously failed because of the progress callback, then\n by default, it will resume where it left off on the next call to\n [`ts_parser_parse`] or other parsing functions. If you don't want to resume,\n and instead intend to use this parser to parse some other document, you must\n call [`ts_parser_reset`] first."]
pub fn ts_parser_reset(self_: *mut TSParser);
}
extern "C" {
#[doc = " Set the logger that a parser should use during parsing.\n\n The parser does not take ownership over the logger payload. If a logger was\n previously assigned, the caller is responsible for releasing any memory\n owned by the previous logger."]
pub fn ts_parser_set_logger(self_: *mut TSParser, logger: TSLogger);
}
extern "C" {
#[doc = " Get the parser's current logger."]
pub fn ts_parser_logger(self_: *const TSParser) -> TSLogger;
}
extern "C" {
#[doc = " Set the file descriptor to which the parser should write debugging graphs\n during parsing. The graphs are formatted in the DOT language. You may want\n to pipe these graphs directly to a `dot(1)` process in order to generate\n SVG output. You can turn off this logging by passing a negative number."]
pub fn ts_parser_print_dot_graphs(self_: *mut TSParser, fd: ::core::ffi::c_int);
}
extern "C" {
#[doc = " Create a shallow copy of the syntax tree. This is very fast.\n\n You need to copy a syntax tree in order to use it on more than one thread at\n a time, as syntax trees are not thread safe."]
pub fn ts_tree_copy(self_: *const TSTree) -> *mut TSTree;
}
extern "C" {
#[doc = " Delete the syntax tree, freeing all of the memory that it used."]
pub fn ts_tree_delete(self_: *mut TSTree);
}
extern "C" {
#[doc = " Get the root node of the syntax tree."]
pub fn ts_tree_root_node(self_: *const TSTree) -> TSNode;
}
extern "C" {
#[doc = " Get the root node of the syntax tree, but with its position\n shifted forward by the given offset."]
pub fn ts_tree_root_node_with_offset(
self_: *const TSTree,
offset_bytes: u32,
offset_extent: TSPoint,
) -> TSNode;
}
extern "C" {
#[doc = " Get the language that was used to parse the syntax tree."]
pub fn ts_tree_language(self_: *const TSTree) -> *const TSLanguage;
}
extern "C" {
#[doc = " Get the array of included ranges that was used to parse the syntax tree.\n\n The returned pointer must be freed by the caller."]
pub fn ts_tree_included_ranges(self_: *const TSTree, length: *mut u32) -> *mut TSRange;
}
extern "C" {
#[doc = " Edit the syntax tree to keep it in sync with source code that has been\n edited.\n\n You must describe the edit both in terms of byte offsets and in terms of\n (row, column) coordinates."]
pub fn ts_tree_edit(self_: *mut TSTree, edit: *const TSInputEdit);
}
extern "C" {
#[doc = " Compare an old edited syntax tree to a new syntax tree representing the same\n document, returning an array of ranges whose syntactic structure has changed.\n\n For this to work correctly, the old syntax tree must have been edited such\n that its ranges match up to the new tree. Generally, you'll want to call\n this function right after calling one of the [`ts_parser_parse`] functions.\n You need to pass the old tree that was passed to parse, as well as the new\n tree that was returned from that function.\n\n The returned ranges indicate areas where the hierarchical structure of syntax\n nodes (from root to leaf) has changed between the old and new trees. Characters\n outside these ranges have identical ancestor nodes in both trees.\n\n Note that the returned ranges may be slightly larger than the exact changed areas,\n but Tree-sitter attempts to make them as small as possible.\n\n The returned array is allocated using `malloc` and the caller is responsible\n for freeing it using `free`. The length of the array will be written to the\n given `length` pointer."]
pub fn ts_tree_get_changed_ranges(
old_tree: *const TSTree,
new_tree: *const TSTree,
length: *mut u32,
) -> *mut TSRange;
}
extern "C" {
#[doc = " Write a DOT graph describing the syntax tree to the given file."]
pub fn ts_tree_print_dot_graph(self_: *const TSTree, file_descriptor: ::core::ffi::c_int);
}
extern "C" {
#[doc = " Get the node's type as a null-terminated string."]
pub fn ts_node_type(self_: TSNode) -> *const ::core::ffi::c_char;
}
extern "C" {
#[doc = " Get the node's type as a numerical id."]
pub fn ts_node_symbol(self_: TSNode) -> TSSymbol;
}
extern "C" {
#[doc = " Get the node's language."]
pub fn ts_node_language(self_: TSNode) -> *const TSLanguage;
}
extern "C" {
#[doc = " Get the node's type as it appears in the grammar ignoring aliases as a\n null-terminated string."]
pub fn ts_node_grammar_type(self_: TSNode) -> *const ::core::ffi::c_char;
}
extern "C" {
#[doc = " Get the node's type as a numerical id as it appears in the grammar ignoring\n aliases. This should be used in [`ts_language_next_state`] instead of\n [`ts_node_symbol`]."]
pub fn ts_node_grammar_symbol(self_: TSNode) -> TSSymbol;
}
extern "C" {
#[doc = " Get the node's start byte."]
pub fn ts_node_start_byte(self_: TSNode) -> u32;
}
extern "C" {
#[doc = " Get the node's start position in terms of rows and columns."]
pub fn ts_node_start_point(self_: TSNode) -> TSPoint;
}
extern "C" {
#[doc = " Get the node's end byte."]
pub fn ts_node_end_byte(self_: TSNode) -> u32;
}
extern "C" {
#[doc = " Get the node's end position in terms of rows and columns."]
pub fn ts_node_end_point(self_: TSNode) -> TSPoint;
}
extern "C" {
#[doc = " Get an S-expression representing the node as a string.\n\n This string is allocated with `malloc` and the caller is responsible for\n freeing it using `free`."]
pub fn ts_node_string(self_: TSNode) -> *mut ::core::ffi::c_char;
}
extern "C" {
#[doc = " Check if the node is null. Functions like [`ts_node_child`] and\n [`ts_node_next_sibling`] will return a null node to indicate that no such node\n was found."]
pub fn ts_node_is_null(self_: TSNode) -> bool;
}
extern "C" {
#[doc = " Check if the node is *named*. Named nodes correspond to named rules in the\n grammar, whereas *anonymous* nodes correspond to string literals in the\n grammar."]
pub fn ts_node_is_named(self_: TSNode) -> bool;
}
extern "C" {
#[doc = " Check if the node is *missing*. Missing nodes are inserted by the parser in\n order to recover from certain kinds of syntax errors."]
pub fn ts_node_is_missing(self_: TSNode) -> bool;
}
extern "C" {
#[doc = " Check if the node is *extra*. Extra nodes represent things like comments,\n which are not required the grammar, but can appear anywhere."]
pub fn ts_node_is_extra(self_: TSNode) -> bool;
}
extern "C" {
#[doc = " Check if a syntax node has been edited."]
pub fn ts_node_has_changes(self_: TSNode) -> bool;
}
extern "C" {
#[doc = " Check if the node is a syntax error or contains any syntax errors."]
pub fn ts_node_has_error(self_: TSNode) -> bool;
}
extern "C" {
#[doc = " Check if the node is a syntax error."]
pub fn ts_node_is_error(self_: TSNode) -> bool;
}
extern "C" {
#[doc = " Get this node's parse state."]
pub fn ts_node_parse_state(self_: TSNode) -> TSStateId;
}
extern "C" {
#[doc = " Get the parse state after this node."]
pub fn ts_node_next_parse_state(self_: TSNode) -> TSStateId;
}
extern "C" {
#[doc = " Get the node's immediate parent.\n Prefer [`ts_node_child_with_descendant`] for\n iterating over the node's ancestors."]
pub fn ts_node_parent(self_: TSNode) -> TSNode;
}
extern "C" {
#[doc = " Get the node that contains `descendant`.\n\n Note that this can return `descendant` itself."]
pub fn ts_node_child_with_descendant(self_: TSNode, descendant: TSNode) -> TSNode;
}
extern "C" {
#[doc = " Get the node's child at the given index, where zero represents the first\n child."]
pub fn ts_node_child(self_: TSNode, child_index: u32) -> TSNode;
}
extern "C" {
#[doc = " Get the field name for node's child at the given index, where zero represents\n the first child. Returns NULL, if no field is found."]
pub fn ts_node_field_name_for_child(
self_: TSNode,
child_index: u32,
) -> *const ::core::ffi::c_char;
}
extern "C" {
#[doc = " Get the field name for node's named child at the given index, where zero\n represents the first named child. Returns NULL, if no field is found."]
pub fn ts_node_field_name_for_named_child(
self_: TSNode,
named_child_index: u32,
) -> *const ::core::ffi::c_char;
}
extern "C" {
#[doc = " Get the node's number of children."]
pub fn ts_node_child_count(self_: TSNode) -> u32;
}
extern "C" {
#[doc = " Get the node's *named* child at the given index.\n\n See also [`ts_node_is_named`]."]
pub fn ts_node_named_child(self_: TSNode, child_index: u32) -> TSNode;
}
extern "C" {
#[doc = " Get the node's number of *named* children.\n\n See also [`ts_node_is_named`]."]
pub fn ts_node_named_child_count(self_: TSNode) -> u32;
}
extern "C" {
#[doc = " Get the node's child with the given field name."]
pub fn ts_node_child_by_field_name(
self_: TSNode,
name: *const ::core::ffi::c_char,
name_length: u32,
) -> TSNode;
}
extern "C" {
#[doc = " Get the node's child with the given numerical field id.\n\n You can convert a field name to an id using the\n [`ts_language_field_id_for_name`] function."]
pub fn ts_node_child_by_field_id(self_: TSNode, field_id: TSFieldId) -> TSNode;
}
extern "C" {
#[doc = " Get the node's next / previous sibling."]
pub fn ts_node_next_sibling(self_: TSNode) -> TSNode;
}
extern "C" {
pub fn ts_node_prev_sibling(self_: TSNode) -> TSNode;
}
extern "C" {
#[doc = " Get the node's next / previous *named* sibling."]
pub fn ts_node_next_named_sibling(self_: TSNode) -> TSNode;
}
extern "C" {
pub fn ts_node_prev_named_sibling(self_: TSNode) -> TSNode;
}
extern "C" {
#[doc = " Get the node's first child that contains or starts after the given byte offset."]
pub fn ts_node_first_child_for_byte(self_: TSNode, byte: u32) -> TSNode;
}
extern "C" {
#[doc = " Get the node's first named child that contains or starts after the given byte offset."]
pub fn ts_node_first_named_child_for_byte(self_: TSNode, byte: u32) -> TSNode;
}
extern "C" {
#[doc = " Get the node's number of descendants, including one for the node itself."]
pub fn ts_node_descendant_count(self_: TSNode) -> u32;
}
extern "C" {
#[doc = " Get the smallest node within this node that spans the given range of bytes\n or (row, column) positions."]
pub fn ts_node_descendant_for_byte_range(self_: TSNode, start: u32, end: u32) -> TSNode;
}
extern "C" {
pub fn ts_node_descendant_for_point_range(
self_: TSNode,
start: TSPoint,
end: TSPoint,
) -> TSNode;
}
extern "C" {
#[doc = " Get the smallest named node within this node that spans the given range of\n bytes or (row, column) positions."]
pub fn ts_node_named_descendant_for_byte_range(self_: TSNode, start: u32, end: u32) -> TSNode;
}
extern "C" {
pub fn ts_node_named_descendant_for_point_range(
self_: TSNode,
start: TSPoint,
end: TSPoint,
) -> TSNode;
}
extern "C" {
#[doc = " Edit the node to keep it in-sync with source code that has been edited.\n\n This function is only rarely needed. When you edit a syntax tree with the\n [`ts_tree_edit`] function, all of the nodes that you retrieve from the tree\n afterward will already reflect the edit. You only need to use [`ts_node_edit`]\n when you have a [`TSNode`] instance that you want to keep and continue to use\n after an edit."]
pub fn ts_node_edit(self_: *mut TSNode, edit: *const TSInputEdit);
}
extern "C" {
#[doc = " Check if two nodes are identical."]
pub fn ts_node_eq(self_: TSNode, other: TSNode) -> bool;
}
extern "C" {
#[doc = " Edit a point to keep it in-sync with source code that has been edited.\n\n This function updates a single point's byte offset and row/column position\n based on an edit operation. This is useful for editing points without\n requiring a tree or node instance."]
pub fn ts_point_edit(point: *mut TSPoint, point_byte: *mut u32, edit: *const TSInputEdit);
}
extern "C" {
#[doc = " Edit a range to keep it in-sync with source code that has been edited.\n\n This function updates a range's start and end positions based on an edit\n operation. This is useful for editing ranges without requiring a tree\n or node instance."]
pub fn ts_range_edit(range: *mut TSRange, edit: *const TSInputEdit);
}
extern "C" {
#[doc = " Create a new tree cursor starting from the given node.\n\n A tree cursor allows you to walk a syntax tree more efficiently than is\n possible using the [`TSNode`] functions. It is a mutable object that is always\n on a certain syntax node, and can be moved imperatively to different nodes.\n\n Note that the given node is considered the root of the cursor,\n and the cursor cannot walk outside this node."]
pub fn ts_tree_cursor_new(node: TSNode) -> TSTreeCursor;
}
extern "C" {
#[doc = " Delete a tree cursor, freeing all of the memory that it used."]
pub fn ts_tree_cursor_delete(self_: *mut TSTreeCursor);
}
extern "C" {
#[doc = " Re-initialize a tree cursor to start at the original node that the cursor was\n constructed with."]
pub fn ts_tree_cursor_reset(self_: *mut TSTreeCursor, node: TSNode);
}
extern "C" {
#[doc = " Re-initialize a tree cursor to the same position as another cursor.\n\n Unlike [`ts_tree_cursor_reset`], this will not lose parent information and\n allows reusing already created cursors."]
pub fn ts_tree_cursor_reset_to(dst: *mut TSTreeCursor, src: *const TSTreeCursor);
}
extern "C" {
#[doc = " Get the tree cursor's current node."]
pub fn ts_tree_cursor_current_node(self_: *const TSTreeCursor) -> TSNode;
}
extern "C" {
#[doc = " Get the field name of the tree cursor's current node.\n\n This returns `NULL` if the current node doesn't have a field.\n See also [`ts_node_child_by_field_name`]."]
pub fn ts_tree_cursor_current_field_name(
self_: *const TSTreeCursor,
) -> *const ::core::ffi::c_char;
}
extern "C" {
#[doc = " Get the field id of the tree cursor's current node.\n\n This returns zero if the current node doesn't have a field.\n See also [`ts_node_child_by_field_id`], [`ts_language_field_id_for_name`]."]
pub fn ts_tree_cursor_current_field_id(self_: *const TSTreeCursor) -> TSFieldId;
}
extern "C" {
#[doc = " Move the cursor to the parent of its current node.\n\n This returns `true` if the cursor successfully moved, and returns `false`\n if there was no parent node (the cursor was already on the root node).\n\n Note that the node the cursor was constructed with is considered the root\n of the cursor, and the cursor cannot walk outside this node."]
pub fn ts_tree_cursor_goto_parent(self_: *mut TSTreeCursor) -> bool;
}
extern "C" {
#[doc = " Move the cursor to the next sibling of its current node.\n\n This returns `true` if the cursor successfully moved, and returns `false`\n if there was no next sibling node.\n\n Note that the node the cursor was constructed with is considered the root\n of the cursor, and the cursor cannot walk outside this node."]
pub fn ts_tree_cursor_goto_next_sibling(self_: *mut TSTreeCursor) -> bool;
}
extern "C" {
#[doc = " Move the cursor to the previous sibling of its current node.\n\n This returns `true` if the cursor successfully moved, and returns `false` if\n there was no previous sibling node.\n\n Note, that this function may be slower than\n [`ts_tree_cursor_goto_next_sibling`] due to how node positions are stored. In\n the worst case, this will need to iterate through all the children up to the\n previous sibling node to recalculate its position. Also note that the node the cursor\n was constructed with is considered the root of the cursor, and the cursor cannot\n walk outside this node."]
pub fn ts_tree_cursor_goto_previous_sibling(self_: *mut TSTreeCursor) -> bool;
}
extern "C" {
#[doc = " Move the cursor to the first child of its current node.\n\n This returns `true` if the cursor successfully moved, and returns `false`\n if there were no children."]
pub fn ts_tree_cursor_goto_first_child(self_: *mut TSTreeCursor) -> bool;
}
extern "C" {
#[doc = " Move the cursor to the last child of its current node.\n\n This returns `true` if the cursor successfully moved, and returns `false` if\n there were no children.\n\n Note that this function may be slower than [`ts_tree_cursor_goto_first_child`]\n because it needs to iterate through all the children to compute the child's\n position."]
pub fn ts_tree_cursor_goto_last_child(self_: *mut TSTreeCursor) -> bool;
}
extern "C" {
#[doc = " Move the cursor to the node that is the nth descendant of\n the original node that the cursor was constructed with, where\n zero represents the original node itself."]
pub fn ts_tree_cursor_goto_descendant(self_: *mut TSTreeCursor, goal_descendant_index: u32);
}
extern "C" {
#[doc = " Get the index of the cursor's current node out of all of the\n descendants of the original node that the cursor was constructed with."]
pub fn ts_tree_cursor_current_descendant_index(self_: *const TSTreeCursor) -> u32;
}
extern "C" {
#[doc = " Get the depth of the cursor's current node relative to the original\n node that the cursor was constructed with."]
pub fn ts_tree_cursor_current_depth(self_: *const TSTreeCursor) -> u32;
}
extern "C" {
#[doc = " Move the cursor to the first child of its current node that contains or starts after\n the given byte offset or point.\n\n This returns the index of the child node if one was found, and returns -1\n if no such child was found."]
pub fn ts_tree_cursor_goto_first_child_for_byte(
self_: *mut TSTreeCursor,
goal_byte: u32,
) -> i64;
}
extern "C" {
pub fn ts_tree_cursor_goto_first_child_for_point(
self_: *mut TSTreeCursor,
goal_point: TSPoint,
) -> i64;
}
extern "C" {
pub fn ts_tree_cursor_copy(cursor: *const TSTreeCursor) -> TSTreeCursor;
}
extern "C" {
#[doc = " Create a new query from a string containing one or more S-expression\n patterns. The query is associated with a particular language, and can\n only be run on syntax nodes parsed with that language.\n\n If all of the given patterns are valid, this returns a [`TSQuery`].\n If a pattern is invalid, this returns `NULL`, and provides two pieces\n of information about the problem:\n 1. The byte offset of the error is written to the `error_offset` parameter.\n 2. The type of error is written to the `error_type` parameter."]
pub fn ts_query_new(
language: *const TSLanguage,
source: *const ::core::ffi::c_char,
source_len: u32,
error_offset: *mut u32,
error_type: *mut TSQueryError,
) -> *mut TSQuery;
}
extern "C" {
#[doc = " Delete a query, freeing all of the memory that it used."]
pub fn ts_query_delete(self_: *mut TSQuery);
}
extern "C" {
#[doc = " Get the number of patterns, captures, or string literals in the query."]
pub fn ts_query_pattern_count(self_: *const TSQuery) -> u32;
}
extern "C" {
pub fn ts_query_capture_count(self_: *const TSQuery) -> u32;
}
extern "C" {
pub fn ts_query_string_count(self_: *const TSQuery) -> u32;
}
extern "C" {
#[doc = " Get the byte offset where the given pattern starts in the query's source.\n\n This can be useful when combining queries by concatenating their source\n code strings."]
pub fn ts_query_start_byte_for_pattern(self_: *const TSQuery, pattern_index: u32) -> u32;
}
extern "C" {
#[doc = " Get the byte offset where the given pattern ends in the query's source.\n\n This can be useful when combining queries by concatenating their source\n code strings."]
pub fn ts_query_end_byte_for_pattern(self_: *const TSQuery, pattern_index: u32) -> u32;
}
extern "C" {
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | true |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/lib/binding_rust/lib.rs | lib/binding_rust/lib.rs | #![cfg_attr(not(any(test, doctest)), doc = include_str!("./README.md"))]
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(docsrs, feature(doc_cfg))]
pub mod ffi;
mod util;
#[cfg(not(feature = "std"))]
extern crate alloc;
#[cfg(not(feature = "std"))]
use alloc::{boxed::Box, format, string::String, string::ToString, vec::Vec};
use core::{
ffi::{c_char, c_void, CStr},
fmt::{self, Write},
hash, iter,
marker::PhantomData,
mem::MaybeUninit,
num::NonZeroU16,
ops::{self, ControlFlow, Deref},
ptr::{self, NonNull},
slice, str,
};
#[cfg(feature = "std")]
use std::error;
#[cfg(all(unix, feature = "std"))]
use std::os::fd::AsRawFd;
#[cfg(all(windows, feature = "std"))]
use std::os::windows::io::AsRawHandle;
pub use streaming_iterator::{StreamingIterator, StreamingIteratorMut};
use tree_sitter_language::LanguageFn;
#[cfg(feature = "wasm")]
mod wasm_language;
#[cfg(feature = "wasm")]
#[cfg_attr(docsrs, doc(cfg(feature = "wasm")))]
pub use wasm_language::*;
/// The latest ABI version that is supported by the current version of the
/// library.
///
/// When Languages are generated by the Tree-sitter CLI, they are
/// assigned an ABI version number that corresponds to the current CLI version.
/// The Tree-sitter library is generally backwards-compatible with languages
/// generated using older CLI versions, but is not forwards-compatible.
#[doc(alias = "TREE_SITTER_LANGUAGE_VERSION")]
pub const LANGUAGE_VERSION: usize = ffi::TREE_SITTER_LANGUAGE_VERSION as usize;
/// The earliest ABI version that is supported by the current version of the
/// library.
#[doc(alias = "TREE_SITTER_MIN_COMPATIBLE_LANGUAGE_VERSION")]
pub const MIN_COMPATIBLE_LANGUAGE_VERSION: usize =
    ffi::TREE_SITTER_MIN_COMPATIBLE_LANGUAGE_VERSION as usize;
/// The full text of this crate's bundled `parser.h` C header, included
/// verbatim from `src/parser.h` at compile time.
pub const PARSER_HEADER: &str = include_str!("../src/parser.h");
/// An opaque object that defines how to parse a particular language. The code
/// for each `Language` is generated by the Tree-sitter CLI.
#[doc(alias = "TSLanguage")]
#[derive(Debug, PartialEq, Eq, Hash)]
#[repr(transparent)]
pub struct Language(*const ffi::TSLanguage);
/// A borrowed reference to a [`Language`], valid for the lifetime `'a` of its
/// owner (e.g. the parser it was obtained from). Dereferences to [`Language`].
pub struct LanguageRef<'a>(*const ffi::TSLanguage, PhantomData<&'a ()>);
/// The metadata associated with a language.
///
/// Currently, this metadata can be used to check the [Semantic Version](https://semver.org/)
/// of the language. This version information should be used to signal if a given parser might
/// be incompatible with existing queries when upgrading between major versions, or minor versions
/// if it's in zerover.
#[doc(alias = "TSLanguageMetadata")]
pub struct LanguageMetadata {
    // Semantic-version components of the language, as declared by its author.
    pub major_version: u8,
    pub minor_version: u8,
    pub patch_version: u8,
}
/// Field-by-field conversion from the raw FFI metadata struct.
impl From<ffi::TSLanguageMetadata> for LanguageMetadata {
    fn from(val: ffi::TSLanguageMetadata) -> Self {
        Self {
            major_version: val.major_version,
            minor_version: val.minor_version,
            patch_version: val.patch_version,
        }
    }
}
/// A tree that represents the syntactic structure of a source code file.
#[doc(alias = "TSTree")]
pub struct Tree(NonNull<ffi::TSTree>);
/// A position in a multi-line text document, in terms of rows and columns.
///
/// Rows and columns are zero-based.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct Point {
    pub row: usize,
    pub column: usize,
}
/// A range of positions in a multi-line text document, both in terms of bytes
/// and of rows and columns.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct Range {
    pub start_byte: usize,
    pub end_byte: usize,
    pub start_point: Point,
    pub end_point: Point,
}
/// A summary of a change to a text document.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct InputEdit {
    /// Byte offset at which the edit begins.
    pub start_byte: usize,
    /// Byte offset where the edited region ended *before* the edit.
    pub old_end_byte: usize,
    /// Byte offset where the edited region ends *after* the edit.
    pub new_end_byte: usize,
    /// Row/column position at which the edit begins.
    pub start_position: Point,
    /// Row/column position where the edited region ended *before* the edit.
    pub old_end_position: Point,
    /// Row/column position where the edited region ends *after* the edit.
    pub new_end_position: Point,
}
impl InputEdit {
    /// Adjust a single point (and its byte offset) so that it stays in sync
    /// with source code to which this edit has been applied.
    ///
    /// This is useful for tracking positions without holding a tree or node
    /// instance.
    #[doc(alias = "ts_point_edit")]
    pub fn edit_point(&self, point: &mut Point, byte: &mut usize) {
        let raw_edit = self.into();
        let mut raw_point = (*point).into();
        let mut raw_byte = *byte as u32;
        unsafe {
            ffi::ts_point_edit(
                core::ptr::addr_of_mut!(raw_point),
                core::ptr::addr_of_mut!(raw_byte),
                &raw_edit,
            );
        }
        *point = raw_point.into();
        *byte = raw_byte as usize;
    }

    /// Adjust a [`Range`]'s start and end positions so that it stays in sync
    /// with source code to which this edit has been applied.
    ///
    /// This is useful for tracking ranges without holding a tree or node
    /// instance.
    #[doc(alias = "ts_range_edit")]
    pub fn edit_range(&self, range: &mut Range) {
        let raw_edit = self.into();
        let mut raw_range = (*range).into();
        unsafe {
            ffi::ts_range_edit(core::ptr::addr_of_mut!(raw_range), &raw_edit);
        }
        *range = raw_range.into();
    }
}
/// A single node within a syntax [`Tree`].
#[doc(alias = "TSNode")]
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Node<'tree>(ffi::TSNode, PhantomData<&'tree ()>);
/// A stateful object that is used to produce a [`Tree`] based on some
/// source code.
#[doc(alias = "TSParser")]
pub struct Parser(NonNull<ffi::TSParser>);
/// A stateful object that is used to look up symbols valid in a specific parse
/// state
#[doc(alias = "TSLookaheadIterator")]
pub struct LookaheadIterator(NonNull<ffi::TSLookaheadIterator>);
/// Internal adapter borrowing a [`LookaheadIterator`]; presumably yields the
/// *names* of the iterated symbols (its `Iterator` impl lives elsewhere in
/// this file) — confirm against that impl.
struct LookaheadNamesIterator<'a>(&'a mut LookaheadIterator);
/// A stateful object that is passed into a [`ParseProgressCallback`]
/// to pass in the current state of the parser.
pub struct ParseState(NonNull<ffi::TSParseState>);
impl ParseState {
    /// The byte offset in the document that the parser has reached so far.
    #[must_use]
    pub const fn current_byte_offset(&self) -> usize {
        let raw = unsafe { self.0.as_ref() };
        raw.current_byte_offset as usize
    }

    /// Whether the parser has encountered an error so far.
    #[must_use]
    pub const fn has_error(&self) -> bool {
        let raw = unsafe { self.0.as_ref() };
        raw.has_error
    }
}
/// A stateful object that is passed into a [`QueryProgressCallback`]
/// to pass in the current state of the query execution.
pub struct QueryCursorState(NonNull<ffi::TSQueryCursorState>);
impl QueryCursorState {
    /// The byte offset in the document that the query cursor has reached so
    /// far.
    #[must_use]
    pub const fn current_byte_offset(&self) -> usize {
        unsafe { self.0.as_ref() }.current_byte_offset as usize
    }
}
/// Options that can be passed to [`Parser::parse_with_options`].
#[derive(Default)]
pub struct ParseOptions<'a> {
    /// Optional callback invoked periodically during parsing; returning
    /// `ControlFlow::Break` cancels the parse.
    pub progress_callback: Option<ParseProgressCallback<'a>>,
}
impl<'a> ParseOptions<'a> {
    /// Construct an options value with no progress callback attached.
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    /// Attach a progress callback, returning the updated options.
    #[must_use]
    pub fn progress_callback<F: FnMut(&ParseState) -> ControlFlow<()>>(
        mut self,
        progress: &'a mut F,
    ) -> Self {
        self.progress_callback = Some(progress);
        self
    }

    /// Create a new `ParseOptions` with a shorter lifetime, borrowing from this one.
    ///
    /// This is useful when you need to reuse parse options multiple times, e.g., calling
    /// [`Parser::parse_with_options`] multiple times with the same options.
    #[must_use]
    pub fn reborrow(&mut self) -> ParseOptions {
        ParseOptions {
            // Reborrow the trait object explicitly; the coercion must happen
            // inside the struct literal for the object lifetime to shorten.
            progress_callback: match self.progress_callback.as_mut() {
                None => None,
                Some(cb) => Some(&mut **cb),
            },
        }
    }
}
/// Options that can be passed to query-execution methods on [`QueryCursor`].
#[derive(Default)]
pub struct QueryCursorOptions<'a> {
    /// Optional callback invoked periodically during query execution;
    /// returning `ControlFlow::Break` cancels it.
    pub progress_callback: Option<QueryProgressCallback<'a>>,
}
impl<'a> QueryCursorOptions<'a> {
    /// Construct an options value with no progress callback attached.
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    /// Attach a progress callback, returning the updated options.
    #[must_use]
    pub fn progress_callback<F: FnMut(&QueryCursorState) -> ControlFlow<()>>(
        mut self,
        progress: &'a mut F,
    ) -> Self {
        self.progress_callback = Some(progress);
        self
    }

    /// Create a new `QueryCursorOptions` with a shorter lifetime, borrowing from this one.
    ///
    /// This is useful when you need to reuse query cursor options multiple times, e.g., calling
    /// [`QueryCursor::matches`] multiple times with the same options.
    #[must_use]
    pub fn reborrow(&mut self) -> QueryCursorOptions {
        QueryCursorOptions {
            // Reborrow the trait object explicitly; the coercion must happen
            // inside the struct literal for the object lifetime to shorten.
            progress_callback: match self.progress_callback.as_mut() {
                None => None,
                Some(cb) => Some(&mut **cb),
            },
        }
    }
}
/// RAII guard that frees a heap-allocated `TSQueryCursorOptions` and the boxed
/// progress callback stored in its `payload`, once iteration is finished.
struct QueryCursorOptionsDrop(*mut ffi::TSQueryCursorOptions);
impl Drop for QueryCursorOptionsDrop {
    fn drop(&mut self) {
        unsafe {
            // Free the boxed callback first (it is only present when a
            // progress callback was supplied), then the options struct itself.
            if !(*self.0).payload.is_null() {
                drop(Box::from_raw(
                    (*self.0).payload.cast::<QueryProgressCallback>(),
                ));
            }
            drop(Box::from_raw(self.0));
        }
    }
}
/// A type of log message.
#[derive(Debug, PartialEq, Eq)]
pub enum LogType {
    Parse,
    Lex,
}
/// Numerical id of a field name. Zero is treated as "no field" (see
/// `field_id_for_name`, which maps a raw id of 0 to `None`), hence `NonZeroU16`.
type FieldId = NonZeroU16;
/// A callback that receives log messages during parsing.
type Logger<'a> = Box<dyn FnMut(LogType, &str) + 'a>;
/// A callback that receives the parse state during parsing.
type ParseProgressCallback<'a> = &'a mut dyn FnMut(&ParseState) -> ControlFlow<()>;
/// A callback that receives the query state during query execution.
type QueryProgressCallback<'a> = &'a mut dyn FnMut(&QueryCursorState) -> ControlFlow<()>;
/// A custom decoder for text supplied in a non-UTF8/UTF16 encoding.
pub trait Decode {
    /// A callback that decodes the next code point from the input slice. It should return the code
    /// point, and how many bytes were decoded.
    fn decode(bytes: &[u8]) -> (i32, u32);
}
/// A stateful object for walking a syntax [`Tree`] efficiently.
#[doc(alias = "TSTreeCursor")]
pub struct TreeCursor<'cursor>(ffi::TSTreeCursor, PhantomData<&'cursor ()>);
/// A set of patterns that match nodes in a syntax tree.
#[doc(alias = "TSQuery")]
#[derive(Debug)]
#[allow(clippy::type_complexity)]
pub struct Query {
    ptr: NonNull<ffi::TSQuery>,
    // Capture names interned by the query.
    capture_names: Box<[&'static str]>,
    // Outer slices below appear to be indexed by pattern — confirm against the
    // query-construction code.
    capture_quantifiers: Box<[Box<[CaptureQuantifier]>]>,
    text_predicates: Box<[Box<[TextPredicateCapture]>]>,
    property_settings: Box<[Box<[QueryProperty]>]>,
    property_predicates: Box<[Box<[(QueryProperty, bool)]>]>,
    general_predicates: Box<[Box<[QueryPredicate]>]>,
}
/// A quantifier for captures
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum CaptureQuantifier {
    Zero,
    ZeroOrOne,
    ZeroOrMore,
    One,
    OneOrMore,
}
/// Conversion from the raw FFI quantifier constant.
impl From<ffi::TSQuantifier> for CaptureQuantifier {
    fn from(value: ffi::TSQuantifier) -> Self {
        match value {
            ffi::TSQuantifierZero => Self::Zero,
            ffi::TSQuantifierZeroOrOne => Self::ZeroOrOne,
            ffi::TSQuantifierZeroOrMore => Self::ZeroOrMore,
            ffi::TSQuantifierOne => Self::One,
            ffi::TSQuantifierOneOrMore => Self::OneOrMore,
            // Any other value would indicate an invalid quantifier coming from
            // the C side.
            _ => unreachable!(),
        }
    }
}
/// A stateful object for executing a [`Query`] on a syntax [`Tree`].
#[doc(alias = "TSQueryCursor")]
pub struct QueryCursor {
    ptr: NonNull<ffi::TSQueryCursor>,
}
/// A key-value pair associated with a particular pattern in a [`Query`].
#[derive(Debug, PartialEq, Eq)]
pub struct QueryProperty {
    pub key: Box<str>,
    pub value: Option<Box<str>>,
    pub capture_id: Option<usize>,
}
/// An argument to a [`QueryPredicate`]: either a capture index or a literal
/// string.
#[derive(Debug, PartialEq, Eq)]
pub enum QueryPredicateArg {
    Capture(u32),
    String(Box<str>),
}
/// A predicate — an operator together with its arguments — associated with a
/// particular pattern in a [`Query`].
#[derive(Debug, PartialEq, Eq)]
pub struct QueryPredicate {
    pub operator: Box<str>,
    pub args: Box<[QueryPredicateArg]>,
}
/// A match of a [`Query`] to a particular set of [`Node`]s.
pub struct QueryMatch<'cursor, 'tree> {
    pub pattern_index: usize,
    pub captures: &'cursor [QueryCapture<'tree>],
    // The C-side id of this match, used when talking back to the cursor.
    id: u32,
    cursor: *mut ffi::TSQueryCursor,
}
/// A sequence of [`QueryMatch`]es associated with a given [`QueryCursor`].
pub struct QueryMatches<'query, 'tree: 'query, T: TextProvider<I>, I: AsRef<[u8]>> {
    ptr: *mut ffi::TSQueryCursor,
    query: &'query Query,
    text_provider: T,
    // Scratch buffers — presumably reused while evaluating text predicates;
    // confirm against the predicate-evaluation code.
    buffer1: Vec<u8>,
    buffer2: Vec<u8>,
    current_match: Option<QueryMatch<'query, 'tree>>,
    // Keeps the heap-allocated C options (and boxed progress callback) alive
    // for the duration of iteration; freed by `QueryCursorOptionsDrop`.
    _options: Option<QueryCursorOptionsDrop>,
    _phantom: PhantomData<(&'tree (), I)>,
}
/// A sequence of [`QueryCapture`]s associated with a given [`QueryCursor`].
///
/// During iteration, each element is a [`QueryMatch`] paired with an index;
/// the index identifies the newly-reported capture within that match's
/// [`QueryMatch::captures`] slice.
pub struct QueryCaptures<'query, 'tree: 'query, T: TextProvider<I>, I: AsRef<[u8]>> {
    ptr: *mut ffi::TSQueryCursor,
    query: &'query Query,
    text_provider: T,
    // Scratch buffers — presumably reused while evaluating text predicates;
    // confirm against the predicate-evaluation code.
    buffer1: Vec<u8>,
    buffer2: Vec<u8>,
    current_match: Option<(QueryMatch<'query, 'tree>, usize)>,
    // Keeps the heap-allocated C options (and boxed progress callback) alive
    // for the duration of iteration; freed by `QueryCursorOptionsDrop`.
    _options: Option<QueryCursorOptionsDrop>,
    _phantom: PhantomData<(&'tree (), I)>,
}
/// A source of the document text covered by a [`Node`], supplied in chunks.
pub trait TextProvider<I>
where
    I: AsRef<[u8]>,
{
    type I: Iterator<Item = I>;
    fn text(&mut self, node: Node) -> Self::I;
}
/// A particular [`Node`] that has been captured with a particular name within a
/// [`Query`].
#[derive(Clone, Copy, Debug)]
#[repr(C)]
pub struct QueryCapture<'tree> {
    pub node: Node<'tree>,
    pub index: u32,
}
/// An error that occurred when trying to assign an incompatible [`Language`] to
/// a [`Parser`]. If the `wasm` feature is enabled, this can also indicate a failure
/// to load the Wasm store.
#[derive(Debug, PartialEq, Eq)]
pub enum LanguageError {
    /// The language's ABI version (carried in the payload) falls outside the
    /// supported range.
    Version(usize),
    #[cfg(feature = "wasm")]
    Wasm,
}
/// An error that occurred in [`Parser::set_included_ranges`].
#[derive(Debug, PartialEq, Eq)]
pub struct IncludedRangesError(pub usize);
/// An error that occurred when trying to create a [`Query`].
#[derive(Debug, PartialEq, Eq)]
pub struct QueryError {
    pub row: usize,
    pub column: usize,
    pub offset: usize,
    pub message: String,
    pub kind: QueryErrorKind,
}
/// The category of a [`QueryError`].
#[derive(Debug, PartialEq, Eq)]
pub enum QueryErrorKind {
    Syntax,
    NodeType,
    Field,
    Capture,
    Predicate,
    Structure,
    Language,
}
#[derive(Debug)]
/// The first item is the capture index
/// The next is capture specific, depending on what item is expected
/// The first bool is if the capture is positive
/// The last item is a bool signifying whether or not it's meant to match
/// any or all captures
enum TextPredicateCapture {
    EqString(u32, Box<str>, bool, bool),
    EqCapture(u32, u32, bool, bool),
    MatchString(u32, regex::bytes::Regex, bool, bool),
    AnyString(u32, Box<[Box<str>]>, bool),
}
// TODO: Remove this struct at some point. If `core::str::lossy::Utf8Lossy`
// is ever stabilized.
/// Incremental lossy UTF-8 decoding state over a byte slice.
pub struct LossyUtf8<'a> {
    // Remaining undecoded input.
    bytes: &'a [u8],
    // Whether the decoder is currently emitting a replacement sequence.
    in_replacement: bool,
}
impl Language {
    /// Create a [`Language`] from the raw language function produced by a
    /// generated parser.
    #[must_use]
    pub fn new(builder: LanguageFn) -> Self {
        Self(unsafe { builder.into_raw()().cast() })
    }
    /// Get the name of this language. This returns `None` in older parsers.
    #[doc(alias = "ts_language_name")]
    #[must_use]
    pub fn name(&self) -> Option<&'static str> {
        let ptr = unsafe { ffi::ts_language_name(self.0) };
        (!ptr.is_null()).then(|| unsafe { CStr::from_ptr(ptr) }.to_str().unwrap())
    }
    /// Get the ABI version number that indicates which version of the
    /// Tree-sitter CLI that was used to generate this [`Language`].
    #[doc(alias = "ts_language_abi_version")]
    #[must_use]
    pub fn abi_version(&self) -> usize {
        unsafe { ffi::ts_language_abi_version(self.0) as usize }
    }
    /// Get the metadata for this language. This information is generated by the
    /// CLI, and relies on the language author providing the correct metadata in
    /// the language's `tree-sitter.json` file.
    ///
    /// See also [`LanguageMetadata`].
    #[doc(alias = "ts_language_metadata")]
    #[must_use]
    pub fn metadata(&self) -> Option<LanguageMetadata> {
        unsafe {
            let ptr = ffi::ts_language_metadata(self.0);
            (!ptr.is_null()).then(|| (*ptr).into())
        }
    }
    /// Get the number of distinct node types in this language.
    #[doc(alias = "ts_language_symbol_count")]
    #[must_use]
    pub fn node_kind_count(&self) -> usize {
        unsafe { ffi::ts_language_symbol_count(self.0) as usize }
    }
    /// Get the number of valid states in this language.
    #[doc(alias = "ts_language_state_count")]
    #[must_use]
    pub fn parse_state_count(&self) -> usize {
        unsafe { ffi::ts_language_state_count(self.0) as usize }
    }
    /// Get a list of all supertype symbols for the language.
    #[doc(alias = "ts_language_supertypes")]
    #[must_use]
    pub fn supertypes(&self) -> &[u16] {
        let mut length = 0u32;
        unsafe {
            let ptr = ffi::ts_language_supertypes(self.0, core::ptr::addr_of_mut!(length));
            // Guard the empty case so we never build a slice from a pointer
            // that may not be valid when there are no entries — confirm the
            // C API's contract for empty results.
            if length == 0 {
                &[]
            } else {
                slice::from_raw_parts(ptr.cast_mut(), length as usize)
            }
        }
    }
    /// Get a list of all subtype symbols for a given supertype symbol.
    // NOTE(review): the doc alias below says "ts_language_supertype_map" but
    // the call is `ts_language_subtypes` — confirm which name the C API
    // actually exposes and update the alias if stale.
    #[doc(alias = "ts_language_supertype_map")]
    #[must_use]
    pub fn subtypes_for_supertype(&self, supertype: u16) -> &[u16] {
        unsafe {
            let mut length = 0u32;
            let ptr = ffi::ts_language_subtypes(self.0, supertype, core::ptr::addr_of_mut!(length));
            // Same empty-result guard as in `supertypes` above.
            if length == 0 {
                &[]
            } else {
                slice::from_raw_parts(ptr.cast_mut(), length as usize)
            }
        }
    }
    /// Get the name of the node kind for the given numerical id.
    #[doc(alias = "ts_language_symbol_name")]
    #[must_use]
    pub fn node_kind_for_id(&self, id: u16) -> Option<&'static str> {
        let ptr = unsafe { ffi::ts_language_symbol_name(self.0, id) };
        (!ptr.is_null()).then(|| unsafe { CStr::from_ptr(ptr) }.to_str().unwrap())
    }
    /// Get the numeric id for the given node kind.
    #[doc(alias = "ts_language_symbol_for_name")]
    #[must_use]
    pub fn id_for_node_kind(&self, kind: &str, named: bool) -> u16 {
        unsafe {
            ffi::ts_language_symbol_for_name(
                self.0,
                kind.as_bytes().as_ptr().cast::<c_char>(),
                kind.len() as u32,
                named,
            )
        }
    }
    /// Check if the node type for the given numerical id is named (as opposed
    /// to an anonymous node type).
    #[must_use]
    pub fn node_kind_is_named(&self, id: u16) -> bool {
        unsafe { ffi::ts_language_symbol_type(self.0, id) == ffi::TSSymbolTypeRegular }
    }
    /// Check if the node type for the given numerical id is visible (as opposed
    /// to a hidden node type).
    #[must_use]
    pub fn node_kind_is_visible(&self, id: u16) -> bool {
        unsafe { ffi::ts_language_symbol_type(self.0, id) <= ffi::TSSymbolTypeAnonymous }
    }
    /// Check if the node type for the given numerical id is a supertype.
    #[must_use]
    pub fn node_kind_is_supertype(&self, id: u16) -> bool {
        unsafe { ffi::ts_language_symbol_type(self.0, id) == ffi::TSSymbolTypeSupertype }
    }
    /// Get the number of distinct field names in this language.
    #[doc(alias = "ts_language_field_count")]
    #[must_use]
    pub fn field_count(&self) -> usize {
        unsafe { ffi::ts_language_field_count(self.0) as usize }
    }
    /// Get the field name for the given numerical id.
    #[doc(alias = "ts_language_field_name_for_id")]
    #[must_use]
    pub fn field_name_for_id(&self, field_id: u16) -> Option<&'static str> {
        let ptr = unsafe { ffi::ts_language_field_name_for_id(self.0, field_id) };
        (!ptr.is_null()).then(|| unsafe { CStr::from_ptr(ptr) }.to_str().unwrap())
    }
    /// Get the numerical id for the given field name.
    ///
    /// Returns `None` when the C side reports id 0 (i.e. no such field).
    #[doc(alias = "ts_language_field_id_for_name")]
    #[must_use]
    pub fn field_id_for_name(&self, field_name: impl AsRef<[u8]>) -> Option<FieldId> {
        let field_name = field_name.as_ref();
        let id = unsafe {
            ffi::ts_language_field_id_for_name(
                self.0,
                field_name.as_ptr().cast::<c_char>(),
                field_name.len() as u32,
            )
        };
        FieldId::new(id)
    }
    /// Get the next parse state. Combine this with
    /// [`lookahead_iterator`](Language::lookahead_iterator) to
    /// generate completion suggestions or valid symbols in error nodes.
    ///
    /// Example:
    /// ```ignore
    /// let state = language.next_state(node.parse_state(), node.grammar_id());
    /// ```
    #[doc(alias = "ts_language_next_state")]
    #[must_use]
    pub fn next_state(&self, state: u16, id: u16) -> u16 {
        unsafe { ffi::ts_language_next_state(self.0, state, id) }
    }
    /// Create a new lookahead iterator for this language and parse state.
    ///
    /// This returns `None` if state is invalid for this language.
    ///
    /// Iterating [`LookaheadIterator`] will yield valid symbols in the given
    /// parse state. Newly created lookahead iterators will return the `ERROR`
    /// symbol from [`LookaheadIterator::current_symbol`].
    ///
    /// Lookahead iterators can be useful to generate suggestions and improve
    /// syntax error diagnostics. To get symbols valid in an `ERROR` node, use the
    /// lookahead iterator on its first leaf node state. For `MISSING` nodes, a
    /// lookahead iterator created on the previous non-extra leaf node may be
    /// appropriate.
    #[doc(alias = "ts_lookahead_iterator_new")]
    #[must_use]
    pub fn lookahead_iterator(&self, state: u16) -> Option<LookaheadIterator> {
        let ptr = unsafe { ffi::ts_lookahead_iterator_new(self.0, state) };
        (!ptr.is_null()).then(|| unsafe { LookaheadIterator::from_raw(ptr) })
    }
}
impl From<LanguageFn> for Language {
    fn from(value: LanguageFn) -> Self {
        Self::new(value)
    }
}
impl Clone for Language {
    fn clone(&self) -> Self {
        // Delegates copying to the C side (`ts_language_copy`); presumably
        // reference-counted there — confirm against the C implementation.
        unsafe { Self(ffi::ts_language_copy(self.0)) }
    }
}
impl Drop for Language {
    fn drop(&mut self) {
        // Release the C-side language object paired with `ts_language_copy`.
        unsafe { ffi::ts_language_delete(self.0) }
    }
}
impl Deref for LanguageRef<'_> {
    type Target = Language;
    fn deref(&self) -> &Self::Target {
        // SAFETY: `Language` is `#[repr(transparent)]` over
        // `*const ffi::TSLanguage`, and we take the address of exactly that
        // field, so the cast reinterprets compatible layouts.
        unsafe { &*(core::ptr::addr_of!(self.0).cast::<Language>()) }
    }
}
impl Default for Parser {
    fn default() -> Self {
        Self::new()
    }
}
impl Parser {
    /// Create a new parser.
    #[doc(alias = "ts_parser_new")]
    #[must_use]
    pub fn new() -> Self {
        unsafe {
            let parser = ffi::ts_parser_new();
            // Assumes `ts_parser_new` never returns null — TODO confirm its
            // behavior on allocation failure.
            Self(NonNull::new_unchecked(parser))
        }
    }
    /// Set the language that the parser should use for parsing.
    ///
    /// Returns `Ok(())` if the language was successfully assigned, or
    /// [`LanguageError::Version`] if there was a version mismatch: the language
    /// was generated with an incompatible version of the Tree-sitter CLI. Check
    /// the language's version using [`Language::abi_version`] and compare it to
    /// this library's [`LANGUAGE_VERSION`] and
    /// [`MIN_COMPATIBLE_LANGUAGE_VERSION`] constants. With the `wasm` feature
    /// enabled, [`LanguageError::Wasm`] is returned when the C side rejects the
    /// assignment.
    #[doc(alias = "ts_parser_set_language")]
    pub fn set_language(&mut self, language: &Language) -> Result<(), LanguageError> {
        let version = language.abi_version();
        if (MIN_COMPATIBLE_LANGUAGE_VERSION..=LANGUAGE_VERSION).contains(&version) {
            #[allow(unused_variables)]
            let success = unsafe { ffi::ts_parser_set_language(self.0.as_ptr(), language.0) };
            // Without the wasm feature the return value is ignored: an
            // in-range ABI version is treated as always accepted.
            #[cfg(feature = "wasm")]
            if !success {
                return Err(LanguageError::Wasm);
            }
            Ok(())
        } else {
            Err(LanguageError::Version(version))
        }
    }
    /// Get the parser's current language.
    #[doc(alias = "ts_parser_language")]
    #[must_use]
    pub fn language(&self) -> Option<LanguageRef<'_>> {
        let ptr = unsafe { ffi::ts_parser_language(self.0.as_ptr()) };
        (!ptr.is_null()).then_some(LanguageRef(ptr, PhantomData))
    }
    /// Get the parser's current logger, if one was installed with
    /// [`Parser::set_logger`].
    #[doc(alias = "ts_parser_logger")]
    #[must_use]
    pub fn logger(&self) -> Option<&Logger> {
        let logger = unsafe { ffi::ts_parser_logger(self.0.as_ptr()) };
        // `payload` holds the `Box<Logger>` installed by `set_logger`; null
        // means no logger is set.
        unsafe { logger.payload.cast::<Logger>().as_ref() }
    }
    /// Set the logging callback that the parser should use during parsing.
    /// Passing `None` removes any previously installed logger.
    #[doc(alias = "ts_parser_set_logger")]
    pub fn set_logger(&mut self, logger: Option<Logger>) {
        // Reclaim and free the previously boxed logger, if any, so replacing
        // the logger does not leak it. NOTE(review): confirm parser teardown
        // also frees the final logger box.
        let prev_logger = unsafe { ffi::ts_parser_logger(self.0.as_ptr()) };
        if !prev_logger.payload.is_null() {
            drop(unsafe { Box::from_raw(prev_logger.payload.cast::<Logger>()) });
        }
        let c_logger = if let Some(logger) = logger {
            let container = Box::new(logger);
            // C trampoline: recovers the boxed Rust closure from `payload`
            // and forwards each message. Non-UTF8 messages are dropped.
            unsafe extern "C" fn log(
                payload: *mut c_void,
                c_log_type: ffi::TSLogType,
                c_message: *const c_char,
            ) {
                let callback = payload.cast::<Logger>().as_mut().unwrap();
                if let Ok(message) = CStr::from_ptr(c_message).to_str() {
                    let log_type = if c_log_type == ffi::TSLogTypeParse {
                        LogType::Parse
                    } else {
                        LogType::Lex
                    };
                    callback(log_type, message);
                }
            }
            // Leak the box into the C struct; it is reclaimed by the next
            // `set_logger` call (see above).
            let raw_container = Box::into_raw(container);
            ffi::TSLogger {
                payload: raw_container.cast::<c_void>(),
                log: Some(log),
            }
        } else {
            ffi::TSLogger {
                payload: ptr::null_mut(),
                log: None,
            }
        };
        unsafe { ffi::ts_parser_set_logger(self.0.as_ptr(), c_logger) };
    }
/// Set the destination to which the parser should write debugging graphs
/// during parsing. The graphs are formatted in the DOT language. You may
/// want to pipe these graphs directly to a `dot(1)` process in order to
/// generate SVG output.
#[doc(alias = "ts_parser_print_dot_graphs")]
#[cfg(not(target_os = "wasi"))]
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub fn print_dot_graphs(
&mut self,
#[cfg(unix)] file: &impl AsRawFd,
#[cfg(windows)] file: &impl AsRawHandle,
) {
#[cfg(unix)]
{
let fd = file.as_raw_fd();
unsafe {
ffi::ts_parser_print_dot_graphs(self.0.as_ptr(), ffi::_ts_dup(fd));
}
}
#[cfg(windows)]
{
let handle = file.as_raw_handle();
unsafe {
ffi::ts_parser_print_dot_graphs(self.0.as_ptr(), ffi::_ts_dup(handle));
}
}
}
/// Stop the parser from printing debugging graphs while parsing.
#[doc(alias = "ts_parser_print_dot_graphs")]
#[cfg(not(target_os = "wasi"))]
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub fn stop_printing_dot_graphs(&mut self) {
unsafe { ffi::ts_parser_print_dot_graphs(self.0.as_ptr(), -1) }
}
/// Parse a slice of UTF8 text.
///
/// # Arguments:
/// * `text` The UTF8-encoded text to parse.
/// * `old_tree` A previous syntax tree parsed from the same document. If the text of the
/// document has changed since `old_tree` was created, then you must edit `old_tree` to match
/// the new text using [`Tree::edit`].
///
/// Returns a [`Tree`] if parsing succeeded, or `None` if:
/// * The parser has not yet had a language assigned with [`Parser::set_language`]
#[doc(alias = "ts_parser_parse")]
pub fn parse(&mut self, text: impl AsRef<[u8]>, old_tree: Option<&Tree>) -> Option<Tree> {
let bytes = text.as_ref();
let len = bytes.len();
self.parse_with_options(
&mut |i, _| (i < len).then(|| &bytes[i..]).unwrap_or_default(),
old_tree,
None,
)
}
    /// Parse text provided in chunks by a callback.
    ///
    /// # Arguments:
    /// * `callback` A function that takes a byte offset and position and returns a slice of
    ///   UTF8-encoded text starting at that byte offset and position. The slices can be of any
    ///   length. If the given position is at the end of the text, the callback should return an
    ///   empty slice.
    /// * `old_tree` A previous syntax tree parsed from the same document. If the text of the
    ///   document has changed since `old_tree` was created, then you must edit `old_tree` to
    ///   match the new text using [`Tree::edit`].
    /// * `options` Options for parsing the text. This can be used to set a progress callback.
    pub fn parse_with_options<T: AsRef<[u8]>, F: FnMut(usize, Point) -> T>(
        &mut self,
        callback: &mut F,
        old_tree: Option<&Tree>,
        options: Option<ParseOptions>,
    ) -> Option<Tree> {
        type Payload<'a, F, T> = (&'a mut F, Option<T>);
        // This C function is passed to Tree-sitter as the progress callback.
        // Returning `true` (on `ControlFlow::Break`) appears to signal
        // cancellation to the C side — confirm against the C API docs.
        unsafe extern "C" fn progress(state: *mut ffi::TSParseState) -> bool {
            let callback = (*state)
                .payload
                .cast::<ParseProgressCallback>()
                .as_mut()
                .unwrap();
            match callback(&ParseState::from_raw(state)) {
                ControlFlow::Continue(()) => false,
                ControlFlow::Break(()) => true,
            }
        }
        // This C function is passed to Tree-sitter as the input callback.
        unsafe extern "C" fn read<T: AsRef<[u8]>, F: FnMut(usize, Point) -> T>(
            payload: *mut c_void,
            byte_offset: u32,
            position: ffi::TSPoint,
            bytes_read: *mut u32,
        ) -> *const c_char {
            let (callback, text) = payload.cast::<Payload<F, T>>().as_mut().unwrap();
            // Stash the returned chunk in the payload so the pointer handed
            // to C stays alive until the next `read` call.
            *text = Some(callback(byte_offset as usize, position.into()));
            let slice = text.as_ref().unwrap().as_ref();
            *bytes_read = slice.len() as u32;
            slice.as_ptr().cast::<c_char>()
        }
        let empty_options = ffi::TSParseOptions {
            payload: ptr::null_mut(),
            progress_callback: None,
        };
        // Kept alive on this stack frame so the C side's payload pointer
        // remains valid for the whole parse.
        let mut callback_ptr;
        let parse_options = if let Some(options) = options {
            if let Some(cb) = options.progress_callback {
                callback_ptr = cb;
                ffi::TSParseOptions {
                    payload: core::ptr::addr_of_mut!(callback_ptr).cast::<c_void>(),
                    progress_callback: Some(progress),
                }
            } else {
                empty_options
            }
        } else {
            empty_options
        };
        // A pointer to this payload is passed on every call to the `read` C function.
        // The payload contains two things:
        // 1. A reference to the rust `callback`.
        // 2. The text that was returned from the previous call to `callback`. This allows the
        //    callback to return owned values like vectors.
        let mut payload: Payload<F, T> = (callback, None);
        let c_input = ffi::TSInput {
            payload: ptr::addr_of_mut!(payload).cast::<c_void>(),
            read: Some(read::<T, F>),
            encoding: ffi::TSInputEncodingUTF8,
            decode: None,
        };
        let c_old_tree = old_tree.map_or(ptr::null_mut(), |t| t.0.as_ptr());
        unsafe {
            // A null tree from the C side (no language set, or cancelled)
            // maps to `None`.
            let c_new_tree = ffi::ts_parser_parse_with_options(
                self.0.as_ptr(),
                c_old_tree,
                c_input,
                parse_options,
            );
            NonNull::new(c_new_tree).map(Tree)
        }
    }
/// Parse a slice of UTF16 little-endian text.
///
/// # Arguments:
/// * `text` The UTF16-encoded text to parse.
/// * `old_tree` A previous syntax tree parsed from the same document. If the text of the
/// document has changed since `old_tree` was created, then you must edit `old_tree` to match
/// the new text using [`Tree::edit`].
pub fn parse_utf16_le(
&mut self,
input: impl AsRef<[u16]>,
old_tree: Option<&Tree>,
) -> Option<Tree> {
let code_points = input.as_ref();
let len = code_points.len();
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | true |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/lib/binding_rust/wasm_language.rs | lib/binding_rust/wasm_language.rs | use std::{
error,
ffi::{CStr, CString},
fmt,
mem::{self, MaybeUninit},
os::raw::c_char,
};
pub use wasmtime_c_api::wasmtime;
use crate::{ffi, Language, LanguageError, Parser, FREE_FN};
// Force Cargo to include wasmtime-c-api as a dependency of this crate,
// even though it is only used by the C code.
#[allow(unused)]
fn _use_wasmtime() {
    wasm_engine_new();
}
/// C-layout mirror of wasmtime's `wasm_engine_t`, wrapping a [`wasmtime::Engine`].
/// `#[repr(C)]` so the C side and this crate agree on its layout.
#[repr(C)]
#[derive(Clone)]
#[allow(non_camel_case_types)]
pub struct wasm_engine_t {
    pub(crate) engine: wasmtime::Engine,
}
/// Owning wrapper around a C-side `TSWasmStore`, used to load Wasm languages.
pub struct WasmStore(*mut ffi::TSWasmStore);
// SAFETY(review): asserts the underlying `TSWasmStore` may be moved and shared
// across threads — confirm against the C implementation's thread-safety
// guarantees.
unsafe impl Send for WasmStore {}
unsafe impl Sync for WasmStore {}
/// An error produced while creating a [`WasmStore`] or loading a Wasm language.
#[derive(Debug, PartialEq, Eq)]
pub struct WasmError {
    pub kind: WasmErrorKind,
    pub message: String,
}
/// The category of a [`WasmError`].
#[derive(Debug, PartialEq, Eq)]
pub enum WasmErrorKind {
    Parse,
    Compile,
    Instantiate,
    Other,
}
impl WasmStore {
    /// Create a Wasm store backed by the given wasmtime engine.
    pub fn new(engine: &wasmtime::Engine) -> Result<Self, WasmError> {
        unsafe {
            let mut error = MaybeUninit::<ffi::TSWasmError>::uninit();
            let store = ffi::ts_wasm_store_new(
                std::ptr::from_ref::<wasmtime::Engine>(engine)
                    .cast_mut()
                    .cast(),
                error.as_mut_ptr(),
            );
            // `error` is only read after a null return: the C side is assumed
            // to initialize it exactly in that failure case — confirm.
            if store.is_null() {
                Err(WasmError::new(error.assume_init()))
            } else {
                Ok(Self(store))
            }
        }
    }
    /// Compile and load a language from Wasm bytes, registering it under `name`.
    pub fn load_language(&mut self, name: &str, bytes: &[u8]) -> Result<Language, WasmError> {
        let name = CString::new(name).unwrap();
        unsafe {
            let mut error = MaybeUninit::<ffi::TSWasmError>::uninit();
            let language = ffi::ts_wasm_store_load_language(
                self.0,
                name.as_ptr(),
                bytes.as_ptr().cast::<c_char>(),
                bytes.len() as u32,
                error.as_mut_ptr(),
            );
            // Same error-initialization contract as in `new` above.
            if language.is_null() {
                Err(WasmError::new(error.assume_init()))
            } else {
                Ok(Language(language))
            }
        }
    }
    /// The number of languages currently loaded into this store.
    #[must_use]
    pub fn language_count(&self) -> usize {
        unsafe { ffi::ts_wasm_store_language_count(self.0) }
    }
}
impl WasmError {
    /// Convert a C-side error into a Rust value, taking ownership of its
    /// message buffer.
    ///
    /// # Safety
    /// `error.message` must be a valid NUL-terminated string allocated with
    /// the allocator paired with `FREE_FN`; it is freed here after copying.
    unsafe fn new(error: ffi::TSWasmError) -> Self {
        let message = CStr::from_ptr(error.message).to_str().unwrap().to_string();
        (FREE_FN)(error.message.cast());
        Self {
            kind: match error.kind {
                ffi::TSWasmErrorKindParse => WasmErrorKind::Parse,
                ffi::TSWasmErrorKindCompile => WasmErrorKind::Compile,
                ffi::TSWasmErrorKindInstantiate => WasmErrorKind::Instantiate,
                _ => WasmErrorKind::Other,
            },
            message,
        }
    }
}
impl Language {
    /// Check whether this language was loaded from a Wasm module.
    #[must_use]
    pub fn is_wasm(&self) -> bool {
        unsafe { ffi::ts_language_is_wasm(self.0) }
    }
}
impl Parser {
    /// Hand ownership of a Wasm store to the parser.
    ///
    /// NOTE(review): this currently always returns `Ok(())`; the `Result`
    /// return type seems reserved for future failure modes — confirm.
    pub fn set_wasm_store(&mut self, store: WasmStore) -> Result<(), LanguageError> {
        unsafe { ffi::ts_parser_set_wasm_store(self.0.as_ptr(), store.0) };
        // Ownership moved to the parser; skip `WasmStore`'s Drop so the C
        // object is not freed here.
        mem::forget(store);
        Ok(())
    }
    /// Take back ownership of the parser's Wasm store, if it has one.
    pub fn take_wasm_store(&mut self) -> Option<WasmStore> {
        let ptr = unsafe { ffi::ts_parser_take_wasm_store(self.0.as_ptr()) };
        if ptr.is_null() {
            None
        } else {
            Some(WasmStore(ptr))
        }
    }
}
impl Drop for WasmStore {
    fn drop(&mut self) {
        // Free the C-side store this wrapper owns.
        unsafe { ffi::ts_wasm_store_delete(self.0) };
    }
}
impl fmt::Display for WasmError {
    /// Renders as `"<kind description>: <message>"`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let prefix = match self.kind {
            WasmErrorKind::Parse => "Failed to parse Wasm",
            WasmErrorKind::Compile => "Failed to compile Wasm",
            WasmErrorKind::Instantiate => "Failed to instantiate Wasm module",
            WasmErrorKind::Other => "Unknown error",
        };
        f.write_str(prefix)?;
        f.write_str(": ")?;
        f.write_str(&self.message)
    }
}
impl error::Error for WasmError {}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/lib/binding_rust/build.rs | lib/binding_rust/build.rs | use std::{env, fs, path::PathBuf};
/// Build-script entry point: optionally regenerates the FFI bindings, copies
/// the Wasm stdlib symbol list, and compiles the Tree-sitter C runtime.
fn main() {
    let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
    let target = env::var("TARGET").unwrap();

    #[cfg(feature = "bindgen")]
    generate_bindings(&out_dir);

    // Ship the Wasm stdlib symbol list next to the build outputs.
    fs::copy(
        "src/wasm/stdlib-symbols.txt",
        out_dir.join("stdlib-symbols.txt"),
    )
    .unwrap();

    let mut build = cc::Build::new();
    println!("cargo:rerun-if-env-changed=CARGO_FEATURE_WASM");
    if env::var("CARGO_FEATURE_WASM").is_ok() {
        build
            .define("TREE_SITTER_FEATURE_WASM", "")
            .define("static_assert(...)", "")
            .include(env::var("DEP_WASMTIME_C_API_INCLUDE").unwrap());
    }

    let manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap());
    let include_dir = manifest_dir.join("include");
    let src_dir = manifest_dir.join("src");
    let wasm_dir = src_dir.join("wasm");

    if target.starts_with("wasm32-unknown") {
        configure_wasm_build(&mut build);
    }

    // Rebuild whenever anything directly under `src/` changes.
    for entry in fs::read_dir(&src_dir).unwrap() {
        let entry_path = entry.unwrap().path();
        println!("cargo:rerun-if-changed={}", entry_path.to_str().unwrap());
    }

    build
        .flag_if_supported("-std=c11")
        .flag_if_supported("-fvisibility=hidden")
        .flag_if_supported("-Wshadow")
        .flag_if_supported("-Wno-unused-parameter")
        .flag_if_supported("-Wno-incompatible-pointer-types")
        .include(&src_dir)
        .include(&wasm_dir)
        .include(&include_dir)
        .define("_POSIX_C_SOURCE", "200112L")
        .define("_DEFAULT_SOURCE", None)
        .define("_DARWIN_C_SOURCE", None)
        .warnings(false)
        .file(src_dir.join("lib.c"))
        .compile("tree-sitter");

    println!("cargo:include={}", include_dir.display());
}
fn configure_wasm_build(config: &mut cc::Build) {
let Ok(wasm_headers) = env::var("DEP_TREE_SITTER_LANGUAGE_WASM_HEADERS") else {
panic!("Environment variable DEP_TREE_SITTER_LANGUAGE_WASM_HEADERS must be set by the language crate");
};
let Ok(wasm_src) = env::var("DEP_TREE_SITTER_LANGUAGE_WASM_SRC").map(PathBuf::from) else {
panic!("Environment variable DEP_TREE_SITTER_LANGUAGE_WASM_SRC must be set by the language crate");
};
config.include(&wasm_headers);
config.files([
wasm_src.join("stdio.c"),
wasm_src.join("stdlib.c"),
wasm_src.join("string.c"),
]);
}
#[cfg(feature = "bindgen")]
fn generate_bindings(out_dir: &std::path::Path) {
use std::str::FromStr;
use bindgen::RustTarget;
const HEADER_PATH: &str = "include/tree_sitter/api.h";
println!("cargo:rerun-if-changed={HEADER_PATH}");
let no_copy = [
"TSInput",
"TSLanguage",
"TSLogger",
"TSLookaheadIterator",
"TSParser",
"TSTree",
"TSQuery",
"TSQueryCursor",
"TSQueryCapture",
"TSQueryMatch",
"TSQueryPredicateStep",
];
let rust_version = env!("CARGO_PKG_RUST_VERSION");
let bindings = bindgen::Builder::default()
.header(HEADER_PATH)
.layout_tests(false)
.allowlist_type("^TS.*")
.allowlist_function("^ts_.*")
.allowlist_var("^TREE_SITTER.*")
.no_copy(no_copy.join("|"))
.prepend_enum_name(false)
.use_core()
.clang_arg("-D TREE_SITTER_FEATURE_WASM")
.rust_target(RustTarget::from_str(rust_version).unwrap())
.generate()
.expect("Failed to generate bindings");
let bindings_rs = out_dir.join("bindings.rs");
bindings.write_to_file(&bindings_rs).unwrap_or_else(|_| {
panic!(
"Failed to write bindings into path: {}",
bindings_rs.display()
)
});
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/lib/binding_rust/util.rs | lib/binding_rust/util.rs | use core::ffi::c_void;
use super::FREE_FN;
/// A raw pointer and a length, exposed as an iterator.
pub struct CBufferIter<T> {
ptr: *mut T,
count: usize,
i: usize,
}
impl<T> CBufferIter<T> {
pub const unsafe fn new(ptr: *mut T, count: usize) -> Self {
Self { ptr, count, i: 0 }
}
}
impl<T: Copy> Iterator for CBufferIter<T> {
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
let i = self.i;
if i >= self.count {
None
} else {
self.i += 1;
Some(unsafe { *self.ptr.add(i) })
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.count - self.i;
(remaining, Some(remaining))
}
}
impl<T: Copy> ExactSizeIterator for CBufferIter<T> {}
impl<T> Drop for CBufferIter<T> {
fn drop(&mut self) {
if !self.ptr.is_null() {
unsafe { (FREE_FN)(self.ptr.cast::<c_void>()) };
}
}
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
tree-sitter/tree-sitter | https://github.com/tree-sitter/tree-sitter/blob/dd60d5cff079dbae8db798ce7272879dbd2ac9e8/lib/binding_rust/ffi.rs | lib/binding_rust/ffi.rs | #![allow(dead_code)]
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(clippy::missing_const_for_fn)]
#[cfg(feature = "bindgen")]
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
#[cfg(not(feature = "bindgen"))]
include!("./bindings.rs");
#[cfg(unix)]
#[cfg(feature = "std")]
extern "C" {
pub(crate) fn _ts_dup(fd: std::os::raw::c_int) -> std::os::raw::c_int;
}
#[cfg(windows)]
#[cfg(feature = "std")]
extern "C" {
pub(crate) fn _ts_dup(handle: *mut std::os::raw::c_void) -> std::os::raw::c_int;
}
use core::{marker::PhantomData, mem::ManuallyDrop, ptr::NonNull, str};
use crate::{
Language, LookaheadIterator, Node, ParseState, Parser, Query, QueryCursor, QueryCursorState,
QueryError, Tree, TreeCursor,
};
impl Language {
/// Reconstructs a [`Language`] from a raw pointer.
///
/// # Safety
///
/// `ptr` must be non-null.
#[must_use]
pub const unsafe fn from_raw(ptr: *const TSLanguage) -> Self {
Self(ptr)
}
/// Consumes the [`Language`], returning a raw pointer to the underlying C structure.
#[must_use]
pub fn into_raw(self) -> *const TSLanguage {
ManuallyDrop::new(self).0
}
}
impl Parser {
/// Reconstructs a [`Parser`] from a raw pointer.
///
/// # Safety
///
/// `ptr` must be non-null.
#[must_use]
pub const unsafe fn from_raw(ptr: *mut TSParser) -> Self {
Self(NonNull::new_unchecked(ptr))
}
/// Consumes the [`Parser`], returning a raw pointer to the underlying C structure.
///
/// # Safety
///
/// It's a caller responsibility to adjust parser's state
/// like disable logging or dot graphs printing if this
/// may cause issues like use after free.
#[must_use]
pub fn into_raw(self) -> *mut TSParser {
ManuallyDrop::new(self).0.as_ptr()
}
}
impl ParseState {
/// Reconstructs a [`ParseState`] from a raw pointer
///
/// # Safety
///
/// `ptr` must be non-null.
#[must_use]
pub const unsafe fn from_raw(ptr: *mut TSParseState) -> Self {
Self(NonNull::new_unchecked(ptr))
}
/// Consumes the [`ParseState`], returning a raw pointer to the underlying C structure.
#[must_use]
pub fn into_raw(self) -> *mut TSParseState {
ManuallyDrop::new(self).0.as_ptr()
}
}
impl Tree {
/// Reconstructs a [`Tree`] from a raw pointer.
///
/// # Safety
///
/// `ptr` must be non-null.
#[must_use]
pub const unsafe fn from_raw(ptr: *mut TSTree) -> Self {
Self(NonNull::new_unchecked(ptr))
}
/// Consumes the [`Tree`], returning a raw pointer to the underlying C structure.
#[must_use]
pub fn into_raw(self) -> *mut TSTree {
ManuallyDrop::new(self).0.as_ptr()
}
}
impl Node<'_> {
/// Reconstructs a [`Node`] from a raw pointer.
///
/// # Safety
///
/// `ptr` must be non-null.
#[must_use]
pub const unsafe fn from_raw(raw: TSNode) -> Self {
Self(raw, PhantomData)
}
/// Consumes the [`Node`], returning a raw pointer to the underlying C structure.
#[must_use]
pub fn into_raw(self) -> TSNode {
ManuallyDrop::new(self).0
}
}
impl TreeCursor<'_> {
/// Reconstructs a [`TreeCursor`] from a raw pointer.
///
/// # Safety
///
/// `ptr` must be non-null.
#[must_use]
pub const unsafe fn from_raw(raw: TSTreeCursor) -> Self {
Self(raw, PhantomData)
}
/// Consumes the [`TreeCursor`], returning a raw pointer to the underlying C structure.
#[must_use]
pub fn into_raw(self) -> TSTreeCursor {
ManuallyDrop::new(self).0
}
}
impl Query {
/// Reconstructs a [`Query`] from a raw pointer.
///
/// # Safety
///
/// `ptr` must be non-null.
pub unsafe fn from_raw(ptr: *mut TSQuery, source: &str) -> Result<Self, QueryError> {
Self::from_raw_parts(ptr, source)
}
/// Consumes the [`Query`], returning a raw pointer to the underlying C structure.
#[must_use]
pub fn into_raw(self) -> *mut TSQuery {
ManuallyDrop::new(self).ptr.as_ptr()
}
}
impl QueryCursor {
/// Reconstructs a [`QueryCursor`] from a raw pointer.
///
/// # Safety
///
/// `ptr` must be non-null.
#[must_use]
pub const unsafe fn from_raw(ptr: *mut TSQueryCursor) -> Self {
Self {
ptr: NonNull::new_unchecked(ptr),
}
}
/// Consumes the [`QueryCursor`], returning a raw pointer to the underlying C structure.
#[must_use]
pub fn into_raw(self) -> *mut TSQueryCursor {
ManuallyDrop::new(self).ptr.as_ptr()
}
}
impl QueryCursorState {
/// Reconstructs a [`QueryCursorState`] from a raw pointer.
///
/// # Safety
///
/// `ptr` must be non-null.
#[must_use]
pub const unsafe fn from_raw(ptr: *mut TSQueryCursorState) -> Self {
Self(NonNull::new_unchecked(ptr))
}
/// Consumes the [`QueryCursorState`], returning a raw pointer to the underlying C structure.
#[must_use]
pub fn into_raw(self) -> *mut TSQueryCursorState {
ManuallyDrop::new(self).0.as_ptr()
}
}
impl LookaheadIterator {
/// Reconstructs a [`LookaheadIterator`] from a raw pointer.
///
/// # Safety
///
/// `ptr` must be non-null.
#[must_use]
pub const unsafe fn from_raw(ptr: *mut TSLookaheadIterator) -> Self {
Self(NonNull::new_unchecked(ptr))
}
/// Consumes the [`LookaheadIterator`], returning a raw pointer to the underlying C structure.
#[must_use]
pub fn into_raw(self) -> *mut TSLookaheadIterator {
ManuallyDrop::new(self).0.as_ptr()
}
}
| rust | MIT | dd60d5cff079dbae8db798ce7272879dbd2ac9e8 | 2026-01-04T15:38:34.599794Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/gen-protos/src/main.rs | lib/gen-protos/src/main.rs | // Copyright 2022 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::io::Result;
use std::path::Path;
fn main() -> Result<()> {
let input = [
"default_index.proto",
"git_store.proto",
"local_working_copy.proto",
"simple_op_store.proto",
"simple_store.proto",
];
let root = Path::new(env!("CARGO_MANIFEST_DIR")).parent().unwrap();
let protos_dir = root.join("src").join("protos");
prost_build::Config::new()
.out_dir(&protos_dir)
.include_file("mod.rs")
// For old protoc versions. 3.12.4 needs this, but 3.21.12 doesn't.
.protoc_arg("--experimental_allow_proto3_optional")
.compile_protos(
&input
.into_iter()
.map(|x| protos_dir.join(x))
.collect::<Vec<_>>(),
&[protos_dir],
)
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/merge.rs | lib/src/merge.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Generic algorithms for working with merged values, plus specializations for
//! some common types of merged values.
use std::borrow::Borrow;
use std::collections::HashMap;
use std::fmt::Debug;
use std::fmt::Formatter;
use std::fmt::Write as _;
use std::hash::Hash;
use std::iter;
use std::iter::zip;
use std::ops::Deref;
use std::slice;
use std::sync::Arc;
use futures::future::try_join_all;
use itertools::Itertools as _;
use smallvec::SmallVec;
use smallvec::smallvec;
use smallvec::smallvec_inline;
use crate::backend::BackendResult;
use crate::backend::CopyId;
use crate::backend::FileId;
use crate::backend::TreeValue;
use crate::conflict_labels::ConflictLabels;
use crate::content_hash::ContentHash;
use crate::content_hash::DigestUpdate;
use crate::repo_path::RepoPath;
use crate::repo_path::RepoPathComponent;
use crate::store::Store;
use crate::tree::Tree;
/// A generic diff/transition from one value to another.
///
/// This is not a diff in the `patch(1)` sense. See `diff::ContentDiff` for
/// that.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct Diff<T> {
/// The state before
pub before: T,
/// The state after
pub after: T,
}
impl<T> Diff<T> {
/// Create a new diff
pub fn new(before: T, after: T) -> Self {
Self { before, after }
}
/// Apply a function to both values
pub fn map<U>(self, mut f: impl FnMut(T) -> U) -> Diff<U> {
Diff {
before: f(self.before),
after: f(self.after),
}
}
/// Combine a `Diff<T>` and a `Diff<U>` into a `Diff<(T, U)>`.
pub fn zip<U>(self, other: Diff<U>) -> Diff<(T, U)> {
Diff {
before: (self.before, other.before),
after: (self.after, other.after),
}
}
/// Inverts a diff, swapping the before and after terms.
pub fn invert(self) -> Self {
Self {
before: self.after,
after: self.before,
}
}
/// Convert a `&Diff<T>` into a `Diff<&T>`.
pub fn as_ref(&self) -> Diff<&T> {
Diff {
before: &self.before,
after: &self.after,
}
}
/// Converts a `Diff<T>` or `&Diff<T>` to `Diff<&T::Target>`. (e.g.
/// `Diff<String>` to `Diff<&str>`)
pub fn as_deref(&self) -> Diff<&T::Target>
where
T: Deref,
{
self.as_ref().map(Deref::deref)
}
/// Convert a diff into an array `[before, after]`.
pub fn into_array(self) -> [T; 2] {
[self.before, self.after]
}
}
impl<T: Eq> Diff<T> {
/// Whether the diff represents a change, i.e. if `before` and `after` are
/// not equal
pub fn is_changed(&self) -> bool {
self.before != self.after
}
}
/// Whether to resolve conflict that makes the same change at all sides.
#[derive(Clone, Copy, Debug, Eq, PartialEq, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum SameChange {
/// Leaves same-change conflict unresolved.
Keep,
/// Resolves same-change conflict as if one side were unchanged.
/// (i.e. `A+(A-B)=A`)
///
/// This matches what Git and Mercurial do (in the 3-way case at least), but
/// not what Darcs does. It means that repeated 3-way merging of multiple
/// trees may give different results depending on the order of merging.
Accept,
}
/// Attempt to resolve trivial conflicts between the inputs. There must be
/// an odd number of terms.
pub fn trivial_merge<T>(values: &[T], same_change: SameChange) -> Option<&T>
where
T: Eq + Hash,
{
assert!(
values.len() % 2 == 1,
"trivial_merge() requires an odd number of terms"
);
// Optimize the common cases of 3-way merge and 1-way (non-)merge
if let [add] = values {
return Some(add);
} else if let [add0, remove, add1] = values {
return if add0 == add1 && same_change == SameChange::Accept {
Some(add0)
} else if add0 == remove {
Some(add1)
} else if add1 == remove {
Some(add0)
} else {
None
};
}
// Number of occurrences of each value, with positive indexes counted as +1 and
// negative as -1, thereby letting positive and negative terms with the same
// value (i.e. key in the map) cancel each other.
let mut counts: HashMap<&T, i32> = HashMap::new();
for (value, n) in zip(values, [1, -1].into_iter().cycle()) {
counts.entry(value).and_modify(|e| *e += n).or_insert(n);
}
// Collect non-zero value. Values with a count of 0 means that they have
// canceled out.
counts.retain(|_, count| *count != 0);
if counts.len() == 1 {
// If there is a single value with a count of 1 left, then that is the result.
let (value, count) = counts.into_iter().next().unwrap();
assert_eq!(count, 1);
Some(value)
} else if counts.len() == 2 && same_change == SameChange::Accept {
// All sides made the same change.
let [(value1, count1), (value2, count2)] = counts.into_iter().next_array().unwrap();
assert_eq!(count1 + count2, 1);
if count1 > 0 {
Some(value1)
} else {
Some(value2)
}
} else {
None
}
}
/// A generic representation of merged values.
///
/// There is exactly one more `adds()` than `removes()`. When interpreted as a
/// series of diffs, the merge's (i+1)-st add is matched with the i-th
/// remove. The zeroth add is considered a diff from the non-existent state.
#[derive(PartialEq, Eq, Hash, Clone, serde::Serialize)]
#[serde(transparent)]
pub struct Merge<T> {
/// Alternates between positive and negative terms, starting with positive.
values: SmallVec<[T; 1]>,
}
impl<T: Debug> Debug for Merge<T> {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
// Format like an enum with two variants to make it less verbose in the common
// case of a resolved state.
if let Some(value) = self.as_resolved() {
f.debug_tuple("Resolved").field(value).finish()
} else {
f.debug_tuple("Conflicted").field(&self.values).finish()
}
}
}
impl<T> Merge<T> {
/// Creates a `Merge` from the given values, in which positive and negative
/// terms alternate.
pub fn from_vec(values: impl Into<SmallVec<[T; 1]>>) -> Self {
let values = values.into();
assert!(values.len() % 2 != 0, "must have an odd number of terms");
Self { values }
}
/// Creates a new merge object from the given removes and adds.
pub fn from_removes_adds(
removes: impl IntoIterator<Item = T>,
adds: impl IntoIterator<Item = T>,
) -> Self {
let removes = removes.into_iter();
let mut adds = adds.into_iter();
let mut values = SmallVec::with_capacity(removes.size_hint().0 * 2 + 1);
values.push(adds.next().expect("must have at least one add"));
for diff in removes.zip_longest(adds) {
let (remove, add) = diff.both().expect("must have one more adds than removes");
values.extend([remove, add]);
}
Self { values }
}
/// Creates a `Merge` from a first side and a series of diffs to apply to
/// that side.
pub fn from_diffs(first_side: T, diffs: impl IntoIterator<Item = Diff<T>>) -> Self {
let values = iter::once(first_side)
.chain(diffs.into_iter().flat_map(Diff::into_array))
.collect();
Self { values }
}
/// Creates a `Merge` with a single resolved value.
pub const fn resolved(value: T) -> Self {
Self {
values: smallvec_inline![value],
}
}
/// Creates a `Merge` by repeating a single value.
pub fn repeated(value: T, num_sides: usize) -> Self
where
T: Clone,
{
Self {
values: smallvec![value; num_sides * 2 - 1],
}
}
/// Create a `Merge` from a `removes` and `adds`, padding with `None` to
/// make sure that there is exactly one more `adds` than `removes`.
pub fn from_legacy_form(
removes: impl IntoIterator<Item = T>,
adds: impl IntoIterator<Item = T>,
) -> Merge<Option<T>> {
let removes = removes.into_iter();
let mut adds = adds.into_iter().fuse();
let mut values = smallvec_inline![adds.next()];
for diff in removes.zip_longest(adds) {
let (remove, add) = diff.map_any(Some, Some).or_default();
values.extend([remove, add]);
}
Merge { values }
}
/// The removed values, also called negative terms.
pub fn removes(&self) -> impl ExactSizeIterator<Item = &T> {
self.values[1..].iter().step_by(2)
}
/// The added values, also called positive terms.
pub fn adds(&self) -> impl ExactSizeIterator<Item = &T> {
self.values.iter().step_by(2)
}
/// Returns the zeroth added value, which is guaranteed to exist.
pub fn first(&self) -> &T {
&self.values[0]
}
/// Returns the `index`-th removed value, which is considered belonging to
/// the `index`-th diff pair.
pub fn get_remove(&self, index: usize) -> Option<&T> {
self.values.get(index * 2 + 1)
}
/// Returns the `index`-th added value, which is considered belonging to the
/// `index-1`-th diff pair. The zeroth add is a diff from the non-existent
/// state.
pub fn get_add(&self, index: usize) -> Option<&T> {
self.values.get(index * 2)
}
/// Removes the specified "removed"/"added" values. The removed slots are
/// replaced by the last "removed"/"added" values.
pub fn swap_remove(&mut self, remove_index: usize, add_index: usize) -> (T, T) {
// Swap with the last "added" and "removed" values in order.
let add = self.values.swap_remove(add_index * 2);
let remove = self.values.swap_remove(remove_index * 2 + 1);
(remove, add)
}
/// The number of positive terms in the conflict.
pub fn num_sides(&self) -> usize {
self.values.len() / 2 + 1
}
/// Whether this merge is resolved. Does not resolve trivial merges.
pub fn is_resolved(&self) -> bool {
self.values.len() == 1
}
/// Returns the resolved value, if this merge is resolved. Does not
/// resolve trivial merges.
pub fn as_resolved(&self) -> Option<&T> {
if let [value] = &self.values[..] {
Some(value)
} else {
None
}
}
/// Returns the resolved value, if this merge is resolved. Otherwise returns
/// the merge itself as an `Err`. Does not resolve trivial merges.
pub fn into_resolved(mut self) -> Result<T, Self> {
if self.values.len() == 1 {
Ok(self.values.pop().unwrap())
} else {
Err(self)
}
}
/// Returns a vector mapping of a value's index in the simplified merge to
/// its original index in the unsimplified merge.
///
/// The merge is simplified by removing identical values in add and remove
/// values.
fn get_simplified_mapping(&self) -> Vec<usize>
where
T: PartialEq,
{
let unsimplified_len = self.values.len();
let mut simplified_to_original_indices = (0..unsimplified_len).collect_vec();
let mut add_index = 0;
while add_index < simplified_to_original_indices.len() {
let add = &self.values[simplified_to_original_indices[add_index]];
let mut remove_indices = simplified_to_original_indices
.iter()
.enumerate()
.skip(1)
.step_by(2);
if let Some((remove_index, _)) = remove_indices
.find(|&(_, original_remove_index)| &self.values[*original_remove_index] == add)
{
// Align the current "add" value to the `remove_index/2`-th diff, then
// delete the diff pair.
simplified_to_original_indices.swap(remove_index + 1, add_index);
simplified_to_original_indices.drain(remove_index..remove_index + 2);
} else {
add_index += 2;
}
}
simplified_to_original_indices
}
/// Apply the mapping returned by [`Self::get_simplified_mapping`].
#[must_use]
fn apply_simplified_mapping(&self, mapping: &[usize]) -> Self
where
T: Clone,
{
// Reorder values based on their new indices in the simplified merge.
let values = mapping
.iter()
.map(|index| self.values[*index].clone())
.collect();
Self { values }
}
/// Simplify the merge by joining diffs like A->B and B->C into A->C.
/// Also drops trivial diffs like A->A.
#[must_use]
pub fn simplify(&self) -> Self
where
T: PartialEq + Clone,
{
let mapping = self.get_simplified_mapping();
self.apply_simplified_mapping(&mapping)
}
/// Simplify the merge, using a function to choose which values to compare.
#[must_use]
pub fn simplify_by<'a, U>(&'a self, f: impl FnMut(&'a T) -> U) -> Self
where
T: Clone,
U: PartialEq,
{
let mapping = self.map(f).get_simplified_mapping();
self.apply_simplified_mapping(&mapping)
}
/// Updates the merge based on the given simplified merge.
pub fn update_from_simplified(mut self, simplified: Self) -> Self
where
T: PartialEq,
{
let mapping = self.get_simplified_mapping();
assert_eq!(mapping.len(), simplified.values.len());
for (index, value) in mapping.into_iter().zip(simplified.values.into_iter()) {
self.values[index] = value;
}
self
}
/// If this merge can be trivially resolved, returns the value it resolves
/// to.
pub fn resolve_trivial(&self, same_change: SameChange) -> Option<&T>
where
T: Eq + Hash,
{
trivial_merge(&self.values, same_change)
}
/// Pads this merge with to the specified number of sides with the specified
/// value. No-op if the requested size is not larger than the current size.
pub fn pad_to(&mut self, num_sides: usize, value: &T)
where
T: Clone,
{
if num_sides <= self.num_sides() {
return;
}
self.values.resize(num_sides * 2 - 1, value.clone());
}
/// Returns a slice containing the terms. The items will alternate between
/// positive and negative terms, starting with positive (since there's one
/// more of those).
pub fn as_slice(&self) -> &[T] {
&self.values
}
/// Returns an iterator over references to the terms. The items will
/// alternate between positive and negative terms, starting with
/// positive (since there's one more of those).
pub fn iter(&self) -> slice::Iter<'_, T> {
self.values.iter()
}
/// A version of `Merge::iter()` that iterates over mutable references.
pub fn iter_mut(&mut self) -> slice::IterMut<'_, T> {
self.values.iter_mut()
}
/// Creates a new merge by applying `f` to each remove and add.
pub fn map<'a, U>(&'a self, f: impl FnMut(&'a T) -> U) -> Merge<U> {
let values = self.values.iter().map(f).collect();
Merge { values }
}
/// Creates a new merge by applying `f` to each remove and add, returning
/// `Err` if `f` returns `Err` for any of them.
pub fn try_map<'a, U, E>(
&'a self,
f: impl FnMut(&'a T) -> Result<U, E>,
) -> Result<Merge<U>, E> {
let values = self.values.iter().map(f).try_collect()?;
Ok(Merge { values })
}
/// Creates a new merge by applying the async function `f` to each remove
/// and add, running them concurrently, and returning `Err` if `f`
/// returns `Err` for any of them.
pub async fn try_map_async<'a, F, U, E>(
&'a self,
f: impl FnMut(&'a T) -> F,
) -> Result<Merge<U>, E>
where
F: Future<Output = Result<U, E>>,
{
let values = try_join_all(self.values.iter().map(f)).await?;
Ok(Merge {
values: values.into(),
})
}
/// Converts a `&Merge<T>` into a `Merge<&T>`.
pub fn as_ref(&self) -> Merge<&T> {
let values = self.values.iter().collect();
Merge { values }
}
/// Zip two merges which have the same number of terms. Panics if the merges
/// don't have the same number of terms.
pub fn zip<U>(self, other: Merge<U>) -> Merge<(T, U)> {
assert_eq!(self.values.len(), other.values.len());
let values = self.values.into_iter().zip(other.values).collect();
Merge { values }
}
}
impl<T, U> Merge<(T, U)> {
/// Unzips a merge of pairs into a pair of merges.
pub fn unzip(self) -> (Merge<T>, Merge<U>) {
let (left, right) = self.values.into_iter().unzip();
(Merge { values: left }, Merge { values: right })
}
}
impl<T> Merge<&'_ T> {
/// Convert a `Merge<&T>` into a `Merge<T>` by cloning each term.
pub fn cloned(&self) -> Merge<T>
where
T: Clone,
{
self.map(|&term| term.clone())
}
}
/// Helper for consuming items from an iterator and then creating a `Merge`.
///
/// By not collecting directly into `Merge`, we can avoid creating invalid
/// instances of it. If we had `Merge::from_iter()` we would need to allow it to
/// accept iterators of any length (including 0). We couldn't make it panic on
/// even lengths because we can get passed such iterators from e.g.
/// `Option::from_iter()`. By collecting into `MergeBuilder` instead, we move
/// the checking until after `from_iter()` (to `MergeBuilder::build()`).
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct MergeBuilder<T> {
values: SmallVec<[T; 1]>,
}
impl<T> Default for MergeBuilder<T> {
fn default() -> Self {
Self {
values: Default::default(),
}
}
}
impl<T> MergeBuilder<T> {
/// Requires that exactly one more "adds" than "removes" have been added to
/// this builder.
pub fn build(self) -> Merge<T> {
Merge::from_vec(self.values)
}
}
impl<T> IntoIterator for Merge<T> {
type Item = T;
type IntoIter = smallvec::IntoIter<[T; 1]>;
fn into_iter(self) -> Self::IntoIter {
self.values.into_iter()
}
}
impl<'a, T> IntoIterator for &'a Merge<T> {
type Item = &'a T;
type IntoIter = slice::Iter<'a, T>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl<'a, T> IntoIterator for &'a mut Merge<T> {
type Item = &'a mut T;
type IntoIter = slice::IterMut<'a, T>;
fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}
impl<T> FromIterator<T> for MergeBuilder<T> {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
let mut builder = Self::default();
builder.extend(iter);
builder
}
}
impl<T> Extend<T> for MergeBuilder<T> {
fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
self.values.extend(iter);
}
}
impl<T> Merge<Option<T>> {
/// Creates a resolved merge with a value of `None`.
pub fn absent() -> Self {
Self::resolved(None)
}
/// Creates a resolved merge with a value of `Some(value)`.
pub fn normal(value: T) -> Self {
Self::resolved(Some(value))
}
/// Whether this represents a resolved value of `None`.
pub fn is_absent(&self) -> bool {
matches!(self.as_resolved(), Some(None))
}
/// The opposite of `is_absent()`.
pub fn is_present(&self) -> bool {
!self.is_absent()
}
/// Returns the value if this is present and non-conflicting.
pub fn as_normal(&self) -> Option<&T> {
self.as_resolved()?.as_ref()
}
/// Creates lists of `removes` and `adds` from a `Merge` by dropping
/// `None` values. Note that the conversion is lossy: the order of `None`
/// values is not preserved when converting back to a `Merge`.
pub fn into_legacy_form(self) -> (Vec<T>, Vec<T>) {
// Allocate the maximum size assuming there would be few `None`s.
let mut removes = Vec::with_capacity(self.values.len() / 2);
let mut adds = Vec::with_capacity(self.values.len() / 2 + 1);
let mut values = self.values.into_iter();
adds.extend(values.next().unwrap());
while let Some(remove) = values.next() {
removes.extend(remove);
adds.extend(values.next().unwrap());
}
(removes, adds)
}
}
impl<T: Clone> Merge<Option<&T>> {
/// Creates a new merge by cloning inner `Option<&T>`s.
pub fn cloned(&self) -> Merge<Option<T>> {
self.map(|value| value.cloned())
}
}
impl<T> Merge<Merge<T>> {
/// Flattens a nested merge into a regular merge.
///
/// Let's say we have a 3-way merge of 3-way merges like this:
///
/// ```text
/// 4 5 7 8
/// 3 6
/// 1 2
/// 0
/// ```
///
/// Flattening that results in this 9-way merge:
///
/// ```text
/// 4 5 0 7 8
/// 3 2 1 6
/// ```
pub fn flatten(self) -> Merge<T> {
let mut outer_values = self.values.into_iter();
let mut result = outer_values.next().unwrap();
while let Some(mut remove) = outer_values.next() {
// Add removes reversed, and with the first element moved last, so we preserve
// the diffs
remove.values.rotate_left(1);
for i in 0..remove.values.len() / 2 {
remove.values.swap(i * 2, i * 2 + 1);
}
result.values.extend(remove.values);
let add = outer_values.next().unwrap();
result.values.extend(add.values);
}
result
}
}
impl<T: ContentHash> ContentHash for Merge<T> {
fn hash(&self, state: &mut impl DigestUpdate) {
self.values.hash(state);
}
}
/// Borrowed `MergedTreeValue`.
pub type MergedTreeVal<'a> = Merge<Option<&'a TreeValue>>;
/// The value at a given path in a commit.
///
/// It depends on the context whether it can be absent
/// (`Merge::is_absent()`). For example, when getting the value at a
/// specific path, it may be, but when iterating over entries in a
/// tree, it shouldn't be.
pub type MergedTreeValue = Merge<Option<TreeValue>>;
impl<T> Merge<Option<T>>
where
    T: Borrow<TreeValue>,
{
    /// Whether this merge should be recursed into when doing directory walks.
    ///
    /// True when at least one term is present and every present term is a
    /// `TreeValue::Tree`; absent (`None`) terms are allowed alongside trees.
    pub fn is_tree(&self) -> bool {
        self.is_present()
            && self.iter().all(|value| {
                matches!(
                    borrow_tree_value(value.as_ref()),
                    Some(TreeValue::Tree(_)) | None
                )
            })
    }
    /// Whether this merge is present and not a tree
    pub fn is_file_like(&self) -> bool {
        self.is_present() && !self.is_tree()
    }
    /// If this merge contains only files or absent entries, returns a merge of
    /// the `FileId`s. The executable bits and copy IDs will be ignored. Use
    /// `Merge::with_new_file_ids()` to produce a new merge with the original
    /// executable bits preserved.
    pub fn to_file_merge(&self) -> Option<Merge<Option<FileId>>> {
        // try_map fails (Err(())) on the first non-file, non-absent term,
        // which .ok()? turns into a None return.
        let file_ids = self
            .try_map(|term| match borrow_tree_value(term.as_ref()) {
                None => Ok(None),
                Some(TreeValue::File {
                    id,
                    executable: _,
                    copy_id: _,
                }) => Ok(Some(id.clone())),
                _ => Err(()),
            })
            .ok()?;
        Some(file_ids)
    }
    /// If this merge contains only files or absent entries, returns a merge of
    /// the files' executable bits.
    pub fn to_executable_merge(&self) -> Option<Merge<Option<bool>>> {
        self.try_map(|term| match borrow_tree_value(term.as_ref()) {
            None => Ok(None),
            Some(TreeValue::File {
                id: _,
                executable,
                copy_id: _,
            }) => Ok(Some(*executable)),
            _ => Err(()),
        })
        .ok()
    }
    /// If this merge contains only files or absent entries, returns a merge of
    /// the files' copy IDs.
    pub fn to_copy_id_merge(&self) -> Option<Merge<Option<CopyId>>> {
        self.try_map(|term| match borrow_tree_value(term.as_ref()) {
            None => Ok(None),
            Some(TreeValue::File {
                id: _,
                executable: _,
                copy_id,
            }) => Ok(Some(copy_id.clone())),
            _ => Err(()),
        })
        .ok()
    }
    /// If every non-`None` term of a `MergedTreeValue`
    /// is a `TreeValue::Tree`, this converts it to
    /// a `Merge<Tree>`, with empty trees instead of
    /// any `None` terms. Otherwise, returns `None`.
    pub async fn to_tree_merge(
        &self,
        store: &Arc<Store>,
        dir: &RepoPath,
    ) -> BackendResult<Option<Merge<Tree>>> {
        let tree_id_merge = self.try_map(|term| match borrow_tree_value(term.as_ref()) {
            None => Ok(None),
            Some(TreeValue::Tree(id)) => Ok(Some(id)),
            Some(_) => Err(()),
        });
        if let Ok(tree_id_merge) = tree_id_merge {
            // Load each tree from the store; absent terms become empty trees
            // rooted at `dir`.
            Ok(Some(
                tree_id_merge
                    .try_map_async(async |id| {
                        if let Some(id) = id {
                            store.get_tree_async(dir.to_owned(), id).await
                        } else {
                            Ok(Tree::empty(store.clone(), dir.to_owned()))
                        }
                    })
                    .await?,
            ))
        } else {
            Ok(None)
        }
    }
    /// Creates a new merge with the file ids from the given merge. In other
    /// words, only the executable bits from `self` will be preserved.
    ///
    /// The given `file_ids` should have the same shape as `self`. Only the
    /// `FileId` values may differ.
    pub fn with_new_file_ids(&self, file_ids: &Merge<Option<FileId>>) -> Merge<Option<TreeValue>> {
        // Shape mismatch is a programming error, hence the assertion rather
        // than a Result.
        assert_eq!(self.values.len(), file_ids.values.len());
        let values = zip(self, file_ids.iter().cloned())
            .map(
                |(tree_value, file_id)| match (borrow_tree_value(tree_value.as_ref()), file_id) {
                    (
                        Some(TreeValue::File {
                            id: _,
                            executable,
                            copy_id,
                        }),
                        Some(id),
                    ) => Some(TreeValue::File {
                        id,
                        executable: *executable,
                        copy_id: copy_id.clone(),
                    }),
                    (None, None) => None,
                    // New files are populated to preserve the materialized conflict. The file won't
                    // be checked out to the disk. So the metadata is not important, and we will
                    // just use the default values.
                    (None, Some(id)) => Some(TreeValue::File {
                        id,
                        executable: false,
                        copy_id: CopyId::placeholder(),
                    }),
                    (old, new) => panic!("incompatible update: {old:?} to {new:?}"),
                },
            )
            .collect();
        Merge { values }
    }
    /// Give a summary description of the conflict's "removes" and "adds"
    pub fn describe(&self, labels: &ConflictLabels) -> String {
        let mut buf = String::new();
        writeln!(buf, "Conflict:").unwrap();
        // Absent terms are skipped; each present term is paired with its
        // optional label by index.
        for (term, label) in self
            .removes()
            .enumerate()
            .filter_map(|(i, term)| term.as_ref().map(|term| (term, labels.get_remove(i))))
        {
            write!(buf, " Removing {}", describe_conflict_term(term.borrow())).unwrap();
            if let Some(label) = label {
                write!(buf, " ({label})").unwrap();
            }
            buf.push('\n');
        }
        for (term, label) in self
            .adds()
            .enumerate()
            .filter_map(|(i, term)| term.as_ref().map(|term| (term, labels.get_add(i))))
        {
            write!(buf, " Adding {}", describe_conflict_term(term.borrow())).unwrap();
            if let Some(label) = label {
                write!(buf, " ({label})").unwrap();
            }
            buf.push('\n');
        }
        buf
    }
}
/// Normalizes an optional borrow-able term to an optional `&TreeValue`.
fn borrow_tree_value<T: Borrow<TreeValue> + ?Sized>(term: Option<&T>) -> Option<&TreeValue> {
    Some(term?.borrow())
}
/// Renders a single conflict term as a short human-readable phrase.
fn describe_conflict_term(value: &TreeValue) -> String {
    match value {
        TreeValue::File {
            id,
            executable,
            copy_id: _,
        } => {
            // TODO: include the copy here once we start using it
            if *executable {
                format!("executable file with id {id}")
            } else {
                format!("file with id {id}")
            }
        }
        TreeValue::Symlink(id) => format!("symlink with id {id}"),
        TreeValue::Tree(id) => format!("tree with id {id}"),
        TreeValue::GitSubmodule(id) => format!("Git submodule with id {id}"),
    }
}
impl Merge<Tree> {
    /// The directory that is shared by all trees in the merge.
    pub fn dir(&self) -> &RepoPath {
        // All terms are expected to be rooted at the same directory; this is
        // only verified in debug builds.
        debug_assert!(self.iter().map(|tree| tree.dir()).all_equal());
        self.first().dir()
    }
    /// The value at the given basename. The value can be `Resolved` even if
    /// `self` is conflicted, which happens if the value at the path can be
    /// trivially merged. Does not recurse, so if `basename` refers to a Tree,
    /// then a `TreeValue::Tree` will be returned.
    pub fn value(&self, basename: &RepoPathComponent) -> MergedTreeVal<'_> {
        // Fast path: a resolved merge has only one tree to look into.
        if let Some(tree) = self.as_resolved() {
            return Merge::resolved(tree.value(basename));
        }
        let same_change = self.first().store().merge_options().same_change;
        let value = self.map(|tree| tree.value(basename));
        // Collapse any conflict that `resolve_trivial()` can settle into a
        // resolved merge.
        if let Some(resolved) = value.resolve_trivial(same_change) {
            return Merge::resolved(*resolved);
        }
        value
    }
    /// Gets the `Merge<Tree>` in a subdirectory of the current tree. If the
    /// path doesn't correspond to a tree in any of the inputs to the merge,
    /// then that entry will be replaced by an empty tree in the result.
    pub async fn sub_tree(&self, name: &RepoPathComponent) -> BackendResult<Option<Self>> {
        let store = self.first().store();
        match self.value(name).into_resolved() {
            // Resolved to a single tree: load it as a resolved merge.
            Ok(Some(TreeValue::Tree(sub_tree_id))) => {
                let subdir = self.dir().join(name);
                Ok(Some(Self::resolved(
                    store.get_tree_async(subdir, sub_tree_id).await?,
                )))
            }
            // Resolved to a non-tree (file, symlink, absent, ...): not a
            // subdirectory.
            Ok(_) => Ok(None),
            // Conflicted: only recurse if every present term is a tree.
            Err(merge) => {
                if !merge.is_tree() {
                    return Ok(None);
                }
                let trees = merge
                    .try_map_async(async |value| match value {
                        Some(TreeValue::Tree(sub_tree_id)) => {
                            let subdir = self.dir().join(name);
                            store.get_tree_async(subdir, sub_tree_id).await
                        }
                        // Ruled out by the is_tree() check above.
                        Some(_) => unreachable!(),
                        None => {
                            let subdir = self.dir().join(name);
                            Ok(Tree::empty(store.clone(), subdir))
                        }
                    })
                    .await?;
                Ok(Some(trees))
            }
        }
    }
/// Look up the tree at the given path.
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/config.rs | lib/src/config.rs | // Copyright 2022 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Configuration store helpers.
use std::borrow::Borrow;
use std::convert::Infallible;
use std::fmt;
use std::fmt::Display;
use std::fs;
use std::io;
use std::ops::Range;
use std::path::Path;
use std::path::PathBuf;
use std::slice;
use std::str::FromStr;
use std::sync::Arc;
use std::sync::LazyLock;
use itertools::Itertools as _;
use serde::Deserialize;
use serde::de::IntoDeserializer as _;
use thiserror::Error;
use toml_edit::Document;
use toml_edit::DocumentMut;
pub use crate::config_resolver::ConfigMigrateError;
pub use crate::config_resolver::ConfigMigrateLayerError;
pub use crate::config_resolver::ConfigMigrationRule;
pub use crate::config_resolver::ConfigResolutionContext;
pub use crate::config_resolver::migrate;
pub use crate::config_resolver::resolve;
use crate::file_util::IoResultExt as _;
use crate::file_util::PathError;
/// Config value or table node.
pub type ConfigItem = toml_edit::Item;
/// Non-inline table of config key and value pairs.
pub type ConfigTable = toml_edit::Table;
/// Non-inline or inline table of config key and value pairs.
///
/// Trait object so inline and non-inline tables can be handled uniformly.
pub type ConfigTableLike<'a> = dyn toml_edit::TableLike + 'a;
/// Generic config value.
pub type ConfigValue = toml_edit::Value;
/// Error that can occur when parsing or loading config variables.
#[derive(Debug, Error)]
pub enum ConfigLoadError {
    /// Config file or directory cannot be read.
    #[error("Failed to read configuration file")]
    Read(#[source] PathError),
    /// TOML file or text cannot be parsed.
    #[error("Configuration cannot be parsed as TOML document")]
    Parse {
        /// Source error. Boxed so the variant (and `Result`s carrying it)
        /// stays small.
        #[source]
        error: Box<toml_edit::TomlError>,
        /// Source file path.
        source_path: Option<PathBuf>,
    },
}
/// Error that can occur when saving config variables to file.
#[derive(Debug, Error)]
#[error("Failed to write configuration file")]
pub struct ConfigFileSaveError(#[source] pub PathError);
/// Error that can occur when looking up config variable.
#[derive(Debug, Error)]
pub enum ConfigGetError {
    /// Config value is not set.
    #[error("Value not found for {name}")]
    NotFound {
        /// Dotted config name path.
        name: String,
    },
    /// Config value cannot be converted to the expected type.
    #[error("Invalid type or value for {name}")]
    Type {
        /// Dotted config name path.
        name: String,
        /// Source error.
        #[source]
        error: Box<dyn std::error::Error + Send + Sync>,
        /// Source file path where the value is defined.
        source_path: Option<PathBuf>,
    },
}
/// Error that can occur when updating config variable.
#[derive(Debug, Error)]
pub enum ConfigUpdateError {
    /// Non-table value exists at parent path, which shouldn't be removed.
    #[error("Would overwrite non-table value with parent table {name}")]
    WouldOverwriteValue {
        /// Dotted config name path.
        name: String,
    },
    /// Non-inline table exists at the path, which shouldn't be overwritten by a
    /// value.
    #[error("Would overwrite entire table {name}")]
    WouldOverwriteTable {
        /// Dotted config name path.
        name: String,
    },
    /// Non-inline table exists at the path, which shouldn't be deleted.
    #[error("Would delete entire table {name}")]
    WouldDeleteTable {
        /// Dotted config name path.
        name: String,
    },
}
/// Extension methods for `Result<T, ConfigGetError>`.
pub trait ConfigGetResultExt<T> {
    /// Converts `NotFound` error to `Ok(None)`, leaving other errors.
    ///
    /// Useful for looking up optional settings without special-casing the
    /// missing-key error at every call site.
    fn optional(self) -> Result<Option<T>, ConfigGetError>;
}
impl<T> ConfigGetResultExt<T> for Result<T, ConfigGetError> {
    fn optional(self) -> Result<Option<T>, ConfigGetError> {
        match self {
            // A missing key is not an error for optional lookups.
            Err(ConfigGetError::NotFound { .. }) => Ok(None),
            // Values and all other errors pass through, with the value
            // wrapped in Some.
            other => other.map(Some),
        }
    }
}
/// Dotted config name path.
///
/// Stored as a sequence of TOML keys, one per path component.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct ConfigNamePathBuf(Vec<toml_edit::Key>);
impl ConfigNamePathBuf {
    /// Creates an empty path pointing to the root table.
    ///
    /// This isn't a valid TOML key expression, but provided for convenience.
    pub fn root() -> Self {
        Self(vec![])
    }
    /// Returns true if the path is empty (i.e. pointing to the root table.)
    pub fn is_root(&self) -> bool {
        self.0.is_empty()
    }
    /// Returns true if the `base` is a prefix of this path.
    pub fn starts_with(&self, base: impl AsRef<[toml_edit::Key]>) -> bool {
        self.0.starts_with(base.as_ref())
    }
    /// Returns iterator of path components (or keys.)
    pub fn components(&self) -> slice::Iter<'_, toml_edit::Key> {
        self.0.iter()
    }
    /// Appends the given `key` component.
    pub fn push(&mut self, key: impl Into<toml_edit::Key>) {
        self.0.push(key.into());
    }
}
// Help obtain owned value from ToConfigNamePath::Output. If we add a slice
// type (like &Path for PathBuf), this will be From<&ConfigNamePath>.
impl From<&Self> for ConfigNamePathBuf {
    fn from(value: &Self) -> Self {
        value.clone()
    }
}
impl<K: Into<toml_edit::Key>> FromIterator<K> for ConfigNamePathBuf {
    /// Builds a path from key-convertible components, one key per item.
    fn from_iter<I: IntoIterator<Item = K>>(iter: I) -> Self {
        Self(iter.into_iter().map(Into::into).collect())
    }
}
impl FromStr for ConfigNamePathBuf {
type Err = toml_edit::TomlError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
// TOML parser ensures that the returned vec is not empty.
toml_edit::Key::parse(s).map(ConfigNamePathBuf)
}
}
impl AsRef<[toml_edit::Key]> for ConfigNamePathBuf {
    /// Borrows the underlying key components as a slice.
    fn as_ref(&self) -> &[toml_edit::Key] {
        self.0.as_slice()
    }
}
impl Display for ConfigNamePathBuf {
    /// Formats the path as dot-separated keys; the root path renders as an
    /// empty string.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for (i, key) in self.0.iter().enumerate() {
            if i > 0 {
                f.write_str(".")?;
            }
            write!(f, "{key}")?;
        }
        Ok(())
    }
}
/// Value that can be converted to a dotted config name path.
///
/// This is an abstraction to specify a config name path in either a string or a
/// parsed form. It's similar to `Into<T>`, but the output type `T` is
/// constrained by the source type.
pub trait ToConfigNamePath: Sized {
    /// Path type to be converted from `Self`.
    type Output: Borrow<ConfigNamePathBuf> + Into<ConfigNamePathBuf>;
    /// Converts this object into a dotted config name path.
    fn into_name_path(self) -> Self::Output;
}
impl ToConfigNamePath for ConfigNamePathBuf {
    type Output = Self;
    fn into_name_path(self) -> Self::Output {
        self
    }
}
impl ToConfigNamePath for &ConfigNamePathBuf {
    type Output = Self;
    fn into_name_path(self) -> Self::Output {
        self
    }
}
impl ToConfigNamePath for &'static str {
    // This can be changed to ConfigNamePathStr(str) if allocation cost matters.
    type Output = ConfigNamePathBuf;
    /// Parses this string into a dotted config name path.
    ///
    /// The string must be a valid TOML dotted key. A static str is required to
    /// prevent API misuse.
    fn into_name_path(self) -> Self::Output {
        self.parse()
            .expect("valid TOML dotted key must be provided")
    }
}
impl<const N: usize> ToConfigNamePath for [&str; N] {
    type Output = ConfigNamePathBuf;
    /// Each array element becomes one key component verbatim; elements are
    /// not parsed or split on dots.
    fn into_name_path(self) -> Self::Output {
        self.into_iter().collect()
    }
}
impl<const N: usize> ToConfigNamePath for &[&str; N] {
    type Output = ConfigNamePathBuf;
    fn into_name_path(self) -> Self::Output {
        self.as_slice().into_name_path()
    }
}
impl ToConfigNamePath for &[&str] {
    type Output = ConfigNamePathBuf;
    /// Each slice element becomes one key component verbatim.
    fn into_name_path(self) -> Self::Output {
        self.iter().copied().collect()
    }
}
/// Source of configuration variables in order of precedence.
///
/// The derived `Ord` is load-bearing: variant declaration order defines layer
/// precedence, and `StackedConfig` keeps its layers sorted by this ordering.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum ConfigSource {
    /// Default values (which has the lowest precedence.)
    Default,
    /// Base environment variables.
    EnvBase,
    /// User configuration files.
    User,
    /// Repo configuration files.
    Repo,
    /// Workspace configuration files.
    Workspace,
    /// Override environment variables.
    EnvOverrides,
    /// Command-line arguments (which has the highest precedence.)
    CommandArg,
}
impl Display for ConfigSource {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use ConfigSource::*;
let c = match self {
Default => "default",
User => "user",
Repo => "repo",
Workspace => "workspace",
CommandArg => "cli",
EnvBase | EnvOverrides => "env",
};
write!(f, "{c}")
}
}
/// Set of configuration variables with source information.
///
/// A layer corresponds to a single TOML document plus metadata recording
/// where it came from.
#[derive(Clone, Debug)]
pub struct ConfigLayer {
    /// Source type of this layer.
    pub source: ConfigSource,
    /// Source file path of this layer if any.
    pub path: Option<PathBuf>,
    /// Configuration variables.
    pub data: DocumentMut,
}
impl ConfigLayer {
    /// Creates new layer with empty data.
    pub fn empty(source: ConfigSource) -> Self {
        Self::with_data(source, DocumentMut::new())
    }
    /// Creates new layer with the configuration variables `data`.
    pub fn with_data(source: ConfigSource, data: DocumentMut) -> Self {
        Self {
            source,
            path: None,
            data,
        }
    }
    /// Parses TOML document `text` into new layer.
    pub fn parse(source: ConfigSource, text: &str) -> Result<Self, ConfigLoadError> {
        let data = Document::parse(text).map_err(|error| ConfigLoadError::Parse {
            error: Box::new(error),
            source_path: None,
        })?;
        Ok(Self::with_data(source, data.into_mut()))
    }
    /// Loads TOML file from the specified `path`.
    pub fn load_from_file(source: ConfigSource, path: PathBuf) -> Result<Self, ConfigLoadError> {
        let text = fs::read_to_string(&path)
            .context(&path)
            .map_err(ConfigLoadError::Read)?;
        // Keep the file path on the error so callers can report which file
        // failed to parse.
        let data = Document::parse(text).map_err(|error| ConfigLoadError::Parse {
            error: Box::new(error),
            source_path: Some(path.clone()),
        })?;
        Ok(Self {
            source,
            path: Some(path),
            data: data.into_mut(),
        })
    }
    /// Loads every `*.toml` file in `path` (non-recursively), in sorted
    /// file-name order so layer precedence is deterministic.
    fn load_from_dir(source: ConfigSource, path: &Path) -> Result<Vec<Self>, ConfigLoadError> {
        // TODO: Walk the directory recursively?
        let mut file_paths: Vec<_> = path
            .read_dir()
            .and_then(|dir_entries| {
                dir_entries
                    .map(|entry| Ok(entry?.path()))
                    .filter_ok(|path| path.is_file() && path.extension() == Some("toml".as_ref()))
                    .try_collect()
            })
            .context(path)
            .map_err(ConfigLoadError::Read)?;
        file_paths.sort_unstable();
        file_paths
            .into_iter()
            .map(|path| Self::load_from_file(source, path))
            .try_collect()
    }
    /// Returns true if the table has no configuration variables.
    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }
    // Add .get_value(name) if needed. look_up_*() are low-level API.
    /// Looks up sub table by the `name` path. Returns `Some(table)` if a table
    /// was found at the path. Returns `Err(item)` if middle or leaf node wasn't
    /// a table.
    pub fn look_up_table(
        &self,
        name: impl ToConfigNamePath,
    ) -> Result<Option<&ConfigTableLike<'_>>, &ConfigItem> {
        match self.look_up_item(name) {
            Ok(Some(item)) => match item.as_table_like() {
                Some(table) => Ok(Some(table)),
                // The leaf exists but is a plain value, not a table.
                None => Err(item),
            },
            Ok(None) => Ok(None),
            Err(item) => Err(item),
        }
    }
    /// Looks up item by the `name` path. Returns `Some(item)` if an item
    /// found at the path. Returns `Err(item)` if middle node wasn't a table.
    pub fn look_up_item(
        &self,
        name: impl ToConfigNamePath,
    ) -> Result<Option<&ConfigItem>, &ConfigItem> {
        look_up_item(self.data.as_item(), name.into_name_path().borrow())
    }
    /// Sets `new_value` to the `name` path. Returns old value if any.
    ///
    /// This function errors out if attempted to overwrite a non-table middle
    /// node or a leaf non-inline table. An inline table can be overwritten
    /// because it's syntactically a value.
    pub fn set_value(
        &mut self,
        name: impl ToConfigNamePath,
        new_value: impl Into<ConfigValue>,
    ) -> Result<Option<ConfigValue>, ConfigUpdateError> {
        let would_overwrite_table = |name| ConfigUpdateError::WouldOverwriteValue { name };
        let name = name.into_name_path();
        let name = name.borrow();
        // An empty path would mean replacing the root table with a value.
        let (leaf_key, table_keys) = name
            .0
            .split_last()
            .ok_or_else(|| would_overwrite_table(name.to_string()))?;
        let parent_table = ensure_table(self.data.as_table_mut(), table_keys)
            .map_err(|keys| would_overwrite_table(keys.join(".")))?;
        match parent_table.entry_format(leaf_key) {
            toml_edit::Entry::Occupied(mut entry) => {
                // Only values (including inline tables) may be replaced;
                // refusing to clobber a non-inline table.
                if !entry.get().is_value() {
                    return Err(ConfigUpdateError::WouldOverwriteTable {
                        name: name.to_string(),
                    });
                }
                let old_item = entry.insert(toml_edit::value(new_value));
                Ok(Some(old_item.into_value().unwrap()))
            }
            toml_edit::Entry::Vacant(entry) => {
                entry.insert(toml_edit::value(new_value));
                // Reset whitespace formatting (i.e. insert space before '=')
                let mut new_key = parent_table.key_mut(leaf_key).unwrap();
                new_key.leaf_decor_mut().clear();
                Ok(None)
            }
        }
    }
    /// Deletes value specified by the `name` path. Returns old value if any.
    ///
    /// Returns `Ok(None)` if middle node wasn't a table or a value wasn't
    /// found. Returns `Err` if attempted to delete a non-inline table. An
    /// inline table can be deleted because it's syntactically a value.
    pub fn delete_value(
        &mut self,
        name: impl ToConfigNamePath,
    ) -> Result<Option<ConfigValue>, ConfigUpdateError> {
        let would_delete_table = |name| ConfigUpdateError::WouldDeleteTable { name };
        let name = name.into_name_path();
        let name = name.borrow();
        let mut keys = name.components();
        // An empty path would mean deleting the root table.
        let leaf_key = keys
            .next_back()
            .ok_or_else(|| would_delete_table(name.to_string()))?;
        // Walk down to the parent table; a missing or non-table middle node
        // simply means there is nothing to delete.
        let Some(parent_table) = keys.try_fold(
            self.data.as_table_mut() as &mut ConfigTableLike,
            |table, key| table.get_mut(key)?.as_table_like_mut(),
        ) else {
            return Ok(None);
        };
        match parent_table.entry(leaf_key) {
            toml_edit::Entry::Occupied(entry) => {
                if !entry.get().is_value() {
                    return Err(would_delete_table(name.to_string()));
                }
                let old_item = entry.remove();
                Ok(Some(old_item.into_value().unwrap()))
            }
            toml_edit::Entry::Vacant(_) => Ok(None),
        }
    }
    /// Inserts tables down to the `name` path. Returns mutable reference to the
    /// leaf table.
    ///
    /// This function errors out if attempted to overwrite a non-table node. In
    /// file-system analogy, this is equivalent to `std::fs::create_dir_all()`.
    pub fn ensure_table(
        &mut self,
        name: impl ToConfigNamePath,
    ) -> Result<&mut ConfigTableLike<'_>, ConfigUpdateError> {
        let would_overwrite_table = |name| ConfigUpdateError::WouldOverwriteValue { name };
        let name = name.into_name_path();
        let name = name.borrow();
        ensure_table(self.data.as_table_mut(), &name.0)
            .map_err(|keys| would_overwrite_table(keys.join(".")))
    }
}
/// Looks up item from the `root_item`. Returns `Some(item)` if an item found at
/// the path. Returns `Err(item)` if middle node wasn't a table.
fn look_up_item<'a>(
    root_item: &'a ConfigItem,
    name: &ConfigNamePathBuf,
) -> Result<Option<&'a ConfigItem>, &'a ConfigItem> {
    let mut current = root_item;
    for key in name.components() {
        // A non-table middle node is reported back to the caller as Err.
        let table = current.as_table_like().ok_or(current)?;
        // A missing key anywhere along the path means "not found".
        let Some(child) = table.get(key) else {
            return Ok(None);
        };
        current = child;
    }
    Ok(Some(current))
}
/// Inserts tables recursively. Returns `Err(keys)` if middle node exists at the
/// prefix name `keys` and wasn't a table.
fn ensure_table<'a, 'b>(
    root_table: &'a mut ConfigTableLike<'a>,
    keys: &'b [toml_edit::Key],
) -> Result<&'a mut ConfigTableLike<'a>, &'b [toml_edit::Key]> {
    // try_fold threads the mutable table reference down one path component at
    // a time, creating missing intermediate tables on the fly. On failure, the
    // error carries the key prefix up to and including the offending node.
    keys.iter()
        .enumerate()
        .try_fold(root_table, |table, (i, key)| {
            let sub_item = table.entry_format(key).or_insert_with(new_implicit_table);
            sub_item.as_table_like_mut().ok_or(&keys[..=i])
        })
}
/// Creates an empty table node marked "implicit". Per toml_edit, an implicit
/// table with no direct key-value pairs is not rendered as its own `[header]`
/// when the document is serialized.
fn new_implicit_table() -> ConfigItem {
    let mut table = ConfigTable::new();
    table.set_implicit(true);
    ConfigItem::Table(table)
}
/// Wrapper for file-based [`ConfigLayer`], providing convenient methods for
/// modification.
///
/// Invariant: the wrapped layer always has a known `path` (both constructors
/// guarantee it), so `path()`/`save()` can rely on it being present.
#[derive(Clone, Debug)]
pub struct ConfigFile {
    layer: Arc<ConfigLayer>,
}
impl ConfigFile {
    /// Loads TOML file from the specified `path` if exists. Returns an empty
    /// object if the file doesn't exist.
    pub fn load_or_empty(
        source: ConfigSource,
        path: impl Into<PathBuf>,
    ) -> Result<Self, ConfigLoadError> {
        let layer = match ConfigLayer::load_from_file(source, path.into()) {
            Ok(layer) => Arc::new(layer),
            // A missing file is not an error: start from an empty document
            // (with a schema hint header) bound to the same path.
            Err(ConfigLoadError::Read(PathError {
                path,
                source: error,
            })) if error.kind() == io::ErrorKind::NotFound => {
                let mut data = DocumentMut::new();
                data.decor_mut()
                    .set_prefix("#:schema https://docs.jj-vcs.dev/latest/config-schema.json\n\n");
                let layer = ConfigLayer {
                    source,
                    path: Some(path),
                    data,
                };
                Arc::new(layer)
            }
            Err(err) => return Err(err),
        };
        Ok(Self { layer })
    }
    /// Wraps file-based [`ConfigLayer`] for modification. Returns `Err(layer)`
    /// if the source `path` is unknown.
    pub fn from_layer(layer: Arc<ConfigLayer>) -> Result<Self, Arc<ConfigLayer>> {
        if layer.path.is_some() {
            Ok(Self { layer })
        } else {
            Err(layer)
        }
    }
    /// Writes serialized data to the source file.
    pub fn save(&self) -> Result<(), ConfigFileSaveError> {
        fs::write(self.path(), self.layer.data.to_string())
            .context(self.path())
            .map_err(ConfigFileSaveError)
    }
    /// Source file path.
    pub fn path(&self) -> &Path {
        // Guaranteed by the constructors (see the struct-level invariant).
        self.layer.path.as_ref().expect("path must be known")
    }
    /// Returns the underlying config layer.
    pub fn layer(&self) -> &Arc<ConfigLayer> {
        &self.layer
    }
    /// See [`ConfigLayer::set_value()`].
    pub fn set_value(
        &mut self,
        name: impl ToConfigNamePath,
        new_value: impl Into<ConfigValue>,
    ) -> Result<Option<ConfigValue>, ConfigUpdateError> {
        // Copy-on-write: clones the layer only if it is shared.
        Arc::make_mut(&mut self.layer).set_value(name, new_value)
    }
    /// See [`ConfigLayer::delete_value()`].
    pub fn delete_value(
        &mut self,
        name: impl ToConfigNamePath,
    ) -> Result<Option<ConfigValue>, ConfigUpdateError> {
        Arc::make_mut(&mut self.layer).delete_value(name)
    }
}
/// Stack of configuration layers which can be merged as needed.
///
/// A [`StackedConfig`] is something like a read-only `overlayfs`. Tables and
/// values are directories and files respectively, and tables are merged across
/// layers. Tables and values can be addressed by [dotted name
/// paths](ToConfigNamePath).
///
/// There's no tombstone notation to remove items from the lower layers.
///
/// Beware that arrays of tables are no different than inline arrays. They are
/// values, so are never merged. This might be confusing because they would be
/// merged if two TOML documents are concatenated literally. Avoid using array
/// of tables syntax.
#[derive(Clone, Debug)]
pub struct StackedConfig {
    /// Layers sorted by `source` (the lowest precedence one first.)
    /// All insertion paths preserve this ordering.
    layers: Vec<Arc<ConfigLayer>>,
}
impl StackedConfig {
    /// Creates an empty stack of configuration layers.
    pub fn empty() -> Self {
        Self { layers: vec![] }
    }
    /// Creates a stack of configuration layers containing the default variables
    /// referred to by `jj-lib`.
    pub fn with_defaults() -> Self {
        Self {
            layers: DEFAULT_CONFIG_LAYERS.to_vec(),
        }
    }
    /// Loads config file from the specified `path`, inserts it at the position
    /// specified by `source`. The file should exist.
    pub fn load_file(
        &mut self,
        source: ConfigSource,
        path: impl Into<PathBuf>,
    ) -> Result<(), ConfigLoadError> {
        let layer = ConfigLayer::load_from_file(source, path.into())?;
        self.add_layer(layer);
        Ok(())
    }
    /// Loads config files from the specified directory `path`, inserts them at
    /// the position specified by `source`. The directory should exist.
    pub fn load_dir(
        &mut self,
        source: ConfigSource,
        path: impl AsRef<Path>,
    ) -> Result<(), ConfigLoadError> {
        let layers = ConfigLayer::load_from_dir(source, path.as_ref())?;
        self.extend_layers(layers);
        Ok(())
    }
    /// Inserts new layer at the position specified by `layer.source`.
    pub fn add_layer(&mut self, layer: impl Into<Arc<ConfigLayer>>) {
        let layer = layer.into();
        let index = self.insert_point(layer.source);
        self.layers.insert(index, layer);
    }
    /// Inserts multiple layers at the positions specified by `layer.source`.
    pub fn extend_layers<I>(&mut self, layers: I)
    where
        I: IntoIterator,
        I::Item: Into<Arc<ConfigLayer>>,
    {
        let layers = layers.into_iter().map(Into::into);
        // Group consecutive layers of the same source so each group is
        // spliced in at its correct position with one insertion.
        for (source, chunk) in &layers.chunk_by(|layer| layer.source) {
            let index = self.insert_point(source);
            self.layers.splice(index..index, chunk);
        }
    }
    /// Removes layers of the specified `source`.
    pub fn remove_layers(&mut self, source: ConfigSource) {
        self.layers.drain(self.layer_range(source));
    }
    /// Contiguous index range of layers having exactly this `source`.
    /// Relies on the layers being kept sorted by source.
    fn layer_range(&self, source: ConfigSource) -> Range<usize> {
        // Linear search since the size of Vec wouldn't be large.
        let start = self
            .layers
            .iter()
            .take_while(|layer| layer.source < source)
            .count();
        let count = self.layers[start..]
            .iter()
            .take_while(|layer| layer.source == source)
            .count();
        start..(start + count)
    }
    /// Index at which a new layer of `source` should be inserted (after any
    /// existing layers of the same source).
    fn insert_point(&self, source: ConfigSource) -> usize {
        // Search from end since layers are usually added in order, and the size
        // of Vec wouldn't be large enough to do binary search.
        let skip = self
            .layers
            .iter()
            .rev()
            .take_while(|layer| layer.source > source)
            .count();
        self.layers.len() - skip
    }
    /// Layers sorted by precedence.
    pub fn layers(&self) -> &[Arc<ConfigLayer>] {
        &self.layers
    }
    /// Mutable references to layers sorted by precedence.
    pub fn layers_mut(&mut self) -> &mut [Arc<ConfigLayer>] {
        &mut self.layers
    }
    /// Layers of the specified `source`.
    pub fn layers_for(&self, source: ConfigSource) -> &[Arc<ConfigLayer>] {
        &self.layers[self.layer_range(source)]
    }
    /// Looks up value of the specified type `T` from all layers, merges sub
    /// fields as needed.
    pub fn get<'de, T: Deserialize<'de>>(
        &self,
        name: impl ToConfigNamePath,
    ) -> Result<T, ConfigGetError> {
        self.get_value_with(name, |value| T::deserialize(value.into_deserializer()))
    }
    /// Looks up value from all layers, merges sub fields as needed.
    pub fn get_value(&self, name: impl ToConfigNamePath) -> Result<ConfigValue, ConfigGetError> {
        self.get_value_with::<_, Infallible>(name, Ok)
    }
    /// Looks up value from all layers, merges sub fields as needed, then
    /// converts the value by using the given function.
    pub fn get_value_with<T, E: Into<Box<dyn std::error::Error + Send + Sync>>>(
        &self,
        name: impl ToConfigNamePath,
        convert: impl FnOnce(ConfigValue) -> Result<T, E>,
    ) -> Result<T, ConfigGetError> {
        self.get_item_with(name, |item| {
            // Item variants other than Item::None can be converted to a Value,
            // and Item::None is not a valid TOML type. See also the following
            // thread: https://github.com/toml-rs/toml/issues/299
            let value = item
                .into_value()
                .expect("Item::None should not exist in loaded tables");
            convert(value)
        })
    }
    /// Looks up sub table from all layers, merges fields as needed.
    ///
    /// Use `table_keys(prefix)` and `get([prefix, key])` instead if table
    /// values have to be converted to non-generic value type.
    pub fn get_table(&self, name: impl ToConfigNamePath) -> Result<ConfigTable, ConfigGetError> {
        self.get_item_with(name, |item| {
            item.into_table()
                .map_err(|item| format!("Expected a table, but is {}", item.type_name()))
        })
    }
    /// Shared lookup path: merges the item across layers, then applies
    /// `convert`, attributing conversion errors to the uppermost layer.
    fn get_item_with<T, E: Into<Box<dyn std::error::Error + Send + Sync>>>(
        &self,
        name: impl ToConfigNamePath,
        convert: impl FnOnce(ConfigItem) -> Result<T, E>,
    ) -> Result<T, ConfigGetError> {
        let name = name.into_name_path();
        let name = name.borrow();
        let (item, layer_index) =
            get_merged_item(&self.layers, name).ok_or_else(|| ConfigGetError::NotFound {
                name: name.to_string(),
            })?;
        // If the value is a table, the error might come from lower layers. We
        // cannot report precise source information in that case. However,
        // toml_edit captures dotted keys in the error object. If the keys field
        // were public, we can look up the source information. This is probably
        // simpler than reimplementing Deserializer.
        convert(item).map_err(|err| ConfigGetError::Type {
            name: name.to_string(),
            error: err.into(),
            source_path: self.layers[layer_index].path.clone(),
        })
    }
    /// Returns iterator over sub table keys in order of layer precedence.
    /// Duplicated keys are omitted.
    pub fn table_keys(&self, name: impl ToConfigNamePath) -> impl Iterator<Item = &str> {
        let name = name.into_name_path();
        let name = name.borrow();
        let to_merge = get_tables_to_merge(&self.layers, name);
        to_merge
            .into_iter()
            .rev()
            .flat_map(|table| table.iter().map(|(k, _)| k))
            .unique()
    }
}
/// Looks up item from `layers`, merges sub fields as needed. Returns a merged
/// item and the uppermost layer index where the item was found.
fn get_merged_item(
    layers: &[Arc<ConfigLayer>],
    name: &ConfigNamePathBuf,
) -> Option<(ConfigItem, usize)> {
    let mut to_merge = Vec::new();
    // Scan layers from highest precedence (end) to lowest, collecting
    // table-like items until something shadows the rest.
    for (index, layer) in layers.iter().enumerate().rev() {
        let item = match layer.look_up_item(name) {
            Ok(Some(item)) => item,
            Ok(None) => continue, // parent is a table, but no value found
            Err(_) => break,      // parent is not a table, shadows lower layers
        };
        if item.is_table_like() {
            to_merge.push((item, index));
        } else if to_merge.is_empty() {
            return Some((item.clone(), index)); // no need to allocate vec
        } else {
            break; // shadows lower layers
        }
    }
    // Simply merge tables from the bottom layer. Upper items should override
    // the lower items (including their children) no matter if the upper items
    // are shadowed by the other upper items.
    let (item, mut top_index) = to_merge.pop()?;
    let mut merged = item.clone();
    for (item, index) in to_merge.into_iter().rev() {
        merge_items(&mut merged, item);
        top_index = index;
    }
    Some((merged, top_index))
}
/// Looks up tables to be merged from `layers`, returns in reverse order.
///
/// Collects the table found at `name` in each layer, from highest precedence
/// to lowest, stopping at the first layer where the path is shadowed by a
/// non-table node.
fn get_tables_to_merge<'a>(
    layers: &'a [Arc<ConfigLayer>],
    name: &ConfigNamePathBuf,
) -> Vec<&'a ConfigTableLike<'a>> {
    layers
        .iter()
        .rev()
        .map(|layer| layer.look_up_table(name))
        // An Err means a non-table shadows everything below it.
        .take_while(|result| result.is_ok())
        // Keep only layers where a table actually exists at the path.
        .filter_map(|result| result.ok().flatten())
        .collect()
}
/// Merges `upper_item` fields into `lower_item` recursively.
fn merge_items(lower_item: &mut ConfigItem, upper_item: &ConfigItem) {
    // A field-wise merge only makes sense when both sides are table-like.
    let (Some(lower_table), Some(upper_table)) =
        (lower_item.as_table_like_mut(), upper_item.as_table_like())
    else {
        // Not a table, the upper item wins.
        *lower_item = upper_item.clone();
        return;
    };
    for (key, upper) in upper_table.iter() {
        match lower_table.entry(key) {
            toml_edit::Entry::Occupied(entry) => {
                // Key present in both: recurse so nested tables merge deeply.
                merge_items(entry.into_mut(), upper);
            }
            toml_edit::Entry::Vacant(entry) => {
                // Key only present in the upper table: copy it in.
                entry.insert(upper.clone());
            }
        };
    }
}
// Built-in default layers, parsed once on first use. The unwrap is safe in
// practice because the embedded TOML text is fixed at compile time.
static DEFAULT_CONFIG_LAYERS: LazyLock<[Arc<ConfigLayer>; 1]> = LazyLock::new(|| {
    let parse = |text: &str| Arc::new(ConfigLayer::parse(ConfigSource::Default, text).unwrap());
    [parse(include_str!("config/misc.toml"))]
});
#[cfg(test)]
mod tests {
use assert_matches::assert_matches;
use indoc::indoc;
use pretty_assertions::assert_eq;
use super::*;
#[test]
fn test_config_layer_set_value() {
let mut layer = ConfigLayer::empty(ConfigSource::User);
// Cannot overwrite the root table
assert_matches!(
layer.set_value(ConfigNamePathBuf::root(), 0),
Err(ConfigUpdateError::WouldOverwriteValue { name }) if name.is_empty()
);
// Insert some values
layer.set_value("foo", 1).unwrap();
layer.set_value("bar.baz.blah", "2").unwrap();
layer
.set_value("bar.qux", ConfigValue::from_iter([("inline", "table")]))
.unwrap();
layer
.set_value("bar.to-update", ConfigValue::from_iter([("some", true)]))
.unwrap();
insta::assert_snapshot!(layer.data, @r#"
foo = 1
[bar]
qux = { inline = "table" }
to-update = { some = true }
[bar.baz]
blah = "2"
"#);
// Can overwrite value
layer
.set_value("foo", ConfigValue::from_iter(["new", "foo"]))
.unwrap();
// Can overwrite inline table
layer.set_value("bar.qux", "new bar.qux").unwrap();
// Can add value to inline table
layer
.set_value(
"bar.to-update.new",
ConfigValue::from_iter([("table", "value")]),
)
.unwrap();
// Cannot overwrite table
assert_matches!(
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/copies.rs | lib/src/copies.rs | // Copyright 2024 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Code for working with copies and renames.
use std::collections::HashMap;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
use std::task::ready;
use futures::Stream;
use crate::backend::BackendResult;
use crate::backend::CopyRecord;
use crate::merge::Diff;
use crate::merge::MergedTreeValue;
use crate::merged_tree::MergedTree;
use crate::merged_tree::TreeDiffStream;
use crate::repo_path::RepoPath;
use crate::repo_path::RepoPathBuf;
/// A collection of CopyRecords.
#[derive(Default, Debug)]
pub struct CopyRecords {
    records: Vec<CopyRecord>,
    // Maps from `source` or `target` to the index of the entry in `records`.
    // Conflicts are excluded by keeping an out of range value (`usize::MAX`,
    // see `add_records`), so `for_source`/`for_target` find nothing for them.
    sources: HashMap<RepoPathBuf, usize>,
    targets: HashMap<RepoPathBuf, usize>,
}
impl CopyRecords {
    /// Adds information about `CopyRecord`s to `self`. A target with multiple
    /// conflicts is discarded and treated as not having an origin.
    pub fn add_records(
        &mut self,
        copy_records: impl IntoIterator<Item = BackendResult<CopyRecord>>,
    ) -> BackendResult<()> {
        for record in copy_records {
            let record = record?;
            let index = self.records.len();
            // Index the new record under its source and target. A path seen
            // more than once is poisoned with an out-of-range index so that
            // `for_source`/`for_target` return `None` for it.
            // TODO: handle conflicts instead of ignoring both sides.
            self.sources
                .entry(record.source.clone())
                .and_modify(|stored| *stored = usize::MAX)
                .or_insert(index);
            self.targets
                .entry(record.target.clone())
                .and_modify(|stored| *stored = usize::MAX)
                .or_insert(index);
            self.records.push(record);
        }
        Ok(())
    }
    /// Returns true if there are copy records associated with a source path.
    pub fn has_source(&self, source: &RepoPath) -> bool {
        self.sources.contains_key(source)
    }
    /// Gets any copy record associated with a source path.
    pub fn for_source(&self, source: &RepoPath) -> Option<&CopyRecord> {
        // A conflicted path maps to `usize::MAX`, so `get` returns `None`.
        let &index = self.sources.get(source)?;
        self.records.get(index)
    }
    /// Returns true if there are copy records associated with a target path.
    pub fn has_target(&self, target: &RepoPath) -> bool {
        self.targets.contains_key(target)
    }
    /// Gets any copy record associated with a target path.
    pub fn for_target(&self, target: &RepoPath) -> Option<&CopyRecord> {
        // A conflicted path maps to `usize::MAX`, so `get` returns `None`.
        let &index = self.targets.get(target)?;
        self.records.get(index)
    }
    /// Gets all copy records.
    pub fn iter(&self) -> impl Iterator<Item = &CopyRecord> {
        self.records.iter()
    }
}
/// Whether or not the source path was deleted, i.e. whether a copied path is
/// a plain copy or a rename. See `CopiesTreeDiffStream::resolve_copy_source`
/// for how this is derived from the target tree.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum CopyOperation {
    /// The source path was not deleted.
    Copy,
    /// The source path was renamed to the destination.
    Rename,
}
/// A `TreeDiffEntry` with copy information.
#[derive(Debug)]
pub struct CopiesTreeDiffEntry {
    /// The path, including any copy/rename source.
    pub path: CopiesTreeDiffEntryPath,
    /// The resolved tree values if available, or the backend error hit while
    /// resolving them.
    pub values: BackendResult<Diff<MergedTreeValue>>,
}
/// Path and copy information of `CopiesTreeDiffEntry`.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct CopiesTreeDiffEntryPath {
    /// The source path and copy information if this is a copy or rename.
    /// `None` means the path itself is unchanged between the trees.
    pub source: Option<(RepoPathBuf, CopyOperation)>,
    /// The target path.
    pub target: RepoPathBuf,
}
impl CopiesTreeDiffEntryPath {
    /// The source path. Falls back to the target path when the entry is not a
    /// copy or rename.
    pub fn source(&self) -> &RepoPath {
        match &self.source {
            Some((path, _)) => path,
            None => &self.target,
        }
    }
    /// The target path.
    pub fn target(&self) -> &RepoPath {
        &self.target
    }
    /// Whether this entry was copied or renamed from the source. Returns `None`
    /// if the path is unchanged.
    pub fn copy_operation(&self) -> Option<CopyOperation> {
        let (_, op) = self.source.as_ref()?;
        Some(*op)
    }
    /// Returns source/target paths as [`Diff`] if they differ.
    pub fn to_diff(&self) -> Option<Diff<&RepoPath>> {
        let (source, _) = self.source.as_ref()?;
        Some(Diff::new(source, &self.target))
    }
}
/// Wraps a `TreeDiffStream`, adding support for copies and renames.
pub struct CopiesTreeDiffStream<'a> {
    // The underlying tree diff, polled entry by entry.
    inner: TreeDiffStream<'a>,
    // The trees being diffed; consulted to classify copy vs. rename.
    source_tree: MergedTree,
    target_tree: MergedTree,
    // Copy/rename records to splice into the diff entries.
    copy_records: &'a CopyRecords,
}
impl<'a> CopiesTreeDiffStream<'a> {
    /// Create a new diff stream with copy information.
    pub fn new(
        inner: TreeDiffStream<'a>,
        source_tree: MergedTree,
        target_tree: MergedTree,
        copy_records: &'a CopyRecords,
    ) -> Self {
        Self {
            inner,
            source_tree,
            target_tree,
            copy_records,
        }
    }
    /// Classifies a copied entry as copy or rename and builds its value diff.
    fn resolve_copy_source(
        &self,
        source: &RepoPath,
        values: BackendResult<Diff<MergedTreeValue>>,
    ) -> BackendResult<(CopyOperation, Diff<MergedTreeValue>)> {
        let target_value = values?.after;
        let source_value = self.source_tree.path_value(source)?;
        // If the source path is deleted (or replaced by a directory) in the
        // target tree, it's a rename; otherwise the source still exists, so
        // it's a copy.
        let remaining = self.target_tree.path_value(source)?;
        let copy_op = if remaining.is_absent() || remaining.is_tree() {
            CopyOperation::Rename
        } else {
            CopyOperation::Copy
        };
        Ok((copy_op, Diff::new(source_value, target_value)))
    }
}
impl Stream for CopiesTreeDiffStream<'_> {
    type Item = CopiesTreeDiffEntry;
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // Loop (instead of returning) so that suppressed "delete" entries are
        // skipped transparently within a single poll.
        while let Some(diff_entry) = ready!(self.inner.as_mut().poll_next(cx)) {
            let Some(CopyRecord { source, .. }) = self.copy_records.for_target(&diff_entry.path)
            else {
                // No copy record names this path as a target. Emit the entry
                // as-is, except that a deletion whose path is the source of
                // some copy is dropped: the rename is reported at the target
                // path instead.
                let target_deleted =
                    matches!(&diff_entry.values, Ok(diff) if diff.after.is_absent());
                if target_deleted && self.copy_records.has_source(&diff_entry.path) {
                    // Skip the "delete" entry when there is a rename.
                    continue;
                }
                return Poll::Ready(Some(CopiesTreeDiffEntry {
                    path: CopiesTreeDiffEntryPath {
                        source: None,
                        target: diff_entry.path,
                    },
                    values: diff_entry.values,
                }));
            };
            // This path is the target of a copy: attach the source path and
            // copy/rename classification to the entry.
            let (copy_op, values) = match self.resolve_copy_source(source, diff_entry.values) {
                Ok((copy_op, values)) => (copy_op, Ok(values)),
                // Fall back to "copy" (= path still exists) if unknown.
                Err(err) => (CopyOperation::Copy, Err(err)),
            };
            return Poll::Ready(Some(CopiesTreeDiffEntry {
                path: CopiesTreeDiffEntryPath {
                    source: Some((source.clone(), copy_op)),
                    target: diff_entry.path,
                },
                values,
            }));
        }
        Poll::Ready(None)
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/iter_util.rs | lib/src/iter_util.rs | // Copyright 2025 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Iterator helpers.
/// Returns `Ok(true)` if any element satisfies the fallible predicate,
/// `Ok(false)` if none do. Returns `Err` on the first error encountered.
pub fn fallible_any<T, E>(
    iter: impl IntoIterator<Item = T>,
    mut predicate: impl FnMut(T) -> Result<bool, E>,
) -> Result<bool, E> {
    // Map each element to a short-circuiting outcome: `Some(..)` stops the
    // scan (a hit or an error), `None` keeps looking.
    iter.into_iter()
        .find_map(|item| match predicate(item) {
            Ok(true) => Some(Ok(true)),
            Ok(false) => None,
            Err(err) => Some(Err(err)),
        })
        .unwrap_or(Ok(false))
}
/// Returns `Ok(Some(item))` for the first element where the predicate returns
/// `Ok(true)`, `Ok(None)` if no element satisfies it, or `Err` on the first
/// error.
pub fn fallible_find<T, E>(
    iter: impl IntoIterator<Item = T>,
    mut predicate: impl FnMut(&T) -> Result<bool, E>,
) -> Result<Option<T>, E> {
    // `find_map` stops at the first `Some(..)`: either the matching element
    // or a predicate error.
    iter.into_iter()
        .find_map(|item| match predicate(&item) {
            Ok(true) => Some(Ok(Some(item))),
            Ok(false) => None,
            Err(err) => Some(Err(err)),
        })
        .unwrap_or(Ok(None))
}
/// Returns `Ok(Some(index))` for the first element where the predicate returns
/// `Ok(true)`, `Ok(None)` if no element satisfies it, or `Err` on the first
/// error.
pub fn fallible_position<T, E>(
    iter: impl IntoIterator<Item = T>,
    mut predicate: impl FnMut(T) -> Result<bool, E>,
) -> Result<Option<usize>, E> {
    // Pair each element with its index, then stop at the first match or error.
    iter.into_iter()
        .enumerate()
        .find_map(|(index, item)| match predicate(item) {
            Ok(true) => Some(Ok(Some(index))),
            Ok(false) => None,
            Err(err) => Some(Err(err)),
        })
        .unwrap_or(Ok(None))
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/object_id.rs | lib/src/object_id.rs | // Copyright 2020-2024 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::fmt;
use std::fmt::Debug;
use crate::hex_util;
/// Common interface for identifiers that wrap raw hash bytes (commit ids,
/// change ids, etc.), as generated by the `id_type!` macro.
pub trait ObjectId {
    /// Lowercase name of the object kind, derived from the type name (e.g.
    /// "commit" for `CommitId`).
    fn object_type(&self) -> String;
    /// The raw id bytes, borrowed.
    fn as_bytes(&self) -> &[u8];
    /// The raw id bytes as an owned vector.
    fn to_bytes(&self) -> Vec<u8>;
    /// Hex representation of the id bytes.
    fn hex(&self) -> String;
}
// Defines a new struct type with visibility `vis` and name `ident` containing
// a single Vec<u8> used to store an identifier (typically the output of a hash
// function) as bytes. Types defined using this macro automatically implement
// the `ObjectId` and `ContentHash` traits.
// Documentation comments written inside the macro definition will be captured
// and associated with the type defined by the macro.
//
// Example:
// ```no_run
// id_type!(
// /// My favorite id type.
// pub MyId { hex() }
// );
// ```
macro_rules! id_type {
    ( $(#[$attr:meta])*
    $vis:vis $name:ident { $hex_method:ident() }
    ) => {
        $(#[$attr])*
        #[derive($crate::content_hash::ContentHash, PartialEq, Eq, PartialOrd, Ord, Clone, Hash)]
        $vis struct $name(Vec<u8>);
        // All inherent methods and trait impls come from the companion macro.
        // `$hex_method` selects which encoding (`hex` or `reverse_hex`) the
        // `Display` and human-readable serde impls use.
        $crate::object_id::impl_id_type!($name, $hex_method);
    };
}
// Implements inherent constructors plus `Debug`/`Display`/serde/`ObjectId`
// for an id type defined by `id_type!`. `$hex_method` is the method used for
// the human-readable representation (`hex` or `reverse_hex`).
macro_rules! impl_id_type {
    ($name:ident, $hex_method:ident) => {
        #[allow(dead_code)]
        impl $name {
            pub fn new(value: Vec<u8>) -> Self {
                Self(value)
            }
            pub fn from_bytes(bytes: &[u8]) -> Self {
                Self(bytes.to_vec())
            }
            /// Parses the given hex string into an ObjectId.
            ///
            /// The given string must be valid. A static str is required to
            /// prevent API misuse.
            pub fn from_hex(hex: &'static str) -> Self {
                Self::try_from_hex(hex).unwrap()
            }
            /// Parses the given hex string into an ObjectId.
            pub fn try_from_hex(hex: impl AsRef<[u8]>) -> Option<Self> {
                $crate::hex_util::decode_hex(hex).map(Self)
            }
        }
        impl std::fmt::Debug for $name {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
                // TODO: should we use $hex_method here?
                f.debug_tuple(stringify!($name)).field(&self.hex()).finish()
            }
        }
        // `f.pad` makes width/precision flags (e.g. `{:.6}`) work on the
        // rendered string.
        impl std::fmt::Display for $name {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
                f.pad(&self.$hex_method())
            }
        }
        // Human-readable formats (e.g. JSON) get the hex form; binary formats
        // get the raw bytes.
        impl serde::Serialize for $name {
            fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
            where
                S: serde::Serializer,
            {
                if serializer.is_human_readable() {
                    self.$hex_method().serialize(serializer)
                } else {
                    self.as_bytes().serialize(serializer)
                }
            }
        }
        impl crate::object_id::ObjectId for $name {
            fn object_type(&self) -> String {
                // "CommitId" -> "commit". The `unwrap()` fires only if the
                // type name doesn't end in "Id", which would be a bug in the
                // macro invocation. (`to_ascii_lowercase` already returns an
                // owned `String`; no extra `to_string()` needed.)
                stringify!($name)
                    .strip_suffix("Id")
                    .unwrap()
                    .to_ascii_lowercase()
            }
            fn as_bytes(&self) -> &[u8] {
                &self.0
            }
            fn to_bytes(&self) -> Vec<u8> {
                self.0.clone()
            }
            fn hex(&self) -> String {
                $crate::hex_util::encode_hex(&self.0)
            }
        }
    };
}
pub(crate) use id_type;
pub(crate) use impl_id_type;
/// An identifier prefix (typically from a type implementing the [`ObjectId`]
/// trait) with facilities for converting between bytes and a hex string.
#[derive(Clone, PartialEq, Eq)]
pub struct HexPrefix {
    // For odd-length prefixes, the lower 4 bits of the last byte are
    // zero-filled (e.g. the prefix "abc" is stored in two bytes as "abc0").
    min_prefix_bytes: Vec<u8>,
    // True if the prefix had an odd number of hex digits, i.e. only the high
    // nibble of the last byte in `min_prefix_bytes` is significant.
    has_odd_byte: bool,
}
impl HexPrefix {
    /// Returns a new `HexPrefix` or `None` if `prefix` cannot be decoded from
    /// hex to bytes.
    pub fn try_from_hex(prefix: impl AsRef<[u8]>) -> Option<Self> {
        let (min_prefix_bytes, has_odd_byte) = hex_util::decode_hex_prefix(prefix)?;
        Some(Self {
            min_prefix_bytes,
            has_odd_byte,
        })
    }
    /// Returns a new `HexPrefix` or `None` if `prefix` cannot be decoded from
    /// "reverse" hex to bytes.
    pub fn try_from_reverse_hex(prefix: impl AsRef<[u8]>) -> Option<Self> {
        let (min_prefix_bytes, has_odd_byte) = hex_util::decode_reverse_hex_prefix(prefix)?;
        Some(Self {
            min_prefix_bytes,
            has_odd_byte,
        })
    }
    /// Creates a prefix from whole bytes (i.e. an even number of hex digits).
    pub fn from_bytes(bytes: &[u8]) -> Self {
        Self {
            min_prefix_bytes: bytes.to_owned(),
            has_odd_byte: false,
        }
    }
    /// Returns a new `HexPrefix` representing the given `id`.
    pub fn from_id<T: ObjectId + ?Sized>(id: &T) -> Self {
        Self::from_bytes(id.as_bytes())
    }
    /// Returns string representation of this prefix using hex digits.
    pub fn hex(&self) -> String {
        let mut hex_string = hex_util::encode_hex(&self.min_prefix_bytes);
        if self.has_odd_byte {
            // Drop the trailing '0' that padded the half byte.
            hex_string.pop().unwrap();
        }
        hex_string
    }
    /// Returns string representation of this prefix using `z-k` "digits".
    pub fn reverse_hex(&self) -> String {
        let mut hex_string = hex_util::encode_reverse_hex(&self.min_prefix_bytes);
        if self.has_odd_byte {
            // Drop the trailing digit that padded the half byte.
            hex_string.pop().unwrap();
        }
        hex_string
    }
    /// Minimum bytes that would match this prefix. (e.g. "abc0" for "abc")
    ///
    /// Use this to partition a sorted slice, and test `matches(id)` from there.
    pub fn min_prefix_bytes(&self) -> &[u8] {
        &self.min_prefix_bytes
    }
    /// Returns the bytes representation if this prefix can be a full id.
    pub fn as_full_bytes(&self) -> Option<&[u8]> {
        (!self.has_odd_byte).then_some(&self.min_prefix_bytes)
    }
    // Splits the stored bytes into (trailing half byte if any, whole bytes).
    fn split_odd_byte(&self) -> (Option<u8>, &[u8]) {
        if self.has_odd_byte {
            let (&odd, prefix) = self.min_prefix_bytes.split_last().unwrap();
            (Some(odd), prefix)
        } else {
            (None, &self.min_prefix_bytes)
        }
    }
    /// Returns whether the stored prefix matches the prefix of `id`.
    pub fn matches<Q: ObjectId>(&self, id: &Q) -> bool {
        let id_bytes = id.as_bytes();
        let (maybe_odd, prefix) = self.split_odd_byte();
        if id_bytes.starts_with(prefix) {
            if let Some(odd) = maybe_odd {
                // `odd` stores the significant nibble in its high 4 bits with
                // the low bits zeroed, so compare only the high nibble of the
                // next id byte.
                matches!(id_bytes.get(prefix.len()), Some(v) if v & 0xf0 == odd)
            } else {
                true
            }
        } else {
            false
        }
    }
}
impl Debug for HexPrefix {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
        // Render as `HexPrefix("abc")` using the hex form, not the raw bytes.
        f.debug_tuple("HexPrefix").field(&self.hex()).finish()
    }
}
/// The result of a prefix search.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PrefixResolution<T> {
    /// No entry matched the prefix.
    NoMatch,
    /// Exactly one entry matched.
    SingleMatch(T),
    /// More than one entry matched.
    AmbiguousMatch,
}
impl<T> PrefixResolution<T> {
    /// Converts the payload of a single match, preserving the other variants.
    pub fn map<U>(self, f: impl FnOnce(T) -> U) -> PrefixResolution<U> {
        match self {
            Self::SingleMatch(x) => PrefixResolution::SingleMatch(f(x)),
            Self::NoMatch => PrefixResolution::NoMatch,
            Self::AmbiguousMatch => PrefixResolution::AmbiguousMatch,
        }
    }
    /// Like `map`, but a `None` from `f` turns a single match into `NoMatch`.
    pub fn filter_map<U>(self, f: impl FnOnce(T) -> Option<U>) -> PrefixResolution<U> {
        match self {
            Self::NoMatch => PrefixResolution::NoMatch,
            Self::AmbiguousMatch => PrefixResolution::AmbiguousMatch,
            Self::SingleMatch(x) => {
                f(x).map_or(PrefixResolution::NoMatch, PrefixResolution::SingleMatch)
            }
        }
    }
}
impl<T: Clone> PrefixResolution<T> {
    /// Combines two search results: a lone result on either side passes
    /// through unchanged, while anything else (two matches, or an ambiguous
    /// side) is ambiguous.
    pub fn plus(&self, other: &Self) -> Self {
        match (self, other) {
            (Self::NoMatch, _) => other.clone(),
            (_, Self::NoMatch) => self.clone(),
            _ => Self::AmbiguousMatch,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::backend::ChangeId;
    use crate::backend::CommitId;
    #[test]
    fn test_display_object_id() {
        let commit_id = CommitId::from_hex("deadbeef0123");
        assert_eq!(format!("{commit_id}"), "deadbeef0123");
        // Precision (`{:.6}`) truncates because Display goes through `f.pad`.
        assert_eq!(format!("{commit_id:.6}"), "deadbe");
        let change_id = ChangeId::from_hex("deadbeef0123");
        // ChangeId renders with the "reverse hex" (z-k) digits, not plain hex.
        assert_eq!(format!("{change_id}"), "mlpmollkzyxw");
        assert_eq!(format!("{change_id:.6}"), "mlpmol");
    }
    #[test]
    fn test_hex_prefix_prefixes() {
        let prefix = HexPrefix::try_from_hex("").unwrap();
        assert_eq!(prefix.min_prefix_bytes(), b"");
        // Odd-length prefixes zero-fill the low nibble of the last byte.
        let prefix = HexPrefix::try_from_hex("1").unwrap();
        assert_eq!(prefix.min_prefix_bytes(), b"\x10");
        let prefix = HexPrefix::try_from_hex("12").unwrap();
        assert_eq!(prefix.min_prefix_bytes(), b"\x12");
        let prefix = HexPrefix::try_from_hex("123").unwrap();
        assert_eq!(prefix.min_prefix_bytes(), b"\x12\x30");
        let bad_prefix = HexPrefix::try_from_hex("0x123");
        assert_eq!(bad_prefix, None);
        let bad_prefix = HexPrefix::try_from_hex("foobar");
        assert_eq!(bad_prefix, None);
    }
    #[test]
    fn test_hex_prefix_matches() {
        let id = CommitId::from_hex("1234");
        assert!(HexPrefix::try_from_hex("").unwrap().matches(&id));
        assert!(HexPrefix::try_from_hex("1").unwrap().matches(&id));
        assert!(HexPrefix::try_from_hex("12").unwrap().matches(&id));
        assert!(HexPrefix::try_from_hex("123").unwrap().matches(&id));
        assert!(HexPrefix::try_from_hex("1234").unwrap().matches(&id));
        // Longer than the id itself can never match.
        assert!(!HexPrefix::try_from_hex("12345").unwrap().matches(&id));
        assert!(!HexPrefix::try_from_hex("a").unwrap().matches(&id));
        assert!(!HexPrefix::try_from_hex("1a").unwrap().matches(&id));
        assert!(!HexPrefix::try_from_hex("12a").unwrap().matches(&id));
        assert!(!HexPrefix::try_from_hex("123a").unwrap().matches(&id));
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/op_heads_store.rs | lib/src/op_heads_store.rs | // Copyright 2021 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::any::Any;
use std::collections::HashSet;
use std::fmt::Debug;
use std::sync::Arc;
use async_trait::async_trait;
use itertools::Itertools as _;
use pollster::FutureExt as _;
use thiserror::Error;
use crate::dag_walk;
use crate::op_store::OpStore;
use crate::op_store::OpStoreError;
use crate::op_store::OperationId;
use crate::operation::Operation;
#[derive(Debug, Error)]
pub enum OpHeadsStoreError {
    /// Reading the current set of op heads failed.
    #[error("Failed to read operation heads")]
    Read(#[source] Box<dyn std::error::Error + Send + Sync>),
    /// Recording `new_op_id` as a new head failed.
    #[error("Failed to record operation head {new_op_id}")]
    Write {
        new_op_id: OperationId,
        source: Box<dyn std::error::Error + Send + Sync>,
    },
    /// Taking the (optional) op-heads store lock failed.
    #[error("Failed to lock operation heads store")]
    Lock(#[source] Box<dyn std::error::Error + Send + Sync>),
}
#[derive(Debug, Error)]
pub enum OpHeadResolutionError {
    /// No heads were found, even after taking the store lock (see
    /// `resolve_op_heads`).
    #[error("Operation log has no heads")]
    NoHeads,
}
/// Marker for a held op-heads lock token. Implementations may be no-ops; see
/// [`OpHeadsStore::lock`] for the (advisory) semantics.
pub trait OpHeadsStoreLock {}
/// Manages the set of current heads of the operation log.
#[async_trait]
pub trait OpHeadsStore: Any + Send + Sync + Debug {
    /// Name identifying this implementation.
    fn name(&self) -> &str;
    /// Remove the old op heads and add the new one.
    ///
    /// The old op heads must not contain the new one.
    async fn update_op_heads(
        &self,
        old_ids: &[OperationId],
        new_id: &OperationId,
    ) -> Result<(), OpHeadsStoreError>;
    /// Returns the current op heads. May be empty or contain multiple entries
    /// while concurrent processes are updating the store (see
    /// `resolve_op_heads`).
    async fn get_op_heads(&self) -> Result<Vec<OperationId>, OpHeadsStoreError>;
    /// Optionally takes a lock on the op heads store. The purpose of the lock
    /// is to prevent concurrent processes from resolving the same divergent
    /// operations. It is not needed for correctness; implementations are free
    /// to return a type that doesn't hold a lock.
    async fn lock(&self) -> Result<Box<dyn OpHeadsStoreLock + '_>, OpHeadsStoreError>;
}
impl dyn OpHeadsStore {
    /// Returns reference of the implementation type.
    pub fn downcast_ref<T: OpHeadsStore>(&self) -> Option<&T> {
        // Upcast to `dyn Any` (the trait requires `Any`) so we can downcast
        // to the concrete store type.
        (self as &dyn Any).downcast_ref()
    }
}
// Given an OpHeadsStore, fetch and resolve its op heads down to one under a
// lock.
//
// This routine is defined outside the trait because it must support generics.
pub fn resolve_op_heads<E>(
    op_heads_store: &dyn OpHeadsStore,
    op_store: &Arc<dyn OpStore>,
    resolver: impl FnOnce(Vec<Operation>) -> Result<Operation, E>,
) -> Result<Operation, E>
where
    E: From<OpHeadResolutionError> + From<OpHeadsStoreError> + From<OpStoreError>,
{
    // Fast path: a single head needs no lock and no merging.
    // This can be empty if the OpHeadsStore doesn't support atomic updates.
    // For example, all entries ahead of a readdir() pointer could be deleted by
    // another concurrent process.
    let mut op_heads = op_heads_store.get_op_heads().block_on()?;
    if op_heads.len() == 1 {
        let operation_id = op_heads.pop().unwrap();
        let operation = op_store.read_operation(&operation_id).block_on()?;
        return Ok(Operation::new(op_store.clone(), operation_id, operation));
    }
    // There are no/multiple heads. We take a lock, then check if there are
    // still no/multiple heads (it's likely that another process was in the
    // process of deleting one of them). If there are still multiple heads, we
    // attempt to merge all the views into one. We then write that view and a
    // corresponding operation to the op-store.
    // Note that the locking isn't necessary for correctness of merge; we take
    // the lock only to prevent other concurrent processes from doing the same
    // work (and producing another set of divergent heads).
    let _lock = op_heads_store.lock().block_on()?;
    let op_head_ids = op_heads_store.get_op_heads().block_on()?;
    if op_head_ids.is_empty() {
        return Err(OpHeadResolutionError::NoHeads.into());
    }
    if op_head_ids.len() == 1 {
        let op_head_id = op_head_ids[0].clone();
        let op_head = op_store.read_operation(&op_head_id).block_on()?;
        return Ok(Operation::new(op_store.clone(), op_head_id, op_head));
    }
    // Load all head operations so they can be filtered and merged.
    let op_heads: Vec<_> = op_head_ids
        .iter()
        .map(|op_id: &OperationId| -> Result<Operation, OpStoreError> {
            let data = op_store.read_operation(op_id).block_on()?;
            Ok(Operation::new(op_store.clone(), op_id.clone(), data))
        })
        .try_collect()?;
    // Remove ancestors so we don't create a merge operation with an operation
    // and its ancestor
    let op_head_ids_before: HashSet<_> = op_heads.iter().map(|op| op.id().clone()).collect();
    let filtered_op_heads = dag_walk::heads_ok(
        op_heads.into_iter().map(Ok),
        |op: &Operation| op.id().clone(),
        |op: &Operation| op.parents().collect_vec(),
    )?;
    let op_head_ids_after: HashSet<_> =
        filtered_op_heads.iter().map(|op| op.id().clone()).collect();
    // The heads that turned out to be ancestors still need to be removed from
    // the store when we record the winner below.
    let ancestor_op_heads = op_head_ids_before
        .difference(&op_head_ids_after)
        .cloned()
        .collect_vec();
    let mut op_heads = filtered_op_heads.into_iter().collect_vec();
    // Return without creating a merge operation
    if let [op_head] = &*op_heads {
        op_heads_store
            .update_op_heads(&ancestor_op_heads, op_head.id())
            .block_on()?;
        return Ok(op_head.clone());
    }
    // Multiple true heads remain: hand them to the caller's resolver (sorted
    // by operation end time) to produce a single merged operation.
    op_heads.sort_by_key(|op| op.metadata().time.end.timestamp);
    let new_op = resolver(op_heads)?;
    let mut old_op_heads = ancestor_op_heads;
    old_op_heads.extend_from_slice(new_op.parent_ids());
    op_heads_store
        .update_op_heads(&old_op_heads, new_op.id())
        .block_on()?;
    Ok(new_op)
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/gitignore.rs | lib/src/gitignore.rs | // Copyright 2021 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::fs;
use std::io;
use std::iter;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use ignore::gitignore;
use thiserror::Error;
#[derive(Debug, Error)]
pub enum GitIgnoreError {
    /// The ignore file exists but could not be read.
    #[error("Failed to read ignore patterns from file {path}")]
    ReadFile { path: PathBuf, source: io::Error },
    /// A (non-comment) pattern line is not valid UTF-8.
    #[error("Invalid UTF-8 for ignore pattern in {path} on line #{line_num_for_display}: {line}")]
    InvalidUtf8 {
        path: PathBuf,
        line_num_for_display: usize,
        line: String,
        source: std::str::Utf8Error,
    },
    /// The `ignore` crate rejected a pattern or failed to build the matcher.
    #[error("Failed to parse ignore patterns from file {path}")]
    Underlying {
        path: PathBuf,
        source: ignore::Error,
    },
}
/// Models the effective contents of multiple .gitignore files.
#[derive(Debug)]
pub struct GitIgnoreFile {
    // The previously chained file, if any. `matches` consults this file
    // first, then walks up the chain.
    parent: Option<Arc<Self>>,
    // Compiled patterns from this file alone.
    matcher: gitignore::Gitignore,
}
impl GitIgnoreFile {
    /// An ignore chain that matches nothing.
    pub fn empty() -> Arc<Self> {
        Arc::new(Self {
            parent: None,
            matcher: gitignore::Gitignore::empty(),
        })
    }
    /// Concatenates new `.gitignore` content at the `prefix` directory.
    ///
    /// The `prefix` should be a slash-separated path relative to the workspace
    /// root.
    pub fn chain(
        self: &Arc<Self>,
        prefix: &str,
        ignore_path: &Path,
        input: &[u8],
    ) -> Result<Arc<Self>, GitIgnoreError> {
        let mut builder = gitignore::GitignoreBuilder::new(prefix);
        for (i, input_line) in input.split(|b| *b == b'\n').enumerate() {
            // NOTE(review): `#` comment lines are skipped before UTF-8
            // validation — presumably so non-UTF-8 comments don't error out;
            // confirm against git's behavior.
            if input_line.starts_with(b"#") {
                continue;
            }
            let line = str::from_utf8(input_line).map_err(|err| GitIgnoreError::InvalidUtf8 {
                path: ignore_path.to_path_buf(),
                line_num_for_display: i + 1,
                line: String::from_utf8_lossy(input_line).to_string(),
                source: err,
            })?;
            // The `from` argument doesn't provide any diagnostics or correctness, so it is
            // not required. It only allows retrieving the path from the `Glob` later, which
            // we never do.
            builder
                .add_line(None, line)
                .map_err(|err| GitIgnoreError::Underlying {
                    path: ignore_path.to_path_buf(),
                    source: err,
                })?;
        }
        let matcher = builder.build().map_err(|err| GitIgnoreError::Underlying {
            path: ignore_path.to_path_buf(),
            source: err,
        })?;
        let parent = if self.matcher.is_empty() {
            self.parent.clone() // omit the empty root
        } else {
            Some(self.clone())
        };
        Ok(Arc::new(Self { parent, matcher }))
    }
    /// Concatenates new `.gitignore` file at the `prefix` directory.
    ///
    /// The `prefix` should be a slash-separated path relative to the workspace
    /// root.
    pub fn chain_with_file(
        self: &Arc<Self>,
        prefix: &str,
        file: PathBuf,
    ) -> Result<Arc<Self>, GitIgnoreError> {
        // A missing (or non-regular) file contributes nothing to the chain.
        if file.is_file() {
            let buf = fs::read(&file).map_err(|err| GitIgnoreError::ReadFile {
                path: file.clone(),
                source: err,
            })?;
            self.chain(prefix, &file, &buf)
        } else {
            Ok(self.clone())
        }
    }
    // Walks from the most recently chained file toward the root; the first
    // file with a definitive verdict (ignore or whitelist) wins.
    fn matches_helper(&self, path: &str, is_dir: bool) -> bool {
        iter::successors(Some(self), |file| file.parent.as_deref())
            .find_map(|file| {
                // TODO: the documentation warns that
                // `matched_path_or_any_parents` is slower than `matched`;
                // ideally, we would switch to that.
                match file.matcher.matched_path_or_any_parents(path, is_dir) {
                    ignore::Match::None => None,
                    ignore::Match::Ignore(_) => Some(true),
                    ignore::Match::Whitelist(_) => Some(false),
                }
            })
            .unwrap_or_default()
    }
    /// Returns whether specified path (not just file!) should be ignored. This
    /// method does not directly define which files should not be tracked in
    /// the repository. Instead, it performs a simple matching against the
    /// last applicable .gitignore line. The effective set of paths
    /// ignored in the repository should take into account that all (untracked)
    /// files within a ignored directory should be ignored unconditionally.
    /// The code in this file does not take that into account.
    pub fn matches(&self, path: &str) -> bool {
        // If path ends with slash, consider it as a directory.
        let (path, is_dir) = match path.strip_suffix('/') {
            Some(path) => (path, true),
            None => (path, false),
        };
        self.matches_helper(path, is_dir)
    }
}
#[cfg(test)]
mod tests {
use super::*;
fn matches(input: &[u8], path: &str) -> bool {
let file = GitIgnoreFile::empty()
.chain("", Path::new(""), input)
.unwrap();
file.matches(path)
}
#[test]
fn test_gitignore_empty_file() {
let file = GitIgnoreFile::empty();
assert!(!file.matches("foo"));
}
#[test]
fn test_gitignore_empty_file_with_prefix() {
let file = GitIgnoreFile::empty()
.chain("dir/", Path::new(""), b"")
.unwrap();
assert!(!file.matches("dir/foo"));
}
#[test]
fn test_gitignore_literal() {
let file = GitIgnoreFile::empty()
.chain("", Path::new(""), b"foo\n")
.unwrap();
assert!(file.matches("foo"));
assert!(file.matches("dir/foo"));
assert!(file.matches("dir/subdir/foo"));
assert!(!file.matches("food"));
assert!(!file.matches("dir/food"));
}
#[test]
fn test_gitignore_literal_with_prefix() {
let file = GitIgnoreFile::empty()
.chain("./dir/", Path::new(""), b"foo\n")
.unwrap();
assert!(file.matches("dir/foo"));
assert!(file.matches("dir/subdir/foo"));
}
#[test]
fn test_gitignore_pattern_same_as_prefix() {
let file = GitIgnoreFile::empty()
.chain("dir/", Path::new(""), b"dir\n")
.unwrap();
assert!(file.matches("dir/dir"));
// We don't want the "dir" pattern to apply to the parent directory
assert!(!file.matches("dir/foo"));
}
#[test]
fn test_gitignore_rooted_literal() {
let file = GitIgnoreFile::empty()
.chain("", Path::new(""), b"/foo\n")
.unwrap();
assert!(file.matches("foo"));
assert!(!file.matches("dir/foo"));
}
#[test]
fn test_gitignore_rooted_literal_with_prefix() {
let file = GitIgnoreFile::empty()
.chain("dir/", Path::new(""), b"/foo\n")
.unwrap();
assert!(file.matches("dir/foo"));
assert!(!file.matches("dir/subdir/foo"));
}
#[test]
fn test_gitignore_deep_dir() {
let file = GitIgnoreFile::empty()
.chain("", Path::new(""), b"/dir1/dir2/dir3\n")
.unwrap();
assert!(!file.matches("foo"));
assert!(!file.matches("dir1/foo"));
assert!(!file.matches("dir1/dir2/foo"));
assert!(file.matches("dir1/dir2/dir3/foo"));
assert!(file.matches("dir1/dir2/dir3/dir4/foo"));
}
#[test]
fn test_gitignore_deep_dir_chained() {
// Prefix is relative to root, not to parent file
let file = GitIgnoreFile::empty()
.chain("", Path::new(""), b"/dummy\n")
.unwrap()
.chain("dir1/", Path::new(""), b"/dummy\n")
.unwrap()
.chain("dir1/dir2/", Path::new(""), b"/dir3\n")
.unwrap();
assert!(!file.matches("foo"));
assert!(!file.matches("dir1/foo"));
assert!(!file.matches("dir1/dir2/foo"));
assert!(file.matches("dir1/dir2/dir3/foo"));
assert!(file.matches("dir1/dir2/dir3/dir4/foo"));
}
#[test]
fn test_gitignore_match_only_dir() {
let file = GitIgnoreFile::empty()
.chain("", Path::new(""), b"/dir/\n")
.unwrap();
assert!(!file.matches("dir"));
assert!(file.matches("dir/foo"));
assert!(file.matches("dir/subdir/foo"));
}
#[test]
fn test_gitignore_unusual_symbols() {
assert!(matches(b"\\*\n", "*"));
assert!(!matches(b"\\*\n", "foo"));
assert!(matches(b"\\!\n", "!"));
assert!(matches(b"\\?\n", "?"));
assert!(!matches(b"\\?\n", "x"));
assert!(matches(b"\\w\n", "w"));
assert!(
GitIgnoreFile::empty()
.chain("", Path::new(""), b"\\\n")
.is_err()
);
}
#[test]
#[cfg(not(target_os = "windows"))]
fn test_gitignore_backslash_path() {
assert!(!matches(b"/foo/bar", "/foo\\bar"));
assert!(!matches(b"/foo/bar", "/foo/bar\\"));
assert!(!matches(b"/foo/bar/", "/foo\\bar/"));
assert!(!matches(b"/foo/bar/", "/foo\\bar\\/"));
// Invalid escapes are treated like literal backslashes
assert!(!matches(b"\\w\n", "\\w"));
assert!(matches(b"\\\\ \n", "\\ "));
assert!(matches(b"\\\\\\ \n", "\\ "));
}
#[test]
#[cfg(target_os = "windows")]
/// ignore crate consider backslashes as a directory divider only on
/// Windows.
fn test_gitignore_backslash_path() {
assert!(matches(b"/foo/bar", "/foo\\bar"));
assert!(matches(b"/foo/bar", "/foo/bar\\"));
assert!(matches(b"/foo/bar/", "/foo\\bar/"));
assert!(matches(b"/foo/bar/", "/foo\\bar\\/"));
assert!(matches(b"\\w\n", "\\w"));
assert!(!matches(b"\\\\ \n", "\\ "));
assert!(!matches(b"\\\\\\ \n", "\\ "));
}
#[test]
fn test_gitignore_whitespace() {
assert!(!matches(b" \n", " "));
assert!(matches(b"\\ \n", " "));
assert!(!matches(b"\\\\ \n", " "));
assert!(matches(b" a\n", " a"));
assert!(matches(b"a b\n", "a b"));
assert!(matches(b"a b \n", "a b"));
assert!(!matches(b"a b \n", "a b "));
assert!(matches(b"a b\\ \\ \n", "a b "));
// Trail CRs at EOL is ignored
assert!(matches(b"a\r\n", "a"));
assert!(!matches(b"a\r\n", "a\r"));
assert!(!matches(b"a\r\r\n", "a\r"));
assert!(matches(b"a\r\r\n", "a"));
assert!(!matches(b"a\r\r\n", "a\r\r"));
assert!(matches(b"a\r\r\n", "a"));
assert!(matches(b"\ra\n", "\ra"));
assert!(!matches(b"\ra\n", "a"));
assert!(
GitIgnoreFile::empty()
.chain("", Path::new(""), b"a b \\ \n")
.is_err()
);
}
#[test]
fn test_gitignore_glob() {
assert!(!matches(b"*.o\n", "foo"));
assert!(matches(b"*.o\n", "foo.o"));
assert!(!matches(b"foo.?\n", "foo"));
assert!(!matches(b"foo.?\n", "foo."));
assert!(matches(b"foo.?\n", "foo.o"));
}
#[test]
fn test_gitignore_range() {
assert!(!matches(b"foo.[az]\n", "foo"));
assert!(matches(b"foo.[az]\n", "foo.a"));
assert!(!matches(b"foo.[az]\n", "foo.g"));
assert!(matches(b"foo.[az]\n", "foo.z"));
assert!(!matches(b"foo.[a-z]\n", "foo"));
assert!(matches(b"foo.[a-z]\n", "foo.a"));
assert!(matches(b"foo.[a-z]\n", "foo.g"));
assert!(matches(b"foo.[a-z]\n", "foo.z"));
assert!(matches(b"foo.[0-9a-fA-F]\n", "foo.5"));
assert!(matches(b"foo.[0-9a-fA-F]\n", "foo.c"));
assert!(matches(b"foo.[0-9a-fA-F]\n", "foo.E"));
assert!(!matches(b"foo.[0-9a-fA-F]\n", "foo._"));
}
#[test]
fn test_gitignore_leading_dir_glob() {
assert!(matches(b"**/foo\n", "foo"));
assert!(matches(b"**/foo\n", "dir1/dir2/foo"));
assert!(matches(b"**/foo\n", "foo/file"));
assert!(matches(b"**/dir/foo\n", "dir/foo"));
assert!(matches(b"**/dir/foo\n", "dir1/dir2/dir/foo"));
}
#[test]
fn test_gitignore_leading_dir_glob_with_prefix() {
let file = GitIgnoreFile::empty()
.chain("dir1/dir2/", Path::new(""), b"**/foo\n")
.unwrap();
assert!(file.matches("dir1/dir2/foo"));
assert!(!file.matches("dir1/dir2/bar"));
assert!(file.matches("dir1/dir2/sub1/sub2/foo"));
assert!(!file.matches("dir1/dir2/sub1/sub2/bar"));
}
#[test]
fn test_gitignore_trailing_dir_glob() {
assert!(!matches(b"abc/**\n", "abc"));
assert!(matches(b"abc/**\n", "abc/file"));
assert!(matches(b"abc/**\n", "abc/dir/file"));
}
#[test]
fn test_gitignore_internal_dir_glob() {
assert!(matches(b"a/**/b\n", "a/b"));
assert!(matches(b"a/**/b\n", "a/x/b"));
assert!(matches(b"a/**/b\n", "a/x/y/b"));
assert!(!matches(b"a/**/b\n", "ax/y/b"));
assert!(!matches(b"a/**/b\n", "a/x/yb"));
assert!(!matches(b"a/**/b\n", "ab"));
}
#[test]
fn test_gitignore_internal_dir_glob_not_really() {
assert!(!matches(b"a/x**y/b\n", "a/b"));
assert!(matches(b"a/x**y/b\n", "a/xy/b"));
assert!(matches(b"a/x**y/b\n", "a/xzzzy/b"));
}
#[test]
fn test_gitignore_line_ordering() {
assert!(matches(b"foo\n!foo/bar\n", "foo"));
assert!(!matches(b"foo\n!foo/bar\n", "foo/bar"));
assert!(matches(b"foo\n!foo/bar\n", "foo/baz"));
assert!(matches(b"foo\n!foo/bar\nfoo/bar/baz", "foo"));
assert!(!matches(b"foo\n!foo/bar\nfoo/bar/baz", "foo/bar"));
assert!(matches(b"foo\n!foo/bar\nfoo/bar/baz", "foo/bar/baz"));
assert!(!matches(b"foo\n!foo/bar\nfoo/bar/baz", "foo/bar/quux"));
assert!(!matches(b"foo/*\n!foo/bar", "foo/bar"));
}
#[test]
fn test_gitignore_file_ordering() {
let file1 = GitIgnoreFile::empty()
.chain("", Path::new(""), b"/foo\n")
.unwrap();
let file2 = file1.chain("foo/", Path::new(""), b"!/bar").unwrap();
let file3 = file2.chain("foo/bar/", Path::new(""), b"/baz").unwrap();
assert!(file1.matches("foo"));
assert!(file1.matches("foo/bar"));
assert!(!file2.matches("foo/bar"));
assert!(!file2.matches("foo/bar/baz"));
assert!(file2.matches("foo/baz"));
assert!(file3.matches("foo/bar/baz"));
assert!(!file3.matches("foo/bar/qux"));
}
#[test]
fn test_gitignore_negative_parent_directory() {
// The following script shows that Git ignores the file:
//
// ```bash
// $ rm -rf test-repo && \
// git init test-repo &>/dev/null && \
// cd test-repo && \
// printf 'A/B.*\n!/A/\n' >.gitignore && \
// mkdir A && \
// touch A/B.ext && \
// git check-ignore A/B.ext
// A/B.ext
// ```
let ignore = GitIgnoreFile::empty()
.chain("", Path::new(""), b"foo/bar.*\n!/foo/\n")
.unwrap();
assert!(ignore.matches("foo/bar.ext"));
let ignore = GitIgnoreFile::empty()
.chain("", Path::new(""), b"!/foo/\nfoo/bar.*\n")
.unwrap();
assert!(ignore.matches("foo/bar.ext"));
}
#[test]
fn test_gitignore_invalid_utf8() {
// This tests that comments are not parsed
// The following slice is the byte representation of the following comment
// string:
//#à
let non_ascii_bytes = [35, 224];
let ignore = GitIgnoreFile::empty().chain("", Path::new(""), &non_ascii_bytes);
assert!(ignore.is_ok());
// Test without the leading #
let ignore = GitIgnoreFile::empty().chain("", Path::new(""), &non_ascii_bytes[1..]);
assert!(ignore.is_err());
}
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/settings.rs | lib/src/settings.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::collections::HashMap;
use std::str::FromStr;
use std::sync::Arc;
use std::sync::Mutex;
use chrono::DateTime;
use itertools::Itertools as _;
use rand::prelude::*;
use rand_chacha::ChaCha20Rng;
use serde::Deserialize;
use crate::backend::ChangeId;
use crate::backend::Commit;
use crate::backend::Signature;
use crate::backend::Timestamp;
use crate::config::ConfigGetError;
use crate::config::ConfigGetResultExt as _;
use crate::config::ConfigTable;
use crate::config::ConfigValue;
use crate::config::StackedConfig;
use crate::config::ToConfigNamePath;
use crate::fmt_util::binary_prefix;
use crate::ref_name::RemoteNameBuf;
use crate::signing::SignBehavior;
#[derive(Debug, Clone)]
pub struct UserSettings {
config: Arc<StackedConfig>,
data: Arc<UserSettingsData>,
rng: Arc<JJRng>,
}
#[derive(Debug)]
struct UserSettingsData {
user_name: String,
user_email: String,
commit_timestamp: Option<Timestamp>,
operation_timestamp: Option<Timestamp>,
operation_hostname: String,
operation_username: String,
signing_behavior: SignBehavior,
signing_key: Option<String>,
}
pub type RemoteSettingsMap = HashMap<RemoteNameBuf, RemoteSettings>;
#[derive(Debug, Clone, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct RemoteSettings {
/// String matcher expression whether to track bookmarks automatically.
#[serde(default)]
pub auto_track_bookmarks: Option<String>,
}
impl RemoteSettings {
pub fn table_from_settings(
settings: &UserSettings,
) -> Result<RemoteSettingsMap, ConfigGetError> {
settings
.table_keys("remotes")
.map(|name| Ok((name.into(), settings.get(["remotes", name])?)))
.try_collect()
}
}
/// Commit signing settings, describes how to and if to sign commits.
#[derive(Debug, Clone)]
pub struct SignSettings {
/// What to actually do, see [SignBehavior].
pub behavior: SignBehavior,
/// The email address to compare against the commit author when determining
/// if the existing signature is "our own" in terms of the sign behavior.
pub user_email: String,
/// The signing backend specific key, to be passed to the signing backend.
pub key: Option<String>,
}
impl SignSettings {
/// Check if a commit should be signed according to the configured behavior
/// and email.
pub fn should_sign(&self, commit: &Commit) -> bool {
match self.behavior {
SignBehavior::Drop => false,
SignBehavior::Keep => {
commit.secure_sig.is_some() && commit.author.email == self.user_email
}
SignBehavior::Own => commit.author.email == self.user_email,
SignBehavior::Force => true,
}
}
}
fn to_timestamp(value: ConfigValue) -> Result<Timestamp, Box<dyn std::error::Error + Send + Sync>> {
// Since toml_edit::Datetime isn't the date-time type used across our code
// base, we accept both string and date-time types.
if let Some(s) = value.as_str() {
Ok(Timestamp::from_datetime(DateTime::parse_from_rfc3339(s)?))
} else if let Some(d) = value.as_datetime() {
// It's easier to re-parse the TOML date-time expression.
let s = d.to_string();
Ok(Timestamp::from_datetime(DateTime::parse_from_rfc3339(&s)?))
} else {
let ty = value.type_name();
Err(format!("invalid type: {ty}, expected a date-time").into())
}
}
impl UserSettings {
pub fn from_config(config: StackedConfig) -> Result<Self, ConfigGetError> {
let rng_seed = config.get::<u64>("debug.randomness-seed").optional()?;
Self::from_config_and_rng(config, Arc::new(JJRng::new(rng_seed)))
}
fn from_config_and_rng(config: StackedConfig, rng: Arc<JJRng>) -> Result<Self, ConfigGetError> {
let user_name = config.get("user.name")?;
let user_email = config.get("user.email")?;
let commit_timestamp = config
.get_value_with("debug.commit-timestamp", to_timestamp)
.optional()?;
let operation_timestamp = config
.get_value_with("debug.operation-timestamp", to_timestamp)
.optional()?;
let operation_hostname = config.get("operation.hostname")?;
let operation_username = config.get("operation.username")?;
let signing_behavior = config.get("signing.behavior")?;
let signing_key = config.get("signing.key").optional()?;
let data = UserSettingsData {
user_name,
user_email,
commit_timestamp,
operation_timestamp,
operation_hostname,
operation_username,
signing_behavior,
signing_key,
};
Ok(Self {
config: Arc::new(config),
data: Arc::new(data),
rng,
})
}
/// Like [`UserSettings::from_config()`], but retains the internal state.
///
/// This ensures that no duplicated change IDs are generated within the
/// current process. New `debug.randomness-seed` value is ignored.
pub fn with_new_config(&self, config: StackedConfig) -> Result<Self, ConfigGetError> {
Self::from_config_and_rng(config, self.rng.clone())
}
pub fn get_rng(&self) -> Arc<JJRng> {
self.rng.clone()
}
pub fn user_name(&self) -> &str {
&self.data.user_name
}
// Must not be changed to avoid git pushing older commits with no set name
pub const USER_NAME_PLACEHOLDER: &str = "(no name configured)";
pub fn user_email(&self) -> &str {
&self.data.user_email
}
// Must not be changed to avoid git pushing older commits with no set email
// address
pub const USER_EMAIL_PLACEHOLDER: &str = "(no email configured)";
pub fn commit_timestamp(&self) -> Option<Timestamp> {
self.data.commit_timestamp
}
pub fn operation_timestamp(&self) -> Option<Timestamp> {
self.data.operation_timestamp
}
pub fn operation_hostname(&self) -> &str {
&self.data.operation_hostname
}
pub fn operation_username(&self) -> &str {
&self.data.operation_username
}
pub fn signature(&self) -> Signature {
let timestamp = self.data.commit_timestamp.unwrap_or_else(Timestamp::now);
Signature {
name: self.user_name().to_owned(),
email: self.user_email().to_owned(),
timestamp,
}
}
/// Returns low-level config object.
///
/// You should typically use `settings.get_<type>()` methods instead.
pub fn config(&self) -> &StackedConfig {
&self.config
}
pub fn remote_settings(&self) -> Result<RemoteSettingsMap, ConfigGetError> {
RemoteSettings::table_from_settings(self)
}
// separate from sign_settings as those two are needed in pretty different
// places
pub fn signing_backend(&self) -> Result<Option<String>, ConfigGetError> {
let backend = self.get_string("signing.backend")?;
Ok((backend != "none").then_some(backend))
}
pub fn sign_settings(&self) -> SignSettings {
SignSettings {
behavior: self.data.signing_behavior,
user_email: self.data.user_email.clone(),
key: self.data.signing_key.clone(),
}
}
}
/// General-purpose accessors.
impl UserSettings {
/// Looks up value of the specified type `T` by `name`.
pub fn get<'de, T: Deserialize<'de>>(
&self,
name: impl ToConfigNamePath,
) -> Result<T, ConfigGetError> {
self.config.get(name)
}
/// Looks up string value by `name`.
pub fn get_string(&self, name: impl ToConfigNamePath) -> Result<String, ConfigGetError> {
self.get(name)
}
/// Looks up integer value by `name`.
pub fn get_int(&self, name: impl ToConfigNamePath) -> Result<i64, ConfigGetError> {
self.get(name)
}
/// Looks up boolean value by `name`.
pub fn get_bool(&self, name: impl ToConfigNamePath) -> Result<bool, ConfigGetError> {
self.get(name)
}
/// Looks up generic value by `name`.
pub fn get_value(&self, name: impl ToConfigNamePath) -> Result<ConfigValue, ConfigGetError> {
self.config.get_value(name)
}
/// Looks up value by `name`, converts it by using the given function.
pub fn get_value_with<T, E: Into<Box<dyn std::error::Error + Send + Sync>>>(
&self,
name: impl ToConfigNamePath,
convert: impl FnOnce(ConfigValue) -> Result<T, E>,
) -> Result<T, ConfigGetError> {
self.config.get_value_with(name, convert)
}
/// Looks up sub table by `name`.
///
/// Use `table_keys(prefix)` and `get([prefix, key])` instead if table
/// values have to be converted to non-generic value type.
pub fn get_table(&self, name: impl ToConfigNamePath) -> Result<ConfigTable, ConfigGetError> {
self.config.get_table(name)
}
/// Returns iterator over sub table keys at `name`.
pub fn table_keys(&self, name: impl ToConfigNamePath) -> impl Iterator<Item = &str> {
self.config.table_keys(name)
}
}
/// This Rng uses interior mutability to allow generating random values using an
/// immutable reference. It also fixes a specific seedable RNG for
/// reproducibility.
#[derive(Debug)]
pub struct JJRng(Mutex<ChaCha20Rng>);
impl JJRng {
pub fn new_change_id(&self, length: usize) -> ChangeId {
let mut rng = self.0.lock().unwrap();
let random_bytes = (0..length).map(|_| rng.random::<u8>()).collect();
ChangeId::new(random_bytes)
}
/// Creates a new RNGs. Could be made public, but we'd like to encourage all
/// RNGs references to point to the same RNG.
fn new(seed: Option<u64>) -> Self {
Self(Mutex::new(Self::internal_rng_from_seed(seed)))
}
fn internal_rng_from_seed(seed: Option<u64>) -> ChaCha20Rng {
match seed {
Some(seed) => ChaCha20Rng::seed_from_u64(seed),
None => ChaCha20Rng::from_os_rng(),
}
}
}
/// A size in bytes optionally formatted/serialized with binary prefixes
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub struct HumanByteSize(pub u64);
impl std::fmt::Display for HumanByteSize {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let (value, prefix) = binary_prefix(self.0 as f32);
write!(f, "{value:.1}{prefix}B")
}
}
impl FromStr for HumanByteSize {
type Err = &'static str;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.parse() {
Ok(bytes) => Ok(Self(bytes)),
Err(_) => {
let bytes = parse_human_byte_size(s)?;
Ok(Self(bytes))
}
}
}
}
impl TryFrom<ConfigValue> for HumanByteSize {
type Error = &'static str;
fn try_from(value: ConfigValue) -> Result<Self, Self::Error> {
if let Some(n) = value.as_integer() {
let n = u64::try_from(n).map_err(|_| "Integer out of range")?;
Ok(Self(n))
} else if let Some(s) = value.as_str() {
s.parse()
} else {
Err("Expected a positive integer or a string in '<number><unit>' form")
}
}
}
fn parse_human_byte_size(v: &str) -> Result<u64, &'static str> {
let digit_end = v.find(|c: char| !c.is_ascii_digit()).unwrap_or(v.len());
if digit_end == 0 {
return Err("must start with a number");
}
let (digits, trailing) = v.split_at(digit_end);
let exponent = match trailing.trim_start() {
"" | "B" => 0,
unit => {
const PREFIXES: [char; 8] = ['K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'];
let Some(prefix) = PREFIXES.iter().position(|&x| unit.starts_with(x)) else {
return Err("unrecognized unit prefix");
};
let ("" | "B" | "i" | "iB") = &unit[1..] else {
return Err("unrecognized unit");
};
prefix as u32 + 1
}
};
// A string consisting only of base 10 digits is either a valid u64 or really
// huge.
let factor = digits.parse::<u64>().unwrap_or(u64::MAX);
Ok(factor.saturating_mul(1024u64.saturating_pow(exponent)))
}
#[cfg(test)]
mod tests {
use assert_matches::assert_matches;
use super::*;
#[test]
fn byte_size_parse() {
assert_eq!(parse_human_byte_size("0"), Ok(0));
assert_eq!(parse_human_byte_size("42"), Ok(42));
assert_eq!(parse_human_byte_size("42B"), Ok(42));
assert_eq!(parse_human_byte_size("42 B"), Ok(42));
assert_eq!(parse_human_byte_size("42K"), Ok(42 * 1024));
assert_eq!(parse_human_byte_size("42 K"), Ok(42 * 1024));
assert_eq!(parse_human_byte_size("42 KB"), Ok(42 * 1024));
assert_eq!(parse_human_byte_size("42 KiB"), Ok(42 * 1024));
assert_eq!(
parse_human_byte_size("42 LiB"),
Err("unrecognized unit prefix")
);
assert_eq!(parse_human_byte_size("42 KiC"), Err("unrecognized unit"));
assert_eq!(parse_human_byte_size("42 KC"), Err("unrecognized unit"));
assert_eq!(
parse_human_byte_size("KiB"),
Err("must start with a number")
);
assert_eq!(parse_human_byte_size(""), Err("must start with a number"));
}
#[test]
fn byte_size_from_config_value() {
assert_eq!(
HumanByteSize::try_from(ConfigValue::from(42)).unwrap(),
HumanByteSize(42)
);
assert_eq!(
HumanByteSize::try_from(ConfigValue::from("42K")).unwrap(),
HumanByteSize(42 * 1024)
);
assert_matches!(
HumanByteSize::try_from(ConfigValue::from(-1)),
Err("Integer out of range")
);
}
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/refs.rs | lib/src/refs.rs | // Copyright 2021 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use itertools::EitherOrBoth;
use crate::backend::CommitId;
use crate::index::Index;
use crate::index::IndexResult;
use crate::iter_util::fallible_position;
use crate::merge::Merge;
use crate::merge::SameChange;
use crate::merge::trivial_merge;
use crate::op_store::RefTarget;
use crate::op_store::RemoteRef;
/// Compares `refs1` and `refs2` targets, yields entry if they differ.
///
/// `refs1` and `refs2` must be sorted by `K`.
pub fn diff_named_ref_targets<'a, 'b, K: Ord>(
refs1: impl IntoIterator<Item = (K, &'a RefTarget)>,
refs2: impl IntoIterator<Item = (K, &'b RefTarget)>,
) -> impl Iterator<Item = (K, (&'a RefTarget, &'b RefTarget))> {
iter_named_pairs(
refs1,
refs2,
|| RefTarget::absent_ref(),
|| RefTarget::absent_ref(),
)
.filter(|(_, (target1, target2))| target1 != target2)
}
/// Compares remote `refs1` and `refs2` pairs, yields entry if they differ.
///
/// `refs1` and `refs2` must be sorted by `K`.
pub fn diff_named_remote_refs<'a, 'b, K: Ord>(
refs1: impl IntoIterator<Item = (K, &'a RemoteRef)>,
refs2: impl IntoIterator<Item = (K, &'b RemoteRef)>,
) -> impl Iterator<Item = (K, (&'a RemoteRef, &'b RemoteRef))> {
iter_named_pairs(
refs1,
refs2,
|| RemoteRef::absent_ref(),
|| RemoteRef::absent_ref(),
)
.filter(|(_, (ref1, ref2))| ref1 != ref2)
}
/// Iterates local `refs1` and remote `refs2` pairs by name.
///
/// `refs1` and `refs2` must be sorted by `K`.
pub fn iter_named_local_remote_refs<'a, 'b, K: Ord>(
refs1: impl IntoIterator<Item = (K, &'a RefTarget)>,
refs2: impl IntoIterator<Item = (K, &'b RemoteRef)>,
) -> impl Iterator<Item = (K, (&'a RefTarget, &'b RemoteRef))> {
iter_named_pairs(
refs1,
refs2,
|| RefTarget::absent_ref(),
|| RemoteRef::absent_ref(),
)
}
/// Compares `ids1` and `ids2` commit ids, yields entry if they differ.
///
/// `ids1` and `ids2` must be sorted by `K`.
pub fn diff_named_commit_ids<'a, 'b, K: Ord>(
ids1: impl IntoIterator<Item = (K, &'a CommitId)>,
ids2: impl IntoIterator<Item = (K, &'b CommitId)>,
) -> impl Iterator<Item = (K, (Option<&'a CommitId>, Option<&'b CommitId>))> {
iter_named_pairs(
ids1.into_iter().map(|(k, v)| (k, Some(v))),
ids2.into_iter().map(|(k, v)| (k, Some(v))),
|| None,
|| None,
)
.filter(|(_, (target1, target2))| target1 != target2)
}
fn iter_named_pairs<K: Ord, V1, V2>(
refs1: impl IntoIterator<Item = (K, V1)>,
refs2: impl IntoIterator<Item = (K, V2)>,
absent_ref1: impl Fn() -> V1,
absent_ref2: impl Fn() -> V2,
) -> impl Iterator<Item = (K, (V1, V2))> {
itertools::merge_join_by(refs1, refs2, |(name1, _), (name2, _)| name1.cmp(name2)).map(
move |entry| match entry {
EitherOrBoth::Both((name, target1), (_, target2)) => (name, (target1, target2)),
EitherOrBoth::Left((name, target1)) => (name, (target1, absent_ref2())),
EitherOrBoth::Right((name, target2)) => (name, (absent_ref1(), target2)),
},
)
}
pub fn merge_ref_targets(
index: &dyn Index,
left: &RefTarget,
base: &RefTarget,
right: &RefTarget,
) -> IndexResult<RefTarget> {
if let Some(&resolved) = trivial_merge(&[left, base, right], SameChange::Accept) {
return Ok(resolved.clone());
}
let mut merge = Merge::from_vec(vec![
left.as_merge().clone(),
base.as_merge().clone(),
right.as_merge().clone(),
])
.flatten()
.simplify();
// Suppose left = [A - C + B], base = [B], right = [A], the merge result is
// [A - C + A], which can now be trivially resolved.
if let Some(resolved) = merge.resolve_trivial(SameChange::Accept) {
Ok(RefTarget::resolved(resolved.clone()))
} else {
merge_ref_targets_non_trivial(index, &mut merge)?;
// TODO: Maybe better to try resolve_trivial() again, but the result is
// unreliable since merge_ref_targets_non_trivial() is order dependent.
Ok(RefTarget::from_merge(merge))
}
}
pub fn merge_remote_refs(
index: &dyn Index,
left: &RemoteRef,
base: &RemoteRef,
right: &RemoteRef,
) -> IndexResult<RemoteRef> {
// Just merge target and state fields separately. Strictly speaking, merging
// target-only change and state-only change shouldn't automatically mark the
// new target as tracking. However, many faulty merges will end up in local
// or remote target conflicts (since fast-forwardable move can be safely
// "tracked"), and the conflicts will require user intervention anyway. So
// there wouldn't be much reason to handle these merges precisely.
let target = merge_ref_targets(index, &left.target, &base.target, &right.target)?;
// Merged state shouldn't conflict atm since we only have two states, but if
// it does, keep the original state. The choice is arbitrary.
let state = *trivial_merge(&[left.state, base.state, right.state], SameChange::Accept)
.unwrap_or(&base.state);
Ok(RemoteRef { target, state })
}
fn merge_ref_targets_non_trivial(
index: &dyn Index,
conflict: &mut Merge<Option<CommitId>>,
) -> IndexResult<()> {
while let Some((remove_index, add_index)) = find_pair_to_remove(index, conflict)? {
conflict.swap_remove(remove_index, add_index);
}
Ok(())
}
fn find_pair_to_remove(
index: &dyn Index,
conflict: &Merge<Option<CommitId>>,
) -> IndexResult<Option<(usize, usize)>> {
// If a "remove" is an ancestor of two different "adds" and one of the
// "adds" is an ancestor of the other, then pick the descendant.
for (add_index1, add1) in conflict.adds().enumerate() {
for (add_index2, add2) in conflict.adds().enumerate().skip(add_index1 + 1) {
// TODO: Instead of relying on the list order, maybe ((add1, add2), remove)
// combination should be somehow weighted?
let (add_index, add_id) = match (add1, add2) {
(Some(id1), Some(id2)) if id1 == id2 => (add_index1, id1),
(Some(id1), Some(id2)) if index.is_ancestor(id1, id2)? => (add_index1, id1),
(Some(id1), Some(id2)) if index.is_ancestor(id2, id1)? => (add_index2, id2),
_ => continue,
};
if let Some(remove_index) =
fallible_position(conflict.removes(), |remove| match remove {
Some(id) => index.is_ancestor(id, add_id),
None => Ok(true), // Absent ref can be considered a root
})?
{
return Ok(Some((remove_index, add_index)));
}
}
}
Ok(None)
}
/// Pair of local and remote targets.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct LocalAndRemoteRef<'a> {
pub local_target: &'a RefTarget,
pub remote_ref: &'a RemoteRef,
}
#[derive(Debug, PartialEq, Eq, Clone, Hash)]
pub struct BookmarkPushUpdate {
pub old_target: Option<CommitId>,
pub new_target: Option<CommitId>,
}
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum BookmarkPushAction {
Update(BookmarkPushUpdate),
AlreadyMatches,
LocalConflicted,
RemoteConflicted,
RemoteUntracked,
}
/// Figure out what changes (if any) need to be made to the remote when pushing
/// this bookmark.
pub fn classify_bookmark_push_action(targets: LocalAndRemoteRef) -> BookmarkPushAction {
let local_target = targets.local_target;
let remote_target = targets.remote_ref.tracked_target();
if local_target == remote_target {
BookmarkPushAction::AlreadyMatches
} else if local_target.has_conflict() {
BookmarkPushAction::LocalConflicted
} else if remote_target.has_conflict() {
BookmarkPushAction::RemoteConflicted
} else if targets.remote_ref.is_present() && !targets.remote_ref.is_tracked() {
BookmarkPushAction::RemoteUntracked
} else {
BookmarkPushAction::Update(BookmarkPushUpdate {
old_target: remote_target.as_normal().cloned(),
new_target: local_target.as_normal().cloned(),
})
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::op_store::RemoteRefState;
fn new_remote_ref(target: RefTarget) -> RemoteRef {
RemoteRef {
target,
state: RemoteRefState::New,
}
}
fn tracked_remote_ref(target: RefTarget) -> RemoteRef {
RemoteRef {
target,
state: RemoteRefState::Tracked,
}
}
#[test]
fn test_classify_bookmark_push_action_unchanged() {
let commit_id1 = CommitId::from_hex("11");
let targets = LocalAndRemoteRef {
local_target: &RefTarget::normal(commit_id1.clone()),
remote_ref: &tracked_remote_ref(RefTarget::normal(commit_id1)),
};
assert_eq!(
classify_bookmark_push_action(targets),
BookmarkPushAction::AlreadyMatches
);
}
#[test]
fn test_classify_bookmark_push_action_added() {
let commit_id1 = CommitId::from_hex("11");
let targets = LocalAndRemoteRef {
local_target: &RefTarget::normal(commit_id1.clone()),
remote_ref: RemoteRef::absent_ref(),
};
assert_eq!(
classify_bookmark_push_action(targets),
BookmarkPushAction::Update(BookmarkPushUpdate {
old_target: None,
new_target: Some(commit_id1),
})
);
}
#[test]
fn test_classify_bookmark_push_action_removed() {
let commit_id1 = CommitId::from_hex("11");
let targets = LocalAndRemoteRef {
local_target: RefTarget::absent_ref(),
remote_ref: &tracked_remote_ref(RefTarget::normal(commit_id1.clone())),
};
assert_eq!(
classify_bookmark_push_action(targets),
BookmarkPushAction::Update(BookmarkPushUpdate {
old_target: Some(commit_id1),
new_target: None,
})
);
}
#[test]
fn test_classify_bookmark_push_action_updated() {
let commit_id1 = CommitId::from_hex("11");
let commit_id2 = CommitId::from_hex("22");
let targets = LocalAndRemoteRef {
local_target: &RefTarget::normal(commit_id2.clone()),
remote_ref: &tracked_remote_ref(RefTarget::normal(commit_id1.clone())),
};
assert_eq!(
classify_bookmark_push_action(targets),
BookmarkPushAction::Update(BookmarkPushUpdate {
old_target: Some(commit_id1),
new_target: Some(commit_id2),
})
);
}
#[test]
fn test_classify_bookmark_push_action_removed_untracked() {
// This is not RemoteUntracked error since non-tracking remote bookmarks
// have no relation to local bookmarks, and there's nothing to push.
let commit_id1 = CommitId::from_hex("11");
let targets = LocalAndRemoteRef {
local_target: RefTarget::absent_ref(),
remote_ref: &new_remote_ref(RefTarget::normal(commit_id1.clone())),
};
assert_eq!(
classify_bookmark_push_action(targets),
BookmarkPushAction::AlreadyMatches
);
}
#[test]
fn test_classify_bookmark_push_action_updated_untracked() {
let commit_id1 = CommitId::from_hex("11");
let commit_id2 = CommitId::from_hex("22");
let targets = LocalAndRemoteRef {
local_target: &RefTarget::normal(commit_id2.clone()),
remote_ref: &new_remote_ref(RefTarget::normal(commit_id1.clone())),
};
assert_eq!(
classify_bookmark_push_action(targets),
BookmarkPushAction::RemoteUntracked
);
}
#[test]
fn test_classify_bookmark_push_action_local_conflicted() {
let commit_id1 = CommitId::from_hex("11");
let commit_id2 = CommitId::from_hex("22");
let targets = LocalAndRemoteRef {
local_target: &RefTarget::from_legacy_form([], [commit_id1.clone(), commit_id2]),
remote_ref: &tracked_remote_ref(RefTarget::normal(commit_id1)),
};
assert_eq!(
classify_bookmark_push_action(targets),
BookmarkPushAction::LocalConflicted
);
}
#[test]
fn test_classify_bookmark_push_action_remote_conflicted() {
let commit_id1 = CommitId::from_hex("11");
let commit_id2 = CommitId::from_hex("22");
let targets = LocalAndRemoteRef {
local_target: &RefTarget::normal(commit_id1.clone()),
remote_ref: &tracked_remote_ref(RefTarget::from_legacy_form(
[],
[commit_id1, commit_id2],
)),
};
assert_eq!(
classify_bookmark_push_action(targets),
BookmarkPushAction::RemoteConflicted
);
}
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/workspace.rs | lib/src/workspace.rs | // Copyright 2021 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::collections::HashMap;
use std::fs;
use std::io;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use pollster::FutureExt as _;
use thiserror::Error;
use crate::backend::BackendInitError;
use crate::commit::Commit;
use crate::file_util;
use crate::file_util::BadPathEncoding;
use crate::file_util::IoResultExt as _;
use crate::file_util::PathError;
use crate::local_working_copy::LocalWorkingCopy;
use crate::local_working_copy::LocalWorkingCopyFactory;
use crate::merged_tree::MergedTree;
use crate::op_heads_store::OpHeadsStoreError;
use crate::op_store::OperationId;
use crate::ref_name::WorkspaceName;
use crate::ref_name::WorkspaceNameBuf;
use crate::repo::BackendInitializer;
use crate::repo::CheckOutCommitError;
use crate::repo::IndexStoreInitializer;
use crate::repo::OpHeadsStoreInitializer;
use crate::repo::OpStoreInitializer;
use crate::repo::ReadonlyRepo;
use crate::repo::Repo as _;
use crate::repo::RepoInitError;
use crate::repo::RepoLoader;
use crate::repo::StoreFactories;
use crate::repo::StoreLoadError;
use crate::repo::SubmoduleStoreInitializer;
use crate::repo::read_store_type;
use crate::settings::UserSettings;
use crate::signing::SignInitError;
use crate::signing::Signer;
use crate::simple_backend::SimpleBackend;
use crate::transaction::TransactionCommitError;
use crate::working_copy::CheckoutError;
use crate::working_copy::CheckoutStats;
use crate::working_copy::LockedWorkingCopy;
use crate::working_copy::WorkingCopy;
use crate::working_copy::WorkingCopyFactory;
use crate::working_copy::WorkingCopyStateError;
/// Errors that can occur while initializing a new workspace (and possibly a
/// new repo) on disk.
#[derive(Error, Debug)]
pub enum WorkspaceInitError {
    /// The target directory already contains a `.jj/` directory.
    #[error("The destination repo ({0}) already exists")]
    DestinationExists(PathBuf),
    /// The shared repo path could not be serialized into `.jj/repo`.
    #[error("Repo path could not be encoded")]
    EncodeRepoPath(#[source] BadPathEncoding),
    #[error(transparent)]
    CheckOutCommit(#[from] CheckOutCommitError),
    #[error(transparent)]
    WorkingCopyState(#[from] WorkingCopyStateError),
    #[error(transparent)]
    Path(#[from] PathError),
    // No #[from] here: this variant is constructed manually when mapping
    // RepoInitError in init_with_factories().
    #[error(transparent)]
    OpHeadsStore(OpHeadsStoreError),
    #[error(transparent)]
    Backend(#[from] BackendInitError),
    #[error(transparent)]
    SignInit(#[from] SignInitError),
    #[error(transparent)]
    TransactionCommit(#[from] TransactionCommitError),
}
/// Errors that can occur while loading an existing workspace from disk.
#[derive(Error, Debug)]
pub enum WorkspaceLoadError {
    /// The `.jj/repo` redirect points at a directory that no longer exists.
    #[error("The repo appears to no longer be at {0}")]
    RepoDoesNotExist(PathBuf),
    /// The given path contains no `.jj/` directory.
    #[error("There is no Jujutsu repo in {0}")]
    NoWorkspaceHere(PathBuf),
    #[error("Cannot read the repo")]
    StoreLoadError(#[from] StoreLoadError),
    /// The contents of the `.jj/repo` redirect file could not be decoded as a
    /// path.
    #[error("Repo path could not be decoded")]
    DecodeRepoPath(#[source] BadPathEncoding),
    #[error(transparent)]
    WorkingCopyState(#[from] WorkingCopyStateError),
    #[error(transparent)]
    Path(#[from] PathError),
}
/// The combination of a repo and a working copy.
///
/// Represents the combination of a repo and working copy, i.e. what's typically
/// the .jj/ directory and its parent. See
/// <https://github.com/jj-vcs/jj/blob/main/docs/working-copy.md#workspaces>
/// for more information.
pub struct Workspace {
    // Path to the workspace root (typically the parent of a .jj/ directory), which is where
    // working copy files live.
    workspace_root: PathBuf,
    // Path to the repo directory (typically `.jj/repo`, but possibly a
    // directory shared with another workspace via a `.jj/repo` redirect file).
    repo_path: PathBuf,
    // Knows how to load `ReadonlyRepo` instances from `repo_path`.
    repo_loader: RepoLoader,
    // Working-copy implementation managing the files under `workspace_root`.
    working_copy: Box<dyn WorkingCopy>,
}
/// Creates the `.jj` directory under `workspace_root`, reporting a
/// pre-existing directory as `DestinationExists`.
fn create_jj_dir(workspace_root: &Path) -> Result<PathBuf, WorkspaceInitError> {
    let jj_dir = workspace_root.join(".jj");
    if let Err(err) = std::fs::create_dir(&jj_dir).context(&jj_dir) {
        // An already-existing .jj/ means this is already a workspace; any
        // other I/O failure is propagated as a path error.
        if err.source.kind() == io::ErrorKind::AlreadyExists {
            Err(WorkspaceInitError::DestinationExists(jj_dir))
        } else {
            Err(err.into())
        }
    } else {
        Ok(jj_dir)
    }
}
/// Creates the `.jj/working_copy` state directory, records the new workspace
/// in an operation by checking out the root commit, and initializes the
/// working-copy state.
///
/// Returns the new working copy together with the repo as of the operation
/// that added the workspace.
fn init_working_copy(
    repo: &Arc<ReadonlyRepo>,
    workspace_root: &Path,
    jj_dir: &Path,
    working_copy_factory: &dyn WorkingCopyFactory,
    workspace_name: WorkspaceNameBuf,
) -> Result<(Box<dyn WorkingCopy>, Arc<ReadonlyRepo>), WorkspaceInitError> {
    let working_copy_state_path = jj_dir.join("working_copy");
    std::fs::create_dir(&working_copy_state_path).context(&working_copy_state_path)?;
    // Register the workspace in the repo by checking out the root commit
    // under the new workspace name.
    let mut tx = repo.start_transaction();
    tx.repo_mut()
        .check_out(workspace_name.clone(), &repo.store().root_commit())?;
    let repo = tx.commit(format!("add workspace '{}'", workspace_name.as_symbol()))?;
    let working_copy = working_copy_factory.init_working_copy(
        repo.store().clone(),
        workspace_root.to_path_buf(),
        working_copy_state_path.clone(),
        repo.op_id().clone(),
        workspace_name,
        repo.settings(),
    )?;
    // Persist which working-copy implementation owns this state directory so
    // loading can pick the matching factory (read back by
    // DefaultWorkspaceLoader::get_working_copy_type()).
    let working_copy_type_path = working_copy_state_path.join("type");
    fs::write(&working_copy_type_path, working_copy.name()).context(&working_copy_type_path)?;
    Ok((working_copy, repo))
}
impl Workspace {
    /// Wraps an already-loaded repo and working copy, canonicalizing
    /// `workspace_root` first so later path comparisons are stable.
    pub fn new(
        workspace_root: &Path,
        repo_path: PathBuf,
        working_copy: Box<dyn WorkingCopy>,
        repo_loader: RepoLoader,
    ) -> Result<Self, PathError> {
        let workspace_root = dunce::canonicalize(workspace_root).context(workspace_root)?;
        Ok(Self::new_no_canonicalize(
            workspace_root,
            repo_path,
            working_copy,
            repo_loader,
        ))
    }

    /// Like [`Workspace::new`], but trusts the caller to pass an
    /// already-canonicalized `workspace_root`.
    pub fn new_no_canonicalize(
        workspace_root: PathBuf,
        repo_path: PathBuf,
        working_copy: Box<dyn WorkingCopy>,
        repo_loader: RepoLoader,
    ) -> Self {
        Self {
            workspace_root,
            repo_path,
            repo_loader,
            working_copy,
        }
    }

    /// Initializes a workspace with a new repo backed by [`SimpleBackend`].
    pub fn init_simple(
        user_settings: &UserSettings,
        workspace_root: &Path,
    ) -> Result<(Self, Arc<ReadonlyRepo>), WorkspaceInitError> {
        let backend_initializer: &BackendInitializer =
            &|_settings, store_path| Ok(Box::new(SimpleBackend::init(store_path)));
        let signer = Signer::from_settings(user_settings)?;
        Self::init_with_backend(user_settings, workspace_root, backend_initializer, signer)
    }

    /// Initializes a workspace with a new Git backend and bare Git repo in
    /// `.jj/repo/store/git`.
    #[cfg(feature = "git")]
    pub fn init_internal_git(
        user_settings: &UserSettings,
        workspace_root: &Path,
    ) -> Result<(Self, Arc<ReadonlyRepo>), WorkspaceInitError> {
        let backend_initializer: &BackendInitializer = &|settings, store_path| {
            Ok(Box::new(crate::git_backend::GitBackend::init_internal(
                settings, store_path,
            )?))
        };
        let signer = Signer::from_settings(user_settings)?;
        Self::init_with_backend(user_settings, workspace_root, backend_initializer, signer)
    }

    /// Initializes a workspace with a new Git backend and Git repo that shares
    /// the same working copy.
    #[cfg(feature = "git")]
    pub fn init_colocated_git(
        user_settings: &UserSettings,
        workspace_root: &Path,
    ) -> Result<(Self, Arc<ReadonlyRepo>), WorkspaceInitError> {
        let backend_initializer = |settings: &UserSettings,
                                   store_path: &Path|
         -> Result<Box<dyn crate::backend::Backend>, _> {
            // TODO: Clean up path normalization. store_path is canonicalized by
            // ReadonlyRepo::init(). workspace_root will be canonicalized by
            // Workspace::new(), but it's not yet here.
            //
            // Store the workspace root relative to the store so the whole
            // workspace can be moved without breaking; fall back to the raw
            // path if canonicalization fails.
            let store_relative_workspace_root =
                if let Ok(workspace_root) = dunce::canonicalize(workspace_root) {
                    crate::file_util::relative_path(store_path, &workspace_root)
                } else {
                    workspace_root.to_owned()
                };
            let backend = crate::git_backend::GitBackend::init_colocated(
                settings,
                store_path,
                &store_relative_workspace_root,
            )?;
            Ok(Box::new(backend))
        };
        let signer = Signer::from_settings(user_settings)?;
        Self::init_with_backend(user_settings, workspace_root, &backend_initializer, signer)
    }

    /// Initializes a workspace with an existing Git repo at the specified path.
    ///
    /// The `git_repo_path` usually ends with `.git`. It's the path to the Git
    /// repo directory, not the working directory.
    #[cfg(feature = "git")]
    pub fn init_external_git(
        user_settings: &UserSettings,
        workspace_root: &Path,
        git_repo_path: &Path,
    ) -> Result<(Self, Arc<ReadonlyRepo>), WorkspaceInitError> {
        let backend_initializer = |settings: &UserSettings,
                                   store_path: &Path|
         -> Result<Box<dyn crate::backend::Backend>, _> {
            // If the git repo is inside the workspace, use a relative path to it so the
            // whole workspace can be moved without breaking.
            // TODO: Clean up path normalization. store_path is canonicalized by
            // ReadonlyRepo::init(). workspace_root will be canonicalized by
            // Workspace::new(), but it's not yet here.
            let store_relative_git_repo_path = match (
                dunce::canonicalize(workspace_root),
                crate::git_backend::canonicalize_git_repo_path(git_repo_path),
            ) {
                (Ok(workspace_root), Ok(git_repo_path))
                    if git_repo_path.starts_with(&workspace_root) =>
                {
                    crate::file_util::relative_path(store_path, &git_repo_path)
                }
                _ => git_repo_path.to_owned(),
            };
            let backend = crate::git_backend::GitBackend::init_external(
                settings,
                store_path,
                &store_relative_git_repo_path,
            )?;
            Ok(Box::new(backend))
        };
        let signer = Signer::from_settings(user_settings)?;
        Self::init_with_backend(user_settings, workspace_root, &backend_initializer, signer)
    }

    /// Initializes a new repo (using the given store initializers) together
    /// with a workspace around it.
    ///
    /// If any step fails, the partially-created `.jj` directory is removed
    /// before returning the error.
    #[expect(clippy::too_many_arguments)]
    pub fn init_with_factories(
        user_settings: &UserSettings,
        workspace_root: &Path,
        backend_initializer: &BackendInitializer,
        signer: Signer,
        op_store_initializer: &OpStoreInitializer,
        op_heads_store_initializer: &OpHeadsStoreInitializer,
        index_store_initializer: &IndexStoreInitializer,
        submodule_store_initializer: &SubmoduleStoreInitializer,
        working_copy_factory: &dyn WorkingCopyFactory,
        workspace_name: WorkspaceNameBuf,
    ) -> Result<(Self, Arc<ReadonlyRepo>), WorkspaceInitError> {
        let jj_dir = create_jj_dir(workspace_root)?;
        // The closure lets us clean up jj_dir on any failure below via
        // inspect_err without early-returning past the cleanup.
        (|| {
            let repo_dir = jj_dir.join("repo");
            std::fs::create_dir(&repo_dir).context(&repo_dir)?;
            let repo = ReadonlyRepo::init(
                user_settings,
                &repo_dir,
                backend_initializer,
                signer,
                op_store_initializer,
                op_heads_store_initializer,
                index_store_initializer,
                submodule_store_initializer,
            )
            .map_err(|repo_init_err| match repo_init_err {
                RepoInitError::Backend(err) => WorkspaceInitError::Backend(err),
                RepoInitError::OpHeadsStore(err) => WorkspaceInitError::OpHeadsStore(err),
                RepoInitError::Path(err) => WorkspaceInitError::Path(err),
            })?;
            let (working_copy, repo) = init_working_copy(
                &repo,
                workspace_root,
                &jj_dir,
                working_copy_factory,
                workspace_name,
            )?;
            let repo_loader = repo.loader().clone();
            let workspace = Self::new(workspace_root, repo_dir, working_copy, repo_loader)?;
            Ok((workspace, repo))
        })()
        .inspect_err(|_err| {
            // Best-effort cleanup; the original error is what matters.
            std::fs::remove_dir_all(jj_dir).ok();
        })
    }

    /// Convenience wrapper around [`Workspace::init_with_factories`] using the
    /// default store implementations, the default working-copy factory, and
    /// the default workspace name.
    pub fn init_with_backend(
        user_settings: &UserSettings,
        workspace_root: &Path,
        backend_initializer: &BackendInitializer,
        signer: Signer,
    ) -> Result<(Self, Arc<ReadonlyRepo>), WorkspaceInitError> {
        Self::init_with_factories(
            user_settings,
            workspace_root,
            backend_initializer,
            signer,
            ReadonlyRepo::default_op_store_initializer(),
            ReadonlyRepo::default_op_heads_store_initializer(),
            ReadonlyRepo::default_index_store_initializer(),
            ReadonlyRepo::default_submodule_store_initializer(),
            &*default_working_copy_factory(),
            WorkspaceName::DEFAULT.to_owned(),
        )
    }

    /// Initializes a new workspace that shares the existing repo at
    /// `repo_path`.
    ///
    /// `.jj/repo` is written as a file containing the canonicalized path to
    /// the shared repo directory instead of being a directory itself.
    pub fn init_workspace_with_existing_repo(
        workspace_root: &Path,
        repo_path: &Path,
        repo: &Arc<ReadonlyRepo>,
        working_copy_factory: &dyn WorkingCopyFactory,
        workspace_name: WorkspaceNameBuf,
    ) -> Result<(Self, Arc<ReadonlyRepo>), WorkspaceInitError> {
        let jj_dir = create_jj_dir(workspace_root)?;
        let repo_dir = dunce::canonicalize(repo_path).context(repo_path)?;
        let repo_dir_bytes =
            file_util::path_to_bytes(&repo_dir).map_err(WorkspaceInitError::EncodeRepoPath)?;
        let repo_file_path = jj_dir.join("repo");
        fs::write(&repo_file_path, repo_dir_bytes).context(&repo_file_path)?;
        let (working_copy, repo) = init_working_copy(
            repo,
            workspace_root,
            &jj_dir,
            working_copy_factory,
            workspace_name,
        )?;
        let workspace = Self::new(
            workspace_root,
            repo_dir,
            working_copy,
            repo.loader().clone(),
        )?;
        Ok((workspace, repo))
    }

    /// Loads an existing workspace from `workspace_path` using the default
    /// file-system loader.
    pub fn load(
        user_settings: &UserSettings,
        workspace_path: &Path,
        store_factories: &StoreFactories,
        working_copy_factories: &WorkingCopyFactories,
    ) -> Result<Self, WorkspaceLoadError> {
        let loader = DefaultWorkspaceLoader::new(workspace_path)?;
        let workspace = loader.load(user_settings, store_factories, working_copy_factories)?;
        Ok(workspace)
    }

    /// The directory containing the working-copy files.
    pub fn workspace_root(&self) -> &Path {
        &self.workspace_root
    }

    /// The name under which this workspace is registered in the repo.
    pub fn workspace_name(&self) -> &WorkspaceName {
        self.working_copy.workspace_name()
    }

    /// Path to the (possibly shared) repo directory.
    pub fn repo_path(&self) -> &Path {
        &self.repo_path
    }

    pub fn repo_loader(&self) -> &RepoLoader {
        &self.repo_loader
    }

    /// Settings for this workspace.
    pub fn settings(&self) -> &UserSettings {
        self.repo_loader.settings()
    }

    pub fn working_copy(&self) -> &dyn WorkingCopy {
        self.working_copy.as_ref()
    }

    /// Locks the working copy for mutation; the returned [`LockedWorkspace`]
    /// installs the new state on `finish()`.
    pub fn start_working_copy_mutation(
        &mut self,
    ) -> Result<LockedWorkspace<'_>, WorkingCopyStateError> {
        let locked_wc = self.working_copy.start_mutation()?;
        Ok(LockedWorkspace {
            base: self,
            locked_wc,
        })
    }

    /// Updates the working copy to `commit`, failing with
    /// [`CheckoutError::ConcurrentCheckout`] if the on-disk working copy no
    /// longer matches `old_tree`.
    pub fn check_out(
        &mut self,
        operation_id: OperationId,
        old_tree: Option<&MergedTree>,
        commit: &Commit,
    ) -> Result<CheckoutStats, CheckoutError> {
        let mut locked_ws = self.start_working_copy_mutation()?;
        // Check if the current working-copy commit has changed on disk compared to what
        // the caller expected. It's safe to check out another commit
        // regardless, but it's probably not what the caller wanted, so we let
        // them know.
        if let Some(old_tree) = old_tree
            && old_tree.tree_ids_and_labels()
                != locked_ws.locked_wc().old_tree().tree_ids_and_labels()
        {
            return Err(CheckoutError::ConcurrentCheckout);
        }
        let stats = locked_ws.locked_wc().check_out(commit).block_on()?;
        locked_ws
            .finish(operation_id)
            .map_err(|err| CheckoutError::Other {
                message: "Failed to save the working copy state".to_string(),
                err: err.into(),
            })?;
        Ok(stats)
    }
}
/// A [`Workspace`] whose working copy is locked for mutation.
///
/// Created by [`Workspace::start_working_copy_mutation`]; call
/// [`LockedWorkspace::finish`] to commit the new working-copy state back into
/// the owning workspace.
pub struct LockedWorkspace<'a> {
    base: &'a mut Workspace,
    locked_wc: Box<dyn LockedWorkingCopy>,
}
impl LockedWorkspace<'_> {
    /// The locked working copy, for staging changes.
    pub fn locked_wc(&mut self) -> &mut dyn LockedWorkingCopy {
        self.locked_wc.as_mut()
    }

    /// Finalizes the mutation at `operation_id` and installs the resulting
    /// working copy back into the owning [`Workspace`].
    pub fn finish(self, operation_id: OperationId) -> Result<(), WorkingCopyStateError> {
        let new_wc = self.locked_wc.finish(operation_id).block_on()?;
        self.base.working_copy = new_wc;
        Ok(())
    }
}
/// Factory trait to build [`WorkspaceLoader`]s given the workspace root.
pub trait WorkspaceLoaderFactory {
    /// Creates a loader bound to `workspace_root`.
    fn create(&self, workspace_root: &Path)
        -> Result<Box<dyn WorkspaceLoader>, WorkspaceLoadError>;
}
/// Looks up the working-copy factory matching the type recorded in the
/// workspace's `.jj/working_copy/type` file.
///
/// Returns [`StoreLoadError::UnsupportedType`] when no registered factory
/// matches the recorded type.
pub fn get_working_copy_factory<'a>(
    workspace_loader: &dyn WorkspaceLoader,
    working_copy_factories: &'a WorkingCopyFactories,
) -> Result<&'a dyn WorkingCopyFactory, StoreLoadError> {
    let working_copy_type = workspace_loader.get_working_copy_type()?;
    if let Some(factory) = working_copy_factories.get(&working_copy_type) {
        Ok(factory.as_ref())
    } else {
        // The map borrow from `get()` has ended here, so the type string can
        // be moved into the error without cloning.
        Err(StoreLoadError::UnsupportedType {
            store: "working copy",
            store_type: working_copy_type,
        })
    }
}
/// Loader assigned to a specific workspace root that knows how to load a
/// [`Workspace`] object for that path.
pub trait WorkspaceLoader {
    /// The root of the [`Workspace`] to be loaded.
    fn workspace_root(&self) -> &Path;
    /// The path to the `repo/` dir for this [`Workspace`].
    fn repo_path(&self) -> &Path;
    /// Loads the specified [`Workspace`] with the provided factories.
    fn load(
        &self,
        user_settings: &UserSettings,
        store_factories: &StoreFactories,
        working_copy_factories: &WorkingCopyFactories,
    ) -> Result<Workspace, WorkspaceLoadError>;
    /// Returns the type identifier for the [`WorkingCopy`] trait in this
    /// [`Workspace`].
    fn get_working_copy_type(&self) -> Result<String, StoreLoadError>;
}
/// Builds the default workspace loader, which reads workspace state from the
/// file system.
pub struct DefaultWorkspaceLoaderFactory;
impl WorkspaceLoaderFactory for DefaultWorkspaceLoaderFactory {
    /// Builds a file-system-backed loader for the given workspace root.
    fn create(
        &self,
        workspace_root: &Path,
    ) -> Result<Box<dyn WorkspaceLoader>, WorkspaceLoadError> {
        let loader = DefaultWorkspaceLoader::new(workspace_root)?;
        Ok(Box::new(loader))
    }
}
/// Helps create a `Workspace` instance by reading `.jj/repo/` and
/// `.jj/working_copy/` from the file system.
#[derive(Clone, Debug)]
struct DefaultWorkspaceLoader {
    // Root directory of the workspace (the parent of `.jj/`).
    workspace_root: PathBuf,
    // Resolved repo directory (follows the `.jj/repo` redirect file if
    // present).
    repo_path: PathBuf,
    // The `.jj/working_copy` directory holding working-copy state.
    working_copy_state_path: PathBuf,
}
/// Maps a working-copy type name (as stored in `.jj/working_copy/type`) to the
/// factory that can initialize/load that implementation.
pub type WorkingCopyFactories = HashMap<String, Box<dyn WorkingCopyFactory>>;
impl DefaultWorkspaceLoader {
    /// Creates a loader for `workspace_root`, resolving where the repo
    /// directory actually lives.
    ///
    /// Fails if `workspace_root` has no `.jj/` directory, or if a `.jj/repo`
    /// redirect file points at a directory that no longer exists.
    pub fn new(workspace_root: &Path) -> Result<Self, WorkspaceLoadError> {
        let jj_dir = workspace_root.join(".jj");
        if !jj_dir.is_dir() {
            return Err(WorkspaceLoadError::NoWorkspaceHere(
                workspace_root.to_owned(),
            ));
        }
        let mut repo_dir = jj_dir.join("repo");
        // If .jj/repo is a file, then we interpret its contents as a relative path to
        // the actual repo directory (typically in another workspace).
        if repo_dir.is_file() {
            let buf = fs::read(&repo_dir).context(&repo_dir)?;
            let repo_path =
                file_util::path_from_bytes(&buf).map_err(WorkspaceLoadError::DecodeRepoPath)?;
            repo_dir = dunce::canonicalize(jj_dir.join(repo_path)).context(repo_path)?;
            if !repo_dir.is_dir() {
                return Err(WorkspaceLoadError::RepoDoesNotExist(repo_dir));
            }
        }
        let working_copy_state_path = jj_dir.join("working_copy");
        Ok(Self {
            workspace_root: workspace_root.to_owned(),
            repo_path: repo_dir,
            working_copy_state_path,
        })
    }
}
impl WorkspaceLoader for DefaultWorkspaceLoader {
    fn workspace_root(&self) -> &Path {
        &self.workspace_root
    }

    fn repo_path(&self) -> &Path {
        &self.repo_path
    }

    fn load(
        &self,
        user_settings: &UserSettings,
        store_factories: &StoreFactories,
        working_copy_factories: &WorkingCopyFactories,
    ) -> Result<Workspace, WorkspaceLoadError> {
        let repo_loader =
            RepoLoader::init_from_file_system(user_settings, &self.repo_path, store_factories)?;
        // Pick the working-copy implementation recorded in
        // `.jj/working_copy/type` and load its state.
        let working_copy_factory = get_working_copy_factory(self, working_copy_factories)?;
        let working_copy = working_copy_factory.load_working_copy(
            repo_loader.store().clone(),
            self.workspace_root.clone(),
            self.working_copy_state_path.clone(),
            user_settings,
        )?;
        let workspace = Workspace::new(
            &self.workspace_root,
            self.repo_path.clone(),
            working_copy,
            repo_loader,
        )?;
        Ok(workspace)
    }

    fn get_working_copy_type(&self) -> Result<String, StoreLoadError> {
        read_store_type("working copy", self.working_copy_state_path.join("type"))
    }
}
pub fn default_working_copy_factories() -> WorkingCopyFactories {
let mut factories = WorkingCopyFactories::new();
factories.insert(
LocalWorkingCopy::name().to_owned(),
Box::new(LocalWorkingCopyFactory {}),
);
factories
}
/// Returns the factory for the default (local) working-copy implementation.
pub fn default_working_copy_factory() -> Box<dyn WorkingCopyFactory> {
    let factory = LocalWorkingCopyFactory {};
    Box::new(factory)
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/git_subprocess.rs | lib/src/git_subprocess.rs | // Copyright 2025 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::io;
use std::io::BufReader;
use std::io::Read;
use std::num::NonZeroU32;
use std::path::PathBuf;
use std::process::Child;
use std::process::Command;
use std::process::Output;
use std::process::Stdio;
use std::thread;
use bstr::ByteSlice as _;
use itertools::Itertools as _;
use thiserror::Error;
use crate::git::FetchTagsOverride;
use crate::git::GitPushStats;
use crate::git::GitSubprocessOptions;
use crate::git::NegativeRefSpec;
use crate::git::Progress;
use crate::git::RefSpec;
use crate::git::RefToPush;
use crate::git::RemoteCallbacks;
use crate::git_backend::GitBackend;
use crate::ref_name::GitRefNameBuf;
use crate::ref_name::RefNameBuf;
use crate::ref_name::RemoteName;
// This is not the minimum required version, that would be 2.29.0, which
// introduced the `--no-write-fetch-head` option. However, that by itself
// is quite old and unsupported, so we don't want to encourage users to
// update to that.
//
// 2.40 still receives security patches (latest one was in Jan/2025)
/// Oldest Git version we advertise as supported in `UnsupportedGitOption`
/// error messages.
const MINIMUM_GIT_VERSION: &str = "2.40.4";
/// Error originating by a Git subprocess
#[derive(Error, Debug)]
pub enum GitSubprocessError {
    /// The remote named on the command line does not exist or is unreachable.
    #[error("Could not find repository at '{0}'")]
    NoSuchRepository(String),
    /// Spawning failed when `git` was looked up on the OS `PATH`.
    #[error("Could not execute the git process, found in the OS path '{path}'")]
    SpawnInPath {
        path: PathBuf,
        #[source]
        error: std::io::Error,
    },
    /// Spawning failed for an explicitly configured git executable path.
    #[error("Could not execute git process at specified path '{path}'")]
    Spawn {
        path: PathBuf,
        #[source]
        error: std::io::Error,
    },
    #[error("Failed to wait for the git process")]
    Wait(std::io::Error),
    /// The installed git is too old to understand an option we pass.
    #[error(
        "Git does not recognize required option: {0} (note: supported version is \
         {MINIMUM_GIT_VERSION})"
    )]
    UnsupportedGitOption(String),
    /// Any other git failure; carries the subprocess's stderr text.
    #[error("Git process failed: {0}")]
    External(String),
}
/// Context for creating Git subprocesses
pub(crate) struct GitSubprocessContext {
    // Path passed to `git --git-dir` for every spawned command.
    git_dir: PathBuf,
    // Executable path and extra environment applied to every spawned command.
    options: GitSubprocessOptions,
}
impl GitSubprocessContext {
    /// Creates a context that runs `git` commands against `git_dir` with the
    /// given subprocess options.
    pub(crate) fn new(git_dir: impl Into<PathBuf>, options: GitSubprocessOptions) -> Self {
        Self {
            git_dir: git_dir.into(),
            options,
        }
    }

    /// Convenience constructor taking the git dir from a [`GitBackend`].
    pub(crate) fn from_git_backend(
        git_backend: &GitBackend,
        options: GitSubprocessOptions,
    ) -> Self {
        Self::new(git_backend.git_repo_path(), options)
    }

    /// Create the Git command
    ///
    /// Sets up the flags, environment, and stdio shared by every invocation;
    /// callers add the subcommand-specific arguments. stdout is left for the
    /// caller to configure.
    fn create_command(&self) -> Command {
        let mut git_cmd = Command::new(&self.options.executable_path);
        // Hide console window on Windows (https://stackoverflow.com/a/60958956)
        #[cfg(windows)]
        {
            use std::os::windows::process::CommandExt as _;
            const CREATE_NO_WINDOW: u32 = 0x08000000;
            git_cmd.creation_flags(CREATE_NO_WINDOW);
        }
        // TODO: here we are passing the full path to the git_dir, which can lead to UNC
        // bugs in Windows. The ideal way to do this is to pass the workspace
        // root to Command::current_dir and then pass a relative path to the git
        // dir
        git_cmd
            // The gitconfig-controlled automated spawning of the macOS `fsmonitor--daemon`
            // can cause strange behavior with certain subprocess operations.
            // For example: https://github.com/jj-vcs/jj/issues/6440.
            //
            // Nothing we're doing in `jj` interacts with this daemon, so we force the
            // config to be false for subprocess operations in order to avoid these
            // interactions.
            //
            // In a colocated workspace, the daemon will still get started the first
            // time a `git` command is run manually if the gitconfigs are set up that way.
            .args(["-c", "core.fsmonitor=false"])
            // Avoids an error message when fetching repos with submodules if
            // user has `submodule.recurse` configured to true in their Git
            // config (#7565).
            .args(["-c", "submodule.recurse=false"])
            .arg("--git-dir")
            .arg(&self.git_dir)
            // Disable translation and other locale-dependent behavior so we can
            // parse the output. LC_ALL precedes LC_* and LANG.
            .env("LC_ALL", "C")
            .stdin(Stdio::null())
            .stderr(Stdio::piped());
        git_cmd.envs(&self.options.environment);
        git_cmd
    }

    /// Spawn the git command
    ///
    /// On failure, distinguishes "configured absolute path is broken"
    /// (`Spawn`) from "executable not found on the OS path" (`SpawnInPath`)
    /// so the user-facing message is actionable.
    fn spawn_cmd(&self, mut git_cmd: Command) -> Result<Child, GitSubprocessError> {
        tracing::debug!(cmd = ?git_cmd, "spawning a git subprocess");
        git_cmd.spawn().map_err(|error| {
            if self.options.executable_path.is_absolute() {
                GitSubprocessError::Spawn {
                    path: self.options.executable_path.clone(),
                    error,
                }
            } else {
                GitSubprocessError::SpawnInPath {
                    path: self.options.executable_path.clone(),
                    error,
                }
            }
        })
    }

    /// Perform a git fetch
    ///
    /// This returns a fully qualified ref that wasn't fetched successfully
    /// Note that git only returns one failed ref at a time
    pub(crate) fn spawn_fetch(
        &self,
        remote_name: &RemoteName,
        refspecs: &[RefSpec],
        negative_refspecs: &[NegativeRefSpec],
        callbacks: &mut RemoteCallbacks<'_>,
        depth: Option<NonZeroU32>,
        fetch_tags_override: Option<FetchTagsOverride>,
    ) -> Result<Option<String>, GitSubprocessError> {
        // Nothing to fetch; don't spawn a process at all.
        if refspecs.is_empty() {
            return Ok(None);
        }
        let mut command = self.create_command();
        command.stdout(Stdio::piped());
        // attempt to prune stale refs with --prune
        // --no-write-fetch-head ensures our request is invisible to other parties
        command.args(["fetch", "--prune", "--no-write-fetch-head"]);
        if callbacks.progress.is_some() {
            command.arg("--progress");
        }
        if let Some(d) = depth {
            command.arg(format!("--depth={d}"));
        }
        match fetch_tags_override {
            Some(FetchTagsOverride::AllTags) => {
                command.arg("--tags");
            }
            Some(FetchTagsOverride::NoTags) => {
                command.arg("--no-tags");
            }
            None => {}
        }
        command.arg("--").arg(remote_name.as_str());
        command.args(
            refspecs
                .iter()
                .map(|x| x.to_git_format())
                .chain(negative_refspecs.iter().map(|x| x.to_git_format())),
        );
        let output = wait_with_progress(self.spawn_cmd(command)?, callbacks)?;
        parse_git_fetch_output(output)
    }

    /// Prune particular branches
    pub(crate) fn spawn_branch_prune(
        &self,
        branches_to_prune: &[String],
    ) -> Result<(), GitSubprocessError> {
        // Nothing to prune; don't spawn a process at all.
        if branches_to_prune.is_empty() {
            return Ok(());
        }
        tracing::debug!(?branches_to_prune, "pruning branches");
        let mut command = self.create_command();
        command.stdout(Stdio::null());
        command.args(["branch", "--remotes", "--delete", "--"]);
        command.args(branches_to_prune);
        let output = wait_with_output(self.spawn_cmd(command)?)?;
        // we name the type to make sure that it is not meant to be used
        let () = parse_git_branch_prune_output(output)?;
        Ok(())
    }

    /// How we retrieve the remote's default branch:
    ///
    /// `git remote show <remote_name>`
    ///
    /// dumps a lot of information about the remote, with a line such as:
    /// `  HEAD branch: <default_branch>`
    pub(crate) fn spawn_remote_show(
        &self,
        remote_name: &RemoteName,
    ) -> Result<Option<RefNameBuf>, GitSubprocessError> {
        let mut command = self.create_command();
        command.stdout(Stdio::piped());
        command.args(["remote", "show", "--", remote_name.as_str()]);
        let output = wait_with_output(self.spawn_cmd(command)?)?;
        let output = parse_git_remote_show_output(output)?;
        // find the HEAD branch line in the output
        let maybe_branch = parse_git_remote_show_default_branch(&output.stdout)?;
        Ok(maybe_branch.map(Into::into))
    }

    /// Push references to git
    ///
    /// All pushes are forced, using --force-with-lease to perform a test&set
    /// operation on the remote repository
    ///
    /// Return tuple with
    /// 1. refs that failed to push
    /// 2. refs that succeeded to push
    pub(crate) fn spawn_push(
        &self,
        remote_name: &RemoteName,
        references: &[RefToPush],
        callbacks: &mut RemoteCallbacks<'_>,
    ) -> Result<GitPushStats, GitSubprocessError> {
        let mut command = self.create_command();
        command.stdout(Stdio::piped());
        // Currently jj does not support commit hooks, so we prevent git from running
        // them
        //
        // https://github.com/jj-vcs/jj/issues/3577 and https://github.com/jj-vcs/jj/issues/405
        // offer more context
        command.args(["push", "--porcelain", "--no-verify"]);
        if callbacks.progress.is_some() {
            command.arg("--progress");
        }
        command.args(
            references
                .iter()
                .map(|reference| format!("--force-with-lease={}", reference.to_git_lease())),
        );
        command.args(["--", remote_name.as_str()]);
        // with --force-with-lease we cannot have the forced refspec,
        // as it ignores the lease
        command.args(
            references
                .iter()
                .map(|r| r.refspec.to_git_format_not_forced()),
        );
        let output = wait_with_progress(self.spawn_cmd(command)?, callbacks)?;
        parse_git_push_output(output)
    }
}
/// Fallback error for git failures whose stderr we couldn't parse into a more
/// specific variant; carries the (lossily decoded) stderr text.
fn external_git_error(stderr: &[u8]) -> GitSubprocessError {
    let details = stderr.to_str_lossy();
    GitSubprocessError::External(format!("External git program failed:\n{details}"))
}
/// Parse no such remote errors output from git
///
/// Returns the remote that wasn't found, or `None` if stderr doesn't match.
///
/// Git prints a lot of things, but the first line is of the form:
/// `fatal: '<remote>' does not appear to be a git repository`
/// or
/// `fatal: '<remote>': Could not resolve host: invalid-remote`
fn parse_no_such_remote(stderr: &[u8]) -> Option<String> {
    let first_line = stderr.lines().next()?;
    // Strip the leading boilerplate up to the opening quote of the remote name.
    let after_prefix = first_line
        .strip_prefix(b"fatal: '")
        .or_else(|| first_line.strip_prefix(b"fatal: unable to access '"))?;
    // Then strip the trailing boilerplate; what remains is the remote name.
    let remote = after_prefix
        .strip_suffix(b"' does not appear to be a git repository")
        .or_else(|| after_prefix.strip_suffix(b"': Could not resolve host: invalid-remote"))?;
    Some(remote.to_str_lossy().into_owned())
}
/// Parse error from refspec not present on the remote
///
/// Returns `Some(ref)` that wasn't found by the remote, `None` otherwise.
///
/// On git fetch even though --prune is specified, if a particular
/// refspec is asked for but not present in the remote, git will error out.
/// Git only reports one such error at a time, so only the first line matters:
/// `fatal: couldn't find remote ref refs/heads/<ref>`
fn parse_no_remote_ref(stderr: &[u8]) -> Option<String> {
    let refname = stderr
        .lines()
        .next()?
        .strip_prefix(b"fatal: couldn't find remote ref ")?;
    Some(refname.to_str_lossy().into_owned())
}
/// Parse remote tracking branch not found
///
/// Returns the missing branch name when the error is detected.
///
/// If a branch is asked for but is not present, jj will detect it post-hoc,
/// so we want to ignore these particular git errors. The first line is:
/// `error: remote-tracking branch '<branch>' not found` (with or without a
/// trailing period, depending on git version).
fn parse_no_remote_tracking_branch(stderr: &[u8]) -> Option<String> {
    let rest = stderr
        .lines()
        .next()?
        .strip_prefix(b"error: remote-tracking branch '")?;
    let branch = rest
        .strip_suffix(b"' not found.")
        .or_else(|| rest.strip_suffix(b"' not found"))?;
    Some(branch.to_str_lossy().into_owned())
}
/// Parse unknown options
///
/// Return the unknown option
///
/// If a user is running a very old git version, our commands may fail.
/// We want to give a good error in this case.
fn parse_unknown_option(stderr: &[u8]) -> Option<String> {
    let first_line = stderr.lines().next()?;
    first_line
        .strip_prefix(b"unknown option: --")
        // or_else (not or) so the second prefix/suffix scan only runs when the
        // first form didn't match (clippy::or_fun_call).
        .or_else(|| {
            first_line
                .strip_prefix(b"error: unknown option `")
                .and_then(|s| s.strip_suffix(b"'"))
        })
        .map(|s| s.to_str_lossy().into())
}
// Interprets the outcome of `git fetch`.
//
// Returns the fully qualified ref that failed to fetch (git fetch only
// reports one error at a time), or None on success / ignorable errors.
fn parse_git_fetch_output(output: Output) -> Result<Option<String>, GitSubprocessError> {
    if output.status.success() {
        return Ok(None);
    }
    // There are some git errors we want to parse out; check the more specific
    // patterns first, falling back to a generic external error.
    let stderr = &output.stderr;
    if let Some(option) = parse_unknown_option(stderr) {
        Err(GitSubprocessError::UnsupportedGitOption(option))
    } else if let Some(remote) = parse_no_such_remote(stderr) {
        Err(GitSubprocessError::NoSuchRepository(remote))
    } else if let Some(refspec) = parse_no_remote_ref(stderr) {
        Ok(Some(refspec))
    } else if parse_no_remote_tracking_branch(stderr).is_some() {
        Ok(None)
    } else {
        Err(external_git_error(stderr))
    }
}
// Interprets the outcome of `git branch --remotes --delete`.
//
// A missing remote-tracking branch is not an error for us (jj detects that
// case itself); anything else unrecognized becomes an External error.
fn parse_git_branch_prune_output(output: Output) -> Result<(), GitSubprocessError> {
    if output.status.success() {
        return Ok(());
    }
    let stderr = &output.stderr;
    if let Some(option) = parse_unknown_option(stderr) {
        Err(GitSubprocessError::UnsupportedGitOption(option))
    } else if parse_no_remote_tracking_branch(stderr).is_some() {
        Ok(())
    } else {
        Err(external_git_error(stderr))
    }
}
// Interprets the outcome of `git remote show`, passing the output through on
// success so the caller can extract the default branch.
fn parse_git_remote_show_output(output: Output) -> Result<Output, GitSubprocessError> {
    if output.status.success() {
        return Ok(output);
    }
    let stderr = &output.stderr;
    if let Some(option) = parse_unknown_option(stderr) {
        Err(GitSubprocessError::UnsupportedGitOption(option))
    } else if let Some(remote) = parse_no_such_remote(stderr) {
        Err(GitSubprocessError::NoSuchRepository(remote))
    } else {
        Err(external_git_error(stderr))
    }
}
/// Extracts the default branch name from `git remote show` stdout.
///
/// Looks for the trimmed `HEAD branch: <name>` line and returns its last
/// whitespace-separated token. Returns `Ok(None)` when no such line exists or
/// when the remote reports `(unknown)`; errors if the name is not UTF-8.
fn parse_git_remote_show_default_branch(
    stdout: &[u8],
) -> Result<Option<String>, GitSubprocessError> {
    stdout
        .lines()
        .map(|x| x.trim())
        .find(|x| x.starts_with_str("HEAD branch:"))
        .inspect(|x| tracing::debug!(line = ?x.to_str_lossy(), "default branch"))
        // The branch name is the last space-separated token on the line.
        .and_then(|x| x.split_str(" ").last().map(|y| y.trim()))
        .filter(|branch_name| branch_name != b"(unknown)")
        .map(|branch_name| branch_name.to_str())
        .transpose()
        .map_err(|e| GitSubprocessError::External(format!("git remote output is not utf-8: {e:?}")))
        .map(|b| b.map(|x| x.to_string()))
}
// git-push porcelain has the following format (per line)
// `<flag>\t<from>:<to>\t<summary> (<reason>)`
//
// <flag> is one of:
//   ' ' for a successfully pushed fast-forward;
//   +  for a successful forced update
//   -  for a successfully deleted ref
//   *  for a successfully pushed new ref
//   !  for a ref that was rejected or failed to push; and
//   =  for a ref that was up to date and did not need pushing.
//
// <from>:<to> is the refspec
//
// <summary> is extra info (commit ranges or reason for rejected)
//
// <reason> is a human-readable explanation
fn parse_ref_pushes(stdout: &[u8]) -> Result<GitPushStats, GitSubprocessError> {
    // Porcelain output always starts with a "To <url>" header line.
    if !stdout.starts_with(b"To ") {
        return Err(GitSubprocessError::External(format!(
            "Git push output unfamiliar:\n{}",
            stdout.to_str_lossy()
        )));
    }
    let mut push_stats = GitPushStats::default();
    // Skip the "To <url>" header; porcelain output ends with a "Done" line.
    for (idx, line) in stdout
        .lines()
        .skip(1)
        .take_while(|line| line != b"Done")
        .enumerate()
    {
        tracing::debug!("response #{idx}: {}", line.to_str_lossy());
        // Each record is exactly three tab-separated fields.
        let [flag, reference, summary] = line.split_str("\t").collect_array().ok_or_else(|| {
            GitSubprocessError::External(format!(
                "Line #{idx} of git-push has unknown format: {}",
                line.to_str_lossy()
            ))
        })?;
        let full_refspec = reference
            .to_str()
            .map_err(|e| {
                format!(
                    "Line #{} of git-push has non-utf8 refspec {}: {}",
                    idx,
                    reference.to_str_lossy(),
                    e
                )
            })
            .map_err(GitSubprocessError::External)?;
        // We only care about the destination (<to>) side of the refspec.
        let reference: GitRefNameBuf = full_refspec
            .split_once(':')
            .map(|(_refname, reference)| reference.into())
            .ok_or_else(|| {
                GitSubprocessError::External(format!(
                    "Line #{idx} of git-push has full refspec without named ref: {full_refspec}"
                ))
            })?;
        match flag {
            // ' ' for a successfully pushed fast-forward;
            // + for a successful forced update
            // - for a successfully deleted ref
            // * for a successfully pushed new ref
            // = for a ref that was up to date and did not need pushing.
            b"+" | b"-" | b"*" | b"=" | b" " => {
                push_stats.pushed.push(reference);
            }
            // ! for a ref that was rejected or failed to push; and
            b"!" => {
                // Distinguish server-side rejections from client-side ones
                // (e.g. a failed lease); the parenthesized reason is optional.
                if let Some(reason) = summary.strip_prefix(b"[remote rejected]") {
                    let reason = reason
                        .strip_prefix(b" (")
                        .and_then(|r| r.strip_suffix(b")"))
                        .map(|x| x.to_str_lossy().into_owned());
                    push_stats.remote_rejected.push((reference, reason));
                } else {
                    let reason = summary
                        .split_once_str("]")
                        .and_then(|(_, reason)| reason.strip_prefix(b" ("))
                        .and_then(|r| r.strip_suffix(b")"))
                        .map(|x| x.to_str_lossy().into_owned());
                    push_stats.rejected.push((reference, reason));
                }
            }
            unknown => {
                return Err(GitSubprocessError::External(format!(
                    "Line #{} of git-push starts with an unknown flag '{}': '{}'",
                    idx,
                    unknown.to_str_lossy(),
                    line.to_str_lossy()
                )));
            }
        }
    }
    Ok(push_stats)
}
// On Ok, returns the per-ref push statistics parsed from the porcelain
// output: refs pushed, refs rejected locally, and refs rejected remotely.
fn parse_git_push_output(output: Output) -> Result<GitPushStats, GitSubprocessError> {
    // A successful push still reports per-ref results on stdout.
    if output.status.success() {
        return parse_ref_pushes(&output.stdout);
    }
    // Recognize specific failure modes from stderr first.
    if let Some(option) = parse_unknown_option(&output.stderr) {
        return Err(GitSubprocessError::UnsupportedGitOption(option));
    }
    if let Some(remote) = parse_no_such_remote(&output.stderr) {
        return Err(GitSubprocessError::NoSuchRepository(remote));
    }
    // A partial failure ("failed to push some refs") still produces
    // parseable per-ref porcelain output on stdout.
    let partial_failure = output
        .stderr
        .lines()
        .any(|line| line.starts_with(b"error: failed to push some refs to "));
    if partial_failure {
        parse_ref_pushes(&output.stdout)
    } else {
        Err(external_git_error(&output.stderr))
    }
}
/// Waits for the child process to exit and collects its captured output,
/// mapping wait failures to `GitSubprocessError::Wait`.
fn wait_with_output(child: Child) -> Result<Output, GitSubprocessError> {
    child.wait_with_output().map_err(GitSubprocessError::Wait)
}
/// Like `wait_with_output()`, but also emits sideband data through callback.
///
/// Git remotes can send custom messages on fetch and push, which the `git`
/// command prepends with `remote: `.
///
/// For instance, these messages can provide URLs to create Pull Requests
/// e.g.:
/// ```ignore
/// $ jj git push -c @
/// [...]
/// remote:
/// remote: Create a pull request for 'branch' on GitHub by visiting:
/// remote: https://github.com/user/repo/pull/new/branch
/// remote:
/// ```
///
/// The returned `stderr` content does not include sideband messages.
fn wait_with_progress(
    mut child: Child,
    callbacks: &mut RemoteCallbacks<'_>,
) -> Result<Output, GitSubprocessError> {
    let (stdout, stderr) = thread::scope(|s| -> io::Result<_> {
        // Close stdin so the child can never block waiting for input.
        drop(child.stdin.take());
        let mut child_stdout = child.stdout.take().expect("stdout should be piped");
        let mut child_stderr = child.stderr.take().expect("stderr should be piped");
        // Drain stdout on a helper thread while this thread consumes
        // stderr, so neither pipe fills up and stalls the child.
        let thread = s.spawn(move || -> io::Result<_> {
            let mut buf = Vec::new();
            child_stdout.read_to_end(&mut buf)?;
            Ok(buf)
        });
        // Progress/sideband lines are forwarded to `callbacks` as they
        // arrive and stripped from the returned stderr bytes.
        let stderr = read_to_end_with_progress(&mut child_stderr, callbacks)?;
        let stdout = thread.join().expect("reader thread wouldn't panic")?;
        Ok((stdout, stderr))
    })
    .map_err(GitSubprocessError::Wait)?;
    // Reap the child only after both pipes have been fully drained.
    let status = child.wait().map_err(GitSubprocessError::Wait)?;
    Ok(Output {
        status,
        stdout,
        stderr,
    })
}
/// Progress counters accumulated from `git` progress lines on stderr.
#[derive(Default)]
struct GitProgress {
    // Each pair is (frac, total), as parsed from the corresponding
    // "<label>: (<frac>/<total>)" progress line.
    deltas: (u64, u64),
    objects: (u64, u64),
    counted_objects: (u64, u64),
    compressed_objects: (u64, u64),
}
impl GitProgress {
    /// Converts the raw counters into an overall completion fraction.
    fn to_progress(&self) -> Progress {
        let total = self.total();
        // Avoid dividing by zero before any totals have been reported.
        let overall = if total == 0 {
            0.0
        } else {
            self.fraction() as f32 / total as f32
        };
        Progress {
            bytes_downloaded: None,
            overall,
        }
    }

    /// Sum of completed counts across all tracked phases.
    fn fraction(&self) -> u64 {
        [
            self.objects,
            self.deltas,
            self.counted_objects,
            self.compressed_objects,
        ]
        .into_iter()
        .map(|(frac, _total)| frac)
        .sum()
    }

    /// Sum of expected totals across all tracked phases.
    fn total(&self) -> u64 {
        [
            self.objects,
            self.deltas,
            self.counted_objects,
            self.compressed_objects,
        ]
        .into_iter()
        .map(|(_frac, total)| total)
        .sum()
    }
}
/// Reads `src` to completion, reporting progress and `remote:` sideband
/// lines through `callbacks` and removing them from the returned bytes.
fn read_to_end_with_progress<R: Read>(
    src: R,
    callbacks: &mut RemoteCallbacks<'_>,
) -> io::Result<Vec<u8>> {
    let mut reader = BufReader::new(src);
    let mut data = Vec::new();
    let mut git_progress = GitProgress::default();
    loop {
        // progress sent through sideband channel may be terminated by \r
        let start = data.len();
        read_until_cr_or_lf(&mut reader, &mut data)?;
        // The freshly-read line sits at the tail of `data`; it is kept
        // only if it is neither a progress line nor a sideband message.
        let line = &data[start..];
        if line.is_empty() {
            // EOF: nothing was appended.
            break;
        }
        if update_progress(line, &mut git_progress.objects, b"Receiving objects:")
            || update_progress(line, &mut git_progress.deltas, b"Resolving deltas:")
            || update_progress(
                line,
                &mut git_progress.counted_objects,
                b"remote: Counting objects:",
            )
            || update_progress(
                line,
                &mut git_progress.compressed_objects,
                b"remote: Compressing objects:",
            )
        {
            if let Some(cb) = callbacks.progress.as_mut() {
                cb(&git_progress.to_progress());
            }
            // Drop the progress line from the returned buffer.
            data.truncate(start);
        } else if let Some(message) = line.strip_prefix(b"remote: ") {
            if let Some(cb) = callbacks.sideband_progress.as_mut() {
                // Forward the trimmed body and its terminator separately.
                let (body, term) = trim_sideband_line(message);
                cb(body);
                if let Some(term) = term {
                    cb(&[term]);
                }
            }
            // Drop the sideband line from the returned buffer.
            data.truncate(start);
        }
    }
    Ok(data)
}
/// Updates `progress` from `line` if the line starts with `prefix`.
///
/// Returns whether the prefix matched (even when the counts could not be
/// parsed, in which case `progress` is left unchanged).
fn update_progress(line: &[u8], progress: &mut (u64, u64), prefix: &[u8]) -> bool {
    let Some(rest) = line.strip_prefix(prefix) else {
        return false;
    };
    if let Some(counts) = read_progress_line(rest) {
        *progress = counts;
    }
    true
}
/// Appends bytes from `reader` to `dest_buf` up to and including the first
/// CR or LF, or up to EOF when no terminator is found.
fn read_until_cr_or_lf<R: io::BufRead + ?Sized>(
    reader: &mut R,
    dest_buf: &mut Vec<u8>,
) -> io::Result<()> {
    loop {
        let available = match reader.fill_buf() {
            // Retry transparently on interruption.
            Err(err) if err.kind() == io::ErrorKind::Interrupted => continue,
            other => other?,
        };
        if let Some(pos) = available.iter().position(|&b| b == b'\r' || b == b'\n') {
            // Terminator found: copy it too, then stop.
            dest_buf.extend_from_slice(&available[..=pos]);
            reader.consume(pos + 1);
            return Ok(());
        }
        // No terminator in this chunk: take everything and keep reading.
        // An empty chunk signals EOF.
        let len = available.len();
        dest_buf.extend_from_slice(available);
        reader.consume(len);
        if len == 0 {
            return Ok(());
        }
    }
}
/// Parses a progress line of the form `<text> (<frac>/<total>)`.
///
/// Returns `None` for malformed lines or when `frac` exceeds `total`.
fn read_progress_line(line: &[u8]) -> Option<(u64, u64)> {
    // Take the region between the first '(' and the following ')'.
    let (_before, rest) = line.split_once_str("(")?;
    let (counts, _after) = rest.split_once_str(")")?;
    // The two counters are separated by '/'.
    let (frac_bytes, total_bytes) = counts.split_once_str("/")?;
    let frac: u64 = frac_bytes.to_str().ok()?.parse().ok()?;
    let total: u64 = total_bytes.to_str().ok()?.parse().ok()?;
    if frac <= total {
        Some((frac, total))
    } else {
        None
    }
}
/// Splits a sideband line into its content and optional CR/LF terminator,
/// dropping trailing spaces (padding the `git` CLI may add to overwrite a
/// previous progress line).
fn trim_sideband_line(line: &[u8]) -> (&[u8], Option<u8>) {
    let (body, term) = match line.split_last() {
        Some((&last, init)) if last == b'\r' || last == b'\n' => (init, Some(last)),
        _ => (line, None),
    };
    // Keep everything up to (and including) the last non-space byte.
    let end = body.iter().rposition(|&b| b != b' ').map_or(0, |i| i + 1);
    (&body[..end], term)
}
#[cfg(test)]
mod test {
use indoc::formatdoc;
use super::*;
const SAMPLE_NO_SUCH_REPOSITORY_ERROR: &[u8] =
br###"fatal: unable to access 'origin': Could not resolve host: invalid-remote
fatal: Could not read from remote repository.
Please make sure you have the correct access rights
and the repository exists. "###;
const SAMPLE_NO_SUCH_REMOTE_ERROR: &[u8] =
br###"fatal: 'origin' does not appear to be a git repository
fatal: Could not read from remote repository.
Please make sure you have the correct access rights
and the repository exists. "###;
const SAMPLE_NO_REMOTE_REF_ERROR: &[u8] = b"fatal: couldn't find remote ref refs/heads/noexist";
const SAMPLE_NO_REMOTE_TRACKING_BRANCH_ERROR: &[u8] =
b"error: remote-tracking branch 'bookmark' not found";
const SAMPLE_PUSH_REFS_PORCELAIN_OUTPUT: &[u8] = b"To origin
*\tdeadbeef:refs/heads/bookmark1\t[new branch]
+\tdeadbeef:refs/heads/bookmark2\tabcd..dead
-\tdeadbeef:refs/heads/bookmark3\t[deleted branch]
\tdeadbeef:refs/heads/bookmark4\tabcd..dead
=\tdeadbeef:refs/heads/bookmark5\tabcd..abcd
!\tdeadbeef:refs/heads/bookmark6\t[rejected] (failure lease)
!\tdeadbeef:refs/heads/bookmark7\t[rejected]
!\tdeadbeef:refs/heads/bookmark8\t[remote rejected] (hook failure)
!\tdeadbeef:refs/heads/bookmark9\t[remote rejected]
Done";
const SAMPLE_OK_STDERR: &[u8] = b"";
#[test]
fn test_parse_no_such_remote() {
assert_eq!(
parse_no_such_remote(SAMPLE_NO_SUCH_REPOSITORY_ERROR),
Some("origin".to_string())
);
assert_eq!(
parse_no_such_remote(SAMPLE_NO_SUCH_REMOTE_ERROR),
Some("origin".to_string())
);
assert_eq!(parse_no_such_remote(SAMPLE_NO_REMOTE_REF_ERROR), None);
assert_eq!(
parse_no_such_remote(SAMPLE_NO_REMOTE_TRACKING_BRANCH_ERROR),
None
);
assert_eq!(
parse_no_such_remote(SAMPLE_PUSH_REFS_PORCELAIN_OUTPUT),
None
);
assert_eq!(parse_no_such_remote(SAMPLE_OK_STDERR), None);
}
#[test]
fn test_parse_no_remote_ref() {
assert_eq!(parse_no_remote_ref(SAMPLE_NO_SUCH_REPOSITORY_ERROR), None);
assert_eq!(parse_no_remote_ref(SAMPLE_NO_SUCH_REMOTE_ERROR), None);
assert_eq!(
parse_no_remote_ref(SAMPLE_NO_REMOTE_REF_ERROR),
Some("refs/heads/noexist".to_string())
);
assert_eq!(
parse_no_remote_ref(SAMPLE_NO_REMOTE_TRACKING_BRANCH_ERROR),
None
);
assert_eq!(parse_no_remote_ref(SAMPLE_PUSH_REFS_PORCELAIN_OUTPUT), None);
assert_eq!(parse_no_remote_ref(SAMPLE_OK_STDERR), None);
}
#[test]
fn test_parse_no_remote_tracking_branch() {
assert_eq!(
parse_no_remote_tracking_branch(SAMPLE_NO_SUCH_REPOSITORY_ERROR),
None
);
assert_eq!(
parse_no_remote_tracking_branch(SAMPLE_NO_SUCH_REMOTE_ERROR),
None
);
assert_eq!(
parse_no_remote_tracking_branch(SAMPLE_NO_REMOTE_REF_ERROR),
None
);
assert_eq!(
parse_no_remote_tracking_branch(SAMPLE_NO_REMOTE_TRACKING_BRANCH_ERROR),
Some("bookmark".to_string())
);
assert_eq!(
parse_no_remote_tracking_branch(SAMPLE_PUSH_REFS_PORCELAIN_OUTPUT),
None
);
assert_eq!(parse_no_remote_tracking_branch(SAMPLE_OK_STDERR), None);
}
#[test]
fn test_parse_ref_pushes() {
assert!(parse_ref_pushes(SAMPLE_NO_SUCH_REPOSITORY_ERROR).is_err());
assert!(parse_ref_pushes(SAMPLE_NO_SUCH_REMOTE_ERROR).is_err());
assert!(parse_ref_pushes(SAMPLE_NO_REMOTE_REF_ERROR).is_err());
assert!(parse_ref_pushes(SAMPLE_NO_REMOTE_TRACKING_BRANCH_ERROR).is_err());
let GitPushStats {
pushed,
rejected,
remote_rejected,
} = parse_ref_pushes(SAMPLE_PUSH_REFS_PORCELAIN_OUTPUT).unwrap();
assert_eq!(
pushed,
[
"refs/heads/bookmark1",
"refs/heads/bookmark2",
"refs/heads/bookmark3",
"refs/heads/bookmark4",
"refs/heads/bookmark5",
]
.map(GitRefNameBuf::from)
);
assert_eq!(
rejected,
vec![
(
"refs/heads/bookmark6".into(),
Some("failure lease".to_string())
),
("refs/heads/bookmark7".into(), None),
]
);
assert_eq!(
remote_rejected,
vec![
(
"refs/heads/bookmark8".into(),
Some("hook failure".to_string())
),
("refs/heads/bookmark9".into(), None)
]
);
assert!(parse_ref_pushes(SAMPLE_OK_STDERR).is_err());
}
#[test]
fn test_read_to_end_with_progress() {
let read = |sample: &[u8]| {
let mut progress = Vec::new();
let mut sideband = Vec::new();
let mut callbacks = RemoteCallbacks::default();
let mut progress_cb = |p: &Progress| progress.push(p.clone());
callbacks.progress = Some(&mut progress_cb);
let mut sideband_cb = |s: &[u8]| sideband.push(s.to_owned());
callbacks.sideband_progress = Some(&mut sideband_cb);
let output = read_to_end_with_progress(&mut &sample[..], &mut callbacks).unwrap();
(output, sideband, progress)
};
const DUMB_SUFFIX: &str = " ";
let sample = formatdoc! {"
remote: line1{DUMB_SUFFIX}
blah blah
remote: line2.0{DUMB_SUFFIX}\rremote: line2.1{DUMB_SUFFIX}
remote: line3{DUMB_SUFFIX}
Resolving deltas: (12/24)
some error message
"};
let (output, sideband, progress) = read(sample.as_bytes());
assert_eq!(
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/repo_path.rs | lib/src/repo_path.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::borrow::Borrow;
use std::cmp::Ordering;
use std::collections::HashMap;
use std::fmt;
use std::fmt::Debug;
use std::fmt::Formatter;
use std::iter;
use std::iter::FusedIterator;
use std::ops::Deref;
use std::path::Component;
use std::path::Path;
use std::path::PathBuf;
use itertools::Itertools as _;
use ref_cast::RefCastCustom;
use ref_cast::ref_cast_custom;
use thiserror::Error;
use crate::content_hash::ContentHash;
use crate::file_util;
use crate::merge::Diff;
/// Owned `RepoPath` component.
#[derive(ContentHash, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct RepoPathComponentBuf {
    // Don't add more fields. Eq, Hash, and Ord must be compatible with the
    // borrowed RepoPathComponent type.
    value: String,
}

impl RepoPathComponentBuf {
    /// Wraps `value` as `RepoPathComponentBuf`.
    ///
    /// Returns an error if the input `value` is empty or contains a path
    /// separator.
    pub fn new(value: impl Into<String>) -> Result<Self, InvalidNewRepoPathError> {
        let value: String = value.into();
        if !is_valid_repo_path_component_str(&value) {
            return Err(InvalidNewRepoPathError { value });
        }
        Ok(Self { value })
    }
}
/// Borrowed `RepoPath` component.
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, RefCastCustom)]
#[repr(transparent)]
pub struct RepoPathComponent {
    value: str,
}
impl RepoPathComponent {
    /// Wraps `value` as `RepoPathComponent`.
    ///
    /// Returns an error if the input `value` is empty or contains path
    /// separator.
    pub fn new(value: &str) -> Result<&Self, InvalidNewRepoPathError> {
        if is_valid_repo_path_component_str(value) {
            Ok(Self::new_unchecked(value))
        } else {
            Err(InvalidNewRepoPathError {
                value: value.to_string(),
            })
        }
    }
    /// Wraps `value` without validation. The reference cast is generated
    /// by `ref_cast`; callers must pass a valid component string.
    #[ref_cast_custom]
    const fn new_unchecked(value: &str) -> &Self;
    /// Returns the underlying string representation.
    pub fn as_internal_str(&self) -> &str {
        &self.value
    }
    /// Returns a normal filesystem entry name if this path component is valid
    /// as a file/directory name.
    pub fn to_fs_name(&self) -> Result<&str, InvalidRepoPathComponentError> {
        // A valid name parses as exactly one Normal component whose text
        // round-trips unchanged.
        let mut components = Path::new(&self.value).components().fuse();
        match (components.next(), components.next()) {
            // Trailing "." can be normalized by Path::components(), so compare
            // component name. e.g. "foo\." (on Windows) should be rejected.
            (Some(Component::Normal(name)), None) if name == &self.value => Ok(&self.value),
            // e.g. ".", "..", "foo\bar" (on Windows)
            _ => Err(InvalidRepoPathComponentError {
                component: self.value.into(),
            }),
        }
    }
}
impl Debug for RepoPathComponent {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // Render as a quoted string, like str's Debug output.
        write!(f, "{:?}", &self.value)
    }
}
impl Debug for RepoPathComponentBuf {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // Delegate (via Deref) so owned and borrowed forms print alike.
        <RepoPathComponent as Debug>::fmt(self, f)
    }
}
impl AsRef<Self> for RepoPathComponent {
    fn as_ref(&self) -> &Self {
        self
    }
}
impl AsRef<RepoPathComponent> for RepoPathComponentBuf {
    fn as_ref(&self) -> &RepoPathComponent {
        self
    }
}
impl Borrow<RepoPathComponent> for RepoPathComponentBuf {
    fn borrow(&self) -> &RepoPathComponent {
        self
    }
}
impl Deref for RepoPathComponentBuf {
    type Target = RepoPathComponent;
    fn deref(&self) -> &Self::Target {
        RepoPathComponent::new_unchecked(&self.value)
    }
}
impl ToOwned for RepoPathComponent {
    type Owned = RepoPathComponentBuf;
    fn to_owned(&self) -> Self::Owned {
        let value = self.value.to_owned();
        RepoPathComponentBuf { value }
    }
    // Reuses the target's existing allocation when possible.
    fn clone_into(&self, target: &mut Self::Owned) {
        self.value.clone_into(&mut target.value);
    }
}
/// Iterator over `RepoPath` components.
#[derive(Clone, Debug)]
pub struct RepoPathComponentsIter<'a> {
    value: &'a str,
}

impl<'a> RepoPathComponentsIter<'a> {
    /// Returns the remaining part as repository path.
    pub fn as_path(&self) -> &'a RepoPath {
        RepoPath::from_internal_string_unchecked(self.value)
    }
}

impl<'a> Iterator for RepoPathComponentsIter<'a> {
    type Item = &'a RepoPathComponent;

    fn next(&mut self) -> Option<Self::Item> {
        if self.value.is_empty() {
            return None;
        }
        // Split off the first component; when no separator remains, the
        // whole remainder is the final component.
        let (name, rest) = match self.value.split_once('/') {
            Some(split) => split,
            None => (self.value, ""),
        };
        self.value = rest;
        Some(RepoPathComponent::new_unchecked(name))
    }
}

impl DoubleEndedIterator for RepoPathComponentsIter<'_> {
    fn next_back(&mut self) -> Option<Self::Item> {
        if self.value.is_empty() {
            return None;
        }
        // Split off the last component, mirroring next().
        let (rest, name) = match self.value.rsplit_once('/') {
            Some(split) => split,
            None => ("", self.value),
        };
        self.value = rest;
        Some(RepoPathComponent::new_unchecked(name))
    }
}

impl FusedIterator for RepoPathComponentsIter<'_> {}
/// Owned repository path.
#[derive(ContentHash, Clone, Eq, Hash, PartialEq, serde::Serialize)]
#[serde(transparent)]
pub struct RepoPathBuf {
    // Don't add more fields. Eq, Hash, and Ord must be compatible with the
    // borrowed RepoPath type.
    value: String,
}
/// Borrowed repository path.
///
/// Stored as '/'-separated components with no leading or trailing slash;
/// the root path is the empty string.
#[derive(ContentHash, Eq, Hash, PartialEq, RefCastCustom, serde::Serialize)]
#[repr(transparent)]
#[serde(transparent)]
pub struct RepoPath {
    value: str,
}
impl Debug for RepoPath {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // Render as a quoted string, like str's Debug output.
        write!(f, "{:?}", &self.value)
    }
}
impl Debug for RepoPathBuf {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // Delegate (via Deref) so owned and borrowed forms print alike.
        <RepoPath as Debug>::fmt(self, f)
    }
}
/// The `value` is not a valid repo path because it contains empty path
/// component. For example, `"/"`, `"/foo"`, `"foo/"`, `"foo//bar"` are all
/// invalid.
#[derive(Clone, Debug, Eq, Error, PartialEq)]
#[error(r#"Invalid repo path input "{value}""#)]
pub struct InvalidNewRepoPathError {
    value: String,
}
impl RepoPathBuf {
    /// Creates owned repository path pointing to the root.
    pub const fn root() -> Self {
        Self {
            value: String::new(),
        }
    }
    /// Creates `RepoPathBuf` from valid string representation.
    ///
    /// Returns an error if `value` contains an empty component (leading,
    /// trailing, or doubled `/`).
    pub fn from_internal_string(value: impl Into<String>) -> Result<Self, InvalidNewRepoPathError> {
        let value: String = value.into();
        if is_valid_repo_path_str(&value) {
            Ok(Self { value })
        } else {
            Err(InvalidNewRepoPathError { value })
        }
    }
    /// Converts repo-relative `Path` to `RepoPathBuf`.
    ///
    /// The input path should not contain redundant `.` or `..`.
    pub fn from_relative_path(
        relative_path: impl AsRef<Path>,
    ) -> Result<Self, RelativePathParseError> {
        let relative_path = relative_path.as_ref();
        // "." is the accepted spelling of the root directory.
        if relative_path == Path::new(".") {
            return Ok(Self::root());
        }
        // Each component must be a Normal (non-"..", non-prefix) name and
        // valid UTF-8; anything else is rejected with a parse error.
        let mut components = relative_path
            .components()
            .map(|c| match c {
                Component::Normal(name) => {
                    name.to_str()
                        .ok_or_else(|| RelativePathParseError::InvalidUtf8 {
                            path: relative_path.into(),
                        })
                }
                _ => Err(RelativePathParseError::InvalidComponent {
                    component: c.as_os_str().to_string_lossy().into(),
                    path: relative_path.into(),
                }),
            })
            .fuse();
        // Join components with '/', preallocating roughly the right size.
        let mut value = String::with_capacity(relative_path.as_os_str().len());
        if let Some(name) = components.next() {
            value.push_str(name?);
        }
        for name in components {
            value.push('/');
            value.push_str(name?);
        }
        Ok(Self { value })
    }
    /// Parses an `input` path into a `RepoPathBuf` relative to `base`.
    ///
    /// The `cwd` and `base` paths are supposed to be absolute and normalized in
    /// the same manner. The `input` path may be either relative to `cwd` or
    /// absolute.
    pub fn parse_fs_path(
        cwd: &Path,
        base: &Path,
        input: impl AsRef<Path>,
    ) -> Result<Self, FsPathParseError> {
        let input = input.as_ref();
        // Resolve the input against cwd, then re-express it relative to
        // the repo root before validating its components.
        let abs_input_path = file_util::normalize_path(&cwd.join(input));
        let repo_relative_path = file_util::relative_path(base, &abs_input_path);
        Self::from_relative_path(repo_relative_path).map_err(|source| FsPathParseError {
            base: file_util::relative_path(cwd, base).into(),
            input: input.into(),
            source,
        })
    }
    /// Consumes this and returns the underlying string representation.
    pub fn into_internal_string(self) -> String {
        self.value
    }
}
impl RepoPath {
    /// Returns repository path pointing to the root.
    pub const fn root() -> &'static Self {
        Self::from_internal_string_unchecked("")
    }
    /// Wraps valid string representation as `RepoPath`.
    ///
    /// Returns an error if the input `value` contains empty path component. For
    /// example, `"/"`, `"/foo"`, `"foo/"`, `"foo//bar"` are all invalid.
    pub fn from_internal_string(value: &str) -> Result<&Self, InvalidNewRepoPathError> {
        if is_valid_repo_path_str(value) {
            Ok(Self::from_internal_string_unchecked(value))
        } else {
            Err(InvalidNewRepoPathError {
                value: value.to_owned(),
            })
        }
    }
    /// Wraps `value` without validation. The reference cast is generated
    /// by `ref_cast`; callers must pass a valid path string.
    #[ref_cast_custom]
    const fn from_internal_string_unchecked(value: &str) -> &Self;
    /// The full string form used internally, not for presenting to users (where
    /// we may want to use the platform's separator). This format includes a
    /// trailing slash, unless this path represents the root directory. That
    /// way it can be concatenated with a basename and produce a valid path.
    pub fn to_internal_dir_string(&self) -> String {
        if self.value.is_empty() {
            String::new()
        } else {
            [&self.value, "/"].concat()
        }
    }
    /// The full string form used internally, not for presenting to users (where
    /// we may want to use the platform's separator).
    pub fn as_internal_file_string(&self) -> &str {
        &self.value
    }
    /// Converts repository path to filesystem path relative to the `base`.
    ///
    /// The returned path should never contain `..`, `C:` (on Windows), etc.
    /// However, it may contain reserved working-copy directories such as `.jj`.
    pub fn to_fs_path(&self, base: &Path) -> Result<PathBuf, InvalidRepoPathError> {
        let mut result = PathBuf::with_capacity(base.as_os_str().len() + self.value.len() + 1);
        result.push(base);
        // Each component is validated as a safe filesystem entry name.
        for c in self.components() {
            result.push(c.to_fs_name().map_err(|err| err.with_path(self))?);
        }
        // The root path with an empty base maps to ".".
        if result.as_os_str().is_empty() {
            result.push(".");
        }
        Ok(result)
    }
    /// Converts repository path to filesystem path relative to the `base`,
    /// without checking invalid path components.
    ///
    /// The returned path may point outside of the `base` directory. Use this
    /// function only for displaying or testing purposes.
    pub fn to_fs_path_unchecked(&self, base: &Path) -> PathBuf {
        let mut result = PathBuf::with_capacity(base.as_os_str().len() + self.value.len() + 1);
        result.push(base);
        result.extend(self.components().map(RepoPathComponent::as_internal_str));
        // The root path with an empty base maps to ".".
        if result.as_os_str().is_empty() {
            result.push(".");
        }
        result
    }
    /// Whether this is the root path (the empty string form).
    pub fn is_root(&self) -> bool {
        self.value.is_empty()
    }
    /// Returns true if the `base` is a prefix of this path.
    pub fn starts_with(&self, base: &Self) -> bool {
        self.strip_prefix(base).is_some()
    }
    /// Returns the remaining path with the `base` path removed.
    pub fn strip_prefix(&self, base: &Self) -> Option<&Self> {
        if base.value.is_empty() {
            Some(self)
        } else {
            // The prefix must end at a component boundary: either the
            // paths are equal, or a '/' must follow the matched prefix.
            let tail = self.value.strip_prefix(&base.value)?;
            if tail.is_empty() {
                Some(Self::from_internal_string_unchecked(tail))
            } else {
                tail.strip_prefix('/')
                    .map(Self::from_internal_string_unchecked)
            }
        }
    }
    /// Returns the parent path without the base name component.
    pub fn parent(&self) -> Option<&Self> {
        self.split().map(|(parent, _)| parent)
    }
    /// Splits this into the parent path and base name component.
    pub fn split(&self) -> Option<(&Self, &RepoPathComponent)> {
        let mut components = self.components();
        let basename = components.next_back()?;
        Some((components.as_path(), basename))
    }
    /// Iterates this path's components from first to last.
    pub fn components(&self) -> RepoPathComponentsIter<'_> {
        RepoPathComponentsIter { value: &self.value }
    }
    /// Iterates `self` and each of its parents, ending at the root.
    pub fn ancestors(&self) -> impl Iterator<Item = &Self> {
        std::iter::successors(Some(self), |path| path.parent())
    }
    /// Returns a new owned path with `entry` appended as the last component.
    pub fn join(&self, entry: &RepoPathComponent) -> RepoPathBuf {
        let value = if self.value.is_empty() {
            entry.as_internal_str().to_owned()
        } else {
            [&self.value, "/", entry.as_internal_str()].concat()
        };
        RepoPathBuf { value }
    }
    /// Splits this path at its common prefix with `other`.
    ///
    /// # Returns
    ///
    /// Returns the `(common_prefix, self_remainder)`.
    ///
    /// All paths will at least have `RepoPath::root()` as a common prefix,
    /// therefore even if `self` and `other` have no matching parent component
    /// this function will always return at least `(RepoPath::root(), self)`.
    ///
    ///
    /// # Examples
    ///
    /// ```
    /// use jj_lib::repo_path::RepoPath;
    ///
    /// let bing_path = RepoPath::from_internal_string("foo/bar/bing").unwrap();
    ///
    /// let baz_path = RepoPath::from_internal_string("foo/bar/baz").unwrap();
    ///
    /// let foo_bar_path = RepoPath::from_internal_string("foo/bar").unwrap();
    ///
    /// assert_eq!(
    ///     bing_path.split_common_prefix(&baz_path),
    ///     (foo_bar_path, RepoPath::from_internal_string("bing").unwrap())
    /// );
    ///
    /// let unrelated_path = RepoPath::from_internal_string("no/common/prefix").unwrap();
    /// assert_eq!(
    ///     baz_path.split_common_prefix(&unrelated_path),
    ///     (RepoPath::root(), baz_path)
    /// );
    /// ```
    pub fn split_common_prefix(&self, other: &Self) -> (&Self, &Self) {
        // Obtain the common prefix between these paths
        let mut prefix_len = 0;
        let common_components = self
            .components()
            .zip(other.components())
            .take_while(|(prev_comp, this_comp)| prev_comp == this_comp);
        for (self_comp, _other_comp) in common_components {
            if prefix_len > 0 {
                // + 1 for all paths to take their separators into account.
                // We skip the first one since there are ComponentCount - 1 separators in a
                // path.
                prefix_len += 1;
            }
            prefix_len += self_comp.value.len();
        }
        if prefix_len == 0 {
            // No common prefix except root
            return (Self::root(), self);
        }
        if prefix_len == self.value.len() {
            // All of self's components matched: self is the common prefix.
            return (self, Self::root());
        }
        let common_prefix = Self::from_internal_string_unchecked(&self.value[..prefix_len]);
        // +1 skips the '/' separating the common prefix from the remainder.
        let remainder = Self::from_internal_string_unchecked(&self.value[prefix_len + 1..]);
        (common_prefix, remainder)
    }
}
impl AsRef<Self> for RepoPath {
    fn as_ref(&self) -> &Self {
        self
    }
}
impl AsRef<RepoPath> for RepoPathBuf {
    fn as_ref(&self) -> &RepoPath {
        self
    }
}
impl Borrow<RepoPath> for RepoPathBuf {
    fn borrow(&self) -> &RepoPath {
        self
    }
}
impl Deref for RepoPathBuf {
    type Target = RepoPath;
    fn deref(&self) -> &Self::Target {
        RepoPath::from_internal_string_unchecked(&self.value)
    }
}
impl ToOwned for RepoPath {
    type Owned = RepoPathBuf;
    fn to_owned(&self) -> Self::Owned {
        let value = self.value.to_owned();
        RepoPathBuf { value }
    }
    // Reuses the target's existing allocation when possible.
    fn clone_into(&self, target: &mut Self::Owned) {
        self.value.clone_into(&mut target.value);
    }
}
impl Ord for RepoPath {
    fn cmp(&self, other: &Self) -> Ordering {
        // If there were leading/trailing slash, components-based Ord would
        // disagree with str-based Eq.
        debug_assert!(is_valid_repo_path_str(&self.value));
        self.components().cmp(other.components())
    }
}
impl Ord for RepoPathBuf {
    fn cmp(&self, other: &Self) -> Ordering {
        // Delegate to the borrowed form so both orderings agree.
        <RepoPath as Ord>::cmp(self, other)
    }
}
impl PartialOrd for RepoPath {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl PartialOrd for RepoPathBuf {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl<P: AsRef<RepoPathComponent>> Extend<P> for RepoPathBuf {
    fn extend<T: IntoIterator<Item = P>>(&mut self, iter: T) {
        for component in iter {
            // Insert a separator unless the path is still the root.
            if !self.value.is_empty() {
                self.value.push('/');
            }
            self.value.push_str(component.as_ref().as_internal_str());
        }
    }
}
/// `RepoPath` contained invalid file/directory component such as `..`.
#[derive(Clone, Debug, Eq, Error, PartialEq)]
#[error(r#"Invalid repository path "{}""#, path.as_internal_file_string())]
pub struct InvalidRepoPathError {
    /// Path containing an error.
    pub path: RepoPathBuf,
    /// Source error.
    pub source: InvalidRepoPathComponentError,
}
/// `RepoPath` component was invalid. (e.g. `..`)
#[derive(Clone, Debug, Eq, Error, PartialEq)]
#[error(r#"Invalid path component "{component}""#)]
pub struct InvalidRepoPathComponentError {
    /// The offending component text.
    pub component: Box<str>,
}
impl InvalidRepoPathComponentError {
    /// Attaches the `path` that caused the error.
    pub fn with_path(self, path: &RepoPath) -> InvalidRepoPathError {
        InvalidRepoPathError {
            path: path.to_owned(),
            source: self,
        }
    }
}
/// Error from converting a repo-relative filesystem path to a repo path.
#[derive(Clone, Debug, Eq, Error, PartialEq)]
pub enum RelativePathParseError {
    #[error(r#"Invalid component "{component}" in repo-relative path "{path}""#)]
    InvalidComponent {
        component: Box<str>,
        path: Box<Path>,
    },
    #[error(r#"Not valid UTF-8 path "{path}""#)]
    InvalidUtf8 { path: Box<Path> },
}
/// Error from parsing a user-supplied filesystem path into a repo path.
#[derive(Clone, Debug, Eq, Error, PartialEq)]
#[error(r#"Path "{input}" is not in the repo "{base}""#)]
pub struct FsPathParseError {
    /// Repository or workspace root path relative to the `cwd`.
    pub base: Box<Path>,
    /// Input path without normalization.
    pub input: Box<Path>,
    /// Source error.
    pub source: RelativePathParseError,
}
/// A component is valid iff it is non-empty and contains no '/' separator.
fn is_valid_repo_path_component_str(value: &str) -> bool {
    !(value.is_empty() || value.contains('/'))
}
/// A repo path string is valid iff no component is empty: it must not begin
/// or end with '/', nor contain a doubled "//". The empty string (root) is
/// valid.
fn is_valid_repo_path_str(value: &str) -> bool {
    let edge_slash = value.starts_with('/') || value.ends_with('/');
    !edge_slash && !value.contains("//")
}
/// An error from `RepoPathUiConverter::parse_file_path`.
#[derive(Debug, Error)]
pub enum UiPathParseError {
    /// Underlying filesystem-path parse failure.
    #[error(transparent)]
    Fs(FsPathParseError),
}
/// Converts `RepoPath`s to and from plain strings as displayed to the user
/// (e.g. relative to CWD).
#[derive(Debug, Clone)]
pub enum RepoPathUiConverter {
    /// Variant for a local file system. Paths are interpreted relative to `cwd`
    /// with the repo rooted in `base`.
    ///
    /// The `cwd` and `base` paths are supposed to be absolute and normalized in
    /// the same manner.
    Fs { cwd: PathBuf, base: PathBuf },
    // TODO: Add a no-op variant that uses the internal `RepoPath` representation. Can be useful
    // on a server.
}
impl RepoPathUiConverter {
    /// Format a path for display in the UI.
    pub fn format_file_path(&self, file: &RepoPath) -> String {
        match self {
            Self::Fs { cwd, base } => {
                // Display the path relative to the user's working directory.
                file_util::relative_path(cwd, &file.to_fs_path_unchecked(base))
                    .display()
                    .to_string()
            }
        }
    }
    /// Format a copy from `before` to `after` for display in the UI by
    /// extracting common components and producing something like
    /// "common/prefix/{before => after}/common/suffix".
    ///
    /// If `before == after`, this is equivalent to `format_file_path()`.
    pub fn format_copied_path(&self, paths: Diff<&RepoPath>) -> String {
        match self {
            Self::Fs { .. } => {
                // Format both sides first, then collapse the shared parts.
                let paths = paths.map(|path| self.format_file_path(path));
                collapse_copied_path(paths.as_deref(), std::path::MAIN_SEPARATOR)
            }
        }
    }
    /// Parses a path from the UI.
    ///
    /// It's up to the implementation whether absolute paths are allowed, and
    /// where relative paths are interpreted as relative to.
    pub fn parse_file_path(&self, input: &str) -> Result<RepoPathBuf, UiPathParseError> {
        match self {
            Self::Fs { cwd, base } => {
                RepoPathBuf::parse_fs_path(cwd, base, input).map_err(UiPathParseError::Fs)
            }
        }
    }
}
/// Collapses a copied/renamed path pair into a single display string of the
/// form `prefix{before => after}suffix`, factoring out the components the
/// two paths share at the start and at the end.
fn collapse_copied_path(paths: Diff<&str>, separator: char) -> String {
    // The last component should never match middle components. This is ensured
    // by including trailing separators. e.g. ("a/b", "a/b/x") => ("a/", _)
    let components = paths.map(|path| path.split_inclusive(separator));
    let prefix_len: usize = iter::zip(components.before, components.after)
        .take_while(|(before, after)| before == after)
        .map(|(_, after)| after.len())
        .sum();
    // Identical paths collapse to the path itself, with no braces.
    if paths.before.len() == prefix_len && paths.after.len() == prefix_len {
        return paths.after.to_owned();
    }
    // The first component should never match middle components, but the first
    // uncommon middle component can. e.g. ("a/b", "x/a/b") => ("", "/b"),
    // ("a/b", "a/x/b") => ("a/", "/b")
    let components = paths.map(|path| {
        // Iterate components from the end, each with its leading separator.
        let mut remainder = &path[prefix_len.saturating_sub(1)..];
        iter::from_fn(move || {
            let pos = remainder.rfind(separator)?;
            let (prefix, last) = remainder.split_at(pos);
            remainder = prefix;
            Some(last)
        })
    });
    let suffix_len: usize = iter::zip(components.before, components.after)
        .take_while(|(before, after)| before == after)
        .map(|(_, after)| after.len())
        .sum();
    // Middle range may be invalid (start > end) because the same separator char
    // can be distributed to both common prefix and suffix. e.g.
    // ("a/b", "a/x/b") == ("a//b", "a/x/b") => ("a/", "/b")
    let middle = paths.map(|path| path.get(prefix_len..path.len() - suffix_len).unwrap_or(""));
    // Assemble "prefix{before => after}suffix".
    let mut collapsed = String::new();
    collapsed.push_str(&paths.after[..prefix_len]);
    collapsed.push('{');
    collapsed.push_str(middle.before);
    collapsed.push_str(" => ");
    collapsed.push_str(middle.after);
    collapsed.push('}');
    collapsed.push_str(&paths.after[paths.after.len() - suffix_len..]);
    collapsed
}
/// Tree that maps `RepoPath` to value of type `V`.
#[derive(Clone, Default, Eq, PartialEq)]
pub struct RepoPathTree<V> {
    // Child subtrees keyed by the next path component.
    entries: HashMap<RepoPathComponentBuf, Self>,
    // Value stored at this node's path.
    value: V,
}
impl<V> RepoPathTree<V> {
    /// The value associated with this path.
    pub fn value(&self) -> &V {
        &self.value
    }
    /// Mutable reference to the value associated with this path.
    pub fn value_mut(&mut self) -> &mut V {
        &mut self.value
    }
    /// Set the value associated with this path.
    pub fn set_value(&mut self, value: V) {
        self.value = value;
    }
    /// The immediate children of this node.
    pub fn children(&self) -> impl Iterator<Item = (&RepoPathComponent, &Self)> {
        self.entries.iter().map(|(name, sub)| (name.as_ref(), sub))
    }
    /// Whether this node has any children.
    pub fn has_children(&self) -> bool {
        !self.entries.is_empty()
    }
    /// Add a path to the tree. Normally called on the root tree.
    pub fn add(&mut self, path: &RepoPath) -> &mut Self
    where
        V: Default,
    {
        let mut node = self;
        for name in path.components() {
            // Avoid name.clone() if entry already exists.
            if !node.entries.contains_key(name) {
                node.entries.insert(name.to_owned(), Self::default());
            }
            node = node.entries.get_mut(name).unwrap();
        }
        node
    }
    /// Get a reference to the node for the given `path`, if it exists in the
    /// tree.
    pub fn get(&self, path: &RepoPath) -> Option<&Self> {
        let mut node = self;
        for name in path.components() {
            node = node.entries.get(name)?;
        }
        Some(node)
    }
    /// Walks the tree from the root to the given `path`, yielding each sub tree
    /// and remaining path.
    pub fn walk_to<'a, 'b>(
        &'a self,
        path: &'b RepoPath,
    ) -> impl Iterator<Item = (&'a Self, &'b RepoPath)> {
        iter::successors(Some((self, path)), |(node, remainder)| {
            let mut components = remainder.components();
            let next = components.next()?;
            Some((node.entries.get(next)?, components.as_path()))
        })
    }
}
impl<V: Debug> Debug for RepoPathTree<V> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Print this node's value followed by its children, sorted by
        // component name for deterministic output.
        self.value.fmt(f)?;
        f.write_str(" ")?;
        let sorted_entries = self
            .entries
            .iter()
            .sorted_unstable_by_key(|&(name, _)| name);
        f.debug_map().entries(sorted_entries).finish()
    }
}
#[cfg(test)]
mod tests {
use std::panic;
use assert_matches::assert_matches;
use itertools::Itertools as _;
use super::*;
use crate::tests::new_temp_dir;
    /// Test helper: builds a borrowed `RepoPath`, panicking on invalid input.
    fn repo_path(value: &str) -> &RepoPath {
        RepoPath::from_internal_string(value).unwrap()
    }
    /// Test helper: builds a `RepoPathComponent`, panicking on invalid input.
    fn repo_path_component(value: &str) -> &RepoPathComponent {
        RepoPathComponent::new(value).unwrap()
    }
#[test]
fn test_is_root() {
assert!(RepoPath::root().is_root());
assert!(repo_path("").is_root());
assert!(!repo_path("foo").is_root());
}
#[test]
fn test_from_internal_string() {
let repo_path_buf = |value: &str| RepoPathBuf::from_internal_string(value).unwrap();
assert_eq!(repo_path_buf(""), RepoPathBuf::root());
assert!(panic::catch_unwind(|| repo_path_buf("/")).is_err());
assert!(panic::catch_unwind(|| repo_path_buf("/x")).is_err());
assert!(panic::catch_unwind(|| repo_path_buf("x/")).is_err());
assert!(panic::catch_unwind(|| repo_path_buf("x//y")).is_err());
assert_eq!(repo_path(""), RepoPath::root());
assert!(panic::catch_unwind(|| repo_path("/")).is_err());
assert!(panic::catch_unwind(|| repo_path("/x")).is_err());
assert!(panic::catch_unwind(|| repo_path("x/")).is_err());
assert!(panic::catch_unwind(|| repo_path("x//y")).is_err());
}
#[test]
fn test_as_internal_file_string() {
assert_eq!(RepoPath::root().as_internal_file_string(), "");
assert_eq!(repo_path("dir").as_internal_file_string(), "dir");
assert_eq!(repo_path("dir/file").as_internal_file_string(), "dir/file");
}
#[test]
fn test_to_internal_dir_string() {
assert_eq!(RepoPath::root().to_internal_dir_string(), "");
assert_eq!(repo_path("dir").to_internal_dir_string(), "dir/");
assert_eq!(repo_path("dir/file").to_internal_dir_string(), "dir/file/");
}
#[test]
fn test_starts_with() {
assert!(repo_path("").starts_with(repo_path("")));
assert!(repo_path("x").starts_with(repo_path("")));
assert!(!repo_path("").starts_with(repo_path("x")));
assert!(repo_path("x").starts_with(repo_path("x")));
assert!(repo_path("x/y").starts_with(repo_path("x")));
assert!(!repo_path("xy").starts_with(repo_path("x")));
assert!(!repo_path("x/y").starts_with(repo_path("y")));
assert!(repo_path("x/y").starts_with(repo_path("x/y")));
assert!(repo_path("x/y/z").starts_with(repo_path("x/y")));
assert!(!repo_path("x/yz").starts_with(repo_path("x/y")));
assert!(!repo_path("x").starts_with(repo_path("x/y")));
assert!(!repo_path("xy").starts_with(repo_path("x/y")));
}
#[test]
fn test_strip_prefix() {
assert_eq!(
repo_path("").strip_prefix(repo_path("")),
Some(repo_path(""))
);
assert_eq!(
repo_path("x").strip_prefix(repo_path("")),
Some(repo_path("x"))
);
assert_eq!(repo_path("").strip_prefix(repo_path("x")), None);
assert_eq!(
repo_path("x").strip_prefix(repo_path("x")),
Some(repo_path(""))
);
assert_eq!(
repo_path("x/y").strip_prefix(repo_path("x")),
Some(repo_path("y"))
);
assert_eq!(repo_path("xy").strip_prefix(repo_path("x")), None);
assert_eq!(repo_path("x/y").strip_prefix(repo_path("y")), None);
assert_eq!(
repo_path("x/y").strip_prefix(repo_path("x/y")),
Some(repo_path(""))
);
assert_eq!(
repo_path("x/y/z").strip_prefix(repo_path("x/y")),
Some(repo_path("z"))
);
assert_eq!(repo_path("x/yz").strip_prefix(repo_path("x/y")), None);
assert_eq!(repo_path("x").strip_prefix(repo_path("x/y")), None);
assert_eq!(repo_path("xy").strip_prefix(repo_path("x/y")), None);
}
#[test]
fn test_order() {
assert!(RepoPath::root() < repo_path("dir"));
assert!(repo_path("dir") < repo_path("dirx"));
// '#' < '/', but ["dir", "sub"] < ["dir#"]
assert!(repo_path("dir") < repo_path("dir#"));
assert!(repo_path("dir") < repo_path("dir/sub"));
assert!(repo_path("dir/sub") < repo_path("dir#"));
assert!(repo_path("abc") < repo_path("dir/file"));
assert!(repo_path("dir") < repo_path("dir/file"));
assert!(repo_path("dis") > repo_path("dir/file"));
assert!(repo_path("xyz") > repo_path("dir/file"));
assert!(repo_path("dir1/xyz") < repo_path("dir2/abc"));
}
#[test]
fn test_join() {
let root = RepoPath::root();
let dir = root.join(repo_path_component("dir"));
assert_eq!(dir.as_ref(), repo_path("dir"));
let subdir = dir.join(repo_path_component("subdir"));
assert_eq!(subdir.as_ref(), repo_path("dir/subdir"));
assert_eq!(
subdir.join(repo_path_component("file")).as_ref(),
repo_path("dir/subdir/file")
);
}
#[test]
fn test_extend() {
let mut path = RepoPathBuf::root();
path.extend(std::iter::empty::<RepoPathComponentBuf>());
assert_eq!(path.as_ref(), RepoPath::root());
path.extend([repo_path_component("dir")]);
assert_eq!(path.as_ref(), repo_path("dir"));
path.extend(std::iter::repeat_n(repo_path_component("subdir"), 3));
assert_eq!(path.as_ref(), repo_path("dir/subdir/subdir/subdir"));
path.extend(std::iter::empty::<RepoPathComponentBuf>());
assert_eq!(path.as_ref(), repo_path("dir/subdir/subdir/subdir"));
}
#[test]
fn test_parent() {
let root = RepoPath::root();
let dir_component = repo_path_component("dir");
let subdir_component = repo_path_component("subdir");
let dir = root.join(dir_component);
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/union_find.rs | lib/src/union_find.rs | // Copyright 2024 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! This module implements a [`UnionFind<T>`] type which can be used to
//! efficiently calculate disjoint sets for any data type.
use std::collections::HashMap;
use std::hash::Hash;
#[derive(Clone, Copy)]
struct Node<T> {
    // Parent pointer; equal to the node's own key for set representatives.
    root: T,
    // Number of elements in the set; only meaningful on representatives.
    size: u32,
}
/// Implementation of the union-find algorithm:
/// <https://en.wikipedia.org/wiki/Disjoint-set_data_structure>
///
/// Joins disjoint sets by size to amortize cost.
#[derive(Clone)]
pub struct UnionFind<T> {
    roots: HashMap<T, Node<T>>,
}
impl<T> Default for UnionFind<T>
where
    T: Copy + Eq + Hash,
{
    fn default() -> Self {
        Self::new()
    }
}
impl<T> UnionFind<T>
where
    T: Copy + Eq + Hash,
{
    /// Creates a new empty UnionFind data structure.
    pub fn new() -> Self {
        Self {
            roots: HashMap::new(),
        }
    }
    /// Returns the root identifying the union this item is a part of.
    pub fn find(&mut self, item: T) -> T {
        self.find_node(item).root
    }
    /// Looks up (or lazily creates) the representative node for `item`,
    /// compressing paths along the way.
    fn find_node(&mut self, item: T) -> Node<T> {
        let Some(&node) = self.roots.get(&item) else {
            // Unknown item: implicitly create a singleton set for it.
            let fresh = Node {
                root: item,
                size: 1,
            };
            self.roots.insert(item, fresh);
            return fresh;
        };
        if node.root == item {
            // `item` is its own representative.
            return node;
        }
        // Path compression: re-point `item` directly at the representative.
        let representative = self.find_node(node.root);
        self.roots.insert(item, representative);
        representative
    }
    /// Unions the disjoint sets connected to `a` and `b`.
    pub fn union(&mut self, a: T, b: T) {
        let node_a = self.find_node(a);
        let node_b = self.find_node(b);
        if node_a.root == node_b.root {
            return; // Already in the same set.
        }
        // Union by size: the larger set's representative wins; ties go to `a`.
        let merged = Node {
            root: if node_a.size < node_b.size {
                node_b.root
            } else {
                node_a.root
            },
            size: node_a.size + node_b.size,
        };
        self.roots.insert(node_a.root, merged);
        self.roots.insert(node_b.root, merged);
    }
}
#[cfg(test)]
mod tests {
    use itertools::Itertools as _;
    use super::*;
    #[test]
    fn test_basic() {
        let mut union_find = UnionFind::<i32>::new();
        // Everything starts as a singleton.
        assert_eq!(union_find.find(1), 1);
        assert_eq!(union_find.find(2), 2);
        assert_eq!(union_find.find(3), 3);
        // Make two pair sets. This implicitly adds node 4.
        union_find.union(1, 2);
        union_find.union(3, 4);
        assert_eq!(union_find.find(1), union_find.find(2));
        assert_eq!(union_find.find(3), union_find.find(4));
        assert_ne!(union_find.find(1), union_find.find(3));
        // Unioning the pairs gives everything the same root.
        union_find.union(1, 3);
        assert!(
            [
                union_find.find(1),
                union_find.find(2),
                union_find.find(3),
                union_find.find(4),
            ]
            .iter()
            .all_equal()
        );
    }
    #[test]
    fn test_union_by_size() {
        let mut union_find = UnionFind::<i32>::new();
        // Create a set of 3 and a set of 2.
        union_find.union(1, 2);
        union_find.union(2, 3);
        union_find.union(4, 5);
        let set3 = union_find.find(1);
        let set2 = union_find.find(4);
        assert_ne!(set3, set2);
        // Merging them always chooses the larger set.
        let mut large_first = union_find.clone();
        large_first.union(1, 4);
        assert_eq!(large_first.find(1), set3);
        assert_eq!(large_first.find(4), set3);
        // Argument order doesn't matter: size decides the winner.
        let mut small_first = union_find.clone();
        small_first.union(4, 1);
        assert_eq!(small_first.find(1), set3);
        assert_eq!(small_first.find(4), set3);
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/extensions_map.rs | lib/src/extensions_map.rs | // Copyright 2024 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::any::Any;
use std::any::TypeId;
use std::collections::HashMap;
/// Type-safe map that stores objects of arbitrary types.
///
/// This allows extensions to store and retrieve their own types unknown to
/// jj_lib safely.
#[derive(Default)]
pub struct ExtensionsMap {
    // One slot per concrete type, keyed by its TypeId.
    values: HashMap<TypeId, Box<dyn Any>>,
}
impl ExtensionsMap {
    /// Creates an empty ExtensionsMap.
    pub fn empty() -> Self {
        Self::default()
    }
    /// Returns the specified type if it has already been inserted.
    pub fn get<V: Any>(&self) -> Option<&V> {
        // Values are keyed by their own TypeId, so the downcast always
        // succeeds for a present entry.
        let boxed = self.values.get(&TypeId::of::<V>())?;
        boxed.downcast_ref::<V>()
    }
    /// Inserts a new instance of the specified type.
    ///
    /// Requires that this type has not been inserted before.
    pub fn insert<V: Any>(&mut self, value: V) {
        let previous = self.values.insert(TypeId::of::<V>(), Box::new(value));
        assert!(previous.is_none());
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Marker types used to exercise storage of distinct types.
    struct TestTypeA;
    impl TestTypeA {
        fn get_a(&self) -> &'static str {
            "a"
        }
    }
    struct TestTypeB;
    impl TestTypeB {
        fn get_b(&self) -> &'static str {
            "b"
        }
    }
    #[test]
    fn test_empty() {
        // A fresh map contains no values of any type.
        let extensions_map = ExtensionsMap::empty();
        assert!(extensions_map.get::<TestTypeA>().is_none());
        assert!(extensions_map.get::<TestTypeB>().is_none());
    }
    #[test]
    fn test_retrieval() {
        // Each inserted type is retrievable independently of the others.
        let mut extensions_map = ExtensionsMap::empty();
        extensions_map.insert(TestTypeA);
        extensions_map.insert(TestTypeB);
        assert_eq!(
            extensions_map
                .get::<TestTypeA>()
                .map(|a| a.get_a())
                .unwrap_or(""),
            "a"
        );
        assert_eq!(
            extensions_map
                .get::<TestTypeB>()
                .map(|b| b.get_b())
                .unwrap_or(""),
            "b"
        );
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/dag_walk.rs | lib/src/dag_walk.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! General-purpose DAG algorithms.
use std::collections::BinaryHeap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::convert::Infallible;
use std::hash::Hash;
use std::iter;
use std::mem;
use itertools::Itertools as _;
use smallvec::SmallVec;
use smallvec::smallvec_inline;
/// Traverses nodes from `start` in depth-first order.
pub fn dfs<T, ID, II, NI>(
    start: II,
    id_fn: impl Fn(&T) -> ID,
    mut neighbors_fn: impl FnMut(&T) -> NI,
) -> impl Iterator<Item = T>
where
    ID: Hash + Eq,
    II: IntoIterator<Item = T>,
    NI: IntoIterator<Item = T>,
{
    // Delegate to the fallible variant; the error type is infallible, so the
    // Ok pattern below is irrefutable.
    let infallible_neighbors = move |node: &T| to_infallibe_iter(neighbors_fn(node));
    dfs_ok(to_infallibe_iter(start), id_fn, infallible_neighbors).map(|result| {
        let Ok(node) = result;
        node
    })
}
/// Traverses nodes from `start` in depth-first order.
///
/// An `Err` is emitted as a node with no neighbors. Caller may decide to
/// short-circuit on it.
pub fn dfs_ok<T, ID, E, II, NI>(
    start: II,
    id_fn: impl Fn(&T) -> ID,
    mut neighbors_fn: impl FnMut(&T) -> NI,
) -> impl Iterator<Item = Result<T, E>>
where
    ID: Hash + Eq,
    II: IntoIterator<Item = Result<T, E>>,
    NI: IntoIterator<Item = Result<T, E>>,
{
    let mut work: Vec<Result<T, E>> = start.into_iter().collect();
    let mut visited: HashSet<ID> = HashSet::new();
    iter::from_fn(move || {
        loop {
            // Terminate on an empty stack; yield an error as a leaf node.
            let node = match work.pop()? {
                Ok(node) => node,
                Err(err) => return Some(Err(err)),
            };
            let id = id_fn(&node);
            if visited.contains(&id) {
                continue;
            }
            work.extend(neighbors_fn(&node));
            visited.insert(id);
            return Some(Ok(node));
        }
    })
}
/// Builds a list of nodes reachable from the `start` where neighbors come
/// before the node itself.
///
/// If the graph has cycle, `cycle_fn()` is called with one of the nodes
/// involved in the cycle.
pub fn topo_order_forward<T, ID, E, II, NI>(
    start: II,
    id_fn: impl Fn(&T) -> ID,
    mut neighbors_fn: impl FnMut(&T) -> NI,
    cycle_fn: impl FnOnce(T) -> E,
) -> Result<Vec<T>, E>
where
    ID: Hash + Eq + Clone,
    II: IntoIterator<Item = T>,
    NI: IntoIterator<Item = T>,
{
    // Delegate to the fallible variant with every node wrapped in Ok(_).
    let ok_neighbors = move |node: &T| to_ok_iter(neighbors_fn(node));
    topo_order_forward_ok(to_ok_iter(start), id_fn, ok_neighbors, cycle_fn)
}
/// Builds a list of `Ok` nodes reachable from the `start` where neighbors come
/// before the node itself.
///
/// If `start` or `neighbors_fn()` yields an `Err`, this function terminates and
/// returns the error. If the graph has cycle, `cycle_fn()` is called with one
/// of the nodes involved in the cycle.
pub fn topo_order_forward_ok<T, ID, E, II, NI>(
    start: II,
    id_fn: impl Fn(&T) -> ID,
    mut neighbors_fn: impl FnMut(&T) -> NI,
    cycle_fn: impl FnOnce(T) -> E,
) -> Result<Vec<T>, E>
where
    ID: Hash + Eq + Clone,
    II: IntoIterator<Item = Result<T, E>>,
    NI: IntoIterator<Item = Result<T, E>>,
{
    // Iterative post-order DFS: each node is pushed with a flag saying
    // whether its neighbors have already been expanded.
    let mut stack: Vec<(T, bool)> = start.into_iter().map(|r| Ok((r?, false))).try_collect()?;
    // Ids on the current DFS path; re-entering one of them means a cycle.
    let mut visiting = HashSet::new();
    // Ids already appended to `result`.
    let mut emitted = HashSet::new();
    let mut result = vec![];
    while let Some((node, neighbors_visited)) = stack.pop() {
        let id = id_fn(&node);
        if emitted.contains(&id) {
            continue;
        }
        if !neighbors_visited {
            if !visiting.insert(id.clone()) {
                // The node is its own ancestor: report the cycle.
                return Err(cycle_fn(node));
            }
            let neighbors_iter = neighbors_fn(&node).into_iter();
            stack.reserve(neighbors_iter.size_hint().0 + 1);
            // Re-push the node so it is emitted after all its neighbors.
            stack.push((node, true));
            for neighbor in neighbors_iter {
                stack.push((neighbor?, false));
            }
        } else {
            // All neighbors have been emitted; the node itself can follow.
            visiting.remove(&id);
            emitted.insert(id);
            result.push(node);
        }
    }
    Ok(result)
}
/// Builds a list of nodes reachable from the `start` where neighbors come after
/// the node itself.
///
/// If the graph has cycle, `cycle_fn()` is called with one of the nodes
/// involved in the cycle.
pub fn topo_order_reverse<T, ID, E, II, NI>(
    start: II,
    id_fn: impl Fn(&T) -> ID,
    mut neighbors_fn: impl FnMut(&T) -> NI,
    cycle_fn: impl FnOnce(T) -> E,
) -> Result<Vec<T>, E>
where
    ID: Hash + Eq + Clone,
    II: IntoIterator<Item = T>,
    NI: IntoIterator<Item = T>,
{
    // Delegate to the fallible variant with every node wrapped in Ok(_).
    let ok_neighbors = move |node: &T| to_ok_iter(neighbors_fn(node));
    topo_order_reverse_ok(to_ok_iter(start), id_fn, ok_neighbors, cycle_fn)
}
/// Builds a list of `Ok` nodes reachable from the `start` where neighbors come
/// after the node itself.
///
/// If `start` or `neighbors_fn()` yields an `Err`, this function terminates and
/// returns the error. If the graph has cycle, `cycle_fn()` is called with one
/// of the nodes involved in the cycle.
pub fn topo_order_reverse_ok<T, ID, E, II, NI>(
    start: II,
    id_fn: impl Fn(&T) -> ID,
    neighbors_fn: impl FnMut(&T) -> NI,
    cycle_fn: impl FnOnce(T) -> E,
) -> Result<Vec<T>, E>
where
    ID: Hash + Eq + Clone,
    II: IntoIterator<Item = Result<T, E>>,
    NI: IntoIterator<Item = Result<T, E>>,
{
    // Forward topological order, reversed in place.
    topo_order_forward_ok(start, id_fn, neighbors_fn, cycle_fn).map(|mut nodes| {
        nodes.reverse();
        nodes
    })
}
/// Like `topo_order_reverse()`, but can iterate linear DAG lazily.
///
/// The DAG is supposed to be (mostly) topologically ordered by `T: Ord`.
/// For example, topological order of chronological data should respect
/// timestamp (except a few outliers caused by clock skew.)
///
/// Use `topo_order_reverse()` if the DAG is heavily branched. This can
/// only process linear part lazily.
///
/// If the graph has cycle, `cycle_fn()` is called with one of the nodes
/// involved in the cycle.
pub fn topo_order_reverse_lazy<T, ID, E, II, NI>(
    start: II,
    id_fn: impl Fn(&T) -> ID,
    mut neighbors_fn: impl FnMut(&T) -> NI,
    cycle_fn: impl FnMut(T) -> E,
) -> impl Iterator<Item = Result<T, E>>
where
    T: Ord,
    ID: Hash + Eq + Clone,
    II: IntoIterator<Item = T>,
    NI: IntoIterator<Item = T>,
{
    // Delegate to the fallible variant with every node wrapped in Ok(_).
    let ok_neighbors = move |node: &T| to_ok_iter(neighbors_fn(node));
    topo_order_reverse_lazy_ok(to_ok_iter(start), id_fn, ok_neighbors, cycle_fn)
}
/// Like `topo_order_reverse_ok()`, but can iterate linear DAG lazily.
///
/// The returned iterator short-circuits at an `Err`. Pending non-linear nodes
/// before the `Err` will be discarded. If the graph has cycle, `cycle_fn()` is
/// called with one of the nodes involved in the cycle.
pub fn topo_order_reverse_lazy_ok<T, ID, E, II, NI>(
    start: II,
    id_fn: impl Fn(&T) -> ID,
    mut neighbors_fn: impl FnMut(&T) -> NI,
    mut cycle_fn: impl FnMut(T) -> E,
) -> impl Iterator<Item = Result<T, E>>
where
    T: Ord,
    ID: Hash + Eq + Clone,
    II: IntoIterator<Item = Result<T, E>>,
    NI: IntoIterator<Item = Result<T, E>>,
{
    // All iteration state lives in the moved-in `state`; each call to the
    // closure advances it by one node.
    let mut state = TopoOrderReverseLazyInner::empty();
    state.extend(start);
    iter::from_fn(move || state.next(&id_fn, &mut neighbors_fn, &mut cycle_fn))
}
// Iteration state for topo_order_reverse_lazy_ok().
#[derive(Clone, Debug)]
struct TopoOrderReverseLazyInner<T, ID, E> {
    // Frontier of nodes still to be visited.
    start: Vec<T>,
    // Buffered output in forward order, drained by pop() from the back.
    result: Vec<Result<T, E>>,
    // Ids already yielded; inserting a duplicate indicates a cycle.
    emitted: HashSet<ID>,
}
impl<T: Ord, ID: Hash + Eq + Clone, E> TopoOrderReverseLazyInner<T, ID, E> {
    /// Creates a state with no start nodes and no buffered results.
    fn empty() -> Self {
        Self {
            start: Vec::new(),
            result: Vec::new(),
            emitted: HashSet::new(),
        }
    }
    /// Adds more start nodes. On the first `Err`, drops all pending start
    /// nodes and queues the error so iteration terminates after emitting it.
    fn extend(&mut self, iter: impl IntoIterator<Item = Result<T, E>>) {
        let iter = iter.into_iter();
        self.start.reserve(iter.size_hint().0);
        for res in iter {
            if let Ok(node) = res {
                self.start.push(node);
            } else {
                // Emit the error and terminate
                self.start.clear();
                self.result.insert(0, res);
                return;
            }
        }
    }
    /// Produces the next node in reverse topological order, or `None` when
    /// the iteration is exhausted.
    fn next<NI: IntoIterator<Item = Result<T, E>>>(
        &mut self,
        id_fn: impl Fn(&T) -> ID,
        mut neighbors_fn: impl FnMut(&T) -> NI,
        mut cycle_fn: impl FnMut(T) -> E,
    ) -> Option<Result<T, E>> {
        // Flush nodes buffered by a previous look-ahead first.
        if let Some(res) = self.result.pop() {
            return Some(res);
        }
        // Fast path for linear DAG
        if self.start.len() <= 1 {
            let node = self.start.pop()?;
            self.extend(neighbors_fn(&node));
            if self.emitted.insert(id_fn(&node)) {
                return Some(Ok(node));
            } else {
                // Seeing an already-emitted node again means a cycle.
                return Some(Err(cycle_fn(node)));
            }
        }
        // Extract graph nodes based on T's order, and sort them by using ids
        // (because we wouldn't want to clone T itself)
        let start_ids = self.start.iter().map(&id_fn).collect_vec();
        match look_ahead_sub_graph(mem::take(&mut self.start), &id_fn, &mut neighbors_fn) {
            Ok((mut node_map, neighbor_ids_map, remainder)) => {
                self.start = remainder;
                let sorted_ids = match topo_order_forward_ok(
                    start_ids.iter().map(Ok),
                    |id| *id,
                    |id| neighbor_ids_map[id].iter().map(Ok),
                    |id| cycle_fn(node_map.remove(id).unwrap()),
                ) {
                    Ok(ids) => ids,
                    Err(err) => return Some(Err(err)),
                };
                // Buffer in forward order; pop() then yields reverse order.
                self.result.reserve(sorted_ids.len());
                for id in sorted_ids {
                    let (id, node) = node_map.remove_entry(id).unwrap();
                    if self.emitted.insert(id) {
                        self.result.push(Ok(node));
                    } else {
                        self.result.push(Err(cycle_fn(node)));
                    }
                }
                self.result.pop()
            }
            Err(err) => Some(Err(err)),
        }
    }
}
/// Splits DAG at the first single fork point, and builds a list of nodes
/// reachable from the `start` where neighbors come after the node itself.
///
/// This is a building block for lazy DAG iterators similar to
/// [`topo_order_reverse_lazy()`]. The `start` list will be updated to include
/// the next nodes to visit.
///
/// If the split chunk of the graph has cycle, `cycle_fn()` is called with one
/// of the nodes involved in the cycle.
pub fn topo_order_reverse_chunked<T, ID, E, NI>(
    start: &mut Vec<T>,
    id_fn: impl Fn(&T) -> ID,
    mut neighbors_fn: impl FnMut(&T) -> NI,
    mut cycle_fn: impl FnMut(T) -> E,
) -> Result<SmallVec<[T; 1]>, E>
where
    T: Ord,
    ID: Hash + Eq + Clone,
    NI: IntoIterator<Item = Result<T, E>>,
{
    // Fast path for linear DAG
    if start.len() <= 1 {
        let Some(node) = start.pop() else {
            return Ok(SmallVec::new());
        };
        // Replace the start list with the node's neighbors, emit the node.
        let neighbors_iter = neighbors_fn(&node).into_iter();
        start.reserve(neighbors_iter.size_hint().0);
        for neighbor in neighbors_iter {
            start.push(neighbor?);
        }
        return Ok(smallvec_inline![node]);
    }
    // Extract graph nodes based on T's order, and sort them by using ids
    // (because we wouldn't want to clone T itself)
    let start_ids = start.iter().map(&id_fn).collect_vec();
    let (mut node_map, neighbor_ids_map, remainder) =
        look_ahead_sub_graph(mem::take(start), &id_fn, &mut neighbors_fn)?;
    *start = remainder;
    let sorted_ids = topo_order_forward_ok(
        start_ids.iter().map(Ok),
        |id| *id,
        |id| neighbor_ids_map[id].iter().map(Ok),
        |id| cycle_fn(node_map.remove(id).unwrap()),
    )?;
    // Forward order reversed to get the neighbors-after-node ordering.
    let sorted_nodes = sorted_ids
        .iter()
        .rev()
        .map(|&id| node_map.remove(id).unwrap())
        .collect();
    Ok(sorted_nodes)
}
/// Splits DAG at single fork point, and extracts branchy part as sub graph.
///
/// ```text
/// o | C
/// | o B
/// |/ <---- split here (A->B or A->C would create cycle)
/// o A
/// ```
///
/// If a branch reached to root (empty neighbors), the graph can't be split
/// anymore because the other branch may be connected to a descendant of
/// the rooted branch.
///
/// ```text
/// o | C
/// | o B
/// | <---- can't split here (there may be edge A->B)
/// o A
/// ```
///
/// We assume the graph is (mostly) topologically ordered by `T: Ord`.
#[expect(clippy::type_complexity)]
fn look_ahead_sub_graph<T, ID, E, NI>(
    start: Vec<T>,
    id_fn: impl Fn(&T) -> ID,
    mut neighbors_fn: impl FnMut(&T) -> NI,
) -> Result<(HashMap<ID, T>, HashMap<ID, Vec<ID>>, Vec<T>), E>
where
    T: Ord,
    ID: Hash + Eq + Clone,
    NI: IntoIterator<Item = Result<T, E>>,
{
    // Max-heap: the greatest (topologically-latest) node is expanded first.
    let mut queue: BinaryHeap<T> = start.into();
    // Build separate node/neighbors maps since lifetime is different at caller
    let mut node_map: HashMap<ID, T> = HashMap::new();
    let mut neighbor_ids_map: HashMap<ID, Vec<ID>> = HashMap::new();
    let mut has_reached_root = false;
    // Keep expanding until the frontier converges to a single node (the fork
    // point); once a root has been reached, we can no longer split at all.
    while queue.len() > 1 || node_map.is_empty() || has_reached_root {
        let Some(node) = queue.pop() else {
            break;
        };
        let node_id = id_fn(&node);
        if node_map.contains_key(&node_id) {
            continue;
        }
        let mut neighbor_ids = Vec::new();
        let mut neighbors_iter = neighbors_fn(&node).into_iter().peekable();
        has_reached_root |= neighbors_iter.peek().is_none();
        for neighbor in neighbors_iter {
            let neighbor = neighbor?;
            neighbor_ids.push(id_fn(&neighbor));
            queue.push(neighbor);
        }
        node_map.insert(node_id.clone(), node);
        neighbor_ids_map.insert(node_id, neighbor_ids);
    }
    assert!(queue.len() <= 1, "order of remainder shouldn't matter");
    let remainder = queue.into_vec();
    // Omit unvisited neighbors
    if let Some(unvisited_id) = remainder.first().map(&id_fn) {
        for neighbor_ids in neighbor_ids_map.values_mut() {
            neighbor_ids.retain(|id| *id != unvisited_id);
        }
    }
    Ok((node_map, neighbor_ids_map, remainder))
}
/// Builds a list of nodes reachable from the `start` where neighbors come after
/// the node itself.
///
/// Unlike `topo_order_reverse()`, nodes are sorted in reverse `T: Ord` order so
/// long as they can respect the topological requirement.
///
/// If the graph has cycle, `cycle_fn()` is called with one of the nodes
/// involved in the cycle.
pub fn topo_order_reverse_ord<T, ID, E, II, NI>(
    start: II,
    id_fn: impl Fn(&T) -> ID,
    mut neighbors_fn: impl FnMut(&T) -> NI,
    cycle_fn: impl FnOnce(T) -> E,
) -> Result<Vec<T>, E>
where
    T: Ord,
    ID: Hash + Eq + Clone,
    II: IntoIterator<Item = T>,
    NI: IntoIterator<Item = T>,
{
    // Delegate to the fallible variant with every node wrapped in Ok(_).
    let ok_neighbors = move |node: &T| to_ok_iter(neighbors_fn(node));
    topo_order_reverse_ord_ok(to_ok_iter(start), id_fn, ok_neighbors, cycle_fn)
}
/// Builds a list of `Ok` nodes reachable from the `start` where neighbors come
/// after the node itself.
///
/// Unlike `topo_order_reverse_ok()`, nodes are sorted in reverse `T: Ord` order
/// so long as they can respect the topological requirement.
///
/// If `start` or `neighbors_fn()` yields an `Err`, this function terminates and
/// returns the error. If the graph has cycle, `cycle_fn()` is called with one
/// of the nodes involved in the cycle.
pub fn topo_order_reverse_ord_ok<T, ID, E, II, NI>(
    start: II,
    id_fn: impl Fn(&T) -> ID,
    mut neighbors_fn: impl FnMut(&T) -> NI,
    cycle_fn: impl FnOnce(T) -> E,
) -> Result<Vec<T>, E>
where
    T: Ord,
    ID: Hash + Eq + Clone,
    II: IntoIterator<Item = Result<T, E>>,
    NI: IntoIterator<Item = Result<T, E>>,
{
    // A node that some other node points at; `node` is populated once the
    // node itself has been visited.
    struct InnerNode<T> {
        node: Option<T>,
        indegree: usize,
    }
    // DFS to accumulate incoming edges
    let mut stack: Vec<T> = start.into_iter().try_collect()?;
    // Nodes with no incoming edges seen so far (candidates to emit first).
    let mut head_node_map: HashMap<ID, T> = HashMap::new();
    let mut inner_node_map: HashMap<ID, InnerNode<T>> = HashMap::new();
    let mut neighbor_ids_map: HashMap<ID, Vec<ID>> = HashMap::new();
    while let Some(node) = stack.pop() {
        let node_id = id_fn(&node);
        if neighbor_ids_map.contains_key(&node_id) {
            continue; // Already visited
        }
        let neighbors_iter = neighbors_fn(&node).into_iter();
        let pos = stack.len();
        stack.reserve(neighbors_iter.size_hint().0);
        for neighbor in neighbors_iter {
            stack.push(neighbor?);
        }
        let neighbor_ids = stack[pos..].iter().map(&id_fn).collect_vec();
        if let Some(inner) = inner_node_map.get_mut(&node_id) {
            inner.node = Some(node);
        } else {
            head_node_map.insert(node_id.clone(), node);
        }
        for neighbor_id in &neighbor_ids {
            if let Some(inner) = inner_node_map.get_mut(neighbor_id) {
                inner.indegree += 1;
            } else {
                // First incoming edge seen for this neighbor; it may or may
                // not have been visited as a head already.
                let inner = InnerNode {
                    node: head_node_map.remove(neighbor_id),
                    indegree: 1,
                };
                inner_node_map.insert(neighbor_id.clone(), inner);
            }
        }
        neighbor_ids_map.insert(node_id, neighbor_ids);
    }
    debug_assert!(
        head_node_map
            .keys()
            .all(|id| !inner_node_map.contains_key(id))
    );
    debug_assert!(inner_node_map.values().all(|inner| inner.node.is_some()));
    debug_assert!(inner_node_map.values().all(|inner| inner.indegree > 0));
    // Using Kahn's algorithm
    let mut queue: BinaryHeap<T> = head_node_map.into_values().collect();
    let mut result = Vec::new();
    while let Some(node) = queue.pop() {
        let node_id = id_fn(&node);
        result.push(node);
        // Decrement indegrees; a neighbor becomes ready at indegree 0.
        for neighbor_id in neighbor_ids_map.remove(&node_id).unwrap() {
            let inner = inner_node_map.get_mut(&neighbor_id).unwrap();
            inner.indegree -= 1;
            if inner.indegree == 0 {
                queue.push(inner.node.take().unwrap());
                inner_node_map.remove(&neighbor_id);
            }
        }
    }
    // Any node left with a nonzero indegree is part of a cycle.
    if let Some(inner) = inner_node_map.into_values().next() {
        Err(cycle_fn(inner.node.unwrap()))
    } else {
        Ok(result)
    }
}
/// Find nodes in the start set that are not reachable from other nodes in the
/// start set.
pub fn heads<T, ID, II, NI>(
    start: II,
    id_fn: impl Fn(&T) -> ID,
    mut neighbors_fn: impl FnMut(&T) -> NI,
) -> HashSet<T>
where
    T: Hash + Eq + Clone,
    ID: Hash + Eq,
    II: IntoIterator<Item = T>,
    NI: IntoIterator<Item = T>,
{
    // The infallible variant can never return Err, so destructuring Ok is
    // irrefutable.
    let infallible_neighbors = move |node: &T| to_infallibe_iter(neighbors_fn(node));
    let result = heads_ok(to_infallibe_iter(start), id_fn, infallible_neighbors);
    let Ok(heads) = result;
    heads
}
/// Finds `Ok` nodes in the start set that are not reachable from other nodes in
/// the start set.
///
/// If `start` or `neighbors_fn()` yields an `Err`, this function terminates and
/// returns the error.
pub fn heads_ok<T, ID, E, II, NI>(
    start: II,
    id_fn: impl Fn(&T) -> ID,
    mut neighbors_fn: impl FnMut(&T) -> NI,
) -> Result<HashSet<T>, E>
where
    T: Hash + Eq + Clone,
    ID: Hash + Eq,
    II: IntoIterator<Item = Result<T, E>>,
    NI: IntoIterator<Item = Result<T, E>>,
{
    let mut heads: HashSet<T> = start.into_iter().try_collect()?;
    // Do a BFS until we have only one item left in the frontier. That frontier must
    // have originated from one of the heads, and since there can't be cycles,
    // it won't be able to eliminate any other heads.
    let mut frontier: Vec<T> = heads.iter().cloned().collect();
    let mut visited: HashSet<ID> = heads.iter().map(&id_fn).collect();
    let mut root_reached = false;
    while frontier.len() > 1 || (!frontier.is_empty() && root_reached) {
        frontier = frontier
            .iter()
            .flat_map(|node| {
                let neighbors = neighbors_fn(node).into_iter().collect_vec();
                if neighbors.is_empty() {
                    // A root was reached: keep expanding the other branches,
                    // since they may still run into a remaining head.
                    root_reached = true;
                }
                neighbors
            })
            .try_collect()?;
        // Any start node reachable from another node is not a head.
        for node in &frontier {
            heads.remove(node);
        }
        // Only keep expanding nodes that haven't been visited yet.
        frontier.retain(|node| visited.insert(id_fn(node)));
    }
    Ok(heads)
}
/// Finds the closest common neighbor among the `set1` and `set2`.
pub fn closest_common_node<T, ID, II1, II2, NI>(
set1: II1,
set2: II2,
id_fn: impl Fn(&T) -> ID,
mut neighbors_fn: impl FnMut(&T) -> NI,
) -> Option<T>
where
ID: Hash + Eq,
II1: IntoIterator<Item = T>,
II2: IntoIterator<Item = T>,
NI: IntoIterator<Item = T>,
{
let neighbors_fn = move |node: &T| to_infallibe_iter(neighbors_fn(node));
let Ok(node) = closest_common_node_ok(
to_infallibe_iter(set1),
to_infallibe_iter(set2),
id_fn,
neighbors_fn,
);
node
}
/// Finds the closest common `Ok` neighbor among the `set1` and `set2`.
///
/// If the traverse reached to an `Err`, this function terminates and returns
/// the error.
pub fn closest_common_node_ok<T, ID, E, II1, II2, NI>(
set1: II1,
set2: II2,
id_fn: impl Fn(&T) -> ID,
mut neighbors_fn: impl FnMut(&T) -> NI,
) -> Result<Option<T>, E>
where
ID: Hash + Eq,
II1: IntoIterator<Item = Result<T, E>>,
II2: IntoIterator<Item = Result<T, E>>,
NI: IntoIterator<Item = Result<T, E>>,
{
let mut visited1 = HashSet::new();
let mut visited2 = HashSet::new();
// TODO: might be better to leave an Err so long as the work contains at
// least one Ok node. If a work1 node is included in visited2, it should be
// the closest node even if work2 had previously contained an Err.
let mut work1: Vec<Result<T, E>> = set1.into_iter().collect();
let mut work2: Vec<Result<T, E>> = set2.into_iter().collect();
while !work1.is_empty() || !work2.is_empty() {
let mut new_work1 = vec![];
for node in work1 {
let node = node?;
let id: ID = id_fn(&node);
if visited2.contains(&id) {
return Ok(Some(node));
}
if visited1.insert(id) {
for neighbor in neighbors_fn(&node) {
new_work1.push(neighbor);
}
}
}
work1 = new_work1;
let mut new_work2 = vec![];
for node in work2 {
let node = node?;
let id: ID = id_fn(&node);
if visited1.contains(&id) {
return Ok(Some(node));
}
if visited2.insert(id) {
for neighbor in neighbors_fn(&node) {
new_work2.push(neighbor);
}
}
}
work2 = new_work2;
}
Ok(None)
}
fn to_ok_iter<T, E>(iter: impl IntoIterator<Item = T>) -> impl Iterator<Item = Result<T, E>> {
iter.into_iter().map(Ok)
}
fn to_infallibe_iter<T>(
iter: impl IntoIterator<Item = T>,
) -> impl Iterator<Item = Result<T, Infallible>> {
to_ok_iter(iter)
}
#[cfg(test)]
mod tests {
use assert_matches::assert_matches;
use maplit::hashmap;
use maplit::hashset;
use super::*;
#[test]
fn test_dfs_ok() {
let neighbors = hashmap! {
'A' => vec![],
'B' => vec![Ok('A'), Err('X')],
'C' => vec![Ok('B')],
};
let id_fn = |node: &char| *node;
let neighbors_fn = |node: &char| neighbors[node].clone();
// Self and neighbor nodes shouldn't be lost at the error.
let nodes = dfs_ok([Ok('C')], id_fn, neighbors_fn).collect_vec();
assert_eq!(nodes, [Ok('C'), Ok('B'), Err('X'), Ok('A')]);
}
#[test]
fn test_topo_order_reverse_linear() {
// This graph:
// o C
// o B
// o A
let neighbors = hashmap! {
'A' => vec![],
'B' => vec!['A'],
'C' => vec!['B'],
};
let id_fn = |node: &char| *node;
let neighbors_fn = |node: &char| neighbors[node].clone();
let cycle_fn = |id| id;
let common = topo_order_reverse(vec!['C'], id_fn, neighbors_fn, cycle_fn).unwrap();
assert_eq!(common, vec!['C', 'B', 'A']);
let common = topo_order_reverse(vec!['C', 'B'], id_fn, neighbors_fn, cycle_fn).unwrap();
assert_eq!(common, vec!['C', 'B', 'A']);
let common = topo_order_reverse(vec!['B', 'C'], id_fn, neighbors_fn, cycle_fn).unwrap();
assert_eq!(common, vec!['C', 'B', 'A']);
let common: Vec<_> = topo_order_reverse_lazy(vec!['C'], id_fn, neighbors_fn, cycle_fn)
.try_collect()
.unwrap();
assert_eq!(common, vec!['C', 'B', 'A']);
let common: Vec<_> = topo_order_reverse_lazy(vec!['C', 'B'], id_fn, neighbors_fn, cycle_fn)
.try_collect()
.unwrap();
assert_eq!(common, vec!['C', 'B', 'A']);
let common: Vec<_> = topo_order_reverse_lazy(vec!['B', 'C'], id_fn, neighbors_fn, cycle_fn)
.try_collect()
.unwrap();
assert_eq!(common, vec!['C', 'B', 'A']);
let common = topo_order_reverse_ord(vec!['C'], id_fn, neighbors_fn, cycle_fn).unwrap();
assert_eq!(common, vec!['C', 'B', 'A']);
let common = topo_order_reverse_ord(vec!['C', 'B'], id_fn, neighbors_fn, cycle_fn).unwrap();
assert_eq!(common, vec!['C', 'B', 'A']);
let common = topo_order_reverse_ord(vec!['B', 'C'], id_fn, neighbors_fn, cycle_fn).unwrap();
assert_eq!(common, vec!['C', 'B', 'A']);
}
#[test]
fn test_topo_order_reverse_merge() {
// This graph:
// o F
// |\
// o | E
// | o D
// | o C
// | o B
// |/
// o A
let neighbors = hashmap! {
'A' => vec![],
'B' => vec!['A'],
'C' => vec!['B'],
'D' => vec!['C'],
'E' => vec!['A'],
'F' => vec!['E', 'D'],
};
let id_fn = |node: &char| *node;
let neighbors_fn = |node: &char| neighbors[node].clone();
let cycle_fn = |id| id;
let common = topo_order_reverse(vec!['F'], id_fn, neighbors_fn, cycle_fn).unwrap();
assert_eq!(common, vec!['F', 'E', 'D', 'C', 'B', 'A']);
let common =
topo_order_reverse(vec!['F', 'E', 'C'], id_fn, neighbors_fn, cycle_fn).unwrap();
assert_eq!(common, vec!['F', 'D', 'E', 'C', 'B', 'A']);
let common =
topo_order_reverse(vec!['F', 'D', 'E'], id_fn, neighbors_fn, cycle_fn).unwrap();
assert_eq!(common, vec!['F', 'D', 'C', 'B', 'E', 'A']);
let common: Vec<_> = topo_order_reverse_lazy(vec!['F'], id_fn, neighbors_fn, cycle_fn)
.try_collect()
.unwrap();
assert_eq!(common, vec!['F', 'E', 'D', 'C', 'B', 'A']);
let common: Vec<_> =
topo_order_reverse_lazy(vec!['F', 'E', 'C'], id_fn, neighbors_fn, cycle_fn)
.try_collect()
.unwrap();
assert_eq!(common, vec!['F', 'D', 'E', 'C', 'B', 'A']);
let common: Vec<_> =
topo_order_reverse_lazy(vec!['F', 'D', 'E'], id_fn, neighbors_fn, cycle_fn)
.try_collect()
.unwrap();
assert_eq!(common, vec!['F', 'D', 'C', 'B', 'E', 'A']);
let common = topo_order_reverse_ord(vec!['F'], id_fn, neighbors_fn, cycle_fn).unwrap();
assert_eq!(common, vec!['F', 'E', 'D', 'C', 'B', 'A']);
let common =
topo_order_reverse_ord(vec!['F', 'E', 'C'], id_fn, neighbors_fn, cycle_fn).unwrap();
assert_eq!(common, vec!['F', 'E', 'D', 'C', 'B', 'A']);
let common =
topo_order_reverse_ord(vec!['F', 'D', 'E'], id_fn, neighbors_fn, cycle_fn).unwrap();
assert_eq!(common, vec!['F', 'E', 'D', 'C', 'B', 'A']);
}
#[test]
fn test_topo_order_reverse_nested_merges() {
// This graph:
// o I
// |\
// | o H
// | |\
// | | o G
// | o | F
// | | o E
// o |/ D
// | o C
// o | B
// |/
// o A
let neighbors = hashmap! {
'A' => vec![],
'B' => vec!['A'],
'C' => vec!['A'],
'D' => vec!['B'],
'E' => vec!['C'],
'F' => vec!['C'],
'G' => vec!['E'],
'H' => vec!['F', 'G'],
'I' => vec!['D', 'H'],
};
let id_fn = |node: &char| *node;
let neighbors_fn = |node: &char| neighbors[node].clone();
let cycle_fn = |id| id;
let common = topo_order_reverse(vec!['I'], id_fn, neighbors_fn, cycle_fn).unwrap();
assert_eq!(common, vec!['I', 'D', 'B', 'H', 'F', 'G', 'E', 'C', 'A']);
let common: Vec<_> = topo_order_reverse_lazy(vec!['I'], id_fn, neighbors_fn, cycle_fn)
.try_collect()
.unwrap();
assert_eq!(common, vec!['I', 'D', 'B', 'H', 'F', 'G', 'E', 'C', 'A']);
let common = topo_order_reverse_ord(vec!['I'], id_fn, neighbors_fn, cycle_fn).unwrap();
assert_eq!(common, vec!['I', 'H', 'G', 'F', 'E', 'D', 'C', 'B', 'A']);
}
#[test]
fn test_topo_order_reverse_nested_merges_bad_order() {
// This graph:
// o I
// |\
// | |\
// | | |\
// | | | o h (h > I)
// | | |/|
// | | o | G
// | |/| o f
// | o |/ e (e > I, G)
// |/| o D
// o |/ C
// | o b (b > D)
// |/
// o A
let neighbors = hashmap! {
'A' => vec![],
'b' => vec!['A'],
'C' => vec!['A'],
'D' => vec!['b'],
'e' => vec!['C', 'b'],
'f' => vec!['D'],
'G' => vec!['e', 'D'],
'h' => vec!['G', 'f'],
'I' => vec!['C', 'e', 'G', 'h'],
};
let id_fn = |node: &char| *node;
let neighbors_fn = |node: &char| neighbors[node].clone();
let cycle_fn = |id| id;
let common = topo_order_reverse(vec!['I'], id_fn, neighbors_fn, cycle_fn).unwrap();
assert_eq!(common, vec!['I', 'h', 'G', 'e', 'C', 'f', 'D', 'b', 'A']);
let common: Vec<_> = topo_order_reverse_lazy(vec!['I'], id_fn, neighbors_fn, cycle_fn)
.try_collect()
.unwrap();
assert_eq!(common, vec!['I', 'h', 'G', 'e', 'C', 'f', 'D', 'b', 'A']);
let common = topo_order_reverse_ord(vec!['I'], id_fn, neighbors_fn, cycle_fn).unwrap();
assert_eq!(common, vec!['I', 'h', 'f', 'G', 'e', 'D', 'b', 'C', 'A']);
}
#[test]
fn test_topo_order_reverse_merge_bad_fork_order_at_root() {
// This graph:
// o E
// |\
// o | D
// | o C
// | o B
// |/
// o a (a > D, B)
let neighbors = hashmap! {
'a' => vec![],
'B' => vec!['a'],
'C' => vec!['B'],
'D' => vec!['a'],
'E' => vec!['D', 'C'],
};
let id_fn = |node: &char| *node;
let neighbors_fn = |node: &char| neighbors[node].clone();
let cycle_fn = |id| id;
let common = topo_order_reverse(vec!['E'], id_fn, neighbors_fn, cycle_fn).unwrap();
assert_eq!(common, vec!['E', 'D', 'C', 'B', 'a']);
// The root node 'a' is visited before 'C'. If the graph were split there,
// the branch 'C->B->a' would be orphaned.
let common: Vec<_> = topo_order_reverse_lazy(vec!['E'], id_fn, neighbors_fn, cycle_fn)
.try_collect()
.unwrap();
assert_eq!(common, vec!['E', 'D', 'C', 'B', 'a']);
let common = topo_order_reverse_ord(vec!['E'], id_fn, neighbors_fn, cycle_fn).unwrap();
assert_eq!(common, vec!['E', 'D', 'C', 'B', 'a']);
}
#[test]
fn test_topo_order_reverse_merge_and_linear() {
// This graph:
// o G
// |\
// | o F
// o | E
// | o D
// |/
// o C
// o B
// o A
let neighbors = hashmap! {
'A' => vec![],
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/lib.rs | lib/src/lib.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Jujutsu version control system.
#![warn(missing_docs)]
#![deny(unused_must_use)]
#![forbid(unsafe_code)]
// Needed so that proc macros can be used inside jj_lib and by external crates
// that depend on it.
// See:
// - https://github.com/rust-lang/rust/issues/54647#issuecomment-432015102
// - https://github.com/rust-lang/rust/issues/54363
extern crate self as jj_lib;
#[macro_use]
pub mod content_hash;
pub mod absorb;
pub mod annotate;
pub mod backend;
pub mod bisect;
pub mod commit;
pub mod commit_builder;
pub mod config;
mod config_resolver;
pub mod conflict_labels;
pub mod conflicts;
pub mod copies;
pub mod dag_walk;
pub mod default_index;
pub mod default_submodule_store;
pub mod diff;
pub mod diff_presentation;
pub mod dsl_util;
pub(crate) mod eol;
pub mod evolution;
pub mod extensions_map;
pub mod file_util;
pub mod files;
pub mod fileset;
mod fileset_parser;
pub mod fix;
pub mod fmt_util;
pub mod fsmonitor;
#[cfg(feature = "git")]
pub mod git;
#[cfg(feature = "git")]
pub mod git_backend;
#[cfg(feature = "git")]
mod git_subprocess;
pub mod gitignore;
pub mod gpg_signing;
pub mod graph;
pub mod hex_util;
pub mod id_prefix;
pub mod index;
pub mod iter_util;
pub mod local_working_copy;
pub mod lock;
pub mod matchers;
pub mod merge;
pub mod merged_tree;
pub mod object_id;
pub mod op_heads_store;
pub mod op_store;
pub mod op_walk;
pub mod operation;
#[expect(missing_docs)]
pub mod protos;
pub mod ref_name;
pub mod refs;
pub mod repo;
pub mod repo_path;
pub mod revset;
mod revset_parser;
pub mod rewrite;
#[cfg(feature = "testing")]
pub mod secret_backend;
pub mod settings;
pub mod signing;
pub mod tree_merge;
// TODO: This file is mostly used for testing, whenever we no longer require it
// in the lib it should be moved to the examples (e.g
// "examples/simple-backend/").
pub mod simple_backend;
pub mod simple_op_heads_store;
pub mod simple_op_store;
pub mod ssh_signing;
pub mod stacked_table;
pub mod store;
pub mod str_util;
pub mod submodule_store;
#[cfg(feature = "testing")]
pub mod test_signing_backend;
pub mod time_util;
pub mod trailer;
pub mod transaction;
pub mod tree;
pub mod tree_builder;
pub mod union_find;
pub mod view;
pub mod working_copy;
pub mod workspace;
#[cfg(test)]
mod tests {
use tempfile::TempDir;
/// Unlike `testutils::new_temp_dir()`, this function doesn't set up
/// hermetic Git environment.
pub fn new_temp_dir() -> TempDir {
tempfile::Builder::new()
.prefix("jj-test-")
.tempdir()
.unwrap()
}
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/index.rs | lib/src/index.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Interfaces for indexes of the commits in a repository.
use std::any::Any;
use std::fmt::Debug;
use std::sync::Arc;
use itertools::Itertools as _;
use thiserror::Error;
use crate::backend::ChangeId;
use crate::backend::CommitId;
use crate::commit::Commit;
use crate::object_id::HexPrefix;
use crate::object_id::PrefixResolution;
use crate::operation::Operation;
use crate::repo_path::RepoPathBuf;
use crate::revset::ResolvedExpression;
use crate::revset::Revset;
use crate::revset::RevsetEvaluationError;
use crate::store::Store;
/// Returned by [`IndexStore`] in the event of an error.
#[derive(Debug, Error)]
pub enum IndexStoreError {
/// Error reading a [`ReadonlyIndex`] from the [`IndexStore`].
#[error("Failed to read index")]
Read(#[source] Box<dyn std::error::Error + Send + Sync>),
/// Error writing a [`MutableIndex`] to the [`IndexStore`].
#[error("Failed to write index")]
Write(#[source] Box<dyn std::error::Error + Send + Sync>),
}
/// Result of [`IndexStore`] operations.
pub type IndexStoreResult<T> = Result<T, IndexStoreError>;
/// Returned by [`Index`] backend in the event of an error.
#[derive(Debug, Error)]
pub enum IndexError {
/// Error returned if [`Index::all_heads_for_gc()`] is not supported by the
/// [`Index`] backend.
#[error("Cannot collect all heads by index of this type")]
AllHeadsForGcUnsupported,
/// Some other index error.
#[error(transparent)]
Other(Box<dyn std::error::Error + Send + Sync>),
}
/// Result of [`Index`] operations.
pub type IndexResult<T> = Result<T, IndexError>;
/// Defines the interface for types that provide persistent storage for an
/// index.
pub trait IndexStore: Any + Send + Sync + Debug {
/// Returns a name representing the type of index that the `IndexStore` is
/// compatible with. For example, the `IndexStore` for the default index
/// returns "default".
fn name(&self) -> &str;
/// Returns the index at the specified operation.
fn get_index_at_op(
&self,
op: &Operation,
store: &Arc<Store>,
) -> IndexStoreResult<Box<dyn ReadonlyIndex>>;
/// Writes `index` to the index store and returns a read-only version of the
/// index.
fn write_index(
&self,
index: Box<dyn MutableIndex>,
op: &Operation,
) -> IndexStoreResult<Box<dyn ReadonlyIndex>>;
}
impl dyn IndexStore {
/// Returns reference of the implementation type.
pub fn downcast_ref<T: IndexStore>(&self) -> Option<&T> {
(self as &dyn Any).downcast_ref()
}
}
/// Defines the interface for types that provide an index of the commits in a
/// repository by [`CommitId`].
pub trait Index: Send + Sync {
/// Returns the minimum prefix length to disambiguate `commit_id` from other
/// commits in the index. The length returned is the number of hexadecimal
/// digits in the minimum prefix.
///
/// If the given `commit_id` doesn't exist, returns the minimum prefix
/// length which matches none of the commits in the index.
fn shortest_unique_commit_id_prefix_len(&self, commit_id: &CommitId) -> IndexResult<usize>;
/// Searches the index for commit IDs matching `prefix`. Returns a
/// [`PrefixResolution`] with a [`CommitId`] if the prefix matches a single
/// commit.
fn resolve_commit_id_prefix(
&self,
prefix: &HexPrefix,
) -> IndexResult<PrefixResolution<CommitId>>;
/// Returns true if `commit_id` is present in the index.
fn has_id(&self, commit_id: &CommitId) -> IndexResult<bool>;
/// Returns true if `ancestor_id` commit is an ancestor of the
/// `descendant_id` commit, or if `ancestor_id` equals `descendant_id`.
fn is_ancestor(&self, ancestor_id: &CommitId, descendant_id: &CommitId) -> IndexResult<bool>;
/// Returns the best common ancestor or ancestors of the commits in `set1`
/// and `set2`. A "best common ancestor" has no descendants that are also
/// common ancestors.
fn common_ancestors(&self, set1: &[CommitId], set2: &[CommitId]) -> IndexResult<Vec<CommitId>>;
/// Heads among all indexed commits at the associated operation.
///
/// Suppose the index contains all the historical heads and their ancestors
/// reachable from the associated operation, this function returns the heads
/// that should be preserved on garbage collection.
///
/// The iteration order is unspecified.
fn all_heads_for_gc(&self) -> IndexResult<Box<dyn Iterator<Item = CommitId> + '_>>;
/// Returns the subset of commit IDs in `candidates` which are not ancestors
/// of other commits in `candidates`. If a commit id is duplicated in the
/// `candidates` list it will appear at most once in the output.
fn heads(&self, candidates: &mut dyn Iterator<Item = &CommitId>) -> IndexResult<Vec<CommitId>>;
/// Returns iterator over paths changed at the specified commit. The paths
/// are sorted. Returns `None` if the commit wasn't indexed.
fn changed_paths_in_commit(
&self,
commit_id: &CommitId,
) -> IndexResult<Option<Box<dyn Iterator<Item = RepoPathBuf> + '_>>>;
/// Resolves the revset `expression` against the index and corresponding
/// `store`.
fn evaluate_revset(
&self,
expression: &ResolvedExpression,
store: &Arc<Store>,
) -> Result<Box<dyn Revset + '_>, RevsetEvaluationError>;
}
#[expect(missing_docs)]
pub trait ReadonlyIndex: Any + Send + Sync {
fn as_index(&self) -> &dyn Index;
fn change_id_index(&self, heads: &mut dyn Iterator<Item = &CommitId>)
-> Box<dyn ChangeIdIndex>;
fn start_modification(&self) -> Box<dyn MutableIndex>;
}
impl dyn ReadonlyIndex {
/// Returns reference of the implementation type.
pub fn downcast_ref<T: ReadonlyIndex>(&self) -> Option<&T> {
(self as &dyn Any).downcast_ref()
}
}
#[expect(missing_docs)]
pub trait MutableIndex: Any {
fn as_index(&self) -> &dyn Index;
fn change_id_index(
&self,
heads: &mut dyn Iterator<Item = &CommitId>,
) -> Box<dyn ChangeIdIndex + '_>;
fn add_commit(&mut self, commit: &Commit) -> IndexResult<()>;
fn merge_in(&mut self, other: &dyn ReadonlyIndex) -> IndexResult<()>;
}
impl dyn MutableIndex {
/// Downcasts to the implementation type.
pub fn downcast<T: MutableIndex>(self: Box<Self>) -> Option<Box<T>> {
(self as Box<dyn Any>).downcast().ok()
}
/// Returns reference of the implementation type.
pub fn downcast_ref<T: MutableIndex>(&self) -> Option<&T> {
(self as &dyn Any).downcast_ref()
}
}
/// The state of a commit with a given change ID.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub enum ResolvedChangeState {
/// The commit is visible (reachable from the visible heads).
Visible,
/// The commit is hidden (not reachable from the visible heads).
Hidden,
}
/// Represents the possible target commits of a resolved change ID. If the
/// change is divergent, there may be multiple visible commits. Hidden commits
/// can also be returned to allow showing a change offset number in the evolog.
#[derive(Clone, Eq, PartialEq, Debug)]
pub struct ResolvedChangeTargets {
/// All indexed commits with this change ID. The sort order of the commits
/// is determined by the index implementation, but it is preferred that more
/// recent commits should be sorted before later commits when possible. All
/// visible commits must be included, but some hidden commits may be omitted
/// if it would be inefficient for the index to support them.
pub targets: Vec<(CommitId, ResolvedChangeState)>,
}
impl ResolvedChangeTargets {
/// Returns an iterator over all visible commits for this change ID, as well
/// as their offsets.
pub fn visible_with_offsets(&self) -> impl Iterator<Item = (usize, &CommitId)> {
self.targets
.iter()
.enumerate()
.filter_map(|(i, (target, state))| {
(*state == ResolvedChangeState::Visible).then_some((i, target))
})
}
/// Returns true if the commit ID is one of the visible targets of this
/// change ID.
pub fn has_visible(&self, commit: &CommitId) -> bool {
self.visible_with_offsets()
.any(|(_, target)| target == commit)
}
/// Returns true if there are multiple visible targets for this change ID.
pub fn is_divergent(&self) -> bool {
self.visible_with_offsets().take(2).count() > 1
}
/// Returns the commit ID at a given offset. The change offset of a commit
/// can be found using [`ResolvedChangeTargets::find_offset`].
pub fn at_offset(&self, offset: usize) -> Option<&CommitId> {
self.targets.get(offset).map(|(target, _state)| target)
}
/// Finds the change offset corresponding to a commit. Newer commits should
/// generally have a lower offset than older commits, but this is not
/// guaranteed. Hidden commits may not have an offset at all.
pub fn find_offset(&self, commit_id: &CommitId) -> Option<usize> {
self.targets
.iter()
.position(|(target, _state)| target == commit_id)
}
/// Extracts the visible commits for this change ID. Returns `None` if there
/// are no visible commits with this change ID.
pub fn into_visible(self) -> Option<Vec<CommitId>> {
let visible = self
.targets
.into_iter()
.filter_map(|(target, state)| (state == ResolvedChangeState::Visible).then_some(target))
.collect_vec();
(!visible.is_empty()).then_some(visible)
}
}
/// Defines the interface for types that provide an index of the commits in a
/// repository by [`ChangeId`].
pub trait ChangeIdIndex: Send + Sync {
/// Resolve an unambiguous change ID prefix to the commit IDs in the index.
fn resolve_prefix(
&self,
prefix: &HexPrefix,
) -> IndexResult<PrefixResolution<ResolvedChangeTargets>>;
/// This function returns the shortest length of a prefix of `key` that
/// disambiguates it from every other key in the index.
///
/// The length returned is a number of hexadecimal digits.
///
/// This has some properties that we do not currently make much use of:
///
/// - The algorithm works even if `key` itself is not in the index.
///
/// - In the special case when there are keys in the trie for which our
/// `key` is an exact prefix, returns `key.len() + 1`. Conceptually, in
/// order to disambiguate, you need every letter of the key *and* the
/// additional fact that it's the entire key). This case is extremely
/// unlikely for hashes with 12+ hexadecimal characters.
fn shortest_unique_prefix_len(&self, change_id: &ChangeId) -> IndexResult<usize>;
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/store.rs | lib/src/store.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::fmt::Debug;
use std::fmt::Formatter;
use std::pin::Pin;
use std::sync::Arc;
use std::sync::Mutex;
use std::time::SystemTime;
use clru::CLruCache;
use futures::stream::BoxStream;
use pollster::FutureExt as _;
use tokio::io::AsyncRead;
use crate::backend;
use crate::backend::Backend;
use crate::backend::BackendResult;
use crate::backend::ChangeId;
use crate::backend::CommitId;
use crate::backend::CopyRecord;
use crate::backend::FileId;
use crate::backend::SigningFn;
use crate::backend::SymlinkId;
use crate::backend::TreeId;
use crate::commit::Commit;
use crate::index::Index;
use crate::merge::Merge;
use crate::merged_tree::MergedTree;
use crate::repo_path::RepoPath;
use crate::repo_path::RepoPathBuf;
use crate::signing::Signer;
use crate::tree::Tree;
use crate::tree_merge::MergeOptions;
// There are more tree objects than commits, and trees are often shared across
// commits.
pub(crate) const COMMIT_CACHE_CAPACITY: usize = 100;
const TREE_CACHE_CAPACITY: usize = 1000;
/// Wraps the low-level backend and makes it return more convenient types. Also
/// adds caching.
pub struct Store {
backend: Box<dyn Backend>,
signer: Signer,
commit_cache: Mutex<CLruCache<CommitId, Arc<backend::Commit>>>,
tree_cache: Mutex<CLruCache<(RepoPathBuf, TreeId), Arc<backend::Tree>>>,
merge_options: MergeOptions,
}
impl Debug for Store {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
f.debug_struct("Store")
.field("backend", &self.backend)
.finish_non_exhaustive()
}
}
impl Store {
pub fn new(
backend: Box<dyn Backend>,
signer: Signer,
merge_options: MergeOptions,
) -> Arc<Self> {
Arc::new(Self {
backend,
signer,
commit_cache: Mutex::new(CLruCache::new(COMMIT_CACHE_CAPACITY.try_into().unwrap())),
tree_cache: Mutex::new(CLruCache::new(TREE_CACHE_CAPACITY.try_into().unwrap())),
merge_options,
})
}
pub fn backend(&self) -> &dyn Backend {
self.backend.as_ref()
}
/// Returns backend as the implementation type.
pub fn backend_impl<T: Backend>(&self) -> Option<&T> {
self.backend.downcast_ref()
}
pub fn signer(&self) -> &Signer {
&self.signer
}
/// Default merge options to be used when resolving parent trees.
pub fn merge_options(&self) -> &MergeOptions {
&self.merge_options
}
pub fn get_copy_records(
&self,
paths: Option<&[RepoPathBuf]>,
root: &CommitId,
head: &CommitId,
) -> BackendResult<BoxStream<'_, BackendResult<CopyRecord>>> {
self.backend.get_copy_records(paths, root, head)
}
pub fn commit_id_length(&self) -> usize {
self.backend.commit_id_length()
}
pub fn change_id_length(&self) -> usize {
self.backend.change_id_length()
}
pub fn root_commit_id(&self) -> &CommitId {
self.backend.root_commit_id()
}
pub fn root_change_id(&self) -> &ChangeId {
self.backend.root_change_id()
}
pub fn empty_tree_id(&self) -> &TreeId {
self.backend.empty_tree_id()
}
pub fn concurrency(&self) -> usize {
self.backend.concurrency()
}
pub fn empty_merged_tree(self: &Arc<Self>) -> MergedTree {
let empty_tree_id = self.backend.empty_tree_id().clone();
MergedTree::resolved(self.clone(), empty_tree_id)
}
pub fn empty_merged_tree_id(&self) -> Merge<TreeId> {
Merge::resolved(self.backend.empty_tree_id().clone())
}
pub fn root_commit(self: &Arc<Self>) -> Commit {
self.get_commit(self.backend.root_commit_id()).unwrap()
}
pub fn get_commit(self: &Arc<Self>, id: &CommitId) -> BackendResult<Commit> {
self.get_commit_async(id).block_on()
}
pub async fn get_commit_async(self: &Arc<Self>, id: &CommitId) -> BackendResult<Commit> {
let data = self.get_backend_commit(id).await?;
Ok(Commit::new(self.clone(), id.clone(), data))
}
async fn get_backend_commit(&self, id: &CommitId) -> BackendResult<Arc<backend::Commit>> {
{
let mut locked_cache = self.commit_cache.lock().unwrap();
if let Some(data) = locked_cache.get(id).cloned() {
return Ok(data);
}
}
let commit = self.backend.read_commit(id).await?;
let data = Arc::new(commit);
let mut locked_cache = self.commit_cache.lock().unwrap();
locked_cache.put(id.clone(), data.clone());
Ok(data)
}
pub async fn write_commit(
self: &Arc<Self>,
commit: backend::Commit,
sign_with: Option<&mut SigningFn<'_>>,
) -> BackendResult<Commit> {
assert!(!commit.parents.is_empty());
let (commit_id, commit) = self.backend.write_commit(commit, sign_with).await?;
let data = Arc::new(commit);
{
let mut locked_cache = self.commit_cache.lock().unwrap();
locked_cache.put(commit_id.clone(), data.clone());
}
Ok(Commit::new(self.clone(), commit_id, data))
}
pub fn get_tree(self: &Arc<Self>, dir: RepoPathBuf, id: &TreeId) -> BackendResult<Tree> {
self.get_tree_async(dir, id).block_on()
}
pub async fn get_tree_async(
self: &Arc<Self>,
dir: RepoPathBuf,
id: &TreeId,
) -> BackendResult<Tree> {
let data = self.get_backend_tree(&dir, id).await?;
Ok(Tree::new(self.clone(), dir, id.clone(), data))
}
async fn get_backend_tree(
&self,
dir: &RepoPath,
id: &TreeId,
) -> BackendResult<Arc<backend::Tree>> {
let key = (dir.to_owned(), id.clone());
{
let mut locked_cache = self.tree_cache.lock().unwrap();
if let Some(data) = locked_cache.get(&key).cloned() {
return Ok(data);
}
}
let data = self.backend.read_tree(dir, id).await?;
let data = Arc::new(data);
let mut locked_cache = self.tree_cache.lock().unwrap();
locked_cache.put(key, data.clone());
Ok(data)
}
pub async fn write_tree(
self: &Arc<Self>,
path: &RepoPath,
tree: backend::Tree,
) -> BackendResult<Tree> {
let tree_id = self.backend.write_tree(path, &tree).await?;
let data = Arc::new(tree);
{
let mut locked_cache = self.tree_cache.lock().unwrap();
locked_cache.put((path.to_owned(), tree_id.clone()), data.clone());
}
Ok(Tree::new(self.clone(), path.to_owned(), tree_id, data))
}
pub async fn read_file(
&self,
path: &RepoPath,
id: &FileId,
) -> BackendResult<Pin<Box<dyn AsyncRead + Send>>> {
self.backend.read_file(path, id).await
}
pub async fn write_file(
&self,
path: &RepoPath,
contents: &mut (dyn AsyncRead + Send + Unpin),
) -> BackendResult<FileId> {
self.backend.write_file(path, contents).await
}
pub async fn read_symlink(&self, path: &RepoPath, id: &SymlinkId) -> BackendResult<String> {
self.backend.read_symlink(path, id).await
}
pub async fn write_symlink(&self, path: &RepoPath, contents: &str) -> BackendResult<SymlinkId> {
self.backend.write_symlink(path, contents).await
}
pub fn gc(&self, index: &dyn Index, keep_newer: SystemTime) -> BackendResult<()> {
self.backend.gc(index, keep_newer)
}
/// Clear cached objects. Mainly intended for testing.
pub fn clear_caches(&self) {
self.commit_cache.lock().unwrap().clear();
self.tree_cache.lock().unwrap().clear();
}
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/commit_builder.rs | lib/src/commit_builder.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::sync::Arc;
use pollster::FutureExt as _;
use crate::backend;
use crate::backend::BackendError;
use crate::backend::BackendResult;
use crate::backend::ChangeId;
use crate::backend::CommitId;
use crate::backend::Signature;
use crate::backend::TreeId;
use crate::commit::Commit;
use crate::commit::is_backend_commit_empty;
use crate::conflict_labels::ConflictLabels;
use crate::merge::Merge;
use crate::merged_tree::MergedTree;
use crate::repo::MutableRepo;
use crate::repo::Repo;
use crate::settings::JJRng;
use crate::settings::SignSettings;
use crate::settings::UserSettings;
use crate::signing::SignBehavior;
use crate::store::Store;
/// Builder for creating a new commit or rewriting an existing one within a
/// [`MutableRepo`] transaction. All accessors delegate to the contained
/// [`DetachedCommitBuilder`].
#[must_use]
pub struct CommitBuilder<'repo> {
    // Repo the new commit is added to (as a head) on `write()`.
    mut_repo: &'repo mut MutableRepo,
    // Borrow-free builder state.
    inner: DetachedCommitBuilder,
}
impl CommitBuilder<'_> {
    /// Detaches from `&'repo mut` lifetime. The returned builder can be used in
    /// order to obtain a temporary commit object.
    pub fn detach(self) -> DetachedCommitBuilder {
        self.inner
    }
    /// Clears the source commit to not record new commit as rewritten from it.
    ///
    /// The caller should also assign new change id to not create divergence.
    pub fn clear_rewrite_source(mut self) -> Self {
        self.inner.clear_rewrite_source();
        self
    }
    /// Parent commit ids of the commit being built.
    pub fn parents(&self) -> &[CommitId] {
        self.inner.parents()
    }
    /// Replaces the parent commit ids. The list must not be empty.
    pub fn set_parents(mut self, parents: Vec<CommitId>) -> Self {
        self.inner.set_parents(parents);
        self
    }
    /// Predecessor commit ids to be recorded for the new commit.
    pub fn predecessors(&self) -> &[CommitId] {
        self.inner.predecessors()
    }
    /// Replaces the predecessor commit ids.
    pub fn set_predecessors(mut self, predecessors: Vec<CommitId>) -> Self {
        self.inner.set_predecessors(predecessors);
        self
    }
    /// Tree the new commit will point to.
    pub fn tree(&self) -> MergedTree {
        self.inner.tree()
    }
    /// Root tree ids of the new commit.
    pub fn tree_ids(&self) -> &Merge<TreeId> {
        self.inner.tree_ids()
    }
    /// Replaces the tree the new commit will point to.
    pub fn set_tree(mut self, tree: MergedTree) -> Self {
        self.inner.set_tree(tree);
        self
    }
    /// [`Commit::is_empty()`] for the new commit.
    pub fn is_empty(&self) -> BackendResult<bool> {
        self.inner.is_empty(self.mut_repo)
    }
    /// Change id of the commit being built.
    pub fn change_id(&self) -> &ChangeId {
        self.inner.change_id()
    }
    /// Replaces the change id.
    pub fn set_change_id(mut self, change_id: ChangeId) -> Self {
        self.inner.set_change_id(change_id);
        self
    }
    /// Assigns a newly generated change id.
    pub fn generate_new_change_id(mut self) -> Self {
        self.inner.generate_new_change_id();
        self
    }
    /// Description (commit message) of the commit being built.
    pub fn description(&self) -> &str {
        self.inner.description()
    }
    /// Replaces the description (commit message).
    pub fn set_description(mut self, description: impl Into<String>) -> Self {
        self.inner.set_description(description);
        self
    }
    /// Author signature of the commit being built.
    pub fn author(&self) -> &Signature {
        self.inner.author()
    }
    /// Replaces the author signature.
    pub fn set_author(mut self, author: Signature) -> Self {
        self.inner.set_author(author);
        self
    }
    /// Committer signature of the commit being built.
    pub fn committer(&self) -> &Signature {
        self.inner.committer()
    }
    /// Replaces the committer signature.
    pub fn set_committer(mut self, committer: Signature) -> Self {
        self.inner.set_committer(committer);
        self
    }
    /// [`Commit::is_discardable()`] for the new commit.
    pub fn is_discardable(&self) -> BackendResult<bool> {
        self.inner.is_discardable(self.mut_repo)
    }
    /// Signing settings that will apply when the commit is written.
    pub fn sign_settings(&self) -> &SignSettings {
        self.inner.sign_settings()
    }
    /// Overrides whether the new commit should be signed.
    pub fn set_sign_behavior(mut self, sign_behavior: SignBehavior) -> Self {
        self.inner.set_sign_behavior(sign_behavior);
        self
    }
    /// Sets the signing key to use for the new commit.
    pub fn set_sign_key(mut self, sign_key: String) -> Self {
        self.inner.set_sign_key(sign_key);
        self
    }
    /// Clears any configured signing key.
    pub fn clear_sign_key(mut self) -> Self {
        self.inner.clear_sign_key();
        self
    }
    /// Writes the new commit and makes it visible in the repo.
    pub fn write(self) -> BackendResult<Commit> {
        self.inner.write(self.mut_repo)
    }
    /// Records the old commit as abandoned instead of writing new commit. This
    /// is noop for the builder created by [`MutableRepo::new_commit()`].
    pub fn abandon(self) {
        self.inner.abandon(self.mut_repo);
    }
}
/// Like `CommitBuilder`, but doesn't mutably borrow `MutableRepo`.
#[derive(Debug)]
pub struct DetachedCommitBuilder {
    store: Arc<Store>,
    // Used to generate new change ids.
    rng: Arc<JJRng>,
    // The commit data being assembled.
    commit: backend::Commit,
    // Predecessors to record for the new commit.
    predecessors: Vec<CommitId>,
    // Old commit this one rewrites, if any; recorded on `write()`.
    rewrite_source: Option<Commit>,
    sign_settings: SignSettings,
    // Cached value of the "experimental.record-predecessors-in-commit" setting.
    record_predecessors_in_commit: bool,
}
impl DetachedCommitBuilder {
    /// Only called from [`MutableRepo::new_commit`]. Use that function instead.
    pub(crate) fn for_new_commit(
        repo: &dyn Repo,
        settings: &UserSettings,
        parents: Vec<CommitId>,
        tree: MergedTree,
    ) -> Self {
        let store = repo.store().clone();
        let signature = settings.signature();
        // A commit must have at least one parent.
        assert!(!parents.is_empty());
        let rng = settings.get_rng();
        let (root_tree, conflict_labels) = tree.into_tree_ids_and_labels();
        let change_id = rng.new_change_id(store.change_id_length());
        let commit = backend::Commit {
            parents,
            predecessors: vec![],
            root_tree,
            conflict_labels: conflict_labels.into_merge(),
            change_id,
            description: String::new(),
            // Author and committer start out identical for a brand-new commit.
            author: signature.clone(),
            committer: signature,
            secure_sig: None,
        };
        let record_predecessors_in_commit = settings
            .get_bool("experimental.record-predecessors-in-commit")
            .unwrap();
        Self {
            store,
            rng,
            commit,
            rewrite_source: None,
            predecessors: vec![],
            sign_settings: settings.sign_settings(),
            record_predecessors_in_commit,
        }
    }
    /// Only called from [`MutableRepo::rewrite_commit`]. Use that function
    /// instead.
    pub(crate) fn for_rewrite_from(
        repo: &dyn Repo,
        settings: &UserSettings,
        predecessor: &Commit,
    ) -> Self {
        let store = repo.store().clone();
        // Start from a copy of the predecessor's commit data.
        let mut commit = backend::Commit::clone(predecessor.store_commit());
        commit.predecessors = vec![];
        commit.committer = settings.signature();
        // If the user had not configured a name and email before but now they have,
        // update the author fields with the new information.
        if commit.author.name.is_empty()
            || commit.author.name == UserSettings::USER_NAME_PLACEHOLDER
        {
            commit.author.name.clone_from(&commit.committer.name);
        }
        if commit.author.email.is_empty()
            || commit.author.email == UserSettings::USER_EMAIL_PLACEHOLDER
        {
            commit.author.email.clone_from(&commit.committer.email);
        }
        // Reset author timestamp on discardable commits if the author is the
        // committer. While it's unlikely we'll have somebody else's commit
        // with no description in our repo, we'd like to be extra safe.
        if commit.author.name == commit.committer.name
            && commit.author.email == commit.committer.email
            && predecessor.is_discardable(repo).unwrap_or_default()
        {
            commit.author.timestamp = commit.committer.timestamp;
        }
        let record_predecessors_in_commit = settings
            .get_bool("experimental.record-predecessors-in-commit")
            .unwrap();
        Self {
            store,
            commit,
            rng: settings.get_rng(),
            rewrite_source: Some(predecessor.clone()),
            predecessors: vec![predecessor.id().clone()],
            sign_settings: settings.sign_settings(),
            record_predecessors_in_commit,
        }
    }
    /// Attaches the underlying `mut_repo`.
    pub fn attach(self, mut_repo: &mut MutableRepo) -> CommitBuilder<'_> {
        // The builder's store must belong to the repo it's attached to.
        assert!(Arc::ptr_eq(&self.store, mut_repo.store()));
        CommitBuilder {
            mut_repo,
            inner: self,
        }
    }
    /// Clears the source commit to not record new commit as rewritten from it.
    ///
    /// The caller should also assign new change id to not create divergence.
    pub fn clear_rewrite_source(&mut self) {
        self.rewrite_source = None;
    }
    /// Parent commit ids of the commit being built.
    pub fn parents(&self) -> &[CommitId] {
        &self.commit.parents
    }
    /// Replaces the parent commit ids. The list must not be empty.
    pub fn set_parents(&mut self, parents: Vec<CommitId>) -> &mut Self {
        assert!(!parents.is_empty());
        self.commit.parents = parents;
        self
    }
    /// Predecessor commit ids to be recorded for the new commit.
    pub fn predecessors(&self) -> &[CommitId] {
        &self.predecessors
    }
    /// Replaces the predecessor commit ids.
    pub fn set_predecessors(&mut self, predecessors: Vec<CommitId>) -> &mut Self {
        self.predecessors = predecessors;
        self
    }
    /// Tree the new commit will point to, reconstructed from the stored ids
    /// and conflict labels.
    pub fn tree(&self) -> MergedTree {
        MergedTree::new(
            self.store.clone(),
            self.commit.root_tree.clone(),
            ConflictLabels::from_merge(self.commit.conflict_labels.clone()),
        )
    }
    /// Root tree ids of the new commit.
    pub fn tree_ids(&self) -> &Merge<TreeId> {
        &self.commit.root_tree
    }
    /// Replaces the tree the new commit will point to.
    pub fn set_tree(&mut self, tree: MergedTree) -> &mut Self {
        // The tree must come from the same store as the commit.
        assert!(Arc::ptr_eq(tree.store(), &self.store));
        let (root_tree, conflict_labels) = tree.into_tree_ids_and_labels();
        self.commit.root_tree = root_tree;
        self.commit.conflict_labels = conflict_labels.into_merge();
        self
    }
    /// [`Commit::is_empty()`] for the new commit.
    pub fn is_empty(&self, repo: &dyn Repo) -> BackendResult<bool> {
        is_backend_commit_empty(repo, &self.store, &self.commit)
    }
    /// Change id of the commit being built.
    pub fn change_id(&self) -> &ChangeId {
        &self.commit.change_id
    }
    /// Replaces the change id.
    pub fn set_change_id(&mut self, change_id: ChangeId) -> &mut Self {
        self.commit.change_id = change_id;
        self
    }
    /// Assigns a newly generated change id.
    pub fn generate_new_change_id(&mut self) -> &mut Self {
        self.commit.change_id = self.rng.new_change_id(self.store.change_id_length());
        self
    }
    /// Description (commit message) of the commit being built.
    pub fn description(&self) -> &str {
        &self.commit.description
    }
    /// Replaces the description (commit message).
    pub fn set_description(&mut self, description: impl Into<String>) -> &mut Self {
        self.commit.description = description.into();
        self
    }
    /// Author signature of the commit being built.
    pub fn author(&self) -> &Signature {
        &self.commit.author
    }
    /// Replaces the author signature.
    pub fn set_author(&mut self, author: Signature) -> &mut Self {
        self.commit.author = author;
        self
    }
    /// Committer signature of the commit being built.
    pub fn committer(&self) -> &Signature {
        &self.commit.committer
    }
    /// Replaces the committer signature.
    pub fn set_committer(&mut self, committer: Signature) -> &mut Self {
        self.commit.committer = committer;
        self
    }
    /// [`Commit::is_discardable()`] for the new commit: no description and an
    /// empty diff.
    pub fn is_discardable(&self, repo: &dyn Repo) -> BackendResult<bool> {
        Ok(self.description().is_empty() && self.is_empty(repo)?)
    }
    /// Signing settings that will apply when the commit is written.
    pub fn sign_settings(&self) -> &SignSettings {
        &self.sign_settings
    }
    /// Overrides whether the new commit should be signed.
    pub fn set_sign_behavior(&mut self, sign_behavior: SignBehavior) -> &mut Self {
        self.sign_settings.behavior = sign_behavior;
        self
    }
    /// Sets the signing key to use for the new commit.
    pub fn set_sign_key(&mut self, sign_key: String) -> &mut Self {
        self.sign_settings.key = Some(sign_key);
        self
    }
    /// Clears any configured signing key.
    pub fn clear_sign_key(&mut self) -> &mut Self {
        self.sign_settings.key = None;
        self
    }
    /// Writes new commit and makes it visible in the `mut_repo`.
    pub fn write(mut self, mut_repo: &mut MutableRepo) -> BackendResult<Commit> {
        if self.record_predecessors_in_commit {
            self.commit.predecessors = self.predecessors.clone();
        }
        let commit = write_to_store(&self.store, self.commit, &self.sign_settings)?;
        // FIXME: Google's index.has_id() always returns true.
        if mut_repo.is_backed_by_default_index()
            && mut_repo
                .index()
                .has_id(commit.id())
                // TODO: indexing error shouldn't be a "BackendError"
                .map_err(|err| BackendError::Other(err.into()))?
        {
            // Recording existing commit as new would create cycle in
            // predecessors/parent mappings within the current transaction, and
            // in predecessors graph globally.
            return Err(BackendError::Other(
                format!("Newly-created commit {id} already exists", id = commit.id()).into(),
            ));
        }
        mut_repo.add_head(&commit)?;
        mut_repo.set_predecessors(commit.id().clone(), self.predecessors);
        if let Some(rewrite_source) = self.rewrite_source {
            mut_repo.set_rewritten_commit(rewrite_source.id().clone(), commit.id().clone());
        }
        Ok(commit)
    }
    /// Writes new commit without making it visible in the repo.
    ///
    /// This does not consume the builder, so you can reuse the current
    /// configuration to create another commit later.
    pub fn write_hidden(&self) -> BackendResult<Commit> {
        let mut commit = self.commit.clone();
        if self.record_predecessors_in_commit {
            commit.predecessors = self.predecessors.clone();
        }
        write_to_store(&self.store, commit, &self.sign_settings)
    }
    /// Records the old commit as abandoned in the `mut_repo`.
    ///
    /// This is noop if there's no old commit that would be rewritten to the new
    /// commit by `write()`.
    pub fn abandon(self, mut_repo: &mut MutableRepo) {
        let commit = self.commit;
        if let Some(rewrite_source) = &self.rewrite_source {
            mut_repo
                .record_abandoned_commit_with_parents(rewrite_source.id().clone(), commit.parents);
        }
    }
}
/// Writes `commit` to `store`, signing it when the store's signer can sign
/// and the settings request it.
fn write_to_store(
    store: &Arc<Store>,
    mut commit: backend::Commit,
    sign_settings: &SignSettings,
) -> BackendResult<Commit> {
    let should_sign = store.signer().can_sign() && sign_settings.should_sign(&commit);
    let sign_fn = |data: &[u8]| store.signer().sign(data, sign_settings.key.as_deref());
    // Commit backend doesn't use secure_sig for writing and enforces it with an
    // assert, but sign_settings.should_sign check above will want to know
    // if we're rewriting a signed commit
    commit.secure_sig = None;
    store
        .write_commit(commit, should_sign.then_some(&mut &sign_fn))
        .block_on()
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/stacked_table.rs | lib/src/stacked_table.rs | // Copyright 2021 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A persistent table of fixed-size keys to variable-size values.
//!
//! The keys are stored in sorted order, with each key followed by an
//! integer offset into the list of values. The values are
//! concatenated after the keys. A file may have a parent file, and
//! the parent may have its own parent, and so on. The child file then
//! represents the union of the entries.
#![expect(missing_docs)]
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::fs;
use std::fs::File;
use std::io;
use std::io::Read;
use std::io::Write as _;
use std::iter;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::RwLock;
use std::time::SystemTime;
use blake2::Blake2b512;
use blake2::Digest as _;
use tempfile::NamedTempFile;
use thiserror::Error;
use crate::file_util::IoResultExt as _;
use crate::file_util::PathError;
use crate::file_util::persist_content_addressed_temp_file;
use crate::hex_util;
use crate::lock::FileLock;
use crate::lock::FileLockError;
/// Length of a segment file name: a BLAKE2b-512 hash (64 bytes) encoded as a
/// hex string.
const SEGMENT_FILE_NAME_LENGTH: usize = 64 * 2;
/// A table segment: the entries local to one file/in-memory layer, plus an
/// optional chain of ancestor segments.
pub trait TableSegment {
    /// Number of entries stored directly in this segment.
    fn segment_num_entries(&self) -> usize;
    /// The parent segment, if this one is stacked on another file.
    fn segment_parent_file(&self) -> Option<&Arc<ReadonlyTable>>;
    /// Looks up `key` in this segment only (not in ancestors).
    fn segment_get_value(&self, key: &[u8]) -> Option<&[u8]>;
    /// Copies this segment's local entries into `mut_table`.
    fn segment_add_entries_to(&self, mut_table: &mut MutableTable);
    /// Total number of entries, including all ancestor segments.
    fn num_entries(&self) -> usize {
        let inherited = self
            .segment_parent_file()
            .map_or(0, |parent| parent.num_entries());
        inherited + self.segment_num_entries()
    }
    /// Looks up `key` in this segment first, falling back to the ancestor
    /// chain.
    fn get_value<'a>(&'a self, key: &[u8]) -> Option<&'a [u8]> {
        match self.segment_get_value(key) {
            Some(value) => Some(value),
            None => self.segment_parent_file()?.get_value(key),
        }
    }
}
/// An immutable table segment loaded from disk, optionally stacked on a
/// parent segment.
pub struct ReadonlyTable {
    key_size: usize,
    parent_file: Option<Arc<Self>>,
    // File name of this segment (its content hash in hex).
    name: String,
    // Number of entries not counting the parent file
    num_local_entries: usize,
    // The file's entries in the raw format they're stored in on disk.
    index: Vec<u8>,
    values: Vec<u8>,
}
impl ReadonlyTable {
    /// Parses a table segment from `file`.
    ///
    /// On-disk layout: parent file name length (u32 LE), parent file name
    /// bytes, number of local entries (u32 LE), the fixed-size sorted index
    /// entries, then the concatenated values. The parent segment (if any) is
    /// loaded recursively through `store`.
    fn load_from(
        file: &mut dyn Read,
        store: &TableStore,
        name: String,
        key_size: usize,
    ) -> TableStoreResult<Arc<Self>> {
        let to_load_err = |err| TableStoreError::LoadSegment {
            name: name.clone(),
            err,
        };
        // Reads one little-endian u32 from the stream.
        let read_u32 = |file: &mut dyn Read| -> TableStoreResult<u32> {
            let mut buf = [0; 4];
            file.read_exact(&mut buf).map_err(to_load_err)?;
            Ok(u32::from_le_bytes(buf))
        };
        let parent_filename_len = read_u32(file)?;
        let maybe_parent_file = if parent_filename_len > 0 {
            let mut parent_filename_bytes = vec![0; parent_filename_len as usize];
            file.read_exact(&mut parent_filename_bytes)
                .map_err(to_load_err)?;
            let parent_filename = String::from_utf8(parent_filename_bytes).unwrap();
            let parent_file = store.load_table(parent_filename)?;
            Some(parent_file)
        } else {
            None
        };
        let num_local_entries = read_u32(file)? as usize;
        let index_size = num_local_entries * ReadonlyTableIndexEntry::size(key_size);
        let mut data = vec![];
        file.read_to_end(&mut data).map_err(to_load_err)?;
        // The rest of the file is the index followed by the values.
        let values = data.split_off(index_size);
        let index = data;
        Ok(Arc::new(Self {
            key_size,
            parent_file: maybe_parent_file,
            name,
            num_local_entries,
            index,
            values,
        }))
    }
    /// File name (content hash in hex) of this segment.
    pub fn name(&self) -> &str {
        &self.name
    }
    /// Iterates ancestor table segments including `self`.
    pub fn ancestor_segments(self: &Arc<Self>) -> impl Iterator<Item = &Arc<Self>> {
        iter::successors(Some(self), |table| table.segment_parent_file())
    }
    /// Starts building a new mutable segment stacked on this one.
    pub fn start_mutation(self: &Arc<Self>) -> MutableTable {
        MutableTable::incremental(self.clone())
    }
    // Offset into `values` where the value at `pos` starts. The value's end
    // is the next entry's offset; for the last entry it's the buffer end.
    fn segment_value_offset_by_pos(&self, pos: usize) -> usize {
        if pos == self.num_local_entries {
            self.values.len()
        } else {
            ReadonlyTableIndexEntry::new(self, pos).value_offset()
        }
    }
    fn segment_value_by_pos(&self, pos: usize) -> &[u8] {
        &self.values
            [self.segment_value_offset_by_pos(pos)..self.segment_value_offset_by_pos(pos + 1)]
    }
}
impl TableSegment for ReadonlyTable {
    fn segment_num_entries(&self) -> usize {
        self.num_local_entries
    }
    fn segment_parent_file(&self) -> Option<&Arc<ReadonlyTable>> {
        self.parent_file.as_ref()
    }
    /// Binary search over this segment's sorted index for `key`.
    fn segment_get_value(&self, key: &[u8]) -> Option<&[u8]> {
        let mut low = 0;
        let mut high = self.num_local_entries;
        // Invariant: if the key exists locally, it is at a position within
        // `low..high`.
        while low < high {
            let mid = low + (high - low) / 2;
            let entry = ReadonlyTableIndexEntry::new(self, mid);
            match key.cmp(entry.key()) {
                Ordering::Less => high = mid,
                Ordering::Equal => return Some(self.segment_value_by_pos(mid)),
                Ordering::Greater => low = mid + 1,
            }
        }
        None
    }
    fn segment_add_entries_to(&self, mut_table: &mut MutableTable) {
        for pos in 0..self.num_local_entries {
            let entry = ReadonlyTableIndexEntry::new(self, pos);
            let key = entry.key().to_vec();
            let value = self.segment_value_by_pos(pos).to_vec();
            mut_table.add_entry(key, value);
        }
    }
}
/// View of one fixed-size index entry (key bytes followed by a 4-byte
/// little-endian value offset) borrowed from a [`ReadonlyTable`]'s index.
struct ReadonlyTableIndexEntry<'table> {
    data: &'table [u8],
}
impl<'table> ReadonlyTableIndexEntry<'table> {
fn new(table: &'table ReadonlyTable, pos: usize) -> Self {
let entry_size = ReadonlyTableIndexEntry::size(table.key_size);
let offset = entry_size * pos;
let data = &table.index[offset..][..entry_size];
Self { data }
}
fn size(key_size: usize) -> usize {
key_size + 4
}
fn key(&self) -> &'table [u8] {
&self.data[0..self.data.len() - 4]
}
fn value_offset(&self) -> usize {
u32::from_le_bytes(self.data[self.data.len() - 4..].try_into().unwrap()) as usize
}
}
/// An in-memory, mutable table segment, optionally stacked on a readonly
/// parent segment.
pub struct MutableTable {
    key_size: usize,
    parent_file: Option<Arc<ReadonlyTable>>,
    // Local entries; BTreeMap keeps them sorted by key for serialization.
    entries: BTreeMap<Vec<u8>, Vec<u8>>,
}
impl MutableTable {
    /// Creates an empty table with no parent segment.
    fn full(key_size: usize) -> Self {
        Self {
            key_size,
            parent_file: None,
            entries: BTreeMap::new(),
        }
    }
    /// Creates an empty table stacked on `parent_file`, inheriting its key
    /// size.
    fn incremental(parent_file: Arc<ReadonlyTable>) -> Self {
        let key_size = parent_file.key_size;
        Self {
            key_size,
            parent_file: Some(parent_file),
            entries: BTreeMap::new(),
        }
    }
    /// Adds (or replaces) an entry. `key` must match the table's key size.
    pub fn add_entry(&mut self, key: Vec<u8>, value: Vec<u8>) {
        assert_eq!(key.len(), self.key_size);
        self.entries.insert(key, value);
    }
    /// Copies all local entries of `other` into this table.
    fn add_entries_from(&mut self, other: &dyn TableSegment) {
        other.segment_add_entries_to(self);
    }
    /// Merges entries from `other` and its ancestors into this table,
    /// stopping once a segment shared with our own ancestor chain is found.
    fn merge_in(&mut self, other: &Arc<ReadonlyTable>) {
        let mut maybe_own_ancestor = self.parent_file.clone();
        let mut maybe_other_ancestor = Some(other.clone());
        let mut files_to_add = vec![];
        loop {
            if maybe_other_ancestor.is_none() {
                break;
            }
            let other_ancestor = maybe_other_ancestor.as_ref().unwrap();
            if maybe_own_ancestor.is_none() {
                files_to_add.push(other_ancestor.clone());
                maybe_other_ancestor = other_ancestor.parent_file.clone();
                continue;
            }
            let own_ancestor = maybe_own_ancestor.as_ref().unwrap();
            if own_ancestor.name == other_ancestor.name {
                // Common ancestor found; everything below it is shared.
                break;
            }
            // Walk down whichever chain is "taller" until the names match.
            if own_ancestor.num_entries() < other_ancestor.num_entries() {
                files_to_add.push(other_ancestor.clone());
                maybe_other_ancestor = other_ancestor.parent_file.clone();
            } else {
                maybe_own_ancestor = own_ancestor.parent_file.clone();
            }
        }
        // Replay the missing segments oldest-first so later segments'
        // entries overwrite earlier ones on key collision.
        for file in files_to_add.iter().rev() {
            self.add_entries_from(file.as_ref());
        }
    }
    /// Serializes this table into the on-disk segment format parsed by
    /// [`ReadonlyTable::load_from`].
    fn serialize(self) -> Vec<u8> {
        let mut buf = vec![];
        if let Some(parent_file) = &self.parent_file {
            buf.extend(u32::try_from(parent_file.name.len()).unwrap().to_le_bytes());
            buf.extend_from_slice(parent_file.name.as_bytes());
        } else {
            // A zero length means "no parent".
            buf.extend(0_u32.to_le_bytes());
        }
        buf.extend(u32::try_from(self.entries.len()).unwrap().to_le_bytes());
        // BTreeMap iterates in key order, which the sorted index requires.
        let mut value_offset = 0_u32;
        for (key, value) in &self.entries {
            buf.extend_from_slice(key);
            buf.extend(value_offset.to_le_bytes());
            value_offset += u32::try_from(value.len()).unwrap();
        }
        for value in self.entries.values() {
            buf.extend_from_slice(value);
        }
        buf
    }
    /// If the MutableTable has more than half the entries of its parent
    /// ReadonlyTable, return MutableTable with the commits from both. This
    /// is done recursively, so the stack of index files has O(log n) files.
    #[expect(clippy::assigning_clones)]
    fn maybe_squash_with_ancestors(self) -> Self {
        let mut num_new_entries = self.entries.len();
        let mut files_to_squash = vec![];
        let mut maybe_parent_file = self.parent_file.clone();
        let mut squashed;
        loop {
            match maybe_parent_file {
                Some(parent_file) => {
                    // TODO: We should probably also squash if the parent file has less than N
                    // commits, regardless of how many (few) are in `self`.
                    if 2 * num_new_entries < parent_file.num_local_entries {
                        squashed = Self::incremental(parent_file);
                        break;
                    }
                    num_new_entries += parent_file.num_local_entries;
                    files_to_squash.push(parent_file.clone());
                    maybe_parent_file = parent_file.parent_file.clone();
                }
                None => {
                    squashed = Self::full(self.key_size);
                    break;
                }
            }
        }
        if files_to_squash.is_empty() {
            return self;
        }
        // Replay squashed segments oldest-first, then our own entries last so
        // they win on key collision.
        for parent_file in files_to_squash.iter().rev() {
            squashed.add_entries_from(parent_file.as_ref());
        }
        squashed.add_entries_from(&self);
        squashed
    }
    /// Writes this table to `store` as a content-addressed segment file and
    /// returns the resulting [`ReadonlyTable`].
    ///
    /// If there are no local entries, returns the parent segment as-is
    /// instead of writing a redundant file.
    fn save_in(mut self, store: &TableStore) -> TableStoreResult<Arc<ReadonlyTable>> {
        if self.entries.is_empty()
            && let Some(parent_file) = self.parent_file.take()
        {
            return Ok(parent_file);
        }
        let buf = self.maybe_squash_with_ancestors().serialize();
        // The file name is the hash of the serialized contents.
        let mut hasher = Blake2b512::new();
        hasher.update(&buf);
        let file_id_hex = hex_util::encode_hex(&hasher.finalize());
        let file_path = store.dir.join(&file_id_hex);
        let to_save_err = |err| TableStoreError::SaveSegment {
            name: file_id_hex.clone(),
            err,
        };
        // Write via a temp file, then persist under the content-addressed
        // name (see `persist_content_addressed_temp_file`).
        let mut temp_file = NamedTempFile::new_in(&store.dir).map_err(to_save_err)?;
        let file = temp_file.as_file_mut();
        file.write_all(&buf).map_err(to_save_err)?;
        persist_content_addressed_temp_file(temp_file, file_path).map_err(to_save_err)?;
        ReadonlyTable::load_from(&mut buf.as_slice(), store, file_id_hex, store.key_size)
    }
}
impl TableSegment for MutableTable {
    fn segment_num_entries(&self) -> usize {
        self.entries.len()
    }
    fn segment_parent_file(&self) -> Option<&Arc<ReadonlyTable>> {
        self.parent_file.as_ref()
    }
    fn segment_get_value(&self, key: &[u8]) -> Option<&[u8]> {
        Some(self.entries.get(key)?.as_slice())
    }
    fn segment_add_entries_to(&self, mut_table: &mut MutableTable) {
        self.entries
            .iter()
            .for_each(|(key, value)| mut_table.add_entry(key.clone(), value.clone()));
    }
}
/// Errors that can occur while reading or writing a [`TableStore`].
#[derive(Debug, Error)]
pub enum TableStoreError {
    /// Reading the "heads" directory failed.
    #[error("Failed to load table heads")]
    LoadHeads(#[source] io::Error),
    /// Writing a head marker file failed.
    #[error("Failed to save table heads")]
    SaveHeads(#[source] io::Error),
    /// Reading a table segment file failed.
    #[error("Failed to load table segment '{name}'")]
    LoadSegment {
        name: String,
        #[source]
        err: io::Error,
    },
    /// Writing a table segment file failed.
    #[error("Failed to save table segment '{name}'")]
    SaveSegment {
        name: String,
        #[source]
        err: io::Error,
    },
    /// Acquiring the store-wide file lock failed.
    #[error("Failed to lock table store")]
    Lock(#[source] FileLockError),
}
/// Result alias for [`TableStore`] operations.
pub type TableStoreResult<T> = Result<T, TableStoreError>;
/// On-disk store of stacked table segments plus a set of "head" tables
/// tracked via marker files in a "heads" subdirectory.
pub struct TableStore {
    dir: PathBuf,
    key_size: usize,
    // In-memory cache of already-loaded segments, keyed by file name.
    cached_tables: RwLock<HashMap<String, Arc<ReadonlyTable>>>,
}
impl TableStore {
    /// Initializes a new store in `dir`, creating its "heads" directory.
    pub fn init(dir: PathBuf, key_size: usize) -> Self {
        std::fs::create_dir(dir.join("heads")).unwrap();
        Self {
            dir,
            key_size,
            cached_tables: Default::default(),
        }
    }
    /// Removes all recorded heads and re-initializes the store.
    pub fn reinit(&self) {
        std::fs::remove_dir_all(self.dir.join("heads")).unwrap();
        Self::init(self.dir.clone(), self.key_size);
    }
    /// Fixed key size (in bytes) of tables in this store.
    pub fn key_size(&self) -> usize {
        self.key_size
    }
    /// Opens an existing store in `dir` without touching the filesystem.
    pub fn load(dir: PathBuf, key_size: usize) -> Self {
        Self {
            dir,
            key_size,
            cached_tables: Default::default(),
        }
    }
    /// Saves `mut_table` as a segment, records it as a head, removes the
    /// parent from the heads, and caches the result.
    pub fn save_table(&self, mut_table: MutableTable) -> TableStoreResult<Arc<ReadonlyTable>> {
        let maybe_parent_table = mut_table.parent_file.clone();
        let table = mut_table.save_in(self)?;
        self.add_head(&table)?;
        // `save_in()` may return the parent unchanged (no new entries); only
        // drop the parent head if a genuinely new segment was written.
        if let Some(parent_table) = maybe_parent_table
            && parent_table.name != table.name
        {
            self.remove_head(&parent_table);
        }
        {
            let mut locked_cache = self.cached_tables.write().unwrap();
            locked_cache.insert(table.name.clone(), table.clone());
        }
        Ok(table)
    }
    // Records `table` as a head by writing an empty marker file.
    fn add_head(&self, table: &Arc<ReadonlyTable>) -> TableStoreResult<()> {
        std::fs::write(self.dir.join("heads").join(&table.name), "")
            .map_err(TableStoreError::SaveHeads)
    }
    fn remove_head(&self, table: &Arc<ReadonlyTable>) {
        // It's fine if the old head was not found. It probably means
        // that we're on a distributed file system where the locking
        // doesn't work. We'll probably end up with two current
        // heads. We'll detect that next time we load the table.
        std::fs::remove_file(self.dir.join("heads").join(&table.name)).ok();
    }
    // Takes the store-wide file lock.
    fn lock(&self) -> TableStoreResult<FileLock> {
        FileLock::lock(self.dir.join("lock")).map_err(TableStoreError::Lock)
    }
    /// Loads the segment named `name`, consulting and populating the
    /// in-memory cache.
    fn load_table(&self, name: String) -> TableStoreResult<Arc<ReadonlyTable>> {
        {
            let read_locked_cached = self.cached_tables.read().unwrap();
            if let Some(table) = read_locked_cached.get(&name).cloned() {
                return Ok(table);
            }
        }
        let to_load_err = |err| TableStoreError::LoadSegment {
            name: name.clone(),
            err,
        };
        let table_file_path = self.dir.join(&name);
        let mut table_file = File::open(table_file_path).map_err(to_load_err)?;
        let table = ReadonlyTable::load_from(&mut table_file, self, name, self.key_size)?;
        {
            let mut write_locked_cache = self.cached_tables.write().unwrap();
            write_locked_cache.insert(table.name.clone(), table.clone());
        }
        Ok(table)
    }
    /// Loads every table currently recorded as a head.
    fn get_head_tables(&self) -> TableStoreResult<Vec<Arc<ReadonlyTable>>> {
        let mut tables = vec![];
        for head_entry in
            std::fs::read_dir(self.dir.join("heads")).map_err(TableStoreError::LoadHeads)?
        {
            let head_file_name = head_entry.map_err(TableStoreError::LoadHeads)?.file_name();
            let table = self.load_table(head_file_name.to_str().unwrap().to_string())?;
            tables.push(table);
        }
        Ok(tables)
    }
    /// Returns the single current head table, creating an empty one if none
    /// exists and merging divergent heads if there are several.
    pub fn get_head(&self) -> TableStoreResult<Arc<ReadonlyTable>> {
        let mut tables = self.get_head_tables()?;
        if tables.is_empty() {
            let empty_table = MutableTable::full(self.key_size);
            self.save_table(empty_table)
        } else if tables.len() == 1 {
            Ok(tables.pop().unwrap())
        } else {
            // There are multiple heads. We take a lock, then check if there are still
            // multiple heads (it's likely that another process was in the process of
            // deleting one of them). If there are still multiple heads, we attempt to
            // merge all the tables into one. We then save that table and record the new
            // head. Note that the locking isn't necessary for correctness; we
            // take the lock only to avoid other concurrent processes from doing
            // the same work (and producing another set of divergent heads).
            let (table, _) = self.get_head_locked()?;
            Ok(table)
        }
    }
    /// Like [`get_head()`](Self::get_head), but holds the store lock while
    /// resolving the heads and returns it to the caller.
    pub fn get_head_locked(&self) -> TableStoreResult<(Arc<ReadonlyTable>, FileLock)> {
        let lock = self.lock()?;
        let mut tables = self.get_head_tables()?;
        if tables.is_empty() {
            let empty_table = MutableTable::full(self.key_size);
            let table = self.save_table(empty_table)?;
            return Ok((table, lock));
        }
        if tables.len() == 1 {
            // Return early so we don't write a table with no changes compared to its parent
            return Ok((tables.pop().unwrap(), lock));
        }
        // Merge all other heads into the first one, save the result, and
        // retire the merged-in heads.
        let mut merged_table = MutableTable::incremental(tables[0].clone());
        for other in &tables[1..] {
            merged_table.merge_in(other);
        }
        let merged_table = self.save_table(merged_table)?;
        for table in &tables[1..] {
            self.remove_head(table);
        }
        Ok((merged_table, lock))
    }
    /// Prunes unreachable table segments.
    ///
    /// All table segments reachable from the `head` won't be removed. In
    /// addition to that, segments created after `keep_newer` will be
    /// preserved. This mitigates a risk of deleting new segments created
    /// concurrently by another process.
    ///
    /// The caller may decide whether to lock the store by `get_head_locked()`.
    /// It's generally safe to run `gc()` without locking so long as the
    /// `keep_newer` time is reasonably old, and all writers reload table
    /// segments by `get_head_locked()` before adding new entries.
    #[tracing::instrument(skip(self, head))]
    pub fn gc(&self, head: &Arc<ReadonlyTable>, keep_newer: SystemTime) -> Result<(), PathError> {
        let read_locked_cache = self.cached_tables.read().unwrap();
        let reachable_tables: HashSet<&str> = itertools::chain(
            head.ancestor_segments(),
            // Also preserve cached segments so these segments can still be
            // loaded from the disk.
            read_locked_cache.values(),
        )
        .map(|table| table.name())
        .collect();
        let remove_file_if_not_new = |entry: &fs::DirEntry| -> Result<(), PathError> {
            let path = entry.path();
            // Check timestamp, but there's still TOCTOU problem if an existing
            // file is replaced with new file of the same name.
            let metadata = entry.metadata().context(&path)?;
            let mtime = metadata.modified().expect("unsupported platform?");
            if mtime > keep_newer {
                tracing::trace!(?path, "not removing");
                Ok(())
            } else {
                tracing::trace!(?path, "removing");
                fs::remove_file(&path).context(&path)
            }
        };
        for entry in self.dir.read_dir().context(&self.dir)? {
            let entry = entry.context(&self.dir)?;
            let file_name = entry.file_name();
            // Skip the bookkeeping entries; they're not segments.
            if file_name == "heads" || file_name == "lock" {
                continue;
            }
            let Some(table_name) = file_name
                .to_str()
                .filter(|name| name.len() == SEGMENT_FILE_NAME_LENGTH)
            else {
                tracing::trace!(?entry, "skipping invalid file name");
                continue;
            };
            if reachable_tables.contains(table_name) {
                continue;
            }
            remove_file_if_not_new(&entry)?;
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
use test_case::test_case;
use super::*;
use crate::tests::new_temp_dir;
#[test_case(false; "memory")]
#[test_case(true; "file")]
fn stacked_table_empty(on_disk: bool) {
let temp_dir = new_temp_dir();
let store = TableStore::init(temp_dir.path().to_path_buf(), 3);
let mut_table = store.get_head().unwrap().start_mutation();
let mut _saved_table = None;
let table: &dyn TableSegment = if on_disk {
_saved_table = Some(store.save_table(mut_table).unwrap());
_saved_table.as_ref().unwrap().as_ref()
} else {
&mut_table
};
// Cannot find any keys
assert_eq!(table.get_value(b"\0\0\0"), None);
assert_eq!(table.get_value(b"aaa"), None);
assert_eq!(table.get_value(b"\xff\xff\xff"), None);
}
#[test_case(false; "memory")]
#[test_case(true; "file")]
fn stacked_table_single_key(on_disk: bool) {
let temp_dir = new_temp_dir();
let store = TableStore::init(temp_dir.path().to_path_buf(), 3);
let mut mut_table = store.get_head().unwrap().start_mutation();
mut_table.add_entry(b"abc".to_vec(), b"value".to_vec());
let mut _saved_table = None;
let table: &dyn TableSegment = if on_disk {
_saved_table = Some(store.save_table(mut_table).unwrap());
_saved_table.as_ref().unwrap().as_ref()
} else {
&mut_table
};
// Can find expected keys
assert_eq!(table.get_value(b"\0\0\0"), None);
assert_eq!(table.get_value(b"abc"), Some(b"value".as_slice()));
assert_eq!(table.get_value(b"\xff\xff\xff"), None);
}
#[test_case(false; "memory")]
#[test_case(true; "file")]
fn stacked_table_multiple_keys(on_disk: bool) {
let temp_dir = new_temp_dir();
let store = TableStore::init(temp_dir.path().to_path_buf(), 3);
let mut mut_table = store.get_head().unwrap().start_mutation();
mut_table.add_entry(b"zzz".to_vec(), b"val3".to_vec());
mut_table.add_entry(b"abc".to_vec(), b"value1".to_vec());
mut_table.add_entry(b"abd".to_vec(), b"value 2".to_vec());
let mut _saved_table = None;
let table: &dyn TableSegment = if on_disk {
_saved_table = Some(store.save_table(mut_table).unwrap());
_saved_table.as_ref().unwrap().as_ref()
} else {
&mut_table
};
// Can find expected keys
assert_eq!(table.get_value(b"\0\0\0"), None);
assert_eq!(table.get_value(b"abb"), None);
assert_eq!(table.get_value(b"abc"), Some(b"value1".as_slice()));
assert_eq!(table.get_value(b"abd"), Some(b"value 2".as_slice()));
assert_eq!(table.get_value(b"abe"), None);
assert_eq!(table.get_value(b"zzz"), Some(b"val3".as_slice()));
assert_eq!(table.get_value(b"\xff\xff\xff"), None);
}
// Builds a chain of ten saved tables (each an incremental child of the
// previous one) and verifies that lookups traverse the whole parent chain.
#[test]
fn stacked_table_multiple_keys_with_parent_file() {
    let temp_dir = new_temp_dir();
    let store = TableStore::init(temp_dir.path().to_path_buf(), 3);
    let mut mut_table = store.get_head().unwrap().start_mutation();
    mut_table.add_entry(b"abd".to_vec(), b"value 2".to_vec());
    mut_table.add_entry(b"abc".to_vec(), b"value1".to_vec());
    mut_table.add_entry(b"zzz".to_vec(), b"val3".to_vec());
    // Each round saves the current table and starts a new incremental table on
    // top of it, so earlier entries live in ancestor files.
    for round in 0..10 {
        for i in 0..10 {
            mut_table.add_entry(
                format!("x{i}{round}").into_bytes(),
                format!("value {i}{round}").into_bytes(),
            );
        }
        let saved_table = store.save_table(mut_table).unwrap();
        mut_table = MutableTable::incremental(saved_table);
    }
    // Can find expected keys
    assert_eq!(mut_table.get_value(b"\0\0\0"), None);
    assert_eq!(mut_table.get_value(b"x.."), None);
    assert_eq!(mut_table.get_value(b"x14"), Some(b"value 14".as_slice()));
    assert_eq!(mut_table.get_value(b"x41"), Some(b"value 41".as_slice()));
    assert_eq!(mut_table.get_value(b"x49"), Some(b"value 49".as_slice()));
    assert_eq!(mut_table.get_value(b"x94"), Some(b"value 94".as_slice()));
    assert_eq!(mut_table.get_value(b"xAA"), None);
    assert_eq!(mut_table.get_value(b"\xff\xff\xff"), None);
}
// Creates two divergent child tables of a common base and verifies that
// explicitly merging one into the other makes all keys visible.
#[test]
fn stacked_table_merge() {
    let temp_dir = new_temp_dir();
    let store = TableStore::init(temp_dir.path().to_path_buf(), 3);
    let mut mut_base_table = store.get_head().unwrap().start_mutation();
    mut_base_table.add_entry(b"abc".to_vec(), b"value1".to_vec());
    let base_table = store.save_table(mut_base_table).unwrap();
    // Two siblings derived from the same base, with one overlapping key ("mmm").
    let mut mut_table1 = MutableTable::incremental(base_table.clone());
    mut_table1.add_entry(b"abd".to_vec(), b"value 2".to_vec());
    mut_table1.add_entry(b"zzz".to_vec(), b"val3".to_vec());
    mut_table1.add_entry(b"mmm".to_vec(), b"side 1".to_vec());
    let table1 = store.save_table(mut_table1).unwrap();
    let mut mut_table2 = MutableTable::incremental(base_table);
    mut_table2.add_entry(b"yyy".to_vec(), b"val5".to_vec());
    mut_table2.add_entry(b"mmm".to_vec(), b"side 2".to_vec());
    mut_table2.add_entry(b"abe".to_vec(), b"value 4".to_vec());
    mut_table2.merge_in(&table1);
    // Can find expected keys
    assert_eq!(mut_table2.get_value(b"\0\0\0"), None);
    assert_eq!(mut_table2.get_value(b"abc"), Some(b"value1".as_slice()));
    assert_eq!(mut_table2.get_value(b"abd"), Some(b"value 2".as_slice()));
    assert_eq!(mut_table2.get_value(b"abe"), Some(b"value 4".as_slice()));
    // The caller shouldn't write two values for the same key, so it's undefined
    // which wins, but let's test how it currently behaves.
    assert_eq!(mut_table2.get_value(b"mmm"), Some(b"side 1".as_slice()));
    assert_eq!(mut_table2.get_value(b"yyy"), Some(b"val5".as_slice()));
    assert_eq!(mut_table2.get_value(b"zzz"), Some(b"val3".as_slice()));
    assert_eq!(mut_table2.get_value(b"\xff\xff\xff"), None);
}
// Same divergence scenario as `stacked_table_merge`, but the merge is done
// implicitly by the store when loading the head instead of via `merge_in()`.
#[test]
fn stacked_table_automatic_merge() {
    // Same test as above, but here we let the store do the merging on load
    let temp_dir = new_temp_dir();
    let store = TableStore::init(temp_dir.path().to_path_buf(), 3);
    let mut mut_base_table = store.get_head().unwrap().start_mutation();
    mut_base_table.add_entry(b"abc".to_vec(), b"value1".to_vec());
    let base_table = store.save_table(mut_base_table).unwrap();
    let mut mut_table1 = MutableTable::incremental(base_table.clone());
    mut_table1.add_entry(b"abd".to_vec(), b"value 2".to_vec());
    mut_table1.add_entry(b"zzz".to_vec(), b"val3".to_vec());
    mut_table1.add_entry(b"mmm".to_vec(), b"side 1".to_vec());
    store.save_table(mut_table1).unwrap();
    let mut mut_table2 = MutableTable::incremental(base_table);
    mut_table2.add_entry(b"yyy".to_vec(), b"val5".to_vec());
    mut_table2.add_entry(b"mmm".to_vec(), b"side 2".to_vec());
    mut_table2.add_entry(b"abe".to_vec(), b"value 4".to_vec());
    let table2 = store.save_table(mut_table2).unwrap();
    // The saved table does not have the keys from table1
    assert_eq!(table2.get_value(b"abd"), None);
    // Can find expected keys in the merged table we get from get_head()
    let merged_table = store.get_head().unwrap();
    assert_eq!(merged_table.get_value(b"\0\0\0"), None);
    assert_eq!(merged_table.get_value(b"abc"), Some(b"value1".as_slice()));
    assert_eq!(merged_table.get_value(b"abd"), Some(b"value 2".as_slice()));
    assert_eq!(merged_table.get_value(b"abe"), Some(b"value 4".as_slice()));
    // The caller shouldn't write two values for the same key, so it's undefined
    // which wins.
    let value_mmm = merged_table.get_value(b"mmm");
    assert!(value_mmm == Some(b"side 1".as_slice()) || value_mmm == Some(b"side 2".as_slice()));
    assert_eq!(merged_table.get_value(b"yyy"), Some(b"val5".as_slice()));
    assert_eq!(merged_table.get_value(b"zzz"), Some(b"val3".as_slice()));
    assert_eq!(merged_table.get_value(b"\xff\xff\xff"), None);
}
// Saving a mutation with no new entries must not lose the existing head.
#[test]
fn stacked_table_store_save_empty() {
    let temp_dir = new_temp_dir();
    let store = TableStore::init(temp_dir.path().to_path_buf(), 3);
    let mut mut_table = store.get_head().unwrap().start_mutation();
    mut_table.add_entry(b"abc".to_vec(), b"value".to_vec());
    store.save_table(mut_table).unwrap();
    // Save an empty (no-op) mutation on top of the previous head.
    let mut_table = store.get_head().unwrap().start_mutation();
    store.save_table(mut_table).unwrap();
    // Table head shouldn't be removed on empty save
    let table = store.get_head().unwrap();
    assert_eq!(table.get_value(b"abc"), Some(b"value".as_slice()));
}
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/fmt_util.rs | lib/src/fmt_util.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Common formatting helpers
/// Find the smallest binary prefix with which the whole part of `x` is at most
/// three digits, and return the scaled `x`, that prefix, and the associated
/// base-1024 exponent.
/// Scales `x` down by powers of 1024 until its integer part fits in at most
/// three digits, returning the scaled value together with the matching binary
/// (IEC) prefix such as `"Ki"` or `"Mi"`.
pub fn binary_prefix(x: f32) -> (f32, &'static str) {
    /// IEC prefixes in ascending order; the index of each entry is the
    /// base-1024 exponent it stands for (index 0 = no prefix).
    const TABLE: [&str; 9] = ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi"];
    let mut exponent = 0;
    let mut value = x;
    loop {
        // Stop once the value fits in three digits or we ran out of prefixes.
        if value.abs() < 1000.0 || exponent + 1 >= TABLE.len() {
            return (value, TABLE[exponent]);
        }
        value /= 1024.0;
        exponent += 1;
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/eol.rs | lib/src/eol.rs | // Copyright 2025 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::io::Cursor;
use bstr::ByteSlice as _;
use tokio::io::AsyncRead;
use tokio::io::AsyncReadExt as _;
use crate::config::ConfigGetError;
use crate::settings::UserSettings;
/// Heuristically decides whether `bytes` is binary content: any NUL byte or
/// any CR that is not immediately followed by LF marks the data as binary.
// TODO(06393993): align the algorithm with git so that the git config autocrlf
// users won't see different decisions on whether a file is binary and needs to
// perform EOL conversion.
fn is_binary(bytes: &[u8]) -> bool {
    let mut iter = bytes.iter().peekable();
    while let Some(&byte) = iter.next() {
        if byte == b'\0' {
            return true;
        }
        // A lone CR (including one at the very end) is treated as binary.
        if byte == b'\r' && iter.peek().copied() != Some(&b'\n') {
            return true;
        }
    }
    false
}
/// Applies the configured EOL conversion when file contents move between the
/// working copy and the backend store.
#[derive(Clone)]
pub(crate) struct TargetEolStrategy {
    // Which conversion direction(s) are enabled; see `EolConversionMode`.
    eol_conversion_mode: EolConversionMode,
}
impl TargetEolStrategy {
    /// Creates a strategy for the given conversion mode.
    pub(crate) fn new(eol_conversion_mode: EolConversionMode) -> Self {
        Self {
            eol_conversion_mode,
        }
    }

    /// The limit to probe for whether the file is binary is 8KB.
    /// All files strictly smaller than the limit are always
    /// evaluated correctly and in full.
    /// Files larger than the limit - or with ambiguous content at the limit -
    /// are potentially misclassified.
    const PROBE_LIMIT: u64 = 8 << 10;

    /// Peek into the first [`TargetEolStrategy::PROBE_LIMIT`] bytes of content
    /// to determine if it is binary data.
    ///
    /// Peeked data is stored in `peek`.
    async fn probe_for_binary(
        mut contents: impl AsyncRead + Unpin,
        peek: &mut Vec<u8>,
    ) -> Result<bool, std::io::Error> {
        (&mut contents)
            .take(Self::PROBE_LIMIT)
            .read_to_end(peek)
            .await?;
        // The probe limit may have sliced a CRLF sequence, which would cause
        // misclassification as binary.
        let slice_to_check = if peek.get(Self::PROBE_LIMIT as usize - 1) == Some(&b'\r') {
            &peek[0..Self::PROBE_LIMIT as usize - 1]
        } else {
            peek
        };
        Ok(is_binary(slice_to_check))
    }

    /// Converts EOLs for content flowing from the file system into the backend
    /// store. In `Input` and `InputOutput` modes, text content is normalized
    /// to LF; binary content is passed through unchanged.
    pub(crate) async fn convert_eol_for_snapshot<'a>(
        &self,
        mut contents: impl AsyncRead + Send + Unpin + 'a,
    ) -> Result<Box<dyn AsyncRead + Send + Unpin + 'a>, std::io::Error> {
        match self.eol_conversion_mode {
            EolConversionMode::None => Ok(Box::new(contents)),
            EolConversionMode::Input | EolConversionMode::InputOutput => {
                let mut peek = vec![];
                let target_eol = if Self::probe_for_binary(&mut contents, &mut peek).await? {
                    TargetEol::PassThrough
                } else {
                    TargetEol::Lf
                };
                // Re-attach the probed prefix in front of the remaining stream.
                let peek = Cursor::new(peek);
                let contents = peek.chain(contents);
                convert_eol(contents, target_eol).await
            }
        }
    }

    /// Converts EOLs for content flowing from the backend store out to the
    /// file system. Only `InputOutput` mode converts (text to CRLF); binary
    /// content is passed through unchanged.
    pub(crate) async fn convert_eol_for_update<'a>(
        &self,
        mut contents: impl AsyncRead + Send + Unpin + 'a,
    ) -> Result<Box<dyn AsyncRead + Send + Unpin + 'a>, std::io::Error> {
        match self.eol_conversion_mode {
            EolConversionMode::None | EolConversionMode::Input => Ok(Box::new(contents)),
            EolConversionMode::InputOutput => {
                let mut peek = vec![];
                let target_eol = if Self::probe_for_binary(&mut contents, &mut peek).await? {
                    TargetEol::PassThrough
                } else {
                    TargetEol::Crlf
                };
                // Re-attach the probed prefix in front of the remaining stream.
                let peek = Cursor::new(peek);
                let contents = peek.chain(contents);
                convert_eol(contents, target_eol).await
            }
        }
    }
}
/// Configuring auto-converting CRLF line endings into LF when you add a file to
/// the backend, and vice versa when it checks out code onto your filesystem.
// Deserialized from config in kebab-case: "none", "input", "input-output".
#[derive(Debug, PartialEq, Eq, Copy, Clone, serde::Deserialize)]
#[serde(rename_all(deserialize = "kebab-case"))]
pub enum EolConversionMode {
    /// Do not perform EOL conversion.
    None,
    /// Only perform the CRLF to LF EOL conversion when writing to the backend
    /// store from the file system.
    Input,
    /// Perform CRLF to LF EOL conversion when writing to the backend store from
    /// the file system and LF to CRLF EOL conversion when writing to the file
    /// system from the backend store.
    InputOutput,
}
impl EolConversionMode {
    /// Try to create the [`EolConversionMode`] based on the
    /// `working-copy.eol-conversion` setting in the [`UserSettings`].
    ///
    /// # Errors
    /// Propagates any [`ConfigGetError`] from looking up the setting.
    pub fn try_from_settings(user_settings: &UserSettings) -> Result<Self, ConfigGetError> {
        user_settings.get("working-copy.eol-conversion")
    }
}
/// The line ending every line should be rewritten to by [`convert_eol`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum TargetEol {
    /// Rewrite line endings to `\n`.
    Lf,
    /// Rewrite line endings to `\r\n`.
    Crlf,
    /// Leave the content untouched (used for binary data).
    PassThrough,
}
/// Rewrites every line terminator in `input` to `target_eol`, returning a
/// reader over the converted bytes. For `TargetEol::PassThrough` the input
/// reader is returned unchanged.
// NOTE(review): the non-pass-through path buffers the whole input in memory;
// presumably acceptable for working-copy file sizes — confirm.
async fn convert_eol<'a>(
    mut input: impl AsyncRead + Send + Unpin + 'a,
    target_eol: TargetEol,
) -> Result<Box<dyn AsyncRead + Send + Unpin + 'a>, std::io::Error> {
    let eol = match target_eol {
        TargetEol::PassThrough => {
            return Ok(Box::new(input));
        }
        TargetEol::Lf => b"\n".as_slice(),
        TargetEol::Crlf => b"\r\n".as_slice(),
    };
    let mut contents = vec![];
    input.read_to_end(&mut contents).await?;
    let lines = contents.lines_with_terminator();
    let mut res = Vec::<u8>::with_capacity(contents.len());
    // Strips a single trailing CRLF or LF; returns None when the line has no
    // terminator (only possible for the final line).
    fn trim_last_eol(input: &[u8]) -> Option<&[u8]> {
        input
            .strip_suffix(b"\r\n")
            .or_else(|| input.strip_suffix(b"\n"))
    }
    for line in lines {
        if let Some(line) = trim_last_eol(line) {
            res.extend_from_slice(line);
            // If the line ends with an EOL, we should append the target EOL.
            res.extend_from_slice(eol);
        } else {
            // If the line doesn't end with an EOL, we don't append the EOL. This can happen
            // on the last line.
            res.extend_from_slice(line);
        }
    }
    Ok(Box::new(Cursor::new(res)))
}
// Unit tests for EOL detection and conversion.
#[cfg(test)]
mod tests {
    use std::error::Error;
    use std::pin::Pin;
    use std::task::Poll;

    use test_case::test_case;

    use super::*;

    // Table-driven check of convert_eol over all target EOL kinds.
    #[tokio::main(flavor = "current_thread")]
    #[test_case(b"a\n", TargetEol::PassThrough, b"a\n"; "LF text with no EOL conversion")]
    #[test_case(b"a\r\n", TargetEol::PassThrough, b"a\r\n"; "CRLF text with no EOL conversion")]
    #[test_case(b"a", TargetEol::PassThrough, b"a"; "no EOL text with no EOL conversion")]
    #[test_case(b"a\n", TargetEol::Crlf, b"a\r\n"; "LF text with CRLF EOL conversion")]
    #[test_case(b"a\r\n", TargetEol::Crlf, b"a\r\n"; "CRLF text with CRLF EOL conversion")]
    #[test_case(b"a", TargetEol::Crlf, b"a"; "no EOL text with CRLF conversion")]
    #[test_case(b"", TargetEol::Crlf, b""; "empty text with CRLF EOL conversion")]
    #[test_case(b"a\nb", TargetEol::Crlf, b"a\r\nb"; "text ends without EOL with CRLF EOL conversion")]
    #[test_case(b"a\n", TargetEol::Lf, b"a\n"; "LF text with LF EOL conversion")]
    #[test_case(b"a\r\n", TargetEol::Lf, b"a\n"; "CRLF text with LF EOL conversion")]
    #[test_case(b"a", TargetEol::Lf, b"a"; "no EOL text with LF conversion")]
    #[test_case(b"", TargetEol::Lf, b""; "empty text with LF EOL conversion")]
    #[test_case(b"a\r\nb", TargetEol::Lf, b"a\nb"; "text ends without EOL with LF EOL conversion")]
    async fn test_eol_conversion(input: &[u8], target_eol: TargetEol, expected_output: &[u8]) {
        let mut input = input;
        let mut output = vec![];
        convert_eol(&mut input, target_eol)
            .await
            .expect("Failed to call convert_eol")
            .read_to_end(&mut output)
            .await
            .expect("Failed to read from the result");
        assert_eq!(output, expected_output);
    }

    // Reader that yields the wrapped error once, then reports EOF.
    struct ErrorReader(Option<std::io::Error>);

    impl ErrorReader {
        fn new(error: std::io::Error) -> Self {
            Self(Some(error))
        }
    }

    impl AsyncRead for ErrorReader {
        fn poll_read(
            mut self: Pin<&mut Self>,
            _cx: &mut std::task::Context<'_>,
            _buf: &mut tokio::io::ReadBuf<'_>,
        ) -> Poll<std::io::Result<()>> {
            if let Some(e) = self.0.take() {
                return Poll::Ready(Err(e));
            }
            Poll::Ready(Ok(()))
        }
    }

    // Read errors from the underlying stream must surface through convert_eol.
    #[tokio::main(flavor = "current_thread")]
    #[test_case(TargetEol::PassThrough; "no EOL conversion")]
    #[test_case(TargetEol::Lf; "LF EOL conversion")]
    #[test_case(TargetEol::Crlf; "CRLF EOL conversion")]
    async fn test_eol_convert_eol_read_error(target_eol: TargetEol) {
        let message = "test error";
        let error_reader = ErrorReader::new(std::io::Error::other(message));
        let mut output = vec![];
        // TODO: use TryFutureExt::and_then and async closure after we upgrade to 1.85.0
        // or later.
        let err = match convert_eol(error_reader, target_eol).await {
            Ok(mut reader) => reader.read_to_end(&mut output).await,
            Err(e) => Err(e),
        }
        .expect_err("should fail");
        // Walk the source() chain looking for the injected message.
        let has_expected_error_message = (0..)
            .scan(Some(&err as &(dyn Error + 'static)), |err, _| {
                let current_err = err.take()?;
                *err = current_err.source();
                Some(current_err)
            })
            .any(|e| e.to_string() == message);
        assert!(
            has_expected_error_message,
            "should have expected error message: {message}"
        );
    }

    // Input one byte longer than the probe limit, with a CRLF straddling the
    // probe boundary (CR is the last probed byte).
    fn test_probe_limit_input_crlf() -> [u8; TargetEolStrategy::PROBE_LIMIT as usize + 1] {
        let mut arr = [b'a'; TargetEolStrategy::PROBE_LIMIT as usize + 1];
        let crlf = b"\r\n";
        arr[100..102].copy_from_slice(crlf);
        arr[500..502].copy_from_slice(crlf);
        arr[1000..1002].copy_from_slice(crlf);
        arr[4090..4092].copy_from_slice(crlf);
        arr[TargetEolStrategy::PROBE_LIMIT as usize - 1
            ..TargetEolStrategy::PROBE_LIMIT as usize + 1]
            .copy_from_slice(crlf);
        arr
    }

    // Same input with all CRLFs replaced by LF (the expected snapshot output).
    fn test_probe_limit_input_lf() -> Vec<u8> {
        test_probe_limit_input_crlf().replace(b"\r\n", b"\n")
    }

    #[tokio::main(flavor = "current_thread")]
    #[test_case(TargetEolStrategy {
        eol_conversion_mode: EolConversionMode::None,
    }, b"\r\n", b"\r\n"; "none settings")]
    #[test_case(TargetEolStrategy {
        eol_conversion_mode: EolConversionMode::Input,
    }, b"\r\n", b"\n"; "input settings text input")]
    #[test_case(TargetEolStrategy {
        eol_conversion_mode: EolConversionMode::InputOutput,
    }, b"\r\n", b"\n"; "input output settings text input")]
    #[test_case(TargetEolStrategy {
        eol_conversion_mode: EolConversionMode::Input,
    }, b"\0\r\n", b"\0\r\n"; "input settings binary input")]
    #[test_case(TargetEolStrategy {
        eol_conversion_mode: EolConversionMode::InputOutput,
    }, b"\0\r\n", b"\0\r\n"; "input output settings binary input with NUL")]
    #[test_case(TargetEolStrategy {
        eol_conversion_mode: EolConversionMode::InputOutput,
    }, b"\r\r\n", b"\r\r\n"; "input output settings binary input with lone CR")]
    #[test_case(TargetEolStrategy {
        eol_conversion_mode: EolConversionMode::Input,
    }, &[0; 20 << 10], &[0; 20 << 10]; "input settings long binary input")]
    #[test_case(TargetEolStrategy {
        eol_conversion_mode: EolConversionMode::Input,
    }, &test_probe_limit_input_crlf(), &test_probe_limit_input_lf(); "input settings with CRLF on probe boundary")]
    async fn test_eol_strategy_convert_eol_for_snapshot(
        strategy: TargetEolStrategy,
        contents: &[u8],
        expected_output: &[u8],
    ) {
        let mut actual_output = vec![];
        strategy
            .convert_eol_for_snapshot(contents)
            .await
            .unwrap()
            .read_to_end(&mut actual_output)
            .await
            .unwrap();
        assert_eq!(actual_output, expected_output);
    }

    #[tokio::main(flavor = "current_thread")]
    #[test_case(TargetEolStrategy {
        eol_conversion_mode: EolConversionMode::None,
    }, b"\n", b"\n"; "none settings")]
    #[test_case(TargetEolStrategy {
        eol_conversion_mode: EolConversionMode::Input,
    }, b"\n", b"\n"; "input settings")]
    #[test_case(TargetEolStrategy {
        eol_conversion_mode: EolConversionMode::InputOutput,
    }, b"\n", b"\r\n"; "input output settings text input")]
    #[test_case(TargetEolStrategy {
        eol_conversion_mode: EolConversionMode::InputOutput,
    }, b"\0\n", b"\0\n"; "input output settings binary input")]
    #[test_case(TargetEolStrategy {
        eol_conversion_mode: EolConversionMode::Input,
    }, &[0; 20 << 10], &[0; 20 << 10]; "input output settings long binary input")]
    async fn test_eol_strategy_convert_eol_for_update(
        strategy: TargetEolStrategy,
        contents: &[u8],
        expected_output: &[u8],
    ) {
        let mut actual_output = vec![];
        strategy
            .convert_eol_for_update(contents)
            .await
            .unwrap()
            .read_to_end(&mut actual_output)
            .await
            .unwrap();
        assert_eq!(actual_output, expected_output);
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/revset_parser.rs | lib/src/revset_parser.rs | // Copyright 2021-2024 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::collections::HashSet;
use std::error;
use std::mem;
use std::str::FromStr;
use std::sync::LazyLock;
use itertools::Itertools as _;
use pest::Parser as _;
use pest::iterators::Pair;
use pest::pratt_parser::Assoc;
use pest::pratt_parser::Op;
use pest::pratt_parser::PrattParser;
use pest_derive::Parser;
use thiserror::Error;
use crate::dsl_util;
use crate::dsl_util::AliasDeclaration;
use crate::dsl_util::AliasDeclarationParser;
use crate::dsl_util::AliasDefinitionParser;
use crate::dsl_util::AliasExpandError;
use crate::dsl_util::AliasExpandableExpression;
use crate::dsl_util::AliasId;
use crate::dsl_util::AliasesMap;
use crate::dsl_util::Diagnostics;
use crate::dsl_util::ExpressionFolder;
use crate::dsl_util::FoldableExpression;
use crate::dsl_util::FunctionCallParser;
use crate::dsl_util::InvalidArguments;
use crate::dsl_util::StringLiteralParser;
use crate::dsl_util::collect_similar;
use crate::ref_name::RefNameBuf;
use crate::ref_name::RemoteNameBuf;
use crate::ref_name::RemoteRefSymbolBuf;
/// Pest-generated parser for the revset grammar.
#[derive(Parser)]
#[grammar = "revset.pest"]
struct RevsetParser;

/// Decodes quoted string literals (content and escape sequences).
const STRING_LITERAL_PARSER: StringLiteralParser<Rule> = StringLiteralParser {
    content_rule: Rule::string_content,
    escape_rule: Rule::string_escape,
};

/// Maps the revset grammar rules onto the shared function-call parser.
const FUNCTION_CALL_PARSER: FunctionCallParser<Rule> = FunctionCallParser {
    function_name_rule: Rule::function_name,
    function_arguments_rule: Rule::function_arguments,
    keyword_argument_rule: Rule::keyword_argument,
    argument_name_rule: Rule::strict_identifier,
    argument_value_rule: Rule::expression,
};
impl Rule {
    /// Whether this is a placeholder rule for compatibility with the other
    /// systems.
    fn is_compat(&self) -> bool {
        matches!(
            self,
            Self::compat_parents_op
                | Self::compat_dag_range_op
                | Self::compat_dag_range_pre_op
                | Self::compat_dag_range_post_op
                | Self::compat_add_op
                | Self::compat_sub_op
        )
    }

    /// Returns the operator text for operator rules, or `None` for rules that
    /// have no single symbolic spelling. Used to render error messages.
    fn to_symbol(self) -> Option<&'static str> {
        match self {
            Self::EOI => None,
            Self::whitespace => None,
            Self::identifier_part => None,
            Self::identifier => None,
            Self::strict_identifier_part => None,
            Self::strict_identifier => None,
            Self::symbol => None,
            Self::string_escape => None,
            Self::string_content_char => None,
            Self::string_content => None,
            Self::string_literal => None,
            Self::raw_string_content => None,
            Self::raw_string_literal => None,
            Self::at_op => Some("@"),
            Self::pattern_kind_op => Some(":"),
            Self::parents_op => Some("-"),
            Self::children_op => Some("+"),
            Self::compat_parents_op => Some("^"),
            Self::dag_range_op
            | Self::dag_range_pre_op
            | Self::dag_range_post_op
            | Self::dag_range_all_op => Some("::"),
            Self::compat_dag_range_op
            | Self::compat_dag_range_pre_op
            | Self::compat_dag_range_post_op => Some(":"),
            Self::range_op => Some(".."),
            Self::range_pre_op | Self::range_post_op | Self::range_all_op => Some(".."),
            Self::range_ops => None,
            Self::range_pre_ops => None,
            Self::range_post_ops => None,
            Self::range_all_ops => None,
            Self::negate_op => Some("~"),
            Self::union_op => Some("|"),
            Self::intersection_op => Some("&"),
            Self::difference_op => Some("~"),
            Self::compat_add_op => Some("+"),
            Self::compat_sub_op => Some("-"),
            Self::infix_op => None,
            Self::function => None,
            Self::function_name => None,
            Self::keyword_argument => None,
            Self::argument => None,
            Self::function_arguments => None,
            Self::formal_parameters => None,
            Self::string_pattern => None,
            Self::primary => None,
            Self::neighbors_expression => None,
            Self::range_expression => None,
            Self::expression => None,
            Self::program_modifier => None,
            Self::program_with_modifier => None,
            Self::program => None,
            Self::symbol_name => None,
            Self::function_alias_declaration => None,
            Self::alias_declaration => None,
        }
    }
}
/// Manages diagnostic messages emitted during revset parsing and function-call
/// resolution.
pub type RevsetDiagnostics = Diagnostics<RevsetParseError>;

/// Error from parsing a revset expression; `Display` renders the underlying
/// pest error, which includes the source span.
#[derive(Debug, Error)]
#[error("{pest_error}")]
pub struct RevsetParseError {
    // Boxed to keep the error type small; see `kind()` for access.
    kind: Box<RevsetParseErrorKind>,
    // Carries the source span and rendered message.
    pest_error: Box<pest::error::Error<Rule>>,
    // Optional underlying cause (e.g. the error inside an alias expansion).
    source: Option<Box<dyn error::Error + Send + Sync>>,
}
/// Categories of revset parse failure; the `#[error]` strings are the
/// user-facing messages.
#[derive(Debug, Error, PartialEq, Eq)]
pub enum RevsetParseErrorKind {
    #[error("Syntax error")]
    SyntaxError,
    #[error("`{op}` is not a prefix operator")]
    NotPrefixOperator {
        op: String,
        similar_op: String,
        description: String,
    },
    #[error("`{op}` is not a postfix operator")]
    NotPostfixOperator {
        op: String,
        similar_op: String,
        description: String,
    },
    #[error("`{op}` is not an infix operator")]
    NotInfixOperator {
        op: String,
        similar_op: String,
        description: String,
    },
    #[error("Modifier `{0}` doesn't exist")]
    NoSuchModifier(String),
    #[error("Function `{name}` doesn't exist")]
    NoSuchFunction {
        name: String,
        candidates: Vec<String>,
    },
    #[error("Function `{name}`: {message}")]
    InvalidFunctionArguments { name: String, message: String },
    #[error("Cannot resolve file pattern without workspace")]
    FsPathWithoutWorkspace,
    #[error("Cannot resolve `@` without workspace")]
    WorkingCopyWithoutWorkspace,
    #[error("Redefinition of function parameter")]
    RedefinedFunctionParameter,
    #[error("{0}")]
    Expression(String),
    #[error("In alias `{0}`")]
    InAliasExpansion(String),
    #[error("In function parameter `{0}`")]
    InParameterExpansion(String),
    #[error("Alias `{0}` expanded recursively")]
    RecursiveAlias(String),
}
impl RevsetParseError {
    /// Creates an error of the given kind attached to the given source span.
    pub(super) fn with_span(kind: RevsetParseErrorKind, span: pest::Span<'_>) -> Self {
        let message = kind.to_string();
        let pest_error = Box::new(pest::error::Error::new_from_span(
            pest::error::ErrorVariant::CustomError { message },
            span,
        ));
        Self {
            kind: Box::new(kind),
            pest_error,
            source: None,
        }
    }

    /// Attaches an underlying cause to this error (builder style).
    pub(super) fn with_source(
        mut self,
        source: impl Into<Box<dyn error::Error + Send + Sync>>,
    ) -> Self {
        self.source = Some(source.into());
        self
    }

    /// Some other expression error.
    pub fn expression(message: impl Into<String>, span: pest::Span<'_>) -> Self {
        Self::with_span(RevsetParseErrorKind::Expression(message.into()), span)
    }

    /// If this is a `NoSuchFunction` error, expands the candidates list with
    /// the given `other_functions`.
    pub(super) fn extend_function_candidates<I>(mut self, other_functions: I) -> Self
    where
        I: IntoIterator,
        I::Item: AsRef<str>,
    {
        if let RevsetParseErrorKind::NoSuchFunction { name, candidates } = self.kind.as_mut() {
            let other_candidates = collect_similar(name, other_functions);
            // Both lists are sorted, so a merge keeps the result sorted for dedup.
            *candidates = itertools::merge(mem::take(candidates), other_candidates)
                .dedup()
                .collect();
        }
        self
    }

    pub fn kind(&self) -> &RevsetParseErrorKind {
        &self.kind
    }

    /// Original parsing error which typically occurred in an alias expression.
    pub fn origin(&self) -> Option<&Self> {
        self.source.as_ref().and_then(|e| e.downcast_ref())
    }
}
// Hooks revset errors into the generic alias-expansion machinery so that
// errors raised inside alias bodies are wrapped with their expansion context.
impl AliasExpandError for RevsetParseError {
    fn invalid_arguments(err: InvalidArguments<'_>) -> Self {
        err.into()
    }

    fn recursive_expansion(id: AliasId<'_>, span: pest::Span<'_>) -> Self {
        Self::with_span(RevsetParseErrorKind::RecursiveAlias(id.to_string()), span)
    }

    fn within_alias_expansion(self, id: AliasId<'_>, span: pest::Span<'_>) -> Self {
        // Pick the wrapper kind by what kind of alias was being expanded.
        let kind = match id {
            AliasId::Symbol(_) | AliasId::Function(..) => {
                RevsetParseErrorKind::InAliasExpansion(id.to_string())
            }
            AliasId::Parameter(_) => RevsetParseErrorKind::InParameterExpansion(id.to_string()),
        };
        Self::with_span(kind, span).with_source(self)
    }
}
// Raw pest syntax errors become `SyntaxError`, with rule names rewritten to
// user-readable operator symbols.
impl From<pest::error::Error<Rule>> for RevsetParseError {
    fn from(err: pest::error::Error<Rule>) -> Self {
        Self {
            kind: Box::new(RevsetParseErrorKind::SyntaxError),
            pest_error: Box::new(rename_rules_in_pest_error(err)),
            source: None,
        }
    }
}
// Function-call argument errors carry the function name, message, and span.
impl From<InvalidArguments<'_>> for RevsetParseError {
    fn from(err: InvalidArguments<'_>) -> Self {
        let kind = RevsetParseErrorKind::InvalidFunctionArguments {
            name: err.name.to_owned(),
            message: err.message,
        };
        Self::with_span(kind, err.span)
    }
}
/// Rewrites the rule names in a pest parsing error so expected/unexpected
/// tokens are shown as operator symbols (e.g. "`::`") instead of internal
/// grammar rule names, and drops duplicate or compat-only suggestions.
fn rename_rules_in_pest_error(mut err: pest::error::Error<Rule>) -> pest::error::Error<Rule> {
    let pest::error::ErrorVariant::ParsingError {
        positives,
        negatives,
    } = &mut err.variant
    else {
        return err;
    };

    // Remove duplicated symbols. Compat symbols are also removed from the
    // (positive) suggestion.
    let mut known_syms = HashSet::new();
    positives.retain(|rule| {
        !rule.is_compat() && rule.to_symbol().is_none_or(|sym| known_syms.insert(sym))
    });
    let mut known_syms = HashSet::new();
    negatives.retain(|rule| rule.to_symbol().is_none_or(|sym| known_syms.insert(sym)));
    err.renamed_rules(|rule| {
        rule.to_symbol()
            .map(|sym| format!("`{sym}`"))
            .unwrap_or_else(|| format!("<{rule:?}>"))
    })
}
/// Parsed revset AST node, before any name or function resolution.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum ExpressionKind<'i> {
    /// Unquoted symbol.
    Identifier(&'i str),
    /// Quoted symbol or string.
    String(String),
    /// `<kind>:<value>`
    StringPattern {
        kind: &'i str,
        value: String,
    },
    /// `<name>@<remote>`
    RemoteSymbol(RemoteRefSymbolBuf),
    /// `<name>@`
    AtWorkspace(String),
    /// `@`
    AtCurrentWorkspace,
    /// `::`
    DagRangeAll,
    /// `..`
    RangeAll,
    /// Prefix or postfix operator applied to one operand.
    Unary(UnaryOp, Box<ExpressionNode<'i>>),
    /// Infix operator applied to two operands.
    Binary(BinaryOp, Box<ExpressionNode<'i>>, Box<ExpressionNode<'i>>),
    /// `x | y | ..`
    UnionAll(Vec<ExpressionNode<'i>>),
    /// Function call such as `parents(x)`.
    FunctionCall(Box<FunctionCallNode<'i>>),
    /// `name: body`
    Modifier(Box<ModifierNode<'i>>),
    /// Identity node to preserve the span in the source text.
    AliasExpanded(AliasId<'i>, Box<ExpressionNode<'i>>),
}
// Allows generic AST folders (e.g. alias substitution) to rewrite sub-trees
// while leaf nodes pass through unchanged.
impl<'i> FoldableExpression<'i> for ExpressionKind<'i> {
    fn fold<F>(self, folder: &mut F, span: pest::Span<'i>) -> Result<Self, F::Error>
    where
        F: ExpressionFolder<'i, Self> + ?Sized,
    {
        match self {
            Self::Identifier(name) => folder.fold_identifier(name, span),
            // Leaf nodes have no sub-expressions to fold.
            // (Consistency fix: spell `Self::AtWorkspace` like the sibling
            // arms instead of the fully-qualified `ExpressionKind::`.)
            Self::String(_)
            | Self::StringPattern { .. }
            | Self::RemoteSymbol(_)
            | Self::AtWorkspace(_)
            | Self::AtCurrentWorkspace
            | Self::DagRangeAll
            | Self::RangeAll => Ok(self),
            Self::Unary(op, arg) => {
                let arg = Box::new(folder.fold_expression(*arg)?);
                Ok(Self::Unary(op, arg))
            }
            Self::Binary(op, lhs, rhs) => {
                let lhs = Box::new(folder.fold_expression(*lhs)?);
                let rhs = Box::new(folder.fold_expression(*rhs)?);
                Ok(Self::Binary(op, lhs, rhs))
            }
            Self::UnionAll(nodes) => {
                let nodes = dsl_util::fold_expression_nodes(folder, nodes)?;
                Ok(Self::UnionAll(nodes))
            }
            Self::FunctionCall(function) => folder.fold_function_call(function, span),
            Self::Modifier(modifier) => {
                // Only the body is an expression; the modifier name is kept.
                let modifier = Box::new(ModifierNode {
                    name: modifier.name,
                    name_span: modifier.name_span,
                    body: folder.fold_expression(modifier.body)?,
                });
                Ok(Self::Modifier(modifier))
            }
            Self::AliasExpanded(id, subst) => {
                let subst = Box::new(folder.fold_expression(*subst)?);
                Ok(Self::AliasExpanded(id, subst))
            }
        }
    }
}
// Constructors used by the generic alias-expansion code to build nodes.
impl<'i> AliasExpandableExpression<'i> for ExpressionKind<'i> {
    fn identifier(name: &'i str) -> Self {
        Self::Identifier(name)
    }

    fn function_call(function: Box<FunctionCallNode<'i>>) -> Self {
        Self::FunctionCall(function)
    }

    fn alias_expanded(id: AliasId<'i>, subst: Box<ExpressionNode<'i>>) -> Self {
        Self::AliasExpanded(id, subst)
    }
}
/// Operator taking a single operand (prefix or postfix).
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum UnaryOp {
    /// `~x`
    Negate,
    /// `::x`
    DagRangePre,
    /// `x::`
    DagRangePost,
    /// `..x`
    RangePre,
    /// `x..`
    RangePost,
    /// `x-`
    Parents,
    /// `x+`
    Children,
}
/// Infix operator taking two operands.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum BinaryOp {
    /// `&`
    Intersection,
    /// `~`
    Difference,
    /// `::`
    DagRange,
    /// `..`
    Range,
}
/// AST node carrying an [`ExpressionKind`] and its source span.
pub type ExpressionNode<'i> = dsl_util::ExpressionNode<'i, ExpressionKind<'i>>;
/// Function-call node specialized for revset expressions.
pub type FunctionCallNode<'i> = dsl_util::FunctionCallNode<'i, ExpressionKind<'i>>;

/// Expression with modifier `name: body`.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ModifierNode<'i> {
    /// Modifier name.
    pub name: &'i str,
    /// Span of the modifier name.
    pub name_span: pest::Span<'i>,
    /// Expression body.
    pub body: ExpressionNode<'i>,
}
/// Combines two expressions into a union whose span covers both operands,
/// flattening a left-nested union instead of nesting another level.
fn union_nodes<'i>(lhs: ExpressionNode<'i>, rhs: ExpressionNode<'i>) -> ExpressionNode<'i> {
    let span = lhs.span.start_pos().span(&rhs.span.end_pos());
    let expr = match lhs.kind {
        // Flatten "x | y | z" to save recursion stack. Machine-generated query
        // might have long chain of unions.
        ExpressionKind::UnionAll(mut nodes) => {
            nodes.push(rhs);
            ExpressionKind::UnionAll(nodes)
        }
        _ => ExpressionKind::UnionAll(vec![lhs, rhs]),
    };
    ExpressionNode::new(expr, span)
}
/// Parses text into expression tree. The text may be prefixed with `all:`.
/// No name resolution is made at this stage.
// TODO: drop support for legacy "all:" modifier in jj 0.38+
pub fn parse_program_with_modifier(
    revset_str: &str,
) -> Result<ExpressionNode<'_>, RevsetParseError> {
    let mut pairs = RevsetParser::parse(Rule::program_with_modifier, revset_str)?;
    let first = pairs.next().unwrap();
    match first.as_rule() {
        // Plain expression without a modifier prefix.
        Rule::expression => parse_expression_node(first),
        // "<name>: <expression>" form; wrap the body in a ModifierNode.
        Rule::program_modifier => {
            let [lhs, op] = first.into_inner().collect_array().unwrap();
            let rhs = pairs.next().unwrap();
            assert_eq!(lhs.as_rule(), Rule::strict_identifier);
            assert_eq!(op.as_rule(), Rule::pattern_kind_op);
            assert_eq!(rhs.as_rule(), Rule::expression);
            // Span covers "name: body" from the name through the body.
            let span = lhs.as_span().start_pos().span(&rhs.as_span().end_pos());
            let modifier = Box::new(ModifierNode {
                name: lhs.as_str(),
                name_span: lhs.as_span(),
                body: parse_expression_node(rhs)?,
            });
            let expr = ExpressionKind::Modifier(modifier);
            Ok(ExpressionNode::new(expr, span))
        }
        r => panic!("unexpected revset parse rule: {r:?}"),
    }
}
/// Parses text into expression tree. No name resolution is made at this stage.
pub fn parse_program(revset_str: &str) -> Result<ExpressionNode<'_>, RevsetParseError> {
    let mut pairs = RevsetParser::parse(Rule::program, revset_str)?;
    // The grammar guarantees a single top-level expression.
    let first = pairs.next().unwrap();
    assert_eq!(first.as_rule(), Rule::expression);
    parse_expression_node(first)
}
fn parse_expression_node(pair: Pair<Rule>) -> Result<ExpressionNode, RevsetParseError> {
fn not_prefix_op(
op: &Pair<Rule>,
similar_op: impl Into<String>,
description: impl Into<String>,
) -> RevsetParseError {
RevsetParseError::with_span(
RevsetParseErrorKind::NotPrefixOperator {
op: op.as_str().to_owned(),
similar_op: similar_op.into(),
description: description.into(),
},
op.as_span(),
)
}
fn not_postfix_op(
op: &Pair<Rule>,
similar_op: impl Into<String>,
description: impl Into<String>,
) -> RevsetParseError {
RevsetParseError::with_span(
RevsetParseErrorKind::NotPostfixOperator {
op: op.as_str().to_owned(),
similar_op: similar_op.into(),
description: description.into(),
},
op.as_span(),
)
}
fn not_infix_op(
op: &Pair<Rule>,
similar_op: impl Into<String>,
description: impl Into<String>,
) -> RevsetParseError {
RevsetParseError::with_span(
RevsetParseErrorKind::NotInfixOperator {
op: op.as_str().to_owned(),
similar_op: similar_op.into(),
description: description.into(),
},
op.as_span(),
)
}
static PRATT: LazyLock<PrattParser<Rule>> = LazyLock::new(|| {
PrattParser::new()
.op(Op::infix(Rule::union_op, Assoc::Left)
| Op::infix(Rule::compat_add_op, Assoc::Left))
.op(Op::infix(Rule::intersection_op, Assoc::Left)
| Op::infix(Rule::difference_op, Assoc::Left)
| Op::infix(Rule::compat_sub_op, Assoc::Left))
.op(Op::prefix(Rule::negate_op))
// Ranges can't be nested without parentheses. Associativity doesn't matter.
.op(Op::infix(Rule::dag_range_op, Assoc::Left)
| Op::infix(Rule::compat_dag_range_op, Assoc::Left)
| Op::infix(Rule::range_op, Assoc::Left))
.op(Op::prefix(Rule::dag_range_pre_op)
| Op::prefix(Rule::compat_dag_range_pre_op)
| Op::prefix(Rule::range_pre_op))
.op(Op::postfix(Rule::dag_range_post_op)
| Op::postfix(Rule::compat_dag_range_post_op)
| Op::postfix(Rule::range_post_op))
// Neighbors
.op(Op::postfix(Rule::parents_op)
| Op::postfix(Rule::children_op)
| Op::postfix(Rule::compat_parents_op))
});
PRATT
.map_primary(|primary| {
let expr = match primary.as_rule() {
Rule::primary => return parse_primary_node(primary),
Rule::dag_range_all_op => ExpressionKind::DagRangeAll,
Rule::range_all_op => ExpressionKind::RangeAll,
r => panic!("unexpected primary rule {r:?}"),
};
Ok(ExpressionNode::new(expr, primary.as_span()))
})
.map_prefix(|op, rhs| {
let op_kind = match op.as_rule() {
Rule::negate_op => UnaryOp::Negate,
Rule::dag_range_pre_op => UnaryOp::DagRangePre,
Rule::compat_dag_range_pre_op => Err(not_prefix_op(&op, "::", "ancestors"))?,
Rule::range_pre_op => UnaryOp::RangePre,
r => panic!("unexpected prefix operator rule {r:?}"),
};
let rhs = Box::new(rhs?);
let span = op.as_span().start_pos().span(&rhs.span.end_pos());
let expr = ExpressionKind::Unary(op_kind, rhs);
Ok(ExpressionNode::new(expr, span))
})
.map_postfix(|lhs, op| {
let op_kind = match op.as_rule() {
Rule::dag_range_post_op => UnaryOp::DagRangePost,
Rule::compat_dag_range_post_op => Err(not_postfix_op(&op, "::", "descendants"))?,
Rule::range_post_op => UnaryOp::RangePost,
Rule::parents_op => UnaryOp::Parents,
Rule::children_op => UnaryOp::Children,
Rule::compat_parents_op => Err(not_postfix_op(&op, "-", "parents"))?,
r => panic!("unexpected postfix operator rule {r:?}"),
};
let lhs = Box::new(lhs?);
let span = lhs.span.start_pos().span(&op.as_span().end_pos());
let expr = ExpressionKind::Unary(op_kind, lhs);
Ok(ExpressionNode::new(expr, span))
})
.map_infix(|lhs, op, rhs| {
let op_kind = match op.as_rule() {
Rule::union_op => return Ok(union_nodes(lhs?, rhs?)),
Rule::compat_add_op => Err(not_infix_op(&op, "|", "union"))?,
Rule::intersection_op => BinaryOp::Intersection,
Rule::difference_op => BinaryOp::Difference,
Rule::compat_sub_op => Err(not_infix_op(&op, "~", "difference"))?,
Rule::dag_range_op => BinaryOp::DagRange,
Rule::compat_dag_range_op => Err(not_infix_op(&op, "::", "DAG range"))?,
Rule::range_op => BinaryOp::Range,
r => panic!("unexpected infix operator rule {r:?}"),
};
let lhs = Box::new(lhs?);
let rhs = Box::new(rhs?);
let span = lhs.span.start_pos().span(&rhs.span.end_pos());
let expr = ExpressionKind::Binary(op_kind, lhs, rhs);
Ok(ExpressionNode::new(expr, span))
})
.parse(pair.into_inner())
}
fn parse_primary_node(pair: Pair<Rule>) -> Result<ExpressionNode, RevsetParseError> {
let span = pair.as_span();
let mut pairs = pair.into_inner();
let first = pairs.next().unwrap();
let expr = match first.as_rule() {
// Ignore inner span to preserve parenthesized expression as such.
Rule::expression => parse_expression_node(first)?.kind,
Rule::function => {
let function = Box::new(FUNCTION_CALL_PARSER.parse(
first,
|pair| Ok(pair.as_str()),
|pair| parse_expression_node(pair),
)?);
ExpressionKind::FunctionCall(function)
}
Rule::string_pattern => {
let [lhs, op, rhs] = first.into_inner().collect_array().unwrap();
assert_eq!(lhs.as_rule(), Rule::strict_identifier);
assert_eq!(op.as_rule(), Rule::pattern_kind_op);
let kind = lhs.as_str();
let value = parse_as_string_literal(rhs);
ExpressionKind::StringPattern { kind, value }
}
// Identifier without "@" may be substituted by aliases. Primary expression including "@"
// is considered an indecomposable unit, and no alias substitution would be made.
Rule::identifier if pairs.peek().is_none() => ExpressionKind::Identifier(first.as_str()),
Rule::identifier | Rule::string_literal | Rule::raw_string_literal => {
let name = parse_as_string_literal(first);
match pairs.next() {
None => ExpressionKind::String(name),
Some(op) => {
assert_eq!(op.as_rule(), Rule::at_op);
match pairs.next() {
// postfix "<name>@"
None => ExpressionKind::AtWorkspace(name),
// infix "<name>@<remote>"
Some(second) => {
let name: RefNameBuf = name.into();
let remote: RemoteNameBuf = parse_as_string_literal(second).into();
ExpressionKind::RemoteSymbol(RemoteRefSymbolBuf { name, remote })
}
}
}
}
}
// nullary "@"
Rule::at_op => ExpressionKind::AtCurrentWorkspace,
r => panic!("unexpected revset parse rule: {r:?}"),
};
Ok(ExpressionNode::new(expr, span))
}
/// Parses part of compound symbol to string.
fn parse_as_string_literal(pair: Pair<Rule>) -> String {
match pair.as_rule() {
Rule::identifier => pair.as_str().to_owned(),
Rule::string_literal => STRING_LITERAL_PARSER.parse(pair.into_inner()),
Rule::raw_string_literal => {
let [content] = pair.into_inner().collect_array().unwrap();
assert_eq!(content.as_rule(), Rule::raw_string_content);
content.as_str().to_owned()
}
_ => {
panic!("unexpected string literal rule: {:?}", pair.as_str());
}
}
}
/// Checks if the text is a valid identifier
pub fn is_identifier(text: &str) -> bool {
match RevsetParser::parse(Rule::identifier, text) {
Ok(mut pairs) => pairs.next().unwrap().as_span().end() == text.len(),
Err(_) => false,
}
}
/// Parses the text as a revset symbol, rejects empty string.
pub fn parse_symbol(text: &str) -> Result<String, RevsetParseError> {
let mut pairs = RevsetParser::parse(Rule::symbol_name, text)?;
let first = pairs.next().unwrap();
let span = first.as_span();
let name = parse_as_string_literal(first);
if name.is_empty() {
Err(RevsetParseError::expression(
"Expected non-empty string",
span,
))
} else {
Ok(name)
}
}
pub type RevsetAliasesMap = AliasesMap<RevsetAliasParser, String>;
#[derive(Clone, Debug, Default)]
pub struct RevsetAliasParser;
impl AliasDeclarationParser for RevsetAliasParser {
type Error = RevsetParseError;
fn parse_declaration(&self, source: &str) -> Result<AliasDeclaration, Self::Error> {
let mut pairs = RevsetParser::parse(Rule::alias_declaration, source)?;
let first = pairs.next().unwrap();
match first.as_rule() {
Rule::strict_identifier => Ok(AliasDeclaration::Symbol(first.as_str().to_owned())),
Rule::function_alias_declaration => {
let [name_pair, params_pair] = first.into_inner().collect_array().unwrap();
assert_eq!(name_pair.as_rule(), Rule::function_name);
assert_eq!(params_pair.as_rule(), Rule::formal_parameters);
let name = name_pair.as_str().to_owned();
let params_span = params_pair.as_span();
let params = params_pair
.into_inner()
.map(|pair| match pair.as_rule() {
Rule::strict_identifier => pair.as_str().to_owned(),
r => panic!("unexpected formal parameter rule {r:?}"),
})
.collect_vec();
if params.iter().all_unique() {
Ok(AliasDeclaration::Function(name, params))
} else {
Err(RevsetParseError::with_span(
RevsetParseErrorKind::RedefinedFunctionParameter,
params_span,
))
}
}
r => panic!("unexpected alias declaration rule {r:?}"),
}
}
}
impl AliasDefinitionParser for RevsetAliasParser {
type Output<'i> = ExpressionKind<'i>;
type Error = RevsetParseError;
fn parse_definition<'i>(&self, source: &'i str) -> Result<ExpressionNode<'i>, Self::Error> {
parse_program_with_modifier(source)
}
}
pub(super) fn expect_string_pattern<'a>(
type_name: &str,
node: &'a ExpressionNode<'_>,
) -> Result<(&'a str, Option<&'a str>), RevsetParseError> {
catch_aliases_no_diagnostics(node, |node| match &node.kind {
ExpressionKind::Identifier(name) => Ok((*name, None)),
ExpressionKind::String(name) => Ok((name, None)),
ExpressionKind::StringPattern { kind, value } => Ok((value, Some(*kind))),
_ => Err(RevsetParseError::expression(
format!("Expected {type_name}"),
node.span,
)),
})
}
pub fn expect_literal<T: FromStr>(
type_name: &str,
node: &ExpressionNode,
) -> Result<T, RevsetParseError> {
catch_aliases_no_diagnostics(node, |node| {
let value = expect_string_literal(type_name, node)?;
value
.parse()
.map_err(|_| RevsetParseError::expression(format!("Expected {type_name}"), node.span))
})
}
pub(super) fn expect_string_literal<'a>(
type_name: &str,
node: &'a ExpressionNode<'_>,
) -> Result<&'a str, RevsetParseError> {
catch_aliases_no_diagnostics(node, |node| match &node.kind {
ExpressionKind::Identifier(name) => Ok(*name),
ExpressionKind::String(name) => Ok(name),
_ => Err(RevsetParseError::expression(
format!("Expected {type_name}"),
node.span,
)),
})
}
/// Applies the given function to the innermost `node` by unwrapping alias
/// expansion nodes. Appends alias expansion stack to error and diagnostics.
pub(super) fn catch_aliases<'a, 'i, T>(
diagnostics: &mut RevsetDiagnostics,
node: &'a ExpressionNode<'i>,
f: impl FnOnce(&mut RevsetDiagnostics, &'a ExpressionNode<'i>) -> Result<T, RevsetParseError>,
) -> Result<T, RevsetParseError> {
let (node, stack) = skip_aliases(node);
if stack.is_empty() {
f(diagnostics, node)
} else {
let mut inner_diagnostics = RevsetDiagnostics::new();
let result = f(&mut inner_diagnostics, node);
diagnostics.extend_with(inner_diagnostics, |diag| attach_aliases_err(diag, &stack));
result.map_err(|err| attach_aliases_err(err, &stack))
}
}
fn catch_aliases_no_diagnostics<'a, 'i, T>(
node: &'a ExpressionNode<'i>,
f: impl FnOnce(&'a ExpressionNode<'i>) -> Result<T, RevsetParseError>,
) -> Result<T, RevsetParseError> {
let (node, stack) = skip_aliases(node);
f(node).map_err(|err| attach_aliases_err(err, &stack))
}
fn skip_aliases<'a, 'i>(
mut node: &'a ExpressionNode<'i>,
) -> (&'a ExpressionNode<'i>, Vec<(AliasId<'i>, pest::Span<'i>)>) {
let mut stack = Vec::new();
while let ExpressionKind::AliasExpanded(id, subst) = &node.kind {
stack.push((*id, node.span));
node = subst;
}
(node, stack)
}
fn attach_aliases_err(
err: RevsetParseError,
stack: &[(AliasId<'_>, pest::Span<'_>)],
) -> RevsetParseError {
stack
.iter()
.rfold(err, |err, &(id, span)| err.within_alias_expansion(id, span))
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use assert_matches::assert_matches;
use super::*;
use crate::dsl_util::KeywordArgument;
#[derive(Debug)]
struct WithRevsetAliasesMap<'i> {
aliases_map: RevsetAliasesMap,
locals: HashMap<&'i str, ExpressionNode<'i>>,
}
impl<'i> WithRevsetAliasesMap<'i> {
fn set_local(mut self, name: &'i str, value: &'i str) -> Self {
self.locals.insert(name, parse_program(value).unwrap());
self
}
fn parse(&'i self, text: &'i str) -> Result<ExpressionNode<'i>, RevsetParseError> {
let node = parse_program_with_modifier(text)?;
dsl_util::expand_aliases_with_locals(node, &self.aliases_map, &self.locals)
}
fn parse_normalized(&'i self, text: &'i str) -> ExpressionNode<'i> {
normalize_tree(self.parse(text).unwrap())
}
}
fn with_aliases<'i>(
aliases: impl IntoIterator<Item = (impl AsRef<str>, impl Into<String>)>,
) -> WithRevsetAliasesMap<'i> {
let mut aliases_map = RevsetAliasesMap::new();
for (decl, defn) in aliases {
aliases_map.insert(decl, defn).unwrap();
}
WithRevsetAliasesMap {
aliases_map,
locals: HashMap::new(),
}
}
fn parse_into_kind(text: &str) -> Result<ExpressionKind<'_>, RevsetParseErrorKind> {
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/fix.rs | lib/src/fix.rs | // Copyright 2024 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! API for transforming file content, for example to apply formatting, and
//! propagate those changes across revisions.
use std::collections::HashMap;
use std::collections::HashSet;
use std::sync::mpsc::channel;
use futures::StreamExt as _;
use itertools::Itertools as _;
use jj_lib::backend::BackendError;
use jj_lib::backend::CommitId;
use jj_lib::backend::FileId;
use jj_lib::backend::TreeValue;
use jj_lib::matchers::Matcher;
use jj_lib::merged_tree::MergedTreeBuilder;
use jj_lib::merged_tree::TreeDiffEntry;
use jj_lib::repo::MutableRepo;
use jj_lib::repo::Repo as _;
use jj_lib::repo_path::RepoPathBuf;
use jj_lib::revset::RevsetExpression;
use jj_lib::revset::RevsetIteratorExt as _;
use jj_lib::store::Store;
use rayon::iter::IntoParallelIterator as _;
use rayon::prelude::ParallelIterator as _;
use crate::revset::RevsetEvaluationError;
/// Represents a file whose content may be transformed by a FileFixer.
// TODO: Add the set of changed line/byte ranges, so those can be passed into code formatters via
// flags. This will help avoid introducing unrelated changes when working on code with out of date
// formatting.
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
pub struct FileToFix {
/// Unique identifier for the file content.
pub file_id: FileId,
/// The path is provided to allow the FileFixer to potentially:
/// - Choose different behaviors for different file names, extensions, etc.
/// - Update parts of the file's content that should be derived from the
/// file's path.
pub repo_path: RepoPathBuf,
}
/// Error fixing files.
#[derive(Debug, thiserror::Error)]
pub enum FixError {
/// Error while contacting the Backend.
#[error(transparent)]
Backend(#[from] BackendError),
/// Error resolving commit ancestry.
#[error(transparent)]
RevsetEvaluation(#[from] RevsetEvaluationError),
/// Error occurred while reading/writing file content.
#[error(transparent)]
IO(#[from] std::io::Error),
/// Error occurred while processing the file content.
#[error(transparent)]
FixContent(Box<dyn std::error::Error + Send + Sync>),
}
/// Fixes a set of files.
///
/// Fixing a file is implementation dependent. For example it may format source
/// code using a code formatter.
pub trait FileFixer {
/// Fixes a set of files. Stores the resulting file content (for modified
/// files).
///
/// Returns a map describing the subset of `files_to_fix` that resulted in
/// changed file content (unchanged files should not be present in the map),
/// pointing to the new FileId for the file.
///
/// TODO: Better error handling so we can tell the user what went wrong with
/// each failed input.
fn fix_files<'a>(
&mut self,
store: &Store,
files_to_fix: &'a HashSet<FileToFix>,
) -> Result<HashMap<&'a FileToFix, FileId>, FixError>;
}
/// Aggregate information about the outcome of the file fixer.
#[derive(Debug, Default)]
pub struct FixSummary {
/// The commits that were rewritten. Maps old commit id to new commit id.
pub rewrites: HashMap<CommitId, CommitId>,
/// The number of commits that had files that were passed to the file fixer.
pub num_checked_commits: i32,
/// The number of new commits created due to file content changed by the
/// fixer.
pub num_fixed_commits: i32,
}
/// A [FileFixer] that applies fix_fn to each file, in parallel.
///
/// The implementation is currently based on [rayon].
// TODO: Consider switching to futures, or document the decision not to. We
// don't need threads unless the threads will be doing more than waiting for
// pipes.
pub struct ParallelFileFixer<T> {
fix_fn: T,
}
impl<T> ParallelFileFixer<T>
where
T: Fn(&Store, &FileToFix) -> Result<Option<FileId>, FixError> + Sync + Send,
{
/// Creates a ParallelFileFixer.
pub fn new(fix_fn: T) -> Self {
Self { fix_fn }
}
}
impl<T> FileFixer for ParallelFileFixer<T>
where
T: Fn(&Store, &FileToFix) -> Result<Option<FileId>, FixError> + Sync + Send,
{
/// Applies `fix_fn()` to the inputs and stores the resulting file content.
fn fix_files<'a>(
&mut self,
store: &Store,
files_to_fix: &'a HashSet<FileToFix>,
) -> Result<HashMap<&'a FileToFix, FileId>, FixError> {
let (updates_tx, updates_rx) = channel();
files_to_fix.into_par_iter().try_for_each_init(
|| updates_tx.clone(),
|updates_tx, file_to_fix| -> Result<(), FixError> {
let result = (self.fix_fn)(store, file_to_fix)?;
match result {
Some(new_file_id) => {
updates_tx.send((file_to_fix, new_file_id)).unwrap();
Ok(())
}
None => Ok(()),
}
},
)?;
drop(updates_tx);
let mut result = HashMap::new();
while let Ok((file_to_fix, new_file_id)) = updates_rx.recv() {
result.insert(file_to_fix, new_file_id);
}
Ok(result)
}
}
/// Updates files with formatting fixes or other changes, using the given
/// FileFixer.
///
/// The primary use case is to apply the results of automatic code formatting
/// tools to revisions that may not be properly formatted yet. It can also be
/// used to modify files with other tools like `sed` or `sort`.
///
/// After the FileFixer is done, descendants are also updated, which ensures
/// that the fixes are not lost. This will never result in new conflicts. Files
/// with existing conflicts are updated on all sides of the conflict, which
/// can potentially increase or decrease the number of conflict markers.
pub async fn fix_files(
root_commits: Vec<CommitId>,
matcher: &dyn Matcher,
include_unchanged_files: bool,
repo_mut: &mut MutableRepo,
file_fixer: &mut impl FileFixer,
) -> Result<FixSummary, FixError> {
let mut summary = FixSummary::default();
// Collect all of the unique `FileToFix`s we're going to use. file_fixer should
// be deterministic, and should not consider outside information, so it is
// safe to deduplicate inputs that correspond to multiple files or commits.
// This is typically more efficient, but it does prevent certain use cases
// like providing commit IDs as inputs to be inserted into files. We also
// need to record the mapping between files-to-fix and paths/commits, to
// efficiently rewrite the commits later.
//
// If a path is being fixed in a particular commit, it must also be fixed in all
// that commit's descendants. We do this as a way of propagating changes,
// under the assumption that it is more useful than performing a rebase and
// risking merge conflicts. In the case of code formatters, rebasing wouldn't
// reliably produce well formatted code anyway. Deduplicating inputs helps
// to prevent quadratic growth in the number of tool executions required for
// doing this in long chains of commits with disjoint sets of modified files.
let commits: Vec<_> = RevsetExpression::commits(root_commits.clone())
.descendants()
.evaluate(repo_mut)?
.iter()
.commits(repo_mut.store())
.try_collect()?;
tracing::debug!(
?root_commits,
?commits,
"looking for files to fix in commits:"
);
let mut unique_files_to_fix: HashSet<FileToFix> = HashSet::new();
let mut commit_paths: HashMap<CommitId, HashSet<RepoPathBuf>> = HashMap::new();
for commit in commits.iter().rev() {
let mut paths: HashSet<RepoPathBuf> = HashSet::new();
// If --include-unchanged-files, we always fix every matching file in the tree.
// Otherwise, we fix the matching changed files in this commit, plus any that
// were fixed in ancestors, so we don't lose those changes. We do this
// instead of rebasing onto those changes, to avoid merge conflicts.
let parent_tree = if include_unchanged_files {
repo_mut.store().empty_merged_tree()
} else {
for parent_id in commit.parent_ids() {
if let Some(parent_paths) = commit_paths.get(parent_id) {
paths.extend(parent_paths.iter().cloned());
}
}
commit.parent_tree_async(repo_mut).await?
};
// TODO: handle copy tracking
let mut diff_stream = parent_tree.diff_stream(&commit.tree(), &matcher);
while let Some(TreeDiffEntry {
path: repo_path,
values,
}) = diff_stream.next().await
{
let after = values?.after;
// Deleted files have no file content to fix, and they have no terms in `after`,
// so we don't add any files-to-fix for them. Conflicted files produce one
// file-to-fix for each side of the conflict.
for term in after.into_iter().flatten() {
// We currently only support fixing the content of normal files, so we skip
// directories and symlinks, and we ignore the executable bit.
if let TreeValue::File {
id,
executable: _,
copy_id: _,
} = term
{
// TODO: Skip the file if its content is larger than some configured size,
// preferably without actually reading it yet.
let file_to_fix = FileToFix {
file_id: id.clone(),
repo_path: repo_path.clone(),
};
unique_files_to_fix.insert(file_to_fix.clone());
paths.insert(repo_path.clone());
}
}
}
commit_paths.insert(commit.id().clone(), paths);
}
tracing::debug!(
?include_unchanged_files,
?unique_files_to_fix,
"invoking file fixer on these files:"
);
// Fix all of the chosen inputs.
let fixed_file_ids = file_fixer.fix_files(repo_mut.store().as_ref(), &unique_files_to_fix)?;
tracing::debug!(?fixed_file_ids, "file fixer fixed these files:");
// Substitute the fixed file IDs into all of the affected commits. Currently,
// fixes cannot delete or rename files, change the executable bit, or modify
// other parts of the commit like the description.
repo_mut.transform_descendants(root_commits, async |rewriter| {
// TODO: Build the trees in parallel before `transform_descendants()` and only
// keep the tree IDs in memory, so we can pass them to the rewriter.
let old_commit_id = rewriter.old_commit().id().clone();
let repo_paths = commit_paths.get(&old_commit_id).unwrap();
let old_tree = rewriter.old_commit().tree();
let mut tree_builder = MergedTreeBuilder::new(old_tree.clone());
let mut has_changes = false;
for repo_path in repo_paths {
let old_value = old_tree.path_value_async(repo_path).await?;
let new_value = old_value.map(|old_term| {
if let Some(TreeValue::File {
id,
executable,
copy_id,
}) = old_term
{
let file_to_fix = FileToFix {
file_id: id.clone(),
repo_path: repo_path.clone(),
};
if let Some(new_id) = fixed_file_ids.get(&file_to_fix) {
return Some(TreeValue::File {
id: new_id.clone(),
executable: *executable,
copy_id: copy_id.clone(),
});
}
}
old_term.clone()
});
if new_value != old_value {
tree_builder.set_or_remove(repo_path.clone(), new_value);
has_changes = true;
}
}
summary.num_checked_commits += 1;
if has_changes {
summary.num_fixed_commits += 1;
let new_tree = tree_builder.write_tree()?;
let builder = rewriter.reparent();
let new_commit = builder.set_tree(new_tree).write()?;
summary
.rewrites
.insert(old_commit_id, new_commit.id().clone());
} else if rewriter.parents_changed() {
let new_commit = rewriter.reparent().write()?;
summary
.rewrites
.insert(old_commit_id, new_commit.id().clone());
}
Ok(())
})?;
tracing::debug!(?summary);
Ok(summary)
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/view.rs | lib/src/view.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::collections::BTreeMap;
use std::collections::HashSet;
use itertools::Itertools as _;
use thiserror::Error;
use crate::backend::CommitId;
use crate::op_store;
use crate::op_store::LocalRemoteRefTarget;
use crate::op_store::RefTarget;
use crate::op_store::RefTargetOptionExt as _;
use crate::op_store::RemoteRef;
use crate::op_store::RemoteView;
use crate::ref_name::GitRefName;
use crate::ref_name::GitRefNameBuf;
use crate::ref_name::RefName;
use crate::ref_name::RemoteName;
use crate::ref_name::RemoteRefSymbol;
use crate::ref_name::WorkspaceName;
use crate::ref_name::WorkspaceNameBuf;
use crate::refs;
use crate::refs::LocalAndRemoteRef;
use crate::str_util::StringMatcher;
/// A wrapper around [`op_store::View`] that defines additional methods.
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct View {
data: op_store::View,
}
impl View {
pub fn new(op_store_view: op_store::View) -> Self {
Self {
data: op_store_view,
}
}
pub fn wc_commit_ids(&self) -> &BTreeMap<WorkspaceNameBuf, CommitId> {
&self.data.wc_commit_ids
}
pub fn get_wc_commit_id(&self, name: &WorkspaceName) -> Option<&CommitId> {
self.data.wc_commit_ids.get(name)
}
pub fn workspaces_for_wc_commit_id(&self, commit_id: &CommitId) -> Vec<WorkspaceNameBuf> {
let mut workspace_names = vec![];
for (name, wc_commit_id) in &self.data.wc_commit_ids {
if wc_commit_id == commit_id {
workspace_names.push(name.clone());
}
}
workspace_names
}
pub fn is_wc_commit_id(&self, commit_id: &CommitId) -> bool {
self.data.wc_commit_ids.values().contains(commit_id)
}
pub fn heads(&self) -> &HashSet<CommitId> {
&self.data.head_ids
}
/// Iterates pair of local and remote bookmarks by bookmark name.
pub fn bookmarks(&self) -> impl Iterator<Item = (&RefName, LocalRemoteRefTarget<'_>)> {
op_store::merge_join_ref_views(
&self.data.local_bookmarks,
&self.data.remote_views,
|view| &view.bookmarks,
)
}
/// Iterates pair of local and remote tags by tag name.
pub fn tags(&self) -> impl Iterator<Item = (&RefName, LocalRemoteRefTarget<'_>)> {
op_store::merge_join_ref_views(&self.data.local_tags, &self.data.remote_views, |view| {
&view.tags
})
}
pub fn git_refs(&self) -> &BTreeMap<GitRefNameBuf, RefTarget> {
&self.data.git_refs
}
pub fn git_head(&self) -> &RefTarget {
&self.data.git_head
}
pub fn set_wc_commit(&mut self, name: WorkspaceNameBuf, commit_id: CommitId) {
self.data.wc_commit_ids.insert(name, commit_id);
}
pub fn remove_wc_commit(&mut self, name: &WorkspaceName) {
self.data.wc_commit_ids.remove(name);
}
pub fn rename_workspace(
&mut self,
old_name: &WorkspaceName,
new_name: WorkspaceNameBuf,
) -> Result<(), RenameWorkspaceError> {
if self.data.wc_commit_ids.contains_key(&new_name) {
return Err(RenameWorkspaceError::WorkspaceAlreadyExists {
name: new_name.clone(),
});
}
let wc_commit_id = self.data.wc_commit_ids.remove(old_name).ok_or_else(|| {
RenameWorkspaceError::WorkspaceDoesNotExist {
name: old_name.to_owned(),
}
})?;
self.data.wc_commit_ids.insert(new_name, wc_commit_id);
Ok(())
}
pub fn add_head(&mut self, head_id: &CommitId) {
self.data.head_ids.insert(head_id.clone());
}
pub fn remove_head(&mut self, head_id: &CommitId) {
self.data.head_ids.remove(head_id);
}
/// Iterates local bookmark `(name, target)`s in lexicographical order.
pub fn local_bookmarks(&self) -> impl Iterator<Item = (&RefName, &RefTarget)> {
self.data
.local_bookmarks
.iter()
.map(|(name, target)| (name.as_ref(), target))
}
/// Iterates local bookmarks `(name, target)` in lexicographical order where
/// the target adds `commit_id`.
pub fn local_bookmarks_for_commit(
&self,
commit_id: &CommitId,
) -> impl Iterator<Item = (&RefName, &RefTarget)> {
self.local_bookmarks()
.filter(|(_, target)| target.added_ids().contains(commit_id))
}
/// Iterates local bookmark `(name, target)`s matching the given pattern.
/// Entries are sorted by `name`.
pub fn local_bookmarks_matching(
&self,
matcher: &StringMatcher,
) -> impl Iterator<Item = (&RefName, &RefTarget)> {
matcher
.filter_btree_map_as_deref(&self.data.local_bookmarks)
.map(|(name, target)| (name.as_ref(), target))
}
pub fn get_local_bookmark(&self, name: &RefName) -> &RefTarget {
self.data.local_bookmarks.get(name).flatten()
}
/// Sets local bookmark to point to the given target. If the target is
/// absent, the local bookmark will be removed. If there are absent remote
/// bookmarks tracked by the newly-absent local bookmark, they will also be
/// removed.
pub fn set_local_bookmark_target(&mut self, name: &RefName, target: RefTarget) {
if target.is_present() {
self.data.local_bookmarks.insert(name.to_owned(), target);
} else {
self.data.local_bookmarks.remove(name);
for remote_view in self.data.remote_views.values_mut() {
let remote_refs = &mut remote_view.bookmarks;
if remote_refs.get(name).is_some_and(RemoteRef::is_absent) {
remote_refs.remove(name);
}
}
}
}
/// Iterates over `(symbol, remote_ref)` for all remote bookmarks in
/// lexicographical order.
pub fn all_remote_bookmarks(&self) -> impl Iterator<Item = (RemoteRefSymbol<'_>, &RemoteRef)> {
op_store::flatten_remote_refs(&self.data.remote_views, |view| &view.bookmarks)
}
/// Iterates over `(name, remote_ref)`s for all remote bookmarks of the
/// specified remote in lexicographical order.
pub fn remote_bookmarks(
&self,
remote_name: &RemoteName,
) -> impl Iterator<Item = (&RefName, &RemoteRef)> + use<'_> {
let maybe_remote_view = self.data.remote_views.get(remote_name);
maybe_remote_view
.map(|remote_view| {
remote_view
.bookmarks
.iter()
.map(|(name, remote_ref)| (name.as_ref(), remote_ref))
})
.into_iter()
.flatten()
}
/// Iterates over `(symbol, remote_ref)`s for all remote bookmarks of the
/// specified remote that match the given pattern.
///
/// Entries are sorted by `symbol`, which is `(name, remote)`.
pub fn remote_bookmarks_matching(
&self,
bookmark_matcher: &StringMatcher,
remote_matcher: &StringMatcher,
) -> impl Iterator<Item = (RemoteRefSymbol<'_>, &RemoteRef)> {
// Use kmerge instead of flat_map for consistency with all_remote_bookmarks().
remote_matcher
.filter_btree_map_as_deref(&self.data.remote_views)
.map(|(remote, remote_view)| {
bookmark_matcher
.filter_btree_map_as_deref(&remote_view.bookmarks)
.map(|(name, remote_ref)| (name.to_remote_symbol(remote), remote_ref))
})
.kmerge_by(|(symbol1, _), (symbol2, _)| symbol1 < symbol2)
}
pub fn get_remote_bookmark(&self, symbol: RemoteRefSymbol<'_>) -> &RemoteRef {
if let Some(remote_view) = self.data.remote_views.get(symbol.remote) {
remote_view.bookmarks.get(symbol.name).flatten()
} else {
RemoteRef::absent_ref()
}
}
/// Sets remote-tracking bookmark to the given target and state. If the
/// target is absent and if no tracking local bookmark exists, the bookmark
/// will be removed.
pub fn set_remote_bookmark(&mut self, symbol: RemoteRefSymbol<'_>, remote_ref: RemoteRef) {
if remote_ref.is_present()
|| (remote_ref.is_tracked() && self.get_local_bookmark(symbol.name).is_present())
{
let remote_view = self
.data
.remote_views
.entry(symbol.remote.to_owned())
.or_default();
remote_view
.bookmarks
.insert(symbol.name.to_owned(), remote_ref);
} else if let Some(remote_view) = self.data.remote_views.get_mut(symbol.remote) {
remote_view.bookmarks.remove(symbol.name);
}
}
/// Iterates over `(name, {local_ref, remote_ref})`s for every bookmark
/// present locally and/or on the specified remote, in lexicographical
/// order.
///
/// Note that this does *not* take into account whether the local bookmark
/// tracks the remote bookmark or not. Missing values are represented as
/// RefTarget::absent_ref() or RemoteRef::absent_ref().
pub fn local_remote_bookmarks(
    &self,
    remote_name: &RemoteName,
) -> impl Iterator<Item = (&RefName, LocalAndRemoteRef<'_>)> + use<'_> {
    let local_refs = self.local_bookmarks();
    let remote_refs = self.remote_bookmarks(remote_name);
    // Merge-join the two sorted streams by name and pair up the sides.
    refs::iter_named_local_remote_refs(local_refs, remote_refs).map(
        |(name, (local_target, remote_ref))| {
            (
                name,
                LocalAndRemoteRef {
                    local_target,
                    remote_ref,
                },
            )
        },
    )
}
/// Iterates over `(name, TrackingRefPair {local_ref, remote_ref})`s for
/// every bookmark with a name that matches the given pattern, and that is
/// present locally and/or on the specified remote.
///
/// Entries are sorted by `name`.
///
/// Note that this does *not* take into account whether the local bookmark
/// tracks the remote bookmark or not. Missing values are represented as
/// RefTarget::absent_ref() or RemoteRef::absent_ref().
pub fn local_remote_bookmarks_matching<'a, 'b>(
    &'a self,
    bookmark_matcher: &'b StringMatcher,
    remote_name: &RemoteName,
) -> impl Iterator<Item = (&'a RefName, LocalAndRemoteRef<'a>)> + use<'a, 'b> {
    // Change remote_name to StringMatcher if needed, but merge-join adapter won't
    // be usable.
    let maybe_remote_view = self.data.remote_views.get(remote_name);
    // An unknown remote contributes an empty stream (Option -> iterator via
    // into_iter().flatten()).
    refs::iter_named_local_remote_refs(
        bookmark_matcher.filter_btree_map_as_deref(&self.data.local_bookmarks),
        maybe_remote_view
            .map(|remote_view| {
                bookmark_matcher.filter_btree_map_as_deref(&remote_view.bookmarks)
            })
            .into_iter()
            .flatten(),
    )
    .map(|(name, (local_target, remote_ref))| {
        let targets = LocalAndRemoteRef {
            local_target,
            remote_ref,
        };
        (name.as_ref(), targets)
    })
}
/// Iterates remote `(name, view)`s in lexicographical order.
pub fn remote_views(&self) -> impl Iterator<Item = (&RemoteName, &RemoteView)> {
    // BTreeMap iteration yields keys in sorted order.
    self.data
        .remote_views
        .iter()
        .map(|(name, view)| (name.as_ref(), view))
}

/// Iterates matching remote `(name, view)`s in lexicographical order.
pub fn remote_views_matching(
    &self,
    matcher: &StringMatcher,
) -> impl Iterator<Item = (&RemoteName, &RemoteView)> {
    matcher
        .filter_btree_map_as_deref(&self.data.remote_views)
        .map(|(name, view)| (name.as_ref(), view))
}

/// Returns the remote view for `name`, or `None` if the remote is unknown.
pub fn get_remote_view(&self, name: &RemoteName) -> Option<&RemoteView> {
    self.data.remote_views.get(name)
}
/// Adds remote view if it doesn't exist. An existing view is left untouched.
pub fn ensure_remote(&mut self, remote_name: &RemoteName) {
    let views = &mut self.data.remote_views;
    if !views.contains_key(remote_name) {
        views.insert(remote_name.to_owned(), RemoteView::default());
    }
}
/// Removes the view of the given remote, dropping its bookmarks and tags.
pub fn remove_remote(&mut self, remote_name: &RemoteName) {
    self.data.remote_views.remove(remote_name);
}

/// Moves the view of remote `old` to `new`. No-op if `old` doesn't exist.
// NOTE(review): an existing view at `new` would be silently replaced —
// presumably callers check for name collisions first; confirm.
pub fn rename_remote(&mut self, old: &RemoteName, new: &RemoteName) {
    if let Some(remote_view) = self.data.remote_views.remove(old) {
        self.data.remote_views.insert(new.to_owned(), remote_view);
    }
}

/// Iterates local tag `(name, target)`s in lexicographical order.
pub fn local_tags(&self) -> impl Iterator<Item = (&RefName, &RefTarget)> {
    self.data
        .local_tags
        .iter()
        .map(|(name, target)| (name.as_ref(), target))
}

/// Returns the target of the local tag `name`, absent if the tag is unknown.
pub fn get_local_tag(&self, name: &RefName) -> &RefTarget {
    self.data.local_tags.get(name).flatten()
}

/// Iterates local tag `(name, target)`s matching the given pattern. Entries
/// are sorted by `name`.
pub fn local_tags_matching(
    &self,
    matcher: &StringMatcher,
) -> impl Iterator<Item = (&RefName, &RefTarget)> {
    matcher
        .filter_btree_map_as_deref(&self.data.local_tags)
        .map(|(name, target)| (name.as_ref(), target))
}
/// Sets local tag to point to the given target. If the target is absent,
/// the local tag will be removed. If there are absent remote tags tracked
/// by the newly-absent local tag, they will also be removed.
pub fn set_local_tag_target(&mut self, name: &RefName, target: RefTarget) {
    if !target.is_present() {
        self.data.local_tags.remove(name);
        // Drop absent remote entries of the same name on every remote:
        // without a local tag they carry no information.
        for remote_view in self.data.remote_views.values_mut() {
            if remote_view.tags.get(name).is_some_and(RemoteRef::is_absent) {
                remote_view.tags.remove(name);
            }
        }
        return;
    }
    self.data.local_tags.insert(name.to_owned(), target);
}
/// Iterates over `(symbol, remote_ref)` for all remote tags in
/// lexicographical order.
pub fn all_remote_tags(&self) -> impl Iterator<Item = (RemoteRefSymbol<'_>, &RemoteRef)> {
    // Shared helper flattens the per-remote views, selecting the tags map
    // out of each view.
    op_store::flatten_remote_refs(&self.data.remote_views, |view| &view.tags)
}
/// Iterates over `(name, remote_ref)`s for all remote tags of the specified
/// remote in lexicographical order.
pub fn remote_tags(
    &self,
    remote_name: &RemoteName,
) -> impl Iterator<Item = (&RefName, &RemoteRef)> + use<'_> {
    // An unknown remote yields an empty iterator (Option::into_iter).
    self.data
        .remote_views
        .get(remote_name)
        .into_iter()
        .flat_map(|remote_view| {
            remote_view
                .tags
                .iter()
                .map(|(name, remote_ref)| (name.as_ref(), remote_ref))
        })
}
/// Iterates over `(symbol, remote_ref)`s for all remote tags of the
/// specified remote that match the given pattern.
///
/// Entries are sorted by `symbol`, which is `(name, remote)`.
pub fn remote_tags_matching(
    &self,
    tag_matcher: &StringMatcher,
    remote_matcher: &StringMatcher,
) -> impl Iterator<Item = (RemoteRefSymbol<'_>, &RemoteRef)> {
    // Use kmerge instead of flat_map for consistency with all_remote_tags().
    remote_matcher
        .filter_btree_map_as_deref(&self.data.remote_views)
        .map(|(remote, remote_view)| {
            // Per-remote stream: BTreeMap iteration keeps names sorted.
            tag_matcher
                .filter_btree_map_as_deref(&remote_view.tags)
                .map(|(name, remote_ref)| (name.to_remote_symbol(remote), remote_ref))
        })
        // k-way merge keeps the combined output ordered by symbol.
        .kmerge_by(|(symbol1, _), (symbol2, _)| symbol1 < symbol2)
}
/// Returns remote-tracking tag target and state specified by `symbol`.
/// Absent if either the remote or the tag is unknown.
pub fn get_remote_tag(&self, symbol: RemoteRefSymbol<'_>) -> &RemoteRef {
    self.data
        .remote_views
        .get(symbol.remote)
        .map_or(RemoteRef::absent_ref(), |remote_view| {
            remote_view.tags.get(symbol.name).flatten()
        })
}
/// Sets remote-tracking tag to the given target and state. If the target is
/// absent and if no tracking local tag exists, the tag will be removed.
pub fn set_remote_tag(&mut self, symbol: RemoteRefSymbol<'_>, remote_ref: RemoteRef) {
    // Keep the entry when the ref is present, or when it is tracked and a
    // local tag of the same name is present.
    let keep = remote_ref.is_present()
        || (remote_ref.is_tracked() && self.get_local_tag(symbol.name).is_present());
    if keep {
        self.data
            .remote_views
            .entry(symbol.remote.to_owned())
            .or_default()
            .tags
            .insert(symbol.name.to_owned(), remote_ref);
    } else if let Some(remote_view) = self.data.remote_views.get_mut(symbol.remote) {
        remote_view.tags.remove(symbol.name);
    }
}
/// Iterates over `(name, {local_ref, remote_ref})`s for every tag present
/// locally and/or on the specified remote, in lexicographical order.
///
/// Note that this does *not* take into account whether the local tag tracks
/// the remote tag or not. Missing values are represented as
/// [`RefTarget::absent_ref()`] or [`RemoteRef::absent_ref()`].
pub fn local_remote_tags(
    &self,
    remote_name: &RemoteName,
) -> impl Iterator<Item = (&RefName, LocalAndRemoteRef<'_>)> + use<'_> {
    let local_refs = self.local_tags();
    let remote_refs = self.remote_tags(remote_name);
    // Merge-join the two sorted streams by name and pair up the sides.
    refs::iter_named_local_remote_refs(local_refs, remote_refs).map(
        |(name, (local_target, remote_ref))| {
            (
                name,
                LocalAndRemoteRef {
                    local_target,
                    remote_ref,
                },
            )
        },
    )
}
/// Returns the last imported target of Git ref `name`. Absent if the ref
/// is unknown.
pub fn get_git_ref(&self, name: &GitRefName) -> &RefTarget {
    self.data.git_refs.get(name).flatten()
}
/// Sets the last imported Git ref to point to the given target. If the
/// target is absent, the reference will be removed.
pub fn set_git_ref_target(&mut self, name: &GitRefName, target: RefTarget) {
    if !target.is_present() {
        self.data.git_refs.remove(name);
    } else {
        self.data.git_refs.insert(name.to_owned(), target);
    }
}
/// Sets Git HEAD to point to the given target. If the target is absent, the
/// reference will be cleared.
pub fn set_git_head_target(&mut self, target: RefTarget) {
    self.data.git_head = target;
}

/// Iterates all commit ids referenced by this view.
///
/// This can include hidden commits referenced by remote bookmarks, previous
/// positions of conflicted bookmarks, etc. The ancestors of the returned
/// commits should be considered reachable from the view. Use this to build
/// commit index from scratch.
///
/// The iteration order is unspecified, and may include duplicated entries.
pub fn all_referenced_commit_ids(&self) -> impl Iterator<Item = &CommitId> {
    // Include both added/removed ids since ancestry information of old
    // references will be needed while merging views.
    fn ref_target_ids(target: &RefTarget) -> impl Iterator<Item = &CommitId> {
        target.as_merge().iter().flatten()
    }
    // Some of the fields (e.g. wc_commit_ids) would be redundant, but let's
    // not be smart here. Callers will build a larger set of commits anyway.
    // Exhaustive destructuring: adding a field to op_store::View becomes a
    // compile error here until it is considered for inclusion.
    let op_store::View {
        head_ids,
        local_bookmarks,
        local_tags,
        remote_views,
        git_refs,
        git_head,
        wc_commit_ids,
    } = &self.data;
    itertools::chain!(
        head_ids,
        local_bookmarks.values().flat_map(ref_target_ids),
        local_tags.values().flat_map(ref_target_ids),
        remote_views.values().flat_map(|remote_view| {
            let op_store::RemoteView { bookmarks, tags } = remote_view;
            itertools::chain(bookmarks.values(), tags.values())
                .flat_map(|remote_ref| ref_target_ids(&remote_ref.target))
        }),
        git_refs.values().flat_map(ref_target_ids),
        ref_target_ids(git_head),
        wc_commit_ids.values()
    )
}

/// Replaces the entire view contents with `data`.
pub fn set_view(&mut self, data: op_store::View) {
    self.data = data;
}

/// Returns the underlying `op_store::View` data.
pub fn store_view(&self) -> &op_store::View {
    &self.data
}

/// Returns a mutable reference to the underlying `op_store::View` data.
pub fn store_view_mut(&mut self) -> &mut op_store::View {
    &mut self.data
}
}
/// Error from attempts to rename a workspace
#[derive(Debug, Error)]
pub enum RenameWorkspaceError {
    /// The workspace to be renamed does not exist in the view.
    #[error("Workspace {} not found", name.as_symbol())]
    WorkspaceDoesNotExist { name: WorkspaceNameBuf },

    /// A workspace with the requested new name already exists.
    #[error("Workspace {} already exists", name.as_symbol())]
    WorkspaceAlreadyExists { name: WorkspaceNameBuf },
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::op_store::RemoteRefState;

    /// Convenience constructor for a borrowed `RemoteRefSymbol` from any
    /// string-like name/remote pair.
    fn remote_symbol<'a, N, M>(name: &'a N, remote: &'a M) -> RemoteRefSymbol<'a>
    where
        N: AsRef<RefName> + ?Sized,
        M: AsRef<RemoteName> + ?Sized,
    {
        RemoteRefSymbol {
            name: name.as_ref(),
            remote: remote.as_ref(),
        }
    }

    /// Exercises the keep/remove rules of `set_remote_bookmark()` for
    /// absent-but-tracked refs. Note that the assertions depend on the
    /// mutations made before them, in order.
    #[test]
    fn test_absent_tracked_bookmarks() {
        let mut view = View {
            data: op_store::View::make_root(CommitId::from_hex("000000")),
        };
        let absent_tracked_ref = RemoteRef {
            target: RefTarget::absent(),
            state: RemoteRefState::Tracked,
        };
        let present_tracked_ref = RemoteRef {
            target: RefTarget::normal(CommitId::from_hex("111111")),
            state: RemoteRefState::Tracked,
        };
        // Absent remote ref cannot be tracked by absent local ref
        view.set_remote_bookmark(remote_symbol("foo", "new"), absent_tracked_ref.clone());
        assert_eq!(
            view.get_remote_bookmark(remote_symbol("foo", "new")),
            RemoteRef::absent_ref()
        );
        // Present remote ref can be tracked by absent local ref
        view.set_remote_bookmark(remote_symbol("foo", "present"), present_tracked_ref.clone());
        assert_eq!(
            view.get_remote_bookmark(remote_symbol("foo", "present")),
            &present_tracked_ref
        );
        // Absent remote ref can be tracked by present local ref
        view.set_local_bookmark_target(
            "foo".as_ref(),
            RefTarget::normal(CommitId::from_hex("222222")),
        );
        view.set_remote_bookmark(remote_symbol("foo", "new"), absent_tracked_ref.clone());
        assert_eq!(
            view.get_remote_bookmark(remote_symbol("foo", "new")),
            &absent_tracked_ref
        );
        // Absent remote ref should be removed if local ref becomes absent
        view.set_local_bookmark_target("foo".as_ref(), RefTarget::absent());
        assert_eq!(
            view.get_remote_bookmark(remote_symbol("foo", "new")),
            RemoteRef::absent_ref()
        );
        assert_eq!(
            view.get_remote_bookmark(remote_symbol("foo", "present")),
            &present_tracked_ref
        );
    }

    /// Mirror of `test_absent_tracked_bookmarks` for the tag code path
    /// (`set_remote_tag()` / `set_local_tag_target()`).
    #[test]
    fn test_absent_tracked_tags() {
        let mut view = View {
            data: op_store::View::make_root(CommitId::from_hex("000000")),
        };
        let absent_tracked_ref = RemoteRef {
            target: RefTarget::absent(),
            state: RemoteRefState::Tracked,
        };
        let present_tracked_ref = RemoteRef {
            target: RefTarget::normal(CommitId::from_hex("111111")),
            state: RemoteRefState::Tracked,
        };
        // Absent remote ref cannot be tracked by absent local ref
        view.set_remote_tag(remote_symbol("foo", "new"), absent_tracked_ref.clone());
        assert_eq!(
            view.get_remote_tag(remote_symbol("foo", "new")),
            RemoteRef::absent_ref()
        );
        // Present remote ref can be tracked by absent local ref
        view.set_remote_tag(remote_symbol("foo", "present"), present_tracked_ref.clone());
        assert_eq!(
            view.get_remote_tag(remote_symbol("foo", "present")),
            &present_tracked_ref
        );
        // Absent remote ref can be tracked by present local ref
        view.set_local_tag_target(
            "foo".as_ref(),
            RefTarget::normal(CommitId::from_hex("222222")),
        );
        view.set_remote_tag(remote_symbol("foo", "new"), absent_tracked_ref.clone());
        assert_eq!(
            view.get_remote_tag(remote_symbol("foo", "new")),
            &absent_tracked_ref
        );
        // Absent remote ref should be removed if local ref becomes absent
        view.set_local_tag_target("foo".as_ref(), RefTarget::absent());
        assert_eq!(
            view.get_remote_tag(remote_symbol("foo", "new")),
            RemoteRef::absent_ref()
        );
        assert_eq!(
            view.get_remote_tag(remote_symbol("foo", "present")),
            &present_tracked_ref
        );
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/diff.rs | lib/src/diff.rs | // Copyright 2021 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::collections::BTreeMap;
use std::hash::BuildHasher;
use std::hash::Hash;
use std::hash::Hasher;
use std::hash::RandomState;
use std::iter;
use std::ops::Range;
use std::slice;
use bstr::BStr;
use hashbrown::HashTable;
use itertools::Itertools as _;
use smallvec::SmallVec;
use smallvec::smallvec;
/// Returns the byte range of every line in `text`, including the
/// terminating newline (if any).
///
/// The ranges are contiguous and cover the whole input; empty input
/// produces no ranges.
pub fn find_line_ranges(text: &[u8]) -> Vec<Range<usize>> {
    let mut ranges = Vec::new();
    let mut start = 0;
    for line in text.split_inclusive(|&b| b == b'\n') {
        let end = start + line.len();
        ranges.push(start..end);
        start = end;
    }
    ranges
}
// TODO: Make this configurable (probably higher up in the call stack)
fn is_word_byte(b: u8) -> bool {
    // ASCII alphanumerics and '_' are word bytes. Bytes 0x80..=0xff also
    // count so that multi-byte UTF-8 chars are treated as a single unit.
    b.is_ascii_alphanumeric() || b == b'_' || b >= 0x80
}
pub fn find_word_ranges(text: &[u8]) -> Vec<Range<usize>> {
let mut word_ranges = vec![];
let mut word_start_pos = 0;
let mut in_word = false;
for (i, b) in text.iter().enumerate() {
if in_word && !is_word_byte(*b) {
in_word = false;
word_ranges.push(word_start_pos..i);
word_start_pos = i;
} else if !in_word && is_word_byte(*b) {
in_word = true;
word_start_pos = i;
}
}
if in_word && word_start_pos < text.len() {
word_ranges.push(word_start_pos..text.len());
}
word_ranges
}
pub fn find_nonword_ranges(text: &[u8]) -> Vec<Range<usize>> {
text.iter()
.positions(|b| !is_word_byte(*b))
.map(|i| i..i + 1)
.collect()
}
/// Yields the bytes of `text` with every ASCII whitespace byte removed.
fn bytes_ignore_all_whitespace(text: &[u8]) -> impl Iterator<Item = u8> {
    text.iter()
        .filter(|b| !b.is_ascii_whitespace())
        .copied()
}
/// Yields the bytes of `text` with every run of ASCII whitespace collapsed
/// into a single `b' '`. Non-whitespace bytes pass through unchanged.
fn bytes_ignore_whitespace_amount(text: &[u8]) -> impl Iterator<Item = u8> {
    // Tracks whether the previously visited byte was whitespace, so that
    // only the first byte of each whitespace run is emitted.
    let mut prev_was_space = false;
    text.iter().filter_map(move |&b| {
        let was_space = prev_was_space;
        let is_space = b.is_ascii_whitespace();
        prev_was_space = is_space;
        match (was_space, is_space) {
            // Non-whitespace byte: emit as-is.
            (_, false) => Some(b),
            // First whitespace byte of a run: emit a single space.
            (false, true) => Some(b' '),
            // Subsequent whitespace in the same run: drop.
            (true, true) => None,
        }
    })
}
/// Hashes every item of `data` into `state`, then writes the item count as
/// a trailing suffix so inputs of different lengths stay distinguishable.
fn hash_with_length_suffix<I, H>(data: I, state: &mut H)
where
    I: IntoIterator,
    I::Item: Hash,
    H: Hasher,
{
    let count = data.into_iter().fold(0usize, |n, item| {
        item.hash(state);
        n + 1
    });
    state.write_usize(count);
}
/// Compares byte sequences based on a certain equivalence property.
///
/// This isn't a newtype `Wrapper<'a>(&'a [u8])` but an external comparison
/// object for the following reasons:
///
/// a. If it were newtype, a generic `wrap` function would be needed. It
///    couldn't be expressed as a simple closure:
///    `for<'a> Fn(&'a [u8]) -> ???<'a>`
/// b. Dynamic comparison object can be implemented intuitively. For example,
///    `pattern: &Regex` would have to be copied to all newtype instances if it
///    were newtype.
/// c. Hash values can be cached if hashing is controlled externally.
pub trait CompareBytes {
    /// Returns true if `left` and `right` are equivalent.
    fn eq(&self, left: &[u8], right: &[u8]) -> bool;

    /// Generates hash which respects the following property:
    /// `eq(left, right) => hash(left) == hash(right)`
    fn hash<H: Hasher>(&self, text: &[u8], state: &mut H);
}

// An instance might have e.g. Regex pattern, which can't be trivially copied.
// Such comparison object can be passed by reference.
impl<C: CompareBytes + ?Sized> CompareBytes for &C {
    fn eq(&self, left: &[u8], right: &[u8]) -> bool {
        <C as CompareBytes>::eq(self, left, right)
    }

    fn hash<H: Hasher>(&self, text: &[u8], state: &mut H) {
        <C as CompareBytes>::hash(self, text, state);
    }
}

/// Compares byte sequences literally.
#[derive(Clone, Debug, Default)]
pub struct CompareBytesExactly;

impl CompareBytes for CompareBytesExactly {
    fn eq(&self, left: &[u8], right: &[u8]) -> bool {
        left == right
    }

    fn hash<H: Hasher>(&self, text: &[u8], state: &mut H) {
        text.hash(state);
    }
}

/// Compares byte sequences ignoring any whitespace occurrences.
#[derive(Clone, Debug, Default)]
pub struct CompareBytesIgnoreAllWhitespace;

impl CompareBytes for CompareBytesIgnoreAllWhitespace {
    fn eq(&self, left: &[u8], right: &[u8]) -> bool {
        bytes_ignore_all_whitespace(left).eq(bytes_ignore_all_whitespace(right))
    }

    fn hash<H: Hasher>(&self, text: &[u8], state: &mut H) {
        // The length suffix keeps the hash consistent with eq() for
        // iterator-based equivalence (see hash_with_length_suffix).
        hash_with_length_suffix(bytes_ignore_all_whitespace(text), state);
    }
}

/// Compares byte sequences ignoring changes in whitespace amount.
#[derive(Clone, Debug, Default)]
pub struct CompareBytesIgnoreWhitespaceAmount;

impl CompareBytes for CompareBytesIgnoreWhitespaceAmount {
    fn eq(&self, left: &[u8], right: &[u8]) -> bool {
        bytes_ignore_whitespace_amount(left).eq(bytes_ignore_whitespace_amount(right))
    }

    fn hash<H: Hasher>(&self, text: &[u8], state: &mut H) {
        hash_with_length_suffix(bytes_ignore_whitespace_amount(text), state);
    }
}
// Not implementing Eq because the text should be compared by WordComparator.
#[derive(Clone, Copy, Debug)]
struct HashedWord<'input> {
    /// Hash of `text` as computed by the active `WordComparator`.
    hash: u64,
    /// The word's bytes, borrowed from the input text.
    text: &'input BStr,
}

/// Compares words (or tokens) under a certain hasher configuration.
#[derive(Clone, Debug, Default)]
struct WordComparator<C, S> {
    compare: C,
    hash_builder: S,
}

impl<C: CompareBytes> WordComparator<C, RandomState> {
    fn new(compare: C) -> Self {
        Self {
            compare,
            // TODO: switch to ahash for better performance?
            hash_builder: RandomState::new(),
        }
    }
}

impl<C: CompareBytes, S: BuildHasher> WordComparator<C, S> {
    /// Full equivalence check on raw bytes.
    fn eq(&self, left: &[u8], right: &[u8]) -> bool {
        self.compare.eq(left, right)
    }

    /// Cheap comparison of cached hashes first; the text is only compared
    /// when the hashes match.
    fn eq_hashed(&self, left: HashedWord<'_>, right: HashedWord<'_>) -> bool {
        left.hash == right.hash && self.compare.eq(left.text, right.text)
    }

    /// Hashes `text` with the comparator's equivalence-respecting hash.
    fn hash_one(&self, text: &[u8]) -> u64 {
        let mut state = self.hash_builder.build_hasher();
        self.compare.hash(text, &mut state);
        state.finish()
    }
}
/// Index in a list of word (or token) ranges in `DiffSource`.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
struct WordPosition(usize);

/// Index in a list of word (or token) ranges in `LocalDiffSource`.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
struct LocalWordPosition(usize);

/// One input text together with its tokenization and per-word hashes.
#[derive(Clone, Debug)]
struct DiffSource<'input, 'aux> {
    /// The whole input text.
    text: &'input BStr,
    /// Byte ranges of the words (tokens) within `text`.
    ranges: &'aux [Range<usize>],
    /// Hash of each word in `ranges`, computed once up front.
    hashes: Vec<u64>,
}
impl<'input, 'aux> DiffSource<'input, 'aux> {
    /// Builds a diff source over `text`, precomputing one hash per word
    /// range with the given comparator.
    fn new<T: AsRef<[u8]> + ?Sized, C: CompareBytes, S: BuildHasher>(
        text: &'input T,
        ranges: &'aux [Range<usize>],
        comp: &WordComparator<C, S>,
    ) -> Self {
        let text = BStr::new(text);
        let mut hashes = Vec::with_capacity(ranges.len());
        for range in ranges {
            hashes.push(comp.hash_one(&text[range.clone()]));
        }
        Self {
            text,
            ranges,
            hashes,
        }
    }

    /// Returns a view over the whole source with a zero word offset.
    fn local(&self) -> LocalDiffSource<'input, '_> {
        LocalDiffSource {
            text: self.text,
            ranges: self.ranges,
            hashes: &self.hashes,
            global_offset: WordPosition(0),
        }
    }

    /// Returns the byte range of the word at `position`.
    fn range_at(&self, position: WordPosition) -> Range<usize> {
        self.ranges[position.0].clone()
    }
}
/// A narrowed view of a `DiffSource`, used while recursing into sub-regions
/// of the inputs.
#[derive(Clone, Debug)]
struct LocalDiffSource<'input, 'aux> {
    /// The whole input text; `ranges` selects the visible region.
    text: &'input BStr,
    ranges: &'aux [Range<usize>],
    hashes: &'aux [u64],
    /// The number of preceding word ranges excluded from the self `ranges`.
    global_offset: WordPosition,
}

impl<'input> LocalDiffSource<'input, '_> {
    /// Restricts this view to `positions` (half-open, local coordinates),
    /// adjusting `global_offset` so positions can still be mapped back.
    fn narrowed(&self, positions: Range<LocalWordPosition>) -> Self {
        Self {
            text: self.text,
            ranges: &self.ranges[positions.start.0..positions.end.0],
            hashes: &self.hashes[positions.start.0..positions.end.0],
            global_offset: self.map_to_global(positions.start),
        }
    }

    /// Converts a position local to this view into a `DiffSource` position.
    fn map_to_global(&self, position: LocalWordPosition) -> WordPosition {
        WordPosition(self.global_offset.0 + position.0)
    }

    /// Iterates the words of this view paired with their cached hashes.
    fn hashed_words(
        &self,
    ) -> impl DoubleEndedIterator<Item = HashedWord<'input>> + ExactSizeIterator {
        iter::zip(self.ranges, self.hashes).map(|(range, &hash)| {
            let text = &self.text[range.clone()];
            HashedWord { hash, text }
        })
    }
}
/// Occurrence positions of each distinct word in one input, used to pick
/// uncommon words as diff anchors.
struct Histogram<'input> {
    word_to_positions: HashTable<HistogramEntry<'input>>,
}

// Many of the words are unique. We can inline up to 2 word positions (16 bytes
// on 64-bit platform) in SmallVec for free.
type HistogramEntry<'input> = (HashedWord<'input>, SmallVec<[LocalWordPosition; 2]>);

impl<'input> Histogram<'input> {
    /// Builds the histogram for `source`. Position lists are capped at
    /// `max_occurrences + 1` entries so over-frequent words can still be
    /// detected later without unbounded growth.
    fn calculate<C: CompareBytes, S: BuildHasher>(
        source: &LocalDiffSource<'input, '_>,
        comp: &WordComparator<C, S>,
        max_occurrences: usize,
    ) -> Self {
        let mut word_to_positions: HashTable<HistogramEntry> = HashTable::new();
        for (i, word) in source.hashed_words().enumerate() {
            let pos = LocalWordPosition(i);
            word_to_positions
                .entry(
                    word.hash,
                    |(w, _)| comp.eq(w.text, word.text),
                    |(w, _)| w.hash,
                )
                .and_modify(|(_, positions)| {
                    // Allow one more than max_occurrences, so we can later skip
                    // those with more than max_occurrences
                    if positions.len() <= max_occurrences {
                        positions.push(pos);
                    }
                })
                .or_insert_with(|| (word, smallvec![pos]));
        }
        Self { word_to_positions }
    }

    /// Groups entries by occurrence count, in ascending order of count
    /// (BTreeMap order), so callers can try the rarest words first.
    fn build_count_to_entries(&self) -> BTreeMap<usize, Vec<&HistogramEntry<'input>>> {
        let mut count_to_entries: BTreeMap<usize, Vec<_>> = BTreeMap::new();
        for entry in &self.word_to_positions {
            let (_, positions) = entry;
            let entries = count_to_entries.entry(positions.len()).or_default();
            entries.push(entry);
        }
        count_to_entries
    }

    /// Looks up the positions recorded for `word`, if any.
    fn positions_by_word<C: CompareBytes, S: BuildHasher>(
        &self,
        word: HashedWord<'input>,
        comp: &WordComparator<C, S>,
    ) -> Option<&[LocalWordPosition]> {
        let (_, positions) = self
            .word_to_positions
            .find(word.hash, |(w, _)| comp.eq(w.text, word.text))?;
        Some(positions)
    }
}
/// Finds the LCS given a array where the value of `input[i]` indicates that
/// the position of element `i` in the right array is at position `input[i]` in
/// the left array.
///
/// For example (some have multiple valid outputs):
///
/// [0,1,2] => [(0,0),(1,1),(2,2)]
/// [2,1,0] => [(0,2)]
/// [0,1,4,2,3,5,6] => [(0,0),(1,1),(2,3),(3,4),(5,5),(6,6)]
/// [0,1,4,3,2,5,6] => [(0,0),(1,1),(4,2),(5,5),(6,6)]
fn find_lcs(input: &[usize]) -> Vec<(usize, usize)> {
    if input.is_empty() {
        return vec![];
    }
    // chain[i] = (length of the longest increasing chain ending at i,
    //             left position of element i,
    //             right index of the previous element in that chain)
    let mut chain = vec![(0, 0, 0); input.len()];
    let mut global_longest = 0;
    let mut global_longest_right_pos = 0;
    for (right_pos, &left_pos) in input.iter().enumerate() {
        let mut longest_from_here = 1;
        // usize::MAX is the sentinel for "no predecessor".
        let mut previous_right_pos = usize::MAX;
        for i in (0..right_pos).rev() {
            let (previous_len, previous_left_pos, _) = chain[i];
            if previous_left_pos < left_pos {
                let len = previous_len + 1;
                if len > longest_from_here {
                    longest_from_here = len;
                    previous_right_pos = i;
                    if len > global_longest {
                        global_longest = len;
                        global_longest_right_pos = right_pos;
                        // If this is the longest chain globally so far, we cannot find a
                        // longer one by using a previous value, so break early.
                        break;
                    }
                }
            }
        }
        chain[right_pos] = (longest_from_here, left_pos, previous_right_pos);
    }
    // Walk the predecessor links back from the end of the globally longest
    // chain, then reverse so pairs come out in increasing order.
    let mut result = vec![];
    let mut right_pos = global_longest_right_pos;
    loop {
        let (_, left_pos, previous_right_pos) = chain[right_pos];
        result.push((left_pos, right_pos));
        if previous_right_pos == usize::MAX {
            break;
        }
        right_pos = previous_right_pos;
    }
    result.reverse();
    result
}
/// Finds unchanged word (or token) positions among the ones given as
/// arguments. The data between those words is ignored.
///
/// Matching `(left, right)` positions are appended to `found_positions` in
/// global coordinates.
fn collect_unchanged_words<C: CompareBytes, S: BuildHasher>(
    found_positions: &mut Vec<(WordPosition, WordPosition)>,
    left: &LocalDiffSource,
    right: &LocalDiffSource,
    comp: &WordComparator<C, S>,
) {
    if left.ranges.is_empty() || right.ranges.is_empty() {
        return;
    }
    // Prioritize LCS-based algorithm than leading/trailing matches
    let old_len = found_positions.len();
    collect_unchanged_words_lcs(found_positions, left, right, comp);
    if found_positions.len() != old_len {
        return;
    }
    // Trim leading common ranges (i.e. grow previous unchanged region)
    let common_leading_len = iter::zip(left.hashed_words(), right.hashed_words())
        .take_while(|&(l, r)| comp.eq_hashed(l, r))
        .count();
    let left_hashed_words = left.hashed_words().skip(common_leading_len);
    let right_hashed_words = right.hashed_words().skip(common_leading_len);
    // Trim trailing common ranges (i.e. grow next unchanged region)
    let common_trailing_len = iter::zip(left_hashed_words.rev(), right_hashed_words.rev())
        .take_while(|&(l, r)| comp.eq_hashed(l, r))
        .count();
    // Record the leading matches in order, then the trailing matches
    // (counting backwards from the end of each side).
    found_positions.extend(itertools::chain(
        (0..common_leading_len).map(|i| {
            (
                left.map_to_global(LocalWordPosition(i)),
                right.map_to_global(LocalWordPosition(i)),
            )
        }),
        (1..=common_trailing_len).rev().map(|i| {
            (
                left.map_to_global(LocalWordPosition(left.ranges.len() - i)),
                right.map_to_global(LocalWordPosition(right.ranges.len() - i)),
            )
        }),
    ));
}
/// LCS-based pass of [`collect_unchanged_words()`]: anchors on words that
/// occur (rarely and) equally often on both sides, then recurses into the
/// regions between the anchors.
fn collect_unchanged_words_lcs<C: CompareBytes, S: BuildHasher>(
    found_positions: &mut Vec<(WordPosition, WordPosition)>,
    left: &LocalDiffSource,
    right: &LocalDiffSource,
    comp: &WordComparator<C, S>,
) {
    let max_occurrences = 100;
    let left_histogram = Histogram::calculate(left, comp, max_occurrences);
    let left_count_to_entries = left_histogram.build_count_to_entries();
    // keys() is ascending, so the first key is the smallest occurrence count.
    if *left_count_to_entries.keys().next().unwrap() > max_occurrences {
        // If there are very many occurrences of all words, then we just give up.
        return;
    }
    let right_histogram = Histogram::calculate(right, comp, max_occurrences);
    // Look for words with few occurrences in `left` (could equally well have picked
    // `right`?). If any of them also occur in `right`, then we add the words to
    // the LCS.
    let Some(uncommon_shared_word_positions) =
        left_count_to_entries.values().find_map(|left_entries| {
            let mut both_positions = left_entries
                .iter()
                .filter_map(|&(word, left_positions)| {
                    let right_positions = right_histogram.positions_by_word(*word, comp)?;
                    // Only accept words occurring the same number of times
                    // on both sides.
                    (left_positions.len() == right_positions.len())
                        .then_some((left_positions, right_positions))
                })
                .peekable();
            both_positions.peek().is_some().then_some(both_positions)
        })
    else {
        return;
    };
    // [(index into ranges, serial to identify {word, occurrence #})]
    let (mut left_positions, mut right_positions): (Vec<_>, Vec<_>) =
        uncommon_shared_word_positions
            .flat_map(|(lefts, rights)| iter::zip(lefts, rights))
            .enumerate()
            .map(|(serial, (&left_pos, &right_pos))| ((left_pos, serial), (right_pos, serial)))
            .unzip();
    left_positions.sort_unstable_by_key(|&(pos, _serial)| pos);
    right_positions.sort_unstable_by_key(|&(pos, _serial)| pos);
    // For each right-side occurrence (in order), the index of its matching
    // left-side occurrence — the input shape find_lcs() expects.
    let left_index_by_right_index: Vec<usize> = {
        let mut left_index_map = vec![0; left_positions.len()];
        for (i, &(_pos, serial)) in left_positions.iter().enumerate() {
            left_index_map[serial] = i;
        }
        right_positions
            .iter()
            .map(|&(_pos, serial)| left_index_map[serial])
            .collect()
    };
    let lcs = find_lcs(&left_index_by_right_index);
    // Produce output word positions, recursing into the modified areas between
    // the elements in the LCS.
    let mut previous_left_position = LocalWordPosition(0);
    let mut previous_right_position = LocalWordPosition(0);
    for (left_index, right_index) in lcs {
        let (left_position, _) = left_positions[left_index];
        let (right_position, _) = right_positions[right_index];
        collect_unchanged_words(
            found_positions,
            &left.narrowed(previous_left_position..left_position),
            &right.narrowed(previous_right_position..right_position),
            comp,
        );
        found_positions.push((
            left.map_to_global(left_position),
            right.map_to_global(right_position),
        ));
        previous_left_position = LocalWordPosition(left_position.0 + 1);
        previous_right_position = LocalWordPosition(right_position.0 + 1);
    }
    // Also recurse into range at end (after common ranges).
    collect_unchanged_words(
        found_positions,
        &left.narrowed(previous_left_position..LocalWordPosition(left.ranges.len())),
        &right.narrowed(previous_right_position..LocalWordPosition(right.ranges.len())),
        comp,
    );
}
/// Intersects two sorted sequences of `(base, other)` word positions by
/// `base`. `base` positions should refer to the same source text.
fn intersect_unchanged_words(
    current_positions: Vec<(WordPosition, Vec<WordPosition>)>,
    new_positions: &[(WordPosition, WordPosition)],
) -> Vec<(WordPosition, Vec<WordPosition>)> {
    itertools::merge_join_by(
        current_positions,
        new_positions,
        |(cur_base_pos, _), (new_base_pos, _)| cur_base_pos.cmp(new_base_pos),
    )
    // Keep only base positions present in both sequences ...
    .filter_map(|entry| entry.both())
    // ... and append the new "other" position to the accumulated list.
    .map(|((base_pos, mut other_positions), &(_, new_other_pos))| {
        other_positions.push(new_other_pos);
        (base_pos, other_positions)
    })
    .collect()
}
/// Byte ranges of one region that is unchanged across all inputs: a range
/// in the base input plus the corresponding range in each other input.
#[derive(Clone, PartialEq, Eq, Debug)]
struct UnchangedRange {
    // Inline up to two sides (base + one other)
    base: Range<usize>,
    others: SmallVec<[Range<usize>; 1]>,
}

impl UnchangedRange {
    /// Translates word positions to byte ranges in the source texts.
    fn from_word_positions(
        base_source: &DiffSource,
        other_sources: &[DiffSource],
        base_position: WordPosition,
        other_positions: &[WordPosition],
    ) -> Self {
        assert_eq!(other_sources.len(), other_positions.len());
        let base = base_source.range_at(base_position);
        let others = iter::zip(other_sources, other_positions)
            .map(|(source, pos)| source.range_at(*pos))
            .collect();
        Self { base, others }
    }

    /// Returns true if the range is empty in the base and in every other
    /// input.
    fn is_all_empty(&self) -> bool {
        self.base.is_empty() && self.others.iter().all(|r| r.is_empty())
    }
}
/// Takes any number of inputs and finds regions that are the same between all
/// of them.
#[derive(Clone, Debug)]
pub struct ContentDiff<'input> {
    /// The first input; every other input is diffed against this one.
    base_input: &'input BStr,
    other_inputs: SmallVec<[&'input BStr; 1]>,
    /// Sorted list of ranges of unchanged regions in bytes.
    ///
    /// The list should never be empty. The first and the last region may be
    /// empty if inputs start/end with changes.
    unchanged_regions: Vec<UnchangedRange>,
}
impl<'input> ContentDiff<'input> {
    /// Computes a diff of `inputs`, splitting each input into tokens with
    /// `tokenizer` and comparing tokens with `compare`.
    ///
    /// The first input is the base; every other input is diffed against it.
    ///
    /// # Panics
    /// Panics if `inputs` is empty.
    pub fn for_tokenizer<T: AsRef<[u8]> + ?Sized + 'input>(
        inputs: impl IntoIterator<Item = &'input T>,
        tokenizer: impl Fn(&[u8]) -> Vec<Range<usize>>,
        compare: impl CompareBytes,
    ) -> Self {
        let mut inputs = inputs.into_iter().map(BStr::new);
        let base_input = inputs.next().expect("inputs must not be empty");
        let other_inputs: SmallVec<[&BStr; 1]> = inputs.collect();
        // First tokenize each input
        let base_token_ranges: Vec<Range<usize>>;
        let other_token_ranges: Vec<Vec<Range<usize>>>;
        // No need to tokenize if one of the inputs is empty. Non-empty inputs
        // are all different as long as the tokenizer emits non-empty ranges.
        // This means "" and " " are different even if the compare function is
        // ignore-whitespace. They are tokenized as [] and [" "] respectively.
        if base_input.is_empty() || other_inputs.iter().any(|input| input.is_empty()) {
            base_token_ranges = vec![];
            other_token_ranges = std::iter::repeat_n(vec![], other_inputs.len()).collect();
        } else {
            base_token_ranges = tokenizer(base_input);
            other_token_ranges = other_inputs
                .iter()
                .map(|other_input| tokenizer(other_input))
                .collect();
        }
        Self::with_inputs_and_token_ranges(
            base_input,
            other_inputs,
            &base_token_ranges,
            &other_token_ranges,
            compare,
        )
    }
    /// Builds the diff from pre-tokenized inputs by collecting unchanged
    /// word ranges between the base and each other input, then intersecting
    /// them across all inputs.
    fn with_inputs_and_token_ranges(
        base_input: &'input BStr,
        other_inputs: SmallVec<[&'input BStr; 1]>,
        base_token_ranges: &[Range<usize>],
        other_token_ranges: &[Vec<Range<usize>>],
        compare: impl CompareBytes,
    ) -> Self {
        assert_eq!(other_inputs.len(), other_token_ranges.len());
        let comp = WordComparator::new(compare);
        let base_source = DiffSource::new(base_input, base_token_ranges, &comp);
        let other_sources = iter::zip(&other_inputs, other_token_ranges)
            .map(|(input, token_ranges)| DiffSource::new(input, token_ranges, &comp))
            .collect_vec();
        let unchanged_regions = match &*other_sources {
            // Consider the whole range of the base input as unchanged compared
            // to itself.
            [] => {
                let whole_range = UnchangedRange {
                    base: 0..base_source.text.len(),
                    others: smallvec![],
                };
                vec![whole_range]
            }
            // Diff each other input against the base. Intersect the previously
            // found ranges with the ranges in the diff.
            [first_other_source, tail_other_sources @ ..] => {
                let mut unchanged_regions = Vec::new();
                // Add an empty range at the start to make life easier for hunks().
                unchanged_regions.push(UnchangedRange {
                    base: 0..0,
                    others: smallvec![0..0; other_inputs.len()],
                });
                let mut first_positions = Vec::new();
                collect_unchanged_words(
                    &mut first_positions,
                    &base_source.local(),
                    &first_other_source.local(),
                    &comp,
                );
                if tail_other_sources.is_empty() {
                    // Two-input diff: the positions from the single comparison
                    // are already the final unchanged regions.
                    unchanged_regions.extend(first_positions.iter().map(
                        |&(base_pos, other_pos)| {
                            UnchangedRange::from_word_positions(
                                &base_source,
                                &other_sources,
                                base_pos,
                                &[other_pos],
                            )
                        },
                    ));
                } else {
                    // N-way diff: fold each remaining input into the running
                    // intersection of unchanged word positions.
                    let first_positions = first_positions
                        .iter()
                        .map(|&(base_pos, other_pos)| (base_pos, vec![other_pos]))
                        .collect();
                    let intersected_positions = tail_other_sources.iter().fold(
                        first_positions,
                        |current_positions, other_source| {
                            let mut new_positions = Vec::new();
                            collect_unchanged_words(
                                &mut new_positions,
                                &base_source.local(),
                                &other_source.local(),
                                &comp,
                            );
                            intersect_unchanged_words(current_positions, &new_positions)
                        },
                    );
                    unchanged_regions.extend(intersected_positions.iter().map(
                        |(base_pos, other_positions)| {
                            UnchangedRange::from_word_positions(
                                &base_source,
                                &other_sources,
                                *base_pos,
                                other_positions,
                            )
                        },
                    ));
                };
                // Add an empty range at the end to make life easier for hunks().
                unchanged_regions.push(UnchangedRange {
                    base: base_input.len()..base_input.len(),
                    others: other_inputs
                        .iter()
                        .map(|input| input.len()..input.len())
                        .collect(),
                });
                unchanged_regions
            }
        };
        let mut diff = Self {
            base_input,
            other_inputs,
            unchanged_regions,
        };
        diff.compact_unchanged_regions();
        diff
    }
    /// Compares `inputs` as opaque blobs: they are either wholly equal or
    /// wholly different (no tokenization).
    pub fn unrefined<T: AsRef<[u8]> + ?Sized + 'input>(
        inputs: impl IntoIterator<Item = &'input T>,
    ) -> Self {
        ContentDiff::for_tokenizer(inputs, |_| vec![], CompareBytesExactly)
    }
    /// Compares `inputs` line by line.
    pub fn by_line<T: AsRef<[u8]> + ?Sized + 'input>(
        inputs: impl IntoIterator<Item = &'input T>,
    ) -> Self {
        ContentDiff::for_tokenizer(inputs, find_line_ranges, CompareBytesExactly)
    }
    /// Compares `inputs` word by word.
    ///
    /// The `inputs` is usually a changed hunk (e.g. a `DiffHunk::Different`)
    /// that was the output from a line-by-line diff.
    pub fn by_word<T: AsRef<[u8]> + ?Sized + 'input>(
        inputs: impl IntoIterator<Item = &'input T>,
    ) -> Self {
        let mut diff = ContentDiff::for_tokenizer(inputs, find_word_ranges, CompareBytesExactly);
        diff.refine_changed_regions(find_nonword_ranges, CompareBytesExactly);
        diff
    }
    /// Returns iterator over matching and different texts.
    pub fn hunks(&self) -> DiffHunkIterator<'_, 'input> {
        let ranges = self.hunk_ranges();
        DiffHunkIterator { diff: self, ranges }
    }
    /// Returns iterator over matching and different ranges in bytes.
    pub fn hunk_ranges(&self) -> DiffHunkRangeIterator<'_> {
        DiffHunkRangeIterator::new(self)
    }
    /// Returns contents at the unchanged `range`.
    fn hunk_at(&self, range: &UnchangedRange) -> impl Iterator<Item = &'input BStr> {
        itertools::chain(
            iter::once(&self.base_input[range.base.clone()]),
            iter::zip(&self.other_inputs, &range.others).map(|(input, r)| &input[r.clone()]),
        )
    }
    /// Returns contents between the `previous` ends and the `current` starts.
    fn hunk_between(
        &self,
        previous: &UnchangedRange,
        current: &UnchangedRange,
    ) -> impl Iterator<Item = &'input BStr> {
        itertools::chain(
            iter::once(&self.base_input[previous.base.end..current.base.start]),
            itertools::izip!(&self.other_inputs, &previous.others, &current.others)
                .map(|(input, prev, cur)| &input[prev.end..cur.start]),
        )
    }
    /// Uses the given tokenizer to split the changed regions into smaller
    /// regions. Then tries to find unchanged regions among them.
    pub fn refine_changed_regions(
        &mut self,
        tokenizer: impl Fn(&[u8]) -> Vec<Range<usize>>,
        compare: impl CompareBytes,
    ) {
        let mut new_unchanged_ranges = vec![self.unchanged_regions[0].clone()];
        for window in self.unchanged_regions.windows(2) {
            let [previous, current]: &[_; 2] = window.try_into().unwrap();
            // For the changed region between the previous region and the current one,
            // create a new Diff instance. Then adjust the start positions and
            // offsets to be valid in the context of the larger Diff instance
            // (`self`).
            let refined_diff = ContentDiff::for_tokenizer(
                self.hunk_between(previous, current),
                &tokenizer,
                &compare,
            );
            for refined in &refined_diff.unchanged_regions {
                // Shift the refined (hunk-local) ranges by the end of the
                // preceding unchanged region to get absolute positions.
                let new_base_start = refined.base.start + previous.base.end;
                let new_base_end = refined.base.end + previous.base.end;
                let new_others = iter::zip(&refined.others, &previous.others)
                    .map(|(refi, prev)| (refi.start + prev.end)..(refi.end + prev.end))
                    .collect();
                new_unchanged_ranges.push(UnchangedRange {
                    base: new_base_start..new_base_end,
                    others: new_others,
                });
            }
            new_unchanged_ranges.push(current.clone());
        }
        self.unchanged_regions = new_unchanged_ranges;
        self.compact_unchanged_regions();
    }
    /// Merges adjacent unchanged regions that touch on every side, so hunks()
    /// doesn't emit zero-length "different" hunks between them.
    fn compact_unchanged_regions(&mut self) {
        let mut compacted = vec![];
        let mut maybe_previous: Option<UnchangedRange> = None;
        for current in &self.unchanged_regions {
            if let Some(previous) = maybe_previous {
                if previous.base.end == current.base.start
                    && iter::zip(&previous.others, &current.others)
                        .all(|(prev, cur)| prev.end == cur.start)
                {
                    // Contiguous on all sides: fuse into one region and keep
                    // scanning.
                    maybe_previous = Some(UnchangedRange {
                        base: previous.base.start..current.base.end,
                        others: iter::zip(&previous.others, &current.others)
                            .map(|(prev, cur)| prev.start..cur.end)
                            .collect(),
                    });
                    continue;
                }
                compacted.push(previous);
            }
            maybe_previous = Some(current.clone());
        }
        if let Some(previous) = maybe_previous {
            compacted.push(previous);
        }
        self.unchanged_regions = compacted;
    }
}
/// Hunk texts.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct DiffHunk<'input> {
    /// Whether the contents match across all inputs or differ.
    pub kind: DiffHunkKind,
    /// One content slice per diffed input, in input order.
    pub contents: DiffHunkContentVec<'input>,
}
impl<'input> DiffHunk<'input> {
pub fn matching<T: AsRef<[u8]> + ?Sized + 'input>(
contents: impl IntoIterator<Item = &'input T>,
) -> Self {
Self {
kind: DiffHunkKind::Matching,
contents: contents.into_iter().map(BStr::new).collect(),
}
}
pub fn different<T: AsRef<[u8]> + ?Sized + 'input>(
contents: impl IntoIterator<Item = &'input T>,
) -> Self {
Self {
kind: DiffHunkKind::Different,
contents: contents.into_iter().map(BStr::new).collect(),
}
}
}
/// Classification of a hunk: identical across inputs, or not.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum DiffHunkKind {
    /// Contents are the same in all inputs.
    Matching,
    /// Contents differ between at least two inputs.
    Different,
}
// Inline up to two sides
pub type DiffHunkContentVec<'input> = SmallVec<[&'input BStr; 2]>;
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/git.rs | lib/src/git.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::borrow::Borrow;
use std::borrow::Cow;
use std::collections::HashMap;
use std::collections::HashSet;
use std::default::Default;
use std::ffi::OsString;
use std::fs::File;
use std::iter;
use std::num::NonZeroU32;
use std::path::PathBuf;
use std::sync::Arc;
use bstr::BStr;
use bstr::BString;
use futures::StreamExt as _;
use gix::refspec::Instruction;
use itertools::Itertools as _;
use pollster::FutureExt as _;
use thiserror::Error;
use crate::backend::BackendError;
use crate::backend::BackendResult;
use crate::backend::CommitId;
use crate::backend::TreeValue;
use crate::commit::Commit;
use crate::config::ConfigGetError;
use crate::file_util::IoResultExt as _;
use crate::file_util::PathError;
use crate::git_backend::GitBackend;
use crate::git_subprocess::GitSubprocessContext;
use crate::git_subprocess::GitSubprocessError;
use crate::index::IndexError;
use crate::matchers::EverythingMatcher;
use crate::merged_tree::MergedTree;
use crate::merged_tree::TreeDiffEntry;
use crate::object_id::ObjectId as _;
use crate::op_store::RefTarget;
use crate::op_store::RefTargetOptionExt as _;
use crate::op_store::RemoteRef;
use crate::op_store::RemoteRefState;
use crate::ref_name::GitRefName;
use crate::ref_name::GitRefNameBuf;
use crate::ref_name::RefName;
use crate::ref_name::RefNameBuf;
use crate::ref_name::RemoteName;
use crate::ref_name::RemoteNameBuf;
use crate::ref_name::RemoteRefSymbol;
use crate::ref_name::RemoteRefSymbolBuf;
use crate::refs::BookmarkPushUpdate;
use crate::repo::MutableRepo;
use crate::repo::Repo;
use crate::repo_path::RepoPath;
use crate::revset::RevsetExpression;
use crate::settings::UserSettings;
use crate::store::Store;
use crate::str_util::StringExpression;
use crate::str_util::StringMatcher;
use crate::str_util::StringPattern;
use crate::view::View;
/// Reserved remote name for the backing Git repo. Local Git branches
/// (`refs/heads/*`) are mapped to this pseudo-remote when imported.
pub const REMOTE_NAME_FOR_LOCAL_GIT_REPO: &RemoteName = RemoteName::new("git");
/// Git ref prefix that would conflict with the reserved "git" remote.
pub const RESERVED_REMOTE_REF_NAMESPACE: &str = "refs/remotes/git/";
/// Ref name used as a placeholder to unset HEAD without a commit.
const UNBORN_ROOT_REF_NAME: &str = "refs/jj/root";
/// Dummy file to be added to the index to indicate that the user is editing a
/// commit with a conflict that isn't represented in the Git index.
const INDEX_DUMMY_CONFLICT_FILE: &str = ".jj-do-not-resolve-this-conflict";
/// Git-related settings read from the jj user configuration.
#[derive(Clone, Debug)]
pub struct GitSettings {
    // TODO: Delete in jj 0.42.0+
    pub auto_local_bookmark: bool,
    /// Whether to abandon commits that became unreachable in Git on import.
    pub abandon_unreachable_commits: bool,
    /// Path to the `git` executable used for subprocess invocations.
    pub executable_path: PathBuf,
    // NOTE(review): presumably controls writing jj's change-id header on
    // exported Git commits — confirm against the Git backend.
    pub write_change_id_header: bool,
}
impl GitSettings {
    /// Reads all Git-related settings from the user configuration.
    pub fn from_settings(settings: &UserSettings) -> Result<Self, ConfigGetError> {
        let auto_local_bookmark = settings.get_bool("git.auto-local-bookmark")?;
        let abandon_unreachable_commits = settings.get_bool("git.abandon-unreachable-commits")?;
        let executable_path = settings.get("git.executable-path")?;
        let write_change_id_header = settings.get("git.write-change-id-header")?;
        Ok(Self {
            auto_local_bookmark,
            abandon_unreachable_commits,
            executable_path,
            write_change_id_header,
        })
    }
    /// Derives subprocess options from these settings, with an empty
    /// environment overlay.
    pub fn to_subprocess_options(&self) -> GitSubprocessOptions {
        let executable_path = self.executable_path.clone();
        GitSubprocessOptions {
            executable_path,
            environment: HashMap::new(),
        }
    }
}
/// Configuration for a Git subprocess
#[derive(Clone, Debug)]
pub struct GitSubprocessOptions {
    /// Path to the `git` executable to spawn.
    pub executable_path: PathBuf,
    /// Used by consumers of jj-lib to set environment variables like
    /// GIT_ASKPASS (for authentication callbacks) or GIT_TRACE (for debugging).
    /// Setting per-subcommand environment variables avoids the need for unsafe
    /// code and process-wide state.
    pub environment: HashMap<OsString, OsString>,
}
impl GitSubprocessOptions {
    /// Builds subprocess options from the user configuration, starting with
    /// an empty environment overlay.
    pub fn from_settings(settings: &UserSettings) -> Result<Self, ConfigGetError> {
        let executable_path = settings.get("git.executable-path")?;
        Ok(Self {
            executable_path,
            environment: HashMap::new(),
        })
    }
}
/// Errors from validating a user-supplied Git remote name.
#[derive(Debug, Error)]
pub enum GitRemoteNameError {
    /// The name collides with the pseudo-remote backing the local Git repo.
    #[error(
        "Git remote named '{name}' is reserved for local Git repository",
        name = REMOTE_NAME_FOR_LOCAL_GIT_REPO.as_symbol()
    )]
    ReservedForLocalGitRepo,
    /// The name contains a `/`, which jj cannot represent.
    #[error("Git remotes with slashes are incompatible with jj: {}", .0.as_symbol())]
    WithSlash(RemoteNameBuf),
}
/// Rejects remote names jj cannot work with: the reserved "git" name and
/// names containing slashes.
fn validate_remote_name(name: &RemoteName) -> Result<(), GitRemoteNameError> {
    if name == REMOTE_NAME_FOR_LOCAL_GIT_REPO {
        return Err(GitRemoteNameError::ReservedForLocalGitRepo);
    }
    if name.as_str().contains('/') {
        return Err(GitRemoteNameError::WithSlash(name.to_owned()));
    }
    Ok(())
}
/// Type of Git ref to be imported or exported.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum GitRefKind {
Bookmark,
Tag,
}
/// Stats from a git push
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct GitPushStats {
    /// reference accepted by the remote
    pub pushed: Vec<GitRefNameBuf>,
    /// rejected reference, due to lease failure, with an optional reason
    pub rejected: Vec<(GitRefNameBuf, Option<String>)>,
    /// reference rejected by the remote, with an optional reason
    pub remote_rejected: Vec<(GitRefNameBuf, Option<String>)>,
}
impl GitPushStats {
    /// True when nothing was rejected, locally (lease failure) or by the
    /// remote.
    pub fn all_ok(&self) -> bool {
        let any_rejected = !self.rejected.is_empty() || !self.remote_rejected.is_empty();
        !any_rejected
    }
}
/// Newtype to look up `HashMap` entry by key of shorter lifetime.
///
/// https://users.rust-lang.org/t/unexpected-lifetime-issue-with-hashmap-remove/113961/6
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
struct RemoteRefKey<'a>(RemoteRefSymbol<'a>);
// Lets a map keyed by RemoteRefKey<'a> be queried with a RemoteRefSymbol<'b>
// whose lifetime 'b is shorter than 'a.
impl<'a: 'b, 'b> Borrow<RemoteRefSymbol<'b>> for RemoteRefKey<'a> {
    fn borrow(&self) -> &RemoteRefSymbol<'b> {
        &self.0
    }
}
/// Representation of a Git refspec
///
/// It is often the case that we need only parts of the refspec,
/// Passing strings around and repeatedly parsing them is sub-optimal, confusing
/// and error prone
#[derive(Debug, Hash, PartialEq, Eq)]
pub(crate) struct RefSpec {
    /// Whether the push is forced (rendered as a leading '+').
    forced: bool,
    // Source and destination may be fully-qualified ref name, glob pattern, or
    // object ID. The GitRefNameBuf type shouldn't be used.
    source: Option<String>,
    destination: String,
}
impl RefSpec {
    /// Creates a force-push refspec from `source` to `destination`.
    fn forced(source: impl Into<String>, destination: impl Into<String>) -> Self {
        Self {
            forced: true,
            source: Some(source.into()),
            destination: destination.into(),
        }
    }
    /// Creates a deletion refspec for `destination`.
    fn delete(destination: impl Into<String>) -> Self {
        // We don't force push on branch deletion
        Self {
            forced: false,
            source: None,
            destination: destination.into(),
        }
    }
    /// Renders the refspec in Git syntax, including the force flag.
    pub(crate) fn to_git_format(&self) -> String {
        let force_flag = if self.forced { "+" } else { "" };
        format!("{}{}", force_flag, self.to_git_format_not_forced())
    }
    /// Format git refspec without the leading force flag '+'
    ///
    /// When independently setting --force-with-lease, having the
    /// leading flag overrides the lease, so we need to print it
    /// without it
    pub(crate) fn to_git_format_not_forced(&self) -> String {
        match &self.source {
            Some(source) => format!("{}:{}", source, self.destination),
            None => format!(":{}", self.destination),
        }
    }
}
/// Representation of a negative Git refspec
#[derive(Debug)]
#[repr(transparent)]
pub(crate) struct NegativeRefSpec {
    /// Ref name or pattern to exclude (rendered with a leading '^').
    source: String,
}
impl NegativeRefSpec {
    /// Creates a negative refspec excluding `source`.
    fn new(source: impl Into<String>) -> Self {
        let source = source.into();
        Self { source }
    }
    /// Renders the negative refspec in Git syntax, e.g. `^refs/heads/main`.
    pub(crate) fn to_git_format(&self) -> String {
        ["^", self.source.as_str()].concat()
    }
}
/// Helper struct that matches a refspec with its expected location in the
/// remote it's being pushed to
pub(crate) struct RefToPush<'a> {
    /// The refspec being pushed.
    pub(crate) refspec: &'a RefSpec,
    /// Commit the remote ref is expected to point at (None if absent),
    /// used for --force-with-lease.
    pub(crate) expected_location: Option<&'a CommitId>,
}
impl<'a> RefToPush<'a> {
    /// Pairs `refspec` with its expected remote location looked up from
    /// `expected_locations`.
    fn new(
        refspec: &'a RefSpec,
        expected_locations: &'a HashMap<&GitRefName, Option<&CommitId>>,
    ) -> Self {
        let destination = GitRefName::new(&refspec.destination);
        let expected_location = *expected_locations.get(destination).expect(
            "The refspecs and the expected locations were both constructed from the same \
             source of truth. This means the lookup should always work.",
        );
        Self {
            refspec,
            expected_location,
        }
    }
    /// Renders a `--force-with-lease=<ref>:<expect>` value; an absent
    /// expected location renders as an empty commit id.
    pub(crate) fn to_git_lease(&self) -> String {
        let expected = self
            .expected_location
            .map(|commit_id| commit_id.to_string())
            .unwrap_or_default();
        format!("{}:{}", self.refspec.destination, expected)
    }
}
/// Translates Git ref name to jj's `name@remote` symbol. Returns `None` if the
/// ref cannot be represented in jj.
pub fn parse_git_ref(full_name: &GitRefName) -> Option<(GitRefKind, RemoteRefSymbol<'_>)> {
    let full = full_name.as_str();
    if let Some(name) = full.strip_prefix("refs/heads/") {
        // Git CLI says 'HEAD' is not a valid branch name
        if name == "HEAD" {
            return None;
        }
        let symbol = RemoteRefSymbol {
            name: RefName::new(name),
            remote: REMOTE_NAME_FOR_LOCAL_GIT_REPO,
        };
        return Some((GitRefKind::Bookmark, symbol));
    }
    if let Some(remote_and_name) = full.strip_prefix("refs/remotes/") {
        let (remote, name) = remote_and_name.split_once('/')?;
        // "refs/remotes/origin/HEAD" isn't a real remote-tracking branch
        if remote == REMOTE_NAME_FOR_LOCAL_GIT_REPO || name == "HEAD" {
            return None;
        }
        let symbol = RemoteRefSymbol {
            name: RefName::new(name),
            remote: RemoteName::new(remote),
        };
        return Some((GitRefKind::Bookmark, symbol));
    }
    if let Some(name) = full.strip_prefix("refs/tags/") {
        let symbol = RemoteRefSymbol {
            name: RefName::new(name),
            remote: REMOTE_NAME_FOR_LOCAL_GIT_REPO,
        };
        return Some((GitRefKind::Tag, symbol));
    }
    None
}
/// Inverse of `parse_git_ref`: renders a jj `name@remote` symbol as a
/// fully-qualified Git ref name. Returns `None` for symbols that have no
/// Git representation (empty parts, "HEAD" bookmarks, remote tags).
fn to_git_ref_name(kind: GitRefKind, symbol: RemoteRefSymbol<'_>) -> Option<GitRefNameBuf> {
    let RemoteRefSymbol { name, remote } = symbol;
    let (name, remote) = (name.as_str(), remote.as_str());
    if name.is_empty() || remote.is_empty() {
        return None;
    }
    let is_local = remote == REMOTE_NAME_FOR_LOCAL_GIT_REPO;
    let full_name = match kind {
        // "HEAD" is not a valid branch name.
        GitRefKind::Bookmark if name == "HEAD" => return None,
        GitRefKind::Bookmark if is_local => format!("refs/heads/{name}"),
        GitRefKind::Bookmark => format!("refs/remotes/{remote}/{name}"),
        GitRefKind::Tag if is_local => format!("refs/tags/{name}"),
        // Tags only exist on the local pseudo-remote.
        GitRefKind::Tag => return None,
    };
    Some(full_name.into())
}
/// Error returned when the repo's backend is not the Git backend.
#[derive(Debug, Error)]
#[error("The repo is not backed by a Git repo")]
pub struct UnexpectedGitBackendError;
/// Returns the underlying `GitBackend` implementation.
pub fn get_git_backend(store: &Store) -> Result<&GitBackend, UnexpectedGitBackendError> {
    match store.backend_impl() {
        Some(backend) => Ok(backend),
        None => Err(UnexpectedGitBackendError),
    }
}
/// Returns new thread-local instance to access to the underlying Git repo.
pub fn get_git_repo(store: &Store) -> Result<gix::Repository, UnexpectedGitBackendError> {
    let backend = get_git_backend(store)?;
    Ok(backend.git_repo())
}
/// Checks if `git_ref` points to a Git commit object, and returns its id.
///
/// If the ref points to the previously `known_commit_oid` (i.e. unchanged),
/// this should be faster than `git_ref.into_fully_peeled_id()`.
///
/// Returns `None` if the ref cannot be peeled or the peeled object is not a
/// commit (e.g. a tag of a tree or blob).
fn resolve_git_ref_to_commit_id(
    git_ref: &gix::Reference,
    known_commit_oid: Option<&gix::oid>,
) -> Option<gix::ObjectId> {
    let mut peeling_ref = Cow::Borrowed(git_ref);
    // Try fast path if we have a candidate id which is known to be a commit object.
    if let Some(known_oid) = known_commit_oid {
        let raw_ref = &git_ref.inner;
        // Direct ref already pointing at the known commit: no peeling needed.
        if let Some(oid) = raw_ref.target.try_id()
            && oid == known_oid
        {
            return Some(oid.to_owned());
        }
        if let Some(oid) = raw_ref.peeled
            && oid == known_oid
        {
            // Perhaps an annotated tag stored in packed-refs file, and pointing to the
            // already known target commit.
            return Some(oid);
        }
        // A tag (according to ref name.) Try to peel one more level. This is slightly
        // faster than recurse into into_fully_peeled_id(). If we recorded a tag oid, we
        // could skip this at all.
        if raw_ref.peeled.is_none() && git_ref.name().as_bstr().starts_with(b"refs/tags/") {
            let maybe_tag = git_ref
                .try_id()
                .and_then(|id| id.object().ok())
                .and_then(|object| object.try_into_tag().ok());
            if let Some(oid) = maybe_tag.as_ref().and_then(|tag| tag.target_id().ok()) {
                let oid = oid.detach();
                if oid == known_oid {
                    // An annotated tag pointing to the already known target commit.
                    return Some(oid);
                }
                // Unknown id. Recurse from the current state. A tag may point to
                // non-commit object.
                peeling_ref.to_mut().inner.target = gix::refs::Target::Object(oid);
            }
        }
    }
    // Alternatively, we might want to inline the first half of the peeling
    // loop. into_fully_peeled_id() looks up the target object to see if it's
    // a tag or not, and we need to check if it's a commit object.
    let peeled_id = peeling_ref.into_owned().into_fully_peeled_id().ok()?;
    let is_commit = peeled_id
        .object()
        .is_ok_and(|object| object.kind.is_commit());
    is_commit.then_some(peeled_id.detach())
}
/// Errors that can occur while importing refs from the backing Git repo.
#[derive(Error, Debug)]
pub enum GitImportError {
    #[error("Failed to read Git HEAD target commit {id}")]
    MissingHeadTarget {
        id: CommitId,
        #[source]
        err: BackendError,
    },
    #[error("Ancestor of Git ref {symbol} is missing")]
    MissingRefAncestor {
        symbol: RemoteRefSymbolBuf,
        #[source]
        err: BackendError,
    },
    /// Generic backend failure.
    #[error(transparent)]
    Backend(#[from] BackendError),
    /// Index lookup failure.
    #[error(transparent)]
    Index(#[from] IndexError),
    /// Error from the underlying Git layer (gix).
    #[error(transparent)]
    Git(Box<dyn std::error::Error + Send + Sync>),
    /// The repo is not backed by a Git repo.
    #[error(transparent)]
    UnexpectedBackend(#[from] UnexpectedGitBackendError),
}
impl GitImportError {
    /// Boxes an arbitrary Git-layer error into the generic `Git` variant.
    fn from_git(source: impl Into<Box<dyn std::error::Error + Send + Sync>>) -> Self {
        let boxed: Box<dyn std::error::Error + Send + Sync> = source.into();
        Self::Git(boxed)
    }
}
/// Options for [`import_refs()`].
#[derive(Debug)]
pub struct GitImportOptions {
    // TODO: Delete in jj 0.42.0+
    // NOTE(review): legacy global auto-track flag superseded by
    // `remote_auto_track_bookmarks` — confirm before documenting further.
    pub auto_local_bookmark: bool,
    /// Whether to abandon commits that became unreachable in Git.
    pub abandon_unreachable_commits: bool,
    /// Per-remote patterns whether to track bookmarks automatically.
    pub remote_auto_track_bookmarks: HashMap<RemoteNameBuf, StringMatcher>,
}
/// Describes changes made by `import_refs()` or `fetch()`.
#[derive(Clone, Debug, Eq, PartialEq, Default)]
pub struct GitImportStats {
    /// Commits superseded by newly imported commits.
    pub abandoned_commits: Vec<CommitId>,
    /// Remote bookmark `(symbol, (old_remote_ref, new_target))`s to be merged
    /// in to the local bookmarks, sorted by `symbol`.
    pub changed_remote_bookmarks: Vec<(RemoteRefSymbolBuf, (RemoteRef, RefTarget))>,
    /// Remote tag `(symbol, (old_remote_ref, new_target))`s to be merged in to
    /// the local tags, sorted by `symbol`.
    pub changed_remote_tags: Vec<(RemoteRefSymbolBuf, (RemoteRef, RefTarget))>,
    /// Git ref names that couldn't be imported, sorted by name.
    ///
    /// This list doesn't include refs that are supposed to be ignored, such as
    /// refs pointing to non-commit objects.
    pub failed_ref_names: Vec<BString>,
}
/// Result of diffing the Git repo's refs against jj's recorded view; consumed
/// by `import_refs_inner()`.
#[derive(Debug)]
struct RefsToImport {
    /// Git ref `(full_name, new_target)`s to be copied to the view, sorted by
    /// `full_name`.
    changed_git_refs: Vec<(GitRefNameBuf, RefTarget)>,
    /// Remote bookmark `(symbol, (old_remote_ref, new_target))`s to be merged
    /// in to the local bookmarks, sorted by `symbol`.
    changed_remote_bookmarks: Vec<(RemoteRefSymbolBuf, (RemoteRef, RefTarget))>,
    /// Remote tag `(symbol, (old_remote_ref, new_target))`s to be merged in to
    /// the local tags, sorted by `symbol`.
    changed_remote_tags: Vec<(RemoteRefSymbolBuf, (RemoteRef, RefTarget))>,
    /// Git ref names that couldn't be imported, sorted by name.
    failed_ref_names: Vec<BString>,
}
/// Reflect changes made in the underlying Git repo in the Jujutsu repo.
///
/// This function detects conflicts (if both Git and JJ modified a bookmark) and
/// records them in JJ's view.
///
/// Equivalent to [`import_some_refs()`] with a filter that accepts every ref.
pub fn import_refs(
    mut_repo: &mut MutableRepo,
    options: &GitImportOptions,
) -> Result<GitImportStats, GitImportError> {
    import_some_refs(mut_repo, options, |_, _| true)
}
/// Reflect changes made in the underlying Git repo in the Jujutsu repo.
///
/// Only bookmarks and tags whose remote symbol pass the filter will be
/// considered for addition, update, or deletion.
pub fn import_some_refs(
    mut_repo: &mut MutableRepo,
    options: &GitImportOptions,
    git_ref_filter: impl Fn(GitRefKind, RemoteRefSymbol<'_>) -> bool,
) -> Result<GitImportStats, GitImportError> {
    let git_repo = get_git_repo(mut_repo.store())?;
    // Allocate views for new remotes configured externally. There may be
    // remotes with no refs, but the user might still want to "track" absent
    // remote refs.
    for remote_name in iter_remote_names(&git_repo) {
        mut_repo.ensure_remote(&remote_name);
    }
    // Compute the ref diff first, then apply it in a separate step.
    let refs_to_import = diff_refs_to_import(mut_repo.view(), &git_repo, git_ref_filter)?;
    import_refs_inner(mut_repo, refs_to_import, options)
}
/// Applies a precomputed [`RefsToImport`] diff to the repo: imports the new
/// head commits into the backend, merges remote bookmark/tag changes into
/// local refs, and optionally abandons commits that became unreachable.
fn import_refs_inner(
    mut_repo: &mut MutableRepo,
    refs_to_import: RefsToImport,
    options: &GitImportOptions,
) -> Result<GitImportStats, GitImportError> {
    let store = mut_repo.store();
    let git_backend = get_git_backend(store).expect("backend type should have been tested");
    let RefsToImport {
        changed_git_refs,
        changed_remote_bookmarks,
        changed_remote_tags,
        failed_ref_names,
    } = refs_to_import;
    // Bulk-import all reachable Git commits to the backend to reduce overhead
    // of table merging and ref updates.
    //
    // changed_git_refs aren't respected because changed_remote_bookmarks/tags
    // should include all heads that will become reachable in jj.
    let iter_changed_refs = || itertools::chain(&changed_remote_bookmarks, &changed_remote_tags);
    let index = mut_repo.index();
    let missing_head_ids: Vec<&CommitId> = iter_changed_refs()
        .flat_map(|(_, (_, new_target))| new_target.added_ids())
        .filter_map(|id| match index.has_id(id) {
            Ok(false) => Some(Ok(id)),
            Ok(true) => None,
            Err(e) => Some(Err(e)),
        })
        .try_collect()?;
    // Best-effort bulk import; per-ref fallback below reports precise errors.
    let heads_imported = git_backend.import_head_commits(missing_head_ids).is_ok();
    // Import new remote heads
    let mut head_commits = Vec::new();
    let get_commit = |id: &CommitId, symbol: &RemoteRefSymbolBuf| {
        let missing_ref_err = |err| GitImportError::MissingRefAncestor {
            symbol: symbol.clone(),
            err,
        };
        // If bulk-import failed, try again to find bad head or ref.
        if !heads_imported && !index.has_id(id).map_err(GitImportError::Index)? {
            git_backend
                .import_head_commits([id])
                .map_err(missing_ref_err)?;
        }
        store.get_commit(id).map_err(missing_ref_err)
    };
    for (symbol, (_, new_target)) in iter_changed_refs() {
        for id in new_target.added_ids() {
            let commit = get_commit(id, symbol)?;
            head_commits.push(commit);
        }
    }
    // It's unlikely the imported commits were missing, but I/O-related error
    // can still occur.
    mut_repo
        .add_heads(&head_commits)
        .map_err(GitImportError::Backend)?;
    // Apply the change that happened in git since last time we imported refs.
    for (full_name, new_target) in changed_git_refs {
        mut_repo.set_git_ref_target(&full_name, new_target);
    }
    for (symbol, (old_remote_ref, new_target)) in &changed_remote_bookmarks {
        let symbol = symbol.as_ref();
        let base_target = old_remote_ref.tracked_target();
        let new_remote_ref = RemoteRef {
            target: new_target.clone(),
            // Keep the existing tracking state; newly-seen refs get the
            // default state for this remote.
            state: if old_remote_ref != RemoteRef::absent_ref() {
                old_remote_ref.state
            } else {
                default_remote_ref_state_for(GitRefKind::Bookmark, symbol, options)
            },
        };
        if new_remote_ref.is_tracked() {
            mut_repo.merge_local_bookmark(symbol.name, base_target, &new_remote_ref.target)?;
        }
        // Remote-tracking branch is the last known state of the branch in the remote.
        // It shouldn't diverge even if we had inconsistent view.
        mut_repo.set_remote_bookmark(symbol, new_remote_ref);
    }
    for (symbol, (old_remote_ref, new_target)) in &changed_remote_tags {
        let symbol = symbol.as_ref();
        let base_target = old_remote_ref.tracked_target();
        let new_remote_ref = RemoteRef {
            target: new_target.clone(),
            state: if old_remote_ref != RemoteRef::absent_ref() {
                old_remote_ref.state
            } else {
                default_remote_ref_state_for(GitRefKind::Tag, symbol, options)
            },
        };
        if new_remote_ref.is_tracked() {
            mut_repo.merge_local_tag(symbol.name, base_target, &new_remote_ref.target)?;
        }
        // Remote-tracking tag is the last known state of the tag in the remote.
        // It shouldn't diverge even if we had inconsistent view.
        mut_repo.set_remote_tag(symbol, new_remote_ref);
    }
    let abandoned_commits = if options.abandon_unreachable_commits {
        abandon_unreachable_commits(mut_repo, &changed_remote_bookmarks, &changed_remote_tags)
            .map_err(GitImportError::Backend)?
    } else {
        vec![]
    };
    let stats = GitImportStats {
        abandoned_commits,
        changed_remote_bookmarks,
        changed_remote_tags,
        failed_ref_names,
    };
    Ok(stats)
}
/// Finds commits that used to be reachable in git that no longer are reachable.
/// Those commits will be recorded as abandoned in the `MutableRepo`.
///
/// Only commits that were heads of the *old* remote ref targets can become
/// unreachable, so the search is rooted at those heads.
fn abandon_unreachable_commits(
    mut_repo: &mut MutableRepo,
    changed_remote_bookmarks: &[(RemoteRefSymbolBuf, (RemoteRef, RefTarget))],
    changed_remote_tags: &[(RemoteRefSymbolBuf, (RemoteRef, RefTarget))],
) -> BackendResult<Vec<CommitId>> {
    let hidable_git_heads = itertools::chain(changed_remote_bookmarks, changed_remote_tags)
        .flat_map(|(_, (old_remote_ref, _))| old_remote_ref.target.added_ids())
        .cloned()
        .collect_vec();
    if hidable_git_heads.is_empty() {
        return Ok(vec![]);
    }
    let pinned_expression = RevsetExpression::union_all(&[
        // Local refs are usually visible, no need to filter out hidden
        RevsetExpression::commits(pinned_commit_ids(mut_repo.view())),
        RevsetExpression::commits(remotely_pinned_commit_ids(mut_repo.view()))
            // Hidden remote refs should not contribute to pinning
            .intersection(&RevsetExpression::visible_heads().ancestors()),
        RevsetExpression::root(),
    ]);
    // Everything reachable from the old heads but not from any pinned commit.
    let abandoned_expression = pinned_expression
        .range(&RevsetExpression::commits(hidable_git_heads))
        // Don't include already-abandoned commits in GitImportStats
        .intersection(&RevsetExpression::visible_heads().ancestors());
    let abandoned_commit_ids: Vec<_> = abandoned_expression
        .evaluate(mut_repo)
        .map_err(|err| err.into_backend_error())?
        .iter()
        .try_collect()
        .map_err(|err| err.into_backend_error())?;
    for id in &abandoned_commit_ids {
        let commit = mut_repo.store().get_commit(id)?;
        mut_repo.record_abandoned_commit(&commit);
    }
    Ok(abandoned_commit_ids)
}
/// Calculates diff of git refs to be imported.
///
/// Refs still present in the `known_*` maps after scanning the actual Git
/// refs have been deleted in Git, and are emitted as absent targets.
fn diff_refs_to_import(
    view: &View,
    git_repo: &gix::Repository,
    git_ref_filter: impl Fn(GitRefKind, RemoteRefSymbol<'_>) -> bool,
) -> Result<RefsToImport, GitImportError> {
    let mut known_git_refs = view
        .git_refs()
        .iter()
        .filter_map(|(full_name, target)| {
            // TODO: or clean up invalid ref in case it was stored due to historical bug?
            let (kind, symbol) =
                parse_git_ref(full_name).expect("stored git ref should be parsable");
            git_ref_filter(kind, symbol).then_some((full_name.as_ref(), target))
        })
        .collect();
    let mut known_remote_bookmarks = view
        .all_remote_bookmarks()
        .filter(|&(symbol, _)| git_ref_filter(GitRefKind::Bookmark, symbol))
        .map(|(symbol, remote_ref)| (RemoteRefKey(symbol), remote_ref))
        .collect();
    let mut known_remote_tags = {
        // Exclude real remote tags, which should never be updated by Git.
        let remote = REMOTE_NAME_FOR_LOCAL_GIT_REPO;
        view.remote_tags(remote)
            .map(|(name, remote_ref)| (name.to_remote_symbol(remote), remote_ref))
            .filter(|&(symbol, _)| git_ref_filter(GitRefKind::Tag, symbol))
            .map(|(symbol, remote_ref)| (RemoteRefKey(symbol), remote_ref))
            .collect()
    };
    let mut changed_git_refs = Vec::new();
    let mut changed_remote_bookmarks = Vec::new();
    let mut changed_remote_tags = Vec::new();
    let mut failed_ref_names = Vec::new();
    let actual = git_repo.references().map_err(GitImportError::from_git)?;
    // Scan local branches, remote-tracking branches, and tags. Each pass
    // removes matched entries from the known_* maps.
    collect_changed_refs_to_import(
        actual.local_branches().map_err(GitImportError::from_git)?,
        &mut known_git_refs,
        &mut known_remote_bookmarks,
        &mut changed_git_refs,
        &mut changed_remote_bookmarks,
        &mut failed_ref_names,
        &git_ref_filter,
    )?;
    collect_changed_refs_to_import(
        actual.remote_branches().map_err(GitImportError::from_git)?,
        &mut known_git_refs,
        &mut known_remote_bookmarks,
        &mut changed_git_refs,
        &mut changed_remote_bookmarks,
        &mut failed_ref_names,
        &git_ref_filter,
    )?;
    collect_changed_refs_to_import(
        actual.tags().map_err(GitImportError::from_git)?,
        &mut known_git_refs,
        &mut known_remote_tags,
        &mut changed_git_refs,
        &mut changed_remote_tags,
        &mut failed_ref_names,
        &git_ref_filter,
    )?;
    // Anything left in the known maps no longer exists in Git: mark absent.
    for full_name in known_git_refs.into_keys() {
        changed_git_refs.push((full_name.to_owned(), RefTarget::absent()));
    }
    for (RemoteRefKey(symbol), old) in known_remote_bookmarks {
        if old.is_present() {
            changed_remote_bookmarks.push((symbol.to_owned(), (old.clone(), RefTarget::absent())));
        }
    }
    for (RemoteRefKey(symbol), old) in known_remote_tags {
        if old.is_present() {
            changed_remote_tags.push((symbol.to_owned(), (old.clone(), RefTarget::absent())));
        }
    }
    // Stabilize merge order and output.
    changed_git_refs.sort_unstable_by(|(name1, _), (name2, _)| name1.cmp(name2));
    changed_remote_bookmarks.sort_unstable_by(|(sym1, _), (sym2, _)| sym1.cmp(sym2));
    changed_remote_tags.sort_unstable_by(|(sym1, _), (sym2, _)| sym1.cmp(sym2));
    failed_ref_names.sort_unstable();
    Ok(RefsToImport {
        changed_git_refs,
        changed_remote_bookmarks,
        changed_remote_tags,
        failed_ref_names,
    })
}
/// Walks `actual_git_refs` (one category of refs from the Git repo) and
/// records which refs changed compared to the previously known state.
///
/// Refs that are still present in Git are removed from `known_git_refs` and
/// `known_remote_refs`; the caller treats whatever remains in those maps
/// afterwards as deleted. Ref names that cannot be imported (non-UTF-8, or
/// under the reserved namespace) are reported via `failed_ref_names`.
fn collect_changed_refs_to_import(
    actual_git_refs: gix::reference::iter::Iter,
    known_git_refs: &mut HashMap<&GitRefName, &RefTarget>,
    known_remote_refs: &mut HashMap<RemoteRefKey<'_>, &RemoteRef>,
    changed_git_refs: &mut Vec<(GitRefNameBuf, RefTarget)>,
    changed_remote_refs: &mut Vec<(RemoteRefSymbolBuf, (RemoteRef, RefTarget))>,
    failed_ref_names: &mut Vec<BString>,
    git_ref_filter: impl Fn(GitRefKind, RemoteRefSymbol<'_>) -> bool,
) -> Result<(), GitImportError> {
    for git_ref in actual_git_refs {
        let git_ref = git_ref.map_err(GitImportError::from_git)?;
        let full_name_bytes = git_ref.name().as_bstr();
        let Ok(full_name) = str::from_utf8(full_name_bytes) else {
            // Non-utf8 refs cannot be imported.
            failed_ref_names.push(full_name_bytes.to_owned());
            continue;
        };
        // Refs under the reserved namespace must not be imported back.
        if full_name.starts_with(RESERVED_REMOTE_REF_NAMESPACE) {
            failed_ref_names.push(full_name_bytes.to_owned());
            continue;
        }
        let full_name = GitRefName::new(full_name);
        let Some((kind, symbol)) = parse_git_ref(full_name) else {
            // Skip special refs such as refs/remotes/*/HEAD.
            continue;
        };
        if !git_ref_filter(kind, symbol) {
            continue;
        }
        let old_git_target = known_git_refs.get(full_name).copied().flatten();
        // If the old target was a plain commit, pass its OID down so the
        // resolver can use it (e.g. for peeling/validation).
        let old_git_oid = old_git_target
            .as_normal()
            .map(|id| gix::oid::from_bytes_unchecked(id.as_bytes()));
        let Some(oid) = resolve_git_ref_to_commit_id(&git_ref, old_git_oid) else {
            // Skip (or remove existing) invalid refs.
            continue;
        };
        let new_target = RefTarget::normal(CommitId::from_bytes(oid.as_bytes()));
        // Refs we've seen still exist in Git; whatever remains in the map
        // afterwards is treated as deleted by the caller.
        known_git_refs.remove(full_name);
        if new_target != *old_git_target {
            changed_git_refs.push((full_name.to_owned(), new_target.clone()));
        }
        // TODO: Make it configurable which remotes are publishing and update public
        // heads here.
        let old_remote_ref = known_remote_refs
            .remove(&symbol)
            .unwrap_or_else(|| RemoteRef::absent_ref());
        if new_target != old_remote_ref.target {
            changed_remote_refs.push((symbol.to_owned(), (old_remote_ref.clone(), new_target)));
        }
    }
    Ok(())
}
/// Decides the initial tracking state for a newly imported remote ref.
///
/// Bookmarks on the special local-Git remote, or matched by the user's
/// auto-tracking options, start out tracked; other bookmarks start as new.
/// Tags are always tracked.
fn default_remote_ref_state_for(
    kind: GitRefKind,
    symbol: RemoteRefSymbol<'_>,
    options: &GitImportOptions,
) -> RemoteRefState {
    match kind {
        GitRefKind::Bookmark => {
            let auto_tracked_by_pattern = options
                .remote_auto_track_bookmarks
                .get(symbol.remote)
                .is_some_and(|matcher| matcher.is_match(symbol.name.as_str()));
            let should_track = symbol.remote == REMOTE_NAME_FOR_LOCAL_GIT_REPO
                || options.auto_local_bookmark
                || auto_tracked_by_pattern;
            if should_track {
                RemoteRefState::Tracked
            } else {
                RemoteRefState::New
            }
        }
        // TODO: add option to not track tags by default?
        GitRefKind::Tag => RemoteRefState::Tracked,
    }
}
/// Commits referenced by local branches or tags.
///
/// On `import_refs()`, this is similar to collecting commits referenced by
/// `view.git_refs()`. Main difference is that local branches can be moved by
/// tracking remotes, and such mutation isn't applied to `view.git_refs()` yet.
fn pinned_commit_ids(view: &View) -> Vec<CommitId> {
    let mut ids = Vec::new();
    for (_, target) in itertools::chain(view.local_bookmarks(), view.local_tags()) {
        ids.extend(target.added_ids().cloned());
    }
    ids
}
/// Commits referenced by untracked remote bookmarks/tags including hidden ones.
///
/// Tracked remote refs aren't included because they should have been merged
/// into the local counterparts, and the changes pulled from one remote should
/// propagate to the other remotes on later push. OTOH, untracked remote refs
/// are considered independent refs.
fn remotely_pinned_commit_ids(view: &View) -> Vec<CommitId> {
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/simple_op_store.rs | lib/src/simple_op_store.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::fmt::Debug;
use std::fs;
use std::io;
use std::io::ErrorKind;
use std::io::Write as _;
use std::path::Path;
use std::path::PathBuf;
use std::time::SystemTime;
use async_trait::async_trait;
use itertools::Itertools as _;
use pollster::FutureExt as _;
use prost::Message as _;
use smallvec::SmallVec;
use tempfile::NamedTempFile;
use thiserror::Error;
use crate::backend::BackendInitError;
use crate::backend::CommitId;
use crate::backend::MillisSinceEpoch;
use crate::backend::Timestamp;
use crate::content_hash::blake2b_hash;
use crate::dag_walk;
use crate::file_util::IoResultExt as _;
use crate::file_util::PathError;
use crate::file_util::persist_content_addressed_temp_file;
use crate::merge::Merge;
use crate::object_id::HexPrefix;
use crate::object_id::ObjectId;
use crate::object_id::PrefixResolution;
use crate::op_store;
use crate::op_store::OpStore;
use crate::op_store::OpStoreError;
use crate::op_store::OpStoreResult;
use crate::op_store::Operation;
use crate::op_store::OperationId;
use crate::op_store::OperationMetadata;
use crate::op_store::RefTarget;
use crate::op_store::RemoteRef;
use crate::op_store::RemoteRefState;
use crate::op_store::RemoteView;
use crate::op_store::RootOperationData;
use crate::op_store::TimestampRange;
use crate::op_store::View;
use crate::op_store::ViewId;
use crate::ref_name::GitRefNameBuf;
use crate::ref_name::RefNameBuf;
use crate::ref_name::RemoteNameBuf;
use crate::ref_name::WorkspaceName;
use crate::ref_name::WorkspaceNameBuf;
// BLAKE2b-512 hash length in bytes
const OPERATION_ID_LENGTH: usize = 64;
// View IDs use the same BLAKE2b-512 digest length as operation IDs.
const VIEW_ID_LENGTH: usize = 64;
/// Error that may occur during [`SimpleOpStore`] initialization.
// The wrapped `PathError` is the underlying filesystem failure (with path).
#[derive(Debug, Error)]
#[error("Failed to initialize simple operation store")]
pub struct SimpleOpStoreInitError(#[from] pub PathError);
impl From<SimpleOpStoreInitError> for BackendInitError {
    /// Lets `?` convert store-init failures into the generic backend-init
    /// error by boxing the source error.
    fn from(err: SimpleOpStoreInitError) -> Self {
        Self(err.into())
    }
}
// OpStore implementation storing operations and views as content-addressed
// protobuf files under `path`.
#[derive(Debug)]
pub struct SimpleOpStore {
    // Root directory containing the "operations" and "views" subdirectories.
    path: PathBuf,
    // Data used to synthesize the virtual root view (see `read_view`).
    root_data: RootOperationData,
    // All-zeros ID reserved for the virtual root operation.
    root_operation_id: OperationId,
    // All-zeros ID reserved for the virtual root view.
    root_view_id: ViewId,
}
impl SimpleOpStore {
    /// Backend name recorded for this store implementation.
    pub fn name() -> &'static str {
        "simple_op_store"
    }
    /// Creates an empty OpStore. Returns error if it already exists.
    pub fn init(
        store_path: &Path,
        root_data: RootOperationData,
    ) -> Result<Self, SimpleOpStoreInitError> {
        let store = Self::new(store_path, root_data);
        store.init_base_dirs()?;
        Ok(store)
    }
    /// Load an existing OpStore
    pub fn load(store_path: &Path, root_data: RootOperationData) -> Self {
        Self::new(store_path, root_data)
    }
    fn new(store_path: &Path, root_data: RootOperationData) -> Self {
        // The root operation/view IDs are the reserved all-zeros digests.
        let root_operation_id = OperationId::new(vec![0; OPERATION_ID_LENGTH]);
        let root_view_id = ViewId::new(vec![0; VIEW_ID_LENGTH]);
        Self {
            path: store_path.to_owned(),
            root_data,
            root_operation_id,
            root_view_id,
        }
    }
    /// Creates the "views" and "operations" subdirectories.
    fn init_base_dirs(&self) -> Result<(), PathError> {
        let dirs = [self.views_dir(), self.operations_dir()];
        for dir in &dirs {
            fs::create_dir(dir).context(dir)?;
        }
        Ok(())
    }
    fn views_dir(&self) -> PathBuf {
        self.path.join("views")
    }
    fn operations_dir(&self) -> PathBuf {
        self.path.join("operations")
    }
}
#[async_trait]
impl OpStore for SimpleOpStore {
    fn name(&self) -> &str {
        Self::name()
    }
    fn root_operation_id(&self) -> &OperationId {
        &self.root_operation_id
    }
    /// Reads a view; the virtual root view is synthesized in memory instead
    /// of being read from disk.
    async fn read_view(&self, id: &ViewId) -> OpStoreResult<View> {
        if *id == self.root_view_id {
            return Ok(View::make_root(self.root_data.root_commit_id.clone()));
        }
        let path = self.views_dir().join(id.hex());
        let buf = fs::read(&path)
            .context(&path)
            .map_err(|err| io_to_read_error(err, id))?;
        let proto = crate::protos::simple_op_store::View::decode(&*buf)
            .map_err(|err| to_read_error(err.into(), id))?;
        view_from_proto(proto).map_err(|err| to_read_error(err.into(), id))
    }
    /// Writes a view as a content-addressed file: the ID is the BLAKE2b hash
    /// of the in-memory value, and a temp file is atomically renamed into
    /// place so readers never see partial writes.
    async fn write_view(&self, view: &View) -> OpStoreResult<ViewId> {
        let dir = self.views_dir();
        let temp_file = NamedTempFile::new_in(&dir)
            .context(&dir)
            .map_err(|err| io_to_write_error(err, "view"))?;
        let proto = view_to_proto(view);
        temp_file
            .as_file()
            .write_all(&proto.encode_to_vec())
            .context(temp_file.path())
            .map_err(|err| io_to_write_error(err, "view"))?;
        // The ID hashes the in-memory value, not the serialized proto bytes.
        let id = ViewId::new(blake2b_hash(view).to_vec());
        let new_path = dir.join(id.hex());
        persist_content_addressed_temp_file(temp_file, &new_path)
            .context(&new_path)
            .map_err(|err| io_to_write_error(err, "view"))?;
        Ok(id)
    }
    /// Reads an operation; the virtual root operation is synthesized in
    /// memory instead of being read from disk.
    async fn read_operation(&self, id: &OperationId) -> OpStoreResult<Operation> {
        if *id == self.root_operation_id {
            return Ok(Operation::make_root(self.root_view_id.clone()));
        }
        let path = self.operations_dir().join(id.hex());
        let buf = fs::read(&path)
            .context(&path)
            .map_err(|err| io_to_read_error(err, id))?;
        let proto = crate::protos::simple_op_store::Operation::decode(&*buf)
            .map_err(|err| to_read_error(err.into(), id))?;
        let mut operation =
            operation_from_proto(proto).map_err(|err| to_read_error(err.into(), id))?;
        if operation.parents.is_empty() {
            // Repos created before we had the root operation will have an operation without
            // parents.
            operation.parents.push(self.root_operation_id.clone());
        }
        Ok(operation)
    }
    /// Writes an operation (same content-addressed temp-file scheme as
    /// `write_view`). Every stored operation must have at least one parent.
    async fn write_operation(&self, operation: &Operation) -> OpStoreResult<OperationId> {
        assert!(!operation.parents.is_empty());
        let dir = self.operations_dir();
        let temp_file = NamedTempFile::new_in(&dir)
            .context(&dir)
            .map_err(|err| io_to_write_error(err, "operation"))?;
        let proto = operation_to_proto(operation);
        temp_file
            .as_file()
            .write_all(&proto.encode_to_vec())
            .context(temp_file.path())
            .map_err(|err| io_to_write_error(err, "operation"))?;
        let id = OperationId::new(blake2b_hash(operation).to_vec());
        let new_path = dir.join(id.hex());
        persist_content_addressed_temp_file(temp_file, &new_path)
            .context(&new_path)
            .map_err(|err| io_to_write_error(err, "operation"))?;
        Ok(id)
    }
    /// Resolves a hex prefix to a full operation ID by scanning file names in
    /// the operations directory. The virtual root operation also participates
    /// in matching.
    async fn resolve_operation_id_prefix(
        &self,
        prefix: &HexPrefix,
    ) -> OpStoreResult<PrefixResolution<OperationId>> {
        let op_dir = self.operations_dir();
        let find = || -> io::Result<_> {
            let matches_root = prefix.matches(&self.root_operation_id);
            let hex_prefix = prefix.hex();
            if hex_prefix.len() == OPERATION_ID_LENGTH * 2 {
                // Fast path for full-length ID
                if matches_root || op_dir.join(hex_prefix).try_exists()? {
                    let id = OperationId::from_bytes(prefix.as_full_bytes().unwrap());
                    return Ok(PrefixResolution::SingleMatch(id));
                } else {
                    return Ok(PrefixResolution::NoMatch);
                }
            }
            // Seed with the root operation if it matches, then scan the
            // directory; two distinct matches means the prefix is ambiguous.
            let mut matched = matches_root.then(|| self.root_operation_id.clone());
            for entry in op_dir.read_dir()? {
                let Ok(name) = entry?.file_name().into_string() else {
                    continue; // Skip invalid UTF-8
                };
                if !name.starts_with(&hex_prefix) {
                    continue;
                }
                let Some(id) = OperationId::try_from_hex(&name) else {
                    continue; // Skip invalid hex
                };
                if matched.is_some() {
                    return Ok(PrefixResolution::AmbiguousMatch);
                }
                matched = Some(id);
            }
            if let Some(id) = matched {
                Ok(PrefixResolution::SingleMatch(id))
            } else {
                Ok(PrefixResolution::NoMatch)
            }
        };
        find()
            .context(&op_dir)
            .map_err(|err| OpStoreError::Other(err.into()))
    }
    /// Garbage-collects operation and view files that are not reachable from
    /// `head_ids` and whose mtime is not newer than `keep_newer`.
    #[tracing::instrument(skip(self))]
    fn gc(&self, head_ids: &[OperationId], keep_newer: SystemTime) -> OpStoreResult<()> {
        let to_op_id = |entry: &fs::DirEntry| -> Option<OperationId> {
            let name = entry.file_name().into_string().ok()?;
            OperationId::try_from_hex(name)
        };
        let to_view_id = |entry: &fs::DirEntry| -> Option<ViewId> {
            let name = entry.file_name().into_string().ok()?;
            ViewId::try_from_hex(name)
        };
        let remove_file_if_not_new = |entry: &fs::DirEntry| -> Result<(), PathError> {
            let path = entry.path();
            // Check timestamp, but there's still TOCTOU problem if an existing
            // file is renewed.
            let metadata = entry.metadata().context(&path)?;
            let mtime = metadata.modified().expect("unsupported platform?");
            if mtime > keep_newer {
                tracing::trace!(?path, "not removing");
                Ok(())
            } else {
                tracing::trace!(?path, "removing");
                fs::remove_file(&path).context(&path)
            }
        };
        // Reachable objects are resolved without considering the keep_newer
        // parameter. We could collect ancestors of the "new" operations here,
        // but more files can be added anyway after that.
        let read_op = |id: &OperationId| {
            self.read_operation(id)
                .block_on()
                .map(|data| (id.clone(), data))
        };
        let reachable_ops: HashMap<OperationId, Operation> = dag_walk::dfs_ok(
            head_ids.iter().map(read_op),
            |(id, _)| id.clone(),
            |(_, data)| data.parents.iter().map(read_op).collect_vec(),
        )
        .try_collect()?;
        let reachable_views: HashSet<&ViewId> =
            reachable_ops.values().map(|data| &data.view_id).collect();
        tracing::info!(
            reachable_op_count = reachable_ops.len(),
            reachable_view_count = reachable_views.len(),
            "collected reachable objects"
        );
        // Prune operations first so a view is never deleted while an
        // operation file still referencing it survives.
        let prune_ops = || -> Result<(), PathError> {
            let op_dir = self.operations_dir();
            for entry in op_dir.read_dir().context(&op_dir)? {
                let entry = entry.context(&op_dir)?;
                let Some(id) = to_op_id(&entry) else {
                    tracing::trace!(?entry, "skipping invalid file name");
                    continue;
                };
                if reachable_ops.contains_key(&id) {
                    continue;
                }
                // If the operation was added after collecting reachable_views,
                // its view mtime would also be renewed. So there's no need to
                // update the reachable_views set to preserve the view.
                remove_file_if_not_new(&entry)?;
            }
            Ok(())
        };
        prune_ops().map_err(|err| OpStoreError::Other(err.into()))?;
        let prune_views = || -> Result<(), PathError> {
            let view_dir = self.views_dir();
            for entry in view_dir.read_dir().context(&view_dir)? {
                let entry = entry.context(&view_dir)?;
                let Some(id) = to_view_id(&entry) else {
                    tracing::trace!(?entry, "skipping invalid file name");
                    continue;
                };
                if reachable_views.contains(&id) {
                    continue;
                }
                remove_file_if_not_new(&entry)?;
            }
            Ok(())
        };
        prune_views().map_err(|err| OpStoreError::Other(err.into()))?;
        Ok(())
    }
}
/// Maps a filesystem read failure to an `OpStoreError`, distinguishing a
/// missing object file from other I/O errors.
fn io_to_read_error(err: PathError, id: &impl ObjectId) -> OpStoreError {
    match err.source.kind() {
        ErrorKind::NotFound => OpStoreError::ObjectNotFound {
            object_type: id.object_type(),
            hash: id.hex(),
            source: Box::new(err),
        },
        _ => to_read_error(err.into(), id),
    }
}
/// Wraps an arbitrary error as a generic read-object failure for `id`.
fn to_read_error(
    source: Box<dyn std::error::Error + Send + Sync>,
    id: &impl ObjectId,
) -> OpStoreError {
    let object_type = id.object_type();
    let hash = id.hex();
    OpStoreError::ReadObject {
        object_type,
        hash,
        source,
    }
}
/// Wraps a filesystem failure as a write-object error for `object_type`.
fn io_to_write_error(err: PathError, object_type: &'static str) -> OpStoreError {
    let source: Box<dyn std::error::Error + Send + Sync> = Box::new(err);
    OpStoreError::WriteObject {
        object_type,
        source,
    }
}
// Validation errors that can occur after a proto message has been
// successfully decoded.
#[derive(Debug, Error)]
enum PostDecodeError {
    #[error("Invalid hash length (expected {expected} bytes, got {actual} bytes)")]
    InvalidHashLength { expected: usize, actual: usize },
    #[error("Invalid remote ref state value {0}")]
    InvalidRemoteRefStateValue(i32),
    // A serialized RefTarget merge must have an odd number of terms.
    #[error("Invalid number of ref target terms {0}")]
    EvenNumberOfRefTargetTerms(usize),
}
/// Validates the byte length and wraps the bytes as an `OperationId`.
fn operation_id_from_proto(bytes: Vec<u8>) -> Result<OperationId, PostDecodeError> {
    match bytes.len() {
        OPERATION_ID_LENGTH => Ok(OperationId::new(bytes)),
        actual => Err(PostDecodeError::InvalidHashLength {
            expected: OPERATION_ID_LENGTH,
            actual,
        }),
    }
}
/// Validates the byte length and wraps the bytes as a `ViewId`.
fn view_id_from_proto(bytes: Vec<u8>) -> Result<ViewId, PostDecodeError> {
    if bytes.len() == VIEW_ID_LENGTH {
        Ok(ViewId::new(bytes))
    } else {
        Err(PostDecodeError::InvalidHashLength {
            expected: VIEW_ID_LENGTH,
            actual: bytes.len(),
        })
    }
}
fn timestamp_to_proto(timestamp: &Timestamp) -> crate::protos::simple_op_store::Timestamp {
crate::protos::simple_op_store::Timestamp {
millis_since_epoch: timestamp.timestamp.0,
tz_offset: timestamp.tz_offset,
}
}
/// Converts a proto timestamp back into a [`Timestamp`].
fn timestamp_from_proto(proto: crate::protos::simple_op_store::Timestamp) -> Timestamp {
    let millis = MillisSinceEpoch(proto.millis_since_epoch);
    Timestamp {
        timestamp: millis,
        tz_offset: proto.tz_offset,
    }
}
/// Serializes [`OperationMetadata`] to its proto representation.
fn operation_metadata_to_proto(
    metadata: &OperationMetadata,
) -> crate::protos::simple_op_store::OperationMetadata {
    let start_time = timestamp_to_proto(&metadata.time.start);
    let end_time = timestamp_to_proto(&metadata.time.end);
    crate::protos::simple_op_store::OperationMetadata {
        start_time: Some(start_time),
        end_time: Some(end_time),
        description: metadata.description.clone(),
        hostname: metadata.hostname.clone(),
        username: metadata.username.clone(),
        is_snapshot: metadata.is_snapshot,
        tags: metadata.tags.clone(),
    }
}
/// Deserializes [`OperationMetadata`] from its proto representation.
/// Missing timestamps fall back to the proto default.
fn operation_metadata_from_proto(
    proto: crate::protos::simple_op_store::OperationMetadata,
) -> OperationMetadata {
    OperationMetadata {
        time: TimestampRange {
            start: timestamp_from_proto(proto.start_time.unwrap_or_default()),
            end: timestamp_from_proto(proto.end_time.unwrap_or_default()),
        },
        description: proto.description,
        hostname: proto.hostname,
        username: proto.username,
        is_snapshot: proto.is_snapshot,
        tags: proto.tags,
    }
}
/// Flattens the commit→predecessors map into a list of proto entries
/// (in the map's sorted key order).
fn commit_predecessors_map_to_proto(
    map: &BTreeMap<CommitId, Vec<CommitId>>,
) -> Vec<crate::protos::simple_op_store::CommitPredecessors> {
    let mut entries = Vec::with_capacity(map.len());
    for (commit_id, predecessor_ids) in map {
        entries.push(crate::protos::simple_op_store::CommitPredecessors {
            commit_id: commit_id.to_bytes(),
            predecessor_ids: predecessor_ids.iter().map(|id| id.to_bytes()).collect(),
        });
    }
    entries
}
/// Rebuilds the commit→predecessors map from its proto entries.
fn commit_predecessors_map_from_proto(
    proto: Vec<crate::protos::simple_op_store::CommitPredecessors>,
) -> BTreeMap<CommitId, Vec<CommitId>> {
    let mut map = BTreeMap::new();
    for entry in proto {
        let predecessor_ids: Vec<_> = entry
            .predecessor_ids
            .into_iter()
            .map(CommitId::new)
            .collect();
        map.insert(CommitId::new(entry.commit_id), predecessor_ids);
    }
    map
}
/// Serializes an [`Operation`] to its proto representation.
///
/// `stores_commit_predecessors` records whether the (possibly empty) map was
/// present at all, so absence and emptiness round-trip distinctly.
fn operation_to_proto(operation: &Operation) -> crate::protos::simple_op_store::Operation {
    let (commit_predecessors, stores_commit_predecessors) =
        if let Some(map) = &operation.commit_predecessors {
            (commit_predecessors_map_to_proto(map), true)
        } else {
            (Vec::new(), false)
        };
    crate::protos::simple_op_store::Operation {
        view_id: operation.view_id.as_bytes().to_vec(),
        parents: operation.parents.iter().map(|id| id.to_bytes()).collect(),
        metadata: Some(operation_metadata_to_proto(&operation.metadata)),
        commit_predecessors,
        stores_commit_predecessors,
    }
}
fn operation_from_proto(
proto: crate::protos::simple_op_store::Operation,
) -> Result<Operation, PostDecodeError> {
let parents = proto
.parents
.into_iter()
.map(operation_id_from_proto)
.try_collect()?;
let view_id = view_id_from_proto(proto.view_id)?;
let metadata = operation_metadata_from_proto(proto.metadata.unwrap_or_default());
let commit_predecessors = proto
.stores_commit_predecessors
.then(|| commit_predecessors_map_from_proto(proto.commit_predecessors));
Ok(Operation {
view_id,
parents,
metadata,
commit_predecessors,
})
}
/// Serializes a [`View`] to its proto representation.
///
/// Bookmarks are written both in the legacy per-bookmark format and in the
/// newer `remote_views` format so older jj versions can still read the data.
fn view_to_proto(view: &View) -> crate::protos::simple_op_store::View {
    let wc_commit_ids = view
        .wc_commit_ids
        .iter()
        .map(|(name, id)| (name.into(), id.to_bytes()))
        .collect();
    let head_ids = view.head_ids.iter().map(|id| id.to_bytes()).collect();
    let bookmarks = bookmark_views_to_proto_legacy(&view.local_bookmarks, &view.remote_views);
    let local_tags = view
        .local_tags
        .iter()
        .map(|(name, target)| crate::protos::simple_op_store::Tag {
            name: name.into(),
            target: ref_target_to_proto(target),
        })
        .collect();
    let remote_views = remote_views_to_proto(&view.remote_views);
    let git_refs = view
        .git_refs
        .iter()
        .map(|(name, target)| {
            // The deprecated `commit_id` field is left empty; `target` is the
            // current representation.
            #[expect(deprecated)]
            crate::protos::simple_op_store::GitRef {
                name: name.into(),
                commit_id: Default::default(),
                target: ref_target_to_proto(target),
            }
        })
        .collect();
    let git_head = ref_target_to_proto(&view.git_head);
    // Deprecated singular fields (wc_commit_id, git_head_legacy) are written
    // empty; only the current fields carry data.
    #[expect(deprecated)]
    crate::protos::simple_op_store::View {
        head_ids,
        wc_commit_id: Default::default(),
        wc_commit_ids,
        bookmarks,
        local_tags,
        remote_views,
        git_refs,
        git_head_legacy: Default::default(),
        git_head,
        // New/loaded view should have been migrated to the latest format
        has_git_refs_migrated_to_remote_tags: true,
    }
}
/// Deserializes a [`View`] from its proto representation, migrating data
/// written by older jj versions where needed.
fn view_from_proto(proto: crate::protos::simple_op_store::View) -> Result<View, PostDecodeError> {
    // TODO: validate commit id length?
    // For compatibility with old repos before we had support for multiple working
    // copies
    let mut wc_commit_ids = BTreeMap::new();
    #[expect(deprecated)]
    if !proto.wc_commit_id.is_empty() {
        wc_commit_ids.insert(
            WorkspaceName::DEFAULT.to_owned(),
            CommitId::new(proto.wc_commit_id),
        );
    }
    for (name, commit_id) in proto.wc_commit_ids {
        wc_commit_ids.insert(WorkspaceNameBuf::from(name), CommitId::new(commit_id));
    }
    let head_ids = proto.head_ids.into_iter().map(CommitId::new).collect();
    let (local_bookmarks, mut remote_views) = bookmark_views_from_proto_legacy(proto.bookmarks)?;
    let local_tags = proto
        .local_tags
        .into_iter()
        .map(|tag_proto| {
            let name: RefNameBuf = tag_proto.name.into();
            (name, ref_target_from_proto(tag_proto.target))
        })
        .collect();
    let git_refs: BTreeMap<_, _> = proto
        .git_refs
        .into_iter()
        .map(|git_ref| {
            let name: GitRefNameBuf = git_ref.name.into();
            let target = if git_ref.target.is_some() {
                ref_target_from_proto(git_ref.target)
            } else {
                // Legacy format
                #[expect(deprecated)]
                RefTarget::normal(CommitId::new(git_ref.commit_id))
            };
            (name, target)
        })
        .collect();
    // Use legacy remote_views only when new data isn't available (jj < 0.34)
    if !proto.remote_views.is_empty() {
        remote_views = remote_views_from_proto(proto.remote_views)?;
    }
    #[cfg(feature = "git")]
    if !proto.has_git_refs_migrated_to_remote_tags {
        tracing::info!("migrating Git-tracking tags");
        // Old views tracked tags only via refs/tags/* Git refs; turn those
        // into tracked remote tags on the special local-Git remote.
        let git_tags: BTreeMap<_, _> = git_refs
            .iter()
            .filter_map(|(full_name, target)| {
                let name = full_name.as_str().strip_prefix("refs/tags/")?;
                assert!(!name.is_empty());
                let name: RefNameBuf = name.into();
                let remote_ref = RemoteRef {
                    target: target.clone(),
                    state: RemoteRefState::Tracked,
                };
                Some((name, remote_ref))
            })
            .collect();
    if !git_tags.is_empty() {
            let git_view = remote_views
                .entry(crate::git::REMOTE_NAME_FOR_LOCAL_GIT_REPO.to_owned())
                .or_default();
            assert!(git_view.tags.is_empty());
            git_view.tags = git_tags;
        }
    }
    #[expect(deprecated)]
    let git_head = if proto.git_head.is_some() {
        ref_target_from_proto(proto.git_head)
    } else if !proto.git_head_legacy.is_empty() {
        // Legacy format: bare commit id bytes instead of a RefTarget message.
        RefTarget::normal(CommitId::new(proto.git_head_legacy))
    } else {
        RefTarget::absent()
    };
    Ok(View {
        head_ids,
        local_bookmarks,
        local_tags,
        remote_views,
        git_refs,
        git_head,
        wc_commit_ids,
    })
}
/// Serializes local bookmarks together with their per-remote counterparts in
/// the legacy per-bookmark proto format (still written for backward
/// compatibility with older jj versions).
fn bookmark_views_to_proto_legacy(
    local_bookmarks: &BTreeMap<RefNameBuf, RefTarget>,
    remote_views: &BTreeMap<RemoteNameBuf, RemoteView>,
) -> Vec<crate::protos::simple_op_store::Bookmark> {
    op_store::merge_join_ref_views(local_bookmarks, remote_views, |view| &view.bookmarks)
        .map(|(name, bookmark_target)| {
            let local_target = ref_target_to_proto(bookmark_target.local_target);
            // TODO: Drop serialization to the old format in jj 0.40 or so.
            let remote_bookmarks = bookmark_target
                .remote_refs
                .iter()
                .map(
                    |&(remote_name, remote_ref)| crate::protos::simple_op_store::RemoteBookmark {
                        remote_name: remote_name.into(),
                        target: ref_target_to_proto(&remote_ref.target),
                        state: Some(remote_ref_state_to_proto(remote_ref.state)),
                    },
                )
                .collect();
            #[expect(deprecated)]
            crate::protos::simple_op_store::Bookmark {
                name: name.into(),
                local_target,
                remote_bookmarks,
            }
        })
        .collect()
}
// Pair of (local bookmark targets, per-remote views) produced when decoding
// the legacy `Bookmark` proto representation.
type BookmarkViews = (
    BTreeMap<RefNameBuf, RefTarget>,
    BTreeMap<RemoteNameBuf, RemoteView>,
);
/// Splits the legacy per-bookmark proto representation back into local
/// bookmark targets and per-remote views.
fn bookmark_views_from_proto_legacy(
    bookmarks_legacy: Vec<crate::protos::simple_op_store::Bookmark>,
) -> Result<BookmarkViews, PostDecodeError> {
    let mut local_bookmarks: BTreeMap<RefNameBuf, RefTarget> = BTreeMap::new();
    let mut remote_views: BTreeMap<RemoteNameBuf, RemoteView> = BTreeMap::new();
    for bookmark_proto in bookmarks_legacy {
        let bookmark_name: RefNameBuf = bookmark_proto.name.into();
        let local_target = ref_target_from_proto(bookmark_proto.local_target);
        #[expect(deprecated)]
        let remote_bookmarks = bookmark_proto.remote_bookmarks;
        for remote_bookmark in remote_bookmarks {
            let remote_name: RemoteNameBuf = remote_bookmark.remote_name.into();
            let state = match remote_bookmark.state {
                Some(n) => remote_ref_state_from_proto(n)?,
                // Legacy view saved by jj < 0.11. The proto field is not
                // changed to non-optional type because that would break forward
                // compatibility. Zero may be omitted if the field is optional.
                None => RemoteRefState::New,
            };
            let remote_view = remote_views.entry(remote_name).or_default();
            let remote_ref = RemoteRef {
                target: ref_target_from_proto(remote_bookmark.target),
                state,
            };
            remote_view
                .bookmarks
                .insert(bookmark_name.clone(), remote_ref);
        }
        // Absent local targets are dropped: the bookmark exists only on
        // remotes in that case.
        if local_target.is_present() {
            local_bookmarks.insert(bookmark_name, local_target);
        }
    }
    Ok((local_bookmarks, remote_views))
}
/// Serializes the per-remote views (bookmarks and tags) to proto entries,
/// in the map's sorted name order.
fn remote_views_to_proto(
    remote_views: &BTreeMap<RemoteNameBuf, RemoteView>,
) -> Vec<crate::protos::simple_op_store::RemoteView> {
    let mut protos = Vec::with_capacity(remote_views.len());
    for (name, view) in remote_views {
        protos.push(crate::protos::simple_op_store::RemoteView {
            name: name.into(),
            bookmarks: remote_refs_to_proto(&view.bookmarks),
            tags: remote_refs_to_proto(&view.tags),
        });
    }
    protos
}
/// Deserializes the per-remote views (bookmarks and tags) from proto entries.
fn remote_views_from_proto(
    remote_views_proto: Vec<crate::protos::simple_op_store::RemoteView>,
) -> Result<BTreeMap<RemoteNameBuf, RemoteView>, PostDecodeError> {
    let mut views = BTreeMap::new();
    for proto in remote_views_proto {
        let bookmarks = remote_refs_from_proto(proto.bookmarks)?;
        let tags = remote_refs_from_proto(proto.tags)?;
        views.insert(proto.name.into(), RemoteView { bookmarks, tags });
    }
    Ok(views)
}
/// Serializes a map of remote refs into the term-based proto format.
fn remote_refs_to_proto(
    remote_refs: &BTreeMap<RefNameBuf, RemoteRef>,
) -> Vec<crate::protos::simple_op_store::RemoteRef> {
    let mut protos = Vec::with_capacity(remote_refs.len());
    for (name, remote_ref) in remote_refs {
        protos.push(crate::protos::simple_op_store::RemoteRef {
            name: name.into(),
            target_terms: ref_target_to_terms_proto(&remote_ref.target),
            state: remote_ref_state_to_proto(remote_ref.state),
        });
    }
    protos
}
/// Deserializes a map of remote refs from the term-based proto format.
fn remote_refs_from_proto(
    remote_refs_proto: Vec<crate::protos::simple_op_store::RemoteRef>,
) -> Result<BTreeMap<RefNameBuf, RemoteRef>, PostDecodeError> {
    let mut refs = BTreeMap::new();
    for proto in remote_refs_proto {
        let target = ref_target_from_terms_proto(proto.target_terms)?;
        let state = remote_ref_state_from_proto(proto.state)?;
        refs.insert(proto.name.into(), RemoteRef { target, state });
    }
    Ok(refs)
}
/// Serializes a `RefTarget` merge as a flat list of optional-commit terms.
fn ref_target_to_terms_proto(
    value: &RefTarget,
) -> Vec<crate::protos::simple_op_store::RefTargetTerm> {
    value
        .as_merge()
        .iter()
        .map(|term| crate::protos::simple_op_store::RefTargetTerm {
            value: term.as_ref().map(|id| id.to_bytes()),
        })
        .collect()
}
fn ref_target_from_terms_proto(
proto: Vec<crate::protos::simple_op_store::RefTargetTerm>,
) -> Result<RefTarget, PostDecodeError> {
let terms: SmallVec<[_; 1]> = proto
.into_iter()
.map(|crate::protos::simple_op_store::RefTargetTerm { value }| value.map(CommitId::new))
.collect();
if terms.len().is_multiple_of(2) {
Err(PostDecodeError::EvenNumberOfRefTargetTerms(terms.len()))
} else {
Ok(RefTarget::from_merge(Merge::from_vec(terms)))
}
}
/// Serializes a `RefTarget` in the current (conflict-based) proto form.
/// Always returns `Some`; `None` is only produced by the legacy serializer.
fn ref_target_to_proto(value: &RefTarget) -> Option<crate::protos::simple_op_store::RefTarget> {
    fn term_to_proto(
        term: &Option<CommitId>,
    ) -> crate::protos::simple_op_store::ref_conflict::Term {
        crate::protos::simple_op_store::ref_conflict::Term {
            value: term.as_ref().map(|id| id.to_bytes()),
        }
    }
    let merge = value.as_merge();
    let conflict = crate::protos::simple_op_store::RefConflict {
        removes: merge.removes().map(term_to_proto).collect(),
        adds: merge.adds().map(term_to_proto).collect(),
    };
    let value = crate::protos::simple_op_store::ref_target::Value::Conflict(conflict);
    Some(crate::protos::simple_op_store::RefTarget { value: Some(value) })
}
// Legacy (jj <= 0.8) serializer kept only so tests can exercise the
// backward-compatible decoding paths: absent targets become `None`, normal
// targets a bare commit id, and conflicts the remove/add legacy form.
#[expect(deprecated)]
#[cfg(test)]
fn ref_target_to_proto_legacy(
    value: &RefTarget,
) -> Option<crate::protos::simple_op_store::RefTarget> {
    if let Some(id) = value.as_normal() {
        let proto = crate::protos::simple_op_store::RefTarget {
            value: Some(crate::protos::simple_op_store::ref_target::Value::CommitId(
                id.to_bytes(),
            )),
        };
        Some(proto)
    } else if value.has_conflict() {
        let ref_conflict_proto = crate::protos::simple_op_store::RefConflictLegacy {
            removes: value.removed_ids().map(|id| id.to_bytes()).collect(),
            adds: value.added_ids().map(|id| id.to_bytes()).collect(),
        };
        let proto = crate::protos::simple_op_store::RefTarget {
            value: Some(
                crate::protos::simple_op_store::ref_target::Value::ConflictLegacy(
                    ref_conflict_proto,
                ),
            ),
        };
        Some(proto)
    } else {
        assert!(value.is_absent());
        None
    }
}
/// Deserializes a `RefTarget` from its (possibly legacy) proto
/// representation.
///
/// Handles the legacy encodings (absent `None`, a bare commit id from
/// jj <= 0.8, and the remove/add conflict form) as well as the current
/// term-based conflict form.
fn ref_target_from_proto(
    maybe_proto: Option<crate::protos::simple_op_store::RefTarget>,
) -> RefTarget {
    // TODO: Delete legacy format handling when we decide to drop support for views
    // saved by jj <= 0.8.
    let Some(proto) = maybe_proto else {
        // Legacy absent id
        return RefTarget::absent();
    };
    // A present `RefTarget` message must carry one of the `value` variants.
    // A missing value indicates corrupt/incompatible data; fail with a
    // descriptive message instead of a bare unwrap.
    let value = proto
        .value
        .expect("RefTarget proto message should have a value");
    match value {
        crate::protos::simple_op_store::ref_target::Value::CommitId(id) => {
            // Legacy non-conflicting id
            RefTarget::normal(CommitId::new(id))
        }
        #[expect(deprecated)]
        crate::protos::simple_op_store::ref_target::Value::ConflictLegacy(conflict) => {
            // Legacy conflicting ids
            let removes = conflict.removes.into_iter().map(CommitId::new);
            let adds = conflict.adds.into_iter().map(CommitId::new);
            RefTarget::from_legacy_form(removes, adds)
        }
        crate::protos::simple_op_store::ref_target::Value::Conflict(conflict) => {
            let term_from_proto = |term: crate::protos::simple_op_store::ref_conflict::Term| {
                term.value.map(CommitId::new)
            };
            let removes = conflict.removes.into_iter().map(term_from_proto);
            let adds = conflict.adds.into_iter().map(term_from_proto);
            RefTarget::from_merge(Merge::from_removes_adds(removes, adds))
        }
    }
}
/// Maps a `RemoteRefState` to its proto enum discriminant.
fn remote_ref_state_to_proto(state: RemoteRefState) -> i32 {
    use crate::protos::simple_op_store::RemoteRefState as ProtoState;
    let proto_state = match state {
        RemoteRefState::New => ProtoState::New,
        RemoteRefState::Tracked => ProtoState::Tracked,
    };
    proto_state as i32
}
/// Maps a proto enum discriminant back to a `RemoteRefState`, rejecting
/// unknown values.
fn remote_ref_state_from_proto(proto_value: i32) -> Result<RemoteRefState, PostDecodeError> {
    use crate::protos::simple_op_store::RemoteRefState as ProtoState;
    let parsed: Result<ProtoState, prost::UnknownEnumValue> = proto_value.try_into();
    match parsed {
        Ok(ProtoState::New) => Ok(RemoteRefState::New),
        Ok(ProtoState::Tracked) => Ok(RemoteRefState::Tracked),
        Err(prost::UnknownEnumValue(n)) => Err(PostDecodeError::InvalidRemoteRefStateValue(n)),
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/backend.rs | lib/src/backend.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::any::Any;
use std::fmt::Debug;
use std::pin::Pin;
use std::slice;
use std::time::SystemTime;
use async_trait::async_trait;
use chrono::TimeZone as _;
use futures::stream::BoxStream;
use thiserror::Error;
use tokio::io::AsyncRead;
use crate::content_hash::ContentHash;
use crate::hex_util;
use crate::index::Index;
use crate::merge::Merge;
use crate::object_id::ObjectId as _;
use crate::object_id::id_type;
use crate::repo_path::RepoPath;
use crate::repo_path::RepoPathBuf;
use crate::repo_path::RepoPathComponent;
use crate::repo_path::RepoPathComponentBuf;
use crate::signing::SignResult;
id_type!(
    /// Identifier for a [`Commit`] based on its content. When a commit is
    /// rewritten, its `CommitId` changes.
    pub CommitId { hex() }
);
id_type!(
    /// Stable identifier for a [`Commit`]. Unlike the `CommitId`, the `ChangeId`
    /// follows the commit and is not updated when the commit is rewritten.
    pub ChangeId { reverse_hex() }
);
// Content-based identifiers for the remaining object kinds; all render as
// plain hex strings.
id_type!(pub TreeId { hex() });
id_type!(pub FileId { hex() });
id_type!(pub SymlinkId { hex() });
id_type!(pub CopyId { hex() });
impl ChangeId {
    /// Parses the given "reverse" hex string into a `ChangeId`.
    ///
    /// Returns `None` if the input is not valid reverse hex.
    pub fn try_from_reverse_hex(hex: impl AsRef<[u8]>) -> Option<Self> {
        hex_util::decode_reverse_hex(hex).map(Self)
    }
    /// Returns the hex string representation of this ID, which uses `z-k`
    /// "digits" instead of `0-9a-f`.
    pub fn reverse_hex(&self) -> String {
        hex_util::encode_reverse_hex(&self.0)
    }
}
impl CopyId {
    /// Returns a placeholder copy id to be used when we don't have a real copy
    /// id yet.
    ///
    /// The placeholder is the empty byte string.
    // TODO: Delete this
    pub fn placeholder() -> Self {
        Self::new(vec![])
    }
}
/// Error returned by [`Timestamp::to_datetime()`] when the instant cannot be
/// represented as a `chrono` datetime.
#[derive(Debug, Error)]
#[error("Out-of-range date")]
pub struct TimestampOutOfRange;

/// Number of milliseconds since the Unix epoch (signed, so it may be
/// negative for pre-epoch instants).
#[derive(ContentHash, Debug, PartialEq, Eq, Clone, Copy, PartialOrd, Ord)]
pub struct MillisSinceEpoch(pub i64);

/// A point in time, recorded together with the local time zone offset.
#[derive(ContentHash, Debug, PartialEq, Eq, Clone, Copy, PartialOrd, Ord)]
pub struct Timestamp {
    pub timestamp: MillisSinceEpoch,
    // time zone offset in minutes
    pub tz_offset: i32,
}
impl Timestamp {
    /// The current time, in the local time zone.
    pub fn now() -> Self {
        Self::from_datetime(chrono::offset::Local::now())
    }
    /// Converts a fixed-offset `chrono` datetime into a `Timestamp`.
    pub fn from_datetime<Tz: chrono::TimeZone<Offset = chrono::offset::FixedOffset>>(
        datetime: chrono::DateTime<Tz>,
    ) -> Self {
        let timestamp = MillisSinceEpoch(datetime.timestamp_millis());
        let tz_offset = datetime.offset().local_minus_utc() / 60;
        Self {
            timestamp,
            tz_offset,
        }
    }
    /// Converts this timestamp back into a `chrono` datetime.
    ///
    /// Fails if the instant is out of `chrono`'s representable range. An
    /// invalid time zone offset falls back to UTC.
    pub fn to_datetime(
        &self,
    ) -> Result<chrono::DateTime<chrono::FixedOffset>, TimestampOutOfRange> {
        // `timestamp_millis_opt` performs the same euclidean split of the
        // millisecond count into seconds + nanoseconds as doing it by hand.
        let utc = chrono::Utc
            .timestamp_millis_opt(self.timestamp.0)
            .earliest()
            .ok_or(TimestampOutOfRange)?;
        let offset = chrono::FixedOffset::east_opt(self.tz_offset * 60)
            .unwrap_or_else(|| chrono::FixedOffset::east_opt(0).unwrap());
        Ok(utc.with_timezone(&offset))
    }
}
impl serde::Serialize for Timestamp {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
// TODO: test is_human_readable() to use raw format?
let t = self.to_datetime().map_err(serde::ser::Error::custom)?;
t.serialize(serializer)
}
}
/// Represents a [`Commit`] signature.
#[derive(ContentHash, Debug, PartialEq, Eq, Clone, serde::Serialize)]
pub struct Signature {
    pub name: String,
    pub email: String,
    pub timestamp: Timestamp,
}

/// Represents a cryptographically signed [`Commit`] signature.
#[derive(ContentHash, Debug, PartialEq, Eq, Clone)]
pub struct SecureSig {
    // NOTE(review): presumably the payload that was signed — confirm against
    // the signing backends.
    pub data: Vec<u8>,
    // The raw signature bytes.
    pub sig: Vec<u8>,
}

/// Callback that cryptographically signs the given bytes; passed to
/// [`Backend::write_commit()`].
pub type SigningFn<'a> = dyn FnMut(&[u8]) -> SignResult<Vec<u8>> + Send + 'a;
/// A commit as recorded by a commit [`Backend`].
#[derive(ContentHash, Debug, PartialEq, Eq, Clone, serde::Serialize)]
pub struct Commit {
    pub parents: Vec<CommitId>,
    // TODO: delete commit.predecessors when we can assume that most commits are
    // tracked by op.commit_predecessors. (in jj 0.42 or so?)
    #[serde(skip)] // deprecated
    pub predecessors: Vec<CommitId>,
    #[serde(skip)] // TODO: should be exposed?
    pub root_tree: Merge<TreeId>,
    // If resolved, must be empty string. Otherwise, must have same number of terms as `root_tree`.
    #[serde(skip)]
    pub conflict_labels: Merge<String>,
    pub change_id: ChangeId,
    pub description: String,
    pub author: Signature,
    pub committer: Signature,
    #[serde(skip)] // raw data wouldn't be useful
    pub secure_sig: Option<SecureSig>,
}
/// An individual copy event, from file A -> B.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct CopyRecord {
    /// The destination of the copy, B.
    pub target: RepoPathBuf,
    /// The CommitId where the copy took place.
    pub target_commit: CommitId,
    /// The source path a target was copied from.
    ///
    /// It is not required that the source path is different than the target
    /// path. A custom backend may choose to represent 'rollbacks' as copies
    /// from a file unto itself, from a specific prior commit.
    pub source: RepoPathBuf,
    // NOTE(review): presumably the id of `source`'s content at
    // `source_commit` — confirm against backend implementations.
    pub source_file: FileId,
    /// The source commit the target was copied from. Backends may use this
    /// field to implement 'integration' logic, where a source may be
    /// periodically merged into a target, similar to a branch, but the
    /// branching occurs at the file level rather than the repository level. It
    /// also follows naturally that any copy source targeted to a specific
    /// commit should avoid copy propagation on rebasing, which is desirable
    /// for 'fork' style copies.
    ///
    /// It is required that the commit id is an ancestor of the commit with
    /// which this copy source is associated.
    pub source_commit: CommitId,
}

/// Describes the copy history of a file. The copy object is unchanged when a
/// file is modified.
#[derive(ContentHash, Debug, PartialEq, Eq, Clone, PartialOrd, Ord)]
pub struct CopyHistory {
    /// The file's current path.
    pub current_path: RepoPathBuf,
    /// IDs of the files that became the current incarnation of this file.
    ///
    /// A newly created file has no parents. A regular copy or rename has one
    /// parent. A merge of multiple files has multiple parents.
    pub parents: Vec<CopyId>,
    /// An optional piece of data to give the Copy object a different ID. May be
    /// randomly generated. This allows a commit to say that a file was replaced
    /// by a new incarnation of it, indicating a logically distinct file
    /// taking the place of the previous file at the path.
    pub salt: Vec<u8>,
}
/// Error that may occur during backend initialization.
///
/// Wraps an arbitrary backend-specific error.
#[derive(Debug, Error)]
#[error(transparent)]
pub struct BackendInitError(pub Box<dyn std::error::Error + Send + Sync>);

/// Error that may occur during backend loading.
///
/// Wraps an arbitrary backend-specific error.
#[derive(Debug, Error)]
#[error(transparent)]
pub struct BackendLoadError(pub Box<dyn std::error::Error + Send + Sync>);
/// Commit-backend error that may occur after the backend is loaded.
#[derive(Debug, Error)]
pub enum BackendError {
    /// An object ID had the wrong number of bytes for its object type.
    #[error(
        "Invalid hash length for object of type {object_type} (expected {expected} bytes, got \
         {actual} bytes): {hash}"
    )]
    InvalidHashLength {
        expected: usize,
        actual: usize,
        object_type: String,
        hash: String,
    },
    /// Object content that is required to be text was not valid UTF-8.
    #[error("Invalid UTF-8 for object {hash} of type {object_type}")]
    InvalidUtf8 {
        object_type: String,
        hash: String,
        source: std::str::Utf8Error,
    },
    /// The requested object does not exist in the backend.
    #[error("Object {hash} of type {object_type} not found")]
    ObjectNotFound {
        object_type: String,
        hash: String,
        source: Box<dyn std::error::Error + Send + Sync>,
    },
    /// The object exists but could not be read.
    #[error("Error when reading object {hash} of type {object_type}")]
    ReadObject {
        object_type: String,
        hash: String,
        source: Box<dyn std::error::Error + Send + Sync>,
    },
    /// The backend refused access to the object.
    #[error("Access denied to read object {hash} of type {object_type}")]
    ReadAccessDenied {
        object_type: String,
        hash: String,
        source: Box<dyn std::error::Error + Send + Sync>,
    },
    /// File content could not be read; carries the repo path for context.
    #[error(
        "Error when reading file content for file {path} with id {id}",
        path = path.as_internal_file_string()
    )]
    ReadFile {
        path: RepoPathBuf,
        id: FileId,
        source: Box<dyn std::error::Error + Send + Sync>,
    },
    /// An object could not be written.
    #[error("Could not write object of type {object_type}")]
    WriteObject {
        object_type: &'static str,
        source: Box<dyn std::error::Error + Send + Sync>,
    },
    /// Any other backend-specific error.
    #[error(transparent)]
    Other(Box<dyn std::error::Error + Send + Sync>),
    /// A valid operation attempted, but failed because it isn't supported by
    /// the particular backend.
    #[error("{0}")]
    Unsupported(String),
}

/// Result type returned by [`Backend`] methods.
pub type BackendResult<T> = Result<T, BackendError>;
/// The value stored for a name in a [`Tree`].
#[derive(ContentHash, Debug, PartialEq, Eq, Clone, Hash)]
pub enum TreeValue {
    // TODO: When there's a CopyId here, the copy object's path must match
    // the path identified by the tree.
    File {
        id: FileId,
        executable: bool,
        copy_id: CopyId,
    },
    Symlink(SymlinkId),
    Tree(TreeId),
    GitSubmodule(CommitId),
}

impl TreeValue {
    /// Hex string of the underlying object id, for whichever variant this is.
    pub fn hex(&self) -> String {
        match self {
            Self::File { id, .. } => id.hex(),
            Self::Symlink(id) => id.hex(),
            Self::Tree(id) => id.hex(),
            Self::GitSubmodule(id) => id.hex(),
        }
    }
}
/// A borrowed (name, value) pair from a [`Tree`].
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct TreeEntry<'a> {
    name: &'a RepoPathComponent,
    value: &'a TreeValue,
}

impl<'a> TreeEntry<'a> {
    /// Creates an entry from borrowed name and value.
    pub fn new(name: &'a RepoPathComponent, value: &'a TreeValue) -> Self {
        Self { name, value }
    }
    /// The entry's name (last path component).
    pub fn name(&self) -> &'a RepoPathComponent {
        self.name
    }
    /// The entry's value.
    pub fn value(&self) -> &'a TreeValue {
        self.value
    }
}
/// Iterator over the direct (non-recursive) entries of a [`Tree`], in the
/// tree's (sorted) storage order.
pub struct TreeEntriesNonRecursiveIterator<'a> {
    iter: slice::Iter<'a, (RepoPathComponentBuf, TreeValue)>,
}

impl<'a> Iterator for TreeEntriesNonRecursiveIterator<'a> {
    type Item = TreeEntry<'a>;

    fn next(&mut self) -> Option<Self::Item> {
        self.iter
            .next()
            .map(|(name, value)| TreeEntry { name, value })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // Delegate to the underlying slice iterator (which knows its exact
        // length) so that `collect()` can preallocate.
        self.iter.size_hint()
    }
}

// The slice iterator's length is exact, so we can advertise that.
impl ExactSizeIterator for TreeEntriesNonRecursiveIterator<'_> {}
/// A plain tree object: a sorted list of (name, value) entries.
#[derive(ContentHash, Default, PartialEq, Eq, Debug, Clone)]
pub struct Tree {
    entries: Vec<(RepoPathComponentBuf, TreeValue)>,
}

impl Tree {
    /// Creates a tree from entries that are already sorted by name.
    pub fn from_sorted_entries(entries: Vec<(RepoPathComponentBuf, TreeValue)>) -> Self {
        debug_assert!(entries.is_sorted_by(|(a, _), (b, _)| a < b));
        Self { entries }
    }

    /// Whether this tree has no entries at all.
    pub fn is_empty(&self) -> bool {
        self.entries.is_empty()
    }

    /// Iterates over the entry names, in sorted order.
    pub fn names(&self) -> impl Iterator<Item = &RepoPathComponent> {
        self.entries.iter().map(|(name, _)| name.as_ref())
    }

    /// Iterates over all entries, in sorted order.
    pub fn entries(&self) -> TreeEntriesNonRecursiveIterator<'_> {
        TreeEntriesNonRecursiveIterator {
            iter: self.entries.iter(),
        }
    }

    /// Looks up the entry with the given name by binary search over the
    /// sorted entry list.
    pub fn entry(&self, name: &RepoPathComponent) -> Option<TreeEntry<'_>> {
        let index = self
            .entries
            .binary_search_by_key(&name, |(name, _)| name)
            .ok()?;
        self.entries
            .get(index)
            .map(|(name, value)| TreeEntry { name, value })
    }

    /// Looks up the value stored for the given name.
    pub fn value(&self, name: &RepoPathComponent) -> Option<&TreeValue> {
        self.entry(name).map(|entry| entry.value)
    }
}
/// Constructs the virtual root commit: no parents, an empty tree, an empty
/// author/committer signature at the Unix epoch.
pub fn make_root_commit(root_change_id: ChangeId, empty_tree_id: TreeId) -> Commit {
    let signature = Signature {
        name: String::new(),
        email: String::new(),
        timestamp: Timestamp {
            timestamp: MillisSinceEpoch(0),
            tz_offset: 0,
        },
    };
    Commit {
        parents: vec![],
        predecessors: vec![],
        root_tree: Merge::resolved(empty_tree_id),
        conflict_labels: Merge::resolved(String::new()),
        change_id: root_change_id,
        description: String::new(),
        author: signature.clone(),
        committer: signature,
        secure_sig: None,
    }
}
/// Defines the interface for commit backends.
#[async_trait]
pub trait Backend: Any + Send + Sync + Debug {
    /// A unique name that identifies this backend. Written to
    /// `.jj/repo/store/type` when the repo is created.
    fn name(&self) -> &str;
    /// The length of commit IDs in bytes.
    fn commit_id_length(&self) -> usize;
    /// The length of change IDs in bytes.
    fn change_id_length(&self) -> usize;
    /// The ID of the backend's root commit.
    fn root_commit_id(&self) -> &CommitId;
    /// The change ID of the backend's root commit.
    fn root_change_id(&self) -> &ChangeId;
    /// The ID of an empty tree in this backend.
    fn empty_tree_id(&self) -> &TreeId;
    /// An estimate of how many concurrent requests this backend handles well. A
    /// local backend like the Git backend (at least until it supports partial
    /// clones) may want to set this to 1. A cloud-backed backend may want to
    /// set it to 100 or so.
    ///
    /// It is not guaranteed that at most this number of concurrent requests are
    /// sent.
    fn concurrency(&self) -> usize;
    /// Reads the content of the file at `path` with the given `id`.
    async fn read_file(
        &self,
        path: &RepoPath,
        id: &FileId,
    ) -> BackendResult<Pin<Box<dyn AsyncRead + Send>>>;
    /// Writes `contents` as the file at `path` and returns its new id.
    async fn write_file(
        &self,
        path: &RepoPath,
        contents: &mut (dyn AsyncRead + Send + Unpin),
    ) -> BackendResult<FileId>;
    /// Reads the target of the symlink at `path` with the given `id`.
    async fn read_symlink(&self, path: &RepoPath, id: &SymlinkId) -> BackendResult<String>;
    /// Writes a symlink at `path` pointing to `target` and returns its id.
    async fn write_symlink(&self, path: &RepoPath, target: &str) -> BackendResult<SymlinkId>;
    /// Read the specified `CopyHistory` object.
    ///
    /// Backends that don't support copy tracking may return
    /// `BackendError::Unsupported`.
    async fn read_copy(&self, id: &CopyId) -> BackendResult<CopyHistory>;
    /// Write the `CopyHistory` object and return its ID.
    ///
    /// Backends that don't support copy tracking may return
    /// `BackendError::Unsupported`.
    async fn write_copy(&self, copy: &CopyHistory) -> BackendResult<CopyId>;
    /// Find all copy histories that are related to the specified one. This is
    /// defined as those that are ancestors of the specified one, plus
    /// their descendants. Children must be returned before parents.
    ///
    /// It is valid (but wasteful) to include other copy histories, such as
    /// siblings, or even completely unrelated copy histories.
    ///
    /// Backends that don't support copy tracking may return
    /// `BackendError::Unsupported`.
    async fn get_related_copies(&self, copy_id: &CopyId) -> BackendResult<Vec<CopyHistory>>;
    /// Reads the tree at `path` with the given `id`.
    async fn read_tree(&self, path: &RepoPath, id: &TreeId) -> BackendResult<Tree>;
    /// Writes the tree at `path` and returns its id.
    async fn write_tree(&self, path: &RepoPath, contents: &Tree) -> BackendResult<Tree_Id_PLACEHOLDER>;
    /// Reads the commit with the given `id`.
    async fn read_commit(&self, id: &CommitId) -> BackendResult<Commit>;
    /// Writes a commit and returns its ID and the commit itself. The commit
    /// should contain the data that was actually written, which may differ
    /// from the data passed in. For example, the backend may change the
    /// committer name to an authenticated user's name, or the backend's
    /// timestamps may have less precision than the millisecond precision in
    /// `Commit`.
    ///
    /// The `sign_with` parameter could contain a function to cryptographically
    /// sign some binary representation of the commit.
    /// If the backend supports it, it could call it and store the result in
    /// an implementation specific fashion, and both `read_commit` and the
    /// return of `write_commit` should read it back as the `secure_sig`
    /// field.
    async fn write_commit(
        &self,
        contents: Commit,
        sign_with: Option<&mut SigningFn>,
    ) -> BackendResult<(CommitId, Commit)>;
    /// Get copy records for the dag range `root..head`. If `paths` is None
    /// include all paths, otherwise restrict to only `paths`.
    ///
    /// The exact order these are returned is unspecified, but it is guaranteed
    /// to be reverse-topological. That is, for any two copy records with
    /// different commit ids A and B, if A is an ancestor of B, A is streamed
    /// after B.
    ///
    /// Streaming by design to better support large backends which may have very
    /// large single-file histories. This also allows more iterative algorithms
    /// like blame/annotate to short-circuit after a point without wasting
    /// unnecessary resources.
    fn get_copy_records(
        &self,
        paths: Option<&[RepoPathBuf]>,
        root: &CommitId,
        head: &CommitId,
    ) -> BackendResult<BoxStream<'_, BackendResult<CopyRecord>>>;
    /// Perform garbage collection.
    ///
    /// All commits found in the `index` won't be removed. In addition to that,
    /// objects created after `keep_newer` will be preserved. This mitigates a
    /// risk of deleting new commits created concurrently by another process.
    fn gc(&self, index: &dyn Index, keep_newer: SystemTime) -> BackendResult<()>;
}
impl dyn Backend {
    /// Returns reference of the implementation type.
    ///
    /// Returns `None` if this backend is not of concrete type `T`.
    pub fn downcast_ref<T: Backend>(&self) -> Option<&T> {
        (self as &dyn Any).downcast_ref()
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/rewrite.rs | lib/src/rewrite.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::collections::HashMap;
use std::collections::HashSet;
use std::slice;
use std::sync::Arc;
use futures::StreamExt as _;
use futures::future::try_join_all;
use futures::try_join;
use indexmap::IndexMap;
use indexmap::IndexSet;
use itertools::Itertools as _;
use pollster::FutureExt as _;
use tracing::instrument;
use crate::backend::BackendError;
use crate::backend::BackendResult;
use crate::backend::CommitId;
use crate::commit::Commit;
use crate::commit::CommitIteratorExt as _;
use crate::commit::conflict_label_for_commits;
use crate::commit_builder::CommitBuilder;
use crate::index::Index;
use crate::index::IndexResult;
use crate::index::ResolvedChangeTargets;
use crate::iter_util::fallible_any;
use crate::matchers::Matcher;
use crate::matchers::Visit;
use crate::merge::Diff;
use crate::merge::Merge;
use crate::merged_tree::MergedTree;
use crate::merged_tree::MergedTreeBuilder;
use crate::merged_tree::TreeDiffEntry;
use crate::repo::MutableRepo;
use crate::repo::Repo;
use crate::repo_path::RepoPath;
use crate::revset::RevsetExpression;
use crate::revset::RevsetIteratorExt as _;
use crate::store::Store;
/// Merges `commits` and tries to resolve any conflicts recursively.
#[instrument(skip(repo))]
pub async fn merge_commit_trees(repo: &dyn Repo, commits: &[Commit]) -> BackendResult<MergedTree> {
    match commits {
        // A single commit needs no merging at all.
        [commit] => Ok(commit.tree()),
        _ => {
            let unresolved =
                merge_commit_trees_no_resolve_without_repo(repo.store(), repo.index(), commits)
                    .await?;
            unresolved.resolve().await
        }
    }
}
/// Merges `commits` without attempting to resolve file conflicts.
#[instrument(skip(index))]
pub async fn merge_commit_trees_no_resolve_without_repo(
    store: &Arc<Store>,
    index: &dyn Index,
    commits: &[Commit],
) -> BackendResult<MergedTree> {
    let commit_ids = commits
        .iter()
        .map(|commit| commit.id().clone())
        .collect_vec();
    // Determine the (possibly nested) merge structure over the input commits.
    let commit_id_merge = find_recursive_merge_commits(store, index, commit_ids)?;
    // Load each commit's tree, paired with its human-readable conflict label.
    let tree_merge: Merge<(MergedTree, String)> = commit_id_merge
        .try_map_async(async |commit_id| {
            let commit = store.get_commit_async(commit_id).await?;
            Ok::<_, BackendError>((commit.tree(), commit.conflict_label()))
        })
        .await?;
    Ok(MergedTree::merge_no_resolve(tree_merge))
}
/// Find the commits to use as input to the recursive merge algorithm.
pub fn find_recursive_merge_commits(
store: &Arc<Store>,
index: &dyn Index,
mut commit_ids: Vec<CommitId>,
) -> BackendResult<Merge<CommitId>> {
if commit_ids.is_empty() {
Ok(Merge::resolved(store.root_commit_id().clone()))
} else if commit_ids.len() == 1 {
Ok(Merge::resolved(commit_ids.pop().unwrap()))
} else {
let mut result = Merge::resolved(commit_ids[0].clone());
for (i, other_commit_id) in commit_ids.iter().enumerate().skip(1) {
let ancestor_ids = index
.common_ancestors(&commit_ids[0..i], &commit_ids[i..][..1])
// TODO: indexing error shouldn't be a "BackendError"
.map_err(|err| BackendError::Other(err.into()))?;
let ancestor_merge = find_recursive_merge_commits(store, index, ancestor_ids)?;
result = Merge::from_vec(vec![
result,
ancestor_merge,
Merge::resolved(other_commit_id.clone()),
])
.flatten();
}
Ok(result)
}
}
/// Restore matching paths from the source into the destination.
pub async fn restore_tree(
    source: &MergedTree,
    destination: &MergedTree,
    matcher: &dyn Matcher,
) -> BackendResult<MergedTree> {
    // Optimization for a common case: everything matches, so the result is
    // just the source tree.
    if matcher.visit(RepoPath::root()) == Visit::AllRecursively {
        return Ok(source.clone());
    }
    // TODO: We should be able to not traverse deeper in the diff if the matcher
    // matches an entire subtree.
    let mut tree_builder = MergedTreeBuilder::new(destination.clone());
    // TODO: handle copy tracking
    let mut diff_stream = source.diff_stream(destination, matcher);
    while let Some(entry) = diff_stream.next().await {
        let TreeDiffEntry { path, values } = entry;
        // Take the "before" (source) side of every differing path.
        tree_builder.set_or_remove(path, values?.before);
    }
    tree_builder.write_tree()
}
/// Rebases `old_commit` onto `new_parents` and writes the result to the repo.
pub async fn rebase_commit(
    mut_repo: &mut MutableRepo,
    old_commit: Commit,
    new_parents: Vec<CommitId>,
) -> BackendResult<Commit> {
    CommitRewriter::new(mut_repo, old_commit, new_parents)
        .rebase()
        .await?
        .write()
}
/// Helps rewrite a commit.
pub struct CommitRewriter<'repo> {
    // Repo the rewritten commit is recorded in.
    mut_repo: &'repo mut MutableRepo,
    // The commit being rewritten.
    old_commit: Commit,
    // The parents the rewritten commit will have.
    new_parents: Vec<CommitId>,
}
impl<'repo> CommitRewriter<'repo> {
    /// Create a new instance.
    pub fn new(
        mut_repo: &'repo mut MutableRepo,
        old_commit: Commit,
        new_parents: Vec<CommitId>,
    ) -> Self {
        Self {
            mut_repo,
            old_commit,
            new_parents,
        }
    }
    /// Returns the `MutableRepo`.
    pub fn repo_mut(&mut self) -> &mut MutableRepo {
        self.mut_repo
    }
    /// The commit we're rewriting.
    pub fn old_commit(&self) -> &Commit {
        &self.old_commit
    }
    /// Get the old commit's intended new parents.
    pub fn new_parents(&self) -> &[CommitId] {
        &self.new_parents
    }
    /// Set the old commit's intended new parents.
    pub fn set_new_parents(&mut self, new_parents: Vec<CommitId>) {
        self.new_parents = new_parents;
    }
    /// Set the old commit's intended new parents to be the rewritten versions
    /// of the given parents.
    pub fn set_new_rewritten_parents(&mut self, unrewritten_parents: &[CommitId]) {
        self.new_parents = self.mut_repo.new_parents(unrewritten_parents);
    }
    /// Update the intended new parents by replacing any occurrence of
    /// `old_parent` by `new_parents`.
    pub fn replace_parent<'a>(
        &mut self,
        old_parent: &CommitId,
        new_parents: impl IntoIterator<Item = &'a CommitId>,
    ) {
        if let Some(i) = self.new_parents.iter().position(|p| p == old_parent) {
            self.new_parents
                .splice(i..i + 1, new_parents.into_iter().cloned());
            // Drop any duplicates the splice may have introduced, keeping the
            // first occurrence of each parent.
            let mut unique = HashSet::new();
            self.new_parents.retain(|p| unique.insert(p.clone()));
        }
    }
    /// Checks if the intended new parents are different from the old commit's
    /// parents.
    pub fn parents_changed(&self) -> bool {
        self.new_parents != self.old_commit.parent_ids()
    }
    /// If a merge commit would end up with one parent being an ancestor of the
    /// other, then filter out the ancestor.
    pub fn simplify_ancestor_merge(&mut self) -> IndexResult<()> {
        // `heads()` keeps only parents that are not ancestors of another
        // parent.
        let head_set: HashSet<_> = self
            .mut_repo
            .index()
            .heads(&mut self.new_parents.iter())?
            .into_iter()
            .collect();
        self.new_parents.retain(|parent| head_set.contains(parent));
        Ok(())
    }
    /// Records the old commit as abandoned with the new parents.
    ///
    /// This is equivalent to `reparent(settings).abandon()`, but is cheaper.
    pub fn abandon(self) {
        let old_commit_id = self.old_commit.id().clone();
        let new_parents = self.new_parents;
        self.mut_repo
            .record_abandoned_commit_with_parents(old_commit_id, new_parents);
    }
    /// Rebase the old commit onto the new parents. Returns a `CommitBuilder`
    /// for the new commit. Returns `None` if the commit was abandoned.
    pub async fn rebase_with_empty_behavior(
        self,
        empty: EmptyBehavior,
    ) -> BackendResult<Option<CommitBuilder<'repo>>> {
        // Load the old and new parent commits concurrently.
        let old_parents_fut = self.old_commit.parents_async();
        let new_parents_fut = try_join_all(
            self.new_parents
                .iter()
                .map(|new_parent_id| self.mut_repo.store().get_commit_async(new_parent_id)),
        );
        let (old_parents, new_parents) = try_join!(old_parents_fut, new_parents_fut)?;
        let old_parent_trees = old_parents
            .iter()
            .map(|parent| parent.tree_ids().clone())
            .collect_vec();
        let new_parent_trees = new_parents
            .iter()
            .map(|parent| parent.tree_ids().clone())
            .collect_vec();
        let (was_empty, new_tree) = if new_parent_trees == old_parent_trees {
            (
                // Optimization: was_empty is only used for newly empty, but when the
                // parents haven't changed it can't be newly empty.
                true,
                // Optimization: Skip merging.
                self.old_commit.tree(),
            )
        } else {
            // We wouldn't need to resolve merge conflicts here if the
            // same-change rule is "keep". See 9d4a97381f30 "rewrite: don't
            // resolve intermediate parent tree when rebasing" for details.
            let old_base_tree_fut = merge_commit_trees(self.mut_repo, &old_parents);
            let new_base_tree_fut = merge_commit_trees(self.mut_repo, &new_parents);
            let old_tree = self.old_commit.tree();
            let (old_base_tree, new_base_tree) = try_join!(old_base_tree_fut, new_base_tree_fut)?;
            // 3-way merge: apply the commit's change (old base -> old tree)
            // on top of the new base.
            (
                old_base_tree.tree_ids() == self.old_commit.tree_ids(),
                MergedTree::merge(Merge::from_vec(vec![
                    (
                        new_base_tree,
                        format!(
                            "{} (rebase destination)",
                            conflict_label_for_commits(&new_parents)
                        ),
                    ),
                    (
                        old_base_tree,
                        format!(
                            "{} (parents of rebased revision)",
                            conflict_label_for_commits(&old_parents)
                        ),
                    ),
                    (
                        old_tree,
                        format!("{} (rebased revision)", self.old_commit.conflict_label()),
                    ),
                ]))
                .await?,
            )
        };
        // Ensure we don't abandon commits with multiple parents (merge commits), even
        // if they're empty.
        if let [parent] = &new_parents[..] {
            let should_abandon = match empty {
                EmptyBehavior::Keep => false,
                EmptyBehavior::AbandonNewlyEmpty => {
                    parent.tree_ids() == new_tree.tree_ids() && !was_empty
                }
                EmptyBehavior::AbandonAllEmpty => parent.tree_ids() == new_tree.tree_ids(),
            };
            if should_abandon {
                self.abandon();
                return Ok(None);
            }
        }
        let builder = self
            .mut_repo
            .rewrite_commit(&self.old_commit)
            .set_parents(self.new_parents)
            .set_tree(new_tree);
        Ok(Some(builder))
    }
    /// Rebase the old commit onto the new parents. Returns a `CommitBuilder`
    /// for the new commit.
    pub async fn rebase(self) -> BackendResult<CommitBuilder<'repo>> {
        let builder = self.rebase_with_empty_behavior(EmptyBehavior::Keep).await?;
        Ok(builder.unwrap())
    }
    /// Rewrite the old commit onto the new parents without changing its
    /// contents. Returns a `CommitBuilder` for the new commit.
    pub fn reparent(self) -> CommitBuilder<'repo> {
        self.mut_repo
            .rewrite_commit(&self.old_commit)
            .set_parents(self.new_parents)
    }
}
/// Outcome of rebasing a single commit.
#[derive(Debug)]
pub enum RebasedCommit {
    /// The commit was rewritten onto its new parents.
    Rewritten(Commit),
    /// The commit was abandoned; `parent_id` is its single new parent.
    Abandoned { parent_id: CommitId },
}
/// Rebases a commit per `options`, reporting whether it was rewritten or
/// abandoned.
pub fn rebase_commit_with_options(
    mut rewriter: CommitRewriter<'_>,
    options: &RebaseOptions,
) -> BackendResult<RebasedCommit> {
    // If specified, don't create commit where one parent is an ancestor of another.
    if options.simplify_ancestor_merge {
        rewriter
            .simplify_ancestor_merge()
            // TODO: indexing error shouldn't be a "BackendError"
            .map_err(|err| BackendError::Other(err.into()))?;
    }
    // Remember the (possibly simplified) parents before the rewriter is
    // consumed; they are needed for the abandoned case below.
    let new_parents = rewriter.new_parents().to_vec();
    let maybe_builder = rewriter
        .rebase_with_empty_behavior(options.empty)
        .block_on()?;
    match maybe_builder {
        Some(builder) => Ok(RebasedCommit::Rewritten(builder.write()?)),
        None => {
            // Commits are only ever abandoned onto a single parent.
            assert_eq!(new_parents.len(), 1);
            Ok(RebasedCommit::Abandoned {
                parent_id: new_parents.into_iter().next().unwrap(),
            })
        }
    }
}
/// Moves changes from `sources` to the `destination` parent, returns new tree.
// TODO: pass conflict labels as argument to provide more specific information
pub fn rebase_to_dest_parent(
    repo: &dyn Repo,
    sources: &[Commit],
    destination: &Commit,
) -> BackendResult<MergedTree> {
    // Fast path: a single source that already sits on the destination's
    // parents needs no rebasing.
    if let [source] = sources
        && source.parent_ids() == destination.parent_ids()
    {
        return Ok(source.tree());
    }
    // One diff per source: from the source's parent tree to its own tree.
    let diffs: Vec<_> = sources
        .iter()
        .map(|source| -> BackendResult<_> {
            Ok(Diff::new(
                (
                    source.parent_tree(repo)?,
                    format!("{} (original parents)", source.parents_conflict_label()?),
                ),
                (
                    source.tree(),
                    format!("{} (original revision)", source.conflict_label()),
                ),
            ))
        })
        .try_collect()?;
    // Apply all of the diffs onto the destination's parent tree.
    MergedTree::merge(Merge::from_diffs(
        (
            destination.parent_tree(repo)?,
            format!("{} (new parents)", destination.parents_conflict_label()?),
        ),
        diffs,
    ))
    .block_on()
}
/// Controls whether commits that are (or become) empty are kept or abandoned
/// while rebasing.
#[derive(Clone, Copy, Default, PartialEq, Eq, Debug)]
pub enum EmptyBehavior {
    /// Always keep empty commits
    #[default]
    Keep,
    /// Skips commits that would be empty after the rebase, but that were not
    /// originally empty.
    /// Will never skip merge commits with multiple non-empty parents.
    AbandonNewlyEmpty,
    /// Skips all empty commits, including ones that were empty before the
    /// rebase.
    /// Will never skip merge commits with multiple non-empty parents.
    AbandonAllEmpty,
}

/// Controls the configuration of a rebase.
// If we wanted to add a flag similar to `git rebase --ignore-date`, then this
// makes it much easier by ensuring that the only changes required are to
// change the RebaseOptions construction in the CLI, and changing the
// rebase_commit function to actually use the flag, and ensure we don't need to
// plumb it in.
#[derive(Clone, Debug, Default)]
pub struct RebaseOptions {
    pub empty: EmptyBehavior,
    pub rewrite_refs: RewriteRefsOptions,
    /// If a merge commit would end up with one parent being an ancestor of the
    /// other, then filter out the ancestor.
    pub simplify_ancestor_merge: bool,
}

/// Configuration for [`MutableRepo::update_rewritten_references()`].
#[derive(Clone, Debug, Default)]
pub struct RewriteRefsOptions {
    /// Whether or not to delete bookmarks pointing to the abandoned commits.
    ///
    /// If false, bookmarks will be moved to the parents of the abandoned
    /// commit.
    pub delete_abandoned_bookmarks: bool,
}

/// Statistics returned by [`move_commits()`].
#[derive(Debug)]
pub struct MoveCommitsStats {
    /// The number of commits in the target set which were rebased.
    pub num_rebased_targets: u32,
    /// The number of descendant commits which were rebased.
    pub num_rebased_descendants: u32,
    /// The number of commits for which rebase was skipped, due to the commit
    /// already being in place.
    pub num_skipped_rebases: u32,
    /// The number of commits which were abandoned due to being empty.
    pub num_abandoned_empty: u32,
    /// The rebased commits
    pub rebased_commits: HashMap<CommitId, RebasedCommit>,
}

/// Target and destination commits to be rebased by [`move_commits()`].
#[derive(Clone, Debug)]
pub struct MoveCommitsLocation {
    /// New parents the target commits are rebased onto.
    pub new_parent_ids: Vec<CommitId>,
    /// Commits to be rebased onto the heads of the moved target commits.
    pub new_child_ids: Vec<CommitId>,
    /// Which commits to move.
    pub target: MoveCommitsTarget,
}

/// Selects the commits moved by [`move_commits()`].
#[derive(Clone, Debug)]
pub enum MoveCommitsTarget {
    /// The commits to be moved. Commits should be mutable and in reverse
    /// topological order.
    Commits(Vec<CommitId>),
    /// The root commits to be moved, along with all their descendants.
    Roots(Vec<CommitId>),
}
/// Precomputed plan for moving commits, created by [`compute_move_commits()`]
/// and executed by [`ComputedMoveCommits::apply()`].
#[derive(Clone, Debug)]
pub struct ComputedMoveCommits {
    // The commits being moved.
    target_commit_ids: IndexSet<CommitId>,
    // NOTE(review): presumably descendants that must be rebased along with
    // the targets — confirm against compute_move_commits().
    descendants: Vec<Commit>,
    // Maps a commit to the new parents it should be rebased onto.
    commit_new_parents_map: HashMap<CommitId, Vec<CommitId>>,
    // Commits to abandon during the rebase; see `record_to_abandon()`.
    to_abandon: HashSet<CommitId>,
}
impl ComputedMoveCommits {
    /// A plan that moves nothing at all.
    fn empty() -> Self {
        Self {
            target_commit_ids: IndexSet::default(),
            descendants: Vec::new(),
            commit_new_parents_map: HashMap::default(),
            to_abandon: HashSet::default(),
        }
    }

    /// Records a set of commits to abandon while rebasing.
    ///
    /// Abandoning these commits while rebasing ensures that their descendants
    /// are still rebased properly. [`MutableRepo::record_abandoned_commit`] is
    /// similar, but it can lead to issues when abandoning a target commit
    /// before the rebase.
    pub fn record_to_abandon(&mut self, commit_ids: impl IntoIterator<Item = CommitId>) {
        for commit_id in commit_ids {
            self.to_abandon.insert(commit_id);
        }
    }

    /// Executes the plan against `mut_repo`.
    pub fn apply(
        self,
        mut_repo: &mut MutableRepo,
        options: &RebaseOptions,
    ) -> BackendResult<MoveCommitsStats> {
        apply_move_commits(mut_repo, self, options)
    }
}
/// Moves `loc.target` commits from their current location to a new location in
/// the graph.
///
/// Commits in `target` are rebased onto the new parents given by
/// `new_parent_ids`, while the `new_child_ids` commits are rebased onto the
/// heads of the commits in `targets`. This assumes that commits in `target` and
/// `new_child_ids` can be rewritten, and there will be no cycles in the
/// resulting graph. Commits in `target` should be in reverse topological order.
pub fn move_commits(
    mut_repo: &mut MutableRepo,
    loc: &MoveCommitsLocation,
    options: &RebaseOptions,
) -> BackendResult<MoveCommitsStats> {
    // Compute the plan first, then apply it to the repo.
    let computed = compute_move_commits(mut_repo, loc)?;
    computed.apply(mut_repo, options)
}
/// Computes the new parent assignments needed to move the commits described by
/// `loc`, without rewriting anything yet. The result is applied by
/// `move_commits` via [`ComputedMoveCommits`].
pub fn compute_move_commits(
    repo: &MutableRepo,
    loc: &MoveCommitsLocation,
) -> BackendResult<ComputedMoveCommits> {
    let target_commit_ids: IndexSet<CommitId>;
    let connected_target_commits: Vec<Commit>;
    let connected_target_commits_internal_parents: HashMap<CommitId, IndexSet<CommitId>>;
    let target_roots: HashSet<CommitId>;
    // Resolve the set of commits being moved, plus enough of the surrounding
    // graph structure to determine the roots of that set.
    match &loc.target {
        MoveCommitsTarget::Commits(commit_ids) => {
            if commit_ids.is_empty() {
                return Ok(ComputedMoveCommits::empty());
            }
            target_commit_ids = commit_ids.iter().cloned().collect();
            connected_target_commits = RevsetExpression::commits(commit_ids.clone())
                .connected()
                .evaluate(repo)
                .map_err(|err| err.into_backend_error())?
                .iter()
                .commits(repo.store())
                .try_collect()
                .map_err(|err| err.into_backend_error())?;
            connected_target_commits_internal_parents =
                compute_internal_parents_within(&target_commit_ids, &connected_target_commits);
            // Roots are target commits with no (transitive) parents inside the
            // target set.
            target_roots = connected_target_commits_internal_parents
                .iter()
                .filter(|&(commit_id, parents)| {
                    target_commit_ids.contains(commit_id) && parents.is_empty()
                })
                .map(|(commit_id, _)| commit_id.clone())
                .collect();
        }
        MoveCommitsTarget::Roots(root_ids) => {
            if root_ids.is_empty() {
                return Ok(ComputedMoveCommits::empty());
            }
            target_commit_ids = RevsetExpression::commits(root_ids.clone())
                .descendants()
                .evaluate(repo)
                .map_err(|err| err.into_backend_error())?
                .iter()
                .try_collect()
                .map_err(|err| err.into_backend_error())?;
            connected_target_commits = target_commit_ids
                .iter()
                .map(|id| repo.store().get_commit(id))
                .try_collect()?;
            // We don't have to compute the internal parents for the connected target set,
            // since the connected target set is the same as the target set.
            connected_target_commits_internal_parents = HashMap::new();
            target_roots = root_ids.iter().cloned().collect();
        }
    }
    // If a commit outside the target set has a commit in the target set as a
    // parent, then - after the transformation - it should have that commit's
    // ancestors which are not in the target set as parents.
    let mut target_commits_external_parents: HashMap<CommitId, IndexSet<CommitId>> = HashMap::new();
    for id in target_commit_ids.iter().rev() {
        let commit = repo.store().get_commit(id)?;
        let mut new_parents = IndexSet::new();
        for old_parent in commit.parent_ids() {
            if let Some(parents) = target_commits_external_parents.get(old_parent) {
                new_parents.extend(parents.iter().cloned());
            } else {
                new_parents.insert(old_parent.clone());
            }
        }
        target_commits_external_parents.insert(commit.id().clone(), new_parents);
    }
    // If the new parents include a commit in the target set, replace it with the
    // commit's ancestors which are outside the set.
    // e.g. `jj rebase -r A --before A`
    let new_parent_ids: Vec<_> = loc
        .new_parent_ids
        .iter()
        .flat_map(|parent_id| {
            if let Some(parent_ids) = target_commits_external_parents.get(parent_id) {
                parent_ids.iter().cloned().collect_vec()
            } else {
                vec![parent_id.clone()]
            }
        })
        .collect();
    // If the new children include a commit in the target set, replace it with the
    // commit's descendants which are outside the set.
    // e.g. `jj rebase -r A --after A`
    let new_children: Vec<_> = if loc
        .new_child_ids
        .iter()
        .any(|id| target_commit_ids.contains(id))
    {
        let target_commits_descendants: Vec<_> =
            RevsetExpression::commits(target_commit_ids.iter().cloned().collect_vec())
                .union(
                    &RevsetExpression::commits(target_commit_ids.iter().cloned().collect_vec())
                        .children(),
                )
                .evaluate(repo)
                .map_err(|err| err.into_backend_error())?
                .iter()
                .commits(repo.store())
                .try_collect()
                .map_err(|err| err.into_backend_error())?;
        // For all commits in the target set, compute its transitive descendant commits
        // which are outside of the target set by up to 1 generation.
        let mut target_commit_external_descendants: HashMap<CommitId, IndexSet<Commit>> =
            HashMap::new();
        // Iterate through all descendants of the target set, going through children
        // before parents.
        for commit in &target_commits_descendants {
            if !target_commit_external_descendants.contains_key(commit.id()) {
                let children = if target_commit_ids.contains(commit.id()) {
                    IndexSet::new()
                } else {
                    IndexSet::from([commit.clone()])
                };
                target_commit_external_descendants.insert(commit.id().clone(), children);
            }
            // Propagate this commit's external descendants down to its parents
            // inside the target set.
            let children = target_commit_external_descendants
                .get(commit.id())
                .unwrap()
                .iter()
                .cloned()
                .collect_vec();
            for parent_id in commit.parent_ids() {
                if target_commit_ids.contains(parent_id) {
                    if let Some(target_children) =
                        target_commit_external_descendants.get_mut(parent_id)
                    {
                        target_children.extend(children.iter().cloned());
                    } else {
                        target_commit_external_descendants
                            .insert(parent_id.clone(), children.iter().cloned().collect());
                    }
                };
            }
        }
        let mut new_children = Vec::new();
        for id in &loc.new_child_ids {
            if let Some(children) = target_commit_external_descendants.get(id) {
                new_children.extend(children.iter().cloned());
            } else {
                new_children.push(repo.store().get_commit(id)?);
            }
        }
        new_children
    } else {
        loc.new_child_ids
            .iter()
            .map(|id| repo.store().get_commit(id))
            .try_collect()?
    };
    // Compute the parents of the new children, which will include the heads of the
    // target set.
    let new_children_parents: HashMap<_, _> = if !new_children.is_empty() {
        // Compute the heads of the target set, which will be used as the parents of
        // `new_children`.
        let target_heads = compute_commits_heads(&target_commit_ids, &connected_target_commits);
        new_children
            .iter()
            .map(|child_commit| {
                let mut new_child_parent_ids = IndexSet::new();
                for old_child_parent_id in child_commit.parent_ids() {
                    // Replace target commits with their parents outside the target set.
                    let old_child_parent_ids = if let Some(parents) =
                        target_commits_external_parents.get(old_child_parent_id)
                    {
                        parents.iter().collect_vec()
                    } else {
                        vec![old_child_parent_id]
                    };
                    // If the original parents of the new children are the new parents of the
                    // `target_heads`, replace them with the target heads since we are "inserting"
                    // the target commits in between the new parents and the new children.
                    for id in old_child_parent_ids {
                        if new_parent_ids.contains(id) {
                            new_child_parent_ids.extend(target_heads.clone());
                        } else {
                            new_child_parent_ids.insert(id.clone());
                        };
                    }
                }
                // If not already present, add `target_heads` as parents of the new child
                // commit.
                new_child_parent_ids.extend(target_heads.clone());
                (
                    child_commit.id().clone(),
                    new_child_parent_ids.into_iter().collect_vec(),
                )
            })
            .collect()
    } else {
        HashMap::new()
    };
    // Compute the set of commits to visit, which includes the target commits, the
    // new children commits (if any), and their descendants.
    let mut roots = target_roots.iter().cloned().collect_vec();
    roots.extend(new_children.iter().ids().cloned());
    // Map every commit that will be visited to the parents it should have
    // after the move.
    let descendants = repo.find_descendants_for_rebase(roots.clone())?;
    let commit_new_parents_map = descendants
        .iter()
        .map(|commit| -> BackendResult<_> {
            let commit_id = commit.id();
            let new_parent_ids =
                if let Some(new_child_parents) = new_children_parents.get(commit_id) {
                    // New child of the rebased target commits.
                    new_child_parents.clone()
                } else if target_commit_ids.contains(commit_id) {
                    // Commit is in the target set.
                    if target_roots.contains(commit_id) {
                        // If the commit is a root of the target set, it should be rebased onto the
                        // new destination.
                        new_parent_ids.clone()
                    } else {
                        // Otherwise:
                        // 1. Keep parents which are within the target set.
                        // 2. Replace parents which are outside the target set but are part of the
                        //    connected target set with their ancestor commits which are in the
                        //    target set.
                        // 3. Keep other parents outside the target set if they are not descendants
                        //    of the new children of the target set.
                        let mut new_parents = vec![];
                        for parent_id in commit.parent_ids() {
                            if target_commit_ids.contains(parent_id) {
                                new_parents.push(parent_id.clone());
                            } else if let Some(parents) =
                                connected_target_commits_internal_parents.get(parent_id)
                            {
                                new_parents.extend(parents.iter().cloned());
                            } else if !fallible_any(&new_children, |child| {
                                repo.index().is_ancestor(child.id(), parent_id)
                            })
                            // TODO: indexing error shouldn't be a "BackendError"
                            .map_err(|err| BackendError::Other(err.into()))?
                            {
                                new_parents.push(parent_id.clone());
                            }
                        }
                        new_parents
                    }
                } else if commit
                    .parent_ids()
                    .iter()
                    .any(|id| target_commits_external_parents.contains_key(id))
                {
                    // Commits outside the target set should have references to commits inside the
                    // set replaced.
                    let mut new_parents = vec![];
                    for parent in commit.parent_ids() {
                        if let Some(parents) = target_commits_external_parents.get(parent) {
                            new_parents.extend(parents.iter().cloned());
                        } else {
                            new_parents.push(parent.clone());
                        }
                    }
                    new_parents
                } else {
                    commit.parent_ids().iter().cloned().collect_vec()
                };
            Ok((commit.id().clone(), new_parent_ids))
        })
        .try_collect()?;
    Ok(ComputedMoveCommits {
        target_commit_ids,
        descendants,
        commit_new_parents_map,
        to_abandon: HashSet::new(),
    })
}
fn apply_move_commits(
mut_repo: &mut MutableRepo,
commits: ComputedMoveCommits,
options: &RebaseOptions,
) -> BackendResult<MoveCommitsStats> {
let mut num_rebased_targets = 0;
let mut num_rebased_descendants = 0;
let mut num_skipped_rebases = 0;
let mut num_abandoned_empty = 0;
// Always keep empty commits when rebasing descendants.
let rebase_descendant_options = &RebaseOptions {
empty: EmptyBehavior::Keep,
rewrite_refs: options.rewrite_refs.clone(),
simplify_ancestor_merge: options.simplify_ancestor_merge,
};
let mut rebased_commits: HashMap<CommitId, RebasedCommit> = HashMap::new();
mut_repo.transform_commits(
commits.descendants,
&commits.commit_new_parents_map,
&options.rewrite_refs,
async |rewriter| {
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/repo.rs | lib/src/repo.rs | // Copyright 2020 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![expect(missing_docs)]
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::fmt::Formatter;
use std::fs;
use std::path::Path;
use std::slice;
use std::sync::Arc;
use itertools::Itertools as _;
use once_cell::sync::OnceCell;
use pollster::FutureExt as _;
use thiserror::Error;
use tracing::instrument;
use self::dirty_cell::DirtyCell;
use crate::backend::Backend;
use crate::backend::BackendError;
use crate::backend::BackendInitError;
use crate::backend::BackendLoadError;
use crate::backend::BackendResult;
use crate::backend::ChangeId;
use crate::backend::CommitId;
use crate::commit::Commit;
use crate::commit::CommitByCommitterTimestamp;
use crate::commit_builder::CommitBuilder;
use crate::commit_builder::DetachedCommitBuilder;
use crate::dag_walk;
use crate::default_index::DefaultIndexStore;
use crate::default_index::DefaultMutableIndex;
use crate::default_submodule_store::DefaultSubmoduleStore;
use crate::file_util::IoResultExt as _;
use crate::file_util::PathError;
use crate::index::ChangeIdIndex;
use crate::index::Index;
use crate::index::IndexError;
use crate::index::IndexResult;
use crate::index::IndexStore;
use crate::index::IndexStoreError;
use crate::index::MutableIndex;
use crate::index::ReadonlyIndex;
use crate::index::ResolvedChangeTargets;
use crate::merge::MergeBuilder;
use crate::merge::SameChange;
use crate::merge::trivial_merge;
use crate::merged_tree::MergedTree;
use crate::object_id::HexPrefix;
use crate::object_id::PrefixResolution;
use crate::op_heads_store;
use crate::op_heads_store::OpHeadResolutionError;
use crate::op_heads_store::OpHeadsStore;
use crate::op_heads_store::OpHeadsStoreError;
use crate::op_store;
use crate::op_store::OpStore;
use crate::op_store::OpStoreError;
use crate::op_store::OpStoreResult;
use crate::op_store::OperationId;
use crate::op_store::RefTarget;
use crate::op_store::RemoteRef;
use crate::op_store::RemoteRefState;
use crate::op_store::RootOperationData;
use crate::operation::Operation;
use crate::ref_name::GitRefName;
use crate::ref_name::RefName;
use crate::ref_name::RemoteName;
use crate::ref_name::RemoteRefSymbol;
use crate::ref_name::WorkspaceName;
use crate::ref_name::WorkspaceNameBuf;
use crate::refs::diff_named_commit_ids;
use crate::refs::diff_named_ref_targets;
use crate::refs::diff_named_remote_refs;
use crate::refs::merge_ref_targets;
use crate::refs::merge_remote_refs;
use crate::revset;
use crate::revset::RevsetEvaluationError;
use crate::revset::RevsetExpression;
use crate::revset::RevsetIteratorExt as _;
use crate::rewrite::CommitRewriter;
use crate::rewrite::RebaseOptions;
use crate::rewrite::RebasedCommit;
use crate::rewrite::RewriteRefsOptions;
use crate::rewrite::merge_commit_trees;
use crate::rewrite::rebase_commit_with_options;
use crate::settings::UserSettings;
use crate::signing::SignInitError;
use crate::signing::Signer;
use crate::simple_backend::SimpleBackend;
use crate::simple_op_heads_store::SimpleOpHeadsStore;
use crate::simple_op_store::SimpleOpStore;
use crate::store::Store;
use crate::submodule_store::SubmoduleStore;
use crate::transaction::Transaction;
use crate::transaction::TransactionCommitError;
use crate::tree_merge::MergeOptions;
use crate::view::RenameWorkspaceError;
use crate::view::View;
/// Read access to the main pieces of repo state: the backing stores, the
/// commit index, and the view.
pub trait Repo {
    /// Base repository that contains all committed data. Returns `self` if this
    /// is a `ReadonlyRepo`.
    fn base_repo(&self) -> &ReadonlyRepo;
    /// The commit/tree content store.
    fn store(&self) -> &Arc<Store>;
    /// The operation store.
    fn op_store(&self) -> &Arc<dyn OpStore>;
    /// The commit index, used for ancestry queries and prefix resolution.
    fn index(&self) -> &dyn Index;
    /// The view of the repo (heads, refs, etc.) at the current state.
    fn view(&self) -> &View;
    /// The store for submodules.
    fn submodule_store(&self) -> &Arc<dyn SubmoduleStore>;
    /// Resolves a complete change id to its target commits, or `None` if no
    /// such change exists.
    fn resolve_change_id(
        &self,
        change_id: &ChangeId,
    ) -> IndexResult<Option<ResolvedChangeTargets>> {
        // Replace this if we added more efficient lookup method.
        let prefix = HexPrefix::from_id(change_id);
        match self.resolve_change_id_prefix(&prefix)? {
            PrefixResolution::NoMatch => Ok(None),
            PrefixResolution::SingleMatch(entries) => Ok(Some(entries)),
            // A full-length change id can never be a prefix of more than one
            // change, so an ambiguous match is a bug.
            PrefixResolution::AmbiguousMatch => panic!("complete change_id should be unambiguous"),
        }
    }
    /// Resolves a (possibly abbreviated) change-id prefix.
    fn resolve_change_id_prefix(
        &self,
        prefix: &HexPrefix,
    ) -> IndexResult<PrefixResolution<ResolvedChangeTargets>>;
    /// Returns the minimum prefix length of `target_id_bytes` that is
    /// unambiguous among all change ids.
    fn shortest_unique_change_id_prefix_len(
        &self,
        target_id_bytes: &ChangeId,
    ) -> IndexResult<usize>;
}
/// An immutable repository snapshot pinned at a single operation.
pub struct ReadonlyRepo {
    // Shared stores and settings used to (re)load repos.
    loader: RepoLoader,
    // The operation this snapshot was loaded at.
    operation: Operation,
    // Index of the commits visible at `operation`.
    index: Box<dyn ReadonlyIndex>,
    // Lazily-initialized index for change-id (prefix) resolution.
    change_id_index: OnceCell<Box<dyn ChangeIdIndex>>,
    // TODO: This should eventually become part of the index and not be stored fully in memory.
    view: View,
}
impl Debug for ReadonlyRepo {
    /// Prints a compact representation: only the store field is shown, the
    /// rest is elided with `..`.
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
        let mut builder = f.debug_struct("ReadonlyRepo");
        builder.field("store", &self.loader.store);
        builder.finish_non_exhaustive()
    }
}
/// Error returned when initializing a new repository fails.
#[derive(Error, Debug)]
pub enum RepoInitError {
    /// A store backend failed to initialize.
    #[error(transparent)]
    Backend(#[from] BackendInitError),
    /// The operation-heads store failed.
    #[error(transparent)]
    OpHeadsStore(#[from] OpHeadsStoreError),
    /// A filesystem path could not be created or accessed.
    #[error(transparent)]
    Path(#[from] PathError),
}
impl ReadonlyRepo {
    /// Default op-store initializer, backed by [`SimpleOpStore`].
    pub fn default_op_store_initializer() -> &'static OpStoreInitializer<'static> {
        &|_settings, store_path, root_data| {
            Ok(Box::new(SimpleOpStore::init(store_path, root_data)?))
        }
    }
    /// Default op-heads-store initializer, backed by [`SimpleOpHeadsStore`].
    pub fn default_op_heads_store_initializer() -> &'static OpHeadsStoreInitializer<'static> {
        &|_settings, store_path| Ok(Box::new(SimpleOpHeadsStore::init(store_path)?))
    }
    /// Default index-store initializer, backed by [`DefaultIndexStore`].
    pub fn default_index_store_initializer() -> &'static IndexStoreInitializer<'static> {
        &|_settings, store_path| Ok(Box::new(DefaultIndexStore::init(store_path)?))
    }
    /// Default submodule-store initializer, backed by [`DefaultSubmoduleStore`].
    pub fn default_submodule_store_initializer() -> &'static SubmoduleStoreInitializer<'static> {
        &|_settings, store_path| Ok(Box::new(DefaultSubmoduleStore::init(store_path)))
    }
    /// Initializes a new repository on disk under `repo_path`.
    ///
    /// Creates the `store`, `op_store`, `op_heads`, `index`, and
    /// `submodule_store` subdirectories, initializes each store through the
    /// given initializer, and records each store's name in a `type` file so
    /// the right factory can be picked when reloading. Returns the repo
    /// loaded at the root operation.
    #[expect(clippy::too_many_arguments)]
    pub fn init(
        settings: &UserSettings,
        repo_path: &Path,
        backend_initializer: &BackendInitializer,
        signer: Signer,
        op_store_initializer: &OpStoreInitializer,
        op_heads_store_initializer: &OpHeadsStoreInitializer,
        index_store_initializer: &IndexStoreInitializer,
        submodule_store_initializer: &SubmoduleStoreInitializer,
    ) -> Result<Arc<Self>, RepoInitError> {
        let repo_path = dunce::canonicalize(repo_path).context(repo_path)?;
        // Commit store.
        let store_path = repo_path.join("store");
        fs::create_dir(&store_path).context(&store_path)?;
        let backend = backend_initializer(settings, &store_path)?;
        let backend_path = store_path.join("type");
        fs::write(&backend_path, backend.name()).context(&backend_path)?;
        let merge_options =
            MergeOptions::from_settings(settings).map_err(|err| BackendInitError(err.into()))?;
        let store = Store::new(backend, signer, merge_options);
        // Operation store.
        let op_store_path = repo_path.join("op_store");
        fs::create_dir(&op_store_path).context(&op_store_path)?;
        let root_op_data = RootOperationData {
            root_commit_id: store.root_commit_id().clone(),
        };
        let op_store = op_store_initializer(settings, &op_store_path, root_op_data)?;
        let op_store_type_path = op_store_path.join("type");
        fs::write(&op_store_type_path, op_store.name()).context(&op_store_type_path)?;
        let op_store: Arc<dyn OpStore> = Arc::from(op_store);
        // Operation-heads store, seeded with the root operation as head.
        let op_heads_path = repo_path.join("op_heads");
        fs::create_dir(&op_heads_path).context(&op_heads_path)?;
        let op_heads_store = op_heads_store_initializer(settings, &op_heads_path)?;
        let op_heads_type_path = op_heads_path.join("type");
        fs::write(&op_heads_type_path, op_heads_store.name()).context(&op_heads_type_path)?;
        op_heads_store
            .update_op_heads(&[], op_store.root_operation_id())
            .block_on()?;
        let op_heads_store: Arc<dyn OpHeadsStore> = Arc::from(op_heads_store);
        // Index store.
        let index_path = repo_path.join("index");
        fs::create_dir(&index_path).context(&index_path)?;
        let index_store = index_store_initializer(settings, &index_path)?;
        let index_type_path = index_path.join("type");
        fs::write(&index_type_path, index_store.name()).context(&index_type_path)?;
        let index_store: Arc<dyn IndexStore> = Arc::from(index_store);
        // Submodule store.
        let submodule_store_path = repo_path.join("submodule_store");
        fs::create_dir(&submodule_store_path).context(&submodule_store_path)?;
        let submodule_store = submodule_store_initializer(settings, &submodule_store_path)?;
        let submodule_store_type_path = submodule_store_path.join("type");
        fs::write(&submodule_store_type_path, submodule_store.name())
            .context(&submodule_store_type_path)?;
        let submodule_store = Arc::from(submodule_store);
        let loader = RepoLoader {
            settings: settings.clone(),
            store,
            op_store,
            op_heads_store,
            index_store,
            submodule_store,
        };
        let root_operation = loader.root_operation();
        let root_view = root_operation.view().expect("failed to read root view");
        assert!(!root_view.heads().is_empty());
        let index = loader
            .index_store
            .get_index_at_op(&root_operation, &loader.store)
            // If the root op index couldn't be read, the index backend wouldn't
            // be initialized properly.
            .map_err(|err| BackendInitError(err.into()))?;
        Ok(Arc::new(Self {
            loader,
            operation: root_operation,
            index,
            change_id_index: OnceCell::new(),
            view: root_view,
        }))
    }
    /// The loader this repo was created from.
    pub fn loader(&self) -> &RepoLoader {
        &self.loader
    }
    /// Id of the operation this repo is pinned at.
    pub fn op_id(&self) -> &OperationId {
        self.operation.id()
    }
    /// The operation this repo is pinned at.
    pub fn operation(&self) -> &Operation {
        &self.operation
    }
    /// The view of the repo at this operation.
    pub fn view(&self) -> &View {
        &self.view
    }
    /// The readonly index for this repo's operation.
    pub fn readonly_index(&self) -> &dyn ReadonlyIndex {
        self.index.as_ref()
    }
    // Builds the change-id index on first use, based on the current heads.
    fn change_id_index(&self) -> &dyn ChangeIdIndex {
        self.change_id_index
            .get_or_init(|| {
                self.readonly_index()
                    .change_id_index(&mut self.view().heads().iter())
            })
            .as_ref()
    }
    /// The operation-heads store.
    pub fn op_heads_store(&self) -> &Arc<dyn OpHeadsStore> {
        self.loader.op_heads_store()
    }
    /// The index store.
    pub fn index_store(&self) -> &Arc<dyn IndexStore> {
        self.loader.index_store()
    }
    /// The user settings this repo was loaded with.
    pub fn settings(&self) -> &UserSettings {
        self.loader.settings()
    }
    /// Starts a transaction that mutates a copy of this repo's state.
    pub fn start_transaction(self: &Arc<Self>) -> Transaction {
        let mut_repo = MutableRepo::new(self.clone(), self.readonly_index(), &self.view);
        Transaction::new(mut_repo, self.settings())
    }
    /// Reloads the repo at the current head operation.
    pub fn reload_at_head(&self) -> Result<Arc<Self>, RepoLoaderError> {
        self.loader().load_at_head()
    }
    /// Reloads the repo at the given operation.
    #[instrument]
    pub fn reload_at(&self, operation: &Operation) -> Result<Arc<Self>, RepoLoaderError> {
        self.loader().load_at(operation)
    }
}
// All methods delegate to the loader or to the repo's own pinned state.
impl Repo for ReadonlyRepo {
    fn base_repo(&self) -> &ReadonlyRepo {
        // A readonly repo is its own base repo.
        self
    }
    fn store(&self) -> &Arc<Store> {
        self.loader.store()
    }
    fn op_store(&self) -> &Arc<dyn OpStore> {
        self.loader.op_store()
    }
    fn index(&self) -> &dyn Index {
        self.readonly_index().as_index()
    }
    fn view(&self) -> &View {
        &self.view
    }
    fn submodule_store(&self) -> &Arc<dyn SubmoduleStore> {
        self.loader.submodule_store()
    }
    fn resolve_change_id_prefix(
        &self,
        prefix: &HexPrefix,
    ) -> IndexResult<PrefixResolution<ResolvedChangeTargets>> {
        // Uses the lazily-built change-id index.
        self.change_id_index().resolve_prefix(prefix)
    }
    fn shortest_unique_change_id_prefix_len(&self, target_id: &ChangeId) -> IndexResult<usize> {
        self.change_id_index().shortest_unique_prefix_len(target_id)
    }
}
// Public initializer callbacks used by `ReadonlyRepo::init` to create new
// stores; each returns the freshly-initialized store or an init error.
/// Creates a new commit backend at the given store path.
pub type BackendInitializer<'a> =
    dyn Fn(&UserSettings, &Path) -> Result<Box<dyn Backend>, BackendInitError> + 'a;
/// Creates a new operation store at the given store path.
#[rustfmt::skip] // auto-formatted line would exceed the maximum width
pub type OpStoreInitializer<'a> =
    dyn Fn(&UserSettings, &Path, RootOperationData) -> Result<Box<dyn OpStore>, BackendInitError>
    + 'a;
/// Creates a new operation-heads store at the given store path.
pub type OpHeadsStoreInitializer<'a> =
    dyn Fn(&UserSettings, &Path) -> Result<Box<dyn OpHeadsStore>, BackendInitError> + 'a;
/// Creates a new index store at the given store path.
pub type IndexStoreInitializer<'a> =
    dyn Fn(&UserSettings, &Path) -> Result<Box<dyn IndexStore>, BackendInitError> + 'a;
/// Creates a new submodule store at the given store path.
pub type SubmoduleStoreInitializer<'a> =
    dyn Fn(&UserSettings, &Path) -> Result<Box<dyn SubmoduleStore>, BackendInitError> + 'a;
// Private factory callbacks used by `StoreFactories` to load existing stores.
type BackendFactory =
    Box<dyn Fn(&UserSettings, &Path) -> Result<Box<dyn Backend>, BackendLoadError>>;
type OpStoreFactory = Box<
    dyn Fn(&UserSettings, &Path, RootOperationData) -> Result<Box<dyn OpStore>, BackendLoadError>,
>;
type OpHeadsStoreFactory =
    Box<dyn Fn(&UserSettings, &Path) -> Result<Box<dyn OpHeadsStore>, BackendLoadError>>;
type IndexStoreFactory =
    Box<dyn Fn(&UserSettings, &Path) -> Result<Box<dyn IndexStore>, BackendLoadError>>;
type SubmoduleStoreFactory =
    Box<dyn Fn(&UserSettings, &Path) -> Result<Box<dyn SubmoduleStore>, BackendLoadError>>;
/// Moves every factory registered in `ext` into `base`.
///
/// Panics if `ext` contains a name that is already registered in `base`.
pub fn merge_factories_map<F>(base: &mut HashMap<String, F>, ext: HashMap<String, F>) {
    for (name, factory) in ext {
        if base.contains_key(&name) {
            panic!("Conflicting factory definitions for '{name}' factory");
        }
        base.insert(name, factory);
    }
}
/// Registry of store factories keyed by the backend name recorded in each
/// store's `type` file; used to load existing repos.
pub struct StoreFactories {
    backend_factories: HashMap<String, BackendFactory>,
    op_store_factories: HashMap<String, OpStoreFactory>,
    op_heads_store_factories: HashMap<String, OpHeadsStoreFactory>,
    index_store_factories: HashMap<String, IndexStoreFactory>,
    submodule_store_factories: HashMap<String, SubmoduleStoreFactory>,
}
impl Default for StoreFactories {
    /// Registers the factories for all store implementations built into this
    /// crate (plus the Git and testing backends when those cargo features are
    /// enabled).
    fn default() -> Self {
        let mut factories = Self::empty();
        // Backends
        factories.add_backend(
            SimpleBackend::name(),
            Box::new(|_settings, store_path| Ok(Box::new(SimpleBackend::load(store_path)))),
        );
        #[cfg(feature = "git")]
        factories.add_backend(
            crate::git_backend::GitBackend::name(),
            Box::new(|settings, store_path| {
                Ok(Box::new(crate::git_backend::GitBackend::load(
                    settings, store_path,
                )?))
            }),
        );
        #[cfg(feature = "testing")]
        factories.add_backend(
            crate::secret_backend::SecretBackend::name(),
            Box::new(|settings, store_path| {
                Ok(Box::new(crate::secret_backend::SecretBackend::load(
                    settings, store_path,
                )?))
            }),
        );
        // OpStores
        factories.add_op_store(
            SimpleOpStore::name(),
            Box::new(|_settings, store_path, root_data| {
                Ok(Box::new(SimpleOpStore::load(store_path, root_data)))
            }),
        );
        // OpHeadsStores
        factories.add_op_heads_store(
            SimpleOpHeadsStore::name(),
            Box::new(|_settings, store_path| Ok(Box::new(SimpleOpHeadsStore::load(store_path)))),
        );
        // Index
        factories.add_index_store(
            DefaultIndexStore::name(),
            Box::new(|_settings, store_path| Ok(Box::new(DefaultIndexStore::load(store_path)))),
        );
        // SubmoduleStores
        factories.add_submodule_store(
            DefaultSubmoduleStore::name(),
            Box::new(|_settings, store_path| Ok(Box::new(DefaultSubmoduleStore::load(store_path)))),
        );
        factories
    }
}
/// Error returned when loading one of a repo's backing stores fails.
#[derive(Debug, Error)]
pub enum StoreLoadError {
    /// The `type` file named a backend with no registered factory.
    #[error("Unsupported {store} backend type '{store_type}'")]
    UnsupportedType {
        store: &'static str,
        store_type: String,
    },
    /// The `type` file itself could not be read.
    #[error("Failed to read {store} backend type")]
    ReadError {
        store: &'static str,
        source: PathError,
    },
    /// The selected factory failed to load the store.
    #[error(transparent)]
    Backend(#[from] BackendLoadError),
    /// The commit signer failed to initialize.
    #[error(transparent)]
    Signing(#[from] SignInitError),
}
impl StoreFactories {
    /// Creates a registry with no factories registered.
    pub fn empty() -> Self {
        Self {
            backend_factories: HashMap::new(),
            op_store_factories: HashMap::new(),
            op_heads_store_factories: HashMap::new(),
            index_store_factories: HashMap::new(),
            submodule_store_factories: HashMap::new(),
        }
    }
    /// Merges the factories registered in `ext` into `self`.
    ///
    /// Panics on a conflicting registration (see `merge_factories_map`).
    pub fn merge(&mut self, ext: Self) {
        let Self {
            backend_factories,
            op_store_factories,
            op_heads_store_factories,
            index_store_factories,
            submodule_store_factories,
        } = ext;
        merge_factories_map(&mut self.backend_factories, backend_factories);
        merge_factories_map(&mut self.op_store_factories, op_store_factories);
        merge_factories_map(&mut self.op_heads_store_factories, op_heads_store_factories);
        merge_factories_map(&mut self.index_store_factories, index_store_factories);
        merge_factories_map(
            &mut self.submodule_store_factories,
            submodule_store_factories,
        );
    }
    /// Registers a commit-backend factory under `name`.
    pub fn add_backend(&mut self, name: &str, factory: BackendFactory) {
        self.backend_factories.insert(name.to_string(), factory);
    }
    /// Loads the commit backend whose name is recorded in `store_path/type`.
    pub fn load_backend(
        &self,
        settings: &UserSettings,
        store_path: &Path,
    ) -> Result<Box<dyn Backend>, StoreLoadError> {
        let backend_type = read_store_type("commit", store_path.join("type"))?;
        let backend_factory = self.backend_factories.get(&backend_type).ok_or_else(|| {
            StoreLoadError::UnsupportedType {
                store: "commit",
                store_type: backend_type.clone(),
            }
        })?;
        Ok(backend_factory(settings, store_path)?)
    }
    /// Registers an op-store factory under `name`.
    pub fn add_op_store(&mut self, name: &str, factory: OpStoreFactory) {
        self.op_store_factories.insert(name.to_string(), factory);
    }
    /// Loads the op store whose name is recorded in `store_path/type`.
    pub fn load_op_store(
        &self,
        settings: &UserSettings,
        store_path: &Path,
        root_data: RootOperationData,
    ) -> Result<Box<dyn OpStore>, StoreLoadError> {
        let op_store_type = read_store_type("operation", store_path.join("type"))?;
        let op_store_factory = self.op_store_factories.get(&op_store_type).ok_or_else(|| {
            StoreLoadError::UnsupportedType {
                store: "operation",
                store_type: op_store_type.clone(),
            }
        })?;
        Ok(op_store_factory(settings, store_path, root_data)?)
    }
    /// Registers an op-heads-store factory under `name`.
    pub fn add_op_heads_store(&mut self, name: &str, factory: OpHeadsStoreFactory) {
        self.op_heads_store_factories
            .insert(name.to_string(), factory);
    }
    /// Loads the op-heads store whose name is recorded in `store_path/type`.
    pub fn load_op_heads_store(
        &self,
        settings: &UserSettings,
        store_path: &Path,
    ) -> Result<Box<dyn OpHeadsStore>, StoreLoadError> {
        let op_heads_store_type = read_store_type("operation heads", store_path.join("type"))?;
        let op_heads_store_factory = self
            .op_heads_store_factories
            .get(&op_heads_store_type)
            .ok_or_else(|| StoreLoadError::UnsupportedType {
                store: "operation heads",
                store_type: op_heads_store_type.clone(),
            })?;
        Ok(op_heads_store_factory(settings, store_path)?)
    }
    /// Registers an index-store factory under `name`.
    pub fn add_index_store(&mut self, name: &str, factory: IndexStoreFactory) {
        self.index_store_factories.insert(name.to_string(), factory);
    }
    /// Loads the index store whose name is recorded in `store_path/type`.
    pub fn load_index_store(
        &self,
        settings: &UserSettings,
        store_path: &Path,
    ) -> Result<Box<dyn IndexStore>, StoreLoadError> {
        let index_store_type = read_store_type("index", store_path.join("type"))?;
        let index_store_factory = self
            .index_store_factories
            .get(&index_store_type)
            .ok_or_else(|| StoreLoadError::UnsupportedType {
                store: "index",
                store_type: index_store_type.clone(),
            })?;
        Ok(index_store_factory(settings, store_path)?)
    }
    /// Registers a submodule-store factory under `name`.
    pub fn add_submodule_store(&mut self, name: &str, factory: SubmoduleStoreFactory) {
        self.submodule_store_factories
            .insert(name.to_string(), factory);
    }
    /// Loads the submodule store whose name is recorded in `store_path/type`.
    pub fn load_submodule_store(
        &self,
        settings: &UserSettings,
        store_path: &Path,
    ) -> Result<Box<dyn SubmoduleStore>, StoreLoadError> {
        let submodule_store_type = read_store_type("submodule_store", store_path.join("type"))?;
        let submodule_store_factory = self
            .submodule_store_factories
            .get(&submodule_store_type)
            .ok_or_else(|| StoreLoadError::UnsupportedType {
                store: "submodule_store",
                store_type: submodule_store_type.clone(),
            })?;
        Ok(submodule_store_factory(settings, store_path)?)
    }
}
/// Reads the backend name recorded in the `type` file at `path`.
///
/// `store` identifies which kind of store is being loaded (e.g. "commit",
/// "index") and is only used to label the error on failure.
pub fn read_store_type(
    store: &'static str,
    path: impl AsRef<Path>,
) -> Result<String, StoreLoadError> {
    let path = path.as_ref();
    match fs::read_to_string(path).context(path) {
        Ok(contents) => Ok(contents),
        Err(source) => Err(StoreLoadError::ReadError { store, source }),
    }
}
/// Error returned when loading a repo at an operation fails.
#[derive(Debug, Error)]
pub enum RepoLoaderError {
    #[error(transparent)]
    Backend(#[from] BackendError),
    #[error(transparent)]
    Index(#[from] IndexError),
    #[error(transparent)]
    IndexStore(#[from] IndexStoreError),
    #[error(transparent)]
    OpHeadResolution(#[from] OpHeadResolutionError),
    #[error(transparent)]
    OpHeadsStoreError(#[from] OpHeadsStoreError),
    #[error(transparent)]
    OpStore(#[from] OpStoreError),
    #[error(transparent)]
    TransactionCommit(#[from] TransactionCommitError),
}
/// Helps create `ReadonlyRepo` instances of a repo at the head operation or at
/// a given operation.
#[derive(Clone)]
pub struct RepoLoader {
    // Settings each loaded repo will carry.
    settings: UserSettings,
    // Shared handles to the repo's backing stores.
    store: Arc<Store>,
    op_store: Arc<dyn OpStore>,
    op_heads_store: Arc<dyn OpHeadsStore>,
    index_store: Arc<dyn IndexStore>,
    submodule_store: Arc<dyn SubmoduleStore>,
}
impl RepoLoader {
pub fn new(
settings: UserSettings,
store: Arc<Store>,
op_store: Arc<dyn OpStore>,
op_heads_store: Arc<dyn OpHeadsStore>,
index_store: Arc<dyn IndexStore>,
submodule_store: Arc<dyn SubmoduleStore>,
) -> Self {
Self {
settings,
store,
op_store,
op_heads_store,
index_store,
submodule_store,
}
}
/// Creates a `RepoLoader` for the repo at `repo_path` by reading the
/// various `.jj/repo/<backend>/type` files and loading the right
/// backends from `store_factories`.
pub fn init_from_file_system(
settings: &UserSettings,
repo_path: &Path,
store_factories: &StoreFactories,
) -> Result<Self, StoreLoadError> {
let merge_options =
MergeOptions::from_settings(settings).map_err(|err| BackendLoadError(err.into()))?;
let store = Store::new(
store_factories.load_backend(settings, &repo_path.join("store"))?,
Signer::from_settings(settings)?,
merge_options,
);
let root_op_data = RootOperationData {
root_commit_id: store.root_commit_id().clone(),
};
let op_store = Arc::from(store_factories.load_op_store(
settings,
&repo_path.join("op_store"),
root_op_data,
)?);
let op_heads_store =
Arc::from(store_factories.load_op_heads_store(settings, &repo_path.join("op_heads"))?);
let index_store =
Arc::from(store_factories.load_index_store(settings, &repo_path.join("index"))?);
let submodule_store = Arc::from(
store_factories.load_submodule_store(settings, &repo_path.join("submodule_store"))?,
);
Ok(Self {
settings: settings.clone(),
store,
op_store,
op_heads_store,
index_store,
submodule_store,
})
}
pub fn settings(&self) -> &UserSettings {
&self.settings
}
pub fn store(&self) -> &Arc<Store> {
&self.store
}
pub fn index_store(&self) -> &Arc<dyn IndexStore> {
&self.index_store
}
pub fn op_store(&self) -> &Arc<dyn OpStore> {
&self.op_store
}
pub fn op_heads_store(&self) -> &Arc<dyn OpHeadsStore> {
&self.op_heads_store
}
pub fn submodule_store(&self) -> &Arc<dyn SubmoduleStore> {
&self.submodule_store
}
pub fn load_at_head(&self) -> Result<Arc<ReadonlyRepo>, RepoLoaderError> {
let op = op_heads_store::resolve_op_heads(
self.op_heads_store.as_ref(),
&self.op_store,
|op_heads| self.resolve_op_heads(op_heads),
)?;
let view = op.view()?;
self.finish_load(op, view)
}
#[instrument(skip(self))]
pub fn load_at(&self, op: &Operation) -> Result<Arc<ReadonlyRepo>, RepoLoaderError> {
let view = op.view()?;
self.finish_load(op.clone(), view)
}
pub fn create_from(
&self,
operation: Operation,
view: View,
index: Box<dyn ReadonlyIndex>,
) -> Arc<ReadonlyRepo> {
let repo = ReadonlyRepo {
loader: self.clone(),
operation,
index,
change_id_index: OnceCell::new(),
view,
};
Arc::new(repo)
}
// If we add a higher-level abstraction of OpStore, root_operation() and
// load_operation() will be moved there.
/// Returns the root operation.
pub fn root_operation(&self) -> Operation {
self.load_operation(self.op_store.root_operation_id())
.expect("failed to read root operation")
}
/// Loads the specified operation from the operation store.
pub fn load_operation(&self, id: &OperationId) -> OpStoreResult<Operation> {
let data = self.op_store.read_operation(id).block_on()?;
Ok(Operation::new(self.op_store.clone(), id.clone(), data))
}
/// Merges the given `operations` into a single operation. Returns the root
/// operation if the `operations` is empty.
pub fn merge_operations(
    &self,
    operations: Vec<Operation>,
    tx_description: Option<&str>,
) -> Result<Operation, RepoLoaderError> {
    let num_operations = operations.len();
    let mut operations = operations.into_iter();
    // Nothing to merge: fall back to the root operation.
    let Some(base_op) = operations.next() else {
        return Ok(self.root_operation());
    };
    let final_op = if num_operations > 1 {
        // Merge the remaining operations into the first one through a
        // transaction, rebasing descendants after each merged operation.
        let base_repo = self.load_at(&base_op)?;
        let mut tx = base_repo.start_transaction();
        for other_op in operations {
            tx.merge_operation(other_op)?;
            tx.repo_mut().rebase_descendants()?;
        }
        let tx_description = tx_description.map_or_else(
            || format!("merge {num_operations} operations"),
            |tx_description| tx_description.to_string(),
        );
        // The merged operation is written but left unpublished (it is not
        // advertised as a new operation head).
        let merged_repo = tx.write(tx_description)?.leave_unpublished();
        merged_repo.operation().clone()
    } else {
        // Exactly one operation: nothing to merge.
        base_op
    };
    Ok(final_op)
}
/// Reconciles divergent operation heads by merging them into a single
/// operation. `op_heads` must not be empty.
fn resolve_op_heads(&self, op_heads: Vec<Operation>) -> Result<Operation, RepoLoaderError> {
    assert!(!op_heads.is_empty());
    self.merge_operations(op_heads, Some("reconcile divergent operations"))
}
/// Builds a `ReadonlyRepo` for the given operation and view, loading the
/// corresponding index from the index store.
fn finish_load(
    &self,
    operation: Operation,
    view: View,
) -> Result<Arc<ReadonlyRepo>, RepoLoaderError> {
    let index = self.index_store.get_index_at_op(&operation, &self.store)?;
    Ok(Arc::new(ReadonlyRepo {
        loader: self.clone(),
        operation,
        view,
        index,
        change_id_index: OnceCell::new(),
    }))
}
}
/// Records how a commit was rewritten, used to decide how its descendants
/// and references should be updated.
#[derive(Clone, Debug, PartialEq, Eq)]
enum Rewrite {
    /// The old commit was rewritten as this new commit. Children should be
    /// rebased onto the new commit.
    Rewritten(CommitId),
    /// The old commit was rewritten as multiple other commits. Children should
    /// not be rebased.
    Divergent(Vec<CommitId>),
    /// The old commit was abandoned. Children should be rebased onto the given
    /// commits (typically the parents of the old commit).
    Abandoned(Vec<CommitId>),
}
impl Rewrite {
    /// The commit ids that replace the rewritten commit as parents for its
    /// descendants.
    fn new_parent_ids(&self) -> &[CommitId] {
        match self {
            Self::Rewritten(id) => std::slice::from_ref(id),
            Self::Divergent(ids) | Self::Abandoned(ids) => ids,
        }
    }
}
/// A mutable working state of a repository, forked from a base
/// [`ReadonlyRepo`].
pub struct MutableRepo {
    // The immutable repo this mutable state was created from.
    base_repo: Arc<ReadonlyRepo>,
    // Mutable index being built up during this mutation.
    index: Box<dyn MutableIndex>,
    // Current view; lazily normalized via `enforce_view_invariants()`.
    view: DirtyCell<View>,
    /// Mapping from new commit to its predecessors.
    ///
    /// This is similar to (the reverse of) `parent_mapping`, but
    /// `commit_predecessors` will never be cleared on `rebase_descendants()`.
    commit_predecessors: BTreeMap<CommitId, Vec<CommitId>>,
    // The commit identified by the key has been replaced by all the ones in the value.
    // * Bookmarks pointing to the old commit should be updated to the new commit, resulting in a
    //   conflict if there are multiple new commits.
    // * Children of the old commit should be rebased onto the new commits. However, if the type is
    //   `Divergent`, they should be left in place.
    // * Working copies pointing to the old commit should be updated to the first of the new
    //   commits. However, if the type is `Abandoned`, a new working-copy commit should be created
    //   on top of all of the new commits instead.
    parent_mapping: HashMap<CommitId, Rewrite>,
}
impl MutableRepo {
pub fn new(base_repo: Arc<ReadonlyRepo>, index: &dyn ReadonlyIndex, view: &View) -> Self {
let mut_view = view.clone();
let mut_index = index.start_modification();
Self {
base_repo,
index: mut_index,
view: DirtyCell::with_clean(mut_view),
commit_predecessors: Default::default(),
parent_mapping: Default::default(),
}
}
/// The immutable base repo this mutable repo was created from.
pub fn base_repo(&self) -> &Arc<ReadonlyRepo> {
    &self.base_repo
}
// Mutable access to the current view (private; invariants are re-enforced
// lazily, see `has_changes()`/`consume()`).
fn view_mut(&mut self) -> &mut View {
    self.view.get_mut()
}
/// The index being modified as part of this mutable repo.
pub fn mutable_index(&self) -> &dyn MutableIndex {
    self.index.as_ref()
}
/// Whether the mutable index is the default implementation
/// (`DefaultMutableIndex`) rather than a custom index backend.
pub(crate) fn is_backed_by_default_index(&self) -> bool {
    self.index.downcast_ref::<DefaultMutableIndex>().is_some()
}
/// Whether anything was changed relative to the base repo: recorded
/// predecessors, rewritten parents, or a modified view.
pub fn has_changes(&self) -> bool {
    // Normalize the view before comparing it against the base repo's view.
    self.view.ensure_clean(|v| self.enforce_view_invariants(v));
    !self.commit_predecessors.is_empty()
        || !self.parent_mapping.is_empty()
        || self.view() != &self.base_repo.view
}
/// Tears the mutable repo apart into its index, view, and predecessor map,
/// enforcing view invariants first.
pub(crate) fn consume(
    self,
) -> (
    Box<dyn MutableIndex>,
    View,
    BTreeMap<CommitId, Vec<CommitId>>,
) {
    self.view.ensure_clean(|v| self.enforce_view_invariants(v));
    (self.index, self.view.into_inner(), self.commit_predecessors)
}
/// Returns a [`CommitBuilder`] to write a new commit to the repo.
pub fn new_commit(&mut self, parents: Vec<CommitId>, tree: MergedTree) -> CommitBuilder<'_> {
    let settings = self.base_repo.settings();
    DetachedCommitBuilder::for_new_commit(self, settings, parents, tree).attach(self)
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/config_resolver.rs | lib/src/config_resolver.rs | // Copyright 2024 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Post-processing functions for [`StackedConfig`].
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use itertools::Itertools as _;
use serde::Deserialize as _;
use serde::de::IntoDeserializer as _;
use thiserror::Error;
use toml_edit::DocumentMut;
use crate::config::ConfigGetError;
use crate::config::ConfigLayer;
use crate::config::ConfigNamePathBuf;
use crate::config::ConfigSource;
use crate::config::ConfigUpdateError;
use crate::config::ConfigValue;
use crate::config::StackedConfig;
use crate::config::ToConfigNamePath;
// Prefixed by "--" so these keys look unusual. It's also nice that "-" is
// placed earlier than the other keys in lexicographical order.
/// Table key holding the condition under which a layer/scope is enabled.
const SCOPE_CONDITION_KEY: &str = "--when";
/// Table key holding an array of conditionally-enabled sub-tables.
const SCOPE_TABLE_KEY: &str = "--scope";
/// Parameters to enable scoped config tables conditionally.
#[derive(Clone, Debug)]
pub struct ConfigResolutionContext<'a> {
    /// Home directory. `~` will be substituted with this path.
    pub home_dir: Option<&'a Path>,
    /// Repository path, which is usually `<main_workspace_root>/.jj/repo`.
    pub repo_path: Option<&'a Path>,
    /// Workspace path: `<workspace_root>`.
    pub workspace_path: Option<&'a Path>,
    /// Space-separated subcommand. `jj file show ...` should result in `"file
    /// show"`.
    pub command: Option<&'a str>,
    /// Hostname to compare against `--when.hostnames` conditions.
    pub hostname: &'a str,
}
/// Conditions to enable the parent table.
///
/// - Each predicate is tested separately, and the results are intersected.
/// - `None` means there are no constraints. (i.e. always `true`)
/// - An empty list (e.g. `--when.hostnames = []`) never matches.
// TODO: introduce fileset-like DSL?
// TODO: add support for fileset-like pattern prefixes? it might be a bit tricky
// if path canonicalization is involved.
#[derive(Clone, Debug, Default, serde::Deserialize)]
#[serde(default, rename_all = "kebab-case")]
struct ScopeCondition {
    /// Paths to match the repository path prefix.
    pub repositories: Option<Vec<PathBuf>>,
    /// Paths to match the workspace path prefix.
    pub workspaces: Option<Vec<PathBuf>>,
    /// Commands to match. Subcommands are matched space-separated.
    /// - `--when.commands = ["foo"]` -> matches "foo", "foo bar", "foo bar baz"
    /// - `--when.commands = ["foo bar"]` -> matches "foo bar", "foo bar baz",
    ///   NOT "foo"
    pub commands: Option<Vec<String>>,
    /// Platforms to match. The values are defined by `std::env::consts::FAMILY`
    /// and `std::env::consts::OS`.
    pub platforms: Option<Vec<String>>,
    /// Hostnames to match the hostname.
    pub hostnames: Option<Vec<String>>,
}
impl ScopeCondition {
fn from_value(
value: ConfigValue,
context: &ConfigResolutionContext,
) -> Result<Self, toml_edit::de::Error> {
Self::deserialize(value.into_deserializer())?
.expand_paths(context)
.map_err(serde::de::Error::custom)
}
fn expand_paths(mut self, context: &ConfigResolutionContext) -> Result<Self, &'static str> {
// It might make some sense to compare paths in canonicalized form, but
// be careful to not resolve relative path patterns against cwd, which
// wouldn't be what the user would expect.
for path in self.repositories.as_mut().into_iter().flatten() {
if let Some(new_path) = expand_home(path, context.home_dir)? {
*path = new_path;
}
}
for path in self.workspaces.as_mut().into_iter().flatten() {
if let Some(new_path) = expand_home(path, context.home_dir)? {
*path = new_path;
}
}
Ok(self)
}
fn matches(&self, context: &ConfigResolutionContext) -> bool {
matches_path_prefix(self.repositories.as_deref(), context.repo_path)
&& matches_path_prefix(self.workspaces.as_deref(), context.workspace_path)
&& matches_platform(self.platforms.as_deref())
&& matches_hostname(self.hostnames.as_deref(), context.hostname)
&& matches_command(self.commands.as_deref(), context.command)
}
}
/// Expands a leading `~` path component against `home_dir`.
///
/// Returns `Ok(None)` if `path` doesn't start with a `~` component (so
/// `~foo` and `/foo/~` are left alone), and an error if expansion is needed
/// but the home directory is unknown.
fn expand_home(path: &Path, home_dir: Option<&Path>) -> Result<Option<PathBuf>, &'static str> {
    if let Ok(tail) = path.strip_prefix("~") {
        let home_dir = home_dir.ok_or("Cannot expand ~ (home directory is unknown)")?;
        Ok(Some(home_dir.join(tail)))
    } else {
        Ok(None)
    }
}
/// Returns true if `actual` starts with any of the candidate path prefixes
/// (component-wise). `None` candidates means "no constraint"; a constraint
/// with an unknown actual path (e.g. not in a workspace) never matches.
fn matches_path_prefix(candidates: Option<&[PathBuf]>, actual: Option<&Path>) -> bool {
    let Some(candidates) = candidates else {
        return true; // no constraints
    };
    actual.is_some_and(|path| candidates.iter().any(|base| path.starts_with(base)))
}
/// Returns true if the current platform matches any candidate. Each
/// candidate is compared against both `std::env::consts::FAMILY` and
/// `std::env::consts::OS`; `None` means "no constraint".
fn matches_platform(candidates: Option<&[String]>) -> bool {
    match candidates {
        None => true, // no constraints
        Some(candidates) => candidates
            .iter()
            .any(|value| value == std::env::consts::FAMILY || value == std::env::consts::OS),
    }
}
/// Returns true if `actual` exactly equals any candidate hostname; `None`
/// means "no constraint".
fn matches_hostname(candidates: Option<&[String]>, actual: &str) -> bool {
    match candidates {
        None => true, // no constraints
        Some(candidates) => candidates.iter().any(|candidate| candidate == actual),
    }
}
/// Returns true if `actual` is the candidate command or a subcommand of it
/// (i.e. candidate followed by a space). `None` candidates means "no
/// constraint"; a constraint with no actual command never matches.
fn matches_command(candidates: Option<&[String]>, actual: Option<&str>) -> bool {
    let Some(candidates) = candidates else {
        return true; // no constraints
    };
    let Some(actual) = actual else {
        return false; // command not known
    };
    candidates
        .iter()
        .any(|candidate| match actual.strip_prefix(candidate.as_str()) {
            // Either an exact match or a word boundary right after the prefix.
            Some(trailing) => trailing.is_empty() || trailing.starts_with(' '),
            None => false,
        })
}
/// Evaluates condition for each layer and scope, flattens scoped tables.
/// Returns new config that only contains enabled layers and tables.
pub fn resolve(
    source_config: &StackedConfig,
    context: &ConfigResolutionContext,
) -> Result<StackedConfig, ConfigGetError> {
    // Layers are processed by popping from the end of this stack, so it is
    // seeded in reverse precedence order.
    let mut source_layers_stack: Vec<Arc<ConfigLayer>> =
        source_config.layers().iter().rev().cloned().collect();
    let mut resolved_layers: Vec<Arc<ConfigLayer>> = Vec::new();
    while let Some(mut source_layer) = source_layers_stack.pop() {
        // Fast path: a layer without "--when"/"--scope" keys is passed
        // through without copying.
        if !source_layer.data.contains_key(SCOPE_CONDITION_KEY)
            && !source_layer.data.contains_key(SCOPE_TABLE_KEY)
        {
            resolved_layers.push(source_layer); // reuse original table
            continue;
        }
        let layer_mut = Arc::make_mut(&mut source_layer);
        let condition = pop_scope_condition(layer_mut, context)?;
        // Dropping a disabled layer also drops its nested "--scope" tables.
        if !condition.matches(context) {
            continue;
        }
        let tables = pop_scope_tables(layer_mut)?;
        // tables.iter() does not implement DoubleEndedIterator as of toml_edit
        // 0.22.22.
        let frame = source_layers_stack.len();
        for table in tables {
            let layer = ConfigLayer {
                source: source_layer.source,
                path: source_layer.path.clone(),
                data: DocumentMut::from(table),
            };
            source_layers_stack.push(Arc::new(layer));
        }
        // The nested tables were pushed in document order; reverse that
        // range so popping the stack yields them in document order too.
        source_layers_stack[frame..].reverse();
        // The parent layer precedes its nested scopes in the output.
        resolved_layers.push(source_layer);
    }
    let mut resolved_config = StackedConfig::empty();
    resolved_config.extend_layers(resolved_layers);
    Ok(resolved_config)
}
/// Removes the "--when" item from `layer` and deserializes it (expanding
/// `~` in path patterns). Returns the always-true default condition if the
/// key is absent.
fn pop_scope_condition(
    layer: &mut ConfigLayer,
    context: &ConfigResolutionContext,
) -> Result<ScopeCondition, ConfigGetError> {
    let Some(item) = layer.data.remove(SCOPE_CONDITION_KEY) else {
        return Ok(ScopeCondition::default());
    };
    let value = item
        .clone()
        .into_value()
        .expect("Item::None should not exist in table");
    ScopeCondition::from_value(value, context).map_err(|err| ConfigGetError::Type {
        name: SCOPE_CONDITION_KEY.to_owned(),
        error: err.into(),
        source_path: layer.path.clone(),
    })
}
/// Removes the "--scope" item from `layer`, returning its array of tables
/// (empty if the key is absent, an error if it has the wrong type).
fn pop_scope_tables(layer: &mut ConfigLayer) -> Result<toml_edit::ArrayOfTables, ConfigGetError> {
    match layer.data.remove(SCOPE_TABLE_KEY) {
        None => Ok(toml_edit::ArrayOfTables::new()),
        Some(item) => item
            .into_array_of_tables()
            .map_err(|item| ConfigGetError::Type {
                name: SCOPE_TABLE_KEY.to_owned(),
                error: format!("Expected an array of tables, but is {}", item.type_name()).into(),
                source_path: layer.path.clone(),
            }),
    }
}
/// Error that can occur when migrating config variables.
///
/// Pairs the underlying layer error with the path of the config file it
/// originated from.
#[derive(Debug, Error)]
#[error("Migration failed")]
pub struct ConfigMigrateError {
    /// Source error.
    #[source]
    pub error: ConfigMigrateLayerError,
    /// Source file path where the value is defined.
    pub source_path: Option<PathBuf>,
}
/// Inner error of [`ConfigMigrateError`].
#[derive(Debug, Error)]
pub enum ConfigMigrateLayerError {
    /// Cannot delete old value or set new value.
    #[error(transparent)]
    Update(#[from] ConfigUpdateError),
    /// Old config value cannot be converted.
    #[error("Invalid type or value for {name}")]
    Type {
        /// Dotted config name path.
        name: String,
        /// Source error.
        #[source]
        error: DynError,
    },
}
impl ConfigMigrateLayerError {
    /// Attaches the originating file path, producing a
    /// [`ConfigMigrateError`].
    fn with_source_path(self, source_path: Option<&Path>) -> ConfigMigrateError {
        ConfigMigrateError {
            error: self,
            source_path: source_path.map(Path::to_path_buf),
        }
    }
}
/// Boxed error type returned from migration callbacks.
type DynError = Box<dyn std::error::Error + Send + Sync>;
/// Rule to migrate deprecated config variables.
pub struct ConfigMigrationRule {
    // Kept private; the supported rule shapes are the `MigrationRule`
    // variants, constructed via the public factory methods.
    inner: MigrationRule,
}
/// Internal representation of a migration rule.
enum MigrationRule {
    /// Move a value from `old_name` to `new_name` unchanged.
    RenameValue {
        old_name: ConfigNamePathBuf,
        new_name: ConfigNamePathBuf,
    },
    /// Move a value from `old_name` to `new_name`, transforming it with
    /// `new_value_fn`.
    RenameUpdateValue {
        old_name: ConfigNamePathBuf,
        new_name: ConfigNamePathBuf,
        #[expect(clippy::type_complexity)] // type alias wouldn't help readability
        new_value_fn: Box<dyn Fn(&ConfigValue) -> Result<ConfigValue, DynError>>,
    },
    /// Arbitrary migration: `matches_fn` decides whether a layer needs
    /// updating, `apply_fn` performs the update.
    Custom {
        matches_fn: Box<dyn Fn(&ConfigLayer) -> bool>,
        #[expect(clippy::type_complexity)] // type alias wouldn't help readability
        apply_fn: Box<dyn Fn(&mut ConfigLayer) -> Result<String, ConfigMigrateLayerError>>,
    },
}
impl ConfigMigrationRule {
    /// Creates rule that moves value from `old_name` to `new_name`.
    pub fn rename_value(old_name: impl ToConfigNamePath, new_name: impl ToConfigNamePath) -> Self {
        let inner = MigrationRule::RenameValue {
            old_name: old_name.into_name_path().into(),
            new_name: new_name.into_name_path().into(),
        };
        Self { inner }
    }
    /// Creates rule that moves value from `old_name` to `new_name`, and updates
    /// the value.
    ///
    /// If `new_value_fn(&old_value)` returned an error, the whole migration
    /// process would fail.
    pub fn rename_update_value(
        old_name: impl ToConfigNamePath,
        new_name: impl ToConfigNamePath,
        new_value_fn: impl Fn(&ConfigValue) -> Result<ConfigValue, DynError> + 'static,
    ) -> Self {
        let inner = MigrationRule::RenameUpdateValue {
            old_name: old_name.into_name_path().into(),
            new_name: new_name.into_name_path().into(),
            new_value_fn: Box::new(new_value_fn),
        };
        Self { inner }
    }
    // TODO: update value, etc.
    /// Creates rule that updates config layer by `apply_fn`. `matches_fn` should
    /// return true if the layer contains items to be updated.
    pub fn custom(
        matches_fn: impl Fn(&ConfigLayer) -> bool + 'static,
        apply_fn: impl Fn(&mut ConfigLayer) -> Result<String, ConfigMigrateLayerError> + 'static,
    ) -> Self {
        let inner = MigrationRule::Custom {
            matches_fn: Box::new(matches_fn),
            apply_fn: Box::new(apply_fn),
        };
        Self { inner }
    }
    /// Returns true if `layer` contains an item to be migrated.
    fn matches(&self, layer: &ConfigLayer) -> bool {
        match &self.inner {
            // Rename rules apply whenever the old name is present.
            MigrationRule::RenameValue { old_name, .. }
            | MigrationRule::RenameUpdateValue { old_name, .. } => {
                matches!(layer.look_up_item(old_name), Ok(Some(_)))
            }
            MigrationRule::Custom { matches_fn, .. } => matches_fn(layer),
        }
    }
    /// Migrates `layer` item. Returns a description of the applied migration.
    fn apply(&self, layer: &mut ConfigLayer) -> Result<String, ConfigMigrateLayerError> {
        match &self.inner {
            MigrationRule::RenameValue { old_name, new_name } => {
                rename_value(layer, old_name, new_name)
            }
            MigrationRule::RenameUpdateValue {
                old_name,
                new_name,
                new_value_fn,
            } => rename_update_value(layer, old_name, new_name, new_value_fn),
            MigrationRule::Custom { apply_fn, .. } => apply_fn(layer),
        }
    }
}
/// Moves the value at `old_name` to `new_name`. If `new_name` is already
/// set, the old value is simply dropped. Returns a description of what was
/// done.
fn rename_value(
    layer: &mut ConfigLayer,
    old_name: &ConfigNamePathBuf,
    new_name: &ConfigNamePathBuf,
) -> Result<String, ConfigMigrateLayerError> {
    let value = layer.delete_value(old_name)?.expect("tested by matches()");
    let superseded = matches!(layer.look_up_item(new_name), Ok(Some(_)));
    if superseded {
        return Ok(format!("{old_name} is deleted (superseded by {new_name})"));
    }
    layer.set_value(new_name, value)?;
    Ok(format!("{old_name} is renamed to {new_name}"))
}
/// Moves the value at `old_name` to `new_name`, transforming it with
/// `new_value_fn`. If `new_name` is already set, the old value is simply
/// deleted. Returns a description of what was done.
fn rename_update_value(
    layer: &mut ConfigLayer,
    old_name: &ConfigNamePathBuf,
    new_name: &ConfigNamePathBuf,
    new_value_fn: impl FnOnce(&ConfigValue) -> Result<ConfigValue, DynError>,
) -> Result<String, ConfigMigrateLayerError> {
    let old_value = layer.delete_value(old_name)?.expect("tested by matches()");
    if matches!(layer.look_up_item(new_name), Ok(Some(_))) {
        return Ok(format!("{old_name} is deleted (superseded by {new_name})"));
    }
    let new_value = new_value_fn(&old_value).map_err(|error| ConfigMigrateLayerError::Type {
        name: old_name.to_string(),
        error,
    })?;
    // Render the description before handing the value over to the layer, so
    // we don't have to clone the value just for display.
    let description = format!("{old_name} is updated to {new_name} = {new_value}");
    layer.set_value(new_name, new_value)?;
    Ok(description)
}
/// Applies migration `rules` to `config`. Returns descriptions of the applied
/// migrations.
pub fn migrate(
    config: &mut StackedConfig,
    rules: &[ConfigMigrationRule],
) -> Result<Vec<(ConfigSource, String)>, ConfigMigrateError> {
    let mut descriptions = Vec::new();
    for layer in config.layers_mut() {
        // Attach the layer's file path to any error for better diagnostics.
        migrate_layer(layer, rules, &mut descriptions)
            .map_err(|err| err.with_source_path(layer.path.as_deref()))?;
    }
    Ok(descriptions)
}
/// Applies all matching `rules` to one layer, appending a description per
/// applied rule. The layer is copied (copy-on-write) only if at least one
/// rule applies.
fn migrate_layer(
    layer: &mut Arc<ConfigLayer>,
    rules: &[ConfigMigrationRule],
    descriptions: &mut Vec<(ConfigSource, String)>,
) -> Result<(), ConfigMigrateLayerError> {
    let applicable: Vec<_> = rules.iter().filter(|rule| rule.matches(layer)).collect();
    if applicable.is_empty() {
        return Ok(());
    }
    let layer_mut = Arc::make_mut(layer);
    for rule in applicable {
        let description = rule.apply(layer_mut)?;
        descriptions.push((layer_mut.source, description));
    }
    Ok(())
}
#[cfg(test)]
mod tests {
use assert_matches::assert_matches;
use indoc::indoc;
use super::*;
/// `~` expansion: only a leading `~` component is substituted, and
/// expansion fails when the home directory is unknown.
#[test]
fn test_expand_home() {
    let home_dir = Some(Path::new("/home/dir"));
    assert_eq!(
        expand_home("~".as_ref(), home_dir).unwrap(),
        Some(PathBuf::from("/home/dir"))
    );
    assert_eq!(expand_home("~foo".as_ref(), home_dir).unwrap(), None);
    assert_eq!(expand_home("/foo/~".as_ref(), home_dir).unwrap(), None);
    assert_eq!(
        expand_home("~/foo".as_ref(), home_dir).unwrap(),
        Some(PathBuf::from("/home/dir/foo"))
    );
    assert!(expand_home("~/foo".as_ref(), None).is_err());
}
/// The default (empty) condition matches any context.
#[test]
fn test_condition_default() {
    let condition = ScopeCondition::default();
    let context = ConfigResolutionContext {
        home_dir: None,
        repo_path: None,
        workspace_path: None,
        command: None,
        hostname: "",
    };
    assert!(condition.matches(&context));
    let context = ConfigResolutionContext {
        home_dir: None,
        repo_path: Some(Path::new("/foo")),
        workspace_path: None,
        command: None,
        hostname: "",
    };
    assert!(condition.matches(&context));
}
/// `repositories` matches on path-component prefixes of the repo path.
#[test]
fn test_condition_repo_path() {
    let condition = ScopeCondition {
        repositories: Some(["/foo", "/bar"].map(PathBuf::from).into()),
        workspaces: None,
        commands: None,
        platforms: None,
        hostnames: None,
    };
    // Unknown repo path never matches a repository constraint.
    let context = ConfigResolutionContext {
        home_dir: None,
        repo_path: None,
        workspace_path: None,
        command: None,
        hostname: "",
    };
    assert!(!condition.matches(&context));
    let context = ConfigResolutionContext {
        home_dir: None,
        repo_path: Some(Path::new("/foo")),
        workspace_path: None,
        command: None,
        hostname: "",
    };
    assert!(condition.matches(&context));
    // "/fooo" shares a string prefix but not a path-component prefix.
    let context = ConfigResolutionContext {
        home_dir: None,
        repo_path: Some(Path::new("/fooo")),
        workspace_path: None,
        command: None,
        hostname: "",
    };
    assert!(!condition.matches(&context));
    let context = ConfigResolutionContext {
        home_dir: None,
        repo_path: Some(Path::new("/foo/baz")),
        workspace_path: None,
        command: None,
        hostname: "",
    };
    assert!(condition.matches(&context));
    let context = ConfigResolutionContext {
        home_dir: None,
        repo_path: Some(Path::new("/bar")),
        workspace_path: None,
        command: None,
        hostname: "",
    };
    assert!(condition.matches(&context));
}
/// Separator-insensitive path matching only applies on Windows.
#[test]
fn test_condition_repo_path_windows() {
    let condition = ScopeCondition {
        repositories: Some(["c:/foo", r"d:\bar/baz"].map(PathBuf::from).into()),
        workspaces: None,
        commands: None,
        platforms: None,
        hostnames: None,
    };
    let context = ConfigResolutionContext {
        home_dir: None,
        repo_path: Some(Path::new(r"c:\foo")),
        workspace_path: None,
        command: None,
        hostname: "",
    };
    assert_eq!(condition.matches(&context), cfg!(windows));
    let context = ConfigResolutionContext {
        home_dir: None,
        repo_path: Some(Path::new(r"c:\foo\baz")),
        workspace_path: None,
        command: None,
        hostname: "",
    };
    assert_eq!(condition.matches(&context), cfg!(windows));
    // Different drive letter: never matches, on any platform.
    let context = ConfigResolutionContext {
        home_dir: None,
        repo_path: Some(Path::new(r"d:\foo")),
        workspace_path: None,
        command: None,
        hostname: "",
    };
    assert!(!condition.matches(&context));
    let context = ConfigResolutionContext {
        home_dir: None,
        repo_path: Some(Path::new(r"d:/bar\baz")),
        workspace_path: None,
        command: None,
        hostname: "",
    };
    assert_eq!(condition.matches(&context), cfg!(windows));
}
/// `hostnames` requires an exact match against the context hostname.
#[test]
fn test_condition_hostname() {
    let condition = ScopeCondition {
        repositories: None,
        hostnames: Some(["host-a", "host-b"].map(String::from).into()),
        workspaces: None,
        commands: None,
        platforms: None,
    };
    let context = ConfigResolutionContext {
        home_dir: None,
        repo_path: None,
        workspace_path: None,
        command: None,
        hostname: "",
    };
    assert!(!condition.matches(&context));
    let context = ConfigResolutionContext {
        home_dir: None,
        repo_path: None,
        workspace_path: None,
        command: None,
        hostname: "host-a",
    };
    assert!(condition.matches(&context));
    let context = ConfigResolutionContext {
        home_dir: None,
        repo_path: None,
        workspace_path: None,
        command: None,
        hostname: "host-b",
    };
    assert!(condition.matches(&context));
    let context = ConfigResolutionContext {
        home_dir: None,
        repo_path: None,
        workspace_path: None,
        command: None,
        hostname: "host-c",
    };
    assert!(!condition.matches(&context));
}
/// Parses `text` into a user-source config layer. Panics on invalid TOML.
fn new_user_layer(text: &str) -> ConfigLayer {
    ConfigLayer::parse(ConfigSource::User, text).unwrap()
}
/// Layers without "--when"/"--scope" keys are passed through by reference
/// (same `Arc`), not copied.
#[test]
fn test_resolve_transparent() {
    let mut source_config = StackedConfig::empty();
    source_config.add_layer(ConfigLayer::empty(ConfigSource::Default));
    source_config.add_layer(ConfigLayer::empty(ConfigSource::User));
    let context = ConfigResolutionContext {
        home_dir: None,
        repo_path: None,
        workspace_path: None,
        command: None,
        hostname: "",
    };
    let resolved_config = resolve(&source_config, &context).unwrap();
    assert_eq!(resolved_config.layers().len(), 2);
    assert!(Arc::ptr_eq(
        &source_config.layers()[0],
        &resolved_config.layers()[0]
    ));
    assert!(Arc::ptr_eq(
        &source_config.layers()[1],
        &resolved_config.layers()[1]
    ));
}
/// Scoped tables are flattened depth-first in document order, with each
/// parent layer preceding its nested scopes.
#[test]
fn test_resolve_table_order() {
    let mut source_config = StackedConfig::empty();
    source_config.add_layer(new_user_layer(indoc! {"
        a = 'a #0'
        [[--scope]]
        a = 'a #0.0'
        [[--scope]]
        a = 'a #0.1'
        [[--scope.--scope]]
        a = 'a #0.1.0'
        [[--scope]]
        a = 'a #0.2'
    "}));
    source_config.add_layer(new_user_layer(indoc! {"
        a = 'a #1'
        [[--scope]]
        a = 'a #1.0'
    "}));
    let context = ConfigResolutionContext {
        home_dir: None,
        repo_path: None,
        workspace_path: None,
        command: None,
        hostname: "",
    };
    let resolved_config = resolve(&source_config, &context).unwrap();
    assert_eq!(resolved_config.layers().len(), 7);
    insta::assert_snapshot!(resolved_config.layers()[0].data, @"a = 'a #0'");
    insta::assert_snapshot!(resolved_config.layers()[1].data, @"a = 'a #0.0'");
    insta::assert_snapshot!(resolved_config.layers()[2].data, @"a = 'a #0.1'");
    insta::assert_snapshot!(resolved_config.layers()[3].data, @"a = 'a #0.1.0'");
    insta::assert_snapshot!(resolved_config.layers()[4].data, @"a = 'a #0.2'");
    insta::assert_snapshot!(resolved_config.layers()[5].data, @"a = 'a #1'");
    insta::assert_snapshot!(resolved_config.layers()[6].data, @"a = 'a #1.0'");
}
/// Layers and scopes gated on `--when.repositories` are enabled depending
/// on the context's repo path (including `~` expansion).
#[test]
fn test_resolve_repo_path() {
    let mut source_config = StackedConfig::empty();
    source_config.add_layer(new_user_layer(indoc! {"
        a = 'a #0'
        [[--scope]]
        --when.repositories = ['/foo']
        a = 'a #0.1 foo'
        [[--scope]]
        --when.repositories = ['/foo', '/bar']
        a = 'a #0.2 foo|bar'
        [[--scope]]
        --when.repositories = []
        a = 'a #0.3 none'
    "}));
    source_config.add_layer(new_user_layer(indoc! {"
        --when.repositories = ['~/baz']
        a = 'a #1 baz'
        [[--scope]]
        --when.repositories = ['/foo'] # should never be enabled
        a = 'a #1.1 baz&foo'
    "}));
    // No repo path: only unconditional tables are enabled.
    let context = ConfigResolutionContext {
        home_dir: Some(Path::new("/home/dir")),
        repo_path: None,
        workspace_path: None,
        command: None,
        hostname: "",
    };
    let resolved_config = resolve(&source_config, &context).unwrap();
    assert_eq!(resolved_config.layers().len(), 1);
    insta::assert_snapshot!(resolved_config.layers()[0].data, @"a = 'a #0'");
    let context = ConfigResolutionContext {
        home_dir: Some(Path::new("/home/dir")),
        repo_path: Some(Path::new("/foo/.jj/repo")),
        workspace_path: None,
        command: None,
        hostname: "",
    };
    let resolved_config = resolve(&source_config, &context).unwrap();
    assert_eq!(resolved_config.layers().len(), 3);
    insta::assert_snapshot!(resolved_config.layers()[0].data, @"a = 'a #0'");
    insta::assert_snapshot!(resolved_config.layers()[1].data, @"a = 'a #0.1 foo'");
    insta::assert_snapshot!(resolved_config.layers()[2].data, @"a = 'a #0.2 foo|bar'");
    let context = ConfigResolutionContext {
        home_dir: Some(Path::new("/home/dir")),
        repo_path: Some(Path::new("/bar/.jj/repo")),
        workspace_path: None,
        command: None,
        hostname: "",
    };
    let resolved_config = resolve(&source_config, &context).unwrap();
    assert_eq!(resolved_config.layers().len(), 2);
    insta::assert_snapshot!(resolved_config.layers()[0].data, @"a = 'a #0'");
    insta::assert_snapshot!(resolved_config.layers()[1].data, @"a = 'a #0.2 foo|bar'");
    let context = ConfigResolutionContext {
        home_dir: Some(Path::new("/home/dir")),
        repo_path: Some(Path::new("/home/dir/baz/.jj/repo")),
        workspace_path: None,
        command: None,
        hostname: "",
    };
    let resolved_config = resolve(&source_config, &context).unwrap();
    assert_eq!(resolved_config.layers().len(), 2);
    insta::assert_snapshot!(resolved_config.layers()[0].data, @"a = 'a #0'");
    insta::assert_snapshot!(resolved_config.layers()[1].data, @"a = 'a #1 baz'");
}
/// Layers and scopes gated on `--when.hostnames` are enabled depending on
/// the context's hostname.
#[test]
fn test_resolve_hostname() {
    let mut source_config = StackedConfig::empty();
    source_config.add_layer(new_user_layer(indoc! {"
        a = 'a #0'
        [[--scope]]
        --when.hostnames = ['host-a']
        a = 'a #0.1 host-a'
        [[--scope]]
        --when.hostnames = ['host-a', 'host-b']
        a = 'a #0.2 host-a|host-b'
        [[--scope]]
        --when.hostnames = []
        a = 'a #0.3 none'
    "}));
    source_config.add_layer(new_user_layer(indoc! {"
        --when.hostnames = ['host-c']
        a = 'a #1 host-c'
        [[--scope]]
        --when.hostnames = ['host-a'] # should never be enabled
        a = 'a #1.1 host-c&host-a'
    "}));
    let context = ConfigResolutionContext {
        home_dir: Some(Path::new("/home/dir")),
        repo_path: None,
        workspace_path: None,
        command: None,
        hostname: "",
    };
    let resolved_config = resolve(&source_config, &context).unwrap();
    assert_eq!(resolved_config.layers().len(), 1);
    insta::assert_snapshot!(resolved_config.layers()[0].data, @"a = 'a #0'");
    let context = ConfigResolutionContext {
        home_dir: Some(Path::new("/home/dir")),
        repo_path: None,
        workspace_path: None,
        command: None,
        hostname: "host-a",
    };
    let resolved_config = resolve(&source_config, &context).unwrap();
    assert_eq!(resolved_config.layers().len(), 3);
    insta::assert_snapshot!(resolved_config.layers()[0].data, @"a = 'a #0'");
    insta::assert_snapshot!(resolved_config.layers()[1].data, @"a = 'a #0.1 host-a'");
    insta::assert_snapshot!(resolved_config.layers()[2].data, @"a = 'a #0.2 host-a|host-b'");
    let context = ConfigResolutionContext {
        home_dir: Some(Path::new("/home/dir")),
        repo_path: None,
        workspace_path: None,
        command: None,
        hostname: "host-b",
    };
    let resolved_config = resolve(&source_config, &context).unwrap();
    assert_eq!(resolved_config.layers().len(), 2);
    insta::assert_snapshot!(resolved_config.layers()[0].data, @"a = 'a #0'");
    insta::assert_snapshot!(resolved_config.layers()[1].data, @"a = 'a #0.2 host-a|host-b'");
    let context = ConfigResolutionContext {
        home_dir: Some(Path::new("/home/dir")),
        repo_path: None,
        workspace_path: None,
        command: None,
        hostname: "host-c",
    };
    let resolved_config = resolve(&source_config, &context).unwrap();
    assert_eq!(resolved_config.layers().len(), 2);
    insta::assert_snapshot!(resolved_config.layers()[0].data, @"a = 'a #0'");
    insta::assert_snapshot!(resolved_config.layers()[1].data, @"a = 'a #1 host-c'");
}
/// Layers and scopes gated on `--when.workspaces` are enabled depending on
/// the context's workspace path (including `~` expansion).
#[test]
fn test_resolve_workspace_path() {
    let mut source_config = StackedConfig::empty();
    source_config.add_layer(new_user_layer(indoc! {"
        a = 'a #0'
        [[--scope]]
        --when.workspaces = ['/foo']
        a = 'a #0.1 foo'
        [[--scope]]
        --when.workspaces = ['/foo', '/bar']
        a = 'a #0.2 foo|bar'
        [[--scope]]
        --when.workspaces = []
        a = 'a #0.3 none'
    "}));
    source_config.add_layer(new_user_layer(indoc! {"
        --when.workspaces = ['~/baz']
        a = 'a #1 baz'
        [[--scope]]
        --when.workspaces = ['/foo'] # should never be enabled
        a = 'a #1.1 baz&foo'
    "}));
    let context = ConfigResolutionContext {
        home_dir: Some(Path::new("/home/dir")),
        repo_path: None,
        workspace_path: None,
        command: None,
        hostname: "",
    };
    let resolved_config = resolve(&source_config, &context).unwrap();
    assert_eq!(resolved_config.layers().len(), 1);
    insta::assert_snapshot!(resolved_config.layers()[0].data, @"a = 'a #0'");
    let context = ConfigResolutionContext {
        home_dir: Some(Path::new("/home/dir")),
        repo_path: None,
        workspace_path: Some(Path::new("/foo")),
        command: None,
        hostname: "",
    };
    let resolved_config = resolve(&source_config, &context).unwrap();
    assert_eq!(resolved_config.layers().len(), 3);
    insta::assert_snapshot!(resolved_config.layers()[0].data, @"a = 'a #0'");
    insta::assert_snapshot!(resolved_config.layers()[1].data, @"a = 'a #0.1 foo'");
    insta::assert_snapshot!(resolved_config.layers()[2].data, @"a = 'a #0.2 foo|bar'");
    let context = ConfigResolutionContext {
        home_dir: Some(Path::new("/home/dir")),
        repo_path: None,
        workspace_path: Some(Path::new("/bar")),
        command: None,
        hostname: "",
    };
    let resolved_config = resolve(&source_config, &context).unwrap();
    assert_eq!(resolved_config.layers().len(), 2);
    insta::assert_snapshot!(resolved_config.layers()[0].data, @"a = 'a #0'");
    insta::assert_snapshot!(resolved_config.layers()[1].data, @"a = 'a #0.2 foo|bar'");
    let context = ConfigResolutionContext {
        home_dir: Some(Path::new("/home/dir")),
        repo_path: None,
        workspace_path: Some(Path::new("/home/dir/baz")),
        command: None,
        hostname: "",
    };
    let resolved_config = resolve(&source_config, &context).unwrap();
    assert_eq!(resolved_config.layers().len(), 2);
    insta::assert_snapshot!(resolved_config.layers()[0].data, @"a = 'a #0'");
    insta::assert_snapshot!(resolved_config.layers()[1].data, @"a = 'a #1 baz'");
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/merged_tree.rs | lib/src/merged_tree.rs | // Copyright 2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! A lazily merged view of a set of trees.
use std::collections::BTreeMap;
use std::fmt;
use std::iter;
use std::iter::zip;
use std::pin::Pin;
use std::sync::Arc;
use std::task::Context;
use std::task::Poll;
use std::task::ready;
use std::vec;
use either::Either;
use futures::Stream;
use futures::StreamExt as _;
use futures::future::BoxFuture;
use futures::future::try_join;
use futures::stream::BoxStream;
use itertools::EitherOrBoth;
use itertools::Itertools as _;
use pollster::FutureExt as _;
use crate::backend::BackendResult;
use crate::backend::TreeId;
use crate::backend::TreeValue;
use crate::conflict_labels::ConflictLabels;
use crate::copies::CopiesTreeDiffEntry;
use crate::copies::CopiesTreeDiffStream;
use crate::copies::CopyRecords;
use crate::matchers::EverythingMatcher;
use crate::matchers::Matcher;
use crate::merge::Diff;
use crate::merge::Merge;
use crate::merge::MergeBuilder;
use crate::merge::MergedTreeVal;
use crate::merge::MergedTreeValue;
use crate::repo_path::RepoPath;
use crate::repo_path::RepoPathBuf;
use crate::repo_path::RepoPathComponent;
use crate::store::Store;
use crate::tree::Tree;
use crate::tree_builder::TreeBuilder;
use crate::tree_merge::merge_trees;
/// Presents a view of a merged set of trees at the root directory, as well as
/// conflict labels.
#[derive(Clone)]
pub struct MergedTree {
    /// Store the underlying tree objects are read from.
    store: Arc<Store>,
    /// Root tree ID of each side of the merge (a single ID if resolved).
    tree_ids: Merge<TreeId>,
    /// Optional human-readable labels describing the sides of the merge.
    labels: ConflictLabels,
}
impl fmt::Debug for MergedTree {
    /// Formats as `MergedTree { tree_ids: .., labels: .., .. }`; the store is
    /// intentionally omitted (hence `finish_non_exhaustive`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut builder = f.debug_struct("MergedTree");
        builder.field("tree_ids", &self.tree_ids);
        builder.field("labels", &self.labels);
        builder.finish_non_exhaustive()
    }
}
impl MergedTree {
    /// Creates a `MergedTree` with the given resolved tree ID.
    pub fn resolved(store: Arc<Store>, tree_id: TreeId) -> Self {
        Self {
            store,
            tree_ids: Merge::resolved(tree_id),
            labels: ConflictLabels::unlabeled(),
        }
    }
    /// Creates a `MergedTree` with the given tree IDs.
    ///
    /// Panics if `labels` specifies a number of sides that disagrees with
    /// `tree_ids`.
    pub fn new(store: Arc<Store>, tree_ids: Merge<TreeId>, labels: ConflictLabels) -> Self {
        // Labels are either absent or must match the merge arity exactly.
        if let Some(num_sides) = labels.num_sides() {
            assert_eq!(tree_ids.num_sides(), num_sides);
        }
        Self {
            store,
            tree_ids,
            labels,
        }
    }
    /// The `Store` associated with this tree.
    pub fn store(&self) -> &Arc<Store> {
        &self.store
    }
    /// The underlying tree IDs for this `MergedTree`. If there are file changes
    /// between two trees, then the tree IDs will be different.
    pub fn tree_ids(&self) -> &Merge<TreeId> {
        &self.tree_ids
    }
    /// Extracts the underlying tree IDs for this `MergedTree`, discarding any
    /// conflict labels.
    pub fn into_tree_ids(self) -> Merge<TreeId> {
        self.tree_ids
    }
    /// Returns this merge's conflict labels, if any.
    pub fn labels(&self) -> &ConflictLabels {
        &self.labels
    }
    /// Returns both the underlying tree IDs and any conflict labels. This can
    /// be used to check whether there are changes in files to be materialized
    /// in the working copy.
    pub fn tree_ids_and_labels(&self) -> (&Merge<TreeId>, &ConflictLabels) {
        (&self.tree_ids, &self.labels)
    }
    /// Extracts the underlying tree IDs and conflict labels.
    pub fn into_tree_ids_and_labels(self) -> (Merge<TreeId>, ConflictLabels) {
        (self.tree_ids, self.labels)
    }
    /// Reads the merge of tree objects represented by this `MergedTree`.
    pub async fn trees(&self) -> BackendResult<Merge<Tree>> {
        self.tree_ids
            .try_map_async(|id| self.store.get_tree_async(RepoPathBuf::root(), id))
            .await
    }
    /// Returns a label for each term in a merge. Resolved merges use the
    /// provided label, while conflicted merges keep their original labels.
    /// Missing labels are indicated by empty strings.
    pub fn labels_by_term<'a>(&'a self, label: &'a str) -> Merge<&'a str> {
        if self.tree_ids.is_resolved() {
            // Resolved merges are expected to carry no labels.
            assert!(!self.labels.has_labels());
            Merge::resolved(label)
        } else if self.labels.has_labels() {
            // If the merge is conflicted and it already has labels, then we want to use
            // those labels instead of the provided label. This ensures that rebasing
            // conflicted commits keeps meaningful labels.
            let labels = self.labels.as_merge();
            assert_eq!(labels.num_sides(), self.tree_ids.num_sides());
            labels.map(|label| label.as_str())
        } else {
            // If the merge is conflicted but it doesn't have labels (e.g. conflicts created
            // before labels were added), then we use empty strings to indicate missing
            // labels. We could consider using `label` for all the sides instead, but it
            // might be confusing.
            Merge::repeated("", self.tree_ids.num_sides())
        }
    }
    /// Tries to resolve any conflicts, resolving any conflicts that can be
    /// automatically resolved and leaving the rest unresolved.
    pub async fn resolve(self) -> BackendResult<Self> {
        let merged = merge_trees(&self.store, self.tree_ids).await?;
        // If the result can be resolved, then `merge_trees()` above would have returned
        // a resolved merge. However, that function will always preserve the arity of
        // conflicts it cannot resolve. So we simplify the conflict again
        // here to possibly reduce a complex conflict to a simpler one.
        let (simplified_labels, simplified) = if merged.is_resolved() {
            (ConflictLabels::unlabeled(), merged)
        } else {
            self.labels.simplify_with(&merged)
        };
        // If debug assertions are enabled, check that the merge was idempotent. In
        // particular, that this last simplification doesn't enable further automatic
        // resolutions
        if cfg!(debug_assertions) {
            let re_merged = merge_trees(&self.store, simplified.clone()).await.unwrap();
            debug_assert_eq!(re_merged, simplified);
        }
        Ok(Self {
            store: self.store,
            tree_ids: simplified,
            labels: simplified_labels,
        })
    }
    /// An iterator over the conflicts in this tree, including subtrees.
    /// Recurses into subtrees and yields conflicts in those, but only if
    /// all sides are trees, so tree/file conflicts will be reported as a single
    /// conflict, not one for each path in the tree.
    pub fn conflicts(
        &self,
    ) -> impl Iterator<Item = (RepoPathBuf, BackendResult<MergedTreeValue>)> + use<> {
        self.conflicts_matching(&EverythingMatcher)
    }
    /// Like `conflicts()` but restricted by a matcher.
    pub fn conflicts_matching<'matcher>(
        &self,
        matcher: &'matcher dyn Matcher,
    ) -> impl Iterator<Item = (RepoPathBuf, BackendResult<MergedTreeValue>)> + use<'matcher> {
        ConflictIterator::new(self, matcher)
    }
    /// Whether this tree has conflicts.
    pub fn has_conflict(&self) -> bool {
        !self.tree_ids.is_resolved()
    }
    /// The value at the given path. The value can be `Resolved` even if
    /// `self` is a `Conflict`, which happens if the value at the path can be
    /// trivially merged.
    pub fn path_value(&self, path: &RepoPath) -> BackendResult<MergedTreeValue> {
        self.path_value_async(path).block_on()
    }
    /// Async version of `path_value()`.
    pub async fn path_value_async(&self, path: &RepoPath) -> BackendResult<MergedTreeValue> {
        match path.split() {
            Some((dir, basename)) => {
                let trees = self.trees().await?;
                match trees.sub_tree_recursive(dir).await? {
                    // An absent parent directory means the path is absent too.
                    None => Ok(Merge::absent()),
                    Some(tree) => Ok(tree.value(basename).cloned()),
                }
            }
            // The root path has no parent directory; its "value" is the merged
            // root trees themselves.
            None => Ok(self.to_merged_tree_value()),
        }
    }
    /// Wraps each root tree ID into a `TreeValue::Tree` so the root can be
    /// handled like any other merged value.
    fn to_merged_tree_value(&self) -> MergedTreeValue {
        self.tree_ids
            .map(|tree_id| Some(TreeValue::Tree(tree_id.clone())))
    }
    /// Iterator over the entries matching the given matcher. Subtrees are
    /// visited recursively. Subtrees that differ between the current
    /// `MergedTree`'s terms are merged on the fly. Missing terms are treated as
    /// empty directories. Subtrees that conflict with non-trees are not
    /// visited. For example, if current tree is a merge of 3 trees, and the
    /// entry for 'foo' is a conflict between a change subtree and a symlink
    /// (i.e. the subdirectory was replaced by symlink in one side of the
    /// conflict), then the entry for `foo` itself will be emitted, but no
    /// entries from inside `foo/` from either of the trees will be.
    pub fn entries(&self) -> TreeEntriesIterator<'static> {
        self.entries_matching(&EverythingMatcher)
    }
    /// Like `entries()` but restricted by a matcher.
    pub fn entries_matching<'matcher>(
        &self,
        matcher: &'matcher dyn Matcher,
    ) -> TreeEntriesIterator<'matcher> {
        TreeEntriesIterator::new(self, matcher)
    }
    /// Stream of the differences between this tree and another tree.
    ///
    /// Tree entries (`MergedTreeValue::is_tree()`) are included only if the
    /// other side is present and not a tree.
    fn diff_stream_internal<'matcher>(
        &self,
        other: &Self,
        matcher: &'matcher dyn Matcher,
    ) -> TreeDiffStream<'matcher> {
        let concurrency = self.store().concurrency();
        if concurrency <= 1 {
            // The async implementation buys nothing when the backend doesn't
            // benefit from concurrent reads; use the synchronous iterator.
            Box::pin(futures::stream::iter(TreeDiffIterator::new(
                self, other, matcher,
            )))
        } else {
            Box::pin(TreeDiffStreamImpl::new(self, other, matcher, concurrency))
        }
    }
    /// Stream of the differences between this tree and another tree.
    pub fn diff_stream<'matcher>(
        &self,
        other: &Self,
        matcher: &'matcher dyn Matcher,
    ) -> TreeDiffStream<'matcher> {
        stream_without_trees(self.diff_stream_internal(other, matcher))
    }
    /// Like `diff_stream()` but files in a removed tree will be returned before
    /// a file that replaces it.
    pub fn diff_stream_for_file_system<'matcher>(
        &self,
        other: &Self,
        matcher: &'matcher dyn Matcher,
    ) -> TreeDiffStream<'matcher> {
        Box::pin(DiffStreamForFileSystem::new(
            self.diff_stream_internal(other, matcher),
        ))
    }
    /// Like `diff_stream()` but takes the given copy records into account.
    pub fn diff_stream_with_copies<'a>(
        &self,
        other: &Self,
        matcher: &'a dyn Matcher,
        copy_records: &'a CopyRecords,
    ) -> BoxStream<'a, CopiesTreeDiffEntry> {
        let stream = self.diff_stream(other, matcher);
        Box::pin(CopiesTreeDiffStream::new(
            stream,
            self.clone(),
            other.clone(),
            copy_records,
        ))
    }
    /// Merges the provided trees into a single `MergedTree`. Any conflicts will
    /// be resolved recursively if possible. The provided labels are used if a
    /// conflict arises. However, if one of the input trees is already
    /// conflicted, the corresponding label will be ignored, and its existing
    /// labels will be used instead.
    pub async fn merge(merge: Merge<(Self, String)>) -> BackendResult<Self> {
        Self::merge_no_resolve(merge).resolve().await
    }
    /// Merges the provided trees into a single `MergedTree`, without attempting
    /// to resolve file conflicts.
    pub fn merge_no_resolve(merge: Merge<(Self, String)>) -> Self {
        // All input trees must come from the same store.
        debug_assert!(
            merge
                .iter()
                .map(|(tree, _)| Arc::as_ptr(tree.store()))
                .all_equal()
        );
        let store = merge.first().0.store().clone();
        // Labels and tree IDs are flattened through the same nesting so that
        // the resulting terms stay aligned with each other.
        let flattened_labels = ConflictLabels::from_merge(
            merge
                .map(|(tree, label)| tree.labels_by_term(label))
                .flatten()
                .map(|&label| label.to_owned()),
        );
        let flattened_tree_ids: Merge<TreeId> = merge
            .into_iter()
            .map(|(tree, _label)| tree.into_tree_ids())
            .collect::<MergeBuilder<_>>()
            .build()
            .flatten();
        let (labels, tree_ids) = flattened_labels.simplify_with(&flattened_tree_ids);
        Self::new(store, tree_ids, labels)
    }
}
/// A single entry in a tree diff.
#[derive(Debug)]
pub struct TreeDiffEntry {
    /// The path.
    pub path: RepoPathBuf,
    /// The before/after values at the path, or the backend error that
    /// prevented reading them.
    pub values: BackendResult<Diff<MergedTreeValue>>,
}
/// Type alias for the result from `MergedTree::diff_stream()`. We use a
/// `Stream` instead of an `Iterator` so high-latency backends (e.g. cloud-based
/// ones) can fetch trees asynchronously.
pub type TreeDiffStream<'matcher> = BoxStream<'matcher, TreeDiffEntry>;
/// Iterates the `(name, values)` pairs of `trees` non-recursively, taking the
/// fast path when the merge is resolved and merging entries by name otherwise.
fn all_tree_entries(
    trees: &Merge<Tree>,
) -> impl Iterator<Item = (&RepoPathComponent, MergedTreeVal<'_>)> {
    match trees.as_resolved() {
        // Fast path: a resolved merge is a plain walk over a single tree.
        Some(tree) => Either::Left(
            tree.entries_non_recursive()
                .map(|entry| (entry.name(), Merge::normal(entry.value()))),
        ),
        None => {
            let same_change = trees.first().store().merge_options().same_change;
            Either::Right(all_merged_tree_entries(trees).map(move |(name, values)| {
                // TODO: move resolve_trivial() to caller?
                match values.resolve_trivial(same_change) {
                    Some(resolved) => (name, Merge::resolved(*resolved)),
                    None => (name, values),
                }
            }))
        }
    }
}
/// Suppose the given `trees` aren't resolved, iterates `(name, values)` pairs
/// non-recursively. This also works if `trees` are resolved, but is more costly
/// than `tree.entries_non_recursive()`.
pub fn all_merged_tree_entries(
    trees: &Merge<Tree>,
) -> impl Iterator<Item = (&RepoPathComponent, MergedTreeVal<'_>)> {
    // One peekable entry iterator per side. Entries within each tree are
    // visited in name order, so this is a k-way merge keyed on the name.
    let mut entries_iters = trees
        .iter()
        .map(|tree| tree.entries_non_recursive().peekable())
        .collect_vec();
    iter::from_fn(move || {
        // Smallest not-yet-consumed name across all sides; iteration ends
        // when every side is exhausted.
        let next_name = entries_iters
            .iter_mut()
            .filter_map(|iter| iter.peek())
            .map(|entry| entry.name())
            .min()?;
        // Each side contributes its entry only if it matches `next_name`;
        // otherwise the `?` makes that side contribute an absent value.
        let values: MergeBuilder<_> = entries_iters
            .iter_mut()
            .map(|iter| {
                let entry = iter.next_if(|entry| entry.name() == next_name)?;
                Some(entry.value())
            })
            .collect();
        Some((next_name, values.build()))
    })
}
/// Iterates `(name, diff)` pairs for the entries that differ between `trees1`
/// and `trees2`, merging the two entry sequences by name. An entry missing on
/// one side appears as `Merge::absent()` on that side.
fn merged_tree_entry_diff<'a>(
    trees1: &'a Merge<Tree>,
    trees2: &'a Merge<Tree>,
) -> impl Iterator<Item = (&'a RepoPathComponent, Diff<MergedTreeVal<'a>>)> {
    let entries1 = all_tree_entries(trees1);
    let entries2 = all_tree_entries(trees2);
    itertools::merge_join_by(entries1, entries2, |(name1, _), (name2, _)| name1.cmp(name2))
        .map(|entry| {
            let (name, before, after) = match entry {
                EitherOrBoth::Both((name, value1), (_, value2)) => (name, value1, value2),
                EitherOrBoth::Left((name, value1)) => (name, value1, Merge::absent()),
                EitherOrBoth::Right((name, value2)) => (name, Merge::absent(), value2),
            };
            (name, Diff::new(before, after))
        })
        .filter(|(_, diff)| diff.is_changed())
}
/// Recursive iterator over the entries in a tree.
pub struct TreeEntriesIterator<'matcher> {
    /// Store used to read subtrees while descending.
    store: Arc<Store>,
    /// Stack of per-directory entry lists; the top is the directory currently
    /// being iterated.
    stack: Vec<TreeEntriesDirItem>,
    /// Filter deciding which files and directories to visit.
    matcher: &'matcher dyn Matcher,
}
/// Entries of a single directory, stored in reverse name order so the next
/// entry can be `pop()`ed cheaply.
struct TreeEntriesDirItem {
    entries: Vec<(RepoPathBuf, MergedTreeValue)>,
}
impl TreeEntriesDirItem {
    /// Collects the entries of `trees` that pass `matcher`, reversed so that
    /// the first entry (by name) is popped first.
    fn new(trees: &Merge<Tree>, matcher: &dyn Matcher) -> Self {
        let dir = trees.first().dir();
        let mut entries: Vec<_> = all_tree_entries(trees)
            .filter_map(|(name, value)| {
                let path = dir.join(name);
                let keep = if value.is_tree() {
                    // TODO: Handle the other cases (specific files and trees)
                    !matcher.visit(&path).is_nothing()
                } else {
                    matcher.matches(&path)
                };
                keep.then(|| (path, value.cloned()))
            })
            .collect();
        entries.reverse();
        Self { entries }
    }
}
impl<'matcher> TreeEntriesIterator<'matcher> {
    /// Starts iteration at the root of `trees`.
    fn new(trees: &MergedTree, matcher: &'matcher dyn Matcher) -> Self {
        // Seed the stack with a single pseudo-entry for the root directory;
        // the first `next()` call descends into it.
        let root_item = TreeEntriesDirItem {
            entries: vec![(RepoPathBuf::root(), trees.to_merged_tree_value())],
        };
        Self {
            store: trees.store.clone(),
            stack: vec![root_item],
            matcher,
        }
    }
}
impl Iterator for TreeEntriesIterator<'_> {
    type Item = (RepoPathBuf, BackendResult<MergedTreeValue>);
    /// Depth-first walk: tree values are pushed onto the stack and descended
    /// into; other values are yielded. A backend error is yielded in place of
    /// the value for the path that failed.
    fn next(&mut self) -> Option<Self::Item> {
        while let Some(top) = self.stack.last_mut() {
            if let Some((path, value)) = top.entries.pop() {
                // `to_tree_merge()` returns Some only if all sides are trees
                // (or absent); otherwise the value is yielded as-is.
                let maybe_trees = match value.to_tree_merge(&self.store, &path).block_on() {
                    Ok(maybe_trees) => maybe_trees,
                    Err(err) => return Some((path, Err(err))),
                };
                if let Some(trees) = maybe_trees {
                    self.stack
                        .push(TreeEntriesDirItem::new(&trees, self.matcher));
                } else {
                    return Some((path, Ok(value)));
                }
            } else {
                // Current directory exhausted; pop back to the parent.
                self.stack.pop();
            }
        }
        None
    }
}
/// The state for the non-recursive iteration over the conflicted entries in a
/// single directory.
struct ConflictsDirItem {
    // Stored in reverse name order so the next entry can be `pop()`ed cheaply.
    entries: Vec<(RepoPathBuf, MergedTreeValue)>,
}
impl ConflictsDirItem {
    /// Collects the conflicted entries of `trees` that pass `matcher`,
    /// reversed so that the first entry (by name) is popped first.
    fn new(trees: &Merge<Tree>, matcher: &dyn Matcher) -> Self {
        // A resolved merge cannot contain conflicts anywhere below it.
        if trees.is_resolved() {
            return Self { entries: vec![] };
        }
        let dir = trees.first().dir();
        let mut entries: Vec<_> = all_tree_entries(trees)
            .filter(|(_, value)| !value.is_resolved())
            .filter_map(|(basename, value)| {
                let path = dir.join(basename);
                let keep = if value.is_tree() {
                    !matcher.visit(&path).is_nothing()
                } else {
                    matcher.matches(&path)
                };
                keep.then(|| (path, value.cloned()))
            })
            .collect();
        entries.reverse();
        Self { entries }
    }
}
/// Recursive iterator over the conflicted entries in a tree; see
/// `MergedTree::conflicts_matching()`.
struct ConflictIterator<'matcher> {
    /// Store used to read subtrees while descending.
    store: Arc<Store>,
    /// Stack of per-directory conflict lists; the top is the directory
    /// currently being iterated.
    stack: Vec<ConflictsDirItem>,
    /// Filter deciding which files and directories to visit.
    matcher: &'matcher dyn Matcher,
}
impl<'matcher> ConflictIterator<'matcher> {
    /// Starts conflict iteration at the root of `tree`.
    fn new(tree: &MergedTree, matcher: &'matcher dyn Matcher) -> Self {
        // Seed the stack with a pseudo-entry for the root directory; the
        // first `next()` call descends from there.
        let root_item = ConflictsDirItem {
            entries: vec![(RepoPathBuf::root(), tree.to_merged_tree_value())],
        };
        Self {
            store: tree.store().clone(),
            stack: vec![root_item],
            matcher,
        }
    }
}
impl Iterator for ConflictIterator<'_> {
    type Item = (RepoPathBuf, BackendResult<MergedTreeValue>);
    /// Depth-first walk over conflicted entries. Tree-only conflicts are
    /// descended into; mixed conflicts are yielded as a single entry.
    fn next(&mut self) -> Option<Self::Item> {
        while let Some(top) = self.stack.last_mut() {
            if let Some((path, tree_values)) = top.entries.pop() {
                match tree_values.to_tree_merge(&self.store, &path).block_on() {
                    Ok(Some(trees)) => {
                        // If all sides are trees or missing, descend into the merged tree
                        self.stack.push(ConflictsDirItem::new(&trees, self.matcher));
                    }
                    Ok(None) => {
                        // Otherwise this is a conflict between files, trees, etc. If they could
                        // be automatically resolved, they should have been when the top-level
                        // tree conflict was written, so we assume that they can't be.
                        return Some((path, Ok(tree_values)));
                    }
                    Err(err) => {
                        return Some((path, Err(err)));
                    }
                }
            } else {
                // Current directory exhausted; pop back to the parent.
                self.stack.pop();
            }
        }
        None
    }
}
/// Iterator over the differences between two trees.
///
/// Tree entries (`MergedTreeValue::is_tree()`) are included only if the other
/// side is present and not a tree.
pub struct TreeDiffIterator<'matcher> {
    /// Store used to read subtrees while descending.
    store: Arc<Store>,
    /// Stack of per-directory diff lists; the top is the directory currently
    /// being diffed.
    stack: Vec<TreeDiffDir>,
    /// Filter deciding which files and directories to diff.
    matcher: &'matcher dyn Matcher,
}
/// Diffed entries of a single directory, stored in reverse name order so the
/// next entry can be `pop()`ed cheaply.
struct TreeDiffDir {
    entries: Vec<(RepoPathBuf, Diff<MergedTreeValue>)>,
}
impl<'matcher> TreeDiffIterator<'matcher> {
    /// Creates an iterator over the differences between two trees.
    pub fn new(tree1: &MergedTree, tree2: &MergedTree, matcher: &'matcher dyn Matcher) -> Self {
        assert!(Arc::ptr_eq(tree1.store(), tree2.store()));
        let root_dir = RepoPath::root();
        let mut stack = Vec::new();
        // Seed the stack with a pseudo-entry diffing the two root trees,
        // unless the matcher rules out the entire tree.
        if !matcher.visit(root_dir).is_nothing() {
            stack.push(TreeDiffDir {
                entries: vec![(
                    root_dir.to_owned(),
                    Diff::new(tree1.to_merged_tree_value(), tree2.to_merged_tree_value()),
                )],
            });
        };
        Self {
            store: tree1.store().clone(),
            stack,
            matcher,
        }
    }
    /// Gets the given trees if `values` are trees, otherwise an empty tree.
    fn trees(
        store: &Arc<Store>,
        dir: &RepoPath,
        values: &MergedTreeValue,
    ) -> BackendResult<Merge<Tree>> {
        if let Some(trees) = values.to_tree_merge(store, dir).block_on()? {
            Ok(trees)
        } else {
            // A non-tree (or mixed tree/non-tree) value diffs against an
            // empty directory.
            Ok(Merge::resolved(Tree::empty(store.clone(), dir.to_owned())))
        }
    }
}
impl TreeDiffDir {
    /// Computes the entry-level diff of two directories, applying `matcher`
    /// filtering. Entries are reversed so the first (by name) is popped first.
    fn from_trees(
        dir: &RepoPath,
        trees1: &Merge<Tree>,
        trees2: &Merge<Tree>,
        matcher: &dyn Matcher,
    ) -> Self {
        let mut entries = vec![];
        for (name, diff) in merged_tree_entry_diff(trees1, trees2) {
            let path = dir.join(name);
            let tree_before = diff.before.is_tree();
            let tree_after = diff.after.is_tree();
            // Check if trees and files match, but only if either side is a tree or a file
            // (don't query the matcher unnecessarily).
            let tree_matches = (tree_before || tree_after) && !matcher.visit(&path).is_nothing();
            let file_matches = (!tree_before || !tree_after) && matcher.matches(&path);
            // Replace trees or files that don't match by `Merge::absent()`
            let before = if (tree_before && tree_matches) || (!tree_before && file_matches) {
                diff.before
            } else {
                Merge::absent()
            };
            let after = if (tree_after && tree_matches) || (!tree_after && file_matches) {
                diff.after
            } else {
                Merge::absent()
            };
            // Both sides filtered out: nothing to report for this path.
            if before.is_absent() && after.is_absent() {
                continue;
            }
            entries.push((path, Diff::new(before.cloned(), after.cloned())));
        }
        entries.reverse();
        Self { entries }
    }
}
impl Iterator for TreeDiffIterator<'_> {
    type Item = TreeDiffEntry;
    fn next(&mut self) -> Option<Self::Item> {
        while let Some(top) = self.stack.last_mut() {
            let (path, diff) = match top.entries.pop() {
                Some(entry) => entry,
                None => {
                    // Current directory exhausted; pop back to the parent.
                    self.stack.pop().unwrap();
                    continue;
                }
            };
            // If either side is a tree, read the subtrees (empty for a
            // non-tree side) and queue their diff on the stack.
            if diff.before.is_tree() || diff.after.is_tree() {
                let (before_tree, after_tree) = match (
                    Self::trees(&self.store, &path, &diff.before),
                    Self::trees(&self.store, &path, &diff.after),
                ) {
                    (Ok(before_tree), Ok(after_tree)) => (before_tree, after_tree),
                    (Err(before_err), _) => {
                        return Some(TreeDiffEntry {
                            path,
                            values: Err(before_err),
                        });
                    }
                    (_, Err(after_err)) => {
                        return Some(TreeDiffEntry {
                            path,
                            values: Err(after_err),
                        });
                    }
                };
                let subdir =
                    TreeDiffDir::from_trees(&path, &before_tree, &after_tree, self.matcher);
                self.stack.push(subdir);
            };
            // A file-like side means the path itself is part of the diff; it
            // is emitted before the queued subtree entries are processed.
            if diff.before.is_file_like() || diff.after.is_file_like() {
                return Some(TreeDiffEntry {
                    path,
                    values: Ok(diff),
                });
            }
        }
        None
    }
}
/// Stream of differences between two trees.
///
/// Tree entries (`MergedTreeValue::is_tree()`) are included only if the other
/// side is present and not a tree.
pub struct TreeDiffStreamImpl<'matcher> {
    /// Store used to read subtrees.
    store: Arc<Store>,
    /// Filter deciding which files and directories to diff.
    matcher: &'matcher dyn Matcher,
    /// Pairs of tree values that may or may not be ready to emit, sorted in the
    /// order we want to emit them. If either side is a tree, there will be
    /// a corresponding entry in `pending_trees`. The item is ready to emit
    /// unless there's a smaller or equal path in `pending_trees`.
    items: BTreeMap<RepoPathBuf, BackendResult<Diff<MergedTreeValue>>>,
    // TODO: Is it better to combine this and `items` into a single map?
    #[expect(clippy::type_complexity)]
    pending_trees:
        BTreeMap<RepoPathBuf, BoxFuture<'matcher, BackendResult<(Merge<Tree>, Merge<Tree>)>>>,
    /// The maximum number of trees to request concurrently. However, we do the
    /// accounting per path, so there will often be twice as many pending
    /// `Backend::read_tree()` calls - for the "before" and "after" sides. For
    /// conflicts, there will be even more.
    max_concurrent_reads: usize,
    /// The maximum number of items in `items`. However, we will always add the
    /// full differences from a particular pair of trees, so it may temporarily
    /// go over the limit (until we emit those items). It may also go over the
    /// limit because we have a file item that's blocked by pending subdirectory
    /// items.
    max_queued_items: usize,
}
impl<'matcher> TreeDiffStreamImpl<'matcher> {
    /// Creates a stream over the differences between two trees. Generally
    /// prefer `MergedTree::diff_stream()` over calling this directly.
    pub fn new(
        tree1: &MergedTree,
        tree2: &MergedTree,
        matcher: &'matcher dyn Matcher,
        max_concurrent_reads: usize,
    ) -> Self {
        assert!(Arc::ptr_eq(tree1.store(), tree2.store()));
        let store = tree1.store().clone();
        let mut stream = Self {
            store: store.clone(),
            matcher,
            items: BTreeMap::new(),
            pending_trees: BTreeMap::new(),
            max_concurrent_reads,
            max_queued_items: 10000,
        };
        // Seed the stream with a pending read of both root trees.
        let dir = RepoPathBuf::root();
        let root_tree_fut = Box::pin(try_join(
            Self::trees(store.clone(), dir.clone(), tree1.to_merged_tree_value()),
            Self::trees(store, dir.clone(), tree2.to_merged_tree_value()),
        ));
        stream.pending_trees.insert(dir, root_tree_fut);
        stream
    }
    /// Reads the tree at `dir` if `value` is a tree, otherwise returns an
    /// empty tree.
    async fn single_tree(
        store: &Arc<Store>,
        dir: RepoPathBuf,
        value: Option<&TreeValue>,
    ) -> BackendResult<Tree> {
        match value {
            Some(TreeValue::Tree(tree_id)) => store.get_tree_async(dir, tree_id).await,
            _ => Ok(Tree::empty(store.clone(), dir.clone())),
        }
    }
    /// Gets the given trees if `values` are trees, otherwise an empty tree.
    async fn trees(
        store: Arc<Store>,
        dir: RepoPathBuf,
        values: MergedTreeValue,
    ) -> BackendResult<Merge<Tree>> {
        if values.is_tree() {
            values
                .try_map_async(|value| Self::single_tree(&store, dir.clone(), value.as_ref()))
                .await
        } else {
            Ok(Merge::resolved(Tree::empty(store, dir)))
        }
    }
    /// Diffs the entries of two directories, queueing subtree reads into
    /// `pending_trees` and recording file-level diffs into `items`.
    fn add_dir_diff_items(&mut self, dir: &RepoPath, trees1: &Merge<Tree>, trees2: &Merge<Tree>) {
        for (basename, diff) in merged_tree_entry_diff(trees1, trees2) {
            let path = dir.join(basename);
            let tree_before = diff.before.is_tree();
            let tree_after = diff.after.is_tree();
            // Check if trees and files match, but only if either side is a tree or a file
            // (don't query the matcher unnecessarily).
            let tree_matches =
                (tree_before || tree_after) && !self.matcher.visit(&path).is_nothing();
            let file_matches = (!tree_before || !tree_after) && self.matcher.matches(&path);
            // Replace trees or files that don't match by `Merge::absent()`
            let before = if (tree_before && tree_matches) || (!tree_before && file_matches) {
                diff.before
            } else {
                Merge::absent()
            };
            let after = if (tree_after && tree_matches) || (!tree_after && file_matches) {
                diff.after
            } else {
                Merge::absent()
            };
            if before.is_absent() && after.is_absent() {
                continue;
            }
            // If the path was a tree on either side of the diff, read those trees.
            if tree_matches {
                let before_tree_future =
                    Self::trees(self.store.clone(), path.clone(), before.cloned());
                let after_tree_future =
                    Self::trees(self.store.clone(), path.clone(), after.cloned());
                let both_trees_future = try_join(before_tree_future, after_tree_future);
                self.pending_trees
                    .insert(path.clone(), Box::pin(both_trees_future));
            }
            if before.is_file_like() || after.is_file_like() {
                self.items
                    .insert(path, Ok(Diff::new(before.cloned(), after.cloned())));
            }
        }
    }
    /// Polls up to `max_concurrent_reads` pending tree futures, turning each
    /// completed one into diff items (or an error item), looping until no
    /// future completes or the item queue is full.
    fn poll_tree_futures(&mut self, cx: &mut Context<'_>) {
        loop {
            let mut tree_diffs = vec![];
            let mut some_pending = false;
            let mut all_pending = true;
            for (dir, future) in self
                .pending_trees
                .iter_mut()
                .take(self.max_concurrent_reads)
            {
                if let Poll::Ready(tree_diff) = future.as_mut().poll(cx) {
                    all_pending = false;
                    tree_diffs.push((dir.clone(), tree_diff));
                } else {
                    some_pending = true;
                }
            }
            for (dir, tree_diff) in tree_diffs {
                // The completed future is dropped here; its result is consumed below.
                drop(self.pending_trees.remove_entry(&dir).unwrap());
                match tree_diff {
                    Ok((trees1, trees2)) => {
                        self.add_dir_diff_items(&dir, &trees1, &trees2);
                    }
                    Err(err) => {
                        // Report the read error as the item for this directory.
                        self.items.insert(dir, Err(err));
                    }
                }
            }
            // If none of the futures have been polled and returned `Poll::Pending`, we must
            // not return. If we did, nothing would call the waker so we might never get
            // polled again.
            if all_pending || (some_pending && self.items.len() >= self.max_queued_items) {
                return;
            }
        }
    }
}
impl Stream for TreeDiffStreamImpl<'_> {
type Item = TreeDiffEntry;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
// Go through all pending tree futures and poll them.
self.poll_tree_futures(cx);
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | true |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/op_walk.rs | lib/src/op_walk.rs | // Copyright 2020-2023 The Jujutsu Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Utility for operation id resolution and traversal.
use std::cmp::Ordering;
use std::collections::HashMap;
use std::collections::HashSet;
use std::slice;
use std::sync::Arc;
use itertools::Itertools as _;
use pollster::FutureExt as _;
use thiserror::Error;
use crate::dag_walk;
use crate::object_id::HexPrefix;
use crate::object_id::PrefixResolution;
use crate::op_heads_store;
use crate::op_heads_store::OpHeadResolutionError;
use crate::op_heads_store::OpHeadsStore;
use crate::op_heads_store::OpHeadsStoreError;
use crate::op_store::OpStore;
use crate::op_store::OpStoreError;
use crate::op_store::OpStoreResult;
use crate::op_store::OperationId;
use crate::operation::Operation;
use crate::repo::ReadonlyRepo;
use crate::repo::Repo as _;
use crate::repo::RepoLoader;
/// Error that may occur during evaluation of operation set expression.
///
/// Lower-level errors are wrapped transparently via `#[from]`, so `?` can
/// propagate them directly.
#[derive(Debug, Error)]
pub enum OpsetEvaluationError {
    /// Failed to resolve operation set expression.
    #[error(transparent)]
    OpsetResolution(#[from] OpsetResolutionError),
    /// Failed to read op heads.
    #[error(transparent)]
    OpHeadsStore(#[from] OpHeadsStoreError),
    /// Failed to resolve the current operation heads.
    #[error(transparent)]
    OpHeadResolution(#[from] OpHeadResolutionError),
    /// Failed to access operation object.
    #[error(transparent)]
    OpStore(#[from] OpStoreError),
}
/// Error that may occur during parsing and resolution of operation set
/// expression.
#[derive(Debug, Error)]
pub enum OpsetResolutionError {
    // TODO: Maybe empty/multiple operations should be allowed, and rejected by
    // caller as needed.
    /// Expression resolved to multiple operations.
    #[error(r#"The "{expr}" expression resolved to more than one operation"#)]
    MultipleOperations {
        /// Source expression.
        expr: String,
        /// Matched operation ids.
        candidates: Vec<OperationId>,
    },
    /// Expression resolved to no operations.
    #[error(r#"The "{0}" expression resolved to no operations"#)]
    EmptyOperations(String),
    /// Invalid symbol as an operation ID.
    #[error(r#"Operation ID "{0}" is not a valid hexadecimal prefix"#)]
    InvalidIdPrefix(String),
    /// Operation ID not found.
    #[error(r#"No operation ID matching "{0}""#)]
    NoSuchOperation(String),
    /// Operation ID prefix matches multiple operations.
    #[error(r#"Operation ID prefix "{0}" is ambiguous"#)]
    AmbiguousIdPrefix(String),
}
/// Resolves operation set expression without loading a repo.
///
/// "@" resolves to the single op head; multiple heads are reported as an
/// error rather than being merged.
pub fn resolve_op_for_load(
    repo_loader: &RepoLoader,
    op_str: &str,
) -> Result<Operation, OpsetEvaluationError> {
    let op_store = repo_loader.op_store();
    let op_heads_store = repo_loader.op_heads_store().as_ref();
    let current_op = || {
        op_heads_store::resolve_op_heads(op_heads_store, op_store, |op_heads| {
            Err(OpsetResolutionError::MultipleOperations {
                expr: "@".to_owned(),
                candidates: op_heads.iter().map(|op| op.id().clone()).collect(),
            }
            .into())
        })
    };
    resolve_single_op(
        op_store,
        current_op,
        || get_current_head_ops(op_store, op_heads_store),
        op_str,
    )
}
/// Resolves operation set expression against the loaded repo.
///
/// The "@" symbol will be resolved to the operation the repo was loaded at.
pub fn resolve_op_with_repo(
    repo: &ReadonlyRepo,
    op_str: &str,
) -> Result<Operation, OpsetEvaluationError> {
    // The repo's own operation acts as the single head for resolution.
    resolve_op_at(repo.op_store(), slice::from_ref(repo.operation()), op_str)
}
/// Resolves operation set expression at the given head operations.
///
/// "@" resolves only when `head_ops` contains exactly one operation.
pub fn resolve_op_at(
    op_store: &Arc<dyn OpStore>,
    head_ops: &[Operation],
    op_str: &str,
) -> Result<Operation, OpsetEvaluationError> {
    let get_current_op = || match head_ops {
        [] => Err(OpsetResolutionError::EmptyOperations("@".to_owned()).into()),
        [head_op] => Ok(head_op.clone()),
        _ => Err(OpsetResolutionError::MultipleOperations {
            expr: "@".to_owned(),
            candidates: head_ops.iter().map(|op| op.id().clone()).collect(),
        }
        .into()),
    };
    resolve_single_op(op_store, get_current_op, || Ok(head_ops.to_vec()), op_str)
}
/// Resolves operation set expression with the given "@" symbol resolution
/// callbacks.
///
/// The expression is a base symbol ("@" or an operation ID prefix) followed by
/// zero or more `-` (parent) and `+` (child) postfix operators, applied left
/// to right.
fn resolve_single_op(
    op_store: &Arc<dyn OpStore>,
    get_current_op: impl FnOnce() -> Result<Operation, OpsetEvaluationError>,
    get_head_ops: impl FnOnce() -> Result<Vec<Operation>, OpsetEvaluationError>,
    op_str: &str,
) -> Result<Operation, OpsetEvaluationError> {
    // Split e.g. "abc12-+-" into the base symbol "abc12" and postfix "-+-".
    let op_symbol = op_str.trim_end_matches(['-', '+']);
    let op_postfix = &op_str[op_symbol.len()..];
    // Head ops are only needed to find children ('+'); load them lazily.
    let head_ops = op_postfix.contains('+').then(get_head_ops).transpose()?;
    let mut operation = match op_symbol {
        "@" => get_current_op(),
        s => resolve_single_op_from_store(op_store, s),
    }?;
    for (i, c) in op_postfix.chars().enumerate() {
        let mut neighbor_ops = match c {
            '-' => operation.parents().try_collect()?,
            '+' => find_child_ops(head_ops.as_ref().unwrap(), operation.id())?,
            _ => unreachable!(),
        };
        operation = match neighbor_ops.len() {
            // Since there is no hint provided for `EmptyOperations` in
            // `opset_resolution_error_hint()` (there would be no useful hint for the
            // user to take action on anyway), we don't have to worry about op ids being
            // incoherent with the op set expression shown to the user, unlike for the
            // `MultipleOperations` variant.
            //
            // The full op set expression is guaranteed to be empty in this case,
            // because ancestors/descendants of an empty operation are empty.
            0 => Err(OpsetResolutionError::EmptyOperations(op_str.to_owned()))?,
            1 => neighbor_ops.pop().unwrap(),
            // Returns the exact subexpression that resolves to multiple operations,
            // rather than the full expression provided by the user.
            _ => Err(OpsetResolutionError::MultipleOperations {
                expr: op_str[..=op_symbol.len() + i].to_owned(),
                candidates: neighbor_ops.iter().map(|op| op.id().clone()).collect(),
            })?,
        };
    }
    Ok(operation)
}
/// Resolves a hexadecimal operation ID prefix to a single operation, rejecting
/// empty, invalid, unknown, and ambiguous prefixes.
fn resolve_single_op_from_store(
    op_store: &Arc<dyn OpStore>,
    op_str: &str,
) -> Result<Operation, OpsetEvaluationError> {
    // An empty string is not a valid prefix.
    if op_str.is_empty() {
        return Err(OpsetResolutionError::InvalidIdPrefix(op_str.to_owned()).into());
    }
    let Some(prefix) = HexPrefix::try_from_hex(op_str) else {
        return Err(OpsetResolutionError::InvalidIdPrefix(op_str.to_owned()).into());
    };
    let resolution = op_store.resolve_operation_id_prefix(&prefix).block_on()?;
    match resolution {
        PrefixResolution::SingleMatch(op_id) => {
            let data = op_store.read_operation(&op_id).block_on()?;
            Ok(Operation::new(op_store.clone(), op_id, data))
        }
        PrefixResolution::NoMatch => {
            Err(OpsetResolutionError::NoSuchOperation(op_str.to_owned()).into())
        }
        PrefixResolution::AmbiguousMatch => {
            Err(OpsetResolutionError::AmbiguousIdPrefix(op_str.to_owned()).into())
        }
    }
}
/// Loads the current head operations. The returned operations may contain
/// redundant ones which are ancestors of the other heads.
pub fn get_current_head_ops(
    op_store: &Arc<dyn OpStore>,
    op_heads_store: &dyn OpHeadsStore,
) -> Result<Vec<Operation>, OpsetEvaluationError> {
    let mut head_ops = Vec::new();
    for id in op_heads_store.get_op_heads().block_on()? {
        let data = op_store.read_operation(&id).block_on()?;
        head_ops.push(Operation::new(op_store.clone(), id, data));
    }
    // To stabilize output, sort in the same order as resolve_op_heads()
    head_ops.sort_by_key(|op| op.metadata().time.end.timestamp);
    Ok(head_ops)
}
/// Looks up children of the `root_op_id` by traversing from the `head_ops`.
///
/// This will be slow if the `root_op_id` is far away (or unreachable) from the
/// `head_ops`.
fn find_child_ops(
    head_ops: &[Operation],
    root_op_id: &OperationId,
) -> OpStoreResult<Vec<Operation>> {
    let mut children = Vec::new();
    for res in walk_ancestors(head_ops) {
        let op = res?;
        // The walk is in reverse topological order, so once we reach the root
        // itself no further children can appear.
        if op.id() == root_op_id {
            break;
        }
        if op.parent_ids().iter().any(|id| id == root_op_id) {
            children.push(op);
        }
    }
    Ok(children)
}
// Wrapper that orders operations primarily by their end timestamp, used as
// the ordering key for the timestamp-heuristic walks below.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
struct OperationByEndTime(Operation);
impl Ord for OperationByEndTime {
    fn cmp(&self, other: &Self) -> Ordering {
        let self_end_time = &self.0.metadata().time.end;
        let other_end_time = &other.0.metadata().time.end;
        // Tie-break equal timestamps by the operation itself so that
        // `cmp() == Equal` implies `==`.
        self_end_time
            .cmp(other_end_time)
            .then_with(|| self.0.cmp(&other.0)) // to comply with Eq
    }
}
impl PartialOrd for OperationByEndTime {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Delegate to `Ord` so the two orderings cannot disagree.
        Some(self.cmp(other))
    }
}
/// Walks `head_ops` and their ancestors in reverse topological order.
///
/// The returned iterator yields `Err` items for operations that fail to load;
/// the caller decides whether to stop or skip.
pub fn walk_ancestors(
    head_ops: &[Operation],
) -> impl Iterator<Item = OpStoreResult<Operation>> + use<> {
    // Wrap each head so the walk's internal ordering is by end timestamp.
    let head_ops = head_ops
        .iter()
        .cloned()
        .map(OperationByEndTime)
        .collect_vec();
    // Lazily load operations based on timestamp-based heuristic. This works so long
    // as the operation history is mostly linear.
    dag_walk::topo_order_reverse_lazy_ok(
        head_ops.into_iter().map(Ok),
        |OperationByEndTime(op)| op.id().clone(),
        |OperationByEndTime(op)| op.parents().map_ok(OperationByEndTime).collect_vec(),
        |_| panic!("graph has cycle"),
    )
    // Strip the ordering wrapper before yielding to the caller.
    .map_ok(|OperationByEndTime(op)| op)
}
/// Walks ancestors from `head_ops` in reverse topological order, excluding
/// ancestors of `root_ops`.
pub fn walk_ancestors_range(
    head_ops: &[Operation],
    root_ops: &[Operation],
) -> impl Iterator<Item = OpStoreResult<Operation>> + use<> {
    // Start from both heads and roots: the roots must be visited so their
    // ancestors can be recognized and excluded.
    let mut start_ops = itertools::chain(head_ops, root_ops)
        .cloned()
        .map(OperationByEndTime)
        .collect_vec();
    // Consume items until root_ops to get rid of unwanted ops.
    let leading_items = if root_ops.is_empty() {
        vec![]
    } else {
        let unwanted_ids = root_ops.iter().map(|op| op.id().clone()).collect();
        // Eagerly walks a first chunk, filtering out the unwanted ops and
        // pruning `start_ops` so the lazy walk below won't revisit them.
        collect_ancestors_until_roots(&mut start_ops, unwanted_ids)
    };
    // Lazily load operations based on timestamp-based heuristic. This works so long
    // as the operation history is mostly linear.
    let trailing_iter = dag_walk::topo_order_reverse_lazy_ok(
        start_ops.into_iter().map(Ok),
        |OperationByEndTime(op)| op.id().clone(),
        |OperationByEndTime(op)| op.parents().map_ok(OperationByEndTime).collect_vec(),
        |_| panic!("graph has cycle"),
    )
    .map_ok(|OperationByEndTime(op)| op);
    itertools::chain(leading_items, trailing_iter)
}
// Eagerly walks part of the ancestor graph from `start_ops`, returning the
// visited operations that are not in `unwanted_ids` nor reachable only
// through them. On return, `start_ops` holds the remaining frontier
// (presumably maintained by `topo_order_reverse_chunked` — see dag_walk)
// with unwanted entries removed, for the caller to continue walking lazily.
fn collect_ancestors_until_roots(
    start_ops: &mut Vec<OperationByEndTime>,
    mut unwanted_ids: HashSet<OperationId>,
) -> Vec<OpStoreResult<Operation>> {
    let sorted_ops = match dag_walk::topo_order_reverse_chunked(
        start_ops,
        |OperationByEndTime(op)| op.id().clone(),
        |OperationByEndTime(op)| op.parents().map_ok(OperationByEndTime).collect_vec(),
        |_| panic!("graph has cycle"),
    ) {
        Ok(sorted_ops) => sorted_ops,
        // Report the error as an item to match the iterator-of-results shape
        // used by the caller.
        Err(err) => return vec![Err(err)],
    };
    let mut items = Vec::new();
    for OperationByEndTime(op) in sorted_ops {
        if unwanted_ids.contains(op.id()) {
            // Unwantedness propagates: ancestors of an unwanted op are
            // unwanted too.
            unwanted_ids.extend(op.parent_ids().iter().cloned());
        } else {
            items.push(Ok(op));
        }
    }
    // Don't visit ancestors of unwanted ops further.
    start_ops.retain(|OperationByEndTime(op)| !unwanted_ids.contains(op.id()));
    items
}
/// Stats about `reparent_range()`.
///
/// Counts refer to operations, not commits.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ReparentStats {
    /// New head operation ids in order of the old `head_ops`.
    pub new_head_ids: Vec<OperationId>,
    /// The number of rewritten operations.
    pub rewritten_count: usize,
    /// The number of ancestor operations that become unreachable from the
    /// rewritten heads.
    pub unreachable_count: usize,
}
/// Reparents the operation range `root_ops..head_ops` onto the `dest_op`.
///
/// Returns the new head operation ids as well as some stats. If the old
/// operation heads are remapped to the new heads, the operations within the
/// range `dest_op..root_ops` become unreachable.
///
/// If the source operation range `root_ops..head_ops` was empty, the
/// `new_head_ids` will be `[dest_op.id()]`, meaning the `dest_op` is the head.
// TODO: Find better place to host this function. It might be an OpStore method.
pub fn reparent_range(
    op_store: &dyn OpStore,
    root_ops: &[Operation],
    head_ops: &[Operation],
    dest_op: &Operation,
) -> OpStoreResult<ReparentStats> {
    // Ops to rewrite, in reverse topological order (descendants first.)
    let ops_to_reparent: Vec<_> = walk_ancestors_range(head_ops, root_ops).try_collect()?;
    // Count the ops that become unreachable once the heads are remapped.
    let unreachable_count = walk_ancestors_range(root_ops, slice::from_ref(dest_op))
        .process_results(|iter| iter.count())?;
    // The last element of a reverse-topological walk would be the root op if
    // it were included in the range; rewriting it is not allowed.
    assert!(
        ops_to_reparent
            .last()
            .is_none_or(|op| op.id() != op_store.root_operation_id()),
        "root operation cannot be rewritten"
    );
    let mut rewritten_ids = HashMap::new();
    // Iterate ancestors-first so parent ids can be remapped as we go.
    for old_op in ops_to_reparent.into_iter().rev() {
        let mut data = old_op.store_operation().clone();
        // Parents outside the rewritten set collapse to `dest_op`, but only
        // once, so the rewritten op doesn't get duplicate parent ids.
        let mut dest_once = Some(dest_op.id());
        data.parents = data
            .parents
            .iter()
            .filter_map(|id| rewritten_ids.get(id).or_else(|| dest_once.take()))
            .cloned()
            .collect();
        let new_id = op_store.write_operation(&data).block_on()?;
        rewritten_ids.insert(old_op.id().clone(), new_id);
    }
    // Map old heads to their rewritten ids; heads that weren't rewritten
    // (empty source range) fall back to `dest_op` itself, again at most once.
    let mut dest_once = Some(dest_op.id());
    let new_head_ids = head_ops
        .iter()
        .filter_map(|op| rewritten_ids.get(op.id()).or_else(|| dest_once.take()))
        .cloned()
        .collect();
    Ok(ReparentStats {
        new_head_ids,
        rewritten_count: rewritten_ids.len(),
        unreachable_count,
    })
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
jj-vcs/jj | https://github.com/jj-vcs/jj/blob/10efcf35613c9c2076278f1721b5e6826e77c144/lib/src/content_hash.rs | lib/src/content_hash.rs | //! Portable, stable hashing suitable for identifying values
use blake2::Blake2b512;
// Re-export DigestUpdate so that the ContentHash proc macro can be used in
// external crates without directly depending on the digest crate.
pub use digest::Update as DigestUpdate;
use itertools::Itertools as _;
pub use jj_lib_proc_macros::ContentHash;
/// Portable, stable hashing suitable for identifying values
///
/// Variable-length sequences should hash a 64-bit little-endian representation
/// of their length, then their elements in order. Unordered containers should
/// order their elements according to their `Ord` implementation. Enums should
/// hash a 32-bit little-endian encoding of the ordinal number of the enum
/// variant, then the variant's fields in lexical order.
///
/// Structs can implement `ContentHash` by using `#[derive(ContentHash)]`.
pub trait ContentHash {
    /// Update the hasher state with this object's content
    ///
    /// Implementations must be deterministic and platform-independent so that
    /// equal values always produce equal digests.
    fn hash(&self, state: &mut impl DigestUpdate);
}
/// The 512-bit BLAKE2b content hash
///
/// Feeds `x`'s `ContentHash` encoding into a fresh BLAKE2b-512 hasher and
/// returns the 64-byte digest.
pub fn blake2b_hash(x: &(impl ContentHash + ?Sized)) -> digest::Output<Blake2b512> {
    use digest::Digest as _;
    let mut hasher = Blake2b512::default();
    x.hash(&mut hasher);
    hasher.finalize()
}
// The unit type contributes no bytes to the hash.
impl ContentHash for () {
    fn hash(&self, _: &mut impl DigestUpdate) {}
}
// Implements `ContentHash` for tuples by hashing each element in order.
// `$n` is the tuple index and `$T` the matching type parameter.
macro_rules! tuple_impls {
    ($( ( $($n:tt $T:ident),+ ) )+) => {
        $(
            impl<$($T: ContentHash,)+> ContentHash for ($($T,)+) {
                fn hash(&self, state: &mut impl DigestUpdate) {
                    $(self.$n.hash(state);)+
                }
            }
        )+
    }
}
// Arities 1 through 4. No length prefix is hashed because the arity is
// fixed by the type.
tuple_impls! {
    (0 T0)
    (0 T0, 1 T1)
    (0 T0, 1 T1, 2 T2)
    (0 T0, 1 T1, 2 T2, 3 T3)
}
impl ContentHash for bool {
    fn hash(&self, state: &mut impl DigestUpdate) {
        // Hash as a single byte: 0 for false, 1 for true.
        u8::from(*self).hash(state);
    }
}
impl ContentHash for u8 {
fn hash(&self, state: &mut impl DigestUpdate) {
state.update(&[*self]);
}
}
impl ContentHash for u32 {
fn hash(&self, state: &mut impl DigestUpdate) {
state.update(&self.to_le_bytes());
}
}
impl ContentHash for i32 {
fn hash(&self, state: &mut impl DigestUpdate) {
state.update(&self.to_le_bytes());
}
}
impl ContentHash for u64 {
fn hash(&self, state: &mut impl DigestUpdate) {
state.update(&self.to_le_bytes());
}
}
impl ContentHash for i64 {
fn hash(&self, state: &mut impl DigestUpdate) {
state.update(&self.to_le_bytes());
}
}
// TODO: Specialize for [u8] once specialization exists
impl<T: ContentHash> ContentHash for [T] {
    fn hash(&self, state: &mut impl DigestUpdate) {
        // 64-bit LE length prefix, then the elements in order, per the
        // encoding described on the trait.
        state.update(&(self.len() as u64).to_le_bytes());
        for x in self {
            x.hash(state);
        }
    }
}
impl<T: ContentHash> ContentHash for Vec<T> {
    fn hash(&self, state: &mut impl DigestUpdate) {
        // Defer to the slice implementation.
        self.as_slice().hash(state);
    }
}
impl ContentHash for str {
    fn hash(&self, state: &mut impl DigestUpdate) {
        // Hash the UTF-8 bytes, including the slice length prefix.
        self.as_bytes().hash(state);
    }
}
impl ContentHash for String {
    fn hash(&self, state: &mut impl DigestUpdate) {
        self.as_str().hash(state);
    }
}
impl<T: ContentHash> ContentHash for Option<T> {
    fn hash(&self, state: &mut impl DigestUpdate) {
        // 32-bit LE variant ordinal (0 = None, 1 = Some) followed by the
        // payload, matching the enum encoding described on the trait.
        match self {
            None => state.update(&0u32.to_le_bytes()),
            Some(x) => {
                state.update(&1u32.to_le_bytes());
                x.hash(state);
            }
        }
    }
}
impl<K, V> ContentHash for std::collections::HashMap<K, V>
where
    K: ContentHash + Ord,
    V: ContentHash,
{
    fn hash(&self, state: &mut impl DigestUpdate) {
        // 64-bit LE length prefix, then entries in key order so the digest
        // doesn't depend on the map's randomized iteration order.
        state.update(&(self.len() as u64).to_le_bytes());
        for (k, v) in self.iter().sorted_by_key(|&(k, _)| k) {
            k.hash(state);
            v.hash(state);
        }
    }
}
impl<K> ContentHash for std::collections::HashSet<K>
where
    K: ContentHash + Ord,
{
    fn hash(&self, state: &mut impl DigestUpdate) {
        state.update(&(self.len() as u64).to_le_bytes());
        // Iterate in sorted order so the digest doesn't depend on the set's
        // randomized iteration order.
        for k in self.iter().sorted() {
            k.hash(state);
        }
    }
}
impl<K, V> ContentHash for std::collections::BTreeMap<K, V>
where
    K: ContentHash,
    V: ContentHash,
{
    fn hash(&self, state: &mut impl DigestUpdate) {
        state.update(&(self.len() as u64).to_le_bytes());
        // BTreeMap already iterates in key order, so no extra sort is needed.
        for (k, v) in self {
            k.hash(state);
            v.hash(state);
        }
    }
}
#[cfg(test)]
mod tests {
    use std::collections::BTreeMap;
    use std::collections::HashMap;
    use super::*;
    use crate::hex_util;
    #[test]
    fn test_string_sanity() {
        let a = "a".to_string();
        let b = "b".to_string();
        assert_eq!(hash(&a), hash(&a.clone()));
        assert_ne!(hash(&a), hash(&b));
        // The length prefix must keep "a" and "a\0" distinct.
        assert_ne!(hash(&"a".to_string()), hash(&"a\0".to_string()));
    }
    #[test]
    fn test_hash_map_key_value_distinction() {
        // ("ab", "cd") and ("a", "bcd") concatenate to the same bytes; the
        // per-element length prefixes must keep them distinct.
        let a = [("ab".to_string(), "cd".to_string())]
            .into_iter()
            .collect::<HashMap<_, _>>();
        let b = [("a".to_string(), "bcd".to_string())]
            .into_iter()
            .collect::<HashMap<_, _>>();
        assert_ne!(hash(&a), hash(&b));
    }
    #[test]
    fn test_btree_map_key_value_distinction() {
        // Same boundary case as above, for the BTreeMap implementation.
        let a = [("ab".to_string(), "cd".to_string())]
            .into_iter()
            .collect::<BTreeMap<_, _>>();
        let b = [("a".to_string(), "bcd".to_string())]
            .into_iter()
            .collect::<BTreeMap<_, _>>();
        assert_ne!(hash(&a), hash(&b));
    }
    #[test]
    fn test_tuple_sanity() {
        // Derived struct hashes must agree with the tuple implementations.
        #[derive(ContentHash)]
        struct T1(i32);
        #[derive(ContentHash)]
        struct T2(i32, i32);
        #[derive(ContentHash)]
        struct T3(i32, i32, i32);
        #[derive(ContentHash)]
        struct T4(i32, i32, i32, i32);
        assert_eq!(hash(&T1(0)), hash(&(0,)));
        assert_eq!(hash(&T2(0, 1)), hash(&(0, 1)));
        assert_eq!(hash(&T3(0, 1, 2)), hash(&(0, 1, 2)));
        assert_eq!(hash(&T4(0, 1, 2, 3)), hash(&(0, 1, 2, 3)));
    }
    #[test]
    fn test_struct_sanity() {
        #[derive(ContentHash)]
        struct Foo {
            x: i32,
        }
        assert_ne!(hash(&Foo { x: 42 }), hash(&Foo { x: 12 }));
    }
    #[test]
    fn test_option_sanity() {
        // The variant ordinal must distinguish wrapped from bare values.
        assert_ne!(hash(&Some(42)), hash(&42));
        assert_ne!(hash(&None::<i32>), hash(&42i32));
    }
    #[test]
    fn test_slice_sanity() {
        assert_ne!(hash(&[42i32][..]), hash(&[12i32][..]));
        assert_ne!(hash(&([] as [i32; 0])[..]), hash(&[42i32][..]));
        assert_ne!(hash(&([] as [i32; 0])[..]), hash(&()));
        assert_ne!(hash(&42i32), hash(&[42i32][..]));
    }
    #[test]
    fn test_consistent_hashing() {
        // Pins the exact digest so unintended encoding changes are caught.
        #[derive(ContentHash)]
        struct Foo {
            x: Vec<Option<i32>>,
            y: i64,
        }
        let foo_hash = hex_util::encode_hex(&hash(&Foo {
            x: vec![None, Some(42)],
            y: 17,
        }));
        insta::assert_snapshot!(
            foo_hash,
            @"e33c423b4b774b1353c414e0f9ef108822fde2fd5113fcd53bf7bd9e74e3206690b96af96373f268ed95dd020c7cbe171c7b7a6947fcaf5703ff6c8e208cefd4"
        );
        // Try again with an equivalent generic struct deriving ContentHash.
        #[derive(ContentHash)]
        struct GenericFoo<X, Y> {
            x: X,
            y: Y,
        }
        assert_eq!(
            hex_util::encode_hex(&hash(&GenericFoo {
                x: vec![None, Some(42)],
                y: 17i64
            })),
            foo_hash
        );
    }
    // Test that the derived version of `ContentHash` matches the one that's
    // manually implemented for `std::Option`.
    #[test]
    fn derive_for_enum() {
        #[derive(ContentHash)]
        enum MyOption<T> {
            None,
            Some(T),
        }
        assert_eq!(hash(&Option::<i32>::None), hash(&MyOption::<i32>::None));
        assert_eq!(hash(&Some(1)), hash(&MyOption::Some(1)));
    }
    // Convenience alias so the tests read as `hash(&x)`.
    fn hash(x: &(impl ContentHash + ?Sized)) -> digest::Output<Blake2b512> {
        blake2b_hash(x)
    }
}
| rust | Apache-2.0 | 10efcf35613c9c2076278f1721b5e6826e77c144 | 2026-01-04T15:37:48.912814Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.