repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/setup_program_test/duplicate_wallet_names.rs | packages/fuels-macros/tests/ui/setup_program_test/duplicate_wallet_names.rs | use fuels_macros::setup_program_test;
setup_program_test!(
Wallets("wallet1", "wallet1"),
Abigen(Contract(name = "MyContract", project = "some_project"))
);
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/setup_program_test/abigen_command_is_missing.rs | packages/fuels-macros/tests/ui/setup_program_test/abigen_command_is_missing.rs | use fuels_macros::setup_program_test;
setup_program_test!(Deploy(
name = "some_instance",
contract = "SomeUnknownContract",
wallet = "some_wallet"
));
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/setup_program_test/unknown_contract.rs | packages/fuels-macros/tests/ui/setup_program_test/unknown_contract.rs | use fuels_macros::setup_program_test;
setup_program_test!(
Abigen(Contract(project = "some_project", name = "MismatchedName")),
Deploy(
name = "some_instance",
contract = "SomeUnknownContract",
wallet = "some_wallet"
)
);
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/setup_program_test/invalid_path.rs | packages/fuels-macros/tests/ui/setup_program_test/invalid_path.rs | use fuels_macros::setup_program_test;
setup_program_test!(Abigen(Contract(
name = "MyContract",
project = "some_project"
)));
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/setup_program_test/duplicate_wallet_command.rs | packages/fuels-macros/tests/ui/setup_program_test/duplicate_wallet_command.rs | use fuels_macros::setup_program_test;
setup_program_test!(
Wallets("wallet1"),
Wallets("wallet2"),
Abigen(Contract(name = "MyContract", project = "some_project"))
);
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/setup_program_test/unknown_options_value.rs | packages/fuels-macros/tests/ui/setup_program_test/unknown_options_value.rs | use fuels_macros::setup_program_test;
setup_program_test!(Options(profile = "not_a_profile"));
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/abigen/unrecognized_attribute.rs | packages/fuels-macros/tests/ui/abigen/unrecognized_attribute.rs | use fuels_macros::abigen;
abigen!(Contract(
name = "SomeName",
abi = "some-abi.json",
unknown = "something"
));
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/abigen/missing_abi_attribute.rs | packages/fuels-macros/tests/ui/abigen/missing_abi_attribute.rs | use fuels_macros::abigen;
abigen!(Contract(name = "SomeName"));
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/abigen/missing_name_attr.rs | packages/fuels-macros/tests/ui/abigen/missing_name_attr.rs | use fuels_macros::abigen;
abigen!(Contract(abi = "some-abi.json"));
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/abigen/invalid_abi_value.rs | packages/fuels-macros/tests/ui/abigen/invalid_abi_value.rs | use fuels_macros::abigen;
abigen!(Contract(name = "SomeName", abi = true,));
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/abigen/duplicate_attribute.rs | packages/fuels-macros/tests/ui/abigen/duplicate_attribute.rs | use fuels_macros::abigen;
abigen!(Contract(
abi = "some-abi.json",
abi = "some-abi2.json",
name = "SomeName",
abi = "some-abi3.json",
));
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/abigen/malformed_abi.rs | packages/fuels-macros/tests/ui/abigen/malformed_abi.rs | use fuels_macros::abigen;
abigen!(Contract(name = "SomeName", abi = r#"{}"#));
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/abigen/invalid_program_type.rs | packages/fuels-macros/tests/ui/abigen/invalid_program_type.rs | use fuels_macros::abigen;
abigen!(SomeInvalidProgramType(
name = "SomeName",
abi = "some-abi.json"
));
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/abigen/invalid_abi_path.rs | packages/fuels-macros/tests/ui/abigen/invalid_abi_path.rs | use fuels_macros::abigen;
abigen!(Contract(name = "SomeName", abi = "some_abi.json"));
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/abigen/invalid_name_value.rs | packages/fuels-macros/tests/ui/abigen/invalid_name_value.rs | use fuels_macros::abigen;
abigen!(Contract(name = true, abi = "some-abi.json",));
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/derive/tokenizable/only_one_variant_element_supported.rs | packages/fuels-macros/tests/ui/derive/tokenizable/only_one_variant_element_supported.rs | use fuels_macros::Tokenizable;
#[derive(Tokenizable)]
enum SomeEnum {
// problem because no elements present
B(),
}
#[derive(Tokenizable)]
enum AnotherEnum {
A(u64, u32),
}
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/derive/tokenizable/tuple_like_structs_not_supported.rs | packages/fuels-macros/tests/ui/derive/tokenizable/tuple_like_structs_not_supported.rs | use fuels_macros::Tokenizable;
#[derive(Tokenizable)]
struct SomeStruct(pub u64, pub String);
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/derive/tokenizable/struct_like_enum_variants_not_supported.rs | packages/fuels-macros/tests/ui/derive/tokenizable/struct_like_enum_variants_not_supported.rs | use fuels_macros::Tokenizable;
#[derive(Tokenizable)]
enum SomeEnum {
A,
B { something: u64 },
}
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/derive/tokenizable/only_generic_types_are_supported.rs | packages/fuels-macros/tests/ui/derive/tokenizable/only_generic_types_are_supported.rs | use fuels_macros::Tokenizable;
#[derive(Tokenizable)]
enum SomeEnum<const T: usize> {
A,
}
#[derive(Tokenizable)]
enum AnotherEnum<'a> {
A(&'a u64),
}
#[derive(Tokenizable)]
struct SomeStruct<const T: usize> {}
#[derive(Tokenizable)]
struct AnotherStruct<'a> {
a: &'a u64,
}
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/derive/parameterize/attribute_must_be_named_value.rs | packages/fuels-macros/tests/ui/derive/parameterize/attribute_must_be_named_value.rs | use fuels_macros::Parameterize;
#[derive(Parameterize)]
#[FuelsTypesPath]
enum SomeEnum {
A(u8),
}
#[derive(Parameterize)]
#[FuelsTypesPath = true]
struct SomeStruct {}
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/derive/parameterize/only_one_variant_element_supported.rs | packages/fuels-macros/tests/ui/derive/parameterize/only_one_variant_element_supported.rs | use fuels_macros::Parameterize;
#[derive(Parameterize)]
enum SomeEnum {
// problem because no elements present
B(),
}
#[derive(Parameterize)]
enum AnotherEnum {
A(u64, u32),
}
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/derive/parameterize/tuple_like_structs_not_supported.rs | packages/fuels-macros/tests/ui/derive/parameterize/tuple_like_structs_not_supported.rs | use fuels_macros::Parameterize;
#[derive(Parameterize)]
struct SomeStruct(pub u64, pub String);
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/derive/parameterize/struct_like_enum_variants_not_supported.rs | packages/fuels-macros/tests/ui/derive/parameterize/struct_like_enum_variants_not_supported.rs | use fuels_macros::Parameterize;
#[derive(Parameterize)]
enum SomeEnum {
A,
B { something: u64 },
}
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-macros/tests/ui/derive/parameterize/only_generic_types_are_supported.rs | packages/fuels-macros/tests/ui/derive/parameterize/only_generic_types_are_supported.rs | use fuels_macros::Parameterize;
#[derive(Parameterize)]
enum SomeEnum<const T: usize> {
A,
}
#[derive(Parameterize)]
enum AnotherEnum<'a> {
A(&'a u64),
}
#[derive(Parameterize)]
struct SomeStruct<const T: usize> {}
#[derive(Parameterize)]
struct AnotherStruct<'a> {
a: &'a u64,
}
fn main() {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/responses.rs | packages/fuels-programs/src/responses.rs | mod call;
mod submit;
pub use call::*;
pub use submit::*;
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/contract.rs | packages/fuels-programs/src/contract.rs | mod storage;
use std::fmt::Debug;
use fuel_tx::{Bytes32, Contract as FuelContract, ContractId, Salt, StorageSlot};
pub use storage::*;
/// Represents a contract that can be deployed either directly ([`Contract::regular`]) or through a loader [`Contract::convert_to_loader`].
/// Provides the ability to calculate the `ContractId` ([`Contract::contract_id`]) without needing to deploy the contract.
/// This struct also manages contract code updates with `configurable`s
/// ([`Contract::with_configurables`]) and can automatically
/// load storage slots (via [`Contract::load_from`]).
#[derive(Debug, Clone, PartialEq)]
pub struct Contract<Code> {
code: Code,
salt: Salt,
storage_slots: Vec<StorageSlot>,
}
impl<T> Contract<T> {
pub fn salt(&self) -> Salt {
self.salt
}
pub fn with_salt(mut self, salt: impl Into<Salt>) -> Self {
self.salt = salt.into();
self
}
pub fn storage_slots(&self) -> &[StorageSlot] {
&self.storage_slots
}
pub fn with_storage_slots(mut self, storage_slots: Vec<StorageSlot>) -> Self {
self.storage_slots = storage_slots;
self
}
}
mod regular;
pub use regular::*;
mod loader;
// reexported to avoid doing a breaking change
pub use loader::*;
pub use crate::assembly::contract_call::loader_contract_asm;
fn compute_contract_id_and_state_root(
binary: &[u8],
salt: &Salt,
storage_slots: &[StorageSlot],
) -> (ContractId, Bytes32, Bytes32) {
let fuel_contract = FuelContract::from(binary);
let code_root = fuel_contract.root();
let state_root = FuelContract::initial_state_root(storage_slots.iter());
let contract_id = FuelContract::id(salt, &code_root, &state_root);
(contract_id, code_root, state_root)
}
#[cfg(test)]
mod tests {
use std::path::Path;
use fuels_core::types::{
errors::{Error, Result},
transaction_builders::Blob,
};
use tempfile::tempdir;
use super::*;
use crate::assembly::contract_call::loader_contract_asm;
#[test]
fn autoload_storage_slots() {
// given
let temp_dir = tempdir().unwrap();
let contract_bin = temp_dir.path().join("my_contract.bin");
std::fs::write(&contract_bin, "").unwrap();
let storage_file = temp_dir.path().join("my_contract-storage_slots.json");
let expected_storage_slots = vec![StorageSlot::new([1; 32].into(), [2; 32].into())];
save_slots(&expected_storage_slots, &storage_file);
let storage_config = StorageConfiguration::new(true, vec![]);
let load_config = LoadConfiguration::default().with_storage_configuration(storage_config);
// when
let loaded_contract = Contract::load_from(&contract_bin, load_config).unwrap();
// then
assert_eq!(loaded_contract.storage_slots, expected_storage_slots);
}
#[test]
fn autoload_fails_if_file_missing() {
// given
let temp_dir = tempdir().unwrap();
let contract_bin = temp_dir.path().join("my_contract.bin");
std::fs::write(&contract_bin, "").unwrap();
let storage_config = StorageConfiguration::new(true, vec![]);
let load_config = LoadConfiguration::default().with_storage_configuration(storage_config);
// when
let error = Contract::load_from(&contract_bin, load_config)
.expect_err("should have failed because the storage slots file is missing");
// then
let storage_slots_path = temp_dir.path().join("my_contract-storage_slots.json");
let Error::Other(msg) = error else {
panic!("expected an error of type `Other`");
};
assert_eq!(
msg,
format!(
"could not autoload storage slots from file: {storage_slots_path:?}. Either provide the file or disable autoloading in `StorageConfiguration`"
)
);
}
fn save_slots(slots: &Vec<StorageSlot>, path: &Path) {
std::fs::write(
path,
serde_json::to_string::<Vec<StorageSlot>>(slots).unwrap(),
)
.unwrap()
}
#[test]
fn blob_size_must_be_greater_than_zero() {
// given
let contract = Contract::regular(vec![0x00], Salt::zeroed(), vec![]);
// when
let err = contract
.convert_to_loader(0)
.expect_err("should have failed because blob size is 0");
// then
assert_eq!(
err.to_string(),
"blob size must be greater than 0".to_string()
);
}
#[test]
fn contract_with_no_code_cannot_be_turned_into_a_loader() {
// given
let contract = Contract::regular(vec![], Salt::zeroed(), vec![]);
// when
let err = contract
.convert_to_loader(100)
.expect_err("should have failed because there is no code");
// then
assert_eq!(
err.to_string(),
"must provide at least one blob".to_string()
);
}
#[test]
fn loader_needs_at_least_one_blob() {
// given
let no_blobs = vec![];
// when
let err = Contract::loader_from_blobs(no_blobs, Salt::default(), vec![])
.expect_err("should have failed because there are no blobs");
// then
assert_eq!(
err.to_string(),
"must provide at least one blob".to_string()
);
}
#[test]
fn loader_requires_all_except_the_last_blob_to_be_word_sized() {
// given
let blobs = [vec![0; 9], vec![0; 8]].map(Blob::new).to_vec();
// when
let err = Contract::loader_from_blobs(blobs, Salt::default(), vec![])
.expect_err("should have failed because the first blob is not word-sized");
// then
assert_eq!(
err.to_string(),
"blob 1/2 has a size of 9 bytes, which is not a multiple of 8".to_string()
);
}
#[test]
fn last_blob_in_loader_can_be_unaligned() {
// given
let blobs = [vec![0; 8], vec![0; 9]].map(Blob::new).to_vec();
// when
let result = Contract::loader_from_blobs(blobs, Salt::default(), vec![]);
// then
let _ = result.unwrap();
}
#[test]
fn can_load_regular_contract() -> Result<()> {
// given
let tmp_dir = tempfile::tempdir()?;
let code_file = tmp_dir.path().join("contract.bin");
let code = b"some fake contract code";
std::fs::write(&code_file, code)?;
// when
let contract = Contract::load_from(
code_file,
LoadConfiguration::default()
.with_storage_configuration(StorageConfiguration::default().with_autoload(false)),
)?;
// then
assert_eq!(contract.code(), code);
Ok(())
}
#[test]
fn can_manually_create_regular_contract() -> Result<()> {
// given
let binary = b"some fake contract code";
// when
let contract = Contract::regular(binary.to_vec(), Salt::zeroed(), vec![]);
// then
assert_eq!(contract.code(), binary);
Ok(())
}
macro_rules! getters_work {
($contract: ident, $contract_id: expr, $state_root: expr, $code_root: expr, $salt: expr, $code: expr) => {
assert_eq!($contract.contract_id(), $contract_id);
assert_eq!($contract.state_root(), $state_root);
assert_eq!($contract.code_root(), $code_root);
assert_eq!($contract.salt(), $salt);
assert_eq!($contract.code(), $code);
};
}
#[test]
fn regular_contract_has_expected_getters() -> Result<()> {
let contract_binary = b"some fake contract code";
let storage_slots = vec![StorageSlot::new([2; 32].into(), [1; 32].into())];
let contract = Contract::regular(contract_binary.to_vec(), Salt::zeroed(), storage_slots);
let expected_contract_id =
"93c9f1e61efb25458e3c56fdcfee62acb61c0533364eeec7ba61cb2957aa657b".parse()?;
let expected_state_root =
"852b7b7527124dbcd44302e52453b864dc6f4d9544851c729da666a430b84c97".parse()?;
let expected_code_root =
"69ca130191e9e469f1580229760b327a0729237f1aff65cf1d076b2dd8360031".parse()?;
let expected_salt = Salt::zeroed();
getters_work!(
contract,
expected_contract_id,
expected_state_root,
expected_code_root,
expected_salt,
contract_binary
);
Ok(())
}
#[test]
fn regular_can_be_turned_into_loader_and_back() -> Result<()> {
let contract_binary = b"some fake contract code";
let contract_original = Contract::regular(contract_binary.to_vec(), Salt::zeroed(), vec![]);
let loader_contract = contract_original.clone().convert_to_loader(1)?;
let regular_recreated = loader_contract.clone().revert_to_regular();
assert_eq!(regular_recreated, contract_original);
Ok(())
}
#[test]
fn unuploaded_loader_contract_has_expected_getters() -> Result<()> {
let contract_binary = b"some fake contract code";
let storage_slots = vec![StorageSlot::new([2; 32].into(), [1; 32].into())];
let original = Contract::regular(contract_binary.to_vec(), Salt::zeroed(), storage_slots);
let loader = original.clone().convert_to_loader(1024)?;
let loader_asm = loader_contract_asm(&loader.blob_ids()).unwrap();
let manual_loader = original.with_code(loader_asm);
getters_work!(
loader,
manual_loader.contract_id(),
manual_loader.state_root(),
manual_loader.code_root(),
manual_loader.salt(),
manual_loader.code()
);
Ok(())
}
#[test]
fn unuploaded_loader_requires_at_least_one_blob() -> Result<()> {
// given
let no_blob_ids = vec![];
// when
let loader = Contract::loader_from_blob_ids(no_blob_ids, Salt::default(), vec![])
.expect_err("should have failed because there are no blobs");
// then
assert_eq!(
loader.to_string(),
"must provide at least one blob".to_string()
);
Ok(())
}
#[test]
fn uploaded_loader_has_expected_getters() -> Result<()> {
let contract_binary = b"some fake contract code";
let original_contract = Contract::regular(contract_binary.to_vec(), Salt::zeroed(), vec![]);
let blob_ids = original_contract
.clone()
.convert_to_loader(1024)?
.blob_ids();
// we pretend we uploaded the blobs
let loader = Contract::loader_from_blob_ids(blob_ids.clone(), Salt::default(), vec![])?;
let loader_asm = loader_contract_asm(&blob_ids).unwrap();
let manual_loader = original_contract.with_code(loader_asm);
getters_work!(
loader,
manual_loader.contract_id(),
manual_loader.state_root(),
manual_loader.code_root(),
manual_loader.salt(),
manual_loader.code()
);
Ok(())
}
}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/lib.rs | packages/fuels-programs/src/lib.rs | #[cfg(feature = "std")]
pub mod calls;
#[cfg(feature = "std")]
pub mod contract;
#[cfg(feature = "std")]
pub mod executable;
#[cfg(feature = "std")]
pub mod responses;
pub const DEFAULT_MAX_FEE_ESTIMATION_TOLERANCE: f32 = 0.50;
pub mod debug;
pub(crate) mod assembly;
pub(crate) mod utils;
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/executable.rs | packages/fuels-programs/src/executable.rs | use fuels_core::{
Configurables,
types::{
errors::{Context, Result},
transaction::Transaction,
transaction_builders::{Blob, BlobTransactionBuilder},
tx_response::TxResponse,
},
};
use crate::{
DEFAULT_MAX_FEE_ESTIMATION_TOLERANCE,
assembly::script_and_predicate_loader::{
LoaderCode, extract_configurables_offset, extract_data_offset,
has_configurables_section_offset,
},
};
/// This struct represents a standard executable with its associated bytecode and configurables.
#[derive(Debug, Clone, PartialEq)]
pub struct Regular {
code: Vec<u8>,
configurables: Configurables,
}
impl Regular {
pub fn new(code: Vec<u8>, configurables: Configurables) -> Self {
Self {
code,
configurables,
}
}
}
/// Used to transform Script or Predicate code into a loader variant, where the code is uploaded as
/// a blob and the binary itself is substituted with code that will load the blob code and apply
/// the given configurables to the Script/Predicate.
#[derive(Debug, Clone, PartialEq)]
pub struct Executable<State> {
state: State,
}
impl Executable<Regular> {
pub fn from_bytes(code: Vec<u8>) -> Self {
Executable {
state: Regular::new(code, Default::default()),
}
}
/// Loads an `Executable<Regular>` from a file at the given path.
///
/// # Parameters
///
/// - `path`: The file path to load the executable from.
///
/// # Returns
///
/// A `Result` containing the `Executable<Regular>` or an error if loading fails.
pub fn load_from(path: &str) -> Result<Executable<Regular>> {
let code = std::fs::read(path)?;
Ok(Executable {
state: Regular::new(code, Default::default()),
})
}
pub fn with_configurables(self, configurables: impl Into<Configurables>) -> Self {
Executable {
state: Regular {
configurables: configurables.into(),
..self.state
},
}
}
pub fn data_offset_in_code(&self) -> Result<usize> {
extract_data_offset(&self.state.code)
}
pub fn configurables_offset_in_code(&self) -> Result<Option<usize>> {
if has_configurables_section_offset(&self.state.code)? {
Ok(Some(extract_configurables_offset(&self.state.code)?))
} else {
Ok(None)
}
}
/// Returns the code of the executable with configurables applied.
///
/// # Returns
///
/// The bytecode of the executable with configurables updated.
pub fn code(&self) -> Vec<u8> {
let mut code = self.state.code.clone();
self.state.configurables.update_constants_in(&mut code);
code
}
/// Converts the `Executable<Regular>` into an `Executable<Loader>`.
///
/// # Returns
///
/// A `Result` containing the `Executable<Loader>` or an error if loader code cannot be
/// generated for the given binary.
pub fn convert_to_loader(self) -> Result<Executable<Loader>> {
validate_loader_can_be_made_from_code(
self.state.code.clone(),
self.state.configurables.clone(),
)?;
Ok(Executable {
state: Loader {
code: self.state.code,
configurables: self.state.configurables,
},
})
}
}
pub struct Loader {
code: Vec<u8>,
configurables: Configurables,
}
impl Executable<Loader> {
pub fn with_configurables(self, configurables: impl Into<Configurables>) -> Self {
Executable {
state: Loader {
configurables: configurables.into(),
..self.state
},
}
}
#[deprecated(note = "Use `configurables_offset_in_code` instead")]
pub fn data_offset_in_code(&self) -> usize {
self.loader_code().configurables_section_offset()
}
pub fn configurables_offset_in_code(&self) -> usize {
self.loader_code().configurables_section_offset()
}
fn loader_code(&self) -> LoaderCode {
let mut code = self.state.code.clone();
self.state.configurables.update_constants_in(&mut code);
LoaderCode::from_normal_binary(code)
.expect("checked before turning into a Executable<Loader>")
}
/// Returns the code of the loader executable with configurables applied.
pub fn code(&self) -> Vec<u8> {
self.loader_code().as_bytes().to_vec()
}
/// A Blob containing the original executable code minus the data section.
pub fn blob(&self) -> Blob {
// we don't apply configurables because they touch the data section which isn't part of the
// blob
LoaderCode::extract_blob(&self.state.code)
.expect("checked before turning into a Executable<Loader>")
}
/// If not previously uploaded, uploads a blob containing the original executable code minus the data section.
pub async fn upload_blob(
&self,
account: impl fuels_accounts::Account,
) -> Result<Option<TxResponse>> {
let blob = self.blob();
let provider = account.try_provider()?;
let consensus_parameters = provider.consensus_parameters().await?;
if provider.blob_exists(blob.id()).await? {
return Ok(None);
}
let mut tb = BlobTransactionBuilder::default()
.with_blob(self.blob())
.with_max_fee_estimation_tolerance(DEFAULT_MAX_FEE_ESTIMATION_TOLERANCE);
account
.adjust_for_fee(&mut tb, 0)
.await
.context("failed to adjust inputs to cover for missing base asset")?;
account.add_witnesses(&mut tb)?;
let tx = tb.build(provider).await?;
let tx_id = tx.id(consensus_parameters.chain_id());
let tx_status = provider.send_transaction_and_await_commit(tx).await?;
Ok(Some(TxResponse {
tx_status: tx_status.take_success_checked(None)?,
tx_id,
}))
}
}
fn validate_loader_can_be_made_from_code(
mut code: Vec<u8>,
configurables: Configurables,
) -> Result<()> {
configurables.update_constants_in(&mut code);
let _ = LoaderCode::from_normal_binary(code)?;
Ok(())
}
#[cfg(test)]
mod tests {
use std::io::Write;
use fuels_core::{Configurable, Configurables};
use tempfile::NamedTempFile;
use super::*;
fn legacy_indicating_instruction() -> Vec<u8> {
fuel_asm::op::jmpf(0x0, 0x02).to_bytes().to_vec()
}
#[test]
fn test_executable_regular_from_bytes() {
// Given: Some bytecode
let code = vec![1u8, 2, 3, 4];
// When: Creating an Executable<Regular> from bytes
let executable = Executable::<Regular>::from_bytes(code.clone());
// Then: The executable should have the given code and default configurables
assert_eq!(executable.state.code, code);
assert_eq!(executable.state.configurables, Default::default());
}
#[test]
fn test_executable_regular_load_from() {
// Given: A temporary file containing some bytecode
let code = vec![5u8, 6, 7, 8];
let mut temp_file = NamedTempFile::new().expect("Failed to create temp file");
temp_file
.write_all(&code)
.expect("Failed to write to temp file");
let path = temp_file.path().to_str().unwrap();
// When: Loading an Executable<Regular> from the file
let executable_result = Executable::<Regular>::load_from(path);
// Then: The executable should be created successfully with the correct code
assert!(executable_result.is_ok());
let executable = executable_result.unwrap();
assert_eq!(executable.state.code, code);
assert_eq!(executable.state.configurables, Default::default());
}
#[test]
fn test_executable_regular_load_from_invalid_path() {
// Given: An invalid file path
let invalid_path = "/nonexistent/path/to/file";
// When: Attempting to load an Executable<Regular> from the invalid path
let executable_result = Executable::<Regular>::load_from(invalid_path);
// Then: The operation should fail with an error
assert!(executable_result.is_err());
}
#[test]
fn test_executable_regular_with_configurables() {
// Given: An Executable<Regular> and some configurables
let code = vec![1u8, 2, 3, 4];
let executable = Executable::<Regular>::from_bytes(code);
let configurables = Configurables::new(vec![Configurable {
offset: 2,
data: vec![1],
}]);
// When: Setting new configurables
let new_executable = executable.with_configurables(configurables.clone());
// Then: The executable should have the new configurables
assert_eq!(new_executable.state.configurables, configurables);
}
#[test]
fn test_executable_regular_code() {
// Given: An Executable<Regular> with some code and configurables
let code = vec![1u8, 2, 3, 4];
let configurables = Configurables::new(vec![Configurable {
offset: 1,
data: vec![1],
}]);
let executable =
Executable::<Regular>::from_bytes(code.clone()).with_configurables(configurables);
// When: Retrieving the code after applying configurables
let modified_code = executable.code();
assert_eq!(modified_code, vec![1, 1, 3, 4]);
}
#[test]
fn test_loader_extracts_code_and_data_section_legacy_format() {
let padding = vec![0; 4];
let jmpf = legacy_indicating_instruction();
let data_offset = 28u64.to_be_bytes().to_vec();
let remaining_padding = vec![0; 8];
let some_random_instruction = vec![1, 2, 3, 4];
let data_section = vec![5, 6, 7, 8];
let code = [
padding.clone(),
jmpf.clone(),
data_offset.clone(),
remaining_padding.clone(),
some_random_instruction.clone(),
data_section.clone(),
]
.concat();
let executable = Executable::<Regular>::from_bytes(code.clone());
let loader = executable.convert_to_loader().unwrap();
let blob = loader.blob();
let data_stripped_code = [
padding,
jmpf.clone(),
data_offset,
remaining_padding.clone(),
some_random_instruction,
]
.concat();
assert_eq!(blob.as_ref(), data_stripped_code);
// And: Loader code should match expected binary
let loader_code = loader.code();
assert_eq!(
loader_code,
LoaderCode::from_normal_binary(code).unwrap().as_bytes()
);
}
#[test]
fn test_loader_extracts_code_and_configurable_section_new_format() {
    // New-format binary: a configurable-section offset follows the data-section
    // offset; only the configurable section gets stripped into the blob.
    let padding = vec![0; 4];
    let jmpf = legacy_indicating_instruction();
    let data_offset = 28u64.to_be_bytes().to_vec();
    let configurable_offset = vec![0; 8];
    let data_section = vec![5, 6, 7, 8];
    let configurable_section = vec![9, 9, 9, 9];
    let mut code = vec![];
    for part in [
        &padding,
        &jmpf,
        &data_offset,
        &configurable_offset,
        &data_section,
        &configurable_section,
    ] {
        code.extend_from_slice(part);
    }
    let loader = Executable::<Regular>::from_bytes(code.clone())
        .convert_to_loader()
        .unwrap();
    // Everything except the configurable section ends up in the blob.
    let mut expected_blob = code.clone();
    expected_blob.truncate(code.len() - configurable_section.len());
    assert_eq!(loader.blob().as_ref(), expected_blob);
    // And: Loader code should match expected binary
    assert_eq!(
        loader.code(),
        LoaderCode::from_normal_binary(code).unwrap().as_bytes()
    );
}
#[test]
fn test_executable_regular_convert_to_loader_with_invalid_code() {
    // Two bytes cannot even hold a data-section offset, so conversion must fail.
    let too_short = vec![1u8, 2];
    let result = Executable::<Regular>::from_bytes(too_short).convert_to_loader();
    assert!(result.is_err());
}
#[test]
fn executable_with_no_data_section() {
    // An offset of 16 skips the first two half-words and the offset itself,
    // i.e. it declares that there is no data section at all.
    let data_section_offset = 16u64;
    let jmpf = legacy_indicating_instruction();
    let mut header = vec![0; 16];
    header[4..8].copy_from_slice(&jmpf);
    let code = [header, data_section_offset.to_be_bytes().to_vec()].concat();
    Executable::from_bytes(code).convert_to_loader().unwrap();
}
}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/assembly.rs | packages/fuels-programs/src/assembly.rs | pub mod contract_call;
pub mod cursor;
pub mod script_and_predicate_loader;
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/debug.rs | packages/fuels-programs/src/debug.rs | use fuel_asm::{Instruction, Opcode};
use fuels_core::{error, types::errors::Result};
use itertools::Itertools;
use crate::{
assembly::{
contract_call::{ContractCallData, ContractCallInstructions},
script_and_predicate_loader::{
LoaderCode, get_offset_for_section_containing_configurables,
},
},
utils::prepend_msg,
};
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ScriptCallData {
    // Raw script bytecode, as handed to `ScriptType::detect`.
    pub code: Vec<u8>,
    /// This will be renamed in next breaking release. For binary generated with sway 0.66.5 this will be data_offset
    /// and for binary generated with sway 0.66.6 and above this will probably be data_section_offset and configurable_section_offset.
    pub data_section_offset: Option<u64>,
    // The transaction's script data bytes, copied verbatim.
    pub data: Vec<u8>,
}
impl ScriptCallData {
    /// Slice of `code` starting at `data_section_offset`, or `None` when no
    /// offset is recorded. Panics (like direct slicing) if the offset exceeds
    /// the code length.
    pub fn data_section(&self) -> Option<&[u8]> {
        let offset = self.data_section_offset? as usize;
        Some(&self.code[offset..])
    }
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ScriptType {
    // The script consists of recognized contract-call instruction sequences
    // (optionally followed by a single RET).
    ContractCall(Vec<ContractCallData>),
    // A loader script that references a blob holding the real code.
    Loader {
        script: ScriptCallData,
        blob_id: [u8; 32],
    },
    // Anything that is neither of the above.
    Other(ScriptCallData),
}
/// Wraps a script and its data into `ScriptCallData`, recording the offset of
/// the section containing configurables when one is present.
fn parse_script_call(script: &[u8], script_data: &[u8]) -> Result<ScriptCallData> {
    let mut data_section_offset = None;
    // Scripts shorter than 16 bytes cannot contain the offset header at all.
    if script.len() >= 16 {
        let offset = get_offset_for_section_containing_configurables(script)?;
        // An offset at or past the end means the section is empty/absent.
        if offset < script.len() {
            data_section_offset = Some(offset as u64);
        }
    }
    Ok(ScriptCallData {
        data: script_data.to_vec(),
        data_section_offset,
        code: script.to_vec(),
    })
}
/// Attempts to interpret `script` as a sequence of contract-call instruction
/// blocks and decode one `ContractCallData` per call from `script_data`.
///
/// Returns `Ok(None)` when the script is not a contract-call script (bytes
/// don't decode to instructions, or the instruction pattern doesn't match),
/// and `Err` when the script matches but its data section is inconsistent.
fn parse_contract_calls(
    script: &[u8],
    script_data: &[u8],
) -> Result<Option<Vec<ContractCallData>>> {
    let instructions: std::result::Result<Vec<Instruction>, _> =
        fuel_asm::from_bytes(script.to_vec()).try_collect();
    let Ok(instructions) = instructions else {
        return Ok(None);
    };
    let Some(call_instructions) = extract_call_instructions(&instructions) else {
        return Ok(None);
    };
    let Some(minimum_call_offset) = call_instructions.iter().map(|i| i.call_data_offset()).min()
    else {
        return Ok(None);
    };
    let num_calls = call_instructions.len();
    call_instructions.iter().enumerate().map(|(idx, current_call_instructions)| {
        // Call data offsets are absolute, while `script_data` begins at
        // `minimum_call_offset` — make every bound relative to that minimum.
        let data_start =
            (current_call_instructions.call_data_offset() - minimum_call_offset) as usize;
        let data_end = if idx + 1 < num_calls {
            // This call's data ends where the next call's data starts. The bound
            // must be relative to `minimum_call_offset` — just like `data_start` —
            // not to the current call's offset, otherwise the slice bounds are
            // wrong whenever there are three or more calls.
            (call_instructions[idx + 1].call_data_offset() - minimum_call_offset) as usize
        } else {
            script_data.len()
        };
        if data_start > script_data.len() || data_end > script_data.len() {
            return Err(error!(
                Other,
                "call data offset requires data section of length {}, but data section is only {} bytes long",
                data_end,
                script_data.len()
            ));
        }
        let contract_call_data = ContractCallData::decode(
            &script_data[data_start..data_end],
            current_call_instructions.is_gas_fwd_variant(),
        )?;
        Ok(contract_call_data)
    }).collect::<Result<_>>().map(Some)
}
/// Repeatedly strips contract-call instruction blocks off the front of
/// `instructions`. Returns `None` if anything other than a single trailing RET
/// remains afterwards.
fn extract_call_instructions(
    mut instructions: &[Instruction],
) -> Option<Vec<ContractCallInstructions>> {
    let mut extracted = Vec::new();
    while let Some(call) = ContractCallInstructions::extract_from(instructions) {
        let consumed = call.len();
        // `extract_from` must consume something, or this loop would never end.
        debug_assert!(consumed > 0);
        instructions = &instructions[consumed..];
        extracted.push(call);
    }
    match instructions {
        [] => Some(extracted),
        [only] if only.opcode() == Opcode::RET => Some(extracted),
        _ => None,
    }
}
impl ScriptType {
    /// Classifies a script: first tries the contract-call pattern, then the
    /// loader pattern, and finally falls back to `Other`.
    pub fn detect(script: &[u8], data: &[u8]) -> Result<Self> {
        let maybe_calls = parse_contract_calls(script, data)
            .map_err(prepend_msg("while decoding contract call"))?;
        if let Some(contract_calls) = maybe_calls {
            return Ok(Self::ContractCall(contract_calls));
        }
        match parse_loader_script(script, data)? {
            Some((script, blob_id)) => Ok(Self::Loader { script, blob_id }),
            None => Ok(Self::Other(parse_script_call(script, data)?)),
        }
    }
}
/// Tries to decode `script` as a loader binary; on success returns the call
/// data plus the referenced blob id, otherwise `Ok(None)`.
fn parse_loader_script(script: &[u8], data: &[u8]) -> Result<Option<(ScriptCallData, [u8; 32])>> {
    let decoded = LoaderCode::from_loader_binary(script)
        .map_err(prepend_msg("while decoding loader script"))?;
    Ok(decoded.map(|loader_code| {
        let call_data = ScriptCallData {
            code: script.to_vec(),
            data: data.to_vec(),
            data_section_offset: Some(loader_code.configurables_section_offset() as u64),
        };
        (call_data, loader_code.blob_id())
    }))
}
#[cfg(test)]
mod tests {
    use fuel_asm::RegId;
    use fuels_core::types::errors::Error;
    use rand::{RngCore, SeedableRng};
    use test_case::test_case;
    use super::*;
    use crate::assembly::{
        contract_call::{CallOpcodeParamsOffset, ContractCallInstructions},
        script_and_predicate_loader::loader_instructions_w_configurables,
    };
    #[test]
    fn can_handle_empty_scripts() {
        // given
        let empty_script = [];
        // when
        let res = ScriptType::detect(&empty_script, &[]).unwrap();
        // then
        assert_eq!(
            res,
            ScriptType::Other(ScriptCallData {
                code: vec![],
                data_section_offset: None,
                data: vec![]
            })
        )
    }
    #[test]
    fn is_fine_with_malformed_scripts() {
        // Random bytes (seeded, hence deterministic) with a `jmpf` planted at the
        // legacy-indicating position must still classify as `Other`, not error.
        let mut script = vec![0; 100 * Instruction::SIZE];
        let jmpf = fuel_asm::op::jmpf(0x0, 0x04).to_bytes();
        let mut rng = rand::rngs::StdRng::from_seed([0; 32]);
        rng.fill_bytes(&mut script);
        script[4..8].copy_from_slice(&jmpf);
        let script_type = ScriptType::detect(&script, &[]).unwrap();
        assert_eq!(
            script_type,
            ScriptType::Other(ScriptCallData {
                code: script,
                data_section_offset: None,
                data: vec![]
            })
        );
    }
    // Builds a minimal, well-formed contract-call data payload: amount, asset id,
    // contract id, two offsets, the selector length + "test" selector, and
    // optional args / forwarded-gas trailers.
    fn example_contract_call_data(has_args: bool, gas_fwd: bool) -> Vec<u8> {
        let mut data = vec![];
        data.extend_from_slice(&100u64.to_be_bytes());
        data.extend_from_slice(&[0; 32]);
        data.extend_from_slice(&[1; 32]);
        data.extend_from_slice(&[0; 8]);
        data.extend_from_slice(&[0; 8]);
        data.extend_from_slice(&"test".len().to_be_bytes());
        data.extend_from_slice("test".as_bytes());
        if has_args {
            data.extend_from_slice(&[0; 8]);
        }
        if gas_fwd {
            data.extend_from_slice(&[0; 8]);
        }
        data
    }
    // Each case removes a suffix of the payload and expects the decoder to name
    // the field that could no longer be read.
    #[test_case(108, "amount")]
    #[test_case(100, "asset id")]
    #[test_case(68, "contract id")]
    #[test_case(36, "function selector offset")]
    #[test_case(28, "encoded args offset")]
    #[test_case(20, "function selector length")]
    #[test_case(12, "function selector")]
    #[test_case(8, "forwarded gas")]
    fn catches_missing_data(amount_of_data_to_steal: usize, expected_msg: &str) {
        // given
        let script = ContractCallInstructions::new(CallOpcodeParamsOffset {
            call_data_offset: 0,
            amount_offset: 0,
            asset_id_offset: 0,
            gas_forwarded_offset: Some(1),
        })
        .into_bytes()
        .collect_vec();
        let ok_data = example_contract_call_data(false, true);
        let not_enough_data = ok_data[..ok_data.len() - amount_of_data_to_steal].to_vec();
        // when
        let err = ScriptType::detect(&script, &not_enough_data).unwrap_err();
        // then
        let Error::Other(mut msg) = err else {
            panic!("expected Error::Other");
        };
        let expected_msg =
            format!("while decoding contract call: while decoding {expected_msg}: not enough data");
        msg.truncate(expected_msg.len());
        assert_eq!(expected_msg, msg);
    }
    #[test]
    fn handles_invalid_utf8_fn_selector() {
        // given
        let script = ContractCallInstructions::new(CallOpcodeParamsOffset {
            call_data_offset: 0,
            amount_offset: 0,
            asset_id_offset: 0,
            gas_forwarded_offset: Some(1),
        })
        .into_bytes()
        .collect_vec();
        let invalid_utf8 = {
            let invalid_data = [0x80, 0xBF, 0xC0, 0xAF, 0xFF];
            assert!(String::from_utf8(invalid_data.to_vec()).is_err());
            invalid_data
        };
        let mut ok_data = example_contract_call_data(false, true);
        // Overwrite the selector bytes with the invalid sequence; detection must
        // still succeed — only selector decoding is expected to fail.
        ok_data[96..101].copy_from_slice(&invalid_utf8);
        // when
        let script_type = ScriptType::detect(&script, &ok_data).unwrap();
        // then
        let ScriptType::ContractCall(calls) = script_type else {
            panic!("expected ScriptType::Other");
        };
        let Error::Codec(err) = calls[0].decode_fn_selector().unwrap_err() else {
            panic!("expected Error::Codec");
        };
        assert_eq!(
            err,
            "cannot decode function selector: invalid utf-8 sequence of 1 bytes from index 0"
        );
    }
    #[test]
    fn loader_script_without_a_blob() {
        // given
        let script = loader_instructions_w_configurables()
            .iter()
            .flat_map(|i| i.to_bytes())
            .collect::<Vec<_>>();
        // when
        let err = ScriptType::detect(&script, &[]).unwrap_err();
        // then
        let Error::Other(msg) = err else {
            panic!("expected Error::Other");
        };
        assert_eq!(
            "while decoding loader script: while decoding blob id: not enough data, available: 0, requested: 32",
            msg
        );
    }
    #[test]
    fn loader_script_with_almost_matching_instructions() {
        // given
        // One extra instruction near the end must break the loader-pattern match.
        let mut loader_instructions = loader_instructions_w_configurables().to_vec();
        loader_instructions.insert(
            loader_instructions.len() - 2,
            fuel_asm::op::movi(RegId::ZERO, 0),
        );
        let script = loader_instructions
            .iter()
            .flat_map(|i| i.to_bytes())
            .collect::<Vec<_>>();
        // when
        let script_type = ScriptType::detect(&script, &[]).unwrap();
        // then
        assert_eq!(
            script_type,
            ScriptType::Other(ScriptCallData {
                code: script,
                data_section_offset: None,
                data: vec![]
            })
        );
    }
    #[test]
    fn extra_instructions_in_contract_calling_scripts_not_tolerated() {
        // given
        let mut contract_call_script = ContractCallInstructions::new(CallOpcodeParamsOffset {
            call_data_offset: 0,
            amount_offset: 0,
            asset_id_offset: 0,
            gas_forwarded_offset: Some(1),
        })
        .into_bytes()
        .collect_vec();
        // Anything after the call blocks other than a lone RET demotes the script
        // to `Other`.
        contract_call_script.extend(fuel_asm::op::movi(RegId::ZERO, 10).to_bytes());
        let script_data = example_contract_call_data(false, true);
        // when
        let script_type = ScriptType::detect(&contract_call_script, &script_data).unwrap();
        // then
        assert_eq!(
            script_type,
            ScriptType::Other(ScriptCallData {
                code: contract_call_script,
                data_section_offset: None,
                data: script_data
            })
        );
    }
    #[test]
    fn handles_invalid_call_data_offset() {
        // given
        let contract_call_1 = ContractCallInstructions::new(CallOpcodeParamsOffset {
            call_data_offset: 0,
            amount_offset: 0,
            asset_id_offset: 0,
            gas_forwarded_offset: Some(1),
        })
        .into_bytes();
        // The second call claims its data lives far past the actual data section.
        let contract_call_2 = ContractCallInstructions::new(CallOpcodeParamsOffset {
            call_data_offset: u16::MAX as usize,
            amount_offset: 0,
            asset_id_offset: 0,
            gas_forwarded_offset: Some(1),
        })
        .into_bytes();
        let data_only_for_one_call = example_contract_call_data(false, true);
        let together = contract_call_1.chain(contract_call_2).collect_vec();
        // when
        let err = ScriptType::detect(&together, &data_only_for_one_call).unwrap_err();
        // then
        let Error::Other(msg) = err else {
            panic!("expected Error::Other");
        };
        assert_eq!(
            "while decoding contract call: call data offset requires data section of length 65535, but data section is only 108 bytes long",
            msg
        );
    }
}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/utils.rs | packages/fuels-programs/src/utils.rs | use fuels_core::types::errors::{Error, error};
pub fn prepend_msg<'a>(msg: impl AsRef<str> + 'a) -> impl Fn(Error) -> Error + 'a {
move |err| match err {
Error::IO(orig_msg) => {
error!(IO, "{}: {}", msg.as_ref(), orig_msg)
}
Error::Codec(orig_msg) => {
error!(Codec, "{}: {}", msg.as_ref(), orig_msg)
}
Error::Transaction(reason) => Error::Transaction(reason),
Error::Provider(orig_msg) => {
error!(Provider, "{}: {}", msg.as_ref(), orig_msg)
}
Error::Other(orig_msg) => {
error!(Other, "{}: {}", msg.as_ref(), orig_msg)
}
}
}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/calls.rs | packages/fuels-programs/src/calls.rs | mod call_handler;
mod contract_call;
pub mod receipt_parser;
mod script_call;
pub mod traits;
pub mod utils;
pub use call_handler::*;
pub use contract_call::*;
use fuel_types::BlockHeight;
pub use script_call::*;
/// Used to control simulations/dry-runs
#[derive(Debug, Clone)]
pub struct Execution {
    // How strictly the node should validate the dry-run (see the constructors).
    execution_type: ExecutionType,
    // Optional block height to simulate at; `None` means the current height.
    at_height: Option<BlockHeight>,
}
impl Execution {
/// The transaction will be subject to all validations.
/// The tx fee must be covered, witnesses and UTXOs must be valid, etc.
pub fn realistic() -> Self {
Self {
execution_type: ExecutionType::Realistic,
at_height: None,
}
}
/// Most validation is disabled. Witnesses are replaced with fake ones, fake base assets are
/// added if necessary. Useful for fetching state without needing an account with base assets.
pub fn state_read_only() -> Self {
Self {
execution_type: ExecutionType::StateReadOnly,
at_height: None,
}
}
/// Simulating at as specific block height is only available if the node is using
/// `rocksdb` and has been started with the `historical_execution` flag.
pub fn at_height(mut self, height: impl Into<BlockHeight>) -> Self {
self.at_height = Some(height.into());
self
}
}
impl Default for Execution {
fn default() -> Self {
Self::realistic()
}
}
#[derive(Debug, Clone)]
pub(crate) enum ExecutionType {
    // Full validation: fees covered, witnesses and UTXOs checked (see
    // `Execution::realistic`).
    Realistic,
    // Relaxed validation for read-only state queries (see
    // `Execution::state_read_only`).
    StateReadOnly,
}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/calls/contract_call.rs | packages/fuels-programs/src/calls/contract_call.rs | use std::{collections::HashMap, fmt::Debug};
use fuels_core::{
constants::DEFAULT_CALL_PARAMS_AMOUNT,
error,
types::{
Address, AssetId, ContractId, Selector, errors::Result, input::Input, output::Output,
param_types::ParamType,
},
};
use crate::{assembly::contract_call::ContractCallData, calls::utils::sealed};
#[derive(Debug, Clone)]
/// Contains all data relevant to a single contract call
pub struct ContractCall {
    // Id of the contract being called.
    pub contract_id: ContractId,
    // ABI-encoded call arguments; kept as a `Result` so encoding errors only
    // surface when the call data is actually built (see `ContractCall::data`).
    pub encoded_args: Result<Vec<u8>>,
    // Encoded selector of the function to invoke.
    pub encoded_selector: Selector,
    pub call_parameters: CallParameters,
    // Additional contract ids to include as transaction inputs/outputs
    // alongside `contract_id` (see callers that extract unique contract ids).
    pub external_contracts: Vec<ContractId>,
    // Param type describing the call's output — presumably used to decode the
    // return value; confirm against the receipt parser.
    pub output_param: ParamType,
    pub is_payable: bool,
    // Extra asset amounts keyed by (asset, optional recipient address).
    pub custom_assets: HashMap<(AssetId, Option<Address>), u64>,
    // Custom transaction inputs/outputs supplied by the user.
    pub inputs: Vec<Input>,
    pub outputs: Vec<Output>,
}
impl ContractCall {
pub(crate) fn data(&self, base_asset_id: AssetId) -> Result<ContractCallData> {
let encoded_args = self
.encoded_args
.as_ref()
.map_err(|e| error!(Codec, "cannot encode contract call arguments: {e}"))?
.to_owned();
Ok(ContractCallData {
amount: self.call_parameters.amount(),
asset_id: self.call_parameters.asset_id().unwrap_or(base_asset_id),
contract_id: self.contract_id,
fn_selector_encoded: self.encoded_selector.clone(),
encoded_args,
gas_forwarded: self.call_parameters.gas_forwarded,
})
}
pub fn with_contract_id(self, contract_id: ContractId) -> Self {
ContractCall {
contract_id,
..self
}
}
pub fn with_call_parameters(self, call_parameters: CallParameters) -> ContractCall {
ContractCall {
call_parameters,
..self
}
}
pub fn add_custom_asset(&mut self, asset_id: AssetId, amount: u64, to: Option<Address>) {
*self.custom_assets.entry((asset_id, to)).or_default() += amount;
}
/// Add custom outputs to the `ContractCall`.
pub fn with_outputs(mut self, outputs: Vec<Output>) -> Self {
self.outputs = outputs;
self
}
/// Add custom inputs to the `ContractCall`.
pub fn with_inputs(mut self, inputs: Vec<Input>) -> Self {
self.inputs = inputs;
self
}
}
impl sealed::Sealed for ContractCall {}
#[derive(Debug, Clone)]
pub struct CallParameters {
    // Amount of the asset forwarded with the call.
    amount: u64,
    // Forwarded asset; `None` means the network's base asset is used (see
    // `ContractCall::data`).
    asset_id: Option<AssetId>,
    // Gas forwarded to the call; `None` when not explicitly set.
    gas_forwarded: Option<u64>,
}
impl CallParameters {
    /// Creates parameters with every field set explicitly.
    pub fn new(amount: u64, asset_id: AssetId, gas_forwarded: u64) -> Self {
        Self {
            amount,
            asset_id: Some(asset_id),
            gas_forwarded: Some(gas_forwarded),
        }
    }
    /// Builder-style setter for the forwarded amount.
    pub fn with_amount(self, amount: u64) -> Self {
        Self { amount, ..self }
    }
    /// Amount forwarded with the call.
    pub fn amount(&self) -> u64 {
        self.amount
    }
    /// Builder-style setter for the forwarded asset.
    pub fn with_asset_id(self, asset_id: AssetId) -> Self {
        Self {
            asset_id: Some(asset_id),
            ..self
        }
    }
    /// The explicitly chosen asset, if any.
    pub fn asset_id(&self) -> Option<AssetId> {
        self.asset_id
    }
    /// Builder-style setter for the forwarded gas.
    pub fn with_gas_forwarded(self, gas_forwarded: u64) -> Self {
        Self {
            gas_forwarded: Some(gas_forwarded),
            ..self
        }
    }
    /// The explicitly chosen forwarded gas, if any.
    pub fn gas_forwarded(&self) -> Option<u64> {
        self.gas_forwarded
    }
}
impl Default for CallParameters {
fn default() -> Self {
Self {
amount: DEFAULT_CALL_PARAMS_AMOUNT,
asset_id: None,
gas_forwarded: None,
}
}
}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/calls/receipt_parser.rs | packages/fuels-programs/src/calls/receipt_parser.rs | use std::collections::VecDeque;
use fuel_tx::Receipt;
use fuel_types::bytes::Bytes;
use fuels_core::{
codec::{ABIDecoder, DecoderConfig},
types::{
ContractId, Token,
errors::{Error, Result, error},
param_types::ParamType,
},
};
pub struct ReceiptParser {
    // Only `Call` and `ReturnData` receipts are retained (see `ReceiptParser::new`);
    // a deque because `extract_contract_call_data` consumes from the front.
    receipts: VecDeque<Receipt>,
    decoder: ABIDecoder,
}
impl ReceiptParser {
    // Keeps only the `Call` and `ReturnData` receipts — the ones needed to
    // locate call return values — and builds a decoder with the given config.
    pub fn new(receipts: &[Receipt], decoder_config: DecoderConfig) -> Self {
        let relevant_receipts = receipts
            .iter()
            .filter(|receipt| matches!(receipt, Receipt::ReturnData { .. } | Receipt::Call { .. }))
            .cloned()
            .collect();
        Self {
            receipts: relevant_receipts,
            decoder: ABIDecoder::new(decoder_config),
        }
    }
    /// Based on receipts returned by a script transaction, the contract ID,
    /// and the output param, parse the values and return them as Token.
    pub fn parse_call(
        &mut self,
        contract_id: ContractId,
        output_param: &ParamType,
    ) -> Result<Token> {
        let data = self
            .extract_contract_call_data(contract_id)
            .ok_or_else(|| Self::missing_receipts_error(output_param))?;
        self.decoder.decode(output_param, data.as_slice())
    }
    // Decodes the script's own return data (receipts with a zeroed contract id).
    pub fn parse_script(self, output_param: &ParamType) -> Result<Token> {
        let data = self
            .extract_script_data()
            .ok_or_else(|| Self::missing_receipts_error(output_param))?;
        self.decoder.decode(output_param, data.as_slice())
    }
    // Error used when no receipt carries the data the caller asked for.
    fn missing_receipts_error(output_param: &ParamType) -> Error {
        error!(
            Codec,
            "`ReceiptDecoder`: failed to find matching receipts entry for {output_param:?}"
        )
    }
    // Pops receipts off the front, tracking Call/ReturnData nesting with a stack,
    // until the return data of the next *top-level* call is found. Because it
    // consumes receipts, repeated invocations walk through successive top-level
    // calls (relied upon by `receipt_parser_extracts_top_level_call_receipts`).
    pub fn extract_contract_call_data(&mut self, target_contract: ContractId) -> Option<Bytes> {
        // If the script contains nested calls, we need to extract the data of the top-level call
        let mut nested_calls_stack = vec![];
        while let Some(receipt) = self.receipts.pop_front() {
            if let Receipt::Call { to, .. } = receipt {
                nested_calls_stack.push(to);
            } else if let Receipt::ReturnData {
                data,
                id: return_id,
                ..
            } = receipt
            {
                let call_id = nested_calls_stack.pop();
                // Somethings off if there is a mismatch between the call and return ids
                debug_assert_eq!(call_id.unwrap(), return_id);
                if nested_calls_stack.is_empty() {
                    // The top-level call return should match our target contract
                    debug_assert_eq!(target_contract, return_id);
                    return data.clone();
                }
            }
        }
        None
    }
    // Script-level return data is identified by a zeroed contract id.
    fn extract_script_data(&self) -> Option<Bytes> {
        self.receipts.iter().find_map(|receipt| match receipt {
            Receipt::ReturnData {
                id,
                data: Some(data),
                ..
            } if *id == ContractId::zeroed() => Some(data.clone()),
            _ => None,
        })
    }
}
#[cfg(test)]
mod tests {
    use fuel_tx::ScriptExecutionResult;
    use fuels_core::traits::{Parameterize, Tokenizable};
    use super::*;
    // Payload placed in the relevant `ReturnData` receipt…
    const RECEIPT_DATA: &[u8; 3] = &[8, 8, 3];
    // …and the value expected after decoding it as `[u8; 3]`.
    const DECODED_DATA: &[u8; 3] = &[8, 8, 3];
    fn target_contract() -> ContractId {
        ContractId::from([1u8; 32])
    }
    // Builds a `ReturnData` receipt for `id` carrying `data`.
    fn get_return_data_receipt(id: ContractId, data: &[u8]) -> Receipt {
        Receipt::ReturnData {
            id,
            ptr: Default::default(),
            len: Default::default(),
            digest: Default::default(),
            data: Some(data.to_vec().into()),
            pc: Default::default(),
            is: Default::default(),
        }
    }
    // Builds a `Call` receipt targeting contract `to`.
    fn get_call_receipt(to: ContractId) -> Receipt {
        Receipt::Call {
            id: Default::default(),
            to,
            amount: Default::default(),
            asset_id: Default::default(),
            gas: Default::default(),
            param1: Default::default(),
            param2: Default::default(),
            pc: Default::default(),
            is: Default::default(),
        }
    }
    // A minimal call/return pair for the target contract.
    fn get_relevant_receipts() -> Vec<Receipt> {
        let id = target_contract();
        vec![
            get_call_receipt(id),
            get_return_data_receipt(id, RECEIPT_DATA),
        ]
    }
    #[tokio::test]
    async fn receipt_parser_filters_receipts() -> Result<()> {
        // Revert/Log/LogData/ScriptResult receipts must all be dropped by `new`.
        let mut receipts = vec![
            Receipt::Revert {
                id: Default::default(),
                ra: Default::default(),
                pc: Default::default(),
                is: Default::default(),
            },
            Receipt::Log {
                id: Default::default(),
                ra: Default::default(),
                rb: Default::default(),
                rc: Default::default(),
                rd: Default::default(),
                pc: Default::default(),
                is: Default::default(),
            },
            Receipt::LogData {
                id: Default::default(),
                ra: Default::default(),
                rb: Default::default(),
                ptr: Default::default(),
                len: Default::default(),
                digest: Default::default(),
                data: Default::default(),
                pc: Default::default(),
                is: Default::default(),
            },
            Receipt::ScriptResult {
                result: ScriptExecutionResult::Success,
                gas_used: Default::default(),
            },
        ];
        let relevant_receipts = get_relevant_receipts();
        receipts.extend(relevant_receipts.clone());
        let parser = ReceiptParser::new(&receipts, Default::default());
        assert_eq!(parser.receipts, relevant_receipts);
        Ok(())
    }
    #[tokio::test]
    async fn receipt_parser_empty_receipts() -> Result<()> {
        // With no receipts at all, parsing must fail with the missing-receipts error.
        let receipts = [];
        let output_param = ParamType::U8;
        let error = ReceiptParser::new(&receipts, Default::default())
            .parse_call(target_contract(), &output_param)
            .expect_err("should error");
        let expected_error = ReceiptParser::missing_receipts_error(&output_param);
        assert_eq!(error.to_string(), expected_error.to_string());
        Ok(())
    }
    #[tokio::test]
    async fn receipt_parser_extract_return_data() -> Result<()> {
        let receipts = get_relevant_receipts();
        let mut parser = ReceiptParser::new(&receipts, Default::default());
        let token = parser
            .parse_call(target_contract(), &<[u8; 3]>::param_type())
            .expect("parsing should succeed");
        assert_eq!(&<[u8; 3]>::from_token(token)?, DECODED_DATA);
        Ok(())
    }
    #[tokio::test]
    async fn receipt_parser_extracts_top_level_call_receipts() -> Result<()> {
        // Two top-level calls, each with one nested call; the parser must skip the
        // nested return data and hand back the top-level payloads in order.
        const CORRECT_DATA_1: [u8; 3] = [1, 2, 3];
        const CORRECT_DATA_2: [u8; 3] = [5, 6, 7];
        let contract_top_lvl = target_contract();
        let contract_nested = ContractId::from([9u8; 32]);
        let receipts = vec![
            get_call_receipt(contract_top_lvl),
            get_call_receipt(contract_nested),
            get_return_data_receipt(contract_nested, &[9, 9, 9]),
            get_return_data_receipt(contract_top_lvl, &CORRECT_DATA_1),
            get_call_receipt(contract_top_lvl),
            get_call_receipt(contract_nested),
            get_return_data_receipt(contract_nested, &[7, 7, 7]),
            get_return_data_receipt(contract_top_lvl, &CORRECT_DATA_2),
        ];
        let mut parser = ReceiptParser::new(&receipts, Default::default());
        let token_1 = parser
            .parse_call(contract_top_lvl, &<[u8; 3]>::param_type())
            .expect("parsing should succeed");
        let token_2 = parser
            .parse_call(contract_top_lvl, &<[u8; 3]>::param_type())
            .expect("parsing should succeed");
        assert_eq!(&<[u8; 3]>::from_token(token_1)?, &CORRECT_DATA_1);
        assert_eq!(&<[u8; 3]>::from_token(token_2)?, &CORRECT_DATA_2);
        Ok(())
    }
}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/calls/utils.rs | packages/fuels-programs/src/calls/utils.rs | use std::{collections::HashSet, iter, vec};
use fuel_abi_types::error_codes::FAILED_TRANSFER_TO_ADDRESS_SIGNAL;
use fuel_asm::{RegId, op};
use fuel_tx::{ConsensusParameters, Output, PanicReason, Receipt, TxPointer, UtxoId};
use fuels_accounts::Account;
use fuels_core::{
offsets::call_script_data_offset,
types::{
Address, AssetId, Bytes32, ContractId,
errors::{Context, Result},
input::Input,
transaction::{ScriptTransaction, TxPolicies},
transaction_builders::{
BuildableTransaction, ScriptTransactionBuilder, TransactionBuilder,
VariableOutputPolicy,
},
},
};
use itertools::{Itertools, chain};
use crate::{
DEFAULT_MAX_FEE_ESTIMATION_TOLERANCE,
assembly::contract_call::{CallOpcodeParamsOffset, ContractCallInstructions},
calls::ContractCall,
};
pub(crate) mod sealed {
    // Sealed-trait marker: `Sealed` is only reachable within this crate, so
    // traits that require it as a supertrait cannot be implemented externally.
    pub trait Sealed {}
}
/// Creates a [`ScriptTransactionBuilder`] from contract calls.
pub(crate) fn transaction_builder_from_contract_calls(
    calls: &[ContractCall],
    tx_policies: TxPolicies,
    variable_outputs: VariableOutputPolicy,
    consensus_parameters: &ConsensusParameters,
    asset_inputs: Vec<Input>,
    account: &impl Account,
) -> Result<ScriptTransactionBuilder> {
    // The data offset depends on the script length, which in turn depends only on
    // the number of calls and whether each forwards gas — compute it first.
    let calls_instructions_len = compute_calls_instructions_len(calls);
    let data_offset = call_script_data_offset(consensus_parameters, calls_instructions_len)?;
    let (script_data, call_param_offsets) = build_script_data_from_contract_calls(
        calls,
        data_offset,
        *consensus_parameters.base_asset_id(),
    )?;
    // Now that the real per-call offsets are known, emit the actual call script.
    let script = get_instructions(call_param_offsets);
    let (inputs, outputs) = get_transaction_inputs_outputs(
        calls,
        asset_inputs,
        account.address(),
        *consensus_parameters.base_asset_id(),
    );
    Ok(ScriptTransactionBuilder::default()
        .with_variable_output_policy(variable_outputs)
        .with_tx_policies(tx_policies)
        .with_script(script)
        .with_script_data(script_data.clone())
        .with_inputs(inputs)
        .with_outputs(outputs)
        .with_gas_estimation_tolerance(DEFAULT_MAX_FEE_ESTIMATION_TOLERANCE)
        .with_max_fee_estimation_tolerance(DEFAULT_MAX_FEE_ESTIMATION_TOLERANCE))
}
/// Creates a [`ScriptTransaction`] from contract calls. The internal [Transaction] is
/// initialized with the actual script instructions, script data needed to perform the call and
/// transaction inputs/outputs consisting of assets and contracts.
pub(crate) async fn build_with_tb(
    calls: &[ContractCall],
    mut tb: ScriptTransactionBuilder,
    account: &impl Account,
) -> Result<ScriptTransaction> {
    let consensus_parameters = account.try_provider()?.consensus_parameters().await?;
    let base_asset_id = *consensus_parameters.base_asset_id();
    let required_asset_amounts = calculate_required_asset_amounts(calls, base_asset_id);
    // Only the base-asset portion is relevant for the fee adjustment below;
    // defaults to 0 when no call consumes the base asset.
    let used_base_amount = required_asset_amounts
        .iter()
        .find_map(|(asset_id, amount)| (*asset_id == base_asset_id).then_some(*amount))
        .unwrap_or_default();
    account.add_witnesses(&mut tb)?;
    account
        .adjust_for_fee(&mut tb, used_base_amount)
        .await
        .context("failed to adjust inputs to cover for missing base asset")?;
    tb.build(account.try_provider()?).await
}
/// Computes the total byte length of the call script produced for `calls`.
/// The length depends only on the number of calls and on whether each one
/// forwards gas — not on the actual offset values — so placeholder offsets
/// are sufficient.
fn compute_calls_instructions_len(calls: &[ContractCall]) -> usize {
    let mut total_len = 0;
    for call in calls {
        let placeholder_offsets = CallOpcodeParamsOffset {
            // Zero stands in for the real offset; only `Some` vs `None` matters
            // for the instruction count.
            gas_forwarded_offset: call.call_parameters.gas_forwarded().map(|_| 0),
            ..CallOpcodeParamsOffset::default()
        };
        total_len += ContractCallInstructions::new(placeholder_offsets)
            .into_bytes()
            .count();
    }
    total_len
}
/// Compute how much of each asset is required based on all `CallParameters` of the `ContractCalls`
pub fn calculate_required_asset_amounts(
    calls: &[ContractCall],
    base_asset_id: AssetId,
) -> Vec<(AssetId, u128)> {
    // Amounts from each call's own parameters; a missing asset id means the
    // base asset (mirrors `ContractCall::data`).
    let call_param_assets = calls.iter().map(|call| {
        (
            call.call_parameters.asset_id().unwrap_or(base_asset_id),
            call.call_parameters.amount(),
        )
    });
    // Merge in the custom assets (recipient is irrelevant here). `group_by`
    // only groups *consecutive* equal keys, hence the sort beforehand.
    let grouped_assets = calls
        .iter()
        .flat_map(|call| call.custom_assets.clone())
        .map(|((asset_id, _), amount)| (asset_id, amount))
        .chain(call_param_assets)
        .sorted_by_key(|(asset_id, _)| *asset_id)
        .group_by(|(asset_id, _)| *asset_id);
    grouped_assets
        .into_iter()
        .filter_map(|(asset_id, groups_w_same_asset_id)| {
            // Sum in u128 so that adding many u64 amounts cannot overflow.
            let total_amount_in_group = groups_w_same_asset_id
                .map(|(_, amount)| u128::from(amount))
                .sum();
            // Zero totals are dropped — nothing needs funding for them.
            (total_amount_in_group != 0).then_some((asset_id, total_amount_in_group))
        })
        .collect()
}
/// Given a list of contract calls, create the actual opcodes used to call the contract,
/// terminated by a single `ret`.
pub(crate) fn get_instructions(offsets: Vec<CallOpcodeParamsOffset>) -> Vec<u8> {
    let mut script = vec![];
    for offset in offsets {
        script.extend(ContractCallInstructions::new(offset).into_bytes());
    }
    script.extend(op::ret(RegId::ONE).to_bytes());
    script
}
/// Serializes every call's data into one contiguous buffer and records, per
/// call, the offsets the call opcodes will need. `data_offset` is where the
/// buffer will live inside the transaction, so each call's offsets are made
/// absolute by adding it to the buffer position at the time of encoding.
pub(crate) fn build_script_data_from_contract_calls(
    calls: &[ContractCall],
    data_offset: usize,
    base_asset_id: AssetId,
) -> Result<(Vec<u8>, Vec<CallOpcodeParamsOffset>)> {
    let mut script_data = vec![];
    let mut param_offsets = vec![];
    for call in calls {
        let segment_offset = data_offset + script_data.len();
        let offsets = call
            .data(base_asset_id)?
            .encode(segment_offset, &mut script_data);
        param_offsets.push(offsets);
    }
    Ok((script_data, param_offsets))
}
/// Returns the assets and contracts that will be consumed ([`Input`]s)
/// and created ([`Output`]s) by the transaction
pub(crate) fn get_transaction_inputs_outputs(
    calls: &[ContractCall],
    asset_inputs: Vec<Input>,
    address: Address,
    base_asset_id: AssetId,
) -> (Vec<Input>, Vec<Output>) {
    let asset_ids = extract_unique_asset_ids(&asset_inputs, base_asset_id);
    let contract_ids = extract_unique_contract_ids(calls);
    let num_of_contracts = contract_ids.len();
    // Custom `Inputs` and `Outputs` should be placed before other inputs and outputs.
    let custom_inputs = calls.iter().flat_map(|c| c.inputs.clone()).collect_vec();
    let custom_inputs_len = custom_inputs.len();
    let custom_outputs = calls.iter().flat_map(|c| c.outputs.clone()).collect_vec();
    // Contract inputs point at contract outputs via `output_index`; the custom
    // outputs come first, so contract input indices start after them.
    let inputs = chain!(
        custom_inputs,
        generate_contract_inputs(contract_ids, custom_outputs.len()),
        asset_inputs
    )
    .collect();
    // Note the contract_outputs are placed after the custom outputs and
    // the contract_inputs are referencing them via `output_index`. The
    // node will, upon receiving our request, use `output_index` to index
    // the `inputs` array we've sent over.
    let outputs = chain!(
        custom_outputs,
        generate_contract_outputs(num_of_contracts, custom_inputs_len),
        generate_asset_change_outputs(address, asset_ids),
        generate_custom_outputs(calls),
    )
    .collect();
    (inputs, outputs)
}
/// Aggregates the custom assets of all calls into one `Output::coin` per
/// (asset, recipient) pair; entries without a recipient produce no output.
fn generate_custom_outputs(calls: &[ContractCall]) -> Vec<Output> {
    calls
        .iter()
        .flat_map(|call| &call.custom_assets)
        // `group_by` only merges *consecutive* equal keys, and the entries come
        // out of per-call `HashMap`s in arbitrary order — without sorting first,
        // the same (asset, recipient) pair could produce several partial coin
        // outputs instead of one aggregated one.
        .sorted_by_key(|((asset_id, to), _)| (*asset_id, *to))
        .group_by(|custom| (custom.0.0, custom.0.1))
        .into_iter()
        .filter_map(|(asset_id_address, groups_w_same_asset_id_address)| {
            let total_amount_in_group = groups_w_same_asset_id_address
                .map(|(_, amount)| amount)
                .sum::<u64>();
            // Only entries addressed to someone become coin outputs.
            asset_id_address
                .1
                .map(|address| Output::coin(address, total_amount_in_group, asset_id_address.0))
        })
        .collect::<Vec<_>>()
}
fn extract_unique_asset_ids(asset_inputs: &[Input], base_asset_id: AssetId) -> HashSet<AssetId> {
asset_inputs
.iter()
.filter_map(|input| match input {
Input::ResourceSigned { resource, .. } | Input::ResourcePredicate { resource, .. } => {
Some(resource.coin_asset_id().unwrap_or(base_asset_id))
}
_ => None,
})
.collect()
}
fn generate_asset_change_outputs(
wallet_address: Address,
asset_ids: HashSet<AssetId>,
) -> Vec<Output> {
asset_ids
.into_iter()
.map(|asset_id| Output::change(wallet_address, 0, asset_id))
.collect()
}
/// Generate contract outputs taking in consideration already existing inputs
pub(crate) fn generate_contract_outputs(
num_of_contracts: usize,
num_current_inputs: usize,
) -> Vec<Output> {
(0..num_of_contracts)
.map(|idx| {
Output::contract(
(idx + num_current_inputs) as u16,
Bytes32::zeroed(),
Bytes32::zeroed(),
)
})
.collect()
}
/// Generate contract inputs taking in consideration already existing outputs
pub(crate) fn generate_contract_inputs(
contract_ids: HashSet<ContractId>,
num_current_outputs: usize,
) -> Vec<Input> {
contract_ids
.into_iter()
.enumerate()
.map(|(idx, contract_id)| {
Input::contract(
UtxoId::new(Bytes32::zeroed(), (idx + num_current_outputs) as u16),
Bytes32::zeroed(),
Bytes32::zeroed(),
TxPointer::default(),
contract_id,
)
})
.collect()
}
fn extract_unique_contract_ids(calls: &[ContractCall]) -> HashSet<ContractId> {
calls
.iter()
.flat_map(|call| {
call.external_contracts
.iter()
.copied()
.chain(iter::once(call.contract_id))
})
.collect()
}
pub fn is_missing_output_variables(receipts: &[Receipt]) -> bool {
receipts.iter().any(
|r| matches!(r, Receipt::Revert { ra, .. } if *ra == FAILED_TRANSFER_TO_ADDRESS_SIGNAL),
)
}
pub fn find_ids_of_missing_contracts(receipts: &[Receipt]) -> Vec<ContractId> {
receipts
.iter()
.filter_map(|receipt| match receipt {
Receipt::Panic {
reason,
contract_id,
..
} if *reason.reason() == PanicReason::ContractNotInInputs => {
let contract_id = contract_id
.expect("panic caused by a contract not in inputs must have a contract id");
Some(contract_id)
}
_ => None,
})
.collect()
}
#[cfg(test)]
mod test {
use std::slice;
use fuels_accounts::signers::private_key::PrivateKeySigner;
use fuels_core::types::{coin::Coin, coin_type::CoinType, param_types::ParamType};
use rand::{Rng, thread_rng};
use super::*;
use crate::calls::{CallParameters, traits::ContractDependencyConfigurator};
fn new_contract_call_with_random_id() -> ContractCall {
ContractCall {
contract_id: random_contract_id(),
encoded_args: Ok(Default::default()),
encoded_selector: [0; 8].to_vec(),
call_parameters: Default::default(),
external_contracts: Default::default(),
output_param: ParamType::Unit,
is_payable: false,
custom_assets: Default::default(),
inputs: vec![],
outputs: vec![],
}
}
fn random_contract_id() -> ContractId {
rand::thread_rng().r#gen()
}
#[test]
fn contract_input_present() {
let call = new_contract_call_with_random_id();
let signer = PrivateKeySigner::random(&mut thread_rng());
let (inputs, _) = get_transaction_inputs_outputs(
slice::from_ref(&call),
Default::default(),
signer.address(),
AssetId::zeroed(),
);
assert_eq!(
inputs,
vec![Input::contract(
UtxoId::new(Bytes32::zeroed(), 0),
Bytes32::zeroed(),
Bytes32::zeroed(),
TxPointer::default(),
call.contract_id,
)]
);
}
#[test]
fn contract_input_is_not_duplicated() {
let call = new_contract_call_with_random_id();
let call_w_same_contract =
new_contract_call_with_random_id().with_contract_id(call.contract_id);
let signer = PrivateKeySigner::random(&mut thread_rng());
let calls = [call, call_w_same_contract];
let (inputs, _) = get_transaction_inputs_outputs(
&calls,
Default::default(),
signer.address(),
AssetId::zeroed(),
);
assert_eq!(
inputs,
vec![Input::contract(
UtxoId::new(Bytes32::zeroed(), 0),
Bytes32::zeroed(),
Bytes32::zeroed(),
TxPointer::default(),
calls[0].contract_id,
)]
);
}
#[test]
fn contract_output_present() {
let call = new_contract_call_with_random_id();
let signer = PrivateKeySigner::random(&mut thread_rng());
let (_, outputs) = get_transaction_inputs_outputs(
&[call],
Default::default(),
signer.address(),
AssetId::zeroed(),
);
assert_eq!(
outputs,
vec![Output::contract(0, Bytes32::zeroed(), Bytes32::zeroed())]
);
}
#[test]
fn external_contract_input_present() {
// given
let external_contract_id = random_contract_id();
let call =
new_contract_call_with_random_id().with_external_contracts(vec![external_contract_id]);
let signer = PrivateKeySigner::random(&mut thread_rng());
// when
let (inputs, _) = get_transaction_inputs_outputs(
slice::from_ref(&call),
Default::default(),
signer.address(),
AssetId::zeroed(),
);
// then
let mut expected_contract_ids: HashSet<ContractId> =
[call.contract_id, external_contract_id].into();
for (index, input) in inputs.into_iter().enumerate() {
match input {
Input::Contract {
utxo_id,
balance_root,
state_root,
tx_pointer,
contract_id,
} => {
assert_eq!(utxo_id, UtxoId::new(Bytes32::zeroed(), index as u16));
assert_eq!(balance_root, Bytes32::zeroed());
assert_eq!(state_root, Bytes32::zeroed());
assert_eq!(tx_pointer, TxPointer::default());
assert!(expected_contract_ids.contains(&contract_id));
expected_contract_ids.remove(&contract_id);
}
_ => {
panic!("expected only inputs of type `Input::Contract`");
}
}
}
}
#[test]
fn external_contract_output_present() {
// given
let external_contract_id = random_contract_id();
let call =
new_contract_call_with_random_id().with_external_contracts(vec![external_contract_id]);
let signer = PrivateKeySigner::random(&mut thread_rng());
// when
let (_, outputs) = get_transaction_inputs_outputs(
&[call],
Default::default(),
signer.address(),
AssetId::zeroed(),
);
// then
let expected_outputs = (0..=1)
.map(|i| Output::contract(i, Bytes32::zeroed(), Bytes32::zeroed()))
.collect::<Vec<_>>();
assert_eq!(outputs, expected_outputs);
}
#[test]
fn change_per_asset_id_added() {
// given
let asset_ids = [AssetId::zeroed(), AssetId::from([1; 32])];
let coins = asset_ids
.into_iter()
.map(|asset_id| {
let coin = CoinType::Coin(Coin {
amount: 100,
asset_id,
utxo_id: Default::default(),
owner: Default::default(),
});
Input::resource_signed(coin)
})
.collect();
let call = new_contract_call_with_random_id();
let signer = PrivateKeySigner::random(&mut thread_rng());
// when
let (_, outputs) =
get_transaction_inputs_outputs(&[call], coins, signer.address(), AssetId::zeroed());
// then
let change_outputs: HashSet<Output> = outputs[1..].iter().cloned().collect();
let expected_change_outputs = asset_ids
.into_iter()
.map(|asset_id| Output::Change {
to: signer.address(),
amount: 0,
asset_id,
})
.collect();
assert_eq!(change_outputs, expected_change_outputs);
}
#[test]
fn will_collate_same_asset_ids() {
let asset_id_1 = AssetId::from([1; 32]);
let asset_id_2 = AssetId::from([2; 32]);
let calls = [
(asset_id_1, 100),
(asset_id_2, 200),
(asset_id_1, 300),
(asset_id_2, 400),
]
.map(|(asset_id, amount)| {
CallParameters::default()
.with_amount(amount)
.with_asset_id(asset_id)
})
.map(|call_parameters| {
new_contract_call_with_random_id().with_call_parameters(call_parameters)
});
let asset_id_amounts = calculate_required_asset_amounts(&calls, AssetId::zeroed());
let expected_asset_id_amounts = [(asset_id_1, 400), (asset_id_2, 600)].into();
assert_eq!(
asset_id_amounts.into_iter().collect::<HashSet<_>>(),
expected_asset_id_amounts
)
}
mod compute_calls_instructions_len {
use fuel_asm::Instruction;
use fuels_core::types::param_types::{EnumVariants, ParamType};
use super::new_contract_call_with_random_id;
use crate::calls::utils::compute_calls_instructions_len;
// movi, movi, lw, movi + call (for gas)
const BASE_INSTRUCTION_COUNT: usize = 5;
// 2 instructions (movi and lw) added in get_single_call_instructions when gas_offset is set
const GAS_OFFSET_INSTRUCTION_COUNT: usize = 2;
#[test]
fn test_simple() {
let call = new_contract_call_with_random_id();
let instructions_len = compute_calls_instructions_len(&[call]);
assert_eq!(instructions_len, Instruction::SIZE * BASE_INSTRUCTION_COUNT);
}
#[test]
fn test_with_gas_offset() {
let mut call = new_contract_call_with_random_id();
call.call_parameters = call.call_parameters.with_gas_forwarded(0);
let instructions_len = compute_calls_instructions_len(&[call]);
assert_eq!(
instructions_len,
Instruction::SIZE * (BASE_INSTRUCTION_COUNT + GAS_OFFSET_INSTRUCTION_COUNT)
);
}
#[test]
fn test_with_enum_with_only_non_heap_variants() {
let mut call = new_contract_call_with_random_id();
call.output_param = ParamType::Enum {
name: "".to_string(),
enum_variants: EnumVariants::new(vec![
("".to_string(), ParamType::Bool),
("".to_string(), ParamType::U8),
])
.unwrap(),
generics: Vec::new(),
};
let instructions_len = compute_calls_instructions_len(&[call]);
assert_eq!(
instructions_len,
// no extra instructions if there are no heap type variants
Instruction::SIZE * BASE_INSTRUCTION_COUNT
);
}
}
}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/calls/call_handler.rs | packages/fuels-programs/src/calls/call_handler.rs | use crate::{
calls::{
CallParameters, ContractCall, Execution, ExecutionType, ScriptCall,
receipt_parser::ReceiptParser,
traits::{ContractDependencyConfigurator, ResponseParser, TransactionTuner},
utils::find_ids_of_missing_contracts,
},
responses::{CallResponse, SubmitResponse},
};
use core::{fmt::Debug, marker::PhantomData};
use fuel_tx::ConsensusParameters;
use fuels_accounts::{Account, provider::TransactionCost};
use fuels_core::{
codec::{ABIEncoder, DecoderConfig, EncoderConfig, LogDecoder},
traits::{Parameterize, Signer, Tokenizable},
types::{
Address, AssetId, Bytes32, ContractId, Selector, Token,
errors::{Error, Result, error, transaction::Reason},
input::Input,
output::Output,
transaction::{ScriptTransaction, Transaction, TxPolicies},
transaction_builders::{
BuildableTransaction, ScriptBuildStrategy, ScriptTransactionBuilder,
TransactionBuilder, VariableOutputPolicy,
},
tx_status::TxStatus,
},
};
use std::sync::Arc;
// Trait implemented by contract instances so that
// they can be passed to the `with_contracts` method
pub trait ContractDependency {
fn id(&self) -> ContractId;
fn log_decoder(&self) -> LogDecoder;
}
#[derive(Debug, Clone)]
#[must_use = "contract calls do nothing unless you `call` them"]
/// Helper that handles submitting a call to a client and formatting the response
pub struct CallHandler<A, C, T> {
pub account: A,
pub call: C,
pub tx_policies: TxPolicies,
pub log_decoder: LogDecoder,
pub datatype: PhantomData<T>,
decoder_config: DecoderConfig,
// Initially `None`, gets set to the right tx id after the transaction is submitted
cached_tx_id: Option<Bytes32>,
variable_output_policy: VariableOutputPolicy,
unresolved_signers: Vec<Arc<dyn Signer + Send + Sync>>,
}
impl<A, C, T> CallHandler<A, C, T> {
/// Sets the transaction policies for a given transaction.
/// Note that this is a builder method, i.e. use it as a chain:
/// ```ignore
/// let tx_policies = TxPolicies::default().with_gas_price(100);
/// my_contract_instance.my_method(...).with_tx_policies(tx_policies).call()
/// ```
pub fn with_tx_policies(mut self, tx_policies: TxPolicies) -> Self {
self.tx_policies = tx_policies;
self
}
pub fn with_decoder_config(mut self, decoder_config: DecoderConfig) -> Self {
self.decoder_config = decoder_config;
self.log_decoder.set_decoder_config(decoder_config);
self
}
/// If this method is not called, the default policy is to not add any variable outputs.
///
/// # Parameters
/// - `variable_outputs`: The [`VariableOutputPolicy`] to apply for the contract call.
///
/// # Returns
/// - `Self`: The updated SDK configuration.
pub fn with_variable_output_policy(mut self, variable_outputs: VariableOutputPolicy) -> Self {
self.variable_output_policy = variable_outputs;
self
}
pub fn add_signer(mut self, signer: impl Signer + Send + Sync + 'static) -> Self {
self.unresolved_signers.push(Arc::new(signer));
self
}
}
impl<A, C, T> CallHandler<A, C, T>
where
A: Account,
C: TransactionTuner,
T: Tokenizable + Parameterize + Debug,
{
pub async fn transaction_builder(&self) -> Result<ScriptTransactionBuilder> {
let consensus_parameters = self.account.try_provider()?.consensus_parameters().await?;
let required_asset_amounts = self
.call
.required_assets(*consensus_parameters.base_asset_id());
// Find the spendable resources required for those calls
let mut asset_inputs = vec![];
for &(asset_id, amount) in &required_asset_amounts {
let resources = self
.account
.get_asset_inputs_for_amount(asset_id, amount, None)
.await?;
asset_inputs.extend(resources);
}
self.transaction_builder_with_parameters(&consensus_parameters, asset_inputs)
}
pub fn transaction_builder_with_parameters(
&self,
consensus_parameters: &ConsensusParameters,
asset_inputs: Vec<Input>,
) -> Result<ScriptTransactionBuilder> {
let mut tb = self.call.transaction_builder(
self.tx_policies,
self.variable_output_policy,
consensus_parameters,
asset_inputs,
&self.account,
)?;
tb.add_signers(&self.unresolved_signers)?;
Ok(tb)
}
/// Returns the script that executes the contract call
pub async fn build_tx(&self) -> Result<ScriptTransaction> {
let tb = self.transaction_builder().await?;
self.call.build_tx(tb, &self.account).await
}
/// Get a call's estimated cost
pub async fn estimate_transaction_cost(
&self,
tolerance: Option<f64>,
block_horizon: Option<u32>,
) -> Result<TransactionCost> {
let tx = self.build_tx().await?;
let provider = self.account.try_provider()?;
let transaction_cost = provider
.estimate_transaction_cost(tx, tolerance, block_horizon)
.await?;
Ok(transaction_cost)
}
}
impl<A, C, T> CallHandler<A, C, T>
where
A: Account,
C: ContractDependencyConfigurator + TransactionTuner + ResponseParser,
T: Tokenizable + Parameterize + Debug,
{
/// Sets external contracts as dependencies to this contract's call.
/// Effectively, this will be used to create [`fuel_tx::Input::Contract`]/[`fuel_tx::Output::Contract`]
/// pairs and set them into the transaction. Note that this is a builder
/// method, i.e. use it as a chain:
///
/// ```ignore
/// my_contract_instance.my_method(...).with_contract_ids(&[another_contract_id]).call()
/// ```
///
/// [`Input::Contract`]: fuel_tx::Input::Contract
/// [`Output::Contract`]: fuel_tx::Output::Contract
pub fn with_contract_ids(mut self, contract_ids: &[ContractId]) -> Self {
self.call = self.call.with_external_contracts(contract_ids.to_vec());
self
}
/// Sets external contract instances as dependencies to this contract's call.
/// Effectively, this will be used to: merge `LogDecoder`s and create
/// [`fuel_tx::Input::Contract`]/[`fuel_tx::Output::Contract`] pairs and set them into the transaction.
/// Note that this is a builder method, i.e. use it as a chain:
///
/// ```ignore
/// my_contract_instance.my_method(...).with_contracts(&[another_contract_instance]).call()
/// ```
pub fn with_contracts(mut self, contracts: &[&dyn ContractDependency]) -> Self {
self.call = self
.call
.with_external_contracts(contracts.iter().map(|c| c.id()).collect());
for c in contracts {
self.log_decoder.merge(c.log_decoder());
}
self
}
/// Call a contract's method on the node, in a state-modifying manner.
pub async fn call(mut self) -> Result<CallResponse<T>> {
let tx = self.build_tx().await?;
let provider = self.account.try_provider()?;
let consensus_parameters = provider.consensus_parameters().await?;
let chain_id = consensus_parameters.chain_id();
self.cached_tx_id = Some(tx.id(chain_id));
let tx_status = provider.send_transaction_and_await_commit(tx).await?;
self.get_response(tx_status)
}
pub async fn submit(mut self) -> Result<SubmitResponse<A, C, T>> {
let tx = self.build_tx().await?;
let provider = self.account.try_provider()?;
let tx_id = provider.send_transaction(tx.clone()).await?;
self.cached_tx_id = Some(tx_id);
Ok(SubmitResponse::<A, C, T>::new(tx_id, self))
}
/// Call a contract's method on the node, in a simulated manner, meaning the state of the
/// blockchain is *not* modified but simulated.
pub async fn simulate(
&mut self,
Execution {
execution_type,
at_height,
}: Execution,
) -> Result<CallResponse<T>> {
let provider = self.account.try_provider()?;
let tx_status = if let ExecutionType::StateReadOnly = execution_type {
let tx = self
.transaction_builder()
.await?
.with_build_strategy(ScriptBuildStrategy::StateReadOnly)
.build(provider)
.await?;
provider.dry_run_opt(tx, false, Some(0), at_height).await?
} else {
let tx = self.build_tx().await?;
provider.dry_run_opt(tx, true, None, at_height).await?
};
self.get_response(tx_status)
}
/// Create a [`CallResponse`] from `TxStatus`
pub fn get_response(&self, tx_status: TxStatus) -> Result<CallResponse<T>> {
let success = tx_status.take_success_checked(Some(&self.log_decoder))?;
let token =
self.call
.parse_call(&success.receipts, self.decoder_config, &T::param_type())?;
Ok(CallResponse {
value: T::from_token(token)?,
log_decoder: self.log_decoder.clone(),
tx_id: self.cached_tx_id,
tx_status: success,
})
}
pub async fn determine_missing_contracts(mut self) -> Result<Self> {
match self.simulate(Execution::realistic()).await {
Ok(_) => Ok(self),
Err(Error::Transaction(Reason::Failure { ref receipts, .. })) => {
for contract_id in find_ids_of_missing_contracts(receipts) {
self.call.append_external_contract(contract_id);
}
Ok(self)
}
Err(other_error) => Err(other_error),
}
}
}
impl<A, T> CallHandler<A, ContractCall, T>
where
A: Account,
T: Tokenizable + Parameterize + Debug,
{
pub fn new_contract_call(
contract_id: ContractId,
account: A,
encoded_selector: Selector,
args: &[Token],
log_decoder: LogDecoder,
is_payable: bool,
encoder_config: EncoderConfig,
) -> Self {
let call = ContractCall {
contract_id,
encoded_selector,
encoded_args: ABIEncoder::new(encoder_config).encode(args),
call_parameters: CallParameters::default(),
external_contracts: vec![],
output_param: T::param_type(),
is_payable,
custom_assets: Default::default(),
inputs: vec![],
outputs: vec![],
};
CallHandler {
account,
call,
tx_policies: TxPolicies::default(),
log_decoder,
datatype: PhantomData,
decoder_config: DecoderConfig::default(),
cached_tx_id: None,
variable_output_policy: VariableOutputPolicy::default(),
unresolved_signers: vec![],
}
}
/// Adds a custom `asset_id` with its `amount` and an optional `address` to be used for
/// generating outputs to this contract's call.
///
/// # Parameters
/// - `asset_id`: The unique identifier of the asset being added.
/// - `amount`: The amount of the asset being added.
/// - `address`: The optional account address that the output amount will be sent to.
/// If not provided, the asset will be sent to the users account address.
///
/// Note that this is a builder method, i.e. use it as a chain:
///
/// ```ignore
/// let asset_id = AssetId::from([3u8; 32]);
/// let amount = 5000;
/// my_contract_instance.my_method(...).add_custom_asset(asset_id, amount, None).call()
/// ```
pub fn add_custom_asset(mut self, asset_id: AssetId, amount: u64, to: Option<Address>) -> Self {
self.call.add_custom_asset(asset_id, amount, to);
self
}
pub fn is_payable(&self) -> bool {
self.call.is_payable
}
/// Sets the call parameters for a given contract call.
/// Note that this is a builder method, i.e. use it as a chain:
///
/// ```ignore
/// let params = CallParameters { amount: 1, asset_id: AssetId::zeroed() };
/// my_contract_instance.my_method(...).call_params(params).call()
/// ```
pub fn call_params(mut self, params: CallParameters) -> Result<Self> {
if !self.is_payable() && params.amount() > 0 {
return Err(error!(Other, "assets forwarded to non-payable method"));
}
self.call.call_parameters = params;
Ok(self)
}
/// Add custom outputs to the `CallHandler`. These outputs
/// will appear at the **start** of the final output list.
pub fn with_outputs(mut self, outputs: Vec<Output>) -> Self {
self.call = self.call.with_outputs(outputs);
self
}
/// Add custom inputs to the `CallHandler`. These inputs
/// will appear at the **start** of the final input list.
pub fn with_inputs(mut self, inputs: Vec<Input>) -> Self {
self.call = self.call.with_inputs(inputs);
self
}
}
impl<A, T> CallHandler<A, ScriptCall, T>
where
A: Account,
T: Parameterize + Tokenizable + Debug,
{
pub fn new_script_call(
script_binary: Vec<u8>,
encoded_args: Result<Vec<u8>>,
account: A,
log_decoder: LogDecoder,
) -> Self {
let call = ScriptCall {
script_binary,
encoded_args,
inputs: vec![],
outputs: vec![],
external_contracts: vec![],
};
Self {
account,
call,
tx_policies: TxPolicies::default(),
log_decoder,
datatype: PhantomData,
decoder_config: DecoderConfig::default(),
cached_tx_id: None,
variable_output_policy: VariableOutputPolicy::default(),
unresolved_signers: vec![],
}
}
/// Add custom outputs to the `CallHandler`. These outputs
/// will appear at the **start** of the final output list.
pub fn with_outputs(mut self, outputs: Vec<Output>) -> Self {
self.call = self.call.with_outputs(outputs);
self
}
/// Add custom inputs to the `CallHandler`. These inputs
/// will appear at the **start** of the final input list.
pub fn with_inputs(mut self, inputs: Vec<Input>) -> Self {
self.call = self.call.with_inputs(inputs);
self
}
}
impl<A> CallHandler<A, Vec<ContractCall>, ()>
where
A: Account,
{
pub fn new_multi_call(account: A) -> Self {
Self {
account,
call: vec![],
tx_policies: TxPolicies::default(),
log_decoder: LogDecoder::new(Default::default(), Default::default()),
datatype: PhantomData,
decoder_config: DecoderConfig::default(),
cached_tx_id: None,
variable_output_policy: VariableOutputPolicy::default(),
unresolved_signers: vec![],
}
}
fn append_external_contract(mut self, contract_id: ContractId) -> Result<Self> {
if self.call.is_empty() {
return Err(error!(
Other,
"no calls added. Have you used '.add_calls()'?"
));
}
self.call
.iter_mut()
.take(1)
.for_each(|call| call.append_external_contract(contract_id));
Ok(self)
}
/// Adds a contract call to be bundled in the transaction.
/// Note that if you added custom inputs/outputs that they will follow the
/// order in which the calls are added.
pub fn add_call(
mut self,
call_handler: CallHandler<impl Account, ContractCall, impl Tokenizable>,
) -> Self {
self.log_decoder.merge(call_handler.log_decoder);
self.call.push(call_handler.call);
self.unresolved_signers
.extend(call_handler.unresolved_signers);
self
}
/// Call contract methods on the node, in a state-modifying manner.
pub async fn call<T: Tokenizable + Debug>(mut self) -> Result<CallResponse<T>> {
let tx = self.build_tx().await?;
let provider = self.account.try_provider()?;
let consensus_parameters = provider.consensus_parameters().await?;
let chain_id = consensus_parameters.chain_id();
self.cached_tx_id = Some(tx.id(chain_id));
let tx_status = provider.send_transaction_and_await_commit(tx).await?;
self.get_response(tx_status)
}
pub async fn submit(mut self) -> Result<SubmitResponse<A, Vec<ContractCall>, ()>> {
let tx = self.build_tx().await?;
let provider = self.account.try_provider()?;
let tx_id = provider.send_transaction(tx).await?;
self.cached_tx_id = Some(tx_id);
Ok(SubmitResponse::<A, Vec<ContractCall>, ()>::new(tx_id, self))
}
/// Call contract methods on the node, in a simulated manner, meaning the state of the
/// blockchain is *not* modified but simulated.
/// It is the same as the [call] method because the API is more user-friendly this way.
///
/// [call]: Self::call
pub async fn simulate<T: Tokenizable + Debug>(
&mut self,
Execution {
execution_type,
at_height,
}: Execution,
) -> Result<CallResponse<T>> {
let provider = self.account.try_provider()?;
let tx_status = if let ExecutionType::StateReadOnly = execution_type {
let tx = self
.transaction_builder()
.await?
.with_build_strategy(ScriptBuildStrategy::StateReadOnly)
.build(provider)
.await?;
provider.dry_run_opt(tx, false, Some(0), at_height).await?
} else {
let tx = self.build_tx().await?;
provider.dry_run_opt(tx, true, None, at_height).await?
};
self.get_response(tx_status)
}
/// Simulates a call without needing to resolve the generic for the return type
async fn simulate_without_decode(&self) -> Result<()> {
let provider = self.account.try_provider()?;
let tx = self.build_tx().await?;
provider.dry_run(tx).await?.check(None)?;
Ok(())
}
/// Create a [`CallResponse`] from `TxStatus`
pub fn get_response<T: Tokenizable + Debug>(
&self,
tx_status: TxStatus,
) -> Result<CallResponse<T>> {
let success = tx_status.take_success_checked(Some(&self.log_decoder))?;
let mut receipt_parser = ReceiptParser::new(&success.receipts, self.decoder_config);
let final_tokens = self
.call
.iter()
.map(|call| receipt_parser.parse_call(call.contract_id, &call.output_param))
.collect::<Result<Vec<_>>>()?;
let tokens_as_tuple = Token::Tuple(final_tokens);
Ok(CallResponse {
value: T::from_token(tokens_as_tuple)?,
log_decoder: self.log_decoder.clone(),
tx_id: self.cached_tx_id,
tx_status: success,
})
}
/// Simulates the call and attempts to resolve missing contract outputs.
/// Forwards the received error if it cannot be fixed.
pub async fn determine_missing_contracts(mut self) -> Result<Self> {
match self.simulate_without_decode().await {
Ok(_) => Ok(self),
Err(Error::Transaction(Reason::Failure { ref receipts, .. })) => {
for contract_id in find_ids_of_missing_contracts(receipts) {
self = self.append_external_contract(contract_id)?;
}
Ok(self)
}
Err(other_error) => Err(other_error),
}
}
}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/calls/traits.rs | packages/fuels-programs/src/calls/traits.rs | mod contract_dep_configurator;
mod response_parser;
mod transaction_tuner;
pub use contract_dep_configurator::*;
pub use response_parser::*;
pub use transaction_tuner::*;
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/calls/script_call.rs | packages/fuels-programs/src/calls/script_call.rs | use std::{collections::HashSet, fmt::Debug};
use fuels_core::types::{
ContractId,
errors::{Result, error},
input::Input,
output::Output,
};
use itertools::chain;
use crate::calls::utils::{generate_contract_inputs, generate_contract_outputs, sealed};
#[derive(Debug, Clone)]
/// Contains all data relevant to a single script call
pub struct ScriptCall {
pub script_binary: Vec<u8>,
pub encoded_args: Result<Vec<u8>>,
pub inputs: Vec<Input>,
pub outputs: Vec<Output>,
pub external_contracts: Vec<ContractId>,
}
impl ScriptCall {
/// Add custom outputs to the `ScriptCall`.
pub fn with_outputs(mut self, outputs: Vec<Output>) -> Self {
self.outputs = outputs;
self
}
/// Add custom inputs to the `ScriptCall`.
pub fn with_inputs(mut self, inputs: Vec<Input>) -> Self {
self.inputs = inputs;
self
}
pub(crate) fn prepare_inputs_outputs(&self) -> Result<(Vec<Input>, Vec<Output>)> {
let contract_ids: HashSet<ContractId> = self.external_contracts.iter().copied().collect();
let num_of_contracts = contract_ids.len();
let inputs = chain!(
self.inputs.clone(),
generate_contract_inputs(contract_ids, self.outputs.len())
)
.collect();
// Note the contract_outputs are placed after the custom outputs and
// the contract_inputs are referencing them via `output_index`. The
// node will, upon receiving our request, use `output_index` to index
// the `inputs` array we've sent over.
let outputs = chain!(
self.outputs.clone(),
generate_contract_outputs(num_of_contracts, self.inputs.len()),
)
.collect();
Ok((inputs, outputs))
}
pub(crate) fn compute_script_data(&self) -> Result<Vec<u8>> {
self.encoded_args
.as_ref()
.map(|b| b.to_owned())
.map_err(|e| error!(Codec, "cannot encode script call arguments: {e}"))
}
}
impl sealed::Sealed for ScriptCall {}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/calls/traits/contract_dep_configurator.rs | packages/fuels-programs/src/calls/traits/contract_dep_configurator.rs | use fuels_core::types::ContractId;
use crate::calls::{ContractCall, ScriptCall, utils::sealed};
pub trait ContractDependencyConfigurator: sealed::Sealed {
fn append_external_contract(&mut self, contract_id: ContractId);
fn with_external_contracts(self, external_contracts: Vec<ContractId>) -> Self;
}
impl ContractDependencyConfigurator for ContractCall {
fn append_external_contract(&mut self, contract_id: ContractId) {
self.external_contracts.push(contract_id)
}
fn with_external_contracts(self, external_contracts: Vec<ContractId>) -> Self {
ContractCall {
external_contracts,
..self
}
}
}
impl ContractDependencyConfigurator for ScriptCall {
fn append_external_contract(&mut self, contract_id: ContractId) {
self.external_contracts.push(contract_id)
}
fn with_external_contracts(self, external_contracts: Vec<ContractId>) -> Self {
ScriptCall {
external_contracts,
..self
}
}
}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/calls/traits/response_parser.rs | packages/fuels-programs/src/calls/traits/response_parser.rs | use fuel_tx::Receipt;
use fuels_core::{
codec::DecoderConfig,
types::{Token, errors::Result, param_types::ParamType},
};
use crate::calls::{ContractCall, ScriptCall, receipt_parser::ReceiptParser, utils::sealed};
pub trait ResponseParser: sealed::Sealed {
fn parse_call(
&self,
receipts: &[Receipt],
decoder_config: DecoderConfig,
param_type: &ParamType,
) -> Result<Token>;
}
impl ResponseParser for ContractCall {
fn parse_call(
&self,
receipts: &[Receipt],
decoder_config: DecoderConfig,
param_type: &ParamType,
) -> Result<Token> {
ReceiptParser::new(receipts, decoder_config).parse_call(self.contract_id, param_type)
}
}
impl ResponseParser for ScriptCall {
fn parse_call(
&self,
receipts: &[Receipt],
decoder_config: DecoderConfig,
param_type: &ParamType,
) -> Result<Token> {
ReceiptParser::new(receipts, decoder_config).parse_script(param_type)
}
}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/calls/traits/transaction_tuner.rs | packages/fuels-programs/src/calls/traits/transaction_tuner.rs | use crate::calls::utils::calculate_required_asset_amounts;
use crate::{
DEFAULT_MAX_FEE_ESTIMATION_TOLERANCE,
calls::{
ContractCall, ScriptCall,
utils::{build_with_tb, sealed, transaction_builder_from_contract_calls},
},
};
use fuel_tx::ConsensusParameters;
use fuel_types::AssetId;
use fuels_accounts::Account;
use fuels_core::types::input::Input;
use fuels_core::types::{
errors::{Context, Result, error},
transaction::{ScriptTransaction, TxPolicies},
transaction_builders::{
BuildableTransaction, ScriptTransactionBuilder, TransactionBuilder, VariableOutputPolicy,
},
};
#[async_trait::async_trait]
pub trait TransactionTuner: sealed::Sealed {
fn required_assets(&self, base_asset_id: AssetId) -> Vec<(AssetId, u128)>;
fn transaction_builder<T: Account>(
&self,
tx_policies: TxPolicies,
variable_output_policy: VariableOutputPolicy,
consensus_parameters: &ConsensusParameters,
asset_input: Vec<Input>,
account: &T,
) -> Result<ScriptTransactionBuilder>;
async fn build_tx<T: Account>(
&self,
tb: ScriptTransactionBuilder,
account: &T,
) -> Result<ScriptTransaction>;
}
#[async_trait::async_trait]
impl TransactionTuner for ContractCall {
    /// A single contract call is handled as a one-element batch.
    fn required_assets(&self, base_asset_id: AssetId) -> Vec<(AssetId, u128)> {
        let single_call = std::slice::from_ref(self);
        calculate_required_asset_amounts(single_call, base_asset_id)
    }

    /// Delegates to the multi-call builder with a one-element slice.
    fn transaction_builder<T: Account>(
        &self,
        tx_policies: TxPolicies,
        variable_output_policy: VariableOutputPolicy,
        consensus_parameters: &ConsensusParameters,
        asset_input: Vec<Input>,
        account: &T,
    ) -> Result<ScriptTransactionBuilder> {
        let single_call = std::slice::from_ref(self);
        transaction_builder_from_contract_calls(
            single_call,
            tx_policies,
            variable_output_policy,
            consensus_parameters,
            asset_input,
            account,
        )
    }

    /// Finalizes the transaction for this single call.
    async fn build_tx<T: Account>(
        &self,
        tb: ScriptTransactionBuilder,
        account: &T,
    ) -> Result<ScriptTransaction> {
        let single_call = std::slice::from_ref(self);
        build_with_tb(single_call, tb, account).await
    }
}
#[async_trait::async_trait]
impl TransactionTuner for ScriptCall {
    /// Script calls do not forward assets up front, so nothing is required.
    fn required_assets(&self, _: AssetId) -> Vec<(AssetId, u128)> {
        Vec::new()
    }

    /// Assembles a builder carrying the script binary, its encoded data and
    /// the call's inputs/outputs. Consensus parameters, asset inputs and the
    /// account are not needed for scripts.
    fn transaction_builder<T: Account>(
        &self,
        tx_policies: TxPolicies,
        variable_output_policy: VariableOutputPolicy,
        _: &ConsensusParameters,
        _: Vec<Input>,
        _account: &T,
    ) -> Result<ScriptTransactionBuilder> {
        let (inputs, outputs) = self.prepare_inputs_outputs()?;
        let script_data = self.compute_script_data()?;

        let builder = ScriptTransactionBuilder::default()
            .with_variable_output_policy(variable_output_policy)
            .with_tx_policies(tx_policies)
            .with_script(self.script_binary.clone())
            .with_script_data(script_data)
            .with_inputs(inputs)
            .with_outputs(outputs)
            .with_gas_estimation_tolerance(DEFAULT_MAX_FEE_ESTIMATION_TOLERANCE)
            .with_max_fee_estimation_tolerance(DEFAULT_MAX_FEE_ESTIMATION_TOLERANCE);

        Ok(builder)
    }

    /// Adds the account's witnesses, tops up inputs to cover the fee, and builds.
    async fn build_tx<T: Account>(
        &self,
        mut tb: ScriptTransactionBuilder,
        account: &T,
    ) -> Result<ScriptTransaction> {
        account.add_witnesses(&mut tb)?;
        account
            .adjust_for_fee(&mut tb, 0)
            .await
            .context("failed to adjust inputs to cover for missing base asset")?;

        tb.build(account.try_provider()?).await
    }
}
// allow multi-call bundles (`Vec<ContractCall>`) to implement the sealed traits
impl sealed::Sealed for Vec<ContractCall> {}
#[async_trait::async_trait]
impl TransactionTuner for Vec<ContractCall> {
    /// Determines the `(asset, amount)` pairs needed across all calls in the
    /// batch (delegates to `calculate_required_asset_amounts`).
    fn required_assets(&self, base_asset_id: AssetId) -> Vec<(AssetId, u128)> {
        calculate_required_asset_amounts(self, base_asset_id)
    }

    /// Builds one transaction builder covering every call in the batch.
    ///
    /// Errors if the batch is empty.
    fn transaction_builder<T: Account>(
        &self,
        tx_policies: TxPolicies,
        variable_output_policy: VariableOutputPolicy,
        consensus_parameters: &ConsensusParameters,
        asset_input: Vec<Input>,
        account: &T,
    ) -> Result<ScriptTransactionBuilder> {
        validate_contract_calls(self)?;
        transaction_builder_from_contract_calls(
            self,
            tx_policies,
            variable_output_policy,
            consensus_parameters,
            asset_input,
            account,
        )
    }

    /// Finalizes the transaction that executes all the contract calls.
    ///
    /// Errors if the batch is empty.
    async fn build_tx<T: Account>(
        &self,
        tb: ScriptTransactionBuilder,
        account: &T,
    ) -> Result<ScriptTransaction> {
        validate_contract_calls(self)?;
        build_with_tb(self, tb, account).await
    }
}
fn validate_contract_calls(calls: &[ContractCall]) -> Result<()> {
if calls.is_empty() {
return Err(error!(
Other,
"no calls added. Have you used '.add_calls()'?"
));
}
Ok(())
}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/assembly/script_and_predicate_loader.rs | packages/fuels-programs/src/assembly/script_and_predicate_loader.rs | //! # Loader Module
//!
//! This module provides functionality for loading and processing binaries generated by Sway.
//! **Important:** All functions within this module assume the binary adheres to the structure
//! produced by Sway. Using binaries generated by other means (e.g., manually constructed binaries)
//! may lead to unexpected or incorrect results.
//!
//! For more information on Sway, please visit the [Sway GitHub repository](https://github.com/FuelLabs/sway).
use fuel_asm::{Instruction, RegId, op};
use fuels_core::{constants::WORD_SIZE, types::errors::Result};
use itertools::Itertools;
use crate::assembly::cursor::WasmFriendlyCursor;
/// A "loader" binary: a small program that, at runtime, loads the original
/// code from a blob and jumps into it.
pub struct LoaderCode {
    // id of the blob holding the original (pre-split) code
    blob_id: [u8; 32],
    // the loader binary itself: instructions + blob id [+ configurables section]
    code: Vec<u8>,
    // byte offset, inside `code`, where the configurables section starts
    section_offset: usize,
}
impl LoaderCode {
    // std gated because of Blob usage which is in transaction_builders which are currently not
    // nostd friendly
    /// Converts a regular Sway binary into its loader equivalent: the original
    /// code becomes a blob (whose id is embedded in the loader) and is replaced
    /// by a small program that loads that blob at runtime.
    #[cfg(feature = "std")]
    pub fn from_normal_binary(binary: Vec<u8>) -> Result<Self> {
        let (original_code, split_section) = split_for_loader(&binary)?;
        let blob_id =
            fuels_core::types::transaction_builders::Blob::from(original_code.to_vec()).id();
        let (loader_code, section_offset) = Self::generate_loader_code(blob_id, split_section);
        Ok(Self {
            blob_id,
            code: loader_code,
            section_offset,
        })
    }

    /// Parses an already-generated loader binary.
    ///
    /// Returns `Ok(None)` when `binary` does not start with a known loader
    /// instruction sequence.
    pub fn from_loader_binary(binary: &[u8]) -> Result<Option<Self>> {
        if let Some((blob_id, section_offset)) = extract_blob_id_and_section_offset(binary)? {
            Ok(Some(Self {
                section_offset,
                code: binary.to_vec(),
                blob_id,
            }))
        } else {
            Ok(None)
        }
    }

    /// Extracts, from a normal (non-loader) binary, the blob that a loader
    /// generated from it would reference.
    #[cfg(feature = "std")]
    pub fn extract_blob(binary: &[u8]) -> Result<fuels_core::types::transaction_builders::Blob> {
        let (code, _) = split_for_loader(binary)?;
        Ok(code.to_vec().into())
    }

    /// Raw bytes of the loader binary.
    pub fn as_bytes(&self) -> &[u8] {
        &self.code
    }

    /// Byte offset, within the loader binary, where the configurables section starts.
    pub fn configurables_section_offset(&self) -> usize {
        self.section_offset
    }

    // picks the loader variant depending on whether a configurables section exists
    fn generate_loader_code(blob_id: [u8; 32], split_section: &[u8]) -> (Vec<u8>, usize) {
        if !split_section.is_empty() {
            generate_loader_w_configurables(blob_id, split_section)
        } else {
            generate_loader_wo_configurables(blob_id)
        }
    }

    /// Id of the blob this loader loads at runtime.
    pub fn blob_id(&self) -> [u8; 32] {
        self.blob_id
    }
}
/// Detects which loader variant `binary` starts with and, on a match, returns
/// the embedded blob id together with the byte offset at which the (possibly
/// empty) configurables section begins.
///
/// Returns `Ok(None)` when the binary begins with neither known loader
/// instruction sequence.
fn extract_blob_id_and_section_offset(binary: &[u8]) -> Result<Option<([u8; 32], usize)>> {
    // try the configurables-carrying variant first, then the plain one
    let (has_configurables, mut cursor) = if let Some(cursor) =
        consume_instructions(binary, &loader_instructions_w_configurables())
    {
        (true, cursor)
    } else if let Some(cursor) =
        consume_instructions(binary, &loader_instructions_no_configurables())
    {
        (false, cursor)
    } else {
        return Ok(None);
    };

    let blob_id = cursor.consume_fixed("blob id")?;

    if has_configurables {
        // skip the encoded length of the configurables section
        let _section_len = cursor.consume(WORD_SIZE, "section with configurables len")?;
    }

    // whatever remains unconsumed is the configurables section itself
    let section_offset = binary
        .len()
        .checked_sub(cursor.unconsumed())
        .expect("must be less or eq");

    Ok(Some((blob_id, section_offset)))
}
/// Consumes `expected_instructions.len()` instructions from the front of
/// `binary` and compares them against `expected_instructions`.
///
/// On an exact match, returns the cursor positioned right after them;
/// otherwise (short input, undecodable bytes, or mismatch) returns `None`.
fn consume_instructions<'a>(
    binary: &'a [u8],
    expected_instructions: &[Instruction],
) -> Option<WasmFriendlyCursor<'a>> {
    let expected_byte_len = expected_instructions.len() * Instruction::SIZE;

    let mut cursor = WasmFriendlyCursor::new(binary);
    let raw_bytes = cursor.consume(expected_byte_len, "loader instructions").ok()?;

    let decoded: Vec<Instruction> = fuel_asm::from_bytes(raw_bytes.to_vec())
        .collect::<std::result::Result<_, _>>()
        .ok()?;

    let matches = decoded
        .iter()
        .zip(expected_instructions)
        .all(|(decoded, expected)| decoded == expected);

    matches.then_some(cursor)
}
/// Builds a loader binary (instructions followed by the blob id) for a binary
/// that has no configurables section.
fn generate_loader_wo_configurables(blob_id: [u8; 32]) -> (Vec<u8>, usize) {
    let mut code: Vec<u8> = loader_instructions_no_configurables()
        .into_iter()
        .flat_map(|instruction| instruction.to_bytes())
        .collect();
    code.extend_from_slice(&blob_id);

    // there is no data section, so we point the offset to the end of the file
    let section_offset = code.len();

    (code, section_offset)
}
fn generate_loader_w_configurables(
blob_id: [u8; 32],
section_w_configurables: &[u8],
) -> (Vec<u8>, usize) {
// The final code is going to have this structure:
// 1. loader instructions
// 2. blob id
// 3. length_of_section_containing_configurables
// 4. the section with configurables (updated with configurables as needed)
let instruction_bytes = loader_instructions_w_configurables()
.into_iter()
.flat_map(|instruction| instruction.to_bytes())
.collect_vec();
let blob_bytes = blob_id.iter().copied().collect_vec();
let original_section_len_encoded = u64::try_from(section_w_configurables.len())
.expect("data section to be less than u64::MAX")
.to_be_bytes();
// The section with configurables is placed after all of the instructions, the BlobId, and the number representing
// how big the data section is.
let new_section_offset =
instruction_bytes.len() + blob_bytes.len() + original_section_len_encoded.len();
let code = instruction_bytes
.into_iter()
.chain(blob_bytes)
.chain(original_section_len_encoded)
.chain(section_w_configurables.to_vec())
.collect();
(code, new_section_offset)
}
/// The fixed instruction sequence for a loader whose original binary has no
/// configurables section: it pushes the referenced blob onto the stack and
/// jumps into it.
fn loader_instructions_no_configurables() -> [Instruction; 8] {
    const REG_ADDRESS_OF_DATA_AFTER_CODE: u8 = 0x10;
    const REG_START_OF_LOADED_CODE: u8 = 0x11;
    const REG_GENERAL_USE: u8 = 0x12;
    const NUM_OF_INSTRUCTIONS: u16 = 8;

    // There are 2 main steps:
    // 1. Load the blob content into memory
    // 2. Jump to the beginning of the memory where the blob was loaded
    let instructions = [
        // 1. Load the blob content into memory
        // Find the start of the hardcoded blob ID, which is located after the loader code ends.
        op::move_(REG_ADDRESS_OF_DATA_AFTER_CODE, RegId::PC),
        // hold the address of the blob ID.
        op::addi(
            REG_ADDRESS_OF_DATA_AFTER_CODE,
            REG_ADDRESS_OF_DATA_AFTER_CODE,
            NUM_OF_INSTRUCTIONS * Instruction::SIZE as u16,
        ),
        // The code is going to be loaded from the current value of SP onwards, save
        // the location into REG_START_OF_LOADED_CODE so we can jump into it at the end.
        op::move_(REG_START_OF_LOADED_CODE, RegId::SP),
        // REG_GENERAL_USE to hold the size of the blob.
        op::bsiz(REG_GENERAL_USE, REG_ADDRESS_OF_DATA_AFTER_CODE),
        // Push the blob contents onto the stack.
        op::ldc(REG_ADDRESS_OF_DATA_AFTER_CODE, 0, REG_GENERAL_USE, 1),
        // Jump into the memory where the contract is loaded.
        // What follows is called _jmp_mem by the sway compiler.
        // Subtract the address contained in IS because jmp will add it back.
        op::sub(
            REG_START_OF_LOADED_CODE,
            REG_START_OF_LOADED_CODE,
            RegId::IS,
        ),
        // jmp will multiply by 4, so we need to divide to cancel that out.
        op::divi(REG_START_OF_LOADED_CODE, REG_START_OF_LOADED_CODE, 4),
        // Jump to the start of the contract we loaded.
        op::jmp(REG_START_OF_LOADED_CODE),
    ];

    // the `addi` offset above hardcodes this count — keep them in sync
    debug_assert_eq!(instructions.len(), NUM_OF_INSTRUCTIONS as usize);

    instructions
}
/// The fixed instruction sequence for a loader whose original binary carries a
/// configurables section: it pushes the blob onto the stack, then the
/// configurables section right after it, and jumps into the loaded code.
pub fn loader_instructions_w_configurables() -> [Instruction; 12] {
    const BLOB_ID_SIZE: u16 = 32;
    const REG_ADDRESS_OF_DATA_AFTER_CODE: u8 = 0x10;
    const REG_START_OF_LOADED_CODE: u8 = 0x11;
    const REG_GENERAL_USE: u8 = 0x12;
    // extract the length of the NoDataSectionLoaderInstructions type
    const NUM_OF_INSTRUCTIONS: u16 = 12;

    // There are 3 main steps:
    // 1. Load the blob content into memory
    // 2. Load the data section right after the blob
    // 3. Jump to the beginning of the memory where the blob was loaded
    let instructions = [
        // 1. Load the blob content into memory
        // Find the start of the hardcoded blob ID, which is located after the loader code ends.
        op::move_(REG_ADDRESS_OF_DATA_AFTER_CODE, RegId::PC),
        // hold the address of the blob ID.
        op::addi(
            REG_ADDRESS_OF_DATA_AFTER_CODE,
            REG_ADDRESS_OF_DATA_AFTER_CODE,
            NUM_OF_INSTRUCTIONS * Instruction::SIZE as u16,
        ),
        // The code is going to be loaded from the current value of SP onwards, save
        // the location into REG_START_OF_LOADED_CODE so we can jump into it at the end.
        op::move_(REG_START_OF_LOADED_CODE, RegId::SP),
        // REG_GENERAL_USE to hold the size of the blob.
        op::bsiz(REG_GENERAL_USE, REG_ADDRESS_OF_DATA_AFTER_CODE),
        // Push the blob contents onto the stack.
        op::ldc(REG_ADDRESS_OF_DATA_AFTER_CODE, 0, REG_GENERAL_USE, 1),
        // Move on to the data section length
        op::addi(
            REG_ADDRESS_OF_DATA_AFTER_CODE,
            REG_ADDRESS_OF_DATA_AFTER_CODE,
            BLOB_ID_SIZE,
        ),
        // load the size of the data section into REG_GENERAL_USE
        op::lw(REG_GENERAL_USE, REG_ADDRESS_OF_DATA_AFTER_CODE, 0),
        // after we have read the length of the data section, we move the pointer to the actual
        // data by skipping WORD_SIZE B.
        op::addi(
            REG_ADDRESS_OF_DATA_AFTER_CODE,
            REG_ADDRESS_OF_DATA_AFTER_CODE,
            WORD_SIZE as u16,
        ),
        // load the data section of the executable
        op::ldc(REG_ADDRESS_OF_DATA_AFTER_CODE, 0, REG_GENERAL_USE, 2),
        // Jump into the memory where the contract is loaded.
        // What follows is called _jmp_mem by the sway compiler.
        // Subtract the address contained in IS because jmp will add it back.
        op::sub(
            REG_START_OF_LOADED_CODE,
            REG_START_OF_LOADED_CODE,
            RegId::IS,
        ),
        // jmp will multiply by 4, so we need to divide to cancel that out.
        op::divi(REG_START_OF_LOADED_CODE, REG_START_OF_LOADED_CODE, 4),
        // Jump to the start of the contract we loaded.
        op::jmp(REG_START_OF_LOADED_CODE),
    ];

    // the `addi` offset above hardcodes this count — keep them in sync
    debug_assert_eq!(instructions.len(), NUM_OF_INSTRUCTIONS as usize);

    instructions
}
/// Reads the configurables-section offset, stored big-endian in bytes 16..24
/// of the binary's header.
pub fn extract_configurables_offset(binary: &[u8]) -> Result<usize> {
    let Some(bytes) = binary.get(16..24) else {
        return Err(fuels_core::error!(
            Other,
            "given binary is too short to contain a configurable offset, len: {}",
            binary.len()
        ));
    };

    let offset_bytes: [u8; 8] = bytes.try_into().expect("slice is exactly 8 bytes long");
    Ok(u64::from_be_bytes(offset_bytes) as usize)
}
/// Splits the binary into `(code, configurables_section)` at the offset the
/// header advertises, validating that the offset is in bounds.
pub fn split_at_configurables_offset(binary: &[u8]) -> Result<(&[u8], &[u8])> {
    let offset = extract_configurables_offset(binary)?;

    if offset > binary.len() {
        return Err(fuels_core::error!(
            Other,
            "configurables section offset is out of bounds, offset: {offset}, binary len: {}",
            binary.len()
        ));
    }

    Ok(binary.split_at(offset))
}
/// Reads the data-section offset, stored big-endian in bytes 8..16 of the
/// binary's header.
pub fn extract_data_offset(binary: &[u8]) -> Result<usize> {
    let Some(bytes) = binary.get(8..16) else {
        return Err(fuels_core::error!(
            Other,
            "given binary is too short to contain a data offset, len: {}",
            binary.len()
        ));
    };

    let offset_bytes: [u8; 8] = bytes.try_into().expect("slice is exactly 8 bytes long");
    Ok(u64::from_be_bytes(offset_bytes) as usize)
}
/// Splits the binary into `(code, data_section)` at the offset the header
/// advertises, validating that the offset is in bounds.
pub fn split_at_data_offset(binary: &[u8]) -> Result<(&[u8], &[u8])> {
    let offset = extract_data_offset(binary)?;

    if offset > binary.len() {
        return Err(fuels_core::error!(
            Other,
            "data section offset is out of bounds, offset: {offset}, binary len: {}",
            binary.len()
        ));
    }

    Ok(binary.split_at(offset))
}
/// Splits the binary into the part to be blobbed and the section the loader
/// must append. Legacy binaries only advertise a data-section offset; newer
/// ones also advertise a configurables-section offset.
pub fn split_for_loader(binary: &[u8]) -> Result<(&[u8], &[u8])> {
    match has_configurables_section_offset(binary)? {
        true => split_at_configurables_offset(binary),
        false => split_at_data_offset(binary),
    }
}
/// Returns the offset of the section that contains the configurables.
///
/// NOTE(review): unlike `split_for_loader`, a failure of
/// `has_configurables_section_offset` is swallowed here (`unwrap_or(true)`)
/// and we fall through to reading the configurables offset, which may then
/// error on its own — presumably intentional best-effort behavior; confirm.
pub fn get_offset_for_section_containing_configurables(binary: &[u8]) -> Result<usize> {
    if has_configurables_section_offset(binary).unwrap_or(true) {
        extract_configurables_offset(binary)
    } else {
        extract_data_offset(binary)
    }
}
/// Determines whether the binary's header advertises a configurables-section
/// offset, by inspecting the `JMPF` instruction at bytes 4..8.
///
/// A `JMPF` immediate of `0x04` means the offset is present, `0x02` means the
/// legacy layout without it; anything else is rejected. Assumes a
/// Sway-produced binary (see module docs).
pub fn has_configurables_section_offset(binary: &[u8]) -> Result<bool> {
    let slice = binary.get(4..8).ok_or_else(|| {
        fuels_core::error!(
            Other,
            "binary too short to check JMPF instruction, need at least 8 bytes but got: {}",
            binary.len()
        )
    })?;

    let instruction_bytes: [u8; 4] = slice
        .try_into()
        .map_err(|_| fuels_core::error!(Other, "Failed to convert slice to [u8; 4]"))?;

    match Instruction::try_from(instruction_bytes)
        .map_err(|e| fuels_core::error!(Other, "Invalid instruction at byte 4: {:?}", e))?
    {
        Instruction::JMPF(offset) => match offset.imm18().to_u32() {
            0x04 => Ok(true),
            0x02 => Ok(false),
            other => Err(fuels_core::error!(
                Other,
                "invalid JMPF offset, expected 0x02 or 0x04, got: {:#04x}",
                other
            )),
        },
        inst => Err(fuels_core::error!(
            Other,
            "expected JMPF instruction, got: {:?}",
            inst
        )),
    }
}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/assembly/cursor.rs | packages/fuels-programs/src/assembly/cursor.rs | use fuels_core::{error, types::errors::Result};
/// A minimal forward-only cursor over a byte slice that does not rely on
/// `std::io` (hence "wasm friendly").
pub struct WasmFriendlyCursor<'a> {
    // the not-yet-consumed remainder of the input
    data: &'a [u8],
}
impl<'a> WasmFriendlyCursor<'a> {
pub fn new(data: &'a [u8]) -> Self {
Self { data }
}
pub fn consume(&mut self, amount: usize, ctx: &'static str) -> Result<&'a [u8]> {
if self.data.len() < amount {
Err(error!(
Other,
"while decoding {ctx}: not enough data, available: {}, requested: {}",
self.data.len(),
amount
))
} else {
let data = &self.data[..amount];
self.data = &self.data[amount..];
Ok(data)
}
}
pub fn consume_fixed<const AMOUNT: usize>(
&mut self,
ctx: &'static str,
) -> Result<[u8; AMOUNT]> {
let data = self
.consume(AMOUNT, ctx)?
.try_into()
.expect("should have failed if not enough data");
Ok(data)
}
pub fn consume_all(&mut self) -> &'a [u8] {
let data = self.data;
self.data = &[];
data
}
pub fn unconsumed(&self) -> usize {
self.data.len()
}
}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/assembly/contract_call.rs | packages/fuels-programs/src/assembly/contract_call.rs | use fuel_asm::{Instruction, RegId, Word, op};
use fuel_tx::{AssetId, ContractId};
use fuels_core::{constants::WORD_SIZE, error, types::errors::Result};
use super::cursor::WasmFriendlyCursor;
#[derive(Debug)]
/// The assembly instruction sequence performing a single contract call.
pub struct ContractCallInstructions {
    // the generated call instructions
    instructions: Vec<Instruction>,
    // whether the sequence loads an explicitly forwarded gas amount
    gas_fwd: bool,
}
impl IntoIterator for ContractCallInstructions {
    type Item = Instruction;
    type IntoIter = std::vec::IntoIter<Instruction>;

    /// Iterates over the underlying instructions in order.
    fn into_iter(self) -> Self::IntoIter {
        self.instructions.into_iter()
    }
}
impl ContractCallInstructions {
    /// Generates the call instruction sequence for the offsets in `opcode_params`.
    pub fn new(opcode_params: CallOpcodeParamsOffset) -> Self {
        Self {
            gas_fwd: opcode_params.gas_forwarded_offset.is_some(),
            instructions: Self::generate_instructions(opcode_params),
        }
    }

    /// Serializes the instructions into their byte representation.
    pub fn into_bytes(self) -> impl Iterator<Item = u8> {
        self.instructions
            .into_iter()
            .flat_map(|instruction| instruction.to_bytes())
    }

    /// Returns the VM instructions for calling a contract method
    /// We use the [`Opcode`] to call a contract: [`CALL`](Opcode::CALL)
    /// pointing at the following registers:
    ///
    /// 0x10 Script data offset
    /// 0x11 Coin amount
    /// 0x12 Asset ID
    /// 0x13 Gas forwarded
    ///
    /// Note that these are soft rules as we're picking this addresses simply because they
    /// non-reserved register.
    fn generate_instructions(offsets: CallOpcodeParamsOffset) -> Vec<Instruction> {
        let call_data_offset = offsets
            .call_data_offset
            .try_into()
            .expect("call_data_offset out of range");
        let amount_offset = offsets
            .amount_offset
            .try_into()
            .expect("amount_offset out of range");
        let asset_id_offset = offsets
            .asset_id_offset
            .try_into()
            .expect("asset_id_offset out of range");

        let mut instructions = [
            op::movi(0x10, call_data_offset),
            op::movi(0x11, amount_offset),
            op::lw(0x11, 0x11, 0),
            op::movi(0x12, asset_id_offset),
        ]
        .to_vec();

        match offsets.gas_forwarded_offset {
            Some(gas_forwarded_offset) => {
                let gas_forwarded_offset = gas_forwarded_offset
                    .try_into()
                    .expect("gas_forwarded_offset out of range");
                instructions.extend(&[
                    op::movi(0x13, gas_forwarded_offset),
                    op::lw(0x13, 0x13, 0),
                    op::call(0x10, 0x11, 0x12, 0x13),
                ]);
            }
            // if `gas_forwarded` was not set use `REG_CGAS`
            None => instructions.push(op::call(0x10, 0x11, 0x12, RegId::CGAS)),
        };

        instructions
    }

    // tries to match the leading instructions against the no-gas-forwarding variant
    fn extract_normal_variant(instructions: &[Instruction]) -> Option<&[Instruction]> {
        let normal_instructions = Self::generate_instructions(CallOpcodeParamsOffset {
            call_data_offset: 0,
            amount_offset: 0,
            asset_id_offset: 0,
            gas_forwarded_offset: None,
        });
        Self::extract_if_match(instructions, &normal_instructions)
    }

    // tries to match the leading instructions against the gas-forwarding variant
    fn extract_gas_fwd_variant(instructions: &[Instruction]) -> Option<&[Instruction]> {
        let gas_fwd_instructions = Self::generate_instructions(CallOpcodeParamsOffset {
            call_data_offset: 0,
            amount_offset: 0,
            asset_id_offset: 0,
            gas_forwarded_offset: Some(0),
        });
        Self::extract_if_match(instructions, &gas_fwd_instructions)
    }

    /// Extracts the leading contract-call instructions from `instructions`, if
    /// they match either known variant (with or without gas forwarding).
    pub fn extract_from(instructions: &[Instruction]) -> Option<Self> {
        if let Some(instructions) = Self::extract_normal_variant(instructions) {
            return Some(Self {
                instructions: instructions.to_vec(),
                gas_fwd: false,
            });
        }

        Self::extract_gas_fwd_variant(instructions).map(|instructions| Self {
            instructions: instructions.to_vec(),
            gas_fwd: true,
        })
    }

    /// Number of instructions in this call sequence.
    pub fn len(&self) -> usize {
        self.instructions.len()
    }

    /// `true` when the sequence contains no instructions.
    // added alongside the public `len` to satisfy the conventional
    // `len`/`is_empty` pairing (clippy::len_without_is_empty)
    pub fn is_empty(&self) -> bool {
        self.instructions.is_empty()
    }

    /// The script-data offset encoded in the first (`MOVI`) instruction.
    ///
    /// # Panics
    /// Panics if the first instruction is not a `MOVI` — the constructors
    /// guarantee that it is.
    pub fn call_data_offset(&self) -> u32 {
        let Instruction::MOVI(movi) = self.instructions[0] else {
            panic!("should have validated the first instruction is a MOVI");
        };

        movi.imm18().into()
    }

    /// Whether this sequence forwards an explicit gas amount.
    pub fn is_gas_fwd_variant(&self) -> bool {
        self.gas_fwd
    }

    /// Returns the leading `correct.len()` instructions of `unknown` if their
    /// opcodes match `correct` one-for-one, `None` otherwise.
    fn extract_if_match<'a>(
        unknown: &'a [Instruction],
        correct: &[Instruction],
    ) -> Option<&'a [Instruction]> {
        if unknown.len() < correct.len() {
            return None;
        }

        unknown
            .iter()
            .zip(correct)
            // compare opcodes only: the immediates encode call-specific offsets
            .all(|(actual, expected)| actual.opcode() == expected.opcode())
            .then(|| &unknown[..correct.len()])
    }
}
#[derive(Debug, Clone, PartialEq, Eq)]
/// Decoded form of the script data describing a single contract call.
pub struct ContractCallData {
    // amount forwarded with the call
    pub amount: u64,
    // asset forwarded with the call
    pub asset_id: AssetId,
    // contract being called
    pub contract_id: ContractId,
    // encoded function selector (the method name)
    pub fn_selector_encoded: Vec<u8>,
    // ABI-encoded call arguments
    pub encoded_args: Vec<u8>,
    // explicitly forwarded gas, when present
    pub gas_forwarded: Option<u64>,
}
impl ContractCallData {
    /// Interprets the encoded function selector as a UTF-8 method name.
    pub fn decode_fn_selector(&self) -> Result<String> {
        String::from_utf8(self.fn_selector_encoded.clone())
            .map_err(|e| error!(Codec, "cannot decode function selector: {}", e))
    }

    /// Encodes as script data, consisting of the following items in the given order:
    /// 1. Amount to be forwarded `(1 * `[`WORD_SIZE`]`)`
    /// 2. Asset ID to be forwarded ([`AssetId::LEN`])
    /// 3. Contract ID ([`ContractId::LEN`]);
    /// 4. Function selector offset `(1 * `[`WORD_SIZE`]`)`
    /// 5. Calldata offset `(1 * `[`WORD_SIZE`]`)`
    /// 6. Encoded function selector - method name
    /// 7. Encoded arguments
    /// 8. Gas to be forwarded `(1 * `[`WORD_SIZE`]`)` - Optional
    ///
    /// Appends to `buffer` and returns the absolute offsets (relative to
    /// `memory_offset`, where the data will reside in memory) needed by the
    /// CALL opcode.
    pub fn encode(&self, memory_offset: usize, buffer: &mut Vec<u8>) -> CallOpcodeParamsOffset {
        let amount_offset = memory_offset;
        let asset_id_offset = amount_offset + WORD_SIZE;
        let call_data_offset = asset_id_offset + AssetId::LEN;
        // call data = contract id + the two offset words (selector, args)
        let encoded_selector_offset = call_data_offset + ContractId::LEN + 2 * WORD_SIZE;
        let encoded_args_offset = encoded_selector_offset + self.fn_selector_encoded.len();

        buffer.extend(self.amount.to_be_bytes()); // 1. Amount

        let asset_id = self.asset_id;
        buffer.extend(asset_id.iter()); // 2. Asset ID

        buffer.extend(self.contract_id.as_ref()); // 3. Contract ID

        buffer.extend((encoded_selector_offset as Word).to_be_bytes()); // 4. Fun. selector offset

        buffer.extend((encoded_args_offset as Word).to_be_bytes()); // 5. Calldata offset

        buffer.extend(&self.fn_selector_encoded); // 6. Encoded function selector

        let encoded_args_len = self.encoded_args.len();

        buffer.extend(&self.encoded_args); // 7. Encoded arguments

        let gas_forwarded_offset = self.gas_forwarded.map(|gf| {
            buffer.extend((gf as Word).to_be_bytes()); // 8. Gas to be forwarded - Optional

            encoded_args_offset + encoded_args_len
        });

        CallOpcodeParamsOffset {
            amount_offset,
            asset_id_offset,
            gas_forwarded_offset,
            call_data_offset,
        }
    }

    /// Inverse of [`Self::encode`]: parses one call's script data.
    ///
    /// `gas_fwd` must state whether the data ends with a forwarded-gas word,
    /// since that cannot be inferred from the bytes alone.
    pub fn decode(data: &[u8], gas_fwd: bool) -> Result<Self> {
        let mut data = WasmFriendlyCursor::new(data);

        let amount = u64::from_be_bytes(data.consume_fixed("amount")?);

        let asset_id = AssetId::new(data.consume_fixed("asset id")?);

        let contract_id = ContractId::new(data.consume_fixed("contract id")?);

        // the two offset words are recomputed on encode, so they are skipped here
        let _ = data.consume(8, "function selector offset")?;

        let _ = data.consume(8, "encoded args offset")?;

        let fn_selector = {
            let fn_selector_len = {
                let bytes = data.consume_fixed("function selector length")?;

                u64::from_be_bytes(bytes) as usize
            };

            data.consume(fn_selector_len, "function selector")?.to_vec()
        };

        // with gas forwarding, the final word is the forwarded gas amount
        let (encoded_args, gas_forwarded) = if gas_fwd {
            let encoded_args = data
                .consume(data.unconsumed().saturating_sub(WORD_SIZE), "encoded_args")?
                .to_vec();

            let gas_fwd = { u64::from_be_bytes(data.consume_fixed("forwarded gas")?) };

            (encoded_args, Some(gas_fwd))
        } else {
            (data.consume_all().to_vec(), None)
        };

        Ok(ContractCallData {
            amount,
            asset_id,
            contract_id,
            fn_selector_encoded: fn_selector,
            encoded_args,
            gas_forwarded,
        })
    }
}
#[derive(Default)]
/// Specifies offsets of [`Opcode::CALL`][`fuel_asm::Opcode::CALL`] parameters stored in the script
/// data from which they can be loaded into registers
pub struct CallOpcodeParamsOffset {
    // offset of the call data (contract id + selector/args offset words)
    pub call_data_offset: usize,
    // offset of the forwarded-amount word
    pub amount_offset: usize,
    // offset of the forwarded asset id
    pub asset_id_offset: usize,
    // offset of the forwarded-gas word; `None` when gas is not explicitly forwarded
    pub gas_forwarded_offset: Option<usize>,
}
// Creates a contract that loads the specified blobs into memory and delegates the call to the code contained in the blobs.
/// Generates a "loader" contract's assembly: at runtime it pushes the contents
/// of every blob in `blob_ids` (in order) onto the stack and jumps into the
/// loaded code.
///
/// # Errors
/// Fails if `blob_ids` holds more than `u32::MAX` entries.
pub fn loader_contract_asm(blob_ids: &[[u8; 32]]) -> Result<Vec<u8>> {
    const BLOB_ID_SIZE: u16 = 32;
    let get_instructions = |num_of_instructions, num_of_blobs| {
        // There are 2 main steps:
        // 1. Load the blob contents into memory
        // 2. Jump to the beginning of the memory where the blobs were loaded
        // After that the execution continues normally with the loaded contract reading our
        // prepared fn selector and jumps to the selected contract method.
        [
            // 1. Load the blob contents into memory
            // Find the start of the hardcoded blob IDs, which are located after the code ends.
            op::move_(0x10, RegId::PC),
            // 0x10 to hold the address of the current blob ID.
            op::addi(0x10, 0x10, num_of_instructions * Instruction::SIZE as u16),
            // The contract is going to be loaded from the current value of SP onwards, save
            // the location into 0x16 so we can jump into it later on.
            op::move_(0x16, RegId::SP),
            // Loop counter.
            op::movi(0x13, num_of_blobs),
            // LOOP starts here.
            // 0x11 to hold the size of the current blob.
            op::bsiz(0x11, 0x10),
            // Push the blob contents onto the stack.
            op::ldc(0x10, 0, 0x11, 1),
            // Move on to the next blob.
            op::addi(0x10, 0x10, BLOB_ID_SIZE),
            // Decrement the loop counter.
            op::subi(0x13, 0x13, 1),
            // Jump backwards (3+1) instructions if the counter has not reached 0.
            op::jnzb(0x13, RegId::ZERO, 3),
            // 2. Jump into the memory where the contract is loaded.
            // What follows is called _jmp_mem by the sway compiler.
            // Subtract the address contained in IS because jmp will add it back.
            op::sub(0x16, 0x16, RegId::IS),
            // jmp will multiply by 4, so we need to divide to cancel that out.
            op::divi(0x16, 0x16, 4),
            // Jump to the start of the contract we loaded.
            op::jmp(0x16),
        ]
    };

    // instantiate once with dummy values just to learn the instruction count
    let num_of_instructions = u16::try_from(get_instructions(0, 0).len())
        .expect("to never have more than u16::MAX instructions");

    let num_of_blobs = u32::try_from(blob_ids.len()).map_err(|_| {
        error!(
            Other,
            "the number of blobs ({}) exceeds the maximum number of blobs supported: {}",
            blob_ids.len(),
            u32::MAX
        )
    })?;

    let instruction_bytes = get_instructions(num_of_instructions, num_of_blobs)
        .into_iter()
        .flat_map(|instruction| instruction.to_bytes());

    // the blob ids are appended verbatim after the instructions
    let blob_bytes = blob_ids.iter().flatten().copied();

    Ok(instruction_bytes.chain(blob_bytes).collect())
}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/responses/submit.rs | packages/fuels-programs/src/responses/submit.rs | use std::fmt::Debug;
use fuel_types::Bytes32;
use fuels_accounts::Account;
use fuels_core::{
traits::{Parameterize, Tokenizable},
types::errors::Result,
};
use crate::{
calls::{
CallHandler, ContractCall,
traits::{ContractDependencyConfigurator, ResponseParser, TransactionTuner},
},
responses::CallResponse,
};
/// Represents the response of a submitted transaction.
///
/// Holds the transaction ID (`tx_id`) of the submitted transaction and the
/// call handler that manages the type of call (contract or script).
///
/// # Type Parameters
///
/// - `A`: The account type associated with the transaction.
/// - `C`: The call type.
/// - `T`: The data type representing the response value.
#[derive(Debug)]
pub struct SubmitResponse<A, C, T> {
    // id of the already-submitted transaction
    tx_id: Bytes32,
    // used to later fetch the tx status and decode the response
    call_handler: CallHandler<A, C, T>,
}
impl<A, C, T> SubmitResponse<A, C, T>
where
    A: Account,
    C: ContractDependencyConfigurator + TransactionTuner + ResponseParser,
    T: Tokenizable + Parameterize + Debug,
{
    /// Wraps an already-submitted transaction id together with its call handler.
    pub fn new(tx_id: Bytes32, call_handler: CallHandler<A, C, T>) -> Self {
        Self {
            tx_id,
            call_handler,
        }
    }

    /// Fetches the transaction status from the provider and decodes the call response.
    pub async fn response(self) -> Result<CallResponse<T>> {
        let Self { tx_id, call_handler } = self;

        let tx_status = call_handler
            .account
            .try_provider()?
            .tx_status(&tx_id)
            .await?;

        call_handler.get_response(tx_status)
    }

    /// Id of the submitted transaction.
    pub fn tx_id(&self) -> Bytes32 {
        self.tx_id
    }
}
/// Represents the response of a submitted transaction bundling multiple contract calls.
impl<A: Account> SubmitResponse<A, Vec<ContractCall>, ()> {
    /// Wraps an already-submitted transaction id together with its multi-call handler.
    pub fn new(tx_id: Bytes32, call_handler: CallHandler<A, Vec<ContractCall>, ()>) -> Self {
        Self {
            tx_id,
            call_handler,
        }
    }

    /// Fetches the transaction status and decodes the batched call response as `T`.
    pub async fn response<T: Tokenizable + Debug>(self) -> Result<CallResponse<T>> {
        let Self { tx_id, call_handler } = self;

        let tx_status = call_handler
            .account
            .try_provider()?
            .tx_status(&tx_id)
            .await?;

        call_handler.get_response(tx_status)
    }

    /// Id of the submitted transaction.
    pub fn tx_id(&self) -> Bytes32 {
        self.tx_id
    }
}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/responses/call.rs | packages/fuels-programs/src/responses/call.rs | use std::fmt::Debug;
use fuel_tx::TxId;
use fuels_core::{
codec::{LogDecoder, LogResult},
traits::{Parameterize, Tokenizable},
types::{errors::Result, tx_status::Success},
};
/// [`CallResponse`] is a struct that is returned by a call to the contract or script. Its `value`
/// field holds the decoded typed value returned by the contract's method. The `tx_status` field
/// holds the successful transaction status, including all receipts returned by the call.
// NOTE: the ANCHOR comments below are referenced by external documentation — keep them intact.
#[derive(Clone, Debug)]
// ANCHOR: call_response
pub struct CallResponse<D> {
    pub value: D,
    pub tx_status: Success,
    pub tx_id: Option<TxId>,
    pub log_decoder: LogDecoder,
}
// ANCHOR_END: call_response
impl<D> CallResponse<D> {
    /// Decodes every log contained in this response's receipts.
    pub fn decode_logs(&self) -> LogResult {
        let receipts = &self.tx_status.receipts;
        self.log_decoder.decode_logs(receipts)
    }

    /// Decodes the logs of type `T` contained in this response's receipts.
    pub fn decode_logs_with_type<T: Tokenizable + Parameterize + 'static>(&self) -> Result<Vec<T>> {
        let receipts = &self.tx_status.receipts;
        self.log_decoder.decode_logs_with_type::<T>(receipts)
    }
}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/contract/regular.rs | packages/fuels-programs/src/contract/regular.rs | use std::{default::Default, fmt::Debug, path::Path};
use fuel_tx::{StorageSlot, TxId};
use fuels_accounts::Account;
use fuels_core::{
Configurables,
constants::WORD_SIZE,
error,
types::{
Bytes32, ContractId, Salt,
errors::{Context, Result},
transaction::{Transaction, TxPolicies},
transaction_builders::{Blob, CreateTransactionBuilder},
tx_status::Success,
},
};
use super::{
BlobsNotUploaded, Contract, Loader, StorageConfiguration, compute_contract_id_and_state_root,
validate_path_and_extension,
};
use crate::DEFAULT_MAX_FEE_ESTIMATION_TOLERANCE;
#[derive(Clone, Debug)]
/// Outcome of a contract deployment (see [`Contract::deploy`]).
pub struct DeployResponse {
    // status of the deployment transaction
    // NOTE(review): optional — presumably `None` when no transaction was actually
    // submitted; confirm against the constructors.
    pub tx_status: Option<Success>,
    // id of the deployment transaction, when one was submitted
    pub tx_id: Option<TxId>,
    // id of the deployed contract
    pub contract_id: ContractId,
}
// In a mod so that we eliminate the footgun of getting the private `code` field without applying
// configurables
mod code_types {
    use fuels_core::Configurables;

    /// Raw contract code plus the configurable-constant overrides to apply to it.
    ///
    /// The code field is private on purpose: it is only readable through
    /// [`Regular::code`], which always applies the configurables first.
    #[derive(Debug, Clone, PartialEq)]
    pub struct Regular {
        code: Vec<u8>,
        configurables: Configurables,
    }

    impl Regular {
        pub(crate) fn new(code: Vec<u8>, configurables: Configurables) -> Self {
            Self {
                code,
                configurables,
            }
        }

        /// Replaces the code, keeping the configurables.
        pub(crate) fn with_code(self, code: Vec<u8>) -> Self {
            Self { code, ..self }
        }

        /// Replaces the configurables, keeping the code.
        pub(crate) fn with_configurables(self, configurables: Configurables) -> Self {
            Self {
                configurables,
                ..self
            }
        }

        /// Returns a copy of the code with the configurable constants applied.
        pub(crate) fn code(&self) -> Vec<u8> {
            let mut code = self.code.clone();
            self.configurables.update_constants_in(&mut code);
            code
        }
    }
}
pub use code_types::*;
impl Contract<Regular> {
/// Returns this contract with its code replaced; salt and storage slots are kept.
pub fn with_code(self, code: Vec<u8>) -> Self {
    let Self {
        code: old_code,
        salt,
        storage_slots,
    } = self;

    Self {
        code: old_code.with_code(code),
        salt,
        storage_slots,
    }
}
/// Returns this contract with the given configurables, which will be applied
/// to the code whenever it is read.
pub fn with_configurables(self, configurables: impl Into<Configurables>) -> Self {
    Self {
        code: self.code.with_configurables(configurables.into()),
        ..self
    }
}
/// The contract code with configurables applied.
pub fn code(&self) -> Vec<u8> {
    self.code.code()
}
/// The contract id derived from code, salt and storage slots.
pub fn contract_id(&self) -> ContractId {
    self.compute_roots().0
}
/// The code root, as computed by `compute_contract_id_and_state_root`.
pub fn code_root(&self) -> Bytes32 {
    self.compute_roots().1
}
/// The state root, as computed by `compute_contract_id_and_state_root`.
pub fn state_root(&self) -> Bytes32 {
    self.compute_roots().2
}
// computes (contract_id, code_root, state_root) from the configurables-applied
// code, the salt and the storage slots
fn compute_roots(&self) -> (ContractId, Bytes32, Bytes32) {
    compute_contract_id_and_state_root(&self.code(), &self.salt, &self.storage_slots)
}
/// Loads a contract from a binary file. Salt and storage slots are loaded as well, depending on the configuration provided.
pub fn load_from(
binary_filepath: impl AsRef<Path>,
config: LoadConfiguration,
) -> Result<Contract<Regular>> {
let binary_filepath = binary_filepath.as_ref();
validate_path_and_extension(binary_filepath, "bin")?;
let binary = std::fs::read(binary_filepath).map_err(|e| {
std::io::Error::new(
e.kind(),
format!("failed to read binary: {binary_filepath:?}: {e}"),
)
})?;
let storage_slots = super::determine_storage_slots(config.storage, binary_filepath)?;
Ok(Contract {
code: Regular::new(binary, config.configurables),
salt: config.salt,
storage_slots,
})
}
/// Creates a regular contract with the given code, salt, and storage slots.
pub fn regular(
code: Vec<u8>,
salt: Salt,
storage_slots: Vec<StorageSlot>,
) -> Contract<Regular> {
Contract {
code: Regular::new(code, Configurables::default()),
salt,
storage_slots,
}
}
/// Deploys a compiled contract to a running node.
/// To deploy a contract, you need an account with enough assets to pay for deployment.
/// This account will also receive the change.
pub async fn deploy(
self,
account: &impl Account,
tx_policies: TxPolicies,
) -> Result<DeployResponse> {
let contract_id = self.contract_id();
let state_root = self.state_root();
let salt = self.salt;
let storage_slots = self.storage_slots;
let mut tb = CreateTransactionBuilder::prepare_contract_deployment(
self.code.code(),
contract_id,
state_root,
salt,
storage_slots.to_vec(),
tx_policies,
)
.with_max_fee_estimation_tolerance(DEFAULT_MAX_FEE_ESTIMATION_TOLERANCE);
account.add_witnesses(&mut tb)?;
account
.adjust_for_fee(&mut tb, 0)
.await
.context("failed to adjust inputs to cover for missing base asset")?;
let provider = account.try_provider()?;
let consensus_parameters = provider.consensus_parameters().await?;
let tx = tb.build(provider).await?;
let tx_id = Some(tx.id(consensus_parameters.chain_id()));
let tx_status = provider.send_transaction_and_await_commit(tx).await?;
Ok(DeployResponse {
tx_status: Some(tx_status.take_success_checked(None)?),
tx_id,
contract_id,
})
}
/// Deploys a compiled contract to a running node if a contract with
/// the corresponding [`ContractId`] doesn't exist.
pub async fn deploy_if_not_exists(
self,
account: &impl Account,
tx_policies: TxPolicies,
) -> Result<DeployResponse> {
let contract_id = self.contract_id();
let provider = account.try_provider()?;
if provider.contract_exists(&contract_id).await? {
Ok(DeployResponse {
tx_status: None,
tx_id: None,
contract_id,
})
} else {
self.deploy(account, tx_policies).await
}
}
/// Converts a regular contract into a loader contract, splitting the code into blobs.
pub fn convert_to_loader(
self,
max_words_per_blob: usize,
) -> Result<Contract<Loader<BlobsNotUploaded>>> {
if max_words_per_blob == 0 {
return Err(error!(Other, "blob size must be greater than 0"));
}
let blobs = self
.code()
.chunks(max_words_per_blob.saturating_mul(WORD_SIZE))
.map(|chunk| Blob::new(chunk.to_vec()))
.collect();
Contract::loader_from_blobs(blobs, self.salt, self.storage_slots)
}
/// Deploys the contract either as a regular contract or as a loader contract if it exceeds the size limit.
pub async fn smart_deploy(
self,
account: &impl Account,
tx_policies: TxPolicies,
max_words_per_blob: usize,
) -> Result<DeployResponse> {
let provider = account.try_provider()?;
let max_contract_size = provider
.consensus_parameters()
.await?
.contract_params()
.contract_max_size() as usize;
if self.code().len() <= max_contract_size {
self.deploy(account, tx_policies).await
} else {
self.convert_to_loader(max_words_per_blob)?
.deploy(account, tx_policies)
.await
}
}
}
/// Configuration for contract deployment.
#[derive(Debug, Clone, Default)]
pub struct LoadConfiguration {
pub(crate) storage: StorageConfiguration,
pub(crate) configurables: Configurables,
pub(crate) salt: Salt,
}
impl LoadConfiguration {
pub fn new(
storage: StorageConfiguration,
configurables: impl Into<Configurables>,
salt: impl Into<Salt>,
) -> Self {
Self {
storage,
configurables: configurables.into(),
salt: salt.into(),
}
}
pub fn with_storage_configuration(mut self, storage: StorageConfiguration) -> Self {
self.storage = storage;
self
}
pub fn with_configurables(mut self, configurables: impl Into<Configurables>) -> Self {
self.configurables = configurables.into();
self
}
pub fn with_salt(mut self, salt: impl Into<Salt>) -> Self {
self.salt = salt.into();
self
}
}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/contract/storage.rs | packages/fuels-programs/src/contract/storage.rs | use std::{
collections::HashMap,
default::Default,
fmt::Debug,
io,
path::{Path, PathBuf},
};
use fuel_tx::{Bytes32, StorageSlot};
use fuels_core::types::errors::{Result, error};
/// Configuration for contract storage
#[derive(Debug, Clone)]
pub struct StorageConfiguration {
autoload_storage: bool,
slot_overrides: StorageSlots,
}
impl Default for StorageConfiguration {
fn default() -> Self {
Self {
autoload_storage: true,
slot_overrides: Default::default(),
}
}
}
impl StorageConfiguration {
pub fn new(autoload_enabled: bool, slots: impl IntoIterator<Item = StorageSlot>) -> Self {
let config = Self {
autoload_storage: autoload_enabled,
slot_overrides: Default::default(),
};
config.add_slot_overrides(slots)
}
/// If enabled will try to automatically discover and load the storage configuration from the
/// storage config json file.
pub fn with_autoload(mut self, enabled: bool) -> Self {
self.autoload_storage = enabled;
self
}
pub fn autoload_enabled(&self) -> bool {
self.autoload_storage
}
/// Slots added via [`add_slot_overrides`] will override any
/// existing slots with matching keys.
pub fn add_slot_overrides(
mut self,
storage_slots: impl IntoIterator<Item = StorageSlot>,
) -> Self {
self.slot_overrides.add_overrides(storage_slots);
self
}
/// Slots added via [`add_slot_overrides_from_file`] will override any
/// existing slots with matching keys.
///
/// `path` - path to a JSON file containing the storage slots.
pub fn add_slot_overrides_from_file(mut self, path: impl AsRef<Path>) -> Result<Self> {
let slots = StorageSlots::load_from_file(path.as_ref())?;
self.slot_overrides.add_overrides(slots.into_iter());
Ok(self)
}
pub fn into_slots(self) -> impl Iterator<Item = StorageSlot> {
self.slot_overrides.into_iter()
}
}
#[derive(Debug, Clone, Default)]
pub(crate) struct StorageSlots {
storage_slots: HashMap<Bytes32, StorageSlot>,
}
impl StorageSlots {
fn from(storage_slots: impl IntoIterator<Item = StorageSlot>) -> Self {
let pairs = storage_slots.into_iter().map(|slot| (*slot.key(), slot));
Self {
storage_slots: pairs.collect(),
}
}
pub(crate) fn add_overrides(
&mut self,
storage_slots: impl IntoIterator<Item = StorageSlot>,
) -> &mut Self {
let pairs = storage_slots.into_iter().map(|slot| (*slot.key(), slot));
self.storage_slots.extend(pairs);
self
}
pub(crate) fn load_from_file(storage_path: impl AsRef<Path>) -> Result<Self> {
let storage_path = storage_path.as_ref();
validate_path_and_extension(storage_path, "json")?;
let storage_json_string = std::fs::read_to_string(storage_path).map_err(|e| {
io::Error::new(
e.kind(),
format!("failed to read storage slots from: {storage_path:?}: {e}"),
)
})?;
let decoded_slots = serde_json::from_str::<Vec<StorageSlot>>(&storage_json_string)?;
Ok(StorageSlots::from(decoded_slots))
}
pub(crate) fn into_iter(self) -> impl Iterator<Item = StorageSlot> {
self.storage_slots.into_values()
}
}
pub(crate) fn determine_storage_slots(
storage_config: StorageConfiguration,
binary_filepath: &Path,
) -> Result<Vec<StorageSlot>> {
let autoload_enabled = storage_config.autoload_enabled();
let user_overrides = storage_config.into_slots().collect::<Vec<_>>();
let slots = if autoload_enabled {
let mut slots = autoload_storage_slots(binary_filepath)?;
slots.add_overrides(user_overrides);
slots.into_iter().collect()
} else {
user_overrides
};
Ok(slots)
}
pub(crate) fn autoload_storage_slots(contract_binary: &Path) -> Result<StorageSlots> {
let storage_file = expected_storage_slots_filepath(contract_binary)
.ok_or_else(|| error!(Other, "could not determine storage slots file"))?;
StorageSlots::load_from_file(&storage_file)
.map_err(|_| error!(Other, "could not autoload storage slots from file: {storage_file:?}. \
Either provide the file or disable autoloading in `StorageConfiguration`"))
}
pub(crate) fn expected_storage_slots_filepath(contract_binary: &Path) -> Option<PathBuf> {
let dir = contract_binary.parent()?;
let binary_filename = contract_binary.file_stem()?.to_str()?;
Some(dir.join(format!("{binary_filename}-storage_slots.json")))
}
pub(crate) fn validate_path_and_extension(file_path: &Path, extension: &str) -> Result<()> {
if !file_path.exists() {
return Err(error!(IO, "file {file_path:?} does not exist"));
}
let path_extension = file_path
.extension()
.ok_or_else(|| error!(Other, "could not extract extension from: {file_path:?}"))?;
if extension != path_extension {
return Err(error!(
Other,
"expected {file_path:?} to have '.{extension}' extension"
));
}
Ok(())
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use super::*;
#[test]
fn merging_overrides_storage_slots() {
// given
let make_slot = |id, value| StorageSlot::new([id; 32].into(), [value; 32].into());
let slots = (1..3).map(|id| make_slot(id, 100));
let original_config = StorageConfiguration::new(false, slots);
let overlapping_slots = (2..4).map(|id| make_slot(id, 200));
// when
let original_config = original_config.add_slot_overrides(overlapping_slots);
// then
assert_eq!(
HashSet::from_iter(original_config.slot_overrides.into_iter()),
HashSet::from([make_slot(1, 100), make_slot(2, 200), make_slot(3, 200)])
);
}
}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
FuelLabs/fuels-rs | https://github.com/FuelLabs/fuels-rs/blob/865e00c295de8b4a0a1ef7ac926c3c8266d5151b/packages/fuels-programs/src/contract/loader.rs | packages/fuels-programs/src/contract/loader.rs | use std::collections::HashSet;
use fuel_tx::{Bytes32, ContractId, Salt, StorageSlot};
use fuels_accounts::Account;
use fuels_core::{
constants::WORD_SIZE,
types::{
errors::{Context, Result, error},
transaction::TxPolicies,
transaction_builders::{Blob, BlobId, BlobTransactionBuilder, TransactionBuilder},
},
};
use super::{Contract, DeployResponse, Regular, compute_contract_id_and_state_root};
use crate::{DEFAULT_MAX_FEE_ESTIMATION_TOLERANCE, assembly::contract_call::loader_contract_asm};
#[derive(Debug, Clone)]
pub struct BlobsUploaded {
blob_ids: Vec<BlobId>,
}
#[derive(Debug, Clone)]
pub struct BlobsNotUploaded {
blobs: Vec<Blob>,
}
#[derive(Debug, Clone)]
pub struct Loader<Blobs> {
as_blobs: Blobs,
}
impl Contract<Loader<BlobsNotUploaded>> {
pub fn code(&self) -> Vec<u8> {
let ids: Vec<_> = self.blob_ids();
loader_contract_asm(&ids)
.expect("a contract to be creatable due to the check done in loader_from_blobs")
}
pub fn contract_id(&self) -> ContractId {
self.compute_roots().0
}
pub fn code_root(&self) -> Bytes32 {
self.compute_roots().1
}
pub fn state_root(&self) -> Bytes32 {
self.compute_roots().2
}
fn compute_roots(&self) -> (ContractId, Bytes32, Bytes32) {
compute_contract_id_and_state_root(&self.code(), &self.salt, &self.storage_slots)
}
/// Creates a loader contract for the code found in `blobs`. Calling `deploy` on this contract
/// does two things:
/// 1. Uploads the code blobs.
/// 2. Deploys the loader contract.
///
/// The loader contract, when executed, will load all the given blobs into memory and delegate the call to the original contract code contained in the blobs.
pub fn loader_from_blobs(
blobs: Vec<Blob>,
salt: Salt,
storage_slots: Vec<StorageSlot>,
) -> Result<Self> {
if blobs.is_empty() {
return Err(error!(Other, "must provide at least one blob"));
}
let idx_of_last_blob = blobs.len().saturating_sub(1);
let idx_of_offender = blobs.iter().enumerate().find_map(|(idx, blob)| {
(blob.len() % WORD_SIZE != 0 && idx != idx_of_last_blob).then_some(idx)
});
if let Some(idx) = idx_of_offender {
return Err(error!(
Other,
"blob {}/{} has a size of {} bytes, which is not a multiple of {WORD_SIZE}",
idx.saturating_add(1),
blobs.len(),
blobs[idx].len()
));
}
let ids = blobs.iter().map(|blob| blob.id()).collect::<Vec<_>>();
// Validate that the loader contract can be created.
loader_contract_asm(&ids)?;
Ok(Self {
code: Loader {
as_blobs: BlobsNotUploaded { blobs },
},
salt,
storage_slots,
})
}
pub fn blobs(&self) -> &[Blob] {
self.code.as_blobs.blobs.as_slice()
}
pub fn blob_ids(&self) -> Vec<BlobId> {
self.code
.as_blobs
.blobs
.iter()
.map(|blob| blob.id())
.collect()
}
/// Uploads the blobs associated with this contract. Calling `deploy` on the result will only
/// deploy the loader contract.
pub async fn upload_blobs(
self,
account: &impl Account,
tx_policies: TxPolicies,
) -> Result<Contract<Loader<BlobsUploaded>>> {
let provider = account.try_provider()?;
let all_blob_ids = self.blob_ids();
let mut already_uploaded = HashSet::new();
for blob in self.code.as_blobs.blobs {
let id = blob.id();
if already_uploaded.contains(&id) {
continue;
}
if provider.blob_exists(id).await? {
already_uploaded.insert(id);
continue;
}
let mut tb = BlobTransactionBuilder::default()
.with_blob(blob)
.with_tx_policies(tx_policies)
.with_max_fee_estimation_tolerance(DEFAULT_MAX_FEE_ESTIMATION_TOLERANCE);
account
.adjust_for_fee(&mut tb, 0)
.await
.context("failed to adjust inputs to cover for missing base asset")?;
account.add_witnesses(&mut tb)?;
let tx = tb.build(provider).await?;
let tx_status_response = provider.send_transaction_and_await_commit(tx).await;
tx_status_response.and_then(|response| response.check(None))?;
already_uploaded.insert(id);
}
Contract::loader_from_blob_ids(all_blob_ids, self.salt, self.storage_slots)
}
/// Deploys the loader contract after uploading the code blobs.
pub async fn deploy(
self,
account: &impl Account,
tx_policies: TxPolicies,
) -> Result<DeployResponse> {
self.upload_blobs(account, tx_policies)
.await?
.deploy(account, tx_policies)
.await
}
/// Deploys the loader contract after uploading the code blobs,
/// if there is no contract with this ContractId Already.
pub async fn deploy_if_not_exists(
self,
account: &impl Account,
tx_policies: TxPolicies,
) -> Result<DeployResponse> {
self.upload_blobs(account, tx_policies)
.await?
.deploy_if_not_exists(account, tx_policies)
.await
}
/// Reverts the contract from a loader contract back to a regular contract.
pub fn revert_to_regular(self) -> Contract<Regular> {
let code = self
.code
.as_blobs
.blobs
.into_iter()
.flat_map(Vec::from)
.collect();
Contract::regular(code, self.salt, self.storage_slots)
}
}
impl Contract<Loader<BlobsUploaded>> {
pub fn code(&self) -> Vec<u8> {
loader_contract_asm(&self.code.as_blobs.blob_ids)
.expect("a contract to be creatable due to the check done in loader_for_blobs")
}
pub fn contract_id(&self) -> ContractId {
self.compute_roots().0
}
pub fn code_root(&self) -> Bytes32 {
self.compute_roots().1
}
pub fn state_root(&self) -> Bytes32 {
self.compute_roots().2
}
pub fn compute_roots(&self) -> (ContractId, Bytes32, Bytes32) {
compute_contract_id_and_state_root(&self.code(), &self.salt, &self.storage_slots)
}
/// Creates a loader contract using previously uploaded blobs.
///
/// The contract code has been uploaded in blobs with [`BlobId`]s specified in `blob_ids`.
/// This will create a loader contract that, when deployed and executed, will load all the specified blobs into memory and delegate the call to the code contained in the blobs.
pub fn loader_from_blob_ids(
blob_ids: Vec<BlobId>,
salt: Salt,
storage_slots: Vec<StorageSlot>,
) -> Result<Self> {
if blob_ids.is_empty() {
return Err(error!(Other, "must provide at least one blob"));
}
// Validate that the loader contract can be created.
loader_contract_asm(&blob_ids)?;
Ok(Self {
code: Loader {
as_blobs: BlobsUploaded { blob_ids },
},
salt,
storage_slots,
})
}
pub fn blob_ids(&self) -> &[BlobId] {
&self.code.as_blobs.blob_ids
}
/// Deploys the loader contract.
pub async fn deploy(
self,
account: &impl Account,
tx_policies: TxPolicies,
) -> Result<DeployResponse> {
Contract::regular(self.code(), self.salt, self.storage_slots)
.deploy(account, tx_policies)
.await
}
pub async fn deploy_if_not_exists(
self,
account: &impl Account,
tx_policies: TxPolicies,
) -> Result<DeployResponse> {
Contract::regular(self.code(), self.salt, self.storage_slots)
.deploy_if_not_exists(account, tx_policies)
.await
}
}
| rust | Apache-2.0 | 865e00c295de8b4a0a1ef7ac926c3c8266d5151b | 2026-01-04T15:31:59.450823Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/build.rs | build.rs | fn main() {
set_git_revision_hash();
set_windows_exe_options();
}
/// Embed a Windows manifest and set some linker options.
///
/// The main reason for this is to enable long path support on Windows. This
/// still, I believe, requires enabling long path support in the registry. But
/// if that's enabled, then this will let ripgrep use C:\... style paths that
/// are longer than 260 characters.
fn set_windows_exe_options() {
static MANIFEST: &str = "pkg/windows/Manifest.xml";
let Ok(target_os) = std::env::var("CARGO_CFG_TARGET_OS") else { return };
let Ok(target_env) = std::env::var("CARGO_CFG_TARGET_ENV") else { return };
if !(target_os == "windows" && target_env == "msvc") {
return;
}
let Ok(mut manifest) = std::env::current_dir() else { return };
manifest.push(MANIFEST);
let Some(manifest) = manifest.to_str() else { return };
println!("cargo:rerun-if-changed={MANIFEST}");
// Embed the Windows application manifest file.
println!("cargo:rustc-link-arg-bin=rg=/MANIFEST:EMBED");
println!("cargo:rustc-link-arg-bin=rg=/MANIFESTINPUT:{manifest}");
// Turn linker warnings into errors. Helps debugging, otherwise the
// warnings get squashed (I believe).
println!("cargo:rustc-link-arg-bin=rg=/WX");
}
/// Make the current git hash available to the build as the environment
/// variable `RIPGREP_BUILD_GIT_HASH`.
fn set_git_revision_hash() {
use std::process::Command;
let args = &["rev-parse", "--short=10", "HEAD"];
let output = Command::new("git").args(args).output();
match output {
Ok(output) => {
let rev =
String::from_utf8_lossy(&output.stdout).trim().to_string();
if rev.is_empty() {
println!(
"cargo:warning=output from `git rev-parse` is empty, \
so skipping embedding of commit hash"
);
return;
}
println!("cargo:rustc-env=RIPGREP_BUILD_GIT_HASH={rev}");
}
Err(e) => {
println!(
"cargo:warning=failed to run `git rev-parse`, \
so skipping embedding of commit hash: {e}"
);
}
}
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/tests/feature.rs | tests/feature.rs | use crate::hay::{SHERLOCK, SHERLOCK_CRLF};
use crate::util::{Dir, TestCommand, sort_lines};
// See: https://github.com/BurntSushi/ripgrep/issues/1
rgtest!(f1_sjis, |dir: Dir, mut cmd: TestCommand| {
dir.create_bytes(
"foo",
b"\x84Y\x84u\x84\x82\x84|\x84\x80\x84{ \x84V\x84\x80\x84|\x84}\x84\x83"
);
cmd.arg("-Esjis").arg("Шерлок Холмс");
eqnice!("foo:Шерлок Холмс\n", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/1
rgtest!(f1_utf16_auto, |dir: Dir, mut cmd: TestCommand| {
dir.create_bytes(
"foo",
b"\xff\xfe(\x045\x04@\x04;\x04>\x04:\x04 \x00%\x04>\x04;\x04<\x04A\x04"
);
cmd.arg("Шерлок Холмс");
eqnice!("foo:Шерлок Холмс\n", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/1
rgtest!(f1_utf16_explicit, |dir: Dir, mut cmd: TestCommand| {
dir.create_bytes(
"foo",
b"\xff\xfe(\x045\x04@\x04;\x04>\x04:\x04 \x00%\x04>\x04;\x04<\x04A\x04"
);
cmd.arg("-Eutf-16le").arg("Шерлок Холмс");
eqnice!("foo:Шерлок Холмс\n", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/1
rgtest!(f1_eucjp, |dir: Dir, mut cmd: TestCommand| {
dir.create_bytes(
"foo",
b"\xa7\xba\xa7\xd6\xa7\xe2\xa7\xdd\xa7\xe0\xa7\xdc \xa7\xb7\xa7\xe0\xa7\xdd\xa7\xde\xa7\xe3"
);
cmd.arg("-Eeuc-jp").arg("Шерлок Холмс");
eqnice!("foo:Шерлок Холмс\n", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/1
rgtest!(f1_unknown_encoding, |_: Dir, mut cmd: TestCommand| {
cmd.arg("-Efoobar").assert_non_empty_stderr();
});
// See: https://github.com/BurntSushi/ripgrep/issues/1
rgtest!(f1_replacement_encoding, |_: Dir, mut cmd: TestCommand| {
cmd.arg("-Ecsiso2022kr").assert_non_empty_stderr();
});
// See: https://github.com/BurntSushi/ripgrep/issues/7
rgtest!(f7, |dir: Dir, mut cmd: TestCommand| {
dir.create("sherlock", SHERLOCK);
dir.create("pat", "Sherlock\nHolmes");
let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, success in the province of detective work must always
be, to a very large extent, the result of luck. Sherlock Holmes
";
eqnice!(expected, cmd.arg("-fpat").arg("sherlock").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/7
rgtest!(f7_stdin, |dir: Dir, mut cmd: TestCommand| {
dir.create("sherlock", SHERLOCK);
let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
eqnice!(expected, cmd.arg("-f-").pipe(b"Sherlock"));
});
// See: https://github.com/BurntSushi/ripgrep/issues/20
rgtest!(f20_no_filename, |dir: Dir, mut cmd: TestCommand| {
dir.create("sherlock", SHERLOCK);
cmd.arg("--no-filename");
let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
eqnice!(expected, cmd.arg("--no-filename").arg("Sherlock").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/34
rgtest!(f34_only_matching, |dir: Dir, mut cmd: TestCommand| {
dir.create("sherlock", SHERLOCK);
let expected = "\
sherlock:Sherlock
sherlock:Sherlock
";
eqnice!(expected, cmd.arg("-o").arg("Sherlock").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/34
rgtest!(f34_only_matching_line_column, |dir: Dir, mut cmd: TestCommand| {
dir.create("sherlock", SHERLOCK);
let expected = "\
sherlock:1:57:Sherlock
sherlock:3:49:Sherlock
";
cmd.arg("-o").arg("--column").arg("-n").arg("Sherlock");
eqnice!(expected, cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/45
rgtest!(f45_relative_cwd, |dir: Dir, mut cmd: TestCommand| {
dir.create(".not-an-ignore", "foo\n/bar");
dir.create_dir("bar");
dir.create_dir("baz/bar");
dir.create_dir("baz/baz/bar");
dir.create("bar/test", "test");
dir.create("baz/bar/test", "test");
dir.create("baz/baz/bar/test", "test");
dir.create("baz/foo", "test");
dir.create("baz/test", "test");
dir.create("foo", "test");
dir.create("test", "test");
cmd.arg("-l").arg("test");
// First, get a baseline without applying ignore rules.
let expected = "
bar/test
baz/bar/test
baz/baz/bar/test
baz/foo
baz/test
foo
test
";
eqnice!(sort_lines(expected), sort_lines(&cmd.stdout()));
// Now try again with the ignore file activated.
cmd.arg("--ignore-file").arg(".not-an-ignore");
let expected = "
baz/bar/test
baz/baz/bar/test
baz/test
test
";
eqnice!(sort_lines(expected), sort_lines(&cmd.stdout()));
// Now do it again, but inside the baz directory. Since the ignore file
// is interpreted relative to the CWD, this will cause the /bar anchored
// pattern to filter out baz/bar, which is a subtle difference between true
// parent ignore files and manually specified ignore files.
let mut cmd = dir.command();
cmd.args(&["--ignore-file", "../.not-an-ignore", "-l", "test"]);
cmd.current_dir("baz");
let expected = "
baz/bar/test
test
";
eqnice!(sort_lines(expected), sort_lines(&cmd.stdout()));
});
// See: https://github.com/BurntSushi/ripgrep/issues/45
rgtest!(f45_precedence_with_others, |dir: Dir, mut cmd: TestCommand| {
dir.create(".not-an-ignore", "*.log");
dir.create(".ignore", "!imp.log");
dir.create("imp.log", "test");
dir.create("wat.log", "test");
cmd.arg("--ignore-file").arg(".not-an-ignore").arg("test");
eqnice!("imp.log:test\n", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/45
rgtest!(f45_precedence_internal, |dir: Dir, mut cmd: TestCommand| {
dir.create(".not-an-ignore1", "*.log");
dir.create(".not-an-ignore2", "!imp.log");
dir.create("imp.log", "test");
dir.create("wat.log", "test");
cmd.args(&[
"--ignore-file",
".not-an-ignore1",
"--ignore-file",
".not-an-ignore2",
"test",
]);
eqnice!("imp.log:test\n", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/68
rgtest!(f68_no_ignore_vcs, |dir: Dir, mut cmd: TestCommand| {
dir.create_dir(".git");
dir.create(".gitignore", "foo");
dir.create(".ignore", "bar");
dir.create("foo", "test");
dir.create("bar", "test");
eqnice!("foo:test\n", cmd.arg("--no-ignore-vcs").arg("test").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/70
rgtest!(f70_smart_case, |dir: Dir, mut cmd: TestCommand| {
dir.create("sherlock", SHERLOCK);
let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
eqnice!(expected, cmd.arg("-S").arg("sherlock").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/89
rgtest!(f89_files_with_matches, |dir: Dir, mut cmd: TestCommand| {
dir.create("sherlock", SHERLOCK);
cmd.arg("--null").arg("--files-with-matches").arg("Sherlock");
eqnice!("sherlock\x00", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/89
rgtest!(f89_files_without_match, |dir: Dir, mut cmd: TestCommand| {
dir.create("sherlock", SHERLOCK);
dir.create("file.py", "foo");
cmd.arg("--null").arg("--files-without-match").arg("Sherlock");
eqnice!("file.py\x00", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/89
rgtest!(f89_count, |dir: Dir, mut cmd: TestCommand| {
dir.create("sherlock", SHERLOCK);
cmd.arg("--null").arg("--count").arg("Sherlock");
eqnice!("sherlock\x002\n", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/89
rgtest!(f89_files, |dir: Dir, mut cmd: TestCommand| {
dir.create("sherlock", SHERLOCK);
eqnice!("sherlock\x00", cmd.arg("--null").arg("--files").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/89
rgtest!(f89_match, |dir: Dir, mut cmd: TestCommand| {
dir.create("sherlock", SHERLOCK);
let expected = "\
sherlock\x00For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock\x00Holmeses, success in the province of detective work must always
sherlock\x00be, to a very large extent, the result of luck. Sherlock Holmes
sherlock\x00can extract a clew from a wisp of straw or a flake of cigar ash;
";
eqnice!(expected, cmd.arg("--null").arg("-C1").arg("Sherlock").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/109
rgtest!(f109_max_depth, |dir: Dir, mut cmd: TestCommand| {
dir.create_dir("one");
dir.create("one/pass", "far");
dir.create_dir("one/too");
dir.create("one/too/many", "far");
cmd.arg("--maxdepth").arg("2").arg("far");
eqnice!("one/pass:far\n", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/124
rgtest!(f109_case_sensitive_part1, |dir: Dir, mut cmd: TestCommand| {
dir.create("foo", "tEsT");
cmd.arg("--smart-case").arg("--case-sensitive").arg("test").assert_err();
});
// See: https://github.com/BurntSushi/ripgrep/issues/124
rgtest!(f109_case_sensitive_part2, |dir: Dir, mut cmd: TestCommand| {
dir.create("foo", "tEsT");
cmd.arg("--ignore-case").arg("--case-sensitive").arg("test").assert_err();
});
// See: https://github.com/BurntSushi/ripgrep/issues/129
rgtest!(f129_matches, |dir: Dir, mut cmd: TestCommand| {
dir.create("foo", "test\ntest abcdefghijklmnopqrstuvwxyz test");
let expected = "foo:test\nfoo:[Omitted long matching line]\n";
eqnice!(expected, cmd.arg("-M26").arg("test").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/129
rgtest!(f129_context, |dir: Dir, mut cmd: TestCommand| {
dir.create("foo", "test\nabcdefghijklmnopqrstuvwxyz");
let expected = "foo:test\nfoo-[Omitted long context line]\n";
eqnice!(expected, cmd.arg("-M20").arg("-C1").arg("test").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/129
rgtest!(f129_replace, |dir: Dir, mut cmd: TestCommand| {
dir.create("foo", "test\ntest abcdefghijklmnopqrstuvwxyz test");
let expected = "foo:foo\nfoo:[Omitted long line with 2 matches]\n";
eqnice!(expected, cmd.arg("-M26").arg("-rfoo").arg("test").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/159
rgtest!(f159_max_count, |dir: Dir, mut cmd: TestCommand| {
dir.create("foo", "test\ntest");
eqnice!("foo:test\n", cmd.arg("-m1").arg("test").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/159
rgtest!(f159_max_count_zero, |dir: Dir, mut cmd: TestCommand| {
dir.create("foo", "test\ntest");
cmd.arg("-m0").arg("test").assert_err();
});
// See: https://github.com/BurntSushi/ripgrep/issues/196
rgtest!(f196_persistent_config, |dir: Dir, mut cmd: TestCommand| {
dir.create("sherlock", SHERLOCK);
cmd.arg("sherlock").arg("sherlock");
// Make sure we get no matches by default.
cmd.assert_err();
// Now add our config file, and make sure it impacts ripgrep.
dir.create(".ripgreprc", "--ignore-case");
cmd.cmd().env("RIPGREP_CONFIG_PATH", ".ripgreprc");
let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
eqnice!(expected, cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/243
rgtest!(f243_column_line, |dir: Dir, mut cmd: TestCommand| {
dir.create("foo", "test");
eqnice!("foo:1:1:test\n", cmd.arg("--column").arg("test").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/263
rgtest!(f263_sort_files, |dir: Dir, mut cmd: TestCommand| {
dir.create("foo", "test");
dir.create("abc", "test");
dir.create("zoo", "test");
dir.create("bar", "test");
let expected = "abc:test\nbar:test\nfoo:test\nzoo:test\n";
eqnice!(expected, cmd.arg("--sort-files").arg("test").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/263
rgtest!(f263_sort_files_reverse, |dir: Dir, mut cmd: TestCommand| {
dir.create("foo", "test");
dir.create("abc", "test");
dir.create("zoo", "test");
dir.create("bar", "test");
let expected = "zoo:test\nfoo:test\nbar:test\nabc:test\n";
eqnice!(expected, cmd.arg("--sortr=path").arg("test").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/275
rgtest!(f275_pathsep, |dir: Dir, mut cmd: TestCommand| {
dir.create_dir("foo");
dir.create("foo/bar", "test");
cmd.arg("test").arg("--path-separator").arg("Z");
eqnice!("fooZbar:test\n", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/362
rgtest!(f362_dfa_size_limit, |dir: Dir, mut cmd: TestCommand| {
dir.create("sherlock", SHERLOCK);
// This should fall back to the nfa engine but should still produce the
// expected result.
cmd.arg("--dfa-size-limit").arg("10").arg(r"For\s").arg("sherlock");
let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
";
eqnice!(expected, cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/362
rgtest!(f362_exceeds_regex_size_limit, |dir: Dir, mut cmd: TestCommand| {
// --regex-size-limit doesn't apply to PCRE2.
if dir.is_pcre2() {
return;
}
cmd.arg("--regex-size-limit").arg("10K").arg(r"[0-9]\w+").assert_err();
});
// See: https://github.com/BurntSushi/ripgrep/issues/362
#[cfg(target_pointer_width = "32")]
rgtest!(
f362_u64_to_narrow_usize_overflow,
|dir: Dir, mut cmd: TestCommand| {
// --dfa-size-limit doesn't apply to PCRE2.
if dir.is_pcre2() {
return;
}
dir.create_size("foo", 1000000);
// 2^35 * 2^20 is ok for u64, but not for usize
cmd.arg("--dfa-size-limit").arg("34359738368M").arg("--files");
cmd.assert_err();
}
);
// See: https://github.com/BurntSushi/ripgrep/issues/411
rgtest!(
    f411_single_threaded_search_stats,
    |dir: Dir, mut cmd: TestCommand| {
        dir.create("sherlock", SHERLOCK);
        let lines = cmd.arg("-j1").arg("--stats").arg("Sherlock").stdout();
        assert!(lines.contains("Sherlock"));
        assert!(lines.contains("2 matched lines"));
        assert!(lines.contains("1 files contained matches"));
        assert!(lines.contains("1 files searched"));
        assert!(lines.contains("seconds"));
    }
);

// Like f411_single_threaded_search_stats, but exercising parallel search.
rgtest!(f411_parallel_search_stats, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock_1", SHERLOCK);
    dir.create("sherlock_2", SHERLOCK);
    let lines = cmd.arg("-j2").arg("--stats").arg("Sherlock").stdout();
    assert!(lines.contains("4 matched lines"));
    assert!(lines.contains("2 files contained matches"));
    assert!(lines.contains("2 files searched"));
    assert!(lines.contains("seconds"));
});

rgtest!(
    f411_single_threaded_quiet_search_stats,
    |dir: Dir, mut cmd: TestCommand| {
        // --quiet suppresses match output, but --stats output must still be
        // produced.
        dir.create("sherlock", SHERLOCK);
        let lines = cmd
            .arg("--quiet")
            .arg("-j1")
            .arg("--stats")
            .arg("Sherlock")
            .stdout();
        assert!(!lines.contains("Sherlock"));
        assert!(lines.contains("2 matched lines"));
        assert!(lines.contains("1 files contained matches"));
        assert!(lines.contains("1 files searched"));
        assert!(lines.contains("seconds"));
    }
);

// Same as above, but exercising the parallel search path.
rgtest!(f411_parallel_quiet_search_stats, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock_1", SHERLOCK);
    dir.create("sherlock_2", SHERLOCK);
    let lines =
        cmd.arg("-j2").arg("--quiet").arg("--stats").arg("Sherlock").stdout();
    assert!(!lines.contains("Sherlock"));
    assert!(lines.contains("4 matched lines"));
    assert!(lines.contains("2 files contained matches"));
    assert!(lines.contains("2 files searched"));
    assert!(lines.contains("seconds"));
});
// See: https://github.com/BurntSushi/ripgrep/issues/416
rgtest!(f416_crlf, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK_CRLF);
    // With --crlf, `$` can match before the \r\n terminator; the printed
    // line keeps its \r.
    cmd.arg("--crlf").arg(r"Sherlock$").arg("sherlock");
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock\r
";
    eqnice!(expected, cmd.stdout());
});

// See: https://github.com/BurntSushi/ripgrep/issues/416
rgtest!(f416_crlf_multiline, |dir: Dir, mut cmd: TestCommand| {
    // Same as f416_crlf, but with multiline search (-U) enabled.
    dir.create("sherlock", SHERLOCK_CRLF);
    cmd.arg("--crlf").arg("-U").arg(r"Sherlock$").arg("sherlock");
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock\r
";
    eqnice!(expected, cmd.stdout());
});

// See: https://github.com/BurntSushi/ripgrep/issues/416
rgtest!(f416_crlf_only_matching, |dir: Dir, mut cmd: TestCommand| {
    // With -o, only the matched text is printed (no \r, since the match
    // ends before the line terminator).
    dir.create("sherlock", SHERLOCK_CRLF);
    cmd.arg("--crlf").arg("-o").arg(r"Sherlock$").arg("sherlock");
    let expected = "\
Sherlock\r
";
    eqnice!(expected, cmd.stdout());
});

// See: https://github.com/BurntSushi/ripgrep/issues/419
rgtest!(f419_zero_as_shortcut_for_null, |dir: Dir, mut cmd: TestCommand| {
    // -0 terminates the printed file path with a NUL byte.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("-0").arg("--count").arg("Sherlock");
    eqnice!("sherlock\x002\n", cmd.stdout());
});
// Tests --passthru: every line is printed, with non-matching lines shown as
// context (`-` separator) and matching lines shown as matches (`:`).
rgtest!(f740_passthru, |dir: Dir, mut cmd: TestCommand| {
    dir.create("file", "\nfoo\nbar\nfoobar\n\nbaz\n");
    dir.create("patterns", "foo\nbar\n");
    // We can't assume that the way colour specs are translated to ANSI
    // sequences will remain stable, and --replace doesn't currently work with
    // pass-through, so for now we don't actually test the match sub-strings
    let common_args = &["-n", "--passthru"];
    let foo_expected = "\
1-
2:foo
3-bar
4:foobar
5-
6-baz
";

    // With single pattern
    cmd.args(common_args).arg("foo").arg("file");
    eqnice!(foo_expected, cmd.stdout());

    let foo_bar_expected = "\
1-
2:foo
3:bar
4:foobar
5-
6-baz
";

    // With multiple -e patterns
    let mut cmd = dir.command();
    cmd.args(common_args);
    cmd.args(&["-e", "foo", "-e", "bar", "file"]);
    eqnice!(foo_bar_expected, cmd.stdout());

    // With multiple -f patterns
    let mut cmd = dir.command();
    cmd.args(common_args);
    cmd.args(&["-f", "patterns", "file"]);
    eqnice!(foo_bar_expected, cmd.stdout());

    // -c should override
    let mut cmd = dir.command();
    cmd.args(common_args);
    cmd.args(&["-c", "foo", "file"]);
    eqnice!("2\n", cmd.stdout());

    let only_foo_expected = "\
1-
2:foo
3-bar
4:foo
5-
6-baz
";

    // -o should work
    let mut cmd = dir.command();
    cmd.args(common_args);
    cmd.args(&["-o", "foo", "file"]);
    eqnice!(only_foo_expected, cmd.stdout());

    let replace_foo_expected = "\
1-
2:wat
3-bar
4:watbar
5-
6-baz
";

    // -r should work
    let mut cmd = dir.command();
    cmd.args(common_args);
    cmd.args(&["-r", "wat", "foo", "file"]);
    eqnice!(replace_foo_expected, cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/948
rgtest!(f948_exit_code_match, |dir: Dir, mut cmd: TestCommand| {
    // A pattern that matches something yields exit code 0.
    dir.create("sherlock", SHERLOCK);
    cmd.arg(".").assert_exit_code(0);
});

// See: https://github.com/BurntSushi/ripgrep/issues/948
rgtest!(f948_exit_code_no_match, |dir: Dir, mut cmd: TestCommand| {
    // A pattern that matches nothing yields exit code 1.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("NADA").assert_exit_code(1);
});

// See: https://github.com/BurntSushi/ripgrep/issues/948
rgtest!(f948_exit_code_error, |dir: Dir, mut cmd: TestCommand| {
    // An invalid pattern is an error, which yields exit code 2.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("*").assert_exit_code(2);
});
// See: https://github.com/BurntSushi/ripgrep/issues/917
rgtest!(f917_trim, |dir: Dir, mut cmd: TestCommand| {
    // `'static` is implied for `const` items, so the idiomatic (and
    // clippy-clean, see `redundant_static_lifetimes`) spelling is plain
    // `&str`. This also matches the HAYSTACK consts in the other f917 tests.
    const SHERLOCK: &str = "\
zzz
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, success in the province of detective work must always
\tbe, to a very large extent, the result of luck. Sherlock Holmes
can extract a clew from a wisp of straw or a flake of cigar ash;
but Doctor Watson has to have it taken out for him and dusted,
and exhibited clearly, with a label attached.
";
    dir.create("sherlock", SHERLOCK);
    // -B1/-A2 include context lines; --trim strips leading whitespace from
    // every printed line (note the \t disappearing from line 4).
    cmd.args(&["-n", "-B1", "-A2", "--trim", "Holmeses", "sherlock"]);
    let expected = "\
2-For the Doctor Watsons of this world, as opposed to the Sherlock
3:Holmeses, success in the province of detective work must always
4-be, to a very large extent, the result of luck. Sherlock Holmes
5-can extract a clew from a wisp of straw or a flake of cigar ash;
";
    eqnice!(expected, cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/917
//
// This is like f917_trim, except this tests that trimming occurs even when the
// whitespace is part of a match.
rgtest!(f917_trim_match, |dir: Dir, mut cmd: TestCommand| {
    // `'static` is implied for `const` items, so plain `&str` is the
    // idiomatic spelling (clippy: `redundant_static_lifetimes`).
    const SHERLOCK: &str = "\
zzz
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, success in the province of detective work must always
\tbe, to a very large extent, the result of luck. Sherlock Holmes
can extract a clew from a wisp of straw or a flake of cigar ash;
but Doctor Watson has to have it taken out for him and dusted,
and exhibited clearly, with a label attached.
";
    dir.create("sherlock", SHERLOCK);
    // The pattern pulls leading whitespace into the match via `\s+`, so
    // trimming must apply even to whitespace that is part of the match.
    cmd.args(&["-n", "-B1", "-A2", "--trim", r"\s+Holmeses", "sherlock"]);
    let expected = "\
2-For the Doctor Watsons of this world, as opposed to the Sherlock
3:Holmeses, success in the province of detective work must always
4-be, to a very large extent, the result of luck. Sherlock Holmes
5-can extract a clew from a wisp of straw or a flake of cigar ash;
";
    eqnice!(expected, cmd.stdout());
});
// Tests that --trim also applies to multiline (-U) matches.
rgtest!(f917_trim_multi_standard, |dir: Dir, mut cmd: TestCommand| {
    const HAYSTACK: &str = " 0123456789abcdefghijklmnopqrstuvwxyz";
    dir.create("haystack", HAYSTACK);
    cmd.args(&["--multiline", "--trim", "-r$0", "--no-filename", r"a\n?bc"]);
    let expected = "0123456789abcdefghijklmnopqrstuvwxyz\n";
    eqnice!(expected, cmd.stdout());
});

// Tests that -M truncation is computed on the trimmed line, not the raw one.
rgtest!(f917_trim_max_columns_normal, |dir: Dir, mut cmd: TestCommand| {
    const HAYSTACK: &str = " 0123456789abcdefghijklmnopqrstuvwxyz";
    dir.create("haystack", HAYSTACK);
    cmd.args(&[
        "--trim",
        "--max-columns-preview",
        "-M8",
        "--no-filename",
        "abc",
    ]);
    let expected = "01234567 [... omitted end of long line]\n";
    eqnice!(expected, cmd.stdout());
});

rgtest!(f917_trim_max_columns_matches, |dir: Dir, mut cmd: TestCommand| {
    const HAYSTACK: &str = " 0123456789abcdefghijklmnopqrstuvwxyz";
    dir.create("haystack", HAYSTACK);
    cmd.args(&[
        "--trim",
        "--max-columns-preview",
        "-M8",
        // Enabling colors makes the preview report remaining match counts
        // instead of "omitted end of long line".
        "--color=always",
        "--colors=path:none",
        "--no-filename",
        "abc",
    ]);
    let expected = "01234567 [... 1 more match]\n";
    eqnice!(expected, cmd.stdout());
});

rgtest!(
    f917_trim_max_columns_multi_standard,
    |dir: Dir, mut cmd: TestCommand| {
        const HAYSTACK: &str = " 0123456789abcdefghijklmnopqrstuvwxyz";
        dir.create("haystack", HAYSTACK);
        cmd.args(&[
            "--multiline",
            "--trim",
            "--max-columns-preview",
            "-M8",
            // Force the "slow" printing path without actually
            // putting colors in the output.
            "--color=always",
            "--colors=path:none",
            "--no-filename",
            r"a\n?bc",
        ]);
        let expected = "01234567 [... 1 more match]\n";
        eqnice!(expected, cmd.stdout());
    }
);

rgtest!(
    f917_trim_max_columns_multi_only_matching,
    |dir: Dir, mut cmd: TestCommand| {
        const HAYSTACK: &str = " 0123456789abcdefghijklmnopqrstuvwxyz";
        dir.create("haystack", HAYSTACK);
        cmd.args(&[
            "--multiline",
            "--trim",
            "--max-columns-preview",
            "-M8",
            "--only-matching",
            "--no-filename",
            r".*a\n?bc.*",
        ]);
        let expected = "01234567 [... 0 more matches]\n";
        eqnice!(expected, cmd.stdout());
    }
);

rgtest!(
    f917_trim_max_columns_multi_per_match,
    |dir: Dir, mut cmd: TestCommand| {
        const HAYSTACK: &str = " 0123456789abcdefghijklmnopqrstuvwxyz";
        dir.create("haystack", HAYSTACK);
        cmd.args(&[
            "--multiline",
            "--trim",
            "--max-columns-preview",
            "-M8",
            // --vimgrep prints one result per match with line:column prefixes.
            "--vimgrep",
            "--no-filename",
            r".*a\n?bc.*",
        ]);
        let expected = "1:1:01234567 [... 0 more matches]\n";
        eqnice!(expected, cmd.stdout());
    }
);
// See: https://github.com/BurntSushi/ripgrep/issues/993
rgtest!(f993_null_data, |dir: Dir, mut cmd: TestCommand| {
    // --null-data treats NUL as the line terminator instead of \n.
    dir.create("test", "foo\x00bar\x00\x00\x00baz\x00");
    cmd.arg("--null-data").arg(r".+").arg("test");
    // If we just used -a instead of --null-data, then the result would include
    // all NUL bytes.
    let expected = "foo\x00bar\x00baz\x00";
    eqnice!(expected, cmd.stdout());
});

// See: https://github.com/BurntSushi/ripgrep/issues/1078
//
// N.B. There are many more tests in the grep-printer crate.
rgtest!(f1078_max_columns_preview1, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    cmd.args(&[
        "-M46",
        "--max-columns-preview",
        "exhibited|dusted|has to have it",
    ]);
    let expected = "\
sherlock:but Doctor Watson has to have it taken out for [... omitted end of long line]
sherlock:and exhibited clearly, with a label attached.
";
    eqnice!(expected, cmd.stdout());
});

rgtest!(f1078_max_columns_preview2, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    cmd.args(&[
        "-M43",
        "--max-columns-preview",
        // Doing a replacement forces ripgrep to show the number of remaining
        // matches. Normally, this happens by default when printing a tty with
        // colors.
        "-rxxx",
        "exhibited|dusted|has to have it",
    ]);
    let expected = "\
sherlock:but Doctor Watson xxx taken out for him and [... 1 more match]
sherlock:and xxx clearly, with a label attached.
";
    eqnice!(expected, cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/1138
rgtest!(f1138_no_ignore_dot, |dir: Dir, mut cmd: TestCommand| {
    dir.create_dir(".git");
    dir.create(".gitignore", "foo");
    dir.create(".ignore", "bar");
    dir.create(".fzf-ignore", "quux");
    dir.create("foo", "");
    dir.create("bar", "");
    dir.create("quux", "");

    cmd.arg("--sort").arg("path").arg("--files");
    // By default both .gitignore and .ignore apply: only quux survives.
    eqnice!("quux\n", cmd.stdout());
    // --no-ignore-dot drops .ignore, but .gitignore still applies.
    eqnice!("bar\nquux\n", cmd.arg("--no-ignore-dot").stdout());
    // An explicitly given --ignore-file is still honored.
    eqnice!("bar\n", cmd.arg("--ignore-file").arg(".fzf-ignore").stdout());
});

// See: https://github.com/BurntSushi/ripgrep/issues/1155
rgtest!(f1155_auto_hybrid_regex, |dir: Dir, mut cmd: TestCommand| {
    // No sense in testing a hybrid regex engine with only one engine!
    if !dir.is_pcre2() {
        return;
    }
    dir.create("sherlock", SHERLOCK);
    // The look-behind is only supported by PCRE2; --auto-hybrid-regex should
    // select it even though --no-pcre2 comes first.
    cmd.arg("--no-pcre2").arg("--auto-hybrid-regex").arg(r"(?<=the )Sherlock");
    let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
";
    eqnice!(expected, cmd.stdout());
});

// See: https://github.com/BurntSushi/ripgrep/issues/1207
//
// Tests if without encoding 'none' flag null bytes are consumed by automatic
// encoding detection.
rgtest!(f1207_auto_encoding, |dir: Dir, mut cmd: TestCommand| {
    // \xFF\xFE is a UTF-16 little-endian BOM.
    dir.create_bytes("foo", b"\xFF\xFE\x00\x62");
    cmd.arg("-a").arg("\\x00").arg("foo");
    cmd.assert_exit_code(1);
});

// See: https://github.com/BurntSushi/ripgrep/issues/1207
//
// Tests if encoding 'none' flag does treat file as raw bytes
rgtest!(f1207_ignore_encoding, |dir: Dir, mut cmd: TestCommand| {
    // PCRE2 chokes on this test because it can't search invalid non-UTF-8
    // and the point of this test is to search raw UTF-16.
    if dir.is_pcre2() {
        return;
    }
    dir.create_bytes("foo", b"\xFF\xFE\x00\x62");
    cmd.arg("--encoding").arg("none").arg("-a").arg("\\x00").arg("foo");
    eqnice!("\u{FFFD}\u{FFFD}\x00b\n", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/1414
rgtest!(f1414_no_require_git, |dir: Dir, mut cmd: TestCommand| {
    // There is no .git directory here, so by default the .gitignore does not
    // apply and both files are listed.
    dir.create(".gitignore", "foo");
    dir.create("foo", "");
    dir.create("bar", "");

    let stdout = cmd.args(&["--sort", "path", "--files"]).stdout();
    eqnice!("bar\nfoo\n", stdout);

    // --no-require-git makes the .gitignore apply even without .git.
    let stdout =
        cmd.args(&["--sort", "path", "--files", "--no-require-git"]).stdout();
    eqnice!("bar\n", stdout);

    // --require-git overrides a preceding --no-require-git.
    let stdout = cmd
        .args(&[
            "--sort",
            "path",
            "--files",
            "--no-require-git",
            "--require-git",
        ])
        .stdout();
    eqnice!("bar\nfoo\n", stdout);
});

// See: https://github.com/BurntSushi/ripgrep/pull/1420
rgtest!(f1420_no_ignore_exclude, |dir: Dir, mut cmd: TestCommand| {
    // .git/info/exclude is honored by default; --no-ignore-exclude drops it.
    dir.create_dir(".git/info");
    dir.create(".git/info/exclude", "foo");
    dir.create("bar", "");
    dir.create("foo", "");
    cmd.arg("--sort").arg("path").arg("--files");
    eqnice!("bar\n", cmd.stdout());
    eqnice!("bar\nfoo\n", cmd.arg("--no-ignore-exclude").stdout());
});

// See: https://github.com/BurntSushi/ripgrep/pull/1466
rgtest!(f1466_no_ignore_files, |dir: Dir, mut cmd: TestCommand| {
    dir.create(".myignore", "bar");
    dir.create("bar", "");
    dir.create("foo", "");

    // Test that --no-ignore-files disables --ignore-file.
    // And that --ignore-files overrides --no-ignore-files.
    cmd.arg("--sort").arg("path").arg("--files");
    eqnice!("bar\nfoo\n", cmd.stdout());
    eqnice!("foo\n", cmd.arg("--ignore-file").arg(".myignore").stdout());
    eqnice!("bar\nfoo\n", cmd.arg("--no-ignore-files").stdout());
    eqnice!("foo\n", cmd.arg("--ignore-files").stdout());

    // Test that the -u flag does not disable --ignore-file.
    let mut cmd = dir.command();
    cmd.arg("--sort").arg("path").arg("--files");
    cmd.arg("--ignore-file").arg(".myignore");
    eqnice!("foo\n", cmd.stdout());
    eqnice!("foo\n", cmd.arg("-u").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/pull/2361
rgtest!(f2361_sort_nested_files, |dir: Dir, mut cmd: TestCommand| {
    use std::{thread::sleep, time::Duration};

    // Not run when cross-compiling/emulating (see util::is_cross).
    if crate::util::is_cross() {
        return;
    }
    // Sleep between writes so the files' timestamps are distinguishable.
    dir.create("foo", "1");
    sleep(Duration::from_millis(200));
    dir.create_dir("dir");
    sleep(Duration::from_millis(200));
    dir.create(dir.path().join("dir").join("bar"), "1");

    cmd.arg("--sort").arg("accessed").arg("--files");
    eqnice!("foo\ndir/bar\n", cmd.stdout());

    // Rewrite both files in the same order and check the sort again.
    dir.create("foo", "2");
    sleep(Duration::from_millis(200));
    dir.create(dir.path().join("dir").join("bar"), "2");
    sleep(Duration::from_millis(200));

    cmd.arg("--sort").arg("accessed").arg("--files");
    eqnice!("foo\ndir/bar\n", cmd.stdout());
});

// See: https://github.com/BurntSushi/ripgrep/issues/1404
rgtest!(f1404_nothing_searched_warning, |dir: Dir, mut cmd: TestCommand| {
    dir.create(".ignore", "ignored-dir/**");
    dir.create_dir("ignored-dir");
    dir.create("ignored-dir/foo", "needle");

    // Test that, if ripgrep searches only ignored folders/files, then there
    // is a non-zero exit code.
    cmd.arg("needle");
    cmd.assert_err();

    // Test that we actually get an error message that we expect.
    let output = cmd.raw_output();
    let stderr = String::from_utf8_lossy(&output.stderr);
    let expected = "\
rg: No files were searched, which means ripgrep probably applied \
a filter you didn't expect.\n\
Running with --debug will show why files are being skipped.\n\
";
    eqnice!(expected, stderr);
});

// See: https://github.com/BurntSushi/ripgrep/issues/1404
rgtest!(f1404_nothing_searched_ignored, |dir: Dir, mut cmd: TestCommand| {
    dir.create(".ignore", "ignored-dir/**");
    dir.create_dir("ignored-dir");
    dir.create("ignored-dir/foo", "needle");

    // Test that, if ripgrep searches only ignored folders/files, then there
    // is a non-zero exit code.
    cmd.arg("--no-messages").arg("needle");
    cmd.assert_err();

    // But since --no-messages is given, there should not be any error message
    // printed.
    let output = cmd.raw_output();
    let stderr = String::from_utf8_lossy(&output.stderr);
    let expected = "";
    eqnice!(expected, stderr);
});
// See: https://github.com/BurntSushi/ripgrep/issues/1842
rgtest!(f1842_field_context_separator, |dir: Dir, _: TestCommand| {
dir.create("sherlock", SHERLOCK);
// Test the default.
// The default haystack used by most of the integration tests: a paragraph
// of Sherlock Holmes text with Unix \n line terminators.
pub const SHERLOCK: &str = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, success in the province of detective work must always
be, to a very large extent, the result of luck. Sherlock Holmes
can extract a clew from a wisp of straw or a flake of cigar ash;
but Doctor Watson has to have it taken out for him and dusted,
and exhibited clearly, with a label attached.
";
// Like SHERLOCK, but with Windows-style \r\n line terminators. Used by the
// --crlf tests. (`'static` is implied for consts, so plain `&str` is the
// idiomatic spelling; clippy: `redundant_static_lifetimes`.)
pub const SHERLOCK_CRLF: &str = "\
For the Doctor Watsons of this world, as opposed to the Sherlock\r
Holmeses, success in the province of detective work must always\r
be, to a very large extent, the result of luck. Sherlock Holmes\r
can extract a clew from a wisp of straw or a flake of cigar ash;\r
but Doctor Watson has to have it taken out for him and dusted,\r
and exhibited clearly, with a label attached.\r
";
use crate::hay::SHERLOCK;
use crate::util::{Dir, TestCommand, cmd_exists, sort_lines};
// This file contains "miscellaneous" tests that were either written before
// features were tracked more explicitly, or were simply written without
// linking them to a specific issue number. We should try to minimize the
// addition of more tests in this file and instead add them to either the
// regression test suite or the feature test suite (found in regression.rs and
// feature.rs, respectively).
rgtest!(single_file, |dir: Dir, mut cmd: TestCommand| {
    // Searching a single named file omits the file name by default.
    dir.create("sherlock", SHERLOCK);
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.arg("Sherlock").arg("sherlock").stdout());
});

rgtest!(dir, |dir: Dir, mut cmd: TestCommand| {
    // Searching a directory prefixes each match with its file path.
    dir.create("sherlock", SHERLOCK);
    let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.arg("Sherlock").stdout());
});

rgtest!(line_numbers, |dir: Dir, mut cmd: TestCommand| {
    // -n prefixes each matching line with its 1-based line number.
    dir.create("sherlock", SHERLOCK);
    let expected = "\
1:For the Doctor Watsons of this world, as opposed to the Sherlock
3:be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.arg("-n").arg("Sherlock").arg("sherlock").stdout());
});

rgtest!(columns, |dir: Dir, mut cmd: TestCommand| {
    // --column adds the 1-based column of the first match on each line.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("--column").arg("Sherlock").arg("sherlock");
    let expected = "\
1:57:For the Doctor Watsons of this world, as opposed to the Sherlock
3:49:be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});

rgtest!(with_filename, |dir: Dir, mut cmd: TestCommand| {
    // -H forces the file name to be printed, even for a single file.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("-H").arg("Sherlock").arg("sherlock");
    let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});

rgtest!(with_heading, |dir: Dir, mut cmd: TestCommand| {
    // --heading prints the file name once above the matches instead of as a
    // per-line prefix.
    dir.create("sherlock", SHERLOCK);
    cmd.args(&[
        // This forces the issue since --with-filename is disabled by default
        // when searching one file.
        "--with-filename",
        "--heading",
        "Sherlock",
        "sherlock",
    ]);
    let expected = "\
sherlock
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});
rgtest!(with_heading_default, |dir: Dir, mut cmd: TestCommand| {
    // Search two or more and get --with-filename enabled by default.
    // Use -j1 to get deterministic results.
    dir.create("sherlock", SHERLOCK);
    dir.create("foo", "Sherlock Holmes lives on Baker Street.");
    cmd.arg("-j1").arg("--heading").arg("Sherlock");
    let expected = "\
foo
Sherlock Holmes lives on Baker Street.
sherlock
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(sort_lines(expected), sort_lines(&cmd.stdout()));
});

rgtest!(inverted, |dir: Dir, mut cmd: TestCommand| {
    // -v prints the lines that do NOT match the pattern.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("-v").arg("Sherlock").arg("sherlock");
    let expected = "\
Holmeses, success in the province of detective work must always
can extract a clew from a wisp of straw or a flake of cigar ash;
but Doctor Watson has to have it taken out for him and dusted,
and exhibited clearly, with a label attached.
";
    eqnice!(expected, cmd.stdout());
});

rgtest!(inverted_line_numbers, |dir: Dir, mut cmd: TestCommand| {
    // Like `inverted`, but with -n line numbers.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("-n").arg("-v").arg("Sherlock").arg("sherlock");
    let expected = "\
2:Holmeses, success in the province of detective work must always
4:can extract a clew from a wisp of straw or a flake of cigar ash;
5:but Doctor Watson has to have it taken out for him and dusted,
6:and exhibited clearly, with a label attached.
";
    eqnice!(expected, cmd.stdout());
});

rgtest!(case_insensitive, |dir: Dir, mut cmd: TestCommand| {
    // -i makes the pattern match case insensitively.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("-i").arg("sherlock").arg("sherlock");
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});
rgtest!(word, |dir: Dir, mut cmd: TestCommand| {
    // -w only reports matches surrounded by word boundaries.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("-w").arg("as").arg("sherlock");
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
";
    eqnice!(expected, cmd.stdout());
});

rgtest!(word_period, |dir: Dir, mut cmd: TestCommand| {
    // A pattern consisting entirely of non-word characters still works
    // with -w.
    dir.create("haystack", "...");
    cmd.arg("-ow").arg(".").arg("haystack");
    let expected = "\
.
.
.
";
    eqnice!(expected, cmd.stdout());
});

rgtest!(line, |dir: Dir, mut cmd: TestCommand| {
    // -x requires the pattern to match a whole line.
    dir.create("sherlock", SHERLOCK);
    cmd.args(&[
        "-x",
        "Watson|and exhibited clearly, with a label attached.",
        "sherlock",
    ]);
    let expected = "\
and exhibited clearly, with a label attached.
";
    eqnice!(expected, cmd.stdout());
});

rgtest!(literal, |dir: Dir, mut cmd: TestCommand| {
    // -F treats the pattern as a literal string rather than a regex.
    dir.create("sherlock", SHERLOCK);
    dir.create("file", "blib\n()\nblab\n");
    cmd.arg("-F").arg("()").arg("file");
    eqnice!("()\n", cmd.stdout());
});

rgtest!(quiet, |dir: Dir, mut cmd: TestCommand| {
    // -q suppresses all match output.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("-q").arg("Sherlock").arg("sherlock");
    assert!(cmd.stdout().is_empty());
});
rgtest!(replace, |dir: Dir, mut cmd: TestCommand| {
    // -r replaces each match with the given text in the output.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("-r").arg("FooBar").arg("Sherlock").arg("sherlock");
    let expected = "\
For the Doctor Watsons of this world, as opposed to the FooBar
be, to a very large extent, the result of luck. FooBar Holmes
";
    eqnice!(expected, cmd.stdout());
});

rgtest!(replace_groups, |dir: Dir, mut cmd: TestCommand| {
    // Replacement text may refer to numbered capture groups ($1, $2, ...).
    dir.create("sherlock", SHERLOCK);
    cmd.args(&["-r", "$2, $1", "([A-Z][a-z]+) ([A-Z][a-z]+)", "sherlock"]);
    let expected = "\
For the Watsons, Doctor of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Holmes, Sherlock
but Watson, Doctor has to have it taken out for him and dusted,
";
    eqnice!(expected, cmd.stdout());
});

rgtest!(replace_named_groups, |dir: Dir, mut cmd: TestCommand| {
    // Replacement text may also refer to named capture groups.
    dir.create("sherlock", SHERLOCK);
    cmd.args(&[
        "-r",
        "$last, $first",
        "(?P<first>[A-Z][a-z]+) (?P<last>[A-Z][a-z]+)",
        "sherlock",
    ]);
    let expected = "\
For the Watsons, Doctor of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Holmes, Sherlock
but Watson, Doctor has to have it taken out for him and dusted,
";
    eqnice!(expected, cmd.stdout());
});

rgtest!(replace_with_only_matching, |dir: Dir, mut cmd: TestCommand| {
    // With -o, only the replaced match text is printed.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("-o").arg("-r").arg("$1").arg(r"of (\w+)").arg("sherlock");
    let expected = "\
this
detective
luck
straw
cigar
";
    eqnice!(expected, cmd.stdout());
});
rgtest!(file_types, |dir: Dir, mut cmd: TestCommand| {
    // -t restricts the search to files of the given type.
    dir.create("sherlock", SHERLOCK);
    dir.create("file.py", "Sherlock");
    dir.create("file.rs", "Sherlock");
    cmd.arg("-t").arg("rust").arg("Sherlock");
    eqnice!("file.rs:Sherlock\n", cmd.stdout());
});

rgtest!(file_types_all, |dir: Dir, mut cmd: TestCommand| {
    // `-t all` selects files with any recognized type; the extensionless
    // `sherlock` file is skipped.
    dir.create("sherlock", SHERLOCK);
    dir.create("file.py", "Sherlock");
    cmd.arg("-t").arg("all").arg("Sherlock");
    eqnice!("file.py:Sherlock\n", cmd.stdout());
});

rgtest!(file_types_negate, |dir: Dir, mut cmd: TestCommand| {
    // -T excludes files of the given type.
    dir.create("sherlock", SHERLOCK);
    dir.remove("sherlock");
    dir.create("file.py", "Sherlock");
    dir.create("file.rs", "Sherlock");
    cmd.arg("-T").arg("rust").arg("Sherlock");
    eqnice!("file.py:Sherlock\n", cmd.stdout());
});

rgtest!(file_types_negate_all, |dir: Dir, mut cmd: TestCommand| {
    // `-T all` leaves only files with no recognized type.
    dir.create("sherlock", SHERLOCK);
    dir.create("file.py", "Sherlock");
    cmd.arg("-T").arg("all").arg("Sherlock");
    let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});

rgtest!(file_type_clear, |dir: Dir, mut cmd: TestCommand| {
    // After --type-clear, using the cleared type with -t is an error.
    dir.create("sherlock", SHERLOCK);
    dir.create("file.py", "Sherlock");
    dir.create("file.rs", "Sherlock");
    cmd.arg("--type-clear").arg("rust").arg("-t").arg("rust").arg("Sherlock");
    cmd.assert_non_empty_stderr();
});

rgtest!(file_type_add, |dir: Dir, mut cmd: TestCommand| {
    // --type-add defines a new file type from a glob.
    dir.create("sherlock", SHERLOCK);
    dir.create("file.py", "Sherlock");
    dir.create("file.rs", "Sherlock");
    dir.create("file.wat", "Sherlock");
    cmd.args(&["--type-add", "wat:*.wat", "-t", "wat", "Sherlock"]);
    eqnice!("file.wat:Sherlock\n", cmd.stdout());
});

rgtest!(file_type_add_compose, |dir: Dir, mut cmd: TestCommand| {
    // `include:` composes existing types into a new one.
    dir.create("sherlock", SHERLOCK);
    dir.create("file.py", "Sherlock");
    dir.create("file.rs", "Sherlock");
    dir.create("file.wat", "Sherlock");
    cmd.args(&[
        "--type-add",
        "wat:*.wat",
        "--type-add",
        "combo:include:wat,py",
        "-t",
        "combo",
        "Sherlock",
    ]);
    let expected = "\
file.py:Sherlock
file.wat:Sherlock
";
    eqnice!(expected, sort_lines(&cmd.stdout()));
});
rgtest!(glob, |dir: Dir, mut cmd: TestCommand| {
    // -g restricts the search to paths matching the glob.
    dir.create("sherlock", SHERLOCK);
    dir.create("file.py", "Sherlock");
    dir.create("file.rs", "Sherlock");
    cmd.arg("-g").arg("*.rs").arg("Sherlock");
    eqnice!("file.rs:Sherlock\n", cmd.stdout());
});

rgtest!(glob_negate, |dir: Dir, mut cmd: TestCommand| {
    // A `!` prefix inverts the glob, excluding matching paths.
    dir.create("sherlock", SHERLOCK);
    dir.remove("sherlock");
    dir.create("file.py", "Sherlock");
    dir.create("file.rs", "Sherlock");
    cmd.arg("-g").arg("!*.rs").arg("Sherlock");
    eqnice!("file.py:Sherlock\n", cmd.stdout());
});

rgtest!(glob_case_insensitive, |dir: Dir, mut cmd: TestCommand| {
    // --iglob matches paths case insensitively.
    dir.create("sherlock", SHERLOCK);
    dir.create("file.HTML", "Sherlock");
    cmd.arg("--iglob").arg("*.html").arg("Sherlock");
    eqnice!("file.HTML:Sherlock\n", cmd.stdout());
});

rgtest!(glob_case_sensitive, |dir: Dir, mut cmd: TestCommand| {
    // --glob is case sensitive: file1.HTML is not matched.
    dir.create("sherlock", SHERLOCK);
    dir.create("file1.HTML", "Sherlock");
    dir.create("file2.html", "Sherlock");
    cmd.arg("--glob").arg("*.html").arg("Sherlock");
    eqnice!("file2.html:Sherlock\n", cmd.stdout());
});

rgtest!(glob_always_case_insensitive, |dir: Dir, mut cmd: TestCommand| {
    // --glob-case-insensitive makes all --glob flags case insensitive.
    dir.create("sherlock", SHERLOCK);
    dir.create("file.HTML", "Sherlock");
    cmd.args(&["--glob-case-insensitive", "--glob", "*.html", "Sherlock"]);
    eqnice!("file.HTML:Sherlock\n", cmd.stdout());
});
rgtest!(byte_offset_only_matching, |dir: Dir, mut cmd: TestCommand| {
    // -b with -o prints the byte offset of each match.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("-b").arg("-o").arg("Sherlock");
    let expected = "\
sherlock:56:Sherlock
sherlock:177:Sherlock
";
    eqnice!(expected, cmd.stdout());
});

rgtest!(count, |dir: Dir, mut cmd: TestCommand| {
    // --count reports the number of matching lines per file.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("--count").arg("Sherlock");
    let expected = "sherlock:2\n";
    eqnice!(expected, cmd.stdout());
});

rgtest!(count_matches, |dir: Dir, mut cmd: TestCommand| {
    // --count-matches counts individual matches, not matching lines.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("--count-matches").arg("the");
    let expected = "sherlock:4\n";
    eqnice!(expected, cmd.stdout());
});

rgtest!(count_matches_inverted, |dir: Dir, mut cmd: TestCommand| {
    // With --invert-match, the count covers non-matching lines.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("--count-matches").arg("--invert-match").arg("Sherlock");
    let expected = "sherlock:4\n";
    eqnice!(expected, cmd.stdout());
});

rgtest!(count_matches_via_only, |dir: Dir, mut cmd: TestCommand| {
    // --count with --only-matching behaves like --count-matches.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("--count").arg("--only-matching").arg("the");
    let expected = "sherlock:4\n";
    eqnice!(expected, cmd.stdout());
});

rgtest!(include_zero, |dir: Dir, mut cmd: TestCommand| {
    // --include-zero prints a 0 count for files without matches, while the
    // exit code still reports "no match".
    dir.create("sherlock", SHERLOCK);
    cmd.args(&["--count", "--include-zero", "nada"]);
    cmd.assert_err();

    let output = cmd.raw_output();
    let stdout = String::from_utf8_lossy(&output.stdout);
    let expected = "sherlock:0\n";
    eqnice!(expected, stdout);
});

rgtest!(include_zero_override, |dir: Dir, mut cmd: TestCommand| {
    // --no-include-zero overrides a preceding --include-zero.
    dir.create("sherlock", SHERLOCK);
    cmd.args(&["--count", "--include-zero", "--no-include-zero", "nada"]);
    cmd.assert_err();

    let output = cmd.raw_output();
    let stdout = String::from_utf8_lossy(&output.stdout);
    assert!(stdout.is_empty());
});
rgtest!(files_with_matches, |dir: Dir, mut cmd: TestCommand| {
    // --files-with-matches prints only the paths that contain a match.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("--files-with-matches").arg("Sherlock");
    let expected = "sherlock\n";
    eqnice!(expected, cmd.stdout());
});

rgtest!(files_without_match, |dir: Dir, mut cmd: TestCommand| {
    // --files-without-match prints only the paths that contain no match.
    dir.create("sherlock", SHERLOCK);
    dir.create("file.py", "foo");
    cmd.arg("--files-without-match").arg("Sherlock");
    let expected = "file.py\n";
    eqnice!(expected, cmd.stdout());
});

rgtest!(after_context, |dir: Dir, mut cmd: TestCommand| {
    // -A prints trailing context lines after each match.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("-A").arg("1").arg("Sherlock").arg("sherlock");
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, success in the province of detective work must always
be, to a very large extent, the result of luck. Sherlock Holmes
can extract a clew from a wisp of straw or a flake of cigar ash;
";
    eqnice!(expected, cmd.stdout());
});

rgtest!(after_context_line_numbers, |dir: Dir, mut cmd: TestCommand| {
    // With -n, context lines use a `-` separator while matches use `:`.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("-A").arg("1").arg("-n").arg("Sherlock").arg("sherlock");
    let expected = "\
1:For the Doctor Watsons of this world, as opposed to the Sherlock
2-Holmeses, success in the province of detective work must always
3:be, to a very large extent, the result of luck. Sherlock Holmes
4-can extract a clew from a wisp of straw or a flake of cigar ash;
";
    eqnice!(expected, cmd.stdout());
});

rgtest!(before_context, |dir: Dir, mut cmd: TestCommand| {
    // -B prints leading context lines before each match.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("-B").arg("1").arg("Sherlock").arg("sherlock");
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, success in the province of detective work must always
be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});

rgtest!(before_context_line_numbers, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    cmd.arg("-B").arg("1").arg("-n").arg("Sherlock").arg("sherlock");
    let expected = "\
1:For the Doctor Watsons of this world, as opposed to the Sherlock
2-Holmeses, success in the province of detective work must always
3:be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});

rgtest!(context, |dir: Dir, mut cmd: TestCommand| {
    // -C prints context on both sides; `--` separates disjoint regions.
    dir.create("sherlock", SHERLOCK);
    cmd.arg("-C").arg("1").arg("world|attached").arg("sherlock");
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, success in the province of detective work must always
--
but Doctor Watson has to have it taken out for him and dusted,
and exhibited clearly, with a label attached.
";
    eqnice!(expected, cmd.stdout());
});

rgtest!(context_line_numbers, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    cmd.arg("-C").arg("1").arg("-n").arg("world|attached").arg("sherlock");
    let expected = "\
1:For the Doctor Watsons of this world, as opposed to the Sherlock
2-Holmeses, success in the province of detective work must always
--
5-but Doctor Watson has to have it taken out for him and dusted,
6:and exhibited clearly, with a label attached.
";
    eqnice!(expected, cmd.stdout());
});
// --max-filesize: a value too large to represent is a parse error.
rgtest!(max_filesize_parse_error_length, |_: Dir, mut cmd: TestCommand| {
    cmd.arg("--max-filesize").arg("44444444444444444444");
    cmd.assert_non_empty_stderr();
});
// Size suffixes must be uppercase; a lowercase 'k' is rejected.
rgtest!(max_filesize_parse_error_suffix, |_: Dir, mut cmd: TestCommand| {
    cmd.arg("--max-filesize").arg("45k");
    cmd.assert_non_empty_stderr();
});
// A bare number is a byte count: only the 40-byte file survives the cap.
rgtest!(max_filesize_parse_no_suffix, |dir: Dir, mut cmd: TestCommand| {
    dir.create_size("foo", 40);
    dir.create_size("bar", 60);
    cmd.arg("--max-filesize").arg("50").arg("--files");
    eqnice!("foo\n", cmd.stdout());
});
// K suffix multiplies by 1024: 4100 bytes exceeds 4K, 3048 does not.
rgtest!(max_filesize_parse_k_suffix, |dir: Dir, mut cmd: TestCommand| {
    dir.create_size("foo", 3048);
    dir.create_size("bar", 4100);
    cmd.arg("--max-filesize").arg("4K").arg("--files");
    eqnice!("foo\n", cmd.stdout());
});
rgtest!(max_filesize_parse_m_suffix, |dir: Dir, mut cmd: TestCommand| {
    dir.create_size("foo", 1000000);
    dir.create_size("bar", 1400000);
    cmd.arg("--max-filesize").arg("1M").arg("--files");
    eqnice!("foo\n", cmd.stdout());
});
// A size whose suffix multiplication would overflow must be rejected
// with an error rather than silently wrapping.
rgtest!(max_filesize_suffix_overflow, |dir: Dir, mut cmd: TestCommand| {
    dir.create_size("foo", 1000000);
    // 2^35 * 2^30 would otherwise overflow
    cmd.arg("--max-filesize").arg("34359738368G").arg("--files");
    cmd.assert_non_empty_stderr();
});
// Hidden files (dotfiles) are skipped by default, so no match is found.
rgtest!(ignore_hidden, |dir: Dir, mut cmd: TestCommand| {
    dir.create(".sherlock", SHERLOCK);
    cmd.arg("Sherlock").assert_err();
});
// --hidden opts back in to searching dotfiles.
rgtest!(no_ignore_hidden, |dir: Dir, mut cmd: TestCommand| {
    dir.create(".sherlock", SHERLOCK);
    cmd.arg("--hidden").arg("Sherlock");
    let expected = "\
.sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
.sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});
// .gitignore is respected (a .git directory is created so the directory
// is treated as a git repository).
rgtest!(ignore_git, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    dir.create_dir(".git");
    dir.create(".gitignore", "sherlock\n");
    cmd.arg("Sherlock");
    cmd.assert_err();
});
// .ignore is honored without any .git directory present.
rgtest!(ignore_generic, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    dir.create(".ignore", "sherlock\n");
    cmd.arg("Sherlock");
    cmd.assert_err();
});
// .rgignore (ripgrep-specific) is likewise honored without git.
rgtest!(ignore_ripgrep, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    dir.create(".rgignore", "sherlock\n");
    cmd.arg("Sherlock");
    cmd.assert_err();
});
// --no-ignore disables ignore-file processing entirely.
rgtest!(no_ignore, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    dir.create(".gitignore", "sherlock\n");
    cmd.arg("--no-ignore").arg("Sherlock");
    let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});
// Ignore files in parent directories apply to searches started in a child.
rgtest!(ignore_git_parent, |dir: Dir, mut cmd: TestCommand| {
    dir.create_dir(".git");
    dir.create(".gitignore", "sherlock\n");
    dir.create_dir("foo");
    dir.create("foo/sherlock", SHERLOCK);
    cmd.arg("Sherlock");
    // Even though we search in foo/, which has no .gitignore, ripgrep will
    // traverse parent directories and respect the gitignore files found.
    cmd.current_dir("foo");
    cmd.assert_err();
});
rgtest!(ignore_git_parent_stop, |dir: Dir, mut cmd: TestCommand| {
    // This tests that searching parent directories for .gitignore files stops
    // after it sees a .git directory. To test this, we create this directory
    // hierarchy:
    //
    //   .gitignore (contains `sherlock`)
    //   foo/
    //     .git/
    //     bar/
    //       sherlock
    //
    // And we perform the search inside `foo/bar/`. ripgrep will stop looking
    // for .gitignore files after it sees `foo/.git/`, and therefore not
    // respect the top-level `.gitignore` containing `sherlock`.
    dir.create(".gitignore", "sherlock\n");
    dir.create_dir("foo");
    dir.create_dir("foo/.git");
    dir.create_dir("foo/bar");
    dir.create("foo/bar/sherlock", SHERLOCK);
    cmd.arg("Sherlock");
    cmd.current_dir("foo/bar");
    let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});
// Like ignore_git_parent_stop, but with a .git file instead of a .git
// directory.
rgtest!(ignore_git_parent_stop_file, |dir: Dir, mut cmd: TestCommand| {
    // This tests that searching parent directories for .gitignore files stops
    // after it sees a .git *file*. A .git file is used for submodules. To test
    // this, we create this directory hierarchy:
    //
    //   .gitignore (contains `sherlock`)
    //   foo/
    //     .git
    //     bar/
    //       sherlock
    //
    // And we perform the search inside `foo/bar/`. ripgrep will stop looking
    // for .gitignore files after it sees `foo/.git`, and therefore not
    // respect the top-level `.gitignore` containing `sherlock`.
    dir.create(".gitignore", "sherlock\n");
    dir.create_dir("foo");
    dir.create("foo/.git", "");
    dir.create_dir("foo/bar");
    dir.create("foo/bar/sherlock", SHERLOCK);
    cmd.arg("Sherlock");
    cmd.current_dir("foo/bar");
    let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});
rgtest!(ignore_ripgrep_parent_no_stop, |dir: Dir, mut cmd: TestCommand| {
    // This is like the `ignore_git_parent_stop` test, except it checks that
    // ripgrep *doesn't* stop checking for .rgignore files.
    dir.create(".rgignore", "sherlock\n");
    dir.create_dir("foo");
    dir.create_dir("foo/.git");
    dir.create_dir("foo/bar");
    dir.create("foo/bar/sherlock", SHERLOCK);
    cmd.arg("Sherlock");
    cmd.current_dir("foo/bar");
    // The top-level .rgignore applies.
    cmd.assert_err();
});
rgtest!(no_parent_ignore_git, |dir: Dir, mut cmd: TestCommand| {
    // Set up a directory hierarchy like this:
    //
    //   .git/
    //   .gitignore
    //   foo/
    //     .gitignore
    //     sherlock
    //     watson
    //
    // Where `.gitignore` contains `sherlock` and `foo/.gitignore` contains
    // `watson`.
    //
    // Now *do the search* from the foo directory. By default, ripgrep will
    // search parent directories for .gitignore files. The --no-ignore-parent
    // flag should prevent that. At the same time, the `foo/.gitignore` file
    // will still be respected (since the search is happening in `foo/`).
    //
    // In other words, we should only see results from `sherlock`, not from
    // `watson`.
    dir.create_dir(".git");
    dir.create(".gitignore", "sherlock\n");
    dir.create_dir("foo");
    dir.create("foo/.gitignore", "watson\n");
    dir.create("foo/sherlock", SHERLOCK);
    dir.create("foo/watson", SHERLOCK);
    cmd.arg("--no-ignore-parent").arg("Sherlock");
    cmd.current_dir("foo");
    let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});
// Symlinked directories are not followed by default, so the linked
// directory's contents yield no matches.
rgtest!(symlink_nofollow, |dir: Dir, mut cmd: TestCommand| {
    dir.create_dir("foo");
    dir.create_dir("foo/bar");
    dir.link_dir("foo/baz", "foo/bar/baz");
    dir.create_dir("foo/baz");
    dir.create("foo/baz/sherlock", SHERLOCK);
    cmd.arg("Sherlock");
    cmd.current_dir("foo/bar");
    cmd.assert_err();
});
// -L/--follow traverses symlinked directories; results are reported under
// the symlink's path. Disabled on Windows, where symlink creation in
// these tests fails (see the r137 test's comment below).
#[cfg(not(windows))]
rgtest!(symlink_follow, |dir: Dir, mut cmd: TestCommand| {
    dir.create_dir("foo");
    dir.create_dir("foo/bar");
    dir.create_dir("foo/baz");
    dir.create("foo/baz/sherlock", SHERLOCK);
    dir.link_dir("foo/baz", "foo/bar/baz");
    cmd.arg("-L").arg("Sherlock");
    cmd.current_dir("foo/bar");
    let expected = "\
baz/sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
baz/sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});
// -u reduces filtering one level: ignore files are no longer respected.
rgtest!(unrestricted1, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    dir.create(".gitignore", "sherlock\n");
    cmd.arg("-u").arg("Sherlock");
    let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});
// -uu additionally searches hidden files.
rgtest!(unrestricted2, |dir: Dir, mut cmd: TestCommand| {
    dir.create(".sherlock", SHERLOCK);
    cmd.arg("-uu").arg("Sherlock");
    let expected = "\
.sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
.sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});
// -uuu additionally searches binary files; a NUL byte triggers the
// binary-match summary message instead of printing the matched line.
rgtest!(unrestricted3, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    dir.create("hay", "foo\x00bar\nfoo\x00baz\n");
    cmd.arg("-uuu").arg("foo");
    let expected = "\
hay: binary file matches (found \"\\0\" byte around offset 3)
";
    eqnice!(expected, cmd.stdout());
});
// --vimgrep emits one output line per match with file:line:column
// prefixes (two matches on line 1 produce two lines).
rgtest!(vimgrep, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    cmd.arg("--vimgrep").arg("Sherlock|Watson");
    let expected = "\
sherlock:1:16:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:1:57:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:3:49:be, to a very large extent, the result of luck. Sherlock Holmes
sherlock:5:12:but Doctor Watson has to have it taken out for him and dusted,
";
    eqnice!(expected, cmd.stdout());
});
// -N drops the line number but keeps the per-match column.
rgtest!(vimgrep_no_line, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    cmd.arg("--vimgrep").arg("-N").arg("Sherlock|Watson");
    let expected = "\
sherlock:16:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:57:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:49:be, to a very large extent, the result of luck. Sherlock Holmes
sherlock:12:but Doctor Watson has to have it taken out for him and dusted,
";
    eqnice!(expected, cmd.stdout());
});
// With line numbers and columns both suppressed, one line per match is
// still printed — including duplicates for multiple matches on a line.
rgtest!(vimgrep_no_line_no_column, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    cmd.arg("--vimgrep").arg("-N").arg("--no-column").arg("Sherlock|Watson");
    let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
sherlock:but Doctor Watson has to have it taken out for him and dusted,
";
    eqnice!(expected, cmd.stdout());
});
// --pre filters each searched file through the given command before
// matching. Skipped when xzcat isn't installed on the host.
rgtest!(preprocessing, |dir: Dir, mut cmd: TestCommand| {
    if !cmd_exists("xzcat") {
        return;
    }
    dir.create_bytes("sherlock.xz", include_bytes!("./data/sherlock.xz"));
    cmd.arg("--pre").arg("xzcat").arg("Sherlock").arg("sherlock.xz");
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});
// --pre-glob limits the preprocessor to matching file names; everything
// else is searched as-is. Output order is normalized via sort_lines.
rgtest!(preprocessing_glob, |dir: Dir, mut cmd: TestCommand| {
    if !cmd_exists("xzcat") {
        return;
    }
    dir.create("sherlock", SHERLOCK);
    dir.create_bytes("sherlock.xz", include_bytes!("./data/sherlock.xz"));
    cmd.args(&["--pre", "xzcat", "--pre-glob", "*.xz", "Sherlock"]);
    let expected = "\
sherlock.xz:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock.xz:be, to a very large extent, the result of luck. Sherlock Holmes
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(sort_lines(expected), sort_lines(&cmd.stdout()));
});
// The -z/--search-zip tests below rely on an external decompression
// helper binary, so each one is skipped when that binary isn't installed.
rgtest!(compressed_gzip, |dir: Dir, mut cmd: TestCommand| {
    if !cmd_exists("gzip") {
        return;
    }
    dir.create_bytes("sherlock.gz", include_bytes!("./data/sherlock.gz"));
    cmd.arg("-z").arg("Sherlock").arg("sherlock.gz");
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});
rgtest!(compressed_bzip2, |dir: Dir, mut cmd: TestCommand| {
    if !cmd_exists("bzip2") {
        return;
    }
    dir.create_bytes("sherlock.bz2", include_bytes!("./data/sherlock.bz2"));
    cmd.arg("-z").arg("Sherlock").arg("sherlock.bz2");
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});
rgtest!(compressed_xz, |dir: Dir, mut cmd: TestCommand| {
    if !cmd_exists("xz") {
        return;
    }
    dir.create_bytes("sherlock.xz", include_bytes!("./data/sherlock.xz"));
    cmd.arg("-z").arg("Sherlock").arg("sherlock.xz");
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});
rgtest!(compressed_lz4, |dir: Dir, mut cmd: TestCommand| {
    if !cmd_exists("lz4") {
        return;
    }
    dir.create_bytes("sherlock.lz4", include_bytes!("./data/sherlock.lz4"));
    cmd.arg("-z").arg("Sherlock").arg("sherlock.lz4");
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});
// .lzma files are decompressed via the xz helper, hence the xz check.
rgtest!(compressed_lzma, |dir: Dir, mut cmd: TestCommand| {
    if !cmd_exists("xz") {
        return;
    }
    dir.create_bytes("sherlock.lzma", include_bytes!("./data/sherlock.lzma"));
    cmd.arg("-z").arg("Sherlock").arg("sherlock.lzma");
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});
rgtest!(compressed_brotli, |dir: Dir, mut cmd: TestCommand| {
    if !cmd_exists("brotli") {
        return;
    }
    dir.create_bytes("sherlock.br", include_bytes!("./data/sherlock.br"));
    cmd.arg("-z").arg("Sherlock").arg("sherlock.br");
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});
rgtest!(compressed_zstd, |dir: Dir, mut cmd: TestCommand| {
    if !cmd_exists("zstd") {
        return;
    }
    dir.create_bytes("sherlock.zst", include_bytes!("./data/sherlock.zst"));
    cmd.arg("-z").arg("Sherlock").arg("sherlock.zst");
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});
rgtest!(compressed_uncompress, |dir: Dir, mut cmd: TestCommand| {
    if !cmd_exists("uncompress") {
        return;
    }
    dir.create_bytes("sherlock.Z", include_bytes!("./data/sherlock.Z"));
    cmd.arg("-z").arg("Sherlock").arg("sherlock.Z");
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
be, to a very large extent, the result of luck. Sherlock Holmes
";
    eqnice!(expected, cmd.stdout());
});
// A .gz file containing plain (non-gzip) data: the decompressor's
// failure must surface on stderr rather than being silently swallowed.
rgtest!(compressed_failing_gzip, |dir: Dir, mut cmd: TestCommand| {
    if !cmd_exists("gzip") {
        return;
    }
    dir.create("sherlock.gz", SHERLOCK);
    cmd.arg("-z").arg("Sherlock").arg("sherlock.gz");
    cmd.assert_non_empty_stderr();
});
// A NUL byte marks a file as binary when it is given explicitly on the
// command line: matches are reported via a summary message instead of
// the matching line. Exercised with both the mmap and no-mmap paths.
rgtest!(binary_convert, |dir: Dir, mut cmd: TestCommand| {
    dir.create("file", "foo\x00bar\nfoo\x00baz\n");
    cmd.arg("--no-mmap").arg("foo").arg("file");
    let expected = "\
binary file matches (found \"\\0\" byte around offset 3)
";
    eqnice!(expected, cmd.stdout());
});
rgtest!(binary_convert_mmap, |dir: Dir, mut cmd: TestCommand| {
    dir.create("file", "foo\x00bar\nfoo\x00baz\n");
    cmd.arg("--mmap").arg("foo").arg("file");
    let expected = "\
binary file matches (found \"\\0\" byte around offset 3)
";
    eqnice!(expected, cmd.stdout());
});
// When the binary file is only selected implicitly (via a -g glob), the
// search produces no match output and exits with an error status.
rgtest!(binary_quit, |dir: Dir, mut cmd: TestCommand| {
    dir.create("file", "foo\x00bar\nfoo\x00baz\n");
    cmd.arg("--no-mmap").arg("foo").arg("-gfile");
    cmd.assert_err();
});
rgtest!(binary_quit_mmap, |dir: Dir, mut cmd: TestCommand| {
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | true |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/tests/tests.rs | tests/tests.rs | // Macros useful for testing.
#[macro_use]
mod macros;
// Corpora.
mod hay;
// Utilities for making tests nicer to read and easier to write.
mod util;
// Tests for ripgrep's handling of binary files.
mod binary;
// Tests related to most features in ripgrep. If you're adding something new
// to ripgrep, tests should probably go in here.
mod feature;
// Tests for ripgrep's JSON format.
mod json;
// Miscellaneous tests grouped in a haphazard manner. Try not to add more.
mod misc;
// Tests for ripgrep's multiline search support.
mod multiline;
// Regression tests.
mod regression;
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/tests/regression.rs | tests/regression.rs | use crate::hay::SHERLOCK;
use crate::util::{Dir, TestCommand, sort_lines};
// See: https://github.com/BurntSushi/ripgrep/issues/16
rgtest!(r16, |dir: Dir, mut cmd: TestCommand| {
    // An ignored directory name (`ghi/`) must be skipped at every depth,
    // both at the top level and nested under def/.
    dir.create_dir(".git");
    dir.create(".gitignore", "ghi/");
    dir.create_dir("ghi");
    dir.create_dir("def/ghi");
    dir.create("ghi/toplevel.txt", "xyz");
    dir.create("def/ghi/subdir.txt", "xyz");
    cmd.arg("xyz").assert_err();
});
// See: https://github.com/BurntSushi/ripgrep/issues/25
rgtest!(r25, |dir: Dir, mut cmd: TestCommand| {
    // The root-anchored `/llvm/` pattern is relative to the .gitignore's
    // directory, so src/llvm must still be searched — both from the repo
    // root and when searching from inside src/.
    dir.create_dir(".git");
    dir.create(".gitignore", "/llvm/");
    dir.create_dir("src/llvm");
    dir.create("src/llvm/foo", "test");
    cmd.arg("test");
    eqnice!("src/llvm/foo:test\n", cmd.stdout());
    cmd.current_dir("src");
    eqnice!("llvm/foo:test\n", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/30
rgtest!(r30, |dir: Dir, mut cmd: TestCommand| {
    // The whitelist entry `!vendor/manifest` must override the preceding
    // `vendor/**` ignore rule.
    dir.create(".gitignore", "vendor/**\n!vendor/manifest");
    dir.create_dir("vendor");
    dir.create("vendor/manifest", "test");
    eqnice!("vendor/manifest:test\n", cmd.arg("test").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/49
rgtest!(r49, |dir: Dir, mut cmd: TestCommand| {
    // Nothing contains `xyz`, so the search must finish cleanly with a
    // no-match exit code despite the `foo/bar` ignore rule.
    dir.create(".gitignore", "foo/bar");
    dir.create_dir("test/foo/bar");
    dir.create("test/foo/bar/baz", "test");
    cmd.arg("xyz").assert_err();
});
// See: https://github.com/BurntSushi/ripgrep/issues/50
rgtest!(r50, |dir: Dir, mut cmd: TestCommand| {
    // As r49, with nested XXX/YYY directories at different depths: no
    // `xyz` anywhere, so the search must report no match cleanly.
    dir.create(".gitignore", "XXX/YYY/");
    dir.create_dir("abc/def/XXX/YYY");
    dir.create_dir("ghi/XXX/YYY");
    dir.create("abc/def/XXX/YYY/bar", "test");
    dir.create("ghi/XXX/YYY/bar", "test");
    cmd.arg("xyz").assert_err();
});
// See: https://github.com/BurntSushi/ripgrep/issues/64
rgtest!(r64, |dir: Dir, mut cmd: TestCommand| {
    // --files with an explicit directory argument must list only files
    // under that directory.
    dir.create_dir("dir");
    dir.create_dir("foo");
    dir.create("dir/abc", "");
    dir.create("foo/abc", "");
    eqnice!("foo/abc\n", cmd.arg("--files").arg("foo").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/65
rgtest!(r65, |dir: Dir, mut cmd: TestCommand| {
    // Ignoring the directory `a/` must also ignore every file inside it.
    dir.create_dir(".git");
    dir.create(".gitignore", "a/");
    dir.create_dir("a");
    dir.create("a/foo", "xyz");
    dir.create("a/bar", "xyz");
    cmd.arg("xyz").assert_err();
});
// See: https://github.com/BurntSushi/ripgrep/issues/67
rgtest!(r67, |dir: Dir, mut cmd: TestCommand| {
    // `/*` ignores everything at the root, but `!/dir` must whitelist
    // that one directory back in.
    dir.create_dir(".git");
    dir.create(".gitignore", "/*\n!/dir");
    dir.create_dir("dir");
    dir.create_dir("foo");
    dir.create("foo/bar", "test");
    dir.create("dir/bar", "test");
    eqnice!("dir/bar:test\n", cmd.arg("test").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/87
rgtest!(r87, |dir: Dir, mut cmd: TestCommand| {
    // A `**no-vcs**` pattern in the ignore file must not stop the
    // preceding `foo` rule from applying.
    dir.create_dir(".git");
    dir.create(".gitignore", "foo\n**no-vcs**");
    dir.create("foo", "test");
    cmd.arg("test").assert_err();
});
// See: https://github.com/BurntSushi/ripgrep/issues/90
rgtest!(r90, |dir: Dir, mut cmd: TestCommand| {
    // A whitelisted dotfile (`!.foo`) must be searched even though
    // hidden files are skipped by default.
    dir.create_dir(".git");
    dir.create(".gitignore", "!.foo");
    dir.create(".foo", "test");
    eqnice!(".foo:test\n", cmd.arg("test").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/93
rgtest!(r93, |dir: Dir, mut cmd: TestCommand| {
    // A pattern with bounded repetition groups (IPv4-ish) must match.
    dir.create("foo", "192.168.1.1");
    cmd.arg(r"(\d{1,3}\.){3}\d{1,3}");
    let expected = "foo:192.168.1.1\n";
    eqnice!(expected, cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/99
rgtest!(r99, |dir: Dir, mut cmd: TestCommand| {
    // With --heading, each file's matches are grouped under the file name
    // with a blank line between groups; -j1 plus sort_lines makes the
    // comparison order-independent. foo2 has no match and must not appear.
    dir.create("foo1", "test");
    dir.create("foo2", "zzz");
    dir.create("bar", "test");
    eqnice!(
        sort_lines("bar\ntest\n\nfoo1\ntest\n"),
        sort_lines(&cmd.arg("-j1").arg("--heading").arg("test").stdout())
    );
});
// See: https://github.com/BurntSushi/ripgrep/issues/105
rgtest!(r105_part1, |dir: Dir, mut cmd: TestCommand| {
    // Column numbers are 1-based: a match at byte offset 2 is column 3.
    dir.create("foo", "zztest");
    eqnice!("foo:1:3:zztest\n", cmd.arg("--vimgrep").arg("test").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/105
rgtest!(r105_part2, |dir: Dir, mut cmd: TestCommand| {
    // Same 1-based column expectation via --column.
    dir.create("foo", "zztest");
    eqnice!("foo:1:3:zztest\n", cmd.arg("--column").arg("test").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/127
rgtest!(r127, |dir: Dir, mut cmd: TestCommand| {
    // Set up a directory hierarchy like this:
    //
    //   .gitignore
    //   foo/
    //     sherlock
    //     watson
    //
    // Where `.gitignore` contains `foo/sherlock`.
    //
    // ripgrep should ignore 'foo/sherlock' giving us results only from
    // 'foo/watson' but on Windows ripgrep will include both 'foo/sherlock' and
    // 'foo/watson' in the search results.
    dir.create_dir(".git");
    dir.create(".gitignore", "foo/sherlock\n");
    dir.create_dir("foo");
    dir.create("foo/sherlock", SHERLOCK);
    dir.create("foo/watson", SHERLOCK);
    let expected = "\
foo/watson:For the Doctor Watsons of this world, as opposed to the Sherlock
foo/watson:be, to a very large extent, the result of luck. Sherlock Holmes
";
    // Use eqnice! — as every other test in this suite does — so a failure
    // prints a readable diff instead of assert_eq!'s escaped Debug output.
    eqnice!(expected, cmd.arg("Sherlock").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/128
rgtest!(r128, |dir: Dir, mut cmd: TestCommand| {
    // \x0b (vertical tab) must not count as a line terminator: only \n
    // does, putting the `x` match on line 5.
    dir.create_bytes("foo", b"01234567\x0b\n\x0b\n\x0b\n\x0b\nx");
    eqnice!("foo:5:x\n", cmd.arg("-n").arg("x").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/131
//
// TODO(burntsushi): Darwin doesn't like this test for some reason. Probably
// due to the weird file path.
#[cfg(not(target_os = "macos"))]
rgtest!(r131, |dir: Dir, mut cmd: TestCommand| {
    // Non-ASCII file names listed in .gitignore must be honored.
    dir.create_dir(".git");
    dir.create(".gitignore", "TopÑapa");
    dir.create("TopÑapa", "test");
    cmd.arg("test").assert_err();
});
// See: https://github.com/BurntSushi/ripgrep/issues/137
//
// TODO(burntsushi): Figure out how to make this test work on Windows. Right
// now it gives "access denied" errors when trying to create a file symlink.
// For now, disable test on Windows.
#[cfg(not(windows))]
rgtest!(r137, |dir: Dir, mut cmd: TestCommand| {
    // File symlinks passed explicitly on the command line are searched,
    // and each result is reported under the path as it was given.
    dir.create("sherlock", SHERLOCK);
    dir.link_file("sherlock", "sym1");
    dir.link_file("sherlock", "sym2");
    let expected = "\
./sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
./sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
sym1:For the Doctor Watsons of this world, as opposed to the Sherlock
sym1:be, to a very large extent, the result of luck. Sherlock Holmes
sym2:For the Doctor Watsons of this world, as opposed to the Sherlock
sym2:be, to a very large extent, the result of luck. Sherlock Holmes
";
    cmd.arg("-j1").arg("Sherlock").arg("./").arg("sym1").arg("sym2");
    eqnice!(expected, cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/156
rgtest!(r156, |dir: Dir, mut cmd: TestCommand| {
    // Every line of the haystack matches the #parse/#include pattern, so
    // with line numbers suppressed (-N) the output must reproduce the
    // input file exactly.
    let expected = r#"#parse('widgets/foo_bar_macros.vm')
#parse ( 'widgets/mobile/foo_bar_macros.vm' )
#parse ("widgets/foobarhiddenformfields.vm")
#parse ( "widgets/foo_bar_legal.vm" )
#include( 'widgets/foo_bar_tips.vm' )
#include('widgets/mobile/foo_bar_macros.vm')
#include ("widgets/mobile/foo_bar_resetpw.vm")
#parse('widgets/foo-bar-macros.vm')
#parse ( 'widgets/mobile/foo-bar-macros.vm' )
#parse ("widgets/foo-bar-hiddenformfields.vm")
#parse ( "widgets/foo-bar-legal.vm" )
#include( 'widgets/foo-bar-tips.vm' )
#include('widgets/mobile/foo-bar-macros.vm')
#include ("widgets/mobile/foo-bar-resetpw.vm")
"#;
    dir.create("testcase.txt", expected);
    cmd.arg("-N");
    cmd.arg(r#"#(?:parse|include)\s*\(\s*(?:"|')[./A-Za-z_-]+(?:"|')"#);
    cmd.arg("testcase.txt");
    eqnice!(expected, cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/184
rgtest!(r184, |dir: Dir, mut cmd: TestCommand| {
    // A `.*` ignore pattern must not swallow non-hidden paths like
    // foo/bar/baz, whether searching from the root or from foo/bar.
    dir.create(".gitignore", ".*");
    dir.create_dir("foo/bar");
    dir.create("foo/bar/baz", "test");
    cmd.arg("test");
    eqnice!("foo/bar/baz:test\n", cmd.stdout());
    cmd.current_dir("./foo/bar");
    eqnice!("baz:test\n", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/199
rgtest!(r199, |dir: Dir, mut cmd: TestCommand| {
    // --smart-case: a pattern whose literal text is all lowercase stays
    // case-insensitive even with \b anchors present, so `tEsT` matches.
    dir.create("foo", "tEsT");
    eqnice!("foo:tEsT\n", cmd.arg("--smart-case").arg(r"\btest\b").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/206
rgtest!(r206, |dir: Dir, mut cmd: TestCommand| {
    // -g globs must match file names inside subdirectories.
    dir.create_dir("foo");
    dir.create("foo/bar.txt", "test");
    cmd.arg("test").arg("-g").arg("*.txt");
    eqnice!("foo/bar.txt:test\n", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/210
#[cfg(unix)]
rgtest!(r210, |dir: Dir, mut cmd: TestCommand| {
    use std::ffi::OsStr;
    use std::os::unix::ffi::OsStrExt;
    // File names that are not valid UTF-8 must still be searchable and
    // printed byte-for-byte (raw-byte comparison on stdout).
    let badutf8 = OsStr::from_bytes(&b"foo\xffbar"[..]);
    // APFS does not support creating files with invalid UTF-8 bytes.
    // https://github.com/BurntSushi/ripgrep/issues/559
    if dir.try_create(badutf8, "test").is_ok() {
        cmd.arg("-H").arg("test").arg(badutf8);
        assert_eq!(b"foo\xffbar:test\n".to_vec(), cmd.output().stdout);
    }
});
// See: https://github.com/BurntSushi/ripgrep/issues/228
rgtest!(r228, |dir: Dir, mut cmd: TestCommand| {
    // Passing a directory to --ignore-file must be an error.
    dir.create_dir("foo");
    cmd.arg("--ignore-file").arg("foo").arg("test").assert_err();
});
// See: https://github.com/BurntSushi/ripgrep/issues/229
rgtest!(r229, |dir: Dir, mut cmd: TestCommand| {
    // -S (smart case): the uppercase inside the character class `[E]`
    // makes the search case-sensitive, so lowercase `economie` must not
    // match.
    dir.create("foo", "economie");
    cmd.arg("-S").arg("[E]conomie").assert_err();
});
// See: https://github.com/BurntSushi/ripgrep/issues/251
rgtest!(r251, |dir: Dir, mut cmd: TestCommand| {
    // -i must apply Unicode case folding (Cyrillic), not just ASCII.
    dir.create("foo", "привет\nПривет\nПрИвЕт");
    let expected = "foo:привет\nfoo:Привет\nfoo:ПрИвЕт\n";
    eqnice!(expected, cmd.arg("-i").arg("привет").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/256
#[cfg(not(windows))]
rgtest!(r256, |dir: Dir, mut cmd: TestCommand| {
    // A symlinked directory given explicitly on the command line is
    // followed, with results reported under the symlink's name.
    dir.create_dir("bar");
    dir.create("bar/baz", "test");
    dir.link_dir("bar", "foo");
    eqnice!("foo/baz:test\n", cmd.arg("test").arg("foo").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/256
#[cfg(not(windows))]
rgtest!(r256_j1, |dir: Dir, mut cmd: TestCommand| {
    // Same as r256, pinned to the single-threaded (-j1) code path.
    dir.create_dir("bar");
    dir.create("bar/baz", "test");
    dir.link_dir("bar", "foo");
    eqnice!("foo/baz:test\n", cmd.arg("-j1").arg("test").arg("foo").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/270
rgtest!(r270, |dir: Dir, mut cmd: TestCommand| {
    // A pattern starting with a hyphen must be accepted via -e/--regexp.
    dir.create("foo", "-test");
    cmd.args(&["-e", "-test"]);
    let expected = "foo:-test\n";
    eqnice!(expected, cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/279
rgtest!(r279, |dir: Dir, mut cmd: TestCommand| {
    // -q/--quiet suppresses all match output even when a match exists.
    dir.create("foo", "test");
    cmd.arg("-q").arg("test");
    eqnice!("", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/391
rgtest!(r391, |dir: Dir, mut cmd: TestCommand| {
    // A negated directory glob combined with an extension whitelist must
    // exclude .git/ contents and list only whitelisted extensions — so
    // only bar.py survives (lock has no whitelisted extension).
    dir.create_dir(".git");
    dir.create("lock", "");
    dir.create("bar.py", "");
    dir.create(".git/packed-refs", "");
    dir.create(".git/description", "");
    cmd.args(&[
        "--no-ignore",
        "--hidden",
        "--follow",
        "--files",
        "--glob",
        "!{.git,node_modules,plugged}/**",
        "--glob",
        "*.{js,json,php,md,styl,scss,sass,pug,html,config,py,cpp,c,go,hs}",
    ]);
    eqnice!("bar.py\n", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/405
rgtest!(r405, |dir: Dir, mut cmd: TestCommand| {
    // `!/foo/**` must anchor to the search root: only the top-level foo/
    // is excluded, not bar/foo/.
    dir.create_dir("foo/bar");
    dir.create_dir("bar/foo");
    dir.create("foo/bar/file1.txt", "test");
    dir.create("bar/foo/file2.txt", "test");
    cmd.arg("-g").arg("!/foo/**").arg("test");
    eqnice!("bar/foo/file2.txt:test\n", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/428
#[cfg(not(windows))]
rgtest!(r428_color_context_path, |dir: Dir, mut cmd: TestCommand| {
    // File paths must be colored on context lines too, not only on match
    // lines (note the `-` separator on the context line).
    dir.create("sherlock", "foo\nbar");
    cmd.args(&[
        "-A1",
        "-H",
        "--no-heading",
        "-N",
        "--colors=match:none",
        "--color=always",
        "--hyperlink-format=",
        "foo",
    ]);
    let expected = format!(
        "{colored_path}:foo\n{colored_path}-bar\n",
        colored_path =
            "\x1b\x5b\x30\x6d\x1b\x5b\x33\x35\x6dsherlock\x1b\x5b\x30\x6d"
    );
    eqnice!(expected, cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/428
rgtest!(r428_unrecognized_style, |dir: Dir, mut cmd: TestCommand| {
    // An empty style attribute in --colors must produce a helpful error
    // on stderr that lists the accepted style names.
    dir.create("file.txt", "Sherlock");
    cmd.arg("--colors=match:style:").arg("Sherlock");
    cmd.assert_err();
    let output = cmd.raw_output();
    let stderr = String::from_utf8_lossy(&output.stderr);
    let expected = "\
rg: error parsing flag --colors: \
unrecognized style attribute ''. Choose from: nobold, bold, nointense, \
intense, nounderline, underline, noitalic, italic.
";
    eqnice!(expected, stderr);
});
// See: https://github.com/BurntSushi/ripgrep/issues/451
rgtest!(r451_only_matching_as_in_issue, |dir: Dir, mut cmd: TestCommand| {
    // -o/--only-matching must print each match on its own line instead of
    // repeating the whole matching line.
    dir.create("digits.txt", "1 2 3\n");
    cmd.arg("--only-matching").arg(r"[0-9]+").arg("digits.txt");
    let expected = "\
1
2
3
";
    eqnice!(expected, cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/451
rgtest!(r451_only_matching, |dir: Dir, mut cmd: TestCommand| {
    // With --column, every -o match carries its own line:column position.
    dir.create("digits.txt", "1 2 3\n123\n");
    cmd.args(&["--only-matching", "--column", r"[0-9]", "digits.txt"]);
    let expected = "\
1:1:1
1:3:2
1:5:3
2:1:1
2:2:2
2:3:3
";
    eqnice!(expected, cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/483
rgtest!(r483_matching_no_stdout, |dir: Dir, mut cmd: TestCommand| {
    // --quiet with --files must print nothing even when the glob matches.
    dir.create("file.py", "");
    cmd.arg("--quiet").arg("--files").arg("--glob").arg("*.py");
    eqnice!("", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/483
rgtest!(r483_non_matching_exit_code, |dir: Dir, mut cmd: TestCommand| {
    // --quiet with --files and a non-matching glob must exit with an
    // error status so callers can still detect "no files".
    dir.create("file.rs", "");
    cmd.arg("--quiet").arg("--files").arg("--glob").arg("*.py");
    cmd.assert_err();
});
// See: https://github.com/BurntSushi/ripgrep/issues/493
rgtest!(r493, |dir: Dir, mut cmd: TestCommand| {
    // -o must print exactly the matched text, including the spaces that
    // the pattern itself captures around `'re`.
    dir.create("input.txt", "peshwaship 're seminomata");
    cmd.arg("-o").arg(r"\b 're \b").arg("input.txt");
    // eqnice! (used throughout this suite) prints a readable diff on
    // failure, unlike a bare assert_eq! on whitespace-heavy strings.
    eqnice!(" 're \n", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/506
rgtest!(r506_word_not_parenthesized, |dir: Dir, mut cmd: TestCommand| {
    // -w must apply word boundaries around the whole alternation
    // `min|max`, not just its first and last branches — so `minimum`,
    // `amin`, `maximum` and `amax` must not match.
    dir.create("wb.txt", "min minimum amin\nmax maximum amax");
    cmd.arg("-w").arg("-o").arg("min|max").arg("wb.txt");
    eqnice!("min\nmax\n", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/553
rgtest!(r553_switch, |dir: Dir, mut cmd: TestCommand| {
    // Repeating a boolean switch (-i) must be accepted and not change the
    // result.
    dir.create("sherlock", SHERLOCK);
    let expected = "\
sherlock:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:be, to a very large extent, the result of luck. Sherlock Holmes
";
    cmd.arg("-i").arg("sherlock");
    eqnice!(expected, cmd.stdout());
    // Repeat the `i` flag to make sure everything still works.
    eqnice!(expected, cmd.arg("-i").stdout());
});
rgtest!(r553_flag, |dir: Dir, mut cmd: TestCommand| {
    // Repeating a value-taking flag (-C) must use the last value given:
    // -C1 first, then -C0 overrides it and drops the context lines.
    dir.create("sherlock", SHERLOCK);
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, success in the province of detective work must always
--
but Doctor Watson has to have it taken out for him and dusted,
and exhibited clearly, with a label attached.
";
    cmd.arg("-C").arg("1").arg(r"world|attached").arg("sherlock");
    eqnice!(expected, cmd.stdout());
    let expected = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
and exhibited clearly, with a label attached.
";
    eqnice!(expected, cmd.arg("-C").arg("0").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/568
rgtest!(r568_leading_hyphen_option_args, |dir: Dir, mut cmd: TestCommand| {
    // Flag values that begin with a hyphen must parse in both attached
    // (-e-baz, -rni) and detached (-e -baz, -r -n) forms.
    dir.create("file", "foo bar -baz\n");
    cmd.arg("-e-baz").arg("-e").arg("-baz").arg("file");
    eqnice!("foo bar -baz\n", cmd.stdout());
    let mut cmd = dir.command();
    cmd.arg("-rni").arg("bar").arg("file");
    eqnice!("foo ni -baz\n", cmd.stdout());
    let mut cmd = dir.command();
    cmd.arg("-r").arg("-n").arg("-i").arg("bar").arg("file");
    eqnice!("foo -n -baz\n", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/599
//
// This test used to check that we emitted color escape sequences even for
// empty matches, but with the addition of the JSON output format, clients no
// longer need to rely on escape sequences to parse matches. Therefore, we no
// longer emit useless escape sequences.
rgtest!(r599, |dir: Dir, mut cmd: TestCommand| {
    dir.create("input.txt", "\n\ntest\n");
    cmd.args(&[
        "--color",
        "ansi",
        "--colors",
        "path:none",
        "--colors",
        "line:none",
        "--colors",
        "match:fg:red",
        "--colors",
        "match:style:nobold",
        "--line-number",
        r"^$",
        "input.txt",
    ]);

    // The expected output previously embedded raw ESC (0x1B) control bytes
    // directly in the source literal; spell them as \x1b escapes instead so
    // the source stays printable. The string value is unchanged: each line
    // number is wrapped in a pair of bare reset sequences and the empty
    // match itself gets no escape sequences at all.
    let expected =
        "\x1b[0m1\x1b[0m:\n\x1b[0m2\x1b[0m:\n";
    eqnice_repr!(expected, cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/693
// A count (-c) must not be perturbed by a context flag (-C).
rgtest!(r693_context_in_contextless_mode, |dir: Dir, mut cmd: TestCommand| {
    dir.create("foo", "xyz\n");
    dir.create("bar", "xyz\n");
    cmd.arg("-C1").arg("-c").arg("--sort-files").arg("xyz");
    eqnice!("bar:1\nfoo:1\n", cmd.stdout());
});

// See: https://github.com/BurntSushi/ripgrep/issues/807
// A gitignore rule for `.a/b` must not also ignore the sibling `.a/c`.
rgtest!(r807, |dir: Dir, mut cmd: TestCommand| {
    dir.create_dir(".git");
    dir.create(".gitignore", ".a/b");
    dir.create_dir(".a/b");
    dir.create_dir(".a/c");
    dir.create(".a/b/file", "test");
    dir.create(".a/c/file", "test");
    eqnice!(".a/c/file:test\n", cmd.arg("--hidden").arg("test").stdout());
});

// See: https://github.com/BurntSushi/ripgrep/pull/2711
//
// Note that this isn't a regression test. In particular, this didn't fail
// with ripgrep 14.1.1. I couldn't figure out how to turn what the OP gave me
// into a failing test.
rgtest!(r2711, |dir: Dir, _cmd: TestCommand| {
    dir.create_dir("a/b");
    dir.create("a/.ignore", ".foo");
    dir.create("a/b/.foo", "");
    // The ignore rules must behave the same no matter how the search root is
    // spelled (implicit CWD, `./`, `a`, `./a`) or what the CWD is.
    {
        let mut cmd = dir.command();
        eqnice!("a/.ignore\n", cmd.arg("--hidden").arg("--files").stdout());
    }
    {
        let mut cmd = dir.command();
        eqnice!(
            "./a/.ignore\n",
            cmd.arg("--hidden").arg("--files").arg("./").stdout()
        );
    }
    {
        let mut cmd = dir.command();
        eqnice!(
            "a/.ignore\n",
            cmd.arg("--hidden").arg("--files").arg("a").stdout()
        );
    }
    {
        // Searching the ignored directory itself finds nothing.
        let mut cmd = dir.command();
        cmd.arg("--hidden").arg("--files").arg("a/b").assert_err();
    }
    {
        let mut cmd = dir.command();
        eqnice!(
            "./a/.ignore\n",
            cmd.arg("--hidden").arg("--files").arg("./a").stdout()
        );
    }
    {
        let mut cmd = dir.command();
        cmd.current_dir("a");
        eqnice!(".ignore\n", cmd.arg("--hidden").arg("--files").stdout());
    }
    {
        let mut cmd = dir.command();
        cmd.current_dir("a/b");
        cmd.arg("--hidden").arg("--files").assert_err();
    }
    {
        let mut cmd = dir.command();
        cmd.current_dir("./a");
        eqnice!(".ignore\n", cmd.arg("--hidden").arg("--files").stdout());
    }
});
// See: https://github.com/BurntSushi/ripgrep/issues/829
// An absolute `.ignore` rule (`/a/b`) must apply regardless of whether the
// search starts at the root, at `a`, or from inside `a`.
rgtest!(r829_original, |dir: Dir, _cmd: TestCommand| {
    dir.create_dir("a/b");
    dir.create(".ignore", "/a/b");
    dir.create("a/b/test.txt", "Sample text");
    {
        let mut cmd = dir.command();
        cmd.args(&["Sample"]).assert_err();
    }
    {
        let mut cmd = dir.command();
        cmd.args(&["Sample", "a"]).assert_err();
    }
    {
        let mut cmd = dir.command();
        cmd.current_dir("a");
        cmd.args(&["Sample"]).assert_err();
    }
});

// See: https://github.com/BurntSushi/ripgrep/issues/2731
// A whitelist rule (`!/some_dir/build/`) must override the earlier `build/`
// rule for every way of spelling the search root.
rgtest!(r829_2731, |dir: Dir, _cmd: TestCommand| {
    dir.create_dir("some_dir/build");
    dir.create("some_dir/build/foo", "string");
    dir.create(".ignore", "build/\n!/some_dir/build/");
    {
        let mut cmd = dir.command();
        eqnice!("some_dir/build/foo\n", cmd.arg("-l").arg("string").stdout());
    }
    {
        let mut cmd = dir.command();
        eqnice!(
            "some_dir/build/foo\n",
            cmd.arg("-l").arg("string").arg("some_dir").stdout()
        );
    }
    {
        let mut cmd = dir.command();
        eqnice!(
            "./some_dir/build/foo\n",
            cmd.arg("-l").arg("string").arg("./some_dir").stdout()
        );
    }
    {
        let mut cmd = dir.command();
        eqnice!(
            "some_dir/build/foo\n",
            cmd.arg("-l").arg("string").arg("some_dir/build").stdout()
        );
    }
    {
        let mut cmd = dir.command();
        eqnice!(
            "./some_dir/build/foo\n",
            cmd.arg("-l").arg("string").arg("./some_dir/build").stdout()
        );
    }
});

// See: https://github.com/BurntSushi/ripgrep/issues/2747
// A single-star rule (`/a/*/b`) must match only one path component:
// `a/c/b` is ignored but `a/src/f/b` is not.
rgtest!(r829_2747, |dir: Dir, _cmd: TestCommand| {
    dir.create_dir("a/c/b");
    dir.create_dir("a/src/f/b");
    dir.create("a/c/b/foo", "");
    dir.create("a/src/f/b/foo", "");
    dir.create(".ignore", "/a/*/b");
    {
        let mut cmd = dir.command();
        eqnice!("a/src/f/b/foo\n", cmd.arg("--files").stdout());
    }
    {
        let mut cmd = dir.command();
        eqnice!("a/src/f/b/foo\n", cmd.arg("--files").arg("a/src").stdout());
    }
    {
        let mut cmd = dir.command();
        cmd.current_dir("a/src");
        eqnice!("f/b/foo\n", cmd.arg("--files").stdout());
    }
});

// See: https://github.com/BurntSushi/ripgrep/issues/2778
// `/parent/*.txt` must ignore files directly under `parent` but not files
// in deeper subdirectories, including when searching from inside `parent`.
rgtest!(r829_2778, |dir: Dir, _cmd: TestCommand| {
    dir.create_dir("parent/subdir");
    dir.create(".ignore", "/parent/*.txt");
    dir.create("parent/ignore-me.txt", "");
    dir.create("parent/subdir/dont-ignore-me.txt", "");
    {
        let mut cmd = dir.command();
        eqnice!(
            "parent/subdir/dont-ignore-me.txt\n",
            cmd.arg("--files").stdout()
        );
    }
    {
        let mut cmd = dir.command();
        cmd.current_dir("parent");
        eqnice!("subdir/dont-ignore-me.txt\n", cmd.arg("--files").stdout());
    }
});

// See: https://github.com/BurntSushi/ripgrep/issues/2836
// A trailing-slash rule (`/testdir/sub/sub2/`) must still apply when the
// search starts from inside `testdir`.
rgtest!(r829_2836, |dir: Dir, _cmd: TestCommand| {
    dir.create_dir("testdir/sub/sub2");
    dir.create(".ignore", "/testdir/sub/sub2/\n");
    dir.create("testdir/sub/sub2/foo", "");
    {
        let mut cmd = dir.command();
        cmd.arg("--files").assert_err();
    }
    {
        let mut cmd = dir.command();
        cmd.current_dir("testdir");
        cmd.arg("--files").assert_err();
    }
});
// See: https://github.com/BurntSushi/ripgrep/pull/2933
// Searching from inside a directory excluded by a parent `.ignore` rule
// must find nothing.
rgtest!(r829_2933, |dir: Dir, mut cmd: TestCommand| {
    dir.create_dir("testdir/sub/sub2");
    dir.create(".ignore", "/testdir/sub/sub2/");
    dir.create("testdir/sub/sub2/testfile", "needle");
    cmd.current_dir("testdir");
    cmd.arg("--files-with-matches").arg("needle").assert_err();
});
// See: https://github.com/BurntSushi/ripgrep/issues/900
// An empty pattern file (-f) should be rejected with an error.
rgtest!(r900, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    dir.create("pat", "");
    cmd.arg("-fpat").arg("sherlock").assert_err();
});

// See: https://github.com/BurntSushi/ripgrep/issues/1064
// A pattern with a capture group must still match when no replacement is
// requested.
rgtest!(r1064, |dir: Dir, mut cmd: TestCommand| {
    dir.create("input", "abc");
    eqnice!("input:abc\n", cmd.arg("a(.*c)").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/1098
// (The link previously said 1174 — a copy-paste slip; this test is r1098.)
// A doubled star inside a component (`a**b`) must behave like a single `*`,
// so `afoob` is ignored.
rgtest!(r1098, |dir: Dir, mut cmd: TestCommand| {
    dir.create_dir(".git");
    dir.create(".gitignore", "a**b");
    dir.create("afoob", "test");
    cmd.arg("test").assert_err();
});
// See: https://github.com/BurntSushi/ripgrep/issues/1130
// --files-with-matches and --files-without-match must work with an explicit
// file path argument.
rgtest!(r1130, |dir: Dir, mut cmd: TestCommand| {
    dir.create("foo", "test");
    eqnice!(
        "foo\n",
        cmd.arg("--files-with-matches").arg("test").arg("foo").stdout()
    );

    let mut cmd = dir.command();
    eqnice!(
        "foo\n",
        cmd.arg("--files-without-match").arg("nada").arg("foo").stdout()
    );
});

// See: https://github.com/BurntSushi/ripgrep/issues/1159
// An unrecognized flag is a usage error: exit code 2.
rgtest!(r1159_invalid_flag, |_: Dir, mut cmd: TestCommand| {
    cmd.arg("--wat").assert_exit_code(2);
});

// See: https://github.com/BurntSushi/ripgrep/issues/1159
// Exit codes: 0 = match found, 1 = no match, 2 = error. --quiet with a match
// suppresses a later error's exit code, but --quiet without a match doesn't.
rgtest!(r1159_exit_status, |dir: Dir, _: TestCommand| {
    dir.create("foo", "test");

    // search with a match gets 0 exit status.
    let mut cmd = dir.command();
    cmd.arg("test").assert_exit_code(0);

    // search with --quiet and a match gets 0 exit status.
    let mut cmd = dir.command();
    cmd.arg("-q").arg("test").assert_exit_code(0);

    // search with a match and an error gets 2 exit status.
    let mut cmd = dir.command();
    cmd.arg("test").arg("no-file").assert_exit_code(2);

    // search with a match in --quiet mode and an error gets 0 exit status.
    let mut cmd = dir.command();
    cmd.arg("-q").arg("test").arg("foo").arg("no-file").assert_exit_code(0);

    // search with no match gets 1 exit status.
    let mut cmd = dir.command();
    cmd.arg("nada").assert_exit_code(1);

    // search with --quiet and no match gets 1 exit status.
    let mut cmd = dir.command();
    cmd.arg("-q").arg("nada").assert_exit_code(1);

    // search with no match and an error gets 2 exit status.
    let mut cmd = dir.command();
    cmd.arg("nada").arg("no-file").assert_exit_code(2);

    // search with no match in --quiet mode and an error gets 2 exit status.
    let mut cmd = dir.command();
    cmd.arg("-q").arg("nada").arg("foo").arg("no-file").assert_exit_code(2);
});
// See: https://github.com/BurntSushi/ripgrep/issues/1163
// A UTF-8 BOM must be skipped so that `^` can still match at the start of
// the first line.
rgtest!(r1163, |dir: Dir, mut cmd: TestCommand| {
    dir.create("bom.txt", "\u{FEFF}test123\ntest123");
    eqnice!(
        "bom.txt:test123\nbom.txt:test123\n",
        cmd.arg("^test123").stdout()
    );
});

// See: https://github.com/BurntSushi/ripgrep/issues/1164
// --ignore-file-case-insensitive makes `myfile` in .gitignore match MYFILE;
// the corresponding --no-* flag restores case-sensitive matching.
rgtest!(r1164, |dir: Dir, mut cmd: TestCommand| {
    dir.create_dir(".git");
    dir.create(".gitignore", "myfile");
    dir.create("MYFILE", "test");
    cmd.arg("--ignore-file-case-insensitive").arg("test").assert_err();
    eqnice!(
        "MYFILE:test\n",
        cmd.arg("--no-ignore-file-case-insensitive").stdout()
    );
});

// See: https://github.com/BurntSushi/ripgrep/issues/1173
// A bare `**` in .gitignore must ignore everything.
rgtest!(r1173, |dir: Dir, mut cmd: TestCommand| {
    dir.create_dir(".git");
    dir.create(".gitignore", "**");
    dir.create("foo", "test");
    cmd.arg("test").assert_err();
});

// See: https://github.com/BurntSushi/ripgrep/issues/1174
// `**/**/*` must ignore files at any depth, including one level down.
rgtest!(r1174, |dir: Dir, mut cmd: TestCommand| {
    dir.create_dir(".git");
    dir.create(".gitignore", "**/**/*");
    dir.create_dir("a");
    dir.create("a/foo", "test");
    cmd.arg("test").assert_err();
});

// See: https://github.com/BurntSushi/ripgrep/issues/1176
// -F must treat patterns read from a file (-f) as literals too.
rgtest!(r1176_literal_file, |dir: Dir, mut cmd: TestCommand| {
    dir.create("patterns", "foo(bar\n");
    dir.create("test", "foo(bar");
    eqnice!(
        "foo(bar\n",
        cmd.arg("-F").arg("-f").arg("patterns").arg("test").stdout()
    );
});

// See: https://github.com/BurntSushi/ripgrep/issues/1176
// -x must anchor patterns read from a file (-f) to whole lines.
rgtest!(r1176_line_regex, |dir: Dir, mut cmd: TestCommand| {
    dir.create("patterns", "foo\n");
    dir.create("test", "foobar\nfoo\nbarfoo\n");
    eqnice!(
        "foo\n",
        cmd.arg("-x").arg("-f").arg("patterns").arg("test").stdout()
    );
});

// See: https://github.com/BurntSushi/ripgrep/issues/1203
// Regression in the reverse-suffix-literal optimization: both suffix
// lengths must find the match.
rgtest!(r1203_reverse_suffix_literal, |dir: Dir, _: TestCommand| {
    dir.create("test", "153.230000\n");

    let mut cmd = dir.command();
    eqnice!("153.230000\n", cmd.arg(r"\d\d\d00").arg("test").stdout());

    let mut cmd = dir.command();
    eqnice!("153.230000\n", cmd.arg(r"\d\d\d000").arg("test").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/1223
rgtest!(
r1223_no_dir_check_for_default_path,
|dir: Dir, mut cmd: TestCommand| {
dir.create_dir("-");
dir.create("a.json", "{}");
dir.create("a.txt", "some text");
eqnice!(
"a.json\na.txt\n",
sort_lines(&cmd.arg("a").pipe(b"a.json\na.txt"))
);
}
);
// See: https://github.com/BurntSushi/ripgrep/issues/1259
rgtest!(r1259_drop_last_byte_nonl, |dir: Dir, mut cmd: TestCommand| {
dir.create("patterns-nonl", "[foo]");
dir.create("patterns-nl", "[foo]\n");
dir.create("test", "fz");
eqnice!("fz\n", cmd.arg("-f").arg("patterns-nonl").arg("test").stdout());
cmd = dir.command();
eqnice!("fz\n", cmd.arg("-f").arg("patterns-nl").arg("test").stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/1311
rgtest!(r1311_multi_line_term_replace, |dir: Dir, mut cmd: TestCommand| {
dir.create("input", "hello\nworld\n");
eqnice!(
"1:hello?world?\n",
cmd.args(&["-U", "-r?", "-n", "\n", "input"]).stdout()
);
});
// See: https://github.com/BurntSushi/ripgrep/issues/1319
// Regression in literal extraction for patterns mixing a long literal
// prefix with a bounded character-class repetition.
rgtest!(r1319, |dir: Dir, mut cmd: TestCommand| {
    dir.create("input", "CCAGCTACTCGGGAGGCTGAGGCTGGAGGATCGCTTGAGTCCAGGAGTTC");
    eqnice!(
        "input:CCAGCTACTCGGGAGGCTGAGGCTGGAGGATCGCTTGAGTCCAGGAGTTC\n",
        cmd.arg("TTGAGTCCAGGAG[ATCG]{2}C").stdout()
    );
});

// See: https://github.com/BurntSushi/ripgrep/issues/1332
// NOTE(review): the link says 1332 but the test name says 1334 — confirm
// which issue this covers.
rgtest!(r1334_invert_empty_patterns, |dir: Dir, _cmd: TestCommand| {
    dir.create("zero-patterns", "");
    dir.create("one-pattern", "\n");
    dir.create("haystack", "one\ntwo\nthree\n");

    // zero patterns matches nothing
    {
        let mut cmd = dir.command();
        cmd.arg("-f").arg("zero-patterns").arg("haystack").assert_err();
    }
    // one pattern that matches empty string matches everything
    {
        let mut cmd = dir.command();
        eqnice!(
            "one\ntwo\nthree\n",
            cmd.arg("-f").arg("one-pattern").arg("haystack").stdout()
        );
    }
    // inverting zero patterns matches everything
    // (This is the regression. ripgrep used to match nothing because of an
    // incorrect optimization.)
    {
        let mut cmd = dir.command();
        eqnice!(
            "one\ntwo\nthree\n",
            cmd.arg("-vf").arg("zero-patterns").arg("haystack").stdout()
        );
    }
    // inverting one pattern that matches empty string matches nothing
    {
        let mut cmd = dir.command();
        cmd.arg("-vf").arg("one-pattern").arg("haystack").assert_err();
    }
});

// See: https://github.com/BurntSushi/ripgrep/issues/1334
// Many duplicate literal patterns containing `.` and `/` must still match.
rgtest!(r1334_crazy_literals, |dir: Dir, mut cmd: TestCommand| {
    dir.create("patterns", &"1.208.0.0/12\n".repeat(40));
    dir.create("corpus", "1.208.0.0/12\n");
    eqnice!(
        "1.208.0.0/12\n",
        cmd.arg("-Ff").arg("patterns").arg("corpus").stdout()
    );
});

// See: https://github.com/BurntSushi/ripgrep/issues/1380
// -m1 must stop after the first matching line but still print its -A
// trailing context.
rgtest!(r1380, |dir: Dir, mut cmd: TestCommand| {
    dir.create(
        "foo",
        "\
a
b
c
d
e
d
e
d
e
d
e
",
    );

    eqnice!("d\ne\nd\n", cmd.args(&["-A2", "-m1", "d", "foo"]).stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/1389
// Passing a symlinked directory explicitly must traverse it and report
// results under the symlink's name.
rgtest!(r1389_bad_symlinks_no_biscuit, |dir: Dir, mut cmd: TestCommand| {
    dir.create_dir("mydir");
    dir.create("mydir/file.txt", "test");
    dir.link_dir("mydir", "mylink");

    let stdout = cmd
        .args(&["test", "--no-ignore", "--sort", "path", "mylink"])
        .stdout();
    eqnice!("mylink/file.txt:test\n", stdout);
});

// See: https://github.com/BurntSushi/ripgrep/issues/1401
// -o with a negative look-ahead must not truncate or drop matches.
rgtest!(r1401_look_ahead_only_matching_1, |dir: Dir, mut cmd: TestCommand| {
    // Only PCRE2 supports look-around.
    if !dir.is_pcre2() {
        return;
    }
    dir.create("ip.txt", "foo 42\nxoyz\ncat\tdog\n");
    cmd.args(&["-No", r".*o(?!.*\s)", "ip.txt"]);
    eqnice!("xo\ncat\tdo\n", cmd.stdout());

    let mut cmd = dir.command();
    cmd.args(&["-No", r".*o(?!.*[ \t])", "ip.txt"]);
    eqnice!("xo\ncat\tdo\n", cmd.stdout());
});

// See: https://github.com/BurntSushi/ripgrep/issues/1401
// Same as above, but with a final line lacking a trailing newline.
rgtest!(r1401_look_ahead_only_matching_2, |dir: Dir, mut cmd: TestCommand| {
    // Only PCRE2 supports look-around.
    if !dir.is_pcre2() {
        return;
    }
    dir.create("ip.txt", "foo 42\nxoyz\ncat\tdog\nfoo");
    cmd.args(&["-No", r".*o(?!.*\s)", "ip.txt"]);
    eqnice!("xo\ncat\tdo\nfoo\n", cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/1412
rgtest!(r1412_look_behind_no_replacement, |dir: Dir, mut cmd: TestCommand| {
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | true |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/tests/json.rs | tests/json.rs | use std::time;
use serde_derive::Deserialize;
use serde_json as json;
use crate::hay::{SHERLOCK, SHERLOCK_CRLF};
use crate::util::{Dir, TestCommand};
/// A single JSON Lines message emitted by `rg --json`. The `type` field
/// selects the variant and `data` carries its payload.
#[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
#[serde(tag = "type", content = "data")]
#[serde(rename_all = "snake_case")]
enum Message {
    Begin(Begin),
    End(End),
    Match(Match),
    Context(Context),
    Summary(Summary),
}
impl Message {
    /// Returns the inner `Begin` payload, panicking on any other variant.
    fn unwrap_begin(&self) -> Begin {
        match self {
            Message::Begin(x) => x.clone(),
            x => panic!("expected Message::Begin but got {x:?}"),
        }
    }

    /// Returns the inner `End` payload, panicking on any other variant.
    fn unwrap_end(&self) -> End {
        match self {
            Message::End(x) => x.clone(),
            x => panic!("expected Message::End but got {x:?}"),
        }
    }

    /// Returns the inner `Match` payload, panicking on any other variant.
    fn unwrap_match(&self) -> Match {
        match self {
            Message::Match(x) => x.clone(),
            x => panic!("expected Message::Match but got {x:?}"),
        }
    }

    /// Returns the inner `Context` payload, panicking on any other variant.
    fn unwrap_context(&self) -> Context {
        match self {
            Message::Context(x) => x.clone(),
            x => panic!("expected Message::Context but got {x:?}"),
        }
    }

    /// Returns the inner `Summary` payload, panicking on any other variant.
    fn unwrap_summary(&self) -> Summary {
        match self {
            Message::Summary(x) => x.clone(),
            x => panic!("expected Message::Summary but got {x:?}"),
        }
    }
}
/// Payload of a `begin` message: emitted when a search of one haystack
/// starts.
#[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
struct Begin {
    path: Option<Data>,
}

/// Payload of an `end` message: emitted when a search of one haystack
/// finishes, carrying per-haystack statistics.
#[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
struct End {
    path: Option<Data>,
    binary_offset: Option<u64>,
    stats: Stats,
}

/// Payload of the final `summary` message, with aggregate statistics.
#[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
struct Summary {
    elapsed_total: Duration,
    stats: Stats,
}

/// Payload of a `match` message: one or more matching lines plus the
/// submatch spans within them.
#[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
struct Match {
    path: Option<Data>,
    lines: Data,
    line_number: Option<u64>,
    absolute_offset: u64,
    submatches: Vec<SubMatch>,
}

/// Payload of a `context` message: a non-matching line printed because of
/// -A/-B/-C context flags.
#[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
struct Context {
    path: Option<Data>,
    lines: Data,
    line_number: Option<u64>,
    absolute_offset: u64,
    submatches: Vec<SubMatch>,
}

/// One match span within a `Match`/`Context` line. `start`/`end` are byte
/// offsets relative to the beginning of `lines`.
#[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
struct SubMatch {
    #[serde(rename = "match")]
    m: Data,
    replacement: Option<Data>,
    start: usize,
    end: usize,
}
/// Arbitrary data from ripgrep's JSON output: UTF-8 text when possible,
/// otherwise base64-encoded bytes.
#[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
#[serde(untagged)]
enum Data {
    Text { text: String },
    // This variant is used when the data isn't valid UTF-8. The bytes are
    // base64 encoded, so using a String here is OK.
    Bytes { bytes: String },
}
impl Data {
    /// Convenience constructor for the UTF-8 text variant.
    fn text(s: &str) -> Data {
        Data::Text { text: s.to_owned() }
    }

    /// Convenience constructor for the base64-encoded bytes variant. The
    /// caller supplies the data already base64 encoded.
    fn bytes(s: &str) -> Data {
        Data::Bytes { bytes: s.to_owned() }
    }
}
/// Search statistics as reported in `end` and `summary` messages.
#[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
struct Stats {
    elapsed: Duration,
    searches: u64,
    searches_with_match: u64,
    bytes_searched: u64,
    bytes_printed: u64,
    matched_lines: u64,
    matches: u64,
}

/// A duration as serialized by ripgrep: the machine-readable fields of
/// `std::time::Duration` (flattened) plus a human-readable rendering.
#[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
struct Duration {
    #[serde(flatten)]
    duration: time::Duration,
    human: String,
}
/// Decode JSON Lines into a Vec<Message>. If there was an error decoding,
/// this function panics.
fn json_decode(jsonlines: &str) -> Vec<Message> {
    // Each line is an independent JSON document; collecting into Result
    // short-circuits (and panics) on the first malformed line.
    json::Deserializer::from_str(jsonlines)
        .into_iter()
        .collect::<Result<Vec<Message>, _>>()
        .unwrap()
}
// Sanity check of the full message sequence for a simple search with one
// line of leading context: begin, context, match, end, summary.
rgtest!(basic, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    cmd.arg("--json").arg("-B1").arg("Sherlock Holmes").arg("sherlock");

    let msgs = json_decode(&cmd.stdout());

    assert_eq!(
        msgs[0].unwrap_begin(),
        Begin { path: Some(Data::text("sherlock")) }
    );
    assert_eq!(
        msgs[1].unwrap_context(),
        Context {
            path: Some(Data::text("sherlock")),
            lines: Data::text(
                "Holmeses, success in the province of \
                 detective work must always\n",
            ),
            line_number: Some(2),
            absolute_offset: 65,
            submatches: vec![],
        }
    );
    assert_eq!(
        msgs[2].unwrap_match(),
        Match {
            path: Some(Data::text("sherlock")),
            lines: Data::text(
                "be, to a very large extent, the result of luck. \
                 Sherlock Holmes\n",
            ),
            line_number: Some(3),
            absolute_offset: 129,
            submatches: vec![SubMatch {
                m: Data::text("Sherlock Holmes"),
                replacement: None,
                start: 48,
                end: 63,
            },],
        }
    );
    assert_eq!(msgs[3].unwrap_end().path, Some(Data::text("sherlock")));
    assert_eq!(msgs[3].unwrap_end().binary_offset, None);
    assert_eq!(msgs[4].unwrap_summary().stats.searches_with_match, 1);
    assert_eq!(msgs[4].unwrap_summary().stats.bytes_printed, 494);
});

// Same as `basic`, but with -r: the submatch must carry the rendered
// replacement text.
rgtest!(replacement, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    cmd.arg("--json")
        .arg("-B1")
        .arg("Sherlock Holmes")
        .args(["-r", "John Watson"])
        .arg("sherlock");

    let msgs = json_decode(&cmd.stdout());

    assert_eq!(
        msgs[0].unwrap_begin(),
        Begin { path: Some(Data::text("sherlock")) }
    );
    assert_eq!(
        msgs[1].unwrap_context(),
        Context {
            path: Some(Data::text("sherlock")),
            lines: Data::text(
                "Holmeses, success in the province of \
                 detective work must always\n",
            ),
            line_number: Some(2),
            absolute_offset: 65,
            submatches: vec![],
        }
    );
    assert_eq!(
        msgs[2].unwrap_match(),
        Match {
            path: Some(Data::text("sherlock")),
            lines: Data::text(
                "be, to a very large extent, the result of luck. \
                 Sherlock Holmes\n",
            ),
            line_number: Some(3),
            absolute_offset: 129,
            submatches: vec![SubMatch {
                m: Data::text("Sherlock Holmes"),
                replacement: Some(Data::text("John Watson")),
                start: 48,
                end: 63,
            },],
        }
    );
    assert_eq!(msgs[3].unwrap_end().path, Some(Data::text("sherlock")));
    assert_eq!(msgs[3].unwrap_end().binary_offset, None);
    assert_eq!(msgs[4].unwrap_summary().stats.searches_with_match, 1);
    assert_eq!(msgs[4].unwrap_summary().stats.bytes_printed, 531);
});

// --quiet --stats should still emit a summary message (and only that).
rgtest!(quiet_stats, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    cmd.arg("--json")
        .arg("--quiet")
        .arg("--stats")
        .arg("Sherlock Holmes")
        .arg("sherlock");

    let msgs = json_decode(&cmd.stdout());

    assert_eq!(msgs[0].unwrap_summary().stats.searches_with_match, 1);
    assert_eq!(msgs[0].unwrap_summary().stats.bytes_searched, 367);
});
// Non-UTF-8 file names and contents must be emitted as base64 `bytes`
// payloads instead of `text`.
#[cfg(unix)]
rgtest!(notutf8, |dir: Dir, mut cmd: TestCommand| {
    use std::ffi::OsStr;
    use std::os::unix::ffi::OsStrExt;

    // This test does not work with PCRE2 because PCRE2 does not support the
    // `u` flag.
    if dir.is_pcre2() {
        return;
    }
    // macOS doesn't like this either... sigh.
    if cfg!(target_os = "macos") {
        return;
    }

    let name = &b"foo\xFFbar"[..];
    let contents = &b"quux\xFFbaz"[..];

    // APFS does not support creating files with invalid UTF-8 bytes, so just
    // skip the test if we can't create our file. Presumably we don't need this
    // check if we're already skipping it on macOS, but maybe other file
    // systems won't like this test either?
    // (Was `!(...).is_ok()`; `.is_err()` says the same thing directly.)
    if dir.try_create_bytes(OsStr::from_bytes(name), contents).is_err() {
        return;
    }
    cmd.arg("--json").arg(r"(?-u)\xFF");

    let msgs = json_decode(&cmd.stdout());

    assert_eq!(
        msgs[0].unwrap_begin(),
        Begin { path: Some(Data::bytes("Zm9v/2Jhcg==")) }
    );
    assert_eq!(
        msgs[1].unwrap_match(),
        Match {
            path: Some(Data::bytes("Zm9v/2Jhcg==")),
            lines: Data::bytes("cXV1eP9iYXo="),
            line_number: Some(1),
            absolute_offset: 0,
            submatches: vec![SubMatch {
                m: Data::bytes("/w=="),
                replacement: None,
                start: 4,
                end: 5,
            },],
        }
    );
});
// Like `notutf8`, but only the file *contents* are non-UTF-8: the path must
// still be emitted as `text` while the line data becomes base64 `bytes`.
rgtest!(notutf8_file, |dir: Dir, mut cmd: TestCommand| {
    use std::ffi::OsStr;

    // This test does not work with PCRE2 because PCRE2 does not support the
    // `u` flag.
    if dir.is_pcre2() {
        return;
    }

    let name = "foo";
    let contents = &b"quux\xFFbaz"[..];

    // APFS does not support creating files with invalid UTF-8 bytes, so just
    // skip the test if we can't create our file.
    // (Was `!(...).is_ok()`; `.is_err()` says the same thing directly.)
    if dir.try_create_bytes(OsStr::new(name), contents).is_err() {
        return;
    }
    cmd.arg("--json").arg(r"(?-u)\xFF");

    let msgs = json_decode(&cmd.stdout());

    assert_eq!(
        msgs[0].unwrap_begin(),
        Begin { path: Some(Data::text("foo")) }
    );
    assert_eq!(
        msgs[1].unwrap_match(),
        Match {
            path: Some(Data::text("foo")),
            lines: Data::bytes("cXV1eP9iYXo="),
            line_number: Some(1),
            absolute_offset: 0,
            submatches: vec![SubMatch {
                m: Data::bytes("/w=="),
                replacement: None,
                start: 4,
                end: 5,
            },],
        }
    );
});
// See: https://github.com/BurntSushi/ripgrep/issues/416
//
// This test in particular checks that our match does _not_ include the `\r`
// even though the '$' may be rewritten as '(?:\r??$)' and could thus include
// `\r` in the match.
rgtest!(crlf, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK_CRLF);
    cmd.arg("--json").arg("--crlf").arg(r"Sherlock$").arg("sherlock");

    let msgs = json_decode(&cmd.stdout());

    assert_eq!(
        msgs[1].unwrap_match().submatches[0].clone(),
        SubMatch {
            m: Data::text("Sherlock"),
            replacement: None,
            start: 56,
            end: 64
        },
    );
});

// See: https://github.com/BurntSushi/ripgrep/issues/1095
//
// This test checks that we don't drop the \r\n in a matching line when --crlf
// mode is enabled.
rgtest!(r1095_missing_crlf, |dir: Dir, mut cmd: TestCommand| {
    dir.create("foo", "test\r\n");

    // Check without --crlf flag.
    let msgs = json_decode(&cmd.arg("--json").arg("test").stdout());
    assert_eq!(msgs.len(), 4);
    assert_eq!(msgs[1].unwrap_match().lines, Data::text("test\r\n"));

    // Now check with --crlf flag.
    let msgs = json_decode(&cmd.arg("--crlf").stdout());
    assert_eq!(msgs.len(), 4);
    assert_eq!(msgs[1].unwrap_match().lines, Data::text("test\r\n"));
});

// See: https://github.com/BurntSushi/ripgrep/issues/1095
//
// This test checks that we don't return empty submatches when matching a `\n`
// in CRLF mode.
rgtest!(r1095_crlf_empty_match, |dir: Dir, mut cmd: TestCommand| {
    dir.create("foo", "test\r\n\n");

    // Check without --crlf flag.
    let msgs = json_decode(&cmd.arg("-U").arg("--json").arg("\n").stdout());
    assert_eq!(msgs.len(), 4);

    let m = msgs[1].unwrap_match();
    assert_eq!(m.lines, Data::text("test\r\n\n"));
    assert_eq!(m.submatches[0].m, Data::text("\n"));
    assert_eq!(m.submatches[1].m, Data::text("\n"));

    // Now check with --crlf flag.
    let msgs = json_decode(&cmd.arg("--crlf").stdout());
    assert_eq!(msgs.len(), 4);

    let m = msgs[1].unwrap_match();
    assert_eq!(m.lines, Data::text("test\r\n\n"));
    assert_eq!(m.submatches[0].m, Data::text("\n"));
    assert_eq!(m.submatches[1].m, Data::text("\n"));
});

// See: https://github.com/BurntSushi/ripgrep/issues/1412
// A look-behind must not produce a spurious extra submatch for the part of
// the pattern outside the match.
rgtest!(r1412_look_behind_match_missing, |dir: Dir, mut cmd: TestCommand| {
    // Only PCRE2 supports look-around.
    if !dir.is_pcre2() {
        return;
    }

    dir.create("test", "foo\nbar\n");
    let msgs = json_decode(
        &cmd.arg("-U").arg("--json").arg(r"(?<=foo\n)bar").stdout(),
    );
    assert_eq!(msgs.len(), 4);

    let m = msgs[1].unwrap_match();
    assert_eq!(m.lines, Data::text("bar\n"));
    assert_eq!(m.submatches.len(), 1);
});
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/tests/util.rs | tests/util.rs | use std::env;
use std::error;
use std::ffi::OsStr;
use std::fs::{self, File};
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::process::{self, Command};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;
use std::time::Duration;
use bstr::ByteSlice;
/// Directory (under the system temp dir) in which all test work dirs live.
/// (`'static` is implied on a `static` item, so the explicit lifetime was
/// redundant — clippy::redundant_static_lifetimes.)
static TEST_DIR: &str = "ripgrep-tests";
/// Monotonic counter giving every test invocation its own unique work dir.
static NEXT_ID: AtomicUsize = AtomicUsize::new(0);
/// Setup an empty work directory and return a command pointing to the ripgrep
/// executable whose CWD is set to the work directory.
///
/// The name given will be used to create the directory. Generally, it should
/// correspond to the test name.
pub fn setup(test_name: &str) -> (Dir, TestCommand) {
    let dir = Dir::new(test_name);
    let cmd = dir.command();
    (dir, cmd)
}

/// Like `setup`, but uses PCRE2 as the underlying regex engine.
pub fn setup_pcre2(test_name: &str) -> (Dir, TestCommand) {
    let mut dir = Dir::new(test_name);
    // Must flip the flag before building the command so that `command()`
    // adds `--pcre2` to the invocation.
    dir.pcre2(true);
    let cmd = dir.command();
    (dir, cmd)
}
/// Break the given string into lines, sort them and then join them back
/// together. This is useful for testing output from ripgrep that may not
/// always be in the same order.
pub fn sort_lines(lines: &str) -> String {
    let mut sorted = lines.trim().lines().collect::<Vec<&str>>();
    sorted.sort();
    let mut joined = sorted.join("\n");
    joined.push('\n');
    joined
}
/// Returns true if and only if the given program can be successfully executed
/// with a `--help` flag.
pub fn cmd_exists(program: &str) -> bool {
    Command::new(program)
        .arg("--help")
        .output()
        .map(|output| output.status.success())
        .unwrap_or(false)
}
/// Dir represents a directory in which tests should be run.
///
/// Directories are created from a global atomic counter to avoid duplicates.
#[derive(Clone, Debug)]
pub struct Dir {
    /// The directory in which this test executable is running.
    root: PathBuf,
    /// The directory in which the test should run. If a test needs to create
    /// files, they should go in here. This directory is also used as the CWD
    /// for any processes created by the test.
    dir: PathBuf,
    /// Set to true when the test should use PCRE2 as the regex engine.
    pcre2: bool,
}
impl Dir {
/// Create a new test working directory with the given name. The name
/// does not need to be distinct for each invocation, but should correspond
/// to a logical grouping of tests.
pub fn new(name: &str) -> Dir {
let id = NEXT_ID.fetch_add(1, Ordering::Relaxed);
let root = env::current_exe()
.unwrap()
.parent()
.expect("executable's directory")
.to_path_buf();
let dir =
env::temp_dir().join(TEST_DIR).join(name).join(&format!("{id}"));
if dir.exists() {
nice_err(&dir, fs::remove_dir_all(&dir));
}
nice_err(&dir, repeat(|| fs::create_dir_all(&dir)));
Dir { root, dir, pcre2: false }
}
/// Use PCRE2 for this test.
pub fn pcre2(&mut self, yes: bool) {
self.pcre2 = yes;
}
/// Returns true if and only if this test is configured to use PCRE2 as
/// the regex engine.
pub fn is_pcre2(&self) -> bool {
self.pcre2
}
/// Create a new file with the given name and contents in this directory,
/// or panic on error.
pub fn create<P: AsRef<Path>>(&self, name: P, contents: &str) {
self.create_bytes(name, contents.as_bytes());
}
/// Try to create a new file with the given name and contents in this
/// directory.
#[allow(dead_code)] // unused on Windows
pub fn try_create<P: AsRef<Path>>(
&self,
name: P,
contents: &str,
) -> io::Result<()> {
let path = self.dir.join(name);
self.try_create_bytes(path, contents.as_bytes())
}
/// Create a new file with the given name and size.
pub fn create_size<P: AsRef<Path>>(&self, name: P, filesize: u64) {
let path = self.dir.join(name);
let file = nice_err(&path, File::create(&path));
nice_err(&path, file.set_len(filesize));
}
/// Create a new file with the given name and contents in this directory,
/// or panic on error.
pub fn create_bytes<P: AsRef<Path>>(&self, name: P, contents: &[u8]) {
let path = self.dir.join(&name);
nice_err(&path, self.try_create_bytes(name, contents));
}
/// Try to create a new file with the given name and contents in this
/// directory.
pub fn try_create_bytes<P: AsRef<Path>>(
&self,
name: P,
contents: &[u8],
) -> io::Result<()> {
let path = self.dir.join(name);
let mut file = File::create(path)?;
file.write_all(contents)?;
file.flush()
}
/// Remove a file with the given name from this directory.
pub fn remove<P: AsRef<Path>>(&self, name: P) {
let path = self.dir.join(name);
nice_err(&path, fs::remove_file(&path));
}
/// Create a new directory with the given path (and any directories above
/// it) inside this directory.
pub fn create_dir<P: AsRef<Path>>(&self, path: P) {
let path = self.dir.join(path);
nice_err(&path, repeat(|| fs::create_dir_all(&path)));
}
/// Creates a new command that is set to use the ripgrep executable in
/// this working directory.
///
/// This also:
///
/// * Unsets the `RIPGREP_CONFIG_PATH` environment variable.
/// * Sets the `--path-separator` to `/` so that paths have the same output
/// on all systems. Tests that need to check `--path-separator` itself
/// can simply pass it again to override it.
pub fn command(&self) -> TestCommand {
let mut cmd = self.bin();
cmd.env_remove("RIPGREP_CONFIG_PATH");
cmd.current_dir(&self.dir);
cmd.arg("--path-separator").arg("/");
if self.is_pcre2() {
cmd.arg("--pcre2");
}
TestCommand { dir: self.clone(), cmd }
}
/// Returns the path to the ripgrep executable.
pub fn bin(&self) -> process::Command {
let rg = self.root.join(format!("../rg{}", env::consts::EXE_SUFFIX));
match cross_runner() {
None => process::Command::new(rg),
Some(runner) => {
let mut cmd = process::Command::new(runner);
cmd.arg(rg);
cmd
}
}
}
/// Returns the path to this directory.
pub fn path(&self) -> &Path {
&self.dir
}
/// Creates a directory symlink to the src with the given target name
/// in this directory.
#[cfg(not(windows))]
pub fn link_dir<S: AsRef<Path>, T: AsRef<Path>>(&self, src: S, target: T) {
use std::os::unix::fs::symlink;
let src = self.dir.join(src);
let target = self.dir.join(target);
let _ = fs::remove_file(&target);
nice_err(&target, symlink(&src, &target));
}
/// Creates a directory symlink to the src with the given target name
/// in this directory.
#[cfg(windows)]
pub fn link_dir<S: AsRef<Path>, T: AsRef<Path>>(&self, src: S, target: T) {
use std::os::windows::fs::symlink_dir;
let src = self.dir.join(src);
let target = self.dir.join(target);
let _ = fs::remove_dir(&target);
nice_err(&target, symlink_dir(&src, &target));
}
    /// Creates a file symlink to the src with the given target name
    /// in this directory.
    ///
    /// On Unix there is only one kind of symlink, so this simply delegates
    /// to `link_dir`.
    #[cfg(not(windows))]
    pub fn link_file<S: AsRef<Path>, T: AsRef<Path>>(
        &self,
        src: S,
        target: T,
    ) {
        self.link_dir(src, target);
    }
/// Creates a file symlink to the src with the given target name
/// in this directory.
#[cfg(windows)]
#[allow(dead_code)] // unused on Windows
pub fn link_file<S: AsRef<Path>, T: AsRef<Path>>(
&self,
src: S,
target: T,
) {
use std::os::windows::fs::symlink_file;
let src = self.dir.join(src);
let target = self.dir.join(target);
let _ = fs::remove_file(&target);
nice_err(&target, symlink_file(&src, &target));
}
}
/// A simple wrapper around a process::Command with some conveniences.
#[derive(Debug)]
pub struct TestCommand {
    /// The dir used to launch this command.
    dir: Dir,
    /// The actual command we use to control the process.
    cmd: Command,
}
impl TestCommand {
    /// Returns a mutable reference to the underlying command.
    pub fn cmd(&mut self) -> &mut Command {
        &mut self.cmd
    }
    /// Add an argument to pass to the command.
    pub fn arg<A: AsRef<OsStr>>(&mut self, arg: A) -> &mut TestCommand {
        self.cmd.arg(arg);
        self
    }
    /// Add any number of arguments to the command.
    pub fn args<I, A>(&mut self, args: I) -> &mut TestCommand
    where
        I: IntoIterator<Item = A>,
        A: AsRef<OsStr>,
    {
        self.cmd.args(args);
        self
    }
    /// Set the working directory for this command.
    ///
    /// The path given is interpreted relative to the directory that this
    /// command was created for.
    ///
    /// Note that this does not need to be called normally, since the creation
    /// of this TestCommand causes its working directory to be set to the
    /// test's directory automatically.
    pub fn current_dir<P: AsRef<Path>>(&mut self, dir: P) -> &mut TestCommand {
        self.cmd.current_dir(self.dir.path().join(dir));
        self
    }
    /// Runs and captures the stdout of the given command.
    ///
    /// Panics (with diagnostics) if the command exits with failure.
    pub fn stdout(&mut self) -> String {
        let o = self.output();
        String::from_utf8_lossy(&o.stdout).into_owned()
    }
    /// Pipe `input` to a command, and collect the output.
    ///
    /// Panics (with diagnostics) if the command exits with failure.
    pub fn pipe(&mut self, input: &[u8]) -> String {
        self.cmd.stdin(process::Stdio::piped());
        self.cmd.stdout(process::Stdio::piped());
        self.cmd.stderr(process::Stdio::piped());
        let mut child = self.cmd.spawn().unwrap();
        // Pipe input to child process using a separate thread to avoid
        // risk of deadlock between parent and child process.
        let mut stdin = child.stdin.take().expect("expected standard input");
        let input = input.to_owned();
        let worker = thread::spawn(move || stdin.write_all(&input));
        let output = self.expect_success(child.wait_with_output().unwrap());
        // Propagate any write error from the feeder thread once the child
        // has been reaped.
        worker.join().unwrap().unwrap();
        String::from_utf8_lossy(&output.stdout).into_owned()
    }
    /// Gets the output of a command. If the command failed, then this panics.
    pub fn output(&mut self) -> process::Output {
        let output = self.raw_output();
        self.expect_success(output)
    }
    /// Gets the raw output of a command after filtering nonsense like jemalloc
    /// error messages from stderr.
    pub fn raw_output(&mut self) -> process::Output {
        let mut output = self.cmd.output().unwrap();
        output.stderr = strip_jemalloc_nonsense(&output.stderr);
        output
    }
    /// Runs the command and asserts that it resulted in an error exit code.
    pub fn assert_err(&mut self) {
        let o = self.raw_output();
        if o.status.success() {
            panic!(
                "\n\n===== {:?} =====\n\
                 command succeeded but expected failure!\
                 \n\ncwd: {}\
                 \n\ndir list: {:?}\
                 \n\nstatus: {}\
                 \n\nstdout: {}\n\nstderr: {}\
                 \n\n=====\n",
                self.cmd,
                self.dir.dir.display(),
                dir_list(&self.dir.dir),
                o.status,
                String::from_utf8_lossy(&o.stdout),
                String::from_utf8_lossy(&o.stderr)
            );
        }
    }
    /// Runs the command and asserts that its exit code matches expected exit
    /// code.
    ///
    /// NOTE(review): this uses `self.cmd.output()` directly (not
    /// `raw_output`), so stderr isn't jemalloc-filtered here — presumably
    /// fine since only the exit code is inspected; confirm if stderr is ever
    /// used.
    pub fn assert_exit_code(&mut self, expected_code: i32) {
        let code = self.cmd.output().unwrap().status.code().unwrap();
        assert_eq!(
            expected_code,
            code,
            "\n\n===== {:?} =====\n\
             expected exit code did not match\
             \n\ncwd: {}\
             \n\ndir list: {:?}\
             \n\nexpected: {}\
             \n\nfound: {}\
             \n\n=====\n",
            self.cmd,
            self.dir.dir.display(),
            dir_list(&self.dir.dir),
            expected_code,
            code
        );
    }
    /// Runs the command and asserts that something was printed to stderr.
    ///
    /// Note: this panics both when the command succeeds and when it fails
    /// without producing any stderr output.
    pub fn assert_non_empty_stderr(&mut self) {
        let o = self.cmd.output().unwrap();
        if o.status.success() || o.stderr.is_empty() {
            panic!(
                "\n\n===== {:?} =====\n\
                 command succeeded but expected failure!\
                 \n\ncwd: {}\
                 \n\ndir list: {:?}\
                 \n\nstatus: {}\
                 \n\nstdout: {}\n\nstderr: {}\
                 \n\n=====\n",
                self.cmd,
                self.dir.dir.display(),
                dir_list(&self.dir.dir),
                o.status,
                String::from_utf8_lossy(&o.stdout),
                String::from_utf8_lossy(&o.stderr)
            );
        }
    }
    /// Returns `o` unchanged if the command succeeded; otherwise panics with
    /// detailed diagnostics (command, cwd, directory listing, status and
    /// captured output).
    fn expect_success(&self, o: process::Output) -> process::Output {
        if !o.status.success() {
            let suggest = if o.stderr.is_empty() {
                "\n\nDid your search end up with no results?".to_string()
            } else {
                "".to_string()
            };
            panic!(
                "\n\n==========\n\
                 command failed but expected success!\
                 {}\
                 \n\ncommand: {:?}\
                 \n\ncwd: {}\
                 \n\ndir list: {:?}\
                 \n\nstatus: {}\
                 \n\nstdout: {}\
                 \n\nstderr: {}\
                 \n\n==========\n",
                suggest,
                self.cmd,
                self.dir.dir.display(),
                dir_list(&self.dir.dir),
                o.status,
                String::from_utf8_lossy(&o.stdout),
                String::from_utf8_lossy(&o.stderr)
            );
        }
        o
    }
}
/// Unwraps `res`, panicking with a message that includes `path` on error.
fn nice_err<T, E: error::Error>(path: &Path, res: Result<T, E>) -> T {
    res.unwrap_or_else(|err| panic!("{}: {:?}", path.display(), err))
}
/// Invokes `f` until it succeeds, making up to 10 attempts in total.
///
/// A 500ms pause separates consecutive attempts. If every attempt fails, the
/// error from the final attempt is returned. This is used to paper over
/// filesystem operations that can fail transiently (see `Dir::create_dir`).
fn repeat<F: FnMut() -> io::Result<()>>(mut f: F) -> io::Result<()> {
    let mut last_err = None;
    for attempt in 0..10 {
        match f() {
            Ok(()) => return Ok(()),
            Err(err) => {
                last_err = Some(err);
                // Don't sleep after the final attempt: we're about to give
                // up, so the previous code's trailing 500ms sleep was wasted.
                if attempt < 9 {
                    thread::sleep(Duration::from_millis(500));
                }
            }
        }
    }
    Err(last_err.unwrap())
}
/// Return a recursive listing of all files and directories in the given
/// directory. This is useful for debugging transient and odd failures in
/// integration tests.
fn dir_list<P: AsRef<Path>>(dir: P) -> Vec<String> {
    let mut entries = Vec::new();
    for result in walkdir::WalkDir::new(dir).follow_links(true) {
        let dent = result.unwrap();
        entries.push(dent.path().to_string_lossy().into_owned());
    }
    entries
}
/// Returns the runner program to use for the `rg` binary when the tests are
/// executed under Cross (i.e., via qemu emulation for a foreign target).
///
/// Cross sets the `CROSS_RUNNER` environment variable, but its value (it
/// seems to always be `qemu-user`) doesn't reliably name a program that
/// exists inside the Docker image, and `/linux-runner` only works sometimes.
/// So instead, we derive the qemu binary name from the target architecture
/// ourselves. Unfortunately the naming isn't uniform: qemu uses `qemu-ppc64`
/// for what Rust calls `powerpc64`, hence the manual special cases below.
///
/// Returns `None` when not running under Cross (the variable is unset, empty
/// or literally `empty`).
fn cross_runner() -> Option<String> {
    let runner = std::env::var("CROSS_RUNNER").ok()?;
    if runner.is_empty() || runner == "empty" {
        return None;
    }
    let name = if cfg!(target_arch = "powerpc64") {
        "qemu-ppc64".to_string()
    } else if cfg!(target_arch = "x86") {
        "i386".to_string()
    } else {
        // No known special case, so guess from the target arch name.
        format!("qemu-{}", std::env::consts::ARCH)
    };
    Some(name)
}
/// Returns true if the test setup believes Cross is running and `qemu` is
/// needed to run ripgrep.
///
/// This is useful because it has been difficult to get some tests to pass
/// under Cross.
pub fn is_cross() -> bool {
    match std::env::var("CROSS_RUNNER") {
        Ok(v) => !v.is_empty(),
        Err(_) => false,
    }
}
/// Strips absolutely fucked `<jemalloc>:` lines from the output.
///
/// In theory this only happens under qemu, which is where our tests run under
/// `cross`. But is messes with our tests, because... they don't expect the
/// allocator to fucking write to stderr. I mean, what the fuck? Who prints a
/// warning message with absolutely no instruction for what to do with it or
/// how to disable it. Absolutely fucking bonkers.
fn strip_jemalloc_nonsense(data: &[u8]) -> Vec<u8> {
let lines = data
.lines_with_terminator()
.filter(|line| !line.starts_with_str("<jemalloc>:"));
bstr::concat(lines)
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/tests/macros.rs | tests/macros.rs | #[macro_export]
// Defines a single ripgrep integration test.
//
// `$name` is both the test name and the name of its scratch directory, and
// `$fun` is a closure taking `(Dir, TestCommand)`. The body is run once
// against the default setup and, when ripgrep is built with the `pcre2`
// feature, a second time against a PCRE2-enabled setup, so every test
// exercises both regex engines.
macro_rules! rgtest {
    ($name:ident, $fun:expr) => {
        #[test]
        fn $name() {
            let (dir, cmd) = crate::util::setup(stringify!($name));
            $fun(dir, cmd);
            if cfg!(feature = "pcre2") {
                let (dir, cmd) = crate::util::setup_pcre2(stringify!($name));
                $fun(dir, cmd);
            }
        }
    };
}
#[macro_export]
// Asserts that two string-like values are equal, panicking with a readable
// rendering of both values (via `Display`) when they differ. The `&*`
// coercions let `String` and `&str` operands be compared uniformly.
macro_rules! eqnice {
    ($expected:expr, $got:expr) => {
        let expected = &*$expected;
        let got = &*$got;
        if expected != got {
            panic!("
printed outputs differ!
expected:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
{}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
got:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
{}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
", expected, got);
        }
    }
}
#[macro_export]
// Like `eqnice!`, but renders both values with `Debug` (`{:?}`) instead of
// `Display`, which is useful for non-string or whitespace-sensitive values.
macro_rules! eqnice_repr {
    ($expected:expr, $got:expr) => {
        let expected = &*$expected;
        let got = &*$got;
        if expected != got {
            panic!("
printed outputs differ!
expected:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
{:?}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
got:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
{:?}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
", expected, got);
        }
    }
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/tests/binary.rs | tests/binary.rs | use crate::util::{Dir, TestCommand};
// This file contains a smattering of tests specifically for checking ripgrep's
// handling of binary files. There's quite a bit of discussion on this in this
// bug report: https://github.com/BurntSushi/ripgrep/issues/306
// Our haystack is the first 2,133 lines of Gutenberg's copy of "A Study in
// Scarlet," with a NUL byte at line 1870: `abcdef\x00`.
//
// The position and size of the haystack is, unfortunately, significant. In
// particular, the NUL byte is specifically inserted at some point *after* the
// first 65,536 bytes, which corresponds to the initial capacity of the buffer
// that ripgrep uses to read files. (grep for DEFAULT_BUFFER_CAPACITY.) The
// position of the NUL byte ensures that we can execute some search on the
// initial buffer contents without ever detecting any binary data. Moreover,
// when using a memory map for searching, only the first 65,536 bytes are
// scanned for a NUL byte, so no binary bytes are detected at all when using
// a memory map (unless our query matches line 1898).
//
// One last note: in the tests below, we use --no-mmap heavily because binary
// detection with memory maps is a bit different. Namely, NUL bytes are only
// searched for in the first few KB of the file and in a match. Normally, NUL
// bytes are searched for everywhere.
const HAY: &'static [u8] = include_bytes!("./data/sherlock-nul.txt");
// Tests for binary file detection when using memory maps.
// As noted in the original comments, with memory maps binary detection
// works differently - NUL bytes are only searched for in the first few KB
// of the file and in matches.
//
// Note that we don't run these on macOS, which has memory maps forcefully
// disabled because they suck so much.
// Test that matches in a binary file with memory maps work as expected
// with implicit file search (via glob pattern).
#[cfg(not(target_os = "macos"))]
rgtest!(mmap_match_implicit, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&["--mmap", "-n", "Project Gutenberg EBook", "-g", "hay"]);
    // With mmap, the NUL byte lies beyond the initially scanned 65,536
    // bytes, so the match on line 1 is printed with no binary warning.
    let expected = "\
hay:1:The Project Gutenberg EBook of A Study In Scarlet, by Arthur Conan Doyle
";
    eqnice!(expected, cmd.stdout());
});
// Test with an explicit file argument when using memory maps.
#[cfg(not(target_os = "macos"))]
rgtest!(mmap_match_explicit, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&["--mmap", "-n", "Project Gutenberg EBook", "hay"]);
    let expected = "\
1:The Project Gutenberg EBook of A Study In Scarlet, by Arthur Conan Doyle
";
    eqnice!(expected, cmd.stdout());
});
// Test specifically with a pattern that matches near the NUL byte which should
// trigger binary detection with memory maps.
#[cfg(not(target_os = "macos"))]
rgtest!(mmap_match_near_nul, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    // Pattern that matches line 1870, which is where the NUL byte is.
    // Note: Using direct file path instead of glob.
    cmd.args(&["--mmap", "-n", "abcdef", "hay"]);
    let expected = "\
binary file matches (found \"\\0\" byte around offset 77041)
";
    eqnice!(expected, cmd.stdout());
});
// Test with --count option to ensure full file scanning works with mmap.
#[cfg(not(target_os = "macos"))]
rgtest!(mmap_match_count, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&["--mmap", "-c", "Project Gutenberg EBook|Heaven", "hay"]);
    // With mmap, since we're counting all matches and might not
    // encounter the NUL byte during initial detection, the count
    // should still be reported.
    eqnice!("2\n", cmd.stdout());
});
// Test binary detection with mmap when pattern would match before and after NUL
// byte.
#[cfg(not(target_os = "macos"))]
rgtest!(mmap_match_multiple, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    // Use explicit file path.
    cmd.args(&["--mmap", "-n", "Project Gutenberg EBook|Heaven", "hay"]);
    // With explicit file and memory maps, matches before and after NUL byte
    // are shown.
    let expected = "\
1:The Project Gutenberg EBook of A Study In Scarlet, by Arthur Conan Doyle
1871:\"No. Heaven knows what the objects of his studies are. But here we
";
    eqnice!(expected, cmd.stdout());
});
// Test that --binary flag can have odd results when searching with a memory
// map.
#[cfg(not(target_os = "macos"))]
rgtest!(mmap_binary_flag, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    // Use glob pattern.
    cmd.args(&["--mmap", "-n", "--binary", "Heaven", "-g", "hay"]);
    let expected = "\
hay:1871:\"No. Heaven knows what the objects of his studies are. But here we
";
    eqnice!(expected, cmd.stdout());
});
// Test that using -a/--text flag works as expected with mmap.
#[cfg(not(target_os = "macos"))]
rgtest!(mmap_text_flag, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&["--mmap", "-n", "--text", "Heaven", "-g", "hay"]);
    // With --text flag, binary detection should be disabled.
    let expected = "\
hay:1871:\"No. Heaven knows what the objects of his studies are. But here we
";
    eqnice!(expected, cmd.stdout());
});
// Test pattern that matches before and after the NUL byte with memory maps.
#[cfg(not(target_os = "macos"))]
rgtest!(mmap_after_nul_match, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    // Use explicit file path.
    cmd.args(&["--mmap", "-n", "medical student", "hay"]);
    // With explicit file and memory maps, all matches are shown
    let expected = "\
176:\"A medical student, I suppose?\" said I.
409:\"A medical student, I suppose?\" said I.
642:\"A medical student, I suppose?\" said I.
875:\"A medical student, I suppose?\" said I.
1108:\"A medical student, I suppose?\" said I.
1341:\"A medical student, I suppose?\" said I.
1574:\"A medical student, I suppose?\" said I.
1807:\"A medical student, I suppose?\" said I.
1867:\"And yet you say he is not a medical student?\"
";
    eqnice!(expected, cmd.stdout());
});
// This tests that ripgrep prints a warning message if it finds and prints a
// match in a binary file before detecting that it is a binary file. The point
// here is to notify that user that the search of the file is only partially
// complete.
//
// This applies to files that are *implicitly* searched via a recursive
// directory traversal. In particular, this results in a WARNING message being
// printed. We make our file "implicit" by doing a recursive search with a glob
// that matches our file.
rgtest!(after_match1_implicit, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&["--no-mmap", "-n", "Project Gutenberg EBook", "-g", "hay"]);
    let expected = "\
hay:1:The Project Gutenberg EBook of A Study In Scarlet, by Arthur Conan Doyle
hay: WARNING: stopped searching binary file after match (found \"\\0\" byte around offset 77041)
";
    eqnice!(expected, cmd.stdout());
});
// Like after_match1_implicit, except we provide a file to search
// explicitly. This results in identical behavior, but a different message.
rgtest!(after_match1_explicit, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&["--no-mmap", "-n", "Project Gutenberg EBook", "hay"]);
    let expected = "\
1:The Project Gutenberg EBook of A Study In Scarlet, by Arthur Conan Doyle
binary file matches (found \"\\0\" byte around offset 77041)
";
    eqnice!(expected, cmd.stdout());
});
// Like after_match1_explicit, except we feed our content on stdin.
rgtest!(after_match1_stdin, |_: Dir, mut cmd: TestCommand| {
    cmd.args(&["--no-mmap", "-n", "Project Gutenberg EBook"]);
    let expected = "\
1:The Project Gutenberg EBook of A Study In Scarlet, by Arthur Conan Doyle
binary file matches (found \"\\0\" byte around offset 77041)
";
    eqnice!(expected, cmd.pipe(HAY));
});
// Like after_match1_implicit, but provides the --binary flag, which
// disables binary filtering. Thus, this matches the behavior of ripgrep as
// if the file were given explicitly.
rgtest!(after_match1_implicit_binary, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&[
        "--no-mmap",
        "-n",
        "--binary",
        "Project Gutenberg EBook",
        "-g",
        "hay",
    ]);
    let expected = "\
hay:1:The Project Gutenberg EBook of A Study In Scarlet, by Arthur Conan Doyle
hay: binary file matches (found \"\\0\" byte around offset 77041)
";
    eqnice!(expected, cmd.stdout());
});
// Like after_match1_implicit, but enables -a/--text, so no binary
// detection should be performed.
rgtest!(after_match1_implicit_text, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&[
        "--no-mmap",
        "-n",
        "--text",
        "Project Gutenberg EBook",
        "-g",
        "hay",
    ]);
    let expected = "\
hay:1:The Project Gutenberg EBook of A Study In Scarlet, by Arthur Conan Doyle
";
    eqnice!(expected, cmd.stdout());
});
// Like after_match1_implicit_text, except the file is searched explicitly.
// With -a/--text enabled, no binary detection should be performed.
rgtest!(after_match1_explicit_text, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&["--no-mmap", "-n", "--text", "Project Gutenberg EBook", "hay"]);
    let expected = "\
1:The Project Gutenberg EBook of A Study In Scarlet, by Arthur Conan Doyle
";
    eqnice!(expected, cmd.stdout());
});
// Like after_match1_implicit, except this asks ripgrep to print all matching
// files.
//
// This is an interesting corner case that one might consider a bug, however,
// it's unlikely to be fixed. Namely, ripgrep probably shouldn't print `hay`
// as a matching file since it is in fact a binary file, and thus should be
// filtered out by default. However, the --files-with-matches flag will print
// out the path of a matching file as soon as a match is seen and then stop
// searching completely. Therefore, the NUL byte is never actually detected.
//
// The only way to fix this would be to kill ripgrep's performance in this case
// and continue searching the entire file for a NUL byte. (Similarly if the
// --quiet flag is set. See the next test.)
rgtest!(after_match1_implicit_path, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&["--no-mmap", "-l", "Project Gutenberg EBook", "-g", "hay"]);
    eqnice!("hay\n", cmd.stdout());
});
// Like after_match1_implicit_path, except this indicates that a match was
// found with no other output. (This is the same bug described above, but
// manifest as an exit code with no output.)
rgtest!(after_match1_implicit_quiet, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&["--no-mmap", "-q", "Project Gutenberg EBook", "-g", "hay"]);
    eqnice!("", cmd.stdout());
});
// This sets up the same test as after_match1_implicit_path, but instead of
// just printing the matching files, this includes the full count of matches.
// In this case, we need to search the entire file, so ripgrep correctly
// detects the binary data and suppresses output.
rgtest!(after_match1_implicit_count, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&["--no-mmap", "-c", "Project Gutenberg EBook", "-g", "hay"]);
    cmd.assert_err();
});
// Like after_match1_implicit_count, except the --binary flag is provided,
// which makes ripgrep disable binary data filtering even for implicit files.
rgtest!(
    after_match1_implicit_count_binary,
    |dir: Dir, mut cmd: TestCommand| {
        dir.create_bytes("hay", HAY);
        cmd.args(&[
            "--no-mmap",
            "-c",
            "--binary",
            "Project Gutenberg EBook",
            "-g",
            "hay",
        ]);
        eqnice!("hay:1\n", cmd.stdout());
    }
);
// Like after_match1_implicit_count, except the file path is provided
// explicitly, so binary filtering is disabled and a count is correctly
// reported.
rgtest!(after_match1_explicit_count, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&["--no-mmap", "-c", "Project Gutenberg EBook", "hay"]);
    eqnice!("1\n", cmd.stdout());
});
// This tests that a match way before the NUL byte is shown, but a match after
// the NUL byte is not.
rgtest!(after_match2_implicit, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&[
        "--no-mmap",
        "-n",
        "Project Gutenberg EBook|a medical student",
        "-g",
        "hay",
    ]);
    let expected = "\
hay:1:The Project Gutenberg EBook of A Study In Scarlet, by Arthur Conan Doyle
hay: WARNING: stopped searching binary file after match (found \"\\0\" byte around offset 77041)
";
    eqnice!(expected, cmd.stdout());
});
// Like after_match2_implicit, but enables -a/--text, so no binary
// detection should be performed.
rgtest!(after_match2_implicit_text, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&[
        "--no-mmap",
        "-n",
        "--text",
        "Project Gutenberg EBook|a medical student",
        "-g",
        "hay",
    ]);
    let expected = "\
hay:1:The Project Gutenberg EBook of A Study In Scarlet, by Arthur Conan Doyle
hay:1867:\"And yet you say he is not a medical student?\"
";
    eqnice!(expected, cmd.stdout());
});
// This tests that ripgrep *silently* quits before finding a match that occurs
// after a NUL byte.
rgtest!(before_match1_implicit, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&["--no-mmap", "-n", "Heaven", "-g", "hay"]);
    cmd.assert_err();
});
// This tests that ripgrep *does not* silently quit before finding a match that
// occurs after a NUL byte when a file is explicitly searched.
rgtest!(before_match1_explicit, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&["--no-mmap", "-n", "Heaven", "hay"]);
    let expected = "\
binary file matches (found \"\\0\" byte around offset 77041)
";
    eqnice!(expected, cmd.stdout());
});
// Like before_match1_implicit, but enables the --binary flag, which
// disables binary filtering. Thus, this matches the behavior of ripgrep as if
// the file were given explicitly.
rgtest!(before_match1_implicit_binary, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&["--no-mmap", "-n", "--binary", "Heaven", "-g", "hay"]);
    let expected = "\
hay: binary file matches (found \"\\0\" byte around offset 77041)
";
    eqnice!(expected, cmd.stdout());
});
// Like before_match1_implicit, but enables -a/--text, so no binary
// detection should be performed.
rgtest!(before_match1_implicit_text, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&["--no-mmap", "-n", "--text", "Heaven", "-g", "hay"]);
    let expected = "\
hay:1871:\"No. Heaven knows what the objects of his studies are. But here we
";
    eqnice!(expected, cmd.stdout());
});
// This tests that ripgrep *silently* quits before finding a match that occurs
// before a NUL byte, but within the same buffer as the NUL byte.
rgtest!(before_match2_implicit, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&["--no-mmap", "-n", "a medical student", "-g", "hay"]);
    cmd.assert_err();
});
// This tests that ripgrep *does not* silently quit before finding a match that
// occurs before a NUL byte, but within the same buffer as the NUL byte. Even
// though the match occurs before the NUL byte, ripgrep still doesn't print it
// because it has already scanned ahead to detect the NUL byte. (This matches
// the behavior of GNU grep.)
rgtest!(before_match2_explicit, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&["--no-mmap", "-n", "a medical student", "hay"]);
    let expected = "\
binary file matches (found \"\\0\" byte around offset 77041)
";
    eqnice!(expected, cmd.stdout());
});
// Like before_match2_implicit, but enables -a/--text, so no binary
// detection should be performed.
rgtest!(before_match2_implicit_text, |dir: Dir, mut cmd: TestCommand| {
    dir.create_bytes("hay", HAY);
    cmd.args(&["--no-mmap", "-n", "--text", "a medical student", "-g", "hay"]);
    let expected = "\
hay:1867:\"And yet you say he is not a medical student?\"
";
    eqnice!(expected, cmd.stdout());
});
// See: https://github.com/BurntSushi/ripgrep/issues/3131
rgtest!(
    matching_files_inconsistent_with_count,
    |dir: Dir, _cmd: TestCommand| {
        // file1: a match on the first line, ~150K padding lines, then a
        // trailing NUL byte, so the NUL is encountered only long after the
        // first match is seen.
        let mut file1 = String::new();
        file1.push_str("cat here\n");
        for _ in 0..150_000 {
            file1.push_str("padding line\n");
        }
        file1.push_str("\x00");
        dir.create("file1.txt", &file1);
        dir.create("file2.txt", "cat here");
        let got = dir.command().args(&["--sort=path", "-l", "cat"]).stdout();
        eqnice!("file1.txt\nfile2.txt\n", got);
        // This is the inconsistent result that can't really be avoided without
        // either making `-l/--files-with-matches` much slower or changing
        // what "binary filtering" means.
        let got = dir.command().args(&["--sort=path", "-c", "cat"]).stdout();
        eqnice!("file2.txt:1\n", got);
        let got = dir
            .command()
            .args(&["--sort=path", "-c", "cat", "--binary"])
            .stdout();
        eqnice!("file1.txt:1\nfile2.txt:1\n", got);
        let got = dir
            .command()
            .args(&["--sort=path", "-c", "cat", "--text"])
            .stdout();
        eqnice!("file1.txt:1\nfile2.txt:1\n", got);
    }
);
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/tests/multiline.rs | tests/multiline.rs | use crate::hay::SHERLOCK;
use crate::util::{Dir, TestCommand};
// This tests that multiline matches that span multiple lines, but where
// multiple matches may begin and end on the same line work correctly.
rgtest!(overlap1, |dir: Dir, mut cmd: TestCommand| {
    dir.create("test", "xxx\nabc\ndefxxxabc\ndefxxx\nxxx");
    cmd.arg("-n").arg("-U").arg("abc\ndef").arg("test");
    eqnice!("2:abc\n3:defxxxabc\n4:defxxx\n", cmd.stdout());
});
// Like overlap1, but tests the case where one match ends at precisely the same
// location at which the next match begins.
rgtest!(overlap2, |dir: Dir, mut cmd: TestCommand| {
    dir.create("test", "xxx\nabc\ndefabc\ndefxxx\nxxx");
    cmd.arg("-n").arg("-U").arg("abc\ndef").arg("test");
    eqnice!("2:abc\n3:defabc\n4:defxxx\n", cmd.stdout());
});
// Tests that even in a multiline search, a '.' does not match a newline.
rgtest!(dot_no_newline, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    // No match expected: the pattern can only match across a line boundary,
    // which '.' refuses to cross without --multiline-dotall.
    cmd.args(&["-n", "-U", "of this world.+detective work", "sherlock"]);
    cmd.assert_err();
});
// Tests that the --multiline-dotall flag causes '.' to match a newline.
rgtest!(dot_all, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    cmd.args(&[
        "-n",
        "-U",
        "--multiline-dotall",
        "of this world.+detective work",
        "sherlock",
    ]);
    let expected = "\
1:For the Doctor Watsons of this world, as opposed to the Sherlock
2:Holmeses, success in the province of detective work must always
";
    eqnice!(expected, cmd.stdout());
});
// Tests that --only-matching works in multiline mode.
//
// Note the use of \p{Any} (rather than '.') to let the pattern cross line
// boundaries.
rgtest!(only_matching, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    cmd.args(&[
        "-n",
        "-U",
        "--only-matching",
        r"Watson|Sherlock\p{Any}+?Holmes",
        "sherlock",
    ]);
    let expected = "\
1:Watson
1:Sherlock
2:Holmes
3:Sherlock Holmes
5:Watson
";
    eqnice!(expected, cmd.stdout());
});
// Tests that --vimgrep works in multiline mode.
//
// In particular, we test that only the first line of each match is printed,
// even when a match spans multiple lines.
//
// See: https://github.com/BurntSushi/ripgrep/issues/1866
rgtest!(vimgrep, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    cmd.args(&[
        "-n",
        "-U",
        "--vimgrep",
        r"Watson|Sherlock\p{Any}+?Holmes",
        "sherlock",
    ]);
    let expected = "\
sherlock:1:16:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:1:57:For the Doctor Watsons of this world, as opposed to the Sherlock
sherlock:3:49:be, to a very large extent, the result of luck. Sherlock Holmes
sherlock:5:12:but Doctor Watson has to have it taken out for him and dusted,
";
    eqnice!(expected, cmd.stdout());
});
// Tests that multiline search works when reading from stdin. This is an
// important test because multiline search must read the entire contents of
// what it is searching into memory before executing the search.
rgtest!(stdin, |_: Dir, mut cmd: TestCommand| {
    cmd.args(&["-n", "-U", r"of this world\p{Any}+?detective work"]);
    let expected = "\
1:For the Doctor Watsons of this world, as opposed to the Sherlock
2:Holmeses, success in the province of detective work must always
";
    eqnice!(expected, cmd.pipe(SHERLOCK.as_bytes()));
});
// Test that multiline search and contextual matches work.
rgtest!(context, |dir: Dir, mut cmd: TestCommand| {
    dir.create("sherlock", SHERLOCK);
    cmd.args(&[
        "-n",
        "-U",
        "-C1",
        r"detective work\p{Any}+?result of luck",
        "sherlock",
    ]);
    // Context lines use a '-' separator while matching lines use ':'.
    let expected = "\
1-For the Doctor Watsons of this world, as opposed to the Sherlock
2:Holmeses, success in the province of detective work must always
3:be, to a very large extent, the result of luck. Sherlock Holmes
4-can extract a clew from a wisp of straw or a flake of cigar ash;
";
    eqnice!(expected, cmd.stdout());
});
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/fuzz/fuzz_targets/fuzz_glob.rs | fuzz/fuzz_targets/fuzz_glob.rs | #![no_main]
use std::str::FromStr;
use globset::Glob;
// Fuzzes glob parsing: any input that parses via both constructors must
// produce equal `Glob`s that round-trip back to the original pattern string.
libfuzzer_sys::fuzz_target!(|glob_str: &str| {
    let Ok(glob) = Glob::new(glob_str) else {
        return;
    };
    let Ok(glob2) = Glob::from_str(glob_str) else {
        return;
    };
    // Verify that a `Glob` constructed with `new` is the same as a `Glob`
    // constructed with `from_str`.
    assert_eq!(glob, glob2);
    // Verify that `Glob::glob` produces the same string as the original.
    assert_eq!(glob.glob(), glob_str);
});
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/matcher/src/lib.rs | crates/matcher/src/lib.rs | /*!
This crate provides an interface for regular expressions, with a focus on line
oriented search. The purpose of this crate is to provide a low level matching
interface that permits any kind of substring or regex implementation to power
the search routines provided by the
[`grep-searcher`](https://docs.rs/grep-searcher)
crate.
The primary thing provided by this crate is the [`Matcher`] trait. The trait
defines an abstract interface for text search. It is robust enough to support
everything from basic substring search all the way to arbitrarily complex
regular expression implementations without sacrificing performance.
A key design decision made in this crate is the use of *internal iteration*,
also known as the "push" model of searching. In this paradigm,
implementations of the `Matcher` trait will drive search and execute callbacks
provided by the caller when a match is found. This is in contrast to the
usual style of *external iteration* (the "pull" model) found throughout the
Rust ecosystem. There are two primary reasons why internal iteration was
chosen:
* Some search implementations may themselves require internal iteration.
Converting an internal iterator to an external iterator can be non-trivial
and sometimes even practically impossible.
* Rust's type system isn't quite expressive enough to write a generic interface
using external iteration without giving something else up (namely, ease of
use and/or performance).
In other words, internal iteration was chosen because it is the lowest common
denominator and because it is probably the least bad way of expressing the
interface in today's Rust. As a result, this trait isn't specifically intended
for everyday use, although, you might find it to be a happy price to pay if you
want to write code that is generic over multiple different regex
implementations.
*/
#![deny(missing_docs)]
use crate::interpolate::interpolate;
mod interpolate;
/// The type of a match.
///
/// A match is a possibly empty range pointing into a contiguous block of
/// addressable memory.
///
/// Every `Match` upholds the invariant that `start <= end`.
///
/// # Indexing
///
/// Structurally, this type is identical to `std::ops::Range<usize>`, but it
/// is a bit more convenient for working with match offsets. In particular,
/// it is `Copy`, it provides methods for deriving new `Match` values from
/// existing ones, and it enforces that `start` never exceeds `end`.
///
/// A `Match` may be used to slice a `&[u8]`, `&mut [u8]` or `&str` using
/// range notation. e.g.,
///
/// ```
/// use grep_matcher::Match;
///
/// let m = Match::new(2, 5);
/// let bytes = b"abcdefghi";
/// assert_eq!(b"cde", &bytes[m]);
/// ```
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct Match {
    start: usize,
    end: usize,
}

impl Match {
    /// Create a new match.
    ///
    /// # Panics
    ///
    /// This function panics if `start > end`.
    #[inline]
    pub fn new(start: usize, end: usize) -> Match {
        assert!(start <= end);
        Match { start, end }
    }

    /// Creates a zero width match at the given offset.
    #[inline]
    pub fn zero(offset: usize) -> Match {
        Match::new(offset, offset)
    }

    /// Return the start offset of this match.
    #[inline]
    pub fn start(&self) -> usize {
        self.start
    }

    /// Return the end offset of this match.
    #[inline]
    pub fn end(&self) -> usize {
        self.end
    }

    /// Return a new match with the start offset replaced with the given
    /// value.
    ///
    /// # Panics
    ///
    /// This method panics if `start > self.end`.
    #[inline]
    pub fn with_start(&self, start: usize) -> Match {
        assert!(start <= self.end, "{} is not <= {}", start, self.end);
        Match { start, end: self.end }
    }

    /// Return a new match with the end offset replaced with the given
    /// value.
    ///
    /// # Panics
    ///
    /// This method panics if `self.start > end`.
    #[inline]
    pub fn with_end(&self, end: usize) -> Match {
        assert!(self.start <= end, "{} is not <= {}", self.start, end);
        Match { start: self.start, end }
    }

    /// Offset this match by the given amount and return a new match.
    ///
    /// This adds the given offset to both the start and the end of this
    /// match and returns the result.
    ///
    /// # Panics
    ///
    /// This panics if adding the given amount to either the start or end
    /// offset would result in an overflow.
    #[inline]
    pub fn offset(&self, amount: usize) -> Match {
        let start = self.start.checked_add(amount).unwrap();
        let end = self.end.checked_add(amount).unwrap();
        Match { start, end }
    }

    /// Returns the number of bytes in this match.
    #[inline]
    pub fn len(&self) -> usize {
        self.end - self.start
    }

    /// Returns true if and only if this match is empty.
    #[inline]
    pub fn is_empty(&self) -> bool {
        // Because of the `start <= end` invariant, this is equivalent to
        // checking `len() == 0`.
        self.start == self.end
    }
}

impl std::ops::Index<Match> for [u8] {
    type Output = [u8];

    #[inline]
    fn index(&self, index: Match) -> &[u8] {
        &self[index.start()..index.end()]
    }
}

impl std::ops::IndexMut<Match> for [u8] {
    #[inline]
    fn index_mut(&mut self, index: Match) -> &mut [u8] {
        &mut self[index.start()..index.end()]
    }
}

impl std::ops::Index<Match> for str {
    type Output = str;

    #[inline]
    fn index(&self, index: Match) -> &str {
        &self[index.start()..index.end()]
    }
}
/// A line terminator.
///
/// A line terminator marks the end of a line. Generally, every line is
/// "terminated" either by the end of a stream or by a specific byte (or
/// sequence of bytes).
///
/// Typically, a line terminator is the single byte `\n` on Unix-like
/// systems, while Windows uses `\r\n` (referred to as `CRLF` for
/// `Carriage Return; Line Feed`).
///
/// The default line terminator is `\n` on all platforms.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct LineTerminator(LineTerminatorImp);

#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
enum LineTerminatorImp {
    /// Any single byte representing a line terminator.
    Byte(u8),
    /// A line terminator represented by `\r\n`.
    ///
    /// When this option is used, consumers may generally treat a lone `\n`
    /// as a line terminator in addition to `\r\n`.
    CRLF,
}

impl LineTerminator {
    /// Return a new single-byte line terminator. Any byte is valid.
    #[inline]
    pub fn byte(byte: u8) -> LineTerminator {
        LineTerminator(LineTerminatorImp::Byte(byte))
    }

    /// Return a new line terminator represented by `\r\n`.
    ///
    /// When this option is used, consumers may generally treat a lone `\n`
    /// as a line terminator in addition to `\r\n`.
    #[inline]
    pub fn crlf() -> LineTerminator {
        LineTerminator(LineTerminatorImp::CRLF)
    }

    /// Returns true if and only if this line terminator is CRLF.
    #[inline]
    pub fn is_crlf(&self) -> bool {
        matches!(self.0, LineTerminatorImp::CRLF)
    }

    /// Returns this line terminator as a single byte.
    ///
    /// If the line terminator is CRLF, then this returns `\n`. This is
    /// useful for routines that, for example, find line boundaries by
    /// treating `\n` as a line terminator even when it isn't preceded by
    /// `\r`.
    #[inline]
    pub fn as_byte(&self) -> u8 {
        if let LineTerminatorImp::Byte(byte) = self.0 { byte } else { b'\n' }
    }

    /// Returns this line terminator as a sequence of bytes.
    ///
    /// This returns a singleton sequence for all line terminators except
    /// `CRLF`, in which case it returns `\r\n`.
    ///
    /// The slice returned is guaranteed to have length at least `1`.
    #[inline]
    pub fn as_bytes(&self) -> &[u8] {
        match self.0 {
            LineTerminatorImp::CRLF => &[b'\r', b'\n'],
            LineTerminatorImp::Byte(ref byte) => std::slice::from_ref(byte),
        }
    }

    /// Returns true if and only if the given slice ends with this line
    /// terminator.
    ///
    /// If this line terminator is `CRLF`, then this only checks whether the
    /// last byte is `\n`.
    #[inline]
    pub fn is_suffix(&self, slice: &[u8]) -> bool {
        slice.last() == Some(&self.as_byte())
    }
}

impl Default for LineTerminator {
    #[inline]
    fn default() -> LineTerminator {
        LineTerminator::byte(b'\n')
    }
}
/// A set of bytes.
///
/// In this crate, byte sets are used to express bytes that can never appear
/// anywhere in a match for a particular implementation of the `Matcher` trait.
/// Specifically, if such a set can be determined, then it's possible for
/// callers to perform additional operations on the basis that certain bytes
/// may never match.
///
/// For example, if a search is configured to possibly produce results that
/// span multiple lines but a caller provided pattern can never match across
/// multiple lines, then it may make sense to divert to more optimized line
/// oriented routines that don't need to handle the multi-line match case.
#[derive(Clone, Debug)]
pub struct ByteSet(BitSet);
#[derive(Clone, Copy)]
struct BitSet([u64; 4]);
impl std::fmt::Debug for BitSet {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut fmtd = f.debug_set();
for b in 0..=255 {
if ByteSet(*self).contains(b) {
fmtd.entry(&b);
}
}
fmtd.finish()
}
}
impl ByteSet {
/// Create an empty set of bytes.
#[inline]
pub fn empty() -> ByteSet {
ByteSet(BitSet([0; 4]))
}
/// Create a full set of bytes such that every possible byte is in the set
/// returned.
#[inline]
pub fn full() -> ByteSet {
ByteSet(BitSet([u64::MAX; 4]))
}
/// Add a byte to this set.
///
/// If the given byte already belongs to this set, then this is a no-op.
#[inline]
pub fn add(&mut self, byte: u8) {
let bucket = byte / 64;
let bit = byte % 64;
(self.0).0[usize::from(bucket)] |= 1 << bit;
}
/// Add an inclusive range of bytes.
#[inline]
pub fn add_all(&mut self, start: u8, end: u8) {
for b in start..=end {
self.add(b);
}
}
/// Remove a byte from this set.
///
/// If the given byte is not in this set, then this is a no-op.
#[inline]
pub fn remove(&mut self, byte: u8) {
let bucket = byte / 64;
let bit = byte % 64;
(self.0).0[usize::from(bucket)] &= !(1 << bit);
}
/// Remove an inclusive range of bytes.
#[inline]
pub fn remove_all(&mut self, start: u8, end: u8) {
for b in start..=end {
self.remove(b);
}
}
/// Return true if and only if the given byte is in this set.
#[inline]
pub fn contains(&self, byte: u8) -> bool {
let bucket = byte / 64;
let bit = byte % 64;
(self.0).0[usize::from(bucket)] & (1 << bit) > 0
}
}
/// A trait that describes implementations of capturing groups.
///
/// When a matcher supports extracting capturing groups, it is that matcher's
/// responsibility to provide an implementation of this trait.
///
/// Principally, this trait exposes capturing groups through a uniform
/// interface without prescribing any specific representation. Different
/// matcher implementations may require different in-memory representations
/// of capturing groups; this trait lets each matcher keep its own.
///
/// Note that this trait deliberately provides no way to construct a new
/// capture value. Building one is the responsibility of a `Matcher`, which
/// might require knowledge of the matcher's internal implementation details.
pub trait Captures {
    /// Return the total number of capturing groups. This includes capturing
    /// groups that have not matched anything.
    fn len(&self) -> usize;

    /// Return the capturing group match at the given index. If no match of
    /// that capturing group exists, then this returns `None`.
    ///
    /// When a matcher reports a match with capturing groups, then the first
    /// capturing group (at index `0`) must always correspond to the offsets
    /// for the overall match.
    fn get(&self, i: usize) -> Option<Match>;

    /// Return the overall match for the capture.
    ///
    /// This returns the match for index `0`. That is, it is equivalent to
    /// `get(0).unwrap()`.
    #[inline]
    fn as_match(&self) -> Match {
        self.get(0).unwrap()
    }

    /// Returns true if and only if these captures are empty, which happens
    /// exactly when `len` is `0`.
    ///
    /// Note that capturing groups that have non-zero length but otherwise
    /// contain no matching groups are *not* empty.
    #[inline]
    fn is_empty(&self) -> bool {
        0 == self.len()
    }

    /// Expands all instances of `$name` in `replacement` to the
    /// corresponding capture group `name`, writing the result to the `dst`
    /// buffer given.
    ///
    /// (Note: if you're looking for a convenient way to perform replacements
    /// with interpolation, then you'll want the `replace_with_captures`
    /// method on the `Matcher` trait.)
    ///
    /// `name` may be an integer corresponding to the index of the capture
    /// group (counted by order of opening parenthesis, where `0` is the
    /// entire match), or it may be a name (consisting of letters, digits or
    /// underscores) corresponding to a named capture group.
    ///
    /// A `name` is translated to a capture group index via the given
    /// `name_to_index` function. If `name` isn't a valid capture group
    /// (whether the name doesn't exist or isn't a valid index), then it is
    /// replaced with the empty string.
    ///
    /// The longest possible name is used. e.g., `$1a` looks up the capture
    /// group named `1a` and not the capture group at index `1`. To exert
    /// more precise control over the name, use braces, e.g., `${1}a`. In
    /// all cases, capture group names are limited to ASCII letters, numbers
    /// and underscores.
    ///
    /// To write a literal `$` use `$$`.
    ///
    /// Note that the capture group match indices are resolved by slicing
    /// the given `haystack`. Generally, this means that `haystack` should
    /// be the same slice that was searched to get the current capture group
    /// matches.
    #[inline]
    fn interpolate<F>(
        &self,
        name_to_index: F,
        haystack: &[u8],
        replacement: &[u8],
        dst: &mut Vec<u8>,
    ) where
        F: FnMut(&str) -> Option<usize>,
    {
        // Resolve a capture group index to its matched text, writing
        // nothing when the group did not participate in the match.
        let append = |i: usize, dst: &mut Vec<u8>| {
            if let Some(m) = self.get(i) {
                dst.extend(&haystack[m]);
            }
        };
        interpolate(replacement, append, name_to_index, dst)
    }
}
/// NoCaptures provides an always-empty implementation of the `Captures` trait.
///
/// This type is useful for implementations of `Matcher` that don't support
/// capturing groups.
///
/// Note that since `get` always returns `None`, calling the trait's default
/// `as_match` method on a `NoCaptures` panics (it unwraps `get(0)`).
#[derive(Clone, Debug)]
pub struct NoCaptures(());
impl NoCaptures {
    /// Create an empty set of capturing groups.
    #[inline]
    pub fn new() -> NoCaptures {
        NoCaptures(())
    }
}
impl Captures for NoCaptures {
    // There are never any capturing groups.
    #[inline]
    fn len(&self) -> usize {
        0
    }
    // No group is ever available, not even the overall match.
    #[inline]
    fn get(&self, _: usize) -> Option<Match> {
        None
    }
}
/// NoError provides an error type for matchers that never produce errors.
///
/// This error type implements the `std::error::Error` and `std::fmt::Display`
/// traits for use in matcher implementations that can never produce errors.
///
/// The `std::fmt::Display` impl for this type panics, as does the conversion
/// to `std::io::Error`: a `NoError` value should be impossible to construct,
/// so actually formatting or converting one indicates a bug.
#[derive(Debug, Eq, PartialEq)]
pub struct NoError(());
impl std::error::Error for NoError {
    // `description` is deprecated in the standard library, but it is
    // overridden here to return a fixed string for older consumers.
    fn description(&self) -> &str {
        "no error"
    }
}
impl std::fmt::Display for NoError {
    fn fmt(&self, _: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        panic!("BUG for NoError: an impossible error occurred")
    }
}
impl From<NoError> for std::io::Error {
    fn from(_: NoError) -> std::io::Error {
        panic!("BUG for NoError: an impossible error occurred")
    }
}
/// The type of match for a line oriented matcher.
///
/// A line oriented matcher reports either a position known to be part of a
/// match (`Confirmed`) or a position that merely might be and requires
/// verification (`Candidate`).
#[derive(Clone, Copy, Debug)]
pub enum LineMatchKind {
    /// A position inside a line that is known to contain a match.
    ///
    /// This position can be anywhere in the line. It does not need to point
    /// at the location of the match.
    Confirmed(usize),
    /// A position inside a line that may contain a match, and must be searched
    /// for verification.
    ///
    /// This position can be anywhere in the line. It does not need to point
    /// at the location of the match.
    Candidate(usize),
}
/// A matcher defines an interface for regular expression implementations.
///
/// While this trait is large, there are only two required methods that
/// implementors must provide: `find_at` and `new_captures`. If captures aren't
/// supported by your implementation, then `new_captures` can be implemented
/// with [`NoCaptures`]. If your implementation does support capture groups,
/// then you should also implement the other capture related methods, as
/// dictated by the documentation. Crucially, this includes `captures_at`.
///
/// The rest of the methods on this trait provide default implementations on
/// top of `find_at` and `new_captures`. It is not uncommon for implementations
/// to be able to provide faster variants of some methods; in those cases,
/// simply override the default implementation.
pub trait Matcher {
/// The concrete type of capturing groups used for this matcher.
///
/// If this implementation does not support capturing groups, then set
/// this to `NoCaptures`.
type Captures: Captures;
/// The error type used by this matcher.
///
/// For matchers in which an error is not possible, they are encouraged to
/// use the `NoError` type in this crate. In the future, when the "never"
/// (spelled `!`) type is stabilized, then it should probably be used
/// instead.
type Error: std::fmt::Display;
/// Returns the start and end byte range of the first match in `haystack`
/// after `at`, where the byte offsets are relative to that start of
/// `haystack` (and not `at`). If no match exists, then `None` is returned.
///
/// The text encoding of `haystack` is not strictly specified. Matchers are
/// advised to assume UTF-8, or at worst, some ASCII compatible encoding.
///
/// The significance of the starting point is that it takes the surrounding
/// context into consideration. For example, the `\A` anchor can only
/// match when `at == 0`.
fn find_at(
&self,
haystack: &[u8],
at: usize,
) -> Result<Option<Match>, Self::Error>;
/// Creates an empty group of captures suitable for use with the capturing
/// APIs of this trait.
///
/// Implementations that don't support capturing groups should use
/// the `NoCaptures` type and implement this method by calling
/// `NoCaptures::new()`.
fn new_captures(&self) -> Result<Self::Captures, Self::Error>;
/// Returns the total number of capturing groups in this matcher.
///
/// If a matcher supports capturing groups, then this value must always be
/// at least 1, where the first capturing group always corresponds to the
/// overall match.
///
/// If a matcher does not support capturing groups, then this should
/// always return 0.
///
/// By default, capturing groups are not supported, so this always
/// returns 0.
#[inline]
fn capture_count(&self) -> usize {
0
}
/// Maps the given capture group name to its corresponding capture group
/// index, if one exists. If one does not exist, then `None` is returned.
///
/// If the given capture group name maps to multiple indices, then it is
/// not specified which one is returned. However, it is guaranteed that
/// one of them is returned.
///
/// By default, capturing groups are not supported, so this always returns
/// `None`.
#[inline]
fn capture_index(&self, _name: &str) -> Option<usize> {
None
}
/// Returns the start and end byte range of the first match in `haystack`.
/// If no match exists, then `None` is returned.
///
/// The text encoding of `haystack` is not strictly specified. Matchers are
/// advised to assume UTF-8, or at worst, some ASCII compatible encoding.
#[inline]
fn find(&self, haystack: &[u8]) -> Result<Option<Match>, Self::Error> {
self.find_at(haystack, 0)
}
/// Executes the given function over successive non-overlapping matches
/// in `haystack`. If no match exists, then the given function is never
/// called. If the function returns `false`, then iteration stops.
#[inline]
fn find_iter<F>(
&self,
haystack: &[u8],
matched: F,
) -> Result<(), Self::Error>
where
F: FnMut(Match) -> bool,
{
self.find_iter_at(haystack, 0, matched)
}
/// Executes the given function over successive non-overlapping matches
/// in `haystack`. If no match exists, then the given function is never
/// called. If the function returns `false`, then iteration stops.
///
/// The significance of the starting point is that it takes the surrounding
/// context into consideration. For example, the `\A` anchor can only
/// match when `at == 0`.
#[inline]
fn find_iter_at<F>(
&self,
haystack: &[u8],
at: usize,
mut matched: F,
) -> Result<(), Self::Error>
where
F: FnMut(Match) -> bool,
{
self.try_find_iter_at(haystack, at, |m| Ok(matched(m)))
.map(|r: Result<(), ()>| r.unwrap())
}
/// Executes the given function over successive non-overlapping matches
/// in `haystack`. If no match exists, then the given function is never
/// called. If the function returns `false`, then iteration stops.
/// Similarly, if the function returns an error then iteration stops and
/// the error is yielded. If an error occurs while executing the search,
/// then it is converted to
/// `E`.
#[inline]
fn try_find_iter<F, E>(
&self,
haystack: &[u8],
matched: F,
) -> Result<Result<(), E>, Self::Error>
where
F: FnMut(Match) -> Result<bool, E>,
{
self.try_find_iter_at(haystack, 0, matched)
}
/// Executes the given function over successive non-overlapping matches
/// in `haystack`. If no match exists, then the given function is never
/// called. If the function returns `false`, then iteration stops.
/// Similarly, if the function returns an error then iteration stops and
/// the error is yielded. If an error occurs while executing the search,
/// then it is converted to
/// `E`.
///
/// The significance of the starting point is that it takes the surrounding
/// context into consideration. For example, the `\A` anchor can only
/// match when `at == 0`.
#[inline]
fn try_find_iter_at<F, E>(
&self,
haystack: &[u8],
at: usize,
mut matched: F,
) -> Result<Result<(), E>, Self::Error>
where
F: FnMut(Match) -> Result<bool, E>,
{
let mut last_end = at;
let mut last_match = None;
loop {
if last_end > haystack.len() {
return Ok(Ok(()));
}
let m = match self.find_at(haystack, last_end)? {
None => return Ok(Ok(())),
Some(m) => m,
};
if m.start == m.end {
// This is an empty match. To ensure we make progress, start
// the next search at the smallest possible starting position
// of the next match following this one.
last_end = m.end + 1;
// Don't accept empty matches immediately following a match.
// Just move on to the next match.
if Some(m.end) == last_match {
continue;
}
} else {
last_end = m.end;
}
last_match = Some(m.end);
match matched(m) {
Ok(true) => continue,
Ok(false) => return Ok(Ok(())),
Err(err) => return Ok(Err(err)),
}
}
}
/// Populates the first set of capture group matches from `haystack` into
/// `caps`. If no match exists, then `false` is returned.
///
/// The text encoding of `haystack` is not strictly specified. Matchers are
/// advised to assume UTF-8, or at worst, some ASCII compatible encoding.
#[inline]
fn captures(
&self,
haystack: &[u8],
caps: &mut Self::Captures,
) -> Result<bool, Self::Error> {
self.captures_at(haystack, 0, caps)
}
/// Executes the given function over successive non-overlapping matches
/// in `haystack` with capture groups extracted from each match. If no
/// match exists, then the given function is never called. If the function
/// returns `false`, then iteration stops.
#[inline]
fn captures_iter<F>(
&self,
haystack: &[u8],
caps: &mut Self::Captures,
matched: F,
) -> Result<(), Self::Error>
where
F: FnMut(&Self::Captures) -> bool,
{
self.captures_iter_at(haystack, 0, caps, matched)
}
/// Executes the given function over successive non-overlapping matches
/// in `haystack` with capture groups extracted from each match. If no
/// match exists, then the given function is never called. If the function
/// returns `false`, then iteration stops.
///
/// The significance of the starting point is that it takes the surrounding
/// context into consideration. For example, the `\A` anchor can only
/// match when `at == 0`.
#[inline]
fn captures_iter_at<F>(
&self,
haystack: &[u8],
at: usize,
caps: &mut Self::Captures,
mut matched: F,
) -> Result<(), Self::Error>
where
F: FnMut(&Self::Captures) -> bool,
{
self.try_captures_iter_at(haystack, at, caps, |caps| Ok(matched(caps)))
.map(|r: Result<(), ()>| r.unwrap())
}
/// Executes the given function over successive non-overlapping matches
/// in `haystack` with capture groups extracted from each match. If no
/// match exists, then the given function is never called. If the function
/// returns `false`, then iteration stops. Similarly, if the function
/// returns an error then iteration stops and the error is yielded. If
/// an error occurs while executing the search, then it is converted to
/// `E`.
#[inline]
fn try_captures_iter<F, E>(
&self,
haystack: &[u8],
caps: &mut Self::Captures,
matched: F,
) -> Result<Result<(), E>, Self::Error>
where
F: FnMut(&Self::Captures) -> Result<bool, E>,
{
self.try_captures_iter_at(haystack, 0, caps, matched)
}
/// Executes the given function over successive non-overlapping matches
/// in `haystack` with capture groups extracted from each match. If no
/// match exists, then the given function is never called. If the function
/// returns `false`, then iteration stops. Similarly, if the function
/// returns an error then iteration stops and the error is yielded. If
/// an error occurs while executing the search, then it is converted to
/// `E`.
///
/// The significance of the starting point is that it takes the surrounding
/// context into consideration. For example, the `\A` anchor can only
/// match when `at == 0`.
#[inline]
fn try_captures_iter_at<F, E>(
&self,
haystack: &[u8],
at: usize,
caps: &mut Self::Captures,
mut matched: F,
) -> Result<Result<(), E>, Self::Error>
where
F: FnMut(&Self::Captures) -> Result<bool, E>,
{
let mut last_end = at;
let mut last_match = None;
loop {
if last_end > haystack.len() {
return Ok(Ok(()));
}
if !self.captures_at(haystack, last_end, caps)? {
return Ok(Ok(()));
}
let m = caps.get(0).unwrap();
if m.start == m.end {
// This is an empty match. To ensure we make progress, start
// the next search at the smallest possible starting position
// of the next match following this one.
last_end = m.end + 1;
// Don't accept empty matches immediately following a match.
// Just move on to the next match.
if Some(m.end) == last_match {
continue;
}
} else {
last_end = m.end;
}
last_match = Some(m.end);
match matched(caps) {
Ok(true) => continue,
Ok(false) => return Ok(Ok(())),
Err(err) => return Ok(Err(err)),
}
}
}
/// Populates the first set of capture group matches from `haystack`
/// into `matches` after `at`, where the byte offsets in each capturing
/// group are relative to the start of `haystack` (and not `at`). If no
/// match exists, then `false` is returned and the contents of the given
/// capturing groups are unspecified.
///
/// The text encoding of `haystack` is not strictly specified. Matchers are
/// advised to assume UTF-8, or at worst, some ASCII compatible encoding.
///
/// The significance of the starting point is that it takes the surrounding
/// context into consideration. For example, the `\A` anchor can only
/// match when `at == 0`.
///
/// By default, capturing groups aren't supported, and this implementation
/// will always behave as if a match were impossible.
///
/// Implementors that provide support for capturing groups must guarantee
/// that when a match occurs, the first capture match (at index `0`) is
/// always set to the overall match offsets.
///
/// Note that if implementors seek to support capturing groups, then they
/// should implement this method. Other methods that match based on
/// captures will then work automatically.
#[inline]
fn captures_at(
&self,
_haystack: &[u8],
_at: usize,
_caps: &mut Self::Captures,
) -> Result<bool, Self::Error> {
Ok(false)
}
/// Replaces every match in the given haystack with the result of calling
/// `append`. `append` is given the start and end of a match, along with
/// a handle to the `dst` buffer provided.
///
/// If the given `append` function returns `false`, then replacement stops.
#[inline]
fn replace<F>(
&self,
haystack: &[u8],
dst: &mut Vec<u8>,
mut append: F,
) -> Result<(), Self::Error>
where
F: FnMut(Match, &mut Vec<u8>) -> bool,
{
let mut last_match = 0;
self.find_iter(haystack, |m| {
dst.extend(&haystack[last_match..m.start]);
last_match = m.end;
append(m, dst)
})?;
dst.extend(&haystack[last_match..]);
Ok(())
}
/// Replaces every match in the given haystack with the result of calling
/// `append` with the matching capture groups.
///
/// If the given `append` function returns `false`, then replacement stops.
#[inline]
fn replace_with_captures<F>(
&self,
haystack: &[u8],
caps: &mut Self::Captures,
dst: &mut Vec<u8>,
append: F,
) -> Result<(), Self::Error>
where
F: FnMut(&Self::Captures, &mut Vec<u8>) -> bool,
{
self.replace_with_captures_at(haystack, 0, caps, dst, append)
}
/// Replaces every match in the given haystack with the result of calling
/// `append` with the matching capture groups.
///
/// If the given `append` function returns `false`, then replacement stops.
///
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | true |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/matcher/src/interpolate.rs | crates/matcher/src/interpolate.rs | use memchr::memchr;
/// Interpolate capture references in `replacement` and write the interpolation
/// result to `dst`. References in `replacement` take the form of $N or $name,
/// where `N` is a capture group index and `name` is a capture group name. The
/// function provided, `name_to_index`, maps capture group names to indices.
///
/// The `append` function given is responsible for writing the replacement
/// to the `dst` buffer. That is, it is called with the capture group index
/// of a capture group reference and is expected to resolve the index to its
/// corresponding matched text. If no such match exists, then `append` should
/// not write anything to its given buffer.
#[inline]
pub fn interpolate<A, N>(
    mut replacement: &[u8],
    mut append: A,
    mut name_to_index: N,
    dst: &mut Vec<u8>,
) where
    A: FnMut(usize, &mut Vec<u8>),
    N: FnMut(&str) -> Option<usize>,
{
    while !replacement.is_empty() {
        // Copy everything before the next `$` verbatim. When there is no
        // `$` left, there are no more capture references either.
        let dollar = match memchr(b'$', replacement) {
            None => break,
            Some(i) => i,
        };
        dst.extend(&replacement[..dollar]);
        replacement = &replacement[dollar..];
        // `$$` is an escape for a literal `$`.
        if replacement.get(1) == Some(&b'$') {
            dst.push(b'$');
            replacement = &replacement[2..];
            continue;
        }
        debug_assert!(!replacement.is_empty());
        // A `$` that doesn't begin a valid capture reference is emitted
        // as-is.
        let Some(cap_ref) = find_cap_ref(replacement) else {
            dst.push(b'$');
            replacement = &replacement[1..];
            continue;
        };
        replacement = &replacement[cap_ref.end..];
        match cap_ref.cap {
            Ref::Number(i) => append(i, dst),
            Ref::Named(name) => {
                if let Some(i) = name_to_index(name) {
                    append(i, dst);
                }
            }
        }
    }
    // Whatever remains contains no `$` at all; copy it through.
    dst.extend(replacement);
}
/// `CaptureRef` represents a reference to a capture group inside some text.
/// The reference is either a capture group name or a number.
///
/// It is also tagged with the position in the text immediately following the
/// capture reference.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct CaptureRef<'a> {
    // The name or index of the referenced capture group.
    cap: Ref<'a>,
    // The offset just past the end of this reference in the source text.
    end: usize,
}
/// A reference to a capture group in some text.
///
/// e.g., `$2`, `$foo`, `${foo}`.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum Ref<'a> {
    /// A reference by name, e.g., `$foo`.
    Named(&'a str),
    /// A reference by index, e.g., `$2`.
    Number(usize),
}
// Convenience conversion from a name, used to build expected values in tests.
impl<'a> From<&'a str> for Ref<'a> {
    #[inline]
    fn from(x: &'a str) -> Ref<'a> {
        Ref::Named(x)
    }
}
// Convenience conversion from an index, used to build expected values in
// tests.
impl From<usize> for Ref<'static> {
    #[inline]
    fn from(x: usize) -> Ref<'static> {
        Ref::Number(x)
    }
}
/// Parses a possible reference to a capture group name in the given text,
/// starting at the beginning of `replacement`.
///
/// If no such valid reference could be found, None is returned.
#[inline]
fn find_cap_ref(replacement: &[u8]) -> Option<CaptureRef<'_>> {
    // A capture reference is at least a `$` followed by one more byte.
    if replacement.len() <= 1 || replacement[0] != b'$' {
        return None;
    }
    let mut pos = 1;
    let braced = replacement[pos] == b'{';
    if braced {
        pos += 1;
    }
    // Consume the longest run of valid capture name bytes.
    let start = pos;
    let mut end = start;
    while replacement.get(end).map_or(false, is_valid_cap_letter) {
        end += 1;
    }
    if end == start {
        return None;
    }
    // Every byte in `start..end` passed `is_valid_cap_letter`, so the range
    // is ASCII and therefore valid UTF-8. If we really cared, we could avoid
    // this UTF-8 check with an unchecked conversion or by parsing the number
    // straight from &[u8].
    let cap = std::str::from_utf8(&replacement[start..end])
        .expect("valid UTF-8 capture name");
    // A braced reference must be closed by `}`, which is also part of the
    // reference's extent.
    let mut ref_end = end;
    if braced {
        if replacement.get(end) != Some(&b'}') {
            return None;
        }
        ref_end += 1;
    }
    Some(CaptureRef {
        // An all-digit name that fits in a u32 is an index reference;
        // anything else is treated as a name.
        cap: match cap.parse::<u32>() {
            Ok(i) => Ref::Number(i as usize),
            Err(_) => Ref::Named(cap),
        },
        end: ref_end,
    })
}
/// Returns true if and only if the given byte is allowed in a capture name.
#[inline]
fn is_valid_cap_letter(b: &u8) -> bool {
    // Capture names consist of ASCII alphanumerics and underscores.
    matches!(*b, b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' | b'_')
}
#[cfg(test)]
mod tests {
    use super::{CaptureRef, find_cap_ref, interpolate};
    // Generates a test asserting the result of `find_cap_ref` on `$text`:
    // the two-argument form expects no capture reference to be found, while
    // the three-argument form expects exactly `$capref`.
    macro_rules! find {
        ($name:ident, $text:expr) => {
            #[test]
            fn $name() {
                assert_eq!(None, find_cap_ref($text.as_bytes()));
            }
        };
        ($name:ident, $text:expr, $capref:expr) => {
            #[test]
            fn $name() {
                assert_eq!(Some($capref), find_cap_ref($text.as_bytes()));
            }
        };
    }
    // Builds a `CaptureRef` from a name-or-number and its end offset.
    macro_rules! c {
        ($name_or_number:expr, $pos:expr) => {
            CaptureRef { cap: $name_or_number.into(), end: $pos }
        };
    }
    find!(find_cap_ref1, "$foo", c!("foo", 4));
    find!(find_cap_ref2, "${foo}", c!("foo", 6));
    find!(find_cap_ref3, "$0", c!(0, 2));
    find!(find_cap_ref4, "$5", c!(5, 2));
    find!(find_cap_ref5, "$10", c!(10, 3));
    // Without braces, `42a` is one maximal name and parses as *named*.
    find!(find_cap_ref6, "$42a", c!("42a", 4));
    // With braces, the name stops at `}`, so `42` parses as a number.
    find!(find_cap_ref7, "${42}a", c!(42, 5));
    // Unterminated braces are not references.
    find!(find_cap_ref8, "${42");
    find!(find_cap_ref9, "${42 ");
    // A reference is only recognized at the very start of the text.
    find!(find_cap_ref10, " $0 ");
    find!(find_cap_ref11, "$");
    find!(find_cap_ref12, " ");
    find!(find_cap_ref13, "");
    // A convenience routine for using interpolate's unwieldy but flexible API.
    fn interpolate_string(
        mut name_to_index: Vec<(&'static str, usize)>,
        caps: Vec<&'static str>,
        replacement: &str,
    ) -> String {
        // Sorted order lets the name lookup below use binary search.
        name_to_index.sort_by_key(|x| x.0);
        let mut dst = vec![];
        interpolate(
            replacement.as_bytes(),
            |i, dst| {
                if let Some(&s) = caps.get(i) {
                    dst.extend(s.as_bytes());
                }
            },
            |name| -> Option<usize> {
                name_to_index
                    .binary_search_by_key(&name, |x| x.0)
                    .ok()
                    .map(|i| name_to_index[i].1)
            },
            &mut dst,
        );
        String::from_utf8(dst).unwrap()
    }
    // Generates a test asserting that interpolating `$hay` with the given
    // name map and capture strings produces `$expected`.
    macro_rules! interp {
        ($name:ident, $map:expr, $caps:expr, $hay:expr, $expected:expr $(,)*) => {
            #[test]
            fn $name() {
                assert_eq!($expected, interpolate_string($map, $caps, $hay));
            }
        };
    }
    interp!(
        interp1,
        vec![("foo", 2)],
        vec!["", "", "xxx"],
        "test $foo test",
        "test xxx test",
    );
    // `$footest` names the (unknown) group `footest`, which expands to
    // nothing.
    interp!(
        interp2,
        vec![("foo", 2)],
        vec!["", "", "xxx"],
        "test$footest",
        "test",
    );
    interp!(
        interp3,
        vec![("foo", 2)],
        vec!["", "", "xxx"],
        "test${foo}test",
        "testxxxtest",
    );
    // `$2test` names the (unknown) group `2test`, which expands to nothing.
    interp!(
        interp4,
        vec![("foo", 2)],
        vec!["", "", "xxx"],
        "test$2test",
        "test",
    );
    interp!(
        interp5,
        vec![("foo", 2)],
        vec!["", "", "xxx"],
        "test${2}test",
        "testxxxtest",
    );
    // `$$` escapes to a literal `$`.
    interp!(
        interp6,
        vec![("foo", 2)],
        vec!["", "", "xxx"],
        "test $$foo test",
        "test $foo test",
    );
    interp!(
        interp7,
        vec![("foo", 2)],
        vec!["", "", "xxx"],
        "test $foo",
        "test xxx",
    );
    interp!(
        interp8,
        vec![("foo", 2)],
        vec!["", "", "xxx"],
        "$foo test",
        "xxx test",
    );
    interp!(
        interp9,
        vec![("bar", 1), ("foo", 2)],
        vec!["", "yyy", "xxx"],
        "test $bar$foo",
        "test yyyxxx",
    );
    // A bare `$` followed by a non-name byte is passed through literally.
    interp!(
        interp10,
        vec![("bar", 1), ("foo", 2)],
        vec!["", "yyy", "xxx"],
        "test $ test",
        "test $ test",
    );
    interp!(
        interp11,
        vec![("bar", 1), ("foo", 2)],
        vec!["", "yyy", "xxx"],
        "test ${} test",
        "test ${} test",
    );
    interp!(
        interp12,
        vec![("bar", 1), ("foo", 2)],
        vec!["", "yyy", "xxx"],
        "test ${ } test",
        "test ${ } test",
    );
    interp!(
        interp13,
        vec![("bar", 1), ("foo", 2)],
        vec!["", "yyy", "xxx"],
        "test ${a b} test",
        "test ${a b} test",
    );
    // A valid but unknown braced name expands to nothing.
    interp!(
        interp14,
        vec![("bar", 1), ("foo", 2)],
        vec!["", "yyy", "xxx"],
        "test ${a} test",
        "test test",
    );
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/matcher/tests/tests.rs | crates/matcher/tests/tests.rs | mod util;
mod test_matcher;
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/matcher/tests/util.rs | crates/matcher/tests/util.rs | use std::collections::HashMap;
use {
grep_matcher::{Captures, Match, Matcher, NoCaptures, NoError},
regex::bytes::{CaptureLocations, Regex},
};
/// A `Matcher` implementation backed by a `regex::bytes::Regex`, used to
/// exercise the trait's default method implementations.
#[derive(Debug)]
pub(crate) struct RegexMatcher {
    // The compiled regex used for all matching.
    pub re: Regex,
    // Maps capture group names to their group indices.
    pub names: HashMap<String, usize>,
}
impl RegexMatcher {
    /// Builds a matcher, recording the index of every named capture group.
    pub(crate) fn new(re: Regex) -> RegexMatcher {
        let names = re
            .capture_names()
            .enumerate()
            .filter_map(|(i, name)| name.map(|n| (n.to_string(), i)))
            .collect();
        RegexMatcher { re, names }
    }
}
// Matcher operations in these tests are infallible, hence the `NoError` type.
type Result<T> = std::result::Result<T, NoError>;
impl Matcher for RegexMatcher {
    type Captures = RegexCaptures;
    type Error = NoError;
    /// Reports the leftmost match starting at or after `at`, if any.
    fn find_at(&self, haystack: &[u8], at: usize) -> Result<Option<Match>> {
        Ok(self
            .re
            .find_at(haystack, at)
            .map(|m| Match::new(m.start(), m.end())))
    }
    /// Allocates fresh capture locations sized for this regex.
    fn new_captures(&self) -> Result<RegexCaptures> {
        Ok(RegexCaptures(self.re.capture_locations()))
    }
    /// Runs the regex, recording capture offsets into `caps`, and returns
    /// whether a match occurred.
    fn captures_at(
        &self,
        haystack: &[u8],
        at: usize,
        caps: &mut RegexCaptures,
    ) -> Result<bool> {
        Ok(self.re.captures_read_at(&mut caps.0, haystack, at).is_some())
    }
    fn capture_count(&self) -> usize {
        self.re.captures_len()
    }
    fn capture_index(&self, name: &str) -> Option<usize> {
        // `.copied()` is the idiomatic form of `.map(|i| *i)` for `&usize`.
        self.names.get(name).copied()
    }
    // We purposely don't implement any other methods, so that we test the
    // default impls. The "real" Regex impl for Matcher provides a few more
    // impls. e.g., Its `find_iter` impl is faster than what we can do here,
    // since the regex crate avoids synchronization overhead.
}
/// Like `RegexMatcher`, but advertises no capture group support.
#[derive(Debug)]
pub(crate) struct RegexMatcherNoCaps(pub(crate) Regex);
impl Matcher for RegexMatcherNoCaps {
    type Captures = NoCaptures;
    type Error = NoError;
    /// Reports the leftmost match starting at or after `at`, if any.
    fn find_at(&self, haystack: &[u8], at: usize) -> Result<Option<Match>> {
        let found = self.0.find_at(haystack, at);
        Ok(found.map(|m| Match::new(m.start(), m.end())))
    }
    /// Capturing is unsupported, so this always yields `NoCaptures`.
    fn new_captures(&self) -> Result<NoCaptures> {
        Ok(NoCaptures::new())
    }
}
/// Capture offsets recorded by `RegexMatcher`, wrapping the regex crate's
/// `CaptureLocations`.
#[derive(Clone, Debug)]
pub(crate) struct RegexCaptures(CaptureLocations);
impl Captures for RegexCaptures {
    fn len(&self) -> usize {
        self.0.len()
    }
    /// Returns the span of capture group `i`, if it participated in a match.
    fn get(&self, i: usize) -> Option<Match> {
        let (start, end) = self.0.pos(i)?;
        Some(Match::new(start, end))
    }
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/matcher/tests/test_matcher.rs | crates/matcher/tests/test_matcher.rs | use {
grep_matcher::{Captures, Match, Matcher},
regex::bytes::Regex,
};
use crate::util::{RegexMatcher, RegexMatcherNoCaps};
/// Compiles `pattern` into a matcher with capture support.
fn matcher(pattern: &str) -> RegexMatcher {
    let re = Regex::new(pattern).unwrap();
    RegexMatcher::new(re)
}
/// Compiles `pattern` into a matcher that advertises no capture support.
fn matcher_no_caps(pattern: &str) -> RegexMatcherNoCaps {
    let re = Regex::new(pattern).unwrap();
    RegexMatcherNoCaps(re)
}
/// Shorthand for constructing a `Match` from start/end offsets.
fn m(start: usize, end: usize) -> Match {
    Match::new(start, end)
}
// The default `find` reports the span of the whole (first) match.
#[test]
fn find() {
    let matcher = matcher(r"(\w+)\s+(\w+)");
    assert_eq!(matcher.find(b" homer simpson ").unwrap(), Some(m(1, 14)));
}
// Exercises the default `find_iter`, including its short-circuit behavior
// when the callback returns `false`.
#[test]
fn find_iter() {
    let matcher = matcher(r"(\w+)\s+(\w+)");
    let mut matches = vec![];
    matcher
        .find_iter(b"aa bb cc dd", |m| {
            matches.push(m);
            true
        })
        .unwrap();
    assert_eq!(matches, vec![m(0, 5), m(6, 11)]);
    // Test that find_iter respects short circuiting.
    matches.clear();
    matcher
        .find_iter(b"aa bb cc dd", |m| {
            matches.push(m);
            false
        })
        .unwrap();
    assert_eq!(matches, vec![m(0, 5)]);
}
// The fallible iteration variant must stop at the first callback error and
// surface it to the caller.
#[test]
fn try_find_iter() {
    #[derive(Clone, Debug, Eq, PartialEq)]
    struct MyError;
    let matcher = matcher(r"(\w+)\s+(\w+)");
    let mut matches = vec![];
    let err = matcher
        .try_find_iter(b"aa bb cc dd", |m| {
            if matches.is_empty() {
                matches.push(m);
                Ok(true)
            } else {
                Err(MyError)
            }
        })
        .unwrap()
        .unwrap_err();
    assert_eq!(matches, vec![m(0, 5)]);
    assert_eq!(err, MyError);
}
// The default `shortest_match` just reports the end of a full `find`.
#[test]
fn shortest_match() {
    let matcher = matcher(r"a+");
    // This tests that the default impl isn't doing anything smart, and simply
    // defers to `find`.
    assert_eq!(matcher.shortest_match(b"aaa").unwrap(), Some(3));
    // The actual underlying regex is smarter.
    assert_eq!(matcher.re.shortest_match(b"aaa"), Some(1));
}
// Capture metadata (count, name-to-index) and per-group spans are reported
// correctly for a matcher that supports captures.
#[test]
fn captures() {
    let matcher = matcher(r"(?P<a>\w+)\s+(?P<b>\w+)");
    assert_eq!(matcher.capture_count(), 3);
    assert_eq!(matcher.capture_index("a"), Some(1));
    assert_eq!(matcher.capture_index("b"), Some(2));
    assert_eq!(matcher.capture_index("nada"), None);
    let mut caps = matcher.new_captures().unwrap();
    assert!(matcher.captures(b" homer simpson ", &mut caps).unwrap());
    assert_eq!(caps.get(0), Some(m(1, 14)));
    assert_eq!(caps.get(1), Some(m(1, 6)));
    assert_eq!(caps.get(2), Some(m(7, 14)));
}
// Exercises the default `captures_iter`, including its short-circuit
// behavior when the callback returns `false`.
#[test]
fn captures_iter() {
    let matcher = matcher(r"(?P<a>\w+)\s+(?P<b>\w+)");
    let mut caps = matcher.new_captures().unwrap();
    let mut matches = vec![];
    matcher
        .captures_iter(b"aa bb cc dd", &mut caps, |caps| {
            matches.push(caps.get(0).unwrap());
            matches.push(caps.get(1).unwrap());
            matches.push(caps.get(2).unwrap());
            true
        })
        .unwrap();
    assert_eq!(
        matches,
        vec![m(0, 5), m(0, 2), m(3, 5), m(6, 11), m(6, 8), m(9, 11),]
    );
    // Test that captures_iter respects short circuiting.
    matches.clear();
    matcher
        .captures_iter(b"aa bb cc dd", &mut caps, |caps| {
            matches.push(caps.get(0).unwrap());
            matches.push(caps.get(1).unwrap());
            matches.push(caps.get(2).unwrap());
            false
        })
        .unwrap();
    assert_eq!(matches, vec![m(0, 5), m(0, 2), m(3, 5),]);
}
// The fallible capture-iteration variant must stop at the first callback
// error and surface it to the caller.
#[test]
fn try_captures_iter() {
    #[derive(Clone, Debug, Eq, PartialEq)]
    struct MyError;
    let matcher = matcher(r"(?P<a>\w+)\s+(?P<b>\w+)");
    let mut caps = matcher.new_captures().unwrap();
    let mut matches = vec![];
    let err = matcher
        .try_captures_iter(b"aa bb cc dd", &mut caps, |caps| {
            if matches.is_empty() {
                matches.push(caps.get(0).unwrap());
                matches.push(caps.get(1).unwrap());
                matches.push(caps.get(2).unwrap());
                Ok(true)
            } else {
                Err(MyError)
            }
        })
        .unwrap()
        .unwrap_err();
    assert_eq!(matches, vec![m(0, 5), m(0, 2), m(3, 5)]);
    assert_eq!(err, MyError);
}
// Test that our default impls for capturing are correct. Namely, when
// capturing isn't supported by the underlying matcher, then all of the
// various capturing related APIs fail fast.
#[test]
fn no_captures() {
    let matcher = matcher_no_caps(r"(?P<a>\w+)\s+(?P<b>\w+)");
    assert_eq!(matcher.capture_count(), 0);
    assert_eq!(matcher.capture_index("a"), None);
    assert_eq!(matcher.capture_index("b"), None);
    assert_eq!(matcher.capture_index("nada"), None);
    let mut caps = matcher.new_captures().unwrap();
    assert!(!matcher.captures(b"homer simpson", &mut caps).unwrap());
    // The callback should never run when captures are unsupported.
    let mut called = false;
    matcher
        .captures_iter(b"homer simpson", &mut caps, |_| {
            called = true;
            true
        })
        .unwrap();
    assert!(!called);
}
// Exercises the default `replace`: each match is replaced by whatever the
// callback appends, and a `false` return stops replacing (the remainder of
// the haystack is copied through unchanged).
#[test]
fn replace() {
    let matcher = matcher(r"(\w+)\s+(\w+)");
    let mut dst = vec![];
    matcher
        .replace(b"aa bb cc dd", &mut dst, |_, dst| {
            dst.push(b'z');
            true
        })
        .unwrap();
    assert_eq!(dst, b"z z");
    // Test that replacements respect short circuiting.
    dst.clear();
    matcher
        .replace(b"aa bb cc dd", &mut dst, |_, dst| {
            dst.push(b'z');
            false
        })
        .unwrap();
    assert_eq!(dst, b"z cc dd");
}
// Exercises the default `replace_with_captures` together with capture
// interpolation (`$1`/`$2` references), including short circuiting.
#[test]
fn replace_with_captures() {
    let matcher = matcher(r"(\w+)\s+(\w+)");
    let haystack = b"aa bb cc dd";
    let mut caps = matcher.new_captures().unwrap();
    let mut dst = vec![];
    matcher
        .replace_with_captures(haystack, &mut caps, &mut dst, |caps, dst| {
            caps.interpolate(
                |name| matcher.capture_index(name),
                haystack,
                b"$2 $1",
                dst,
            );
            true
        })
        .unwrap();
    assert_eq!(dst, b"bb aa dd cc");
    // Test that replacements respect short circuiting.
    dst.clear();
    matcher
        .replace_with_captures(haystack, &mut caps, &mut dst, |caps, dst| {
            caps.interpolate(
                |name| matcher.capture_index(name),
                haystack,
                b"$2 $1",
                dst,
            );
            false
        })
        .unwrap();
    assert_eq!(dst, b"bb aa cc dd");
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/searcher/src/lib.rs | crates/searcher/src/lib.rs | /*!
This crate provides an implementation of line oriented search, with optional
support for multi-line search.
# Brief overview
The principle type in this crate is a [`Searcher`], which can be configured
and built by a [`SearcherBuilder`]. A `Searcher` is responsible for reading
bytes from a source (e.g., a file), executing a search of those bytes using
a `Matcher` (e.g., a regex) and then reporting the results of that search to
a [`Sink`] (e.g., stdout). The `Searcher` itself is principally responsible
for managing the consumption of bytes from a source and applying a `Matcher`
over those bytes in an efficient way. The `Searcher` is also responsible for
inverting a search, counting lines, reporting contextual lines, detecting
binary data and even deciding whether or not to use memory maps.
A `Matcher` (which is defined in the
[`grep-matcher`](https://crates.io/crates/grep-matcher) crate) is a trait
for describing the lowest levels of pattern search in a generic way. The
interface itself is very similar to the interface of a regular expression.
For example, the [`grep-regex`](https://crates.io/crates/grep-regex)
crate provides an implementation of the `Matcher` trait using Rust's
[`regex`](https://crates.io/crates/regex) crate.
Finally, a `Sink` describes how callers receive search results produced by a
`Searcher`. This includes routines that are called at the beginning and end of
a search, in addition to routines that are called when matching or contextual
lines are found by the `Searcher`. Implementations of `Sink` can be trivially
simple, or extraordinarily complex, such as the `Standard` printer found in
the [`grep-printer`](https://crates.io/crates/grep-printer) crate, which
effectively implements grep-like output. This crate also provides convenience
`Sink` implementations in the [`sinks`] sub-module for easy searching with
closures.
# Example
This example shows how to execute the searcher and read the search results
using the [`UTF8`](sinks::UTF8) implementation of `Sink`.
```
use {
grep_matcher::Matcher,
grep_regex::RegexMatcher,
grep_searcher::Searcher,
grep_searcher::sinks::UTF8,
};
const SHERLOCK: &'static [u8] = b"\
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, success in the province of detective work must always
be, to a very large extent, the result of luck. Sherlock Holmes
can extract a clew from a wisp of straw or a flake of cigar ash;
but Doctor Watson has to have it taken out for him and dusted,
and exhibited clearly, with a label attached.
";
let matcher = RegexMatcher::new(r"Doctor \w+")?;
let mut matches: Vec<(u64, String)> = vec![];
Searcher::new().search_slice(&matcher, SHERLOCK, UTF8(|lnum, line| {
// We are guaranteed to find a match, so the unwrap is OK.
let mymatch = matcher.find(line.as_bytes())?.unwrap();
matches.push((lnum, line[mymatch].to_string()));
Ok(true)
}))?;
assert_eq!(matches.len(), 2);
assert_eq!(
matches[0],
(1, "Doctor Watsons".to_string())
);
assert_eq!(
matches[1],
(5, "Doctor Watson".to_string())
);
# Ok::<(), Box<dyn std::error::Error>>(())
```
See also `examples/search-stdin.rs` from the root of this crate's directory
to see a similar example that accepts a pattern on the command line and
searches stdin.
*/
#![deny(missing_docs)]
pub use crate::{
lines::{LineIter, LineStep},
searcher::{
BinaryDetection, ConfigError, Encoding, MmapChoice, Searcher,
SearcherBuilder,
},
sink::{
Sink, SinkContext, SinkContextKind, SinkError, SinkFinish, SinkMatch,
sinks,
},
};
#[macro_use]
mod macros;
mod line_buffer;
mod lines;
mod searcher;
mod sink;
#[cfg(test)]
mod testutil;
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/searcher/src/sink.rs | crates/searcher/src/sink.rs | use std::io;
use grep_matcher::LineTerminator;
use crate::{
lines::LineIter,
searcher::{ConfigError, Searcher},
};
/// A trait that describes errors that can be reported by searchers and
/// implementations of `Sink`.
///
/// Unless you have a specialized use case, you probably don't need to
/// implement this trait explicitly. It's likely that using `std::io::Error`
/// (which implements this trait) for your error type is good enough,
/// largely because most errors that occur during search will likely be an
/// `std::io::Error`.
pub trait SinkError: Sized {
    /// A constructor for converting any value that satisfies the
    /// `std::fmt::Display` trait into an error.
    ///
    /// This is the only required method; the other constructors are
    /// implemented in terms of it by default.
    fn error_message<T: std::fmt::Display>(message: T) -> Self;
    /// A constructor for converting I/O errors that occur while searching into
    /// an error of this type.
    ///
    /// By default, this is implemented via the `error_message` constructor.
    fn error_io(err: io::Error) -> Self {
        Self::error_message(err)
    }
    /// A constructor for converting configuration errors that occur while
    /// building a searcher into an error of this type.
    ///
    /// By default, this is implemented via the `error_message` constructor.
    fn error_config(err: ConfigError) -> Self {
        Self::error_message(err)
    }
}
/// An `std::io::Error` can be used as an error for `Sink` implementations out
/// of the box.
impl SinkError for io::Error {
    fn error_message<T: std::fmt::Display>(message: T) -> io::Error {
        // Render the message via `Display` and wrap it as an "other" error.
        let rendered = message.to_string();
        io::Error::new(io::ErrorKind::Other, rendered)
    }
    fn error_io(err: io::Error) -> io::Error {
        // Already the right type; pass it through unchanged.
        err
    }
}
/// A `Box<dyn std::error::Error>` can be used as an error for `Sink`
/// implementations out of the box.
impl SinkError for Box<dyn std::error::Error> {
    fn error_message<T: std::fmt::Display>(
        message: T,
    ) -> Box<dyn std::error::Error> {
        // A `String` converts directly into a boxed error via `From`.
        message.to_string().into()
    }
}
/// A trait that defines how results from searchers are handled.
///
/// In this crate, a searcher follows the "push" model. What that means is that
/// the searcher drives execution, and pushes results back to the caller. This
/// is in contrast to a "pull" model where the caller drives execution and
/// takes results as they need them. These are also known as "internal" and
/// "external" iteration strategies, respectively.
///
/// For a variety of reasons, including the complexity of the searcher
/// implementation, this crate chooses the "push" or "internal" model of
/// execution. Thus, in order to act on search results, callers must provide
/// an implementation of this trait to a searcher, and the searcher is then
/// responsible for calling the methods on this trait.
///
/// This trait defines several behaviors:
///
/// * What to do when a match is found. Callers must provide this.
/// * What to do when an error occurs. Callers must provide this via the
/// [`SinkError`] trait. Generally, callers can just use `std::io::Error` for
/// this, which already implements `SinkError`.
/// * What to do when a contextual line is found. By default, these are
/// ignored.
/// * What to do when a gap between contextual lines has been found. By
/// default, this is ignored.
/// * What to do when a search has started. By default, this does nothing.
/// * What to do when a search has finished successfully. By default, this does
/// nothing.
///
/// Callers must, at minimum, specify the behavior when an error occurs and
/// the behavior when a match occurs. The rest is optional. For each behavior,
/// callers may report an error (say, if writing the result to another
/// location failed) or simply return `false` if they want the search to stop
/// (e.g., when implementing a cap on the number of search results to show).
///
/// When errors are reported (whether in the searcher or in the implementation
/// of `Sink`), then searchers quit immediately without calling `finish`.
///
/// For simpler uses of `Sink`, callers may elect to use one of
/// the more convenient but less flexible implementations in the
/// [`sinks`] module.
pub trait Sink {
    /// The type of an error that should be reported by a searcher.
    ///
    /// Errors of this type are not only returned by the methods on this
    /// trait, but the constructors defined in `SinkError` are also used in
    /// the searcher implementation itself. e.g., when an I/O error occurs
    /// when reading data from a file.
    type Error: SinkError;
    /// This method is called whenever a match is found.
    ///
    /// If multi line is enabled on the searcher, then the match reported here
    /// may span multiple lines and it may include multiple matches. When multi
    /// line is disabled, then the match is guaranteed to span exactly one
    /// non-empty line (where a single line is, at minimum, a line terminator).
    ///
    /// If this returns `true`, then searching continues. If this returns
    /// `false`, then searching is stopped immediately and `finish` is called.
    ///
    /// If this returns an error, then searching is stopped immediately,
    /// `finish` is not called and the error is bubbled back up to the caller
    /// of the searcher.
    fn matched(
        &mut self,
        _searcher: &Searcher,
        _mat: &SinkMatch<'_>,
    ) -> Result<bool, Self::Error>;
    /// This method is called whenever a context line is found, and is optional
    /// to implement. By default, it does nothing and returns `true`.
    ///
    /// In all cases, the context given is guaranteed to span exactly one
    /// non-empty line (where a single line is, at minimum, a line terminator).
    ///
    /// If this returns `true`, then searching continues. If this returns
    /// `false`, then searching is stopped immediately and `finish` is called.
    ///
    /// If this returns an error, then searching is stopped immediately,
    /// `finish` is not called and the error is bubbled back up to the caller
    /// of the searcher.
    #[inline]
    fn context(
        &mut self,
        _searcher: &Searcher,
        _context: &SinkContext<'_>,
    ) -> Result<bool, Self::Error> {
        Ok(true)
    }
    /// This method is called whenever a break in contextual lines is found,
    /// and is optional to implement. By default, it does nothing and returns
    /// `true`.
    ///
    /// A break can only occur when context reporting is enabled (that is,
    /// either or both of `before_context` or `after_context` are greater than
    /// `0`). More precisely, a break occurs between non-contiguous groups of
    /// lines.
    ///
    /// If this returns `true`, then searching continues. If this returns
    /// `false`, then searching is stopped immediately and `finish` is called.
    ///
    /// If this returns an error, then searching is stopped immediately,
    /// `finish` is not called and the error is bubbled back up to the caller
    /// of the searcher.
    #[inline]
    fn context_break(
        &mut self,
        _searcher: &Searcher,
    ) -> Result<bool, Self::Error> {
        Ok(true)
    }
    /// This method is called whenever binary detection is enabled and binary
    /// data is found. If binary data is found, then this is called at least
    /// once for the first occurrence with the absolute byte offset at which
    /// the binary data begins.
    ///
    /// If this returns `true`, then searching continues. If this returns
    /// `false`, then searching is stopped immediately and `finish` is called.
    ///
    /// If this returns an error, then searching is stopped immediately,
    /// `finish` is not called and the error is bubbled back up to the caller
    /// of the searcher.
    ///
    /// By default, it does nothing and returns `true`.
    #[inline]
    fn binary_data(
        &mut self,
        _searcher: &Searcher,
        _binary_byte_offset: u64,
    ) -> Result<bool, Self::Error> {
        Ok(true)
    }
    /// This method is called when a search has begun, before any search is
    /// executed. By default, this does nothing.
    ///
    /// If this returns `true`, then searching continues. If this returns
    /// `false`, then searching is stopped immediately and `finish` is called.
    ///
    /// If this returns an error, then searching is stopped immediately,
    /// `finish` is not called and the error is bubbled back up to the caller
    /// of the searcher.
    #[inline]
    fn begin(&mut self, _searcher: &Searcher) -> Result<bool, Self::Error> {
        Ok(true)
    }
    /// This method is called when a search has completed. By default, this
    /// does nothing.
    ///
    /// If this returns an error, the error is bubbled back up to the caller of
    /// the searcher.
    #[inline]
    fn finish(
        &mut self,
        _searcher: &Searcher,
        _: &SinkFinish,
    ) -> Result<(), Self::Error> {
        Ok(())
    }
}
// Forwarding impl: a `&mut S` is itself a `Sink`, delegating every method to
// the underlying sink. This lets callers lend a sink to a searcher without
// giving up ownership.
impl<'a, S: Sink> Sink for &'a mut S {
    type Error = S::Error;
    #[inline]
    fn matched(
        &mut self,
        searcher: &Searcher,
        mat: &SinkMatch<'_>,
    ) -> Result<bool, S::Error> {
        (**self).matched(searcher, mat)
    }
    #[inline]
    fn context(
        &mut self,
        searcher: &Searcher,
        context: &SinkContext<'_>,
    ) -> Result<bool, S::Error> {
        (**self).context(searcher, context)
    }
    #[inline]
    fn context_break(
        &mut self,
        searcher: &Searcher,
    ) -> Result<bool, S::Error> {
        (**self).context_break(searcher)
    }
    #[inline]
    fn binary_data(
        &mut self,
        searcher: &Searcher,
        binary_byte_offset: u64,
    ) -> Result<bool, S::Error> {
        (**self).binary_data(searcher, binary_byte_offset)
    }
    #[inline]
    fn begin(&mut self, searcher: &Searcher) -> Result<bool, S::Error> {
        (**self).begin(searcher)
    }
    #[inline]
    fn finish(
        &mut self,
        searcher: &Searcher,
        sink_finish: &SinkFinish,
    ) -> Result<(), S::Error> {
        (**self).finish(searcher, sink_finish)
    }
}
// Forwarding impl: a `Box<S>` (including `Box<dyn Sink>`, thanks to
// `?Sized`) is itself a `Sink`, delegating every method to the boxed sink.
impl<S: Sink + ?Sized> Sink for Box<S> {
    type Error = S::Error;
    #[inline]
    fn matched(
        &mut self,
        searcher: &Searcher,
        mat: &SinkMatch<'_>,
    ) -> Result<bool, S::Error> {
        (**self).matched(searcher, mat)
    }
    #[inline]
    fn context(
        &mut self,
        searcher: &Searcher,
        context: &SinkContext<'_>,
    ) -> Result<bool, S::Error> {
        (**self).context(searcher, context)
    }
    #[inline]
    fn context_break(
        &mut self,
        searcher: &Searcher,
    ) -> Result<bool, S::Error> {
        (**self).context_break(searcher)
    }
    #[inline]
    fn binary_data(
        &mut self,
        searcher: &Searcher,
        binary_byte_offset: u64,
    ) -> Result<bool, S::Error> {
        (**self).binary_data(searcher, binary_byte_offset)
    }
    #[inline]
    fn begin(&mut self, searcher: &Searcher) -> Result<bool, S::Error> {
        (**self).begin(searcher)
    }
    #[inline]
    fn finish(
        &mut self,
        searcher: &Searcher,
        sink_finish: &SinkFinish,
    ) -> Result<(), S::Error> {
        (**self).finish(searcher, sink_finish)
    }
}
/// Summary data reported at the end of a search.
///
/// This reports data such as the total number of bytes searched and the
/// absolute offset of the first occurrence of binary data, if any were found.
///
/// A searcher that stops early because of an error does not call `finish`.
/// A searcher that stops early because the `Sink` implementor instructed it
/// to will still call `finish`.
#[derive(Clone, Debug)]
pub struct SinkFinish {
    // Total number of bytes searched.
    pub(crate) byte_count: u64,
    // Absolute offset of the first detected byte of binary data, if any.
    pub(crate) binary_byte_offset: Option<u64>,
}
impl SinkFinish {
    /// Return the total number of bytes searched.
    #[inline]
    pub fn byte_count(&self) -> u64 {
        self.byte_count
    }
    /// If binary detection is enabled and if binary data was found, then this
    /// returns the absolute byte offset of the first detected byte of binary
    /// data.
    ///
    /// Note that since this is an absolute byte offset, it cannot be relied
    /// upon to index into any addressable memory.
    #[inline]
    pub fn binary_byte_offset(&self) -> Option<u64> {
        self.binary_byte_offset
    }
}
/// A type that describes a match reported by a searcher.
#[derive(Clone, Debug)]
pub struct SinkMatch<'b> {
    // Line terminator in use; used by `lines()` to split `bytes`.
    pub(crate) line_term: LineTerminator,
    // The matching line(s), including terminators when present.
    pub(crate) bytes: &'b [u8],
    // Offset of the match from the very beginning of the input.
    pub(crate) absolute_byte_offset: u64,
    // First line number of the match, when line counting is enabled.
    pub(crate) line_number: Option<u64>,
    // The searched buffer that contains `bytes`.
    pub(crate) buffer: &'b [u8],
    // Where `bytes` sits inside `buffer`.
    pub(crate) bytes_range_in_buffer: std::ops::Range<usize>,
}
impl<'b> SinkMatch<'b> {
    /// Returns the bytes for all matching lines, including the line
    /// terminators, if they exist.
    #[inline]
    pub fn bytes(&self) -> &'b [u8] {
        self.bytes
    }
    /// Return an iterator over the lines in this match.
    ///
    /// If multi line search is enabled, then this may yield more than one
    /// line (but always at least one line). If multi line search is disabled,
    /// then this always reports exactly one line (but may consist of just
    /// the line terminator).
    ///
    /// Lines yielded by this iterator include their terminators.
    #[inline]
    pub fn lines(&self) -> LineIter<'b> {
        LineIter::new(self.line_term.as_byte(), self.bytes)
    }
    /// Returns the absolute byte offset of the start of this match. This
    /// offset is absolute in that it is relative to the very beginning of the
    /// input in a search, and can never be relied upon to be a valid index
    /// into an in-memory slice.
    #[inline]
    pub fn absolute_byte_offset(&self) -> u64 {
        self.absolute_byte_offset
    }
    /// Returns the line number of the first line in this match, if available.
    ///
    /// Line numbers are only available when the search builder is instructed
    /// to compute them.
    #[inline]
    pub fn line_number(&self) -> Option<u64> {
        self.line_number
    }
    /// Exposes as much of the underlying buffer that was searched as
    /// possible.
    #[inline]
    pub fn buffer(&self) -> &'b [u8] {
        self.buffer
    }
    /// Returns a range that corresponds to where [`SinkMatch::bytes`] appears
    /// in [`SinkMatch::buffer`].
    #[inline]
    pub fn bytes_range_in_buffer(&self) -> std::ops::Range<usize> {
        self.bytes_range_in_buffer.clone()
    }
}
/// The type of context reported by a searcher.
///
/// Delivered to `Sink::context` via [`SinkContext::kind`].
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum SinkContextKind {
    /// The line reported occurred before a match.
    Before,
    /// The line reported occurred after a match.
    After,
    /// Any other type of context reported, e.g., as a result of a searcher's
    /// "passthru" mode.
    Other,
}
/// A type that describes a contextual line reported by a searcher.
#[derive(Clone, Debug)]
pub struct SinkContext<'b> {
    // Only the test-only `lines()` below splits context by terminator, so
    // this field is compiled in for tests only.
    #[cfg(test)]
    pub(crate) line_term: LineTerminator,
    // The context line, including its terminator when present.
    pub(crate) bytes: &'b [u8],
    // Whether this line occurred before or after a match (or "other").
    pub(crate) kind: SinkContextKind,
    // Offset of this line from the very beginning of the input.
    pub(crate) absolute_byte_offset: u64,
    // Line number, when line counting is enabled.
    pub(crate) line_number: Option<u64>,
}
impl<'b> SinkContext<'b> {
    /// Returns the context bytes, including line terminators.
    #[inline]
    pub fn bytes(&self) -> &'b [u8] {
        self.bytes
    }
    /// Returns the type of context.
    #[inline]
    pub fn kind(&self) -> &SinkContextKind {
        &self.kind
    }
    /// Return an iterator over the lines in this match.
    ///
    /// This always yields exactly one line (and that one line may contain just
    /// the line terminator).
    ///
    /// Lines yielded by this iterator include their terminators.
    #[cfg(test)]
    pub(crate) fn lines(&self) -> LineIter<'b> {
        LineIter::new(self.line_term.as_byte(), self.bytes)
    }
    /// Returns the absolute byte offset of the start of this context. This
    /// offset is absolute in that it is relative to the very beginning of the
    /// input in a search, and can never be relied upon to be a valid index
    /// into an in-memory slice.
    #[inline]
    pub fn absolute_byte_offset(&self) -> u64 {
        self.absolute_byte_offset
    }
    /// Returns the line number of the first line in this context, if
    /// available.
    ///
    /// Line numbers are only available when the search builder is instructed
    /// to compute them.
    #[inline]
    pub fn line_number(&self) -> Option<u64> {
        self.line_number
    }
}
/// A collection of convenience implementations of `Sink`.
///
/// Each implementation in this module makes some kind of sacrifice in the name
/// of making common cases easier to use. Most frequently, each type is a
/// wrapper around a closure specified by the caller that provides limited
/// access to the full suite of information available to implementors of
/// `Sink`.
///
/// For example, the `UTF8` sink makes the following sacrifices:
///
/// * All matches must be UTF-8. An arbitrary `Sink` does not have this
///   restriction and can deal with arbitrary data. If this sink sees invalid
///   UTF-8, then an error is returned and searching stops. (Use the `Lossy`
///   sink instead to suppress this error.)
/// * The searcher must be configured to report line numbers. If it isn't,
///   an error is reported at the first match and searching stops.
/// * Context lines, context breaks and summary data reported at the end of
///   a search are all ignored.
/// * Implementors are forced to use `std::io::Error` as their error type.
///
/// If you need more flexibility, then you're advised to implement the `Sink`
/// trait directly.
pub mod sinks {
    use std::io;

    use crate::searcher::Searcher;

    use super::{Sink, SinkError, SinkMatch};

    /// A sink that provides line numbers and matches as strings while ignoring
    /// everything else.
    ///
    /// This implementation will return an error if a match contains invalid
    /// UTF-8 or if the searcher was not configured to count lines. Errors
    /// on invalid UTF-8 can be suppressed by using the `Lossy` sink instead
    /// of this one.
    ///
    /// The closure accepts two parameters: a line number and a UTF-8 string
    /// containing the matched data. The closure returns a
    /// `Result<bool, std::io::Error>`. If the `bool` is `false`, then the
    /// search stops immediately. Otherwise, searching continues.
    ///
    /// If multi line mode was enabled, the line number refers to the line
    /// number of the first line in the match.
    #[derive(Clone, Debug)]
    pub struct UTF8<F>(pub F)
    where
        F: FnMut(u64, &str) -> Result<bool, io::Error>;

    impl<F> Sink for UTF8<F>
    where
        F: FnMut(u64, &str) -> Result<bool, io::Error>,
    {
        type Error = io::Error;

        fn matched(
            &mut self,
            _searcher: &Searcher,
            mat: &SinkMatch<'_>,
        ) -> Result<bool, io::Error> {
            // Per this sink's contract, invalid UTF-8 is a hard error that
            // stops the search.
            let text = std::str::from_utf8(mat.bytes())
                .map_err(|err| io::Error::error_message(err))?;
            // This sink is only usable when the searcher counts lines.
            let line_number = mat.line_number().ok_or_else(|| {
                io::Error::error_message("line numbers not enabled")
            })?;
            (self.0)(line_number, text)
        }
    }

    /// A sink that provides line numbers and matches as (lossily converted)
    /// strings while ignoring everything else.
    ///
    /// This is like `UTF8`, except that if a match contains invalid UTF-8,
    /// then it will be lossily converted to valid UTF-8 by substituting
    /// invalid UTF-8 with Unicode replacement characters.
    ///
    /// This implementation will return an error on the first match if the
    /// searcher was not configured to count lines.
    ///
    /// The closure accepts two parameters: a line number and a UTF-8 string
    /// containing the matched data. The closure returns a
    /// `Result<bool, std::io::Error>`. If the `bool` is `false`, then the
    /// search stops immediately. Otherwise, searching continues.
    ///
    /// If multi line mode was enabled, the line number refers to the line
    /// number of the first line in the match.
    #[derive(Clone, Debug)]
    pub struct Lossy<F>(pub F)
    where
        F: FnMut(u64, &str) -> Result<bool, io::Error>;

    impl<F> Sink for Lossy<F>
    where
        F: FnMut(u64, &str) -> Result<bool, io::Error>,
    {
        type Error = io::Error;

        fn matched(
            &mut self,
            _searcher: &Searcher,
            mat: &SinkMatch<'_>,
        ) -> Result<bool, io::Error> {
            // `from_utf8_lossy` borrows the input when it is already valid
            // UTF-8 and only allocates (substituting replacement characters)
            // when it is not, so the common case performs no copy. This only
            // allocates on matches with invalid UTF-8, which should be rare.
            let text = String::from_utf8_lossy(mat.bytes());
            // This sink is only usable when the searcher counts lines.
            let line_number = mat.line_number().ok_or_else(|| {
                io::Error::error_message("line numbers not enabled")
            })?;
            (self.0)(line_number, &text)
        }
    }

    /// A sink that provides line numbers and matches as raw bytes while
    /// ignoring everything else.
    ///
    /// This implementation will return an error on the first match if the
    /// searcher was not configured to count lines.
    ///
    /// The closure accepts two parameters: a line number and a raw byte string
    /// containing the matched data. The closure returns a
    /// `Result<bool, std::io::Error>`. If the `bool` is `false`, then the
    /// search stops immediately. Otherwise, searching continues.
    ///
    /// If multi line mode was enabled, the line number refers to the line
    /// number of the first line in the match.
    #[derive(Clone, Debug)]
    pub struct Bytes<F>(pub F)
    where
        F: FnMut(u64, &[u8]) -> Result<bool, io::Error>;

    impl<F> Sink for Bytes<F>
    where
        F: FnMut(u64, &[u8]) -> Result<bool, io::Error>,
    {
        type Error = io::Error;

        fn matched(
            &mut self,
            _searcher: &Searcher,
            mat: &SinkMatch<'_>,
        ) -> Result<bool, io::Error> {
            // This sink is only usable when the searcher counts lines.
            let line_number = mat.line_number().ok_or_else(|| {
                io::Error::error_message("line numbers not enabled")
            })?;
            (self.0)(line_number, mat.bytes())
        }
    }
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/searcher/src/macros.rs | crates/searcher/src/macros.rs | /// Like assert_eq, but nicer output for long strings.
#[cfg(test)]
#[macro_export]
macro_rules! assert_eq_printed {
    // Accepts the expected value, the actual value, and trailing format
    // arguments that are used to build a label identifying which test
    // configuration failed.
    ($expected:expr, $got:expr, $($tt:tt)*) => {
        // Reborrow through `&*` so both owned strings and string slices can
        // be compared uniformly.
        let expected = &*$expected;
        let got = &*$got;
        let label = format!($($tt)*);
        if expected != got {
            // The fenced layout keeps multi-line outputs readable in the
            // panic message, unlike assert_eq!'s single-line Debug output.
            panic!("
printed outputs differ! (label: {})
expected:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
{}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
got:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
{}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
", label, expected, got);
        }
    }
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/searcher/src/lines.rs | crates/searcher/src/lines.rs | /*!
A collection of routines for performing operations on lines.
*/
use {
bstr::ByteSlice,
grep_matcher::{LineTerminator, Match},
};
/// An iterator over lines in a particular slice of bytes.
///
/// Line terminators are considered part of the line they terminate. All lines
/// yielded by the iterator are guaranteed to be non-empty.
///
/// `'b` refers to the lifetime of the underlying bytes.
#[derive(Debug)]
pub struct LineIter<'b> {
    // The full slice being iterated over.
    bytes: &'b [u8],
    // Cursor state; holds offsets only and does not itself borrow `bytes`.
    stepper: LineStep,
}
impl<'b> LineIter<'b> {
    /// Create a new line iterator that yields lines in the given bytes that
    /// are terminated by `line_term`.
    pub fn new(line_term: u8, bytes: &'b [u8]) -> LineIter<'b> {
        // The stepper covers the entire slice, from offset 0 to its end.
        LineIter { bytes, stepper: LineStep::new(line_term, 0, bytes.len()) }
    }
}
impl<'b> Iterator for LineIter<'b> {
    type Item = &'b [u8];

    fn next(&mut self) -> Option<&'b [u8]> {
        // Translate the stepper's offset range into a subslice of the bytes.
        let m = self.stepper.next_match(self.bytes)?;
        Some(&self.bytes[m])
    }
}
/// An explicit iterator over lines in a particular slice of bytes.
///
/// This iterator avoids borrowing the bytes themselves, and instead requires
/// callers to explicitly provide the bytes when moving through the iterator.
/// While not idiomatic, this provides a simple way of iterating over lines
/// that doesn't require borrowing the slice itself, which can be convenient.
///
/// Line terminators are considered part of the line they terminate. All lines
/// yielded by the iterator are guaranteed to be non-empty.
#[derive(Debug)]
pub struct LineStep {
    // The byte that terminates a line, e.g., b'\n'.
    line_term: u8,
    // The current offset; advanced past each line as it is yielded.
    pos: usize,
    // Exclusive upper bound of the region being iterated over.
    end: usize,
}
impl LineStep {
    /// Create a new line iterator over the given range of bytes using the
    /// given line terminator.
    ///
    /// Callers should provide the actual bytes for each call to `next`. The
    /// same slice must be provided to each call.
    ///
    /// This panics if `start` is not less than or equal to `end`.
    pub fn new(line_term: u8, start: usize, end: usize) -> LineStep {
        LineStep { line_term, pos: start, end }
    }

    /// Return the start and end position of the next line in the given bytes.
    ///
    /// The caller must past exactly the same slice of bytes for each call to
    /// `next`.
    ///
    /// The range returned includes the line terminator. Ranges are always
    /// non-empty.
    pub fn next(&mut self, bytes: &[u8]) -> Option<(usize, usize)> {
        self.next_impl(bytes)
    }

    /// Like next, but returns a `Match` instead of a tuple.
    #[inline(always)]
    pub(crate) fn next_match(&mut self, bytes: &[u8]) -> Option<Match> {
        self.next_impl(bytes).map(|(start, end)| Match::new(start, end))
    }

    #[inline(always)]
    fn next_impl(&mut self, bytes: &[u8]) -> Option<(usize, usize)> {
        // Never look past the configured end of the region.
        let bytes = &bytes[..self.end];
        // Determine where the current line ends: just past the next
        // terminator if there is one, otherwise at the end of the region
        // (but only when there is still unconsumed input).
        let line_end = match bytes[self.pos..].find_byte(self.line_term) {
            Some(offset) => self.pos + offset + 1,
            None if self.pos < bytes.len() => bytes.len(),
            None => return None,
        };
        let m = (self.pos, line_end);
        assert!(m.0 <= m.1);
        // Advance past the line just yielded.
        self.pos = m.1;
        Some(m)
    }
}
/// Count the number of occurrences of `line_term` in `bytes`.
pub(crate) fn count(bytes: &[u8], line_term: u8) -> u64 {
    // memchr gives a vectorized scan; counting its hits is equivalent to a
    // manual byte-by-byte tally.
    let occurrences = memchr::memchr_iter(line_term, bytes).count();
    occurrences as u64
}
/// Given a line that possibly ends with a terminator, return that line without
/// the terminator.
#[inline(always)]
pub(crate) fn without_terminator(
    bytes: &[u8],
    line_term: LineTerminator,
) -> &[u8] {
    // `strip_suffix` covers every case the manual offset arithmetic did:
    // an empty terminator strips nothing, and an input shorter than the
    // terminator can never match the suffix.
    bytes.strip_suffix(line_term.as_bytes()).unwrap_or(bytes)
}
/// Return the start and end offsets of the lines containing the given range
/// of bytes.
///
/// Line terminators are considered part of the line they terminate.
#[inline(always)]
pub(crate) fn locate(bytes: &[u8], line_term: u8, range: Match) -> Match {
    // The line starts just after the closest terminator preceding the range,
    // or at the beginning of the haystack when there is none.
    let line_start = match bytes[..range.start()].rfind_byte(line_term) {
        Some(i) => i + 1,
        None => 0,
    };
    // If the range already ends on a terminator (and isn't the terminator
    // that begins this line), it marks the end of the line. Otherwise,
    // extend forward to just past the next terminator, or to EOF.
    let line_end =
        if range.end() > line_start && bytes[range.end() - 1] == line_term {
            range.end()
        } else {
            match bytes[range.end()..].find_byte(line_term) {
                Some(i) => range.end() + i + 1,
                None => bytes.len(),
            }
        };
    Match::new(line_start, line_end)
}
/// Returns the minimal starting offset of the line that occurs `count` lines
/// before the last line in `bytes`.
///
/// Lines are terminated by `line_term`. If `count` is zero, then this returns
/// the starting offset of the last line in `bytes`.
///
/// If `bytes` ends with a line terminator, then the terminator itself is
/// considered part of the last line.
pub(crate) fn preceding(bytes: &[u8], line_term: u8, count: usize) -> usize {
    // Delegate to the positional variant, anchored at the end of `bytes`.
    preceding_by_pos(bytes, bytes.len(), line_term, count)
}
/// Returns the minimal starting offset of the line that occurs `count` lines
/// before the line containing `pos`. Lines are terminated by `line_term`.
/// If `count` is zero, then this returns the starting offset of the line
/// containing `pos`.
///
/// If `pos` points just past a line terminator, then it is considered part of
/// the line that it terminates. For example, given `bytes = b"abc\nxyz\n"`
/// and `pos = 7`, `preceding(bytes, pos, b'\n', 0)` returns `4` (as does `pos
/// = 8`) and `preceding(bytes, pos, `b'\n', 1)` returns `0`.
fn preceding_by_pos(
    bytes: &[u8],
    mut pos: usize,
    line_term: u8,
    mut count: usize,
) -> usize {
    if pos == 0 {
        return 0;
    }
    // If `pos` sits just past a terminator, pull it back so that terminator
    // is treated as part of the line it ends.
    if bytes[pos - 1] == line_term {
        pos -= 1;
    }
    // Walk backwards one terminator at a time until `count` lines have been
    // skipped, or until no terminator remains before `pos` (in which case
    // the answer is the start of the haystack).
    while let Some(i) = bytes[..pos].rfind_byte(line_term) {
        if count == 0 {
            return i + 1;
        }
        if i == 0 {
            return 0;
        }
        count -= 1;
        pos = i;
    }
    0
}
#[cfg(test)]
mod tests {
    use super::*;

    // Six lines of text; the trailing `\` before the closing quote means the
    // last line has no terminating `\n`.
    const SHERLOCK: &'static str = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, success in the province of detective work must always
be, to a very large extent, the result of luck. Sherlock Holmes
can extract a clew from a wisp of straw or a flake of cigar ash;
but Doctor Watson has to have it taken out for him and dusted,
and exhibited clearly, with a label attached.\
";

    // Shorthand for constructing a `Match` from a start/end offset pair.
    fn m(start: usize, end: usize) -> Match {
        Match::new(start, end)
    }

    // Collect each line of `text` (terminators included) as a `&str`.
    fn lines(text: &str) -> Vec<&str> {
        let mut results = vec![];
        let mut it = LineStep::new(b'\n', 0, text.len());
        while let Some(m) = it.next_match(text.as_bytes()) {
            results.push(&text[m]);
        }
        results
    }

    // Collect the byte-offset range of each line of `text`.
    fn line_ranges(text: &str) -> Vec<std::ops::Range<usize>> {
        let mut results = vec![];
        let mut it = LineStep::new(b'\n', 0, text.len());
        while let Some(m) = it.next_match(text.as_bytes()) {
            results.push(m.start()..m.end());
        }
        results
    }

    // Shorthand around `preceding_by_pos` using `\n` as the terminator.
    fn prev(text: &str, pos: usize, count: usize) -> usize {
        preceding_by_pos(text.as_bytes(), pos, b'\n', count)
    }

    // Shorthand around `locate` using `\n` as the terminator.
    fn loc(text: &str, start: usize, end: usize) -> Match {
        locate(text.as_bytes(), b'\n', Match::new(start, end))
    }

    #[test]
    fn line_count() {
        assert_eq!(0, count(b"", b'\n'));
        assert_eq!(1, count(b"\n", b'\n'));
        assert_eq!(2, count(b"\n\n", b'\n'));
        assert_eq!(2, count(b"a\nb\nc", b'\n'));
    }

    #[test]
    fn line_locate() {
        let t = SHERLOCK;
        let lines = line_ranges(t);

        assert_eq!(
            loc(t, lines[0].start, lines[0].end),
            m(lines[0].start, lines[0].end)
        );
        assert_eq!(
            loc(t, lines[0].start + 1, lines[0].end),
            m(lines[0].start, lines[0].end)
        );
        assert_eq!(
            loc(t, lines[0].end - 1, lines[0].end),
            m(lines[0].start, lines[0].end)
        );
        // A range beginning exactly at a line's end offset belongs to the
        // following line.
        assert_eq!(
            loc(t, lines[0].end, lines[0].end),
            m(lines[1].start, lines[1].end)
        );

        // The last line has no trailing terminator.
        assert_eq!(
            loc(t, lines[5].start, lines[5].end),
            m(lines[5].start, lines[5].end)
        );
        assert_eq!(
            loc(t, lines[5].start + 1, lines[5].end),
            m(lines[5].start, lines[5].end)
        );
        assert_eq!(
            loc(t, lines[5].end - 1, lines[5].end),
            m(lines[5].start, lines[5].end)
        );
        assert_eq!(
            loc(t, lines[5].end, lines[5].end),
            m(lines[5].start, lines[5].end)
        );
    }

    #[test]
    fn line_locate_weird() {
        // Degenerate inputs: empty haystacks, bare terminators and empty
        // ranges at various boundaries.
        assert_eq!(loc("", 0, 0), m(0, 0));

        assert_eq!(loc("\n", 0, 1), m(0, 1));
        assert_eq!(loc("\n", 1, 1), m(1, 1));

        assert_eq!(loc("\n\n", 0, 0), m(0, 1));
        assert_eq!(loc("\n\n", 0, 1), m(0, 1));
        assert_eq!(loc("\n\n", 1, 1), m(1, 2));
        assert_eq!(loc("\n\n", 1, 2), m(1, 2));
        assert_eq!(loc("\n\n", 2, 2), m(2, 2));

        assert_eq!(loc("a\nb\nc", 0, 1), m(0, 2));
        assert_eq!(loc("a\nb\nc", 1, 2), m(0, 2));
        assert_eq!(loc("a\nb\nc", 2, 3), m(2, 4));
        assert_eq!(loc("a\nb\nc", 3, 4), m(2, 4));
        assert_eq!(loc("a\nb\nc", 4, 5), m(4, 5));
        assert_eq!(loc("a\nb\nc", 5, 5), m(4, 5));
    }

    #[test]
    fn line_iter() {
        assert_eq!(lines("abc"), vec!["abc"]);

        assert_eq!(lines("abc\n"), vec!["abc\n"]);
        assert_eq!(lines("abc\nxyz"), vec!["abc\n", "xyz"]);
        assert_eq!(lines("abc\nxyz\n"), vec!["abc\n", "xyz\n"]);

        assert_eq!(lines("abc\n\n"), vec!["abc\n", "\n"]);
        assert_eq!(lines("abc\n\n\n"), vec!["abc\n", "\n", "\n"]);
        assert_eq!(lines("abc\n\nxyz"), vec!["abc\n", "\n", "xyz"]);
        assert_eq!(lines("abc\n\nxyz\n"), vec!["abc\n", "\n", "xyz\n"]);
        assert_eq!(lines("abc\nxyz\n\n"), vec!["abc\n", "xyz\n", "\n"]);

        assert_eq!(lines("\n"), vec!["\n"]);
        // The iterator never yields an empty line.
        assert_eq!(lines(""), Vec::<&str>::new());
    }

    #[test]
    fn line_iter_empty() {
        // A zero-length region yields nothing, regardless of the bytes.
        let mut it = LineStep::new(b'\n', 0, 0);
        assert_eq!(it.next(b"abc"), None);
    }

    #[test]
    fn preceding_lines_doc() {
        // These are the examples mentioned in the documentation of
        // `preceding`.
        let bytes = b"abc\nxyz\n";
        assert_eq!(4, preceding_by_pos(bytes, 7, b'\n', 0));
        assert_eq!(4, preceding_by_pos(bytes, 8, b'\n', 0));
        assert_eq!(0, preceding_by_pos(bytes, 7, b'\n', 1));
        assert_eq!(0, preceding_by_pos(bytes, 8, b'\n', 1));
    }

    #[test]
    fn preceding_lines_sherlock() {
        let t = SHERLOCK;
        let lines = line_ranges(t);

        // The following tests check the count == 0 case, i.e., finding the
        // beginning of the line containing the given position.
        assert_eq!(0, prev(t, 0, 0));
        assert_eq!(0, prev(t, 1, 0));
        // The line terminator is addressed by `end-1` and terminates the line
        // it is part of.
        assert_eq!(0, prev(t, lines[0].end - 1, 0));
        assert_eq!(lines[0].start, prev(t, lines[0].end, 0));
        // The end position of line addresses the byte immediately following a
        // line terminator, which puts it on the following line.
        assert_eq!(lines[1].start, prev(t, lines[0].end + 1, 0));

        // Now tests for count > 0.
        assert_eq!(0, prev(t, 0, 1));
        assert_eq!(0, prev(t, 0, 2));
        assert_eq!(0, prev(t, 1, 1));
        assert_eq!(0, prev(t, 1, 2));
        assert_eq!(0, prev(t, lines[0].end - 1, 1));
        assert_eq!(0, prev(t, lines[0].end - 1, 2));
        assert_eq!(0, prev(t, lines[0].end, 1));
        assert_eq!(0, prev(t, lines[0].end, 2));
        assert_eq!(lines[3].start, prev(t, lines[4].end - 1, 1));
        assert_eq!(lines[3].start, prev(t, lines[4].end, 1));
        assert_eq!(lines[4].start, prev(t, lines[4].end + 1, 1));

        // The last line has no line terminator.
        assert_eq!(lines[5].start, prev(t, lines[5].end, 0));
        assert_eq!(lines[5].start, prev(t, lines[5].end - 1, 0));
        assert_eq!(lines[4].start, prev(t, lines[5].end, 1));
        assert_eq!(lines[0].start, prev(t, lines[5].end, 5));
    }

    #[test]
    fn preceding_lines_short() {
        let t = "a\nb\nc\nd\ne\nf\n";
        let lines = line_ranges(t);
        assert_eq!(12, t.len());

        assert_eq!(lines[5].start, prev(t, lines[5].end, 0));
        assert_eq!(lines[4].start, prev(t, lines[5].end, 1));
        assert_eq!(lines[3].start, prev(t, lines[5].end, 2));
        assert_eq!(lines[2].start, prev(t, lines[5].end, 3));
        assert_eq!(lines[1].start, prev(t, lines[5].end, 4));
        assert_eq!(lines[0].start, prev(t, lines[5].end, 5));
        // Counts past the first line saturate at offset 0.
        assert_eq!(lines[0].start, prev(t, lines[5].end, 6));

        assert_eq!(lines[5].start, prev(t, lines[5].end - 1, 0));
        assert_eq!(lines[4].start, prev(t, lines[5].end - 1, 1));
        assert_eq!(lines[3].start, prev(t, lines[5].end - 1, 2));
        assert_eq!(lines[2].start, prev(t, lines[5].end - 1, 3));
        assert_eq!(lines[1].start, prev(t, lines[5].end - 1, 4));
        assert_eq!(lines[0].start, prev(t, lines[5].end - 1, 5));
        assert_eq!(lines[0].start, prev(t, lines[5].end - 1, 6));

        assert_eq!(lines[4].start, prev(t, lines[5].start, 0));
        assert_eq!(lines[3].start, prev(t, lines[5].start, 1));
        assert_eq!(lines[2].start, prev(t, lines[5].start, 2));
        assert_eq!(lines[1].start, prev(t, lines[5].start, 3));
        assert_eq!(lines[0].start, prev(t, lines[5].start, 4));
        assert_eq!(lines[0].start, prev(t, lines[5].start, 5));

        assert_eq!(lines[3].start, prev(t, lines[4].end - 1, 1));
        assert_eq!(lines[2].start, prev(t, lines[4].start, 1));
        assert_eq!(lines[2].start, prev(t, lines[3].end - 1, 1));
        assert_eq!(lines[1].start, prev(t, lines[3].start, 1));
        assert_eq!(lines[1].start, prev(t, lines[2].end - 1, 1));
        assert_eq!(lines[0].start, prev(t, lines[2].start, 1));
        assert_eq!(lines[0].start, prev(t, lines[1].end - 1, 1));
        assert_eq!(lines[0].start, prev(t, lines[1].start, 1));
        assert_eq!(lines[0].start, prev(t, lines[0].end - 1, 1));
        assert_eq!(lines[0].start, prev(t, lines[0].start, 1));
    }

    #[test]
    fn preceding_lines_empty1() {
        // Leading empty lines.
        let t = "\n\n\nd\ne\nf\n";
        let lines = line_ranges(t);
        assert_eq!(9, t.len());

        assert_eq!(lines[0].start, prev(t, lines[0].end, 0));
        assert_eq!(lines[0].start, prev(t, lines[0].end, 1));
        assert_eq!(lines[1].start, prev(t, lines[1].end, 0));
        assert_eq!(lines[0].start, prev(t, lines[1].end, 1));

        assert_eq!(lines[5].start, prev(t, lines[5].end, 0));
        assert_eq!(lines[4].start, prev(t, lines[5].end, 1));
        assert_eq!(lines[3].start, prev(t, lines[5].end, 2));
        assert_eq!(lines[2].start, prev(t, lines[5].end, 3));
        assert_eq!(lines[1].start, prev(t, lines[5].end, 4));
        assert_eq!(lines[0].start, prev(t, lines[5].end, 5));
        assert_eq!(lines[0].start, prev(t, lines[5].end, 6));
    }

    #[test]
    fn preceding_lines_empty2() {
        // Empty lines after a non-empty first line.
        let t = "a\n\n\nd\ne\nf\n";
        let lines = line_ranges(t);
        assert_eq!(10, t.len());

        assert_eq!(lines[0].start, prev(t, lines[0].end, 0));
        assert_eq!(lines[0].start, prev(t, lines[0].end, 1));
        assert_eq!(lines[1].start, prev(t, lines[1].end, 0));
        assert_eq!(lines[0].start, prev(t, lines[1].end, 1));

        assert_eq!(lines[5].start, prev(t, lines[5].end, 0));
        assert_eq!(lines[4].start, prev(t, lines[5].end, 1));
        assert_eq!(lines[3].start, prev(t, lines[5].end, 2));
        assert_eq!(lines[2].start, prev(t, lines[5].end, 3));
        assert_eq!(lines[1].start, prev(t, lines[5].end, 4));
        assert_eq!(lines[0].start, prev(t, lines[5].end, 5));
        assert_eq!(lines[0].start, prev(t, lines[5].end, 6));
    }
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/searcher/src/testutil.rs | crates/searcher/src/testutil.rs | use std::io::{self, Write};
use {
bstr::ByteSlice,
grep_matcher::{
LineMatchKind, LineTerminator, Match, Matcher, NoCaptures, NoError,
},
regex::bytes::{Regex, RegexBuilder},
};
use crate::{
searcher::{BinaryDetection, Searcher, SearcherBuilder},
sink::{Sink, SinkContext, SinkFinish, SinkMatch},
};
/// A simple regex matcher.
///
/// This supports setting the matcher's line terminator configuration directly,
/// which we use for testing purposes. That is, the caller explicitly
/// determines whether the line terminator optimization is enabled. (In reality
/// this optimization is detected automatically by inspecting and possibly
/// modifying the regex itself.)
#[derive(Clone, Debug)]
pub(crate) struct RegexMatcher {
    // The compiled pattern used for all matching.
    regex: Regex,
    // When set, advertises a line terminator to the searcher, enabling its
    // line-oriented fast paths.
    line_term: Option<LineTerminator>,
    // When true, `find_candidate_line` reports every line as a candidate,
    // forcing searchers to cope with false positives.
    every_line_is_candidate: bool,
}
impl RegexMatcher {
    /// Create a new regex matcher.
    ///
    /// Panics if `pattern` is not a valid regex.
    pub(crate) fn new(pattern: &str) -> RegexMatcher {
        let regex = RegexBuilder::new(pattern)
            .multi_line(true) // permits ^ and $ to match at \n boundaries
            .build()
            .unwrap();
        // Both test knobs start disabled; tests opt in via the setters below.
        RegexMatcher { regex, line_term: None, every_line_is_candidate: false }
    }

    /// Forcefully set the line terminator of this matcher.
    ///
    /// By default, this matcher has no line terminator set.
    pub(crate) fn set_line_term(
        &mut self,
        line_term: Option<LineTerminator>,
    ) -> &mut RegexMatcher {
        self.line_term = line_term;
        self
    }

    /// Whether to return every line as a candidate or not.
    ///
    /// This forces searchers to handle the case of reporting a false positive.
    pub(crate) fn every_line_is_candidate(
        &mut self,
        yes: bool,
    ) -> &mut RegexMatcher {
        self.every_line_is_candidate = yes;
        self
    }
}
impl Matcher for RegexMatcher {
    type Captures = NoCaptures;
    type Error = NoError;

    /// Find the next match at or after `at`, translating the regex crate's
    /// match type into grep-matcher's `Match`.
    fn find_at(
        &self,
        haystack: &[u8],
        at: usize,
    ) -> Result<Option<Match>, NoError> {
        Ok(self
            .regex
            .find_at(haystack, at)
            .map(|m| Match::new(m.start(), m.end())))
    }

    fn new_captures(&self) -> Result<NoCaptures, NoError> {
        Ok(NoCaptures::new())
    }

    fn line_terminator(&self) -> Option<LineTerminator> {
        self.line_term
    }

    fn find_candidate_line(
        &self,
        haystack: &[u8],
    ) -> Result<Option<LineMatchKind>, NoError> {
        if self.every_line_is_candidate {
            // This mode only makes sense when a line terminator is set.
            assert!(self.line_term.is_some());
            if haystack.is_empty() {
                return Ok(None);
            }
            // Make it interesting and return the last byte in the current
            // line. (The no-op `.map(|i| i)` that used to sit here has been
            // removed; it had no effect on the result.)
            let i = haystack
                .find_byte(self.line_term.unwrap().as_byte())
                .unwrap_or(haystack.len() - 1);
            Ok(Some(LineMatchKind::Candidate(i)))
        } else {
            Ok(self.shortest_match(haystack)?.map(LineMatchKind::Confirmed))
        }
    }
}
/// An implementation of Sink that prints all available information.
///
/// This is useful for tests because it lets us easily confirm whether data
/// is being passed to Sink correctly.
#[derive(Clone, Debug)]
pub(crate) struct KitchenSink(Vec<u8>);

impl KitchenSink {
    /// Create a new implementation of Sink that includes everything in the
    /// kitchen.
    pub(crate) fn new() -> KitchenSink {
        // Starts empty; bytes accumulate as the searcher reports events.
        KitchenSink(Vec::new())
    }

    /// Return the data written to this sink.
    pub(crate) fn as_bytes(&self) -> &[u8] {
        self.0.as_slice()
    }
}
impl Sink for KitchenSink {
    type Error = io::Error;

    fn matched(
        &mut self,
        _searcher: &Searcher,
        mat: &SinkMatch<'_>,
    ) -> Result<bool, io::Error> {
        assert!(!mat.bytes().is_empty());
        assert!(mat.lines().count() >= 1);

        // Each line of the match is emitted as `{line}:{offset}:{bytes}`,
        // where the line number prefix is present only when the searcher
        // computed line numbers.
        let mut line_number = mat.line_number();
        let mut byte_offset = mat.absolute_byte_offset();
        for line in mat.lines() {
            if let Some(n) = line_number.as_mut() {
                write!(self.0, "{}:", n)?;
                *n += 1;
            }
            write!(self.0, "{}:", byte_offset)?;
            byte_offset += line.len() as u64;
            self.0.write_all(line)?;
        }
        Ok(true)
    }

    fn context(
        &mut self,
        _searcher: &Searcher,
        context: &SinkContext<'_>,
    ) -> Result<bool, io::Error> {
        assert!(!context.bytes().is_empty());
        assert!(context.lines().count() == 1);

        // Context lines use `-` separators to distinguish them from matches.
        if let Some(line_number) = context.line_number() {
            write!(self.0, "{}-", line_number)?;
        }
        write!(self.0, "{}-", context.absolute_byte_offset)?;
        self.0.write_all(context.bytes())?;
        Ok(true)
    }

    fn context_break(
        &mut self,
        _searcher: &Searcher,
    ) -> Result<bool, io::Error> {
        self.0.write_all(b"--\n")?;
        Ok(true)
    }

    fn finish(
        &mut self,
        _searcher: &Searcher,
        sink_finish: &SinkFinish,
    ) -> Result<(), io::Error> {
        // Summary trailer: a blank line, the byte count, and (when binary
        // data was detected) the offset at which it was found.
        writeln!(self.0)?;
        writeln!(self.0, "byte count:{}", sink_finish.byte_count())?;
        if let Some(offset) = sink_finish.binary_byte_offset() {
            writeln!(self.0, "binary offset:{}", offset)?;
        }
        Ok(())
    }
}
/// A type for expressing tests on a searcher.
///
/// The searcher code has a lot of different code paths, mostly for the
/// purposes of optimizing a bunch of different use cases. The intent of the
/// searcher is to pick the best code path based on the configuration, which
/// means there is no obviously direct way to ask that a specific code path
/// be exercised. Thus, the purpose of this tester is to explicitly check as
/// many code paths that make sense.
///
/// The tester works by assuming you want to test all pertinent code paths.
/// These can be trimmed down as necessary via the various builder methods.
#[derive(Debug)]
pub(crate) struct SearcherTester {
    // The text to search.
    haystack: String,
    // The regex pattern to search with.
    pattern: String,
    // When set, only configurations whose label matches this regex run.
    filter: Option<::regex::Regex>,
    // When true, print every configuration label before running.
    print_labels: bool,
    // Expected outputs for the four label/line-number combinations. The
    // slice-specific variants fall back to the reader variants when unset.
    expected_no_line_number: Option<String>,
    expected_with_line_number: Option<String>,
    expected_slice_no_line_number: Option<String>,
    expected_slice_with_line_number: Option<String>,
    // Which search strategies to exercise.
    by_line: bool,
    multi_line: bool,
    // Searcher configuration knobs mirrored into each generated config.
    invert_match: bool,
    line_number: bool,
    binary: BinaryDetection,
    // When true, also test with the heap limit at its minimal viable value.
    auto_heap_limit: bool,
    // Contextual-line settings.
    after_context: usize,
    before_context: usize,
    passthru: bool,
}
impl SearcherTester {
    /// Create a new tester for testing searchers.
    pub(crate) fn new(haystack: &str, pattern: &str) -> SearcherTester {
        SearcherTester {
            haystack: haystack.to_string(),
            pattern: pattern.to_string(),
            // No label filter: all generated configurations run.
            filter: None,
            print_labels: false,
            // Expected outputs must be supplied before calling `test`.
            expected_no_line_number: None,
            expected_with_line_number: None,
            expected_slice_no_line_number: None,
            expected_slice_with_line_number: None,
            // Exercise both search strategies by default.
            by_line: true,
            multi_line: true,
            invert_match: false,
            line_number: true,
            binary: BinaryDetection::none(),
            auto_heap_limit: true,
            // No context lines by default.
            after_context: 0,
            before_context: 0,
            passthru: false,
        }
    }
    /// Execute the test. If the test succeeds, then this returns successfully.
    /// If the test fails, then it panics with an informative message.
    pub(crate) fn test(&self) {
        // Check for configuration errors.
        if self.expected_no_line_number.is_none() {
            panic!("an 'expected' string with NO line numbers must be given");
        }
        if self.line_number && self.expected_with_line_number.is_none() {
            panic!(
                "an 'expected' string with line numbers must be given, \
                 or disable testing with line numbers"
            );
        }

        let configs = self.configs();
        if configs.is_empty() {
            panic!("test configuration resulted in nothing being tested");
        }
        // Optionally announce every label (including filtered-out ones) to
        // help when debugging with a filter set.
        if self.print_labels {
            for config in &configs {
                let labels = vec![
                    format!("reader-{}", config.label),
                    format!("slice-{}", config.label),
                ];
                for label in &labels {
                    if self.include(label) {
                        println!("{}", label);
                    } else {
                        println!("{} (ignored)", label);
                    }
                }
            }
        }
        // Run each configuration through both the incremental reader path
        // and the in-memory slice path, comparing against the corresponding
        // expected output.
        for config in &configs {
            let label = format!("reader-{}", config.label);
            if self.include(&label) {
                let got = config.search_reader(&self.haystack);
                assert_eq_printed!(config.expected_reader, got, "{}", label);
            }
            let label = format!("slice-{}", config.label);
            if self.include(&label) {
                let got = config.search_slice(&self.haystack);
                assert_eq_printed!(config.expected_slice, got, "{}", label);
            }
        }
    }
    /// Set a regex pattern to filter the tests that are run.
    ///
    /// By default, no filter is present. When a filter is set, only test
    /// configurations with a label matching the given pattern will be run.
    ///
    /// This is often useful when debugging tests, e.g., when you want to do
    /// printf debugging and only want one particular test configuration to
    /// execute.
    ///
    /// Panics if `pattern` is not a valid regex.
    #[allow(dead_code)]
    pub(crate) fn filter(&mut self, pattern: &str) -> &mut SearcherTester {
        self.filter = Some(::regex::Regex::new(pattern).unwrap());
        self
    }

    /// When set, the labels for all test configurations are printed before
    /// executing any test.
    ///
    /// Note that in order to see these in tests that aren't failing, you'll
    /// want to use `cargo test -- --nocapture`.
    #[allow(dead_code)]
    pub(crate) fn print_labels(&mut self, yes: bool) -> &mut SearcherTester {
        self.print_labels = yes;
        self
    }

    /// Set the expected search results, without line numbers.
    pub(crate) fn expected_no_line_number(
        &mut self,
        exp: &str,
    ) -> &mut SearcherTester {
        self.expected_no_line_number = Some(exp.to_string());
        self
    }

    /// Set the expected search results, with line numbers.
    pub(crate) fn expected_with_line_number(
        &mut self,
        exp: &str,
    ) -> &mut SearcherTester {
        self.expected_with_line_number = Some(exp.to_string());
        self
    }

    /// Set the expected search results, without line numbers, when performing
    /// a search on a slice. When not present, `expected_no_line_number` is
    /// used instead.
    pub(crate) fn expected_slice_no_line_number(
        &mut self,
        exp: &str,
    ) -> &mut SearcherTester {
        self.expected_slice_no_line_number = Some(exp.to_string());
        self
    }

    /// Set the expected search results, with line numbers, when performing a
    /// search on a slice. When not present, `expected_with_line_number` is
    /// used instead.
    #[allow(dead_code)]
    pub(crate) fn expected_slice_with_line_number(
        &mut self,
        exp: &str,
    ) -> &mut SearcherTester {
        self.expected_slice_with_line_number = Some(exp.to_string());
        self
    }

    /// Whether to test search with line numbers or not.
    ///
    /// This is enabled by default. When enabled, the string that is expected
    /// when line numbers are present must be provided. Otherwise, the expected
    /// string isn't required.
    pub(crate) fn line_number(&mut self, yes: bool) -> &mut SearcherTester {
        self.line_number = yes;
        self
    }

    /// Whether to test search using the line-by-line searcher or not.
    ///
    /// By default, this is enabled.
    pub(crate) fn by_line(&mut self, yes: bool) -> &mut SearcherTester {
        self.by_line = yes;
        self
    }

    /// Whether to test search using the multi line searcher or not.
    ///
    /// By default, this is enabled.
    #[allow(dead_code)]
    pub(crate) fn multi_line(&mut self, yes: bool) -> &mut SearcherTester {
        self.multi_line = yes;
        self
    }

    /// Whether to perform an inverted search or not.
    ///
    /// By default, this is disabled.
    pub(crate) fn invert_match(&mut self, yes: bool) -> &mut SearcherTester {
        self.invert_match = yes;
        self
    }

    /// Whether to enable binary detection on all searches.
    ///
    /// By default, this is disabled.
    pub(crate) fn binary_detection(
        &mut self,
        detection: BinaryDetection,
    ) -> &mut SearcherTester {
        self.binary = detection;
        self
    }

    /// Whether to automatically attempt to test the heap limit setting or not.
    ///
    /// By default, one of the test configurations includes setting the heap
    /// limit to its minimal value for normal operation, which checks that
    /// everything works even at the extremes. However, in some cases, the heap
    /// limit can (expectedly) alter the output slightly. For example, it can
    /// impact the number of bytes searched when performing binary detection.
    /// For convenience, it can be useful to disable the automatic heap limit
    /// test.
    pub(crate) fn auto_heap_limit(
        &mut self,
        yes: bool,
    ) -> &mut SearcherTester {
        self.auto_heap_limit = yes;
        self
    }

    /// Set the number of lines to include in the "after" context.
    ///
    /// The default is `0`, which is equivalent to not printing any context.
    pub(crate) fn after_context(
        &mut self,
        lines: usize,
    ) -> &mut SearcherTester {
        self.after_context = lines;
        self
    }

    /// Set the number of lines to include in the "before" context.
    ///
    /// The default is `0`, which is equivalent to not printing any context.
    pub(crate) fn before_context(
        &mut self,
        lines: usize,
    ) -> &mut SearcherTester {
        self.before_context = lines;
        self
    }

    /// Whether to enable the "passthru" feature or not.
    ///
    /// When passthru is enabled, it effectively treats all non-matching lines
    /// as contextual lines. In other words, enabling this is akin to
    /// requesting an unbounded number of before and after contextual lines.
    ///
    /// This is disabled by default.
    pub(crate) fn passthru(&mut self, yes: bool) -> &mut SearcherTester {
        self.passthru = yes;
        self
    }
/// Return the minimum heap size required for a successful search.
///
/// Without context settings this is the longest line (terminator
/// included); with context enabled it must cover the sum of the longest
/// N lines. Multi line search needs the entire haystack in memory, so
/// the limit accounts for that too.
fn minimal_heap_limit(&self, multi_line: bool) -> usize {
    if multi_line {
        // Multi line search buffers the whole haystack.
        return 1 + self.haystack.len();
    }
    if self.before_context == 0 && self.after_context == 0 {
        // Just the longest line. (`str::lines` drops terminators, hence
        // the extra byte.)
        return 1 + self.haystack.lines().map(|s| s.len()).max().unwrap_or(0);
    }
    // Otherwise, sum the longest N lines, where N covers the configured
    // context window.
    let mut lens: Vec<usize> =
        self.haystack.lines().map(|line| line.len()).collect();
    lens.sort_unstable_by(|a, b| b.cmp(a));
    let context_count = if self.passthru {
        self.haystack.lines().count()
    } else {
        // Why add 2? One to have room to search at least one line, and
        // one more because the implementation occasionally includes an
        // additional line when handling the context. No particularly
        // good reason, other than keeping the implementation simple.
        2 + self.before_context + self.after_context
    };
    // Add 1 per line since `str::lines` doesn't include the terminator.
    lens.iter().take(context_count).map(|len| len + 1).sum()
}
/// Returns true if and only if the given label should be included as part
/// of executing `test`.
///
/// Inclusion is determined by the filter specified. If no filter has been
/// given, then this always returns `true`.
fn include(&self, label: &str) -> bool {
let re = match self.filter {
None => return true,
Some(ref re) => re,
};
re.is_match(label)
}
/// Configs generates a set of all search configurations that should be
/// tested. The configs generated are based on the configuration in this
/// builder.
///
/// Each `TesterConfig` pairs a concrete searcher/matcher setup with the
/// output expected from it; labels encode the combination tested
/// (by-line vs multi-line, line terminator awareness, line numbers,
/// heap limiting).
fn configs(&self) -> Vec<TesterConfig> {
    let mut configs = vec![];
    let matcher = RegexMatcher::new(&self.pattern);
    // Base searcher settings shared by every generated config.
    let mut builder = SearcherBuilder::new();
    builder
        .line_number(false)
        .invert_match(self.invert_match)
        .binary_detection(self.binary.clone())
        .after_context(self.after_context)
        .before_context(self.before_context)
        .passthru(self.passthru);
    // Line-oriented configs, without line numbers.
    if self.by_line {
        let mut matcher = matcher.clone();
        let mut builder = builder.clone();
        let expected_reader =
            self.expected_no_line_number.as_ref().unwrap().to_string();
        let expected_slice = match self.expected_slice_no_line_number {
            None => expected_reader.clone(),
            Some(ref e) => e.to_string(),
        };
        configs.push(TesterConfig {
            label: "byline-noterm-nonumber".to_string(),
            expected_reader: expected_reader.clone(),
            expected_slice: expected_slice.clone(),
            builder: builder.clone(),
            matcher: matcher.clone(),
        });
        if self.auto_heap_limit {
            // Same config, but with the smallest workable heap limit.
            builder.heap_limit(Some(self.minimal_heap_limit(false)));
            configs.push(TesterConfig {
                label: "byline-noterm-nonumber-heaplimit".to_string(),
                expected_reader: expected_reader.clone(),
                expected_slice: expected_slice.clone(),
                builder: builder.clone(),
                matcher: matcher.clone(),
            });
            builder.heap_limit(None);
        }
        // Re-run with a matcher that is line-terminator aware.
        matcher.set_line_term(Some(LineTerminator::byte(b'\n')));
        configs.push(TesterConfig {
            label: "byline-term-nonumber".to_string(),
            expected_reader: expected_reader.clone(),
            expected_slice: expected_slice.clone(),
            builder: builder.clone(),
            matcher: matcher.clone(),
        });
        matcher.every_line_is_candidate(true);
        configs.push(TesterConfig {
            label: "byline-term-nonumber-candidates".to_string(),
            expected_reader: expected_reader.clone(),
            expected_slice: expected_slice.clone(),
            builder: builder.clone(),
            matcher: matcher.clone(),
        });
    }
    // Line-oriented configs, with line numbers enabled.
    if self.by_line && self.line_number {
        let mut matcher = matcher.clone();
        let mut builder = builder.clone();
        let expected_reader =
            self.expected_with_line_number.as_ref().unwrap().to_string();
        let expected_slice = match self.expected_slice_with_line_number {
            None => expected_reader.clone(),
            Some(ref e) => e.to_string(),
        };
        builder.line_number(true);
        configs.push(TesterConfig {
            label: "byline-noterm-number".to_string(),
            expected_reader: expected_reader.clone(),
            expected_slice: expected_slice.clone(),
            builder: builder.clone(),
            matcher: matcher.clone(),
        });
        matcher.set_line_term(Some(LineTerminator::byte(b'\n')));
        configs.push(TesterConfig {
            label: "byline-term-number".to_string(),
            expected_reader: expected_reader.clone(),
            expected_slice: expected_slice.clone(),
            builder: builder.clone(),
            matcher: matcher.clone(),
        });
        matcher.every_line_is_candidate(true);
        configs.push(TesterConfig {
            label: "byline-term-number-candidates".to_string(),
            expected_reader: expected_reader.clone(),
            expected_slice: expected_slice.clone(),
            builder: builder.clone(),
            matcher: matcher.clone(),
        });
    }
    // Multi-line configs. Note: reader and slice output are expected to
    // be identical here, so both use the "slice" expectation.
    if self.multi_line {
        let mut builder = builder.clone();
        let expected_slice = match self.expected_slice_no_line_number {
            None => {
                self.expected_no_line_number.as_ref().unwrap().to_string()
            }
            Some(ref e) => e.to_string(),
        };
        builder.multi_line(true);
        configs.push(TesterConfig {
            label: "multiline-nonumber".to_string(),
            expected_reader: expected_slice.clone(),
            expected_slice: expected_slice.clone(),
            builder: builder.clone(),
            matcher: matcher.clone(),
        });
        if self.auto_heap_limit {
            builder.heap_limit(Some(self.minimal_heap_limit(true)));
            configs.push(TesterConfig {
                label: "multiline-nonumber-heaplimit".to_string(),
                expected_reader: expected_slice.clone(),
                expected_slice: expected_slice.clone(),
                builder: builder.clone(),
                matcher: matcher.clone(),
            });
            builder.heap_limit(None);
        }
    }
    // Multi-line configs with line numbers enabled.
    if self.multi_line && self.line_number {
        let mut builder = builder.clone();
        let expected_slice = match self.expected_slice_with_line_number {
            None => self
                .expected_with_line_number
                .as_ref()
                .unwrap()
                .to_string(),
            Some(ref e) => e.to_string(),
        };
        builder.multi_line(true);
        builder.line_number(true);
        configs.push(TesterConfig {
            label: "multiline-number".to_string(),
            expected_reader: expected_slice.clone(),
            expected_slice: expected_slice.clone(),
            builder: builder.clone(),
            matcher: matcher.clone(),
        });
        builder.heap_limit(Some(self.minimal_heap_limit(true)));
        configs.push(TesterConfig {
            label: "multiline-number-heaplimit".to_string(),
            expected_reader: expected_slice.clone(),
            expected_slice: expected_slice.clone(),
            builder: builder.clone(),
            matcher: matcher.clone(),
        });
        builder.heap_limit(None);
    }
    configs
}
}
/// A fully materialized search configuration, paired with the output it
/// is expected to produce.
#[derive(Debug)]
struct TesterConfig {
    /// Human readable name for this configuration; used in panic messages.
    label: String,
    /// Expected output when searching incrementally through a reader.
    expected_reader: String,
    /// Expected output when searching an in-memory slice.
    expected_slice: String,
    /// The searcher configuration to run.
    builder: SearcherBuilder,
    /// The matcher to search with.
    matcher: RegexMatcher,
}
impl TesterConfig {
    /// Run this configuration's search incrementally through a reader, so
    /// the corpus need not be entirely in memory at once.
    fn search_reader(&self, haystack: &str) -> String {
        let mut sink = KitchenSink::new();
        let mut searcher = self.builder.build();
        let result =
            searcher.search_reader(&self.matcher, haystack.as_bytes(), &mut sink);
        if let Err(err) = result {
            panic!("error running 'reader-{}': {}", self.label, err);
        }
        String::from_utf8(sink.as_bytes().to_vec()).unwrap()
    }

    /// Run this configuration's search over the corpus as a single
    /// in-memory slice.
    fn search_slice(&self, haystack: &str) -> String {
        let mut sink = KitchenSink::new();
        let mut searcher = self.builder.build();
        let result =
            searcher.search_slice(&self.matcher, haystack.as_bytes(), &mut sink);
        if let Err(err) = result {
            panic!("error running 'slice-{}': {}", self.label, err);
        }
        String::from_utf8(sink.as_bytes().to_vec()).unwrap()
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Shorthand for constructing a `Match` span.
    fn m(start: usize, end: usize) -> Match {
        Match::new(start, end)
    }

    // The `empty_line*` tests pin down how the test matcher resolves the
    // `^$` (empty line) pattern when searching from various start offsets.

    #[test]
    fn empty_line1() {
        let haystack = b"";
        let matcher = RegexMatcher::new(r"^$");
        assert_eq!(matcher.find_at(haystack, 0), Ok(Some(m(0, 0))));
    }

    #[test]
    fn empty_line2() {
        let haystack = b"\n";
        let matcher = RegexMatcher::new(r"^$");
        assert_eq!(matcher.find_at(haystack, 0), Ok(Some(m(0, 0))));
        assert_eq!(matcher.find_at(haystack, 1), Ok(Some(m(1, 1))));
    }

    #[test]
    fn empty_line3() {
        let haystack = b"\n\n";
        let matcher = RegexMatcher::new(r"^$");
        assert_eq!(matcher.find_at(haystack, 0), Ok(Some(m(0, 0))));
        assert_eq!(matcher.find_at(haystack, 1), Ok(Some(m(1, 1))));
        assert_eq!(matcher.find_at(haystack, 2), Ok(Some(m(2, 2))));
    }

    #[test]
    fn empty_line4() {
        // Empty-line matches land just after each terminator pair.
        let haystack = b"a\n\nb\n";
        let matcher = RegexMatcher::new(r"^$");
        assert_eq!(matcher.find_at(haystack, 0), Ok(Some(m(2, 2))));
        assert_eq!(matcher.find_at(haystack, 1), Ok(Some(m(2, 2))));
        assert_eq!(matcher.find_at(haystack, 2), Ok(Some(m(2, 2))));
        assert_eq!(matcher.find_at(haystack, 3), Ok(Some(m(5, 5))));
        assert_eq!(matcher.find_at(haystack, 4), Ok(Some(m(5, 5))));
        assert_eq!(matcher.find_at(haystack, 5), Ok(Some(m(5, 5))));
    }

    #[test]
    fn empty_line5() {
        // A trailing non-empty, terminator-less line yields no match.
        let haystack = b"a\n\nb\nc";
        let matcher = RegexMatcher::new(r"^$");
        assert_eq!(matcher.find_at(haystack, 0), Ok(Some(m(2, 2))));
        assert_eq!(matcher.find_at(haystack, 1), Ok(Some(m(2, 2))));
        assert_eq!(matcher.find_at(haystack, 2), Ok(Some(m(2, 2))));
        assert_eq!(matcher.find_at(haystack, 3), Ok(None));
        assert_eq!(matcher.find_at(haystack, 4), Ok(None));
        assert_eq!(matcher.find_at(haystack, 5), Ok(None));
        assert_eq!(matcher.find_at(haystack, 6), Ok(None));
    }

    #[test]
    fn empty_line6() {
        let haystack = b"a\n";
        let matcher = RegexMatcher::new(r"^$");
        assert_eq!(matcher.find_at(haystack, 0), Ok(Some(m(2, 2))));
        assert_eq!(matcher.find_at(haystack, 1), Ok(Some(m(2, 2))));
        assert_eq!(matcher.find_at(haystack, 2), Ok(Some(m(2, 2))));
    }
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/searcher/src/line_buffer.rs | crates/searcher/src/line_buffer.rs | use std::io;
use bstr::ByteSlice;
/// The default buffer capacity that we use for the line buffer. This is
/// also the size of each read issued to the underlying reader.
pub(crate) const DEFAULT_BUFFER_CAPACITY: usize = 64 * (1 << 10); // 64 KB
/// How a searcher behaves when a line (plus any enabled context window)
/// does not fit in its fixed-size buffer.
///
/// When searching incrementally with a fixed-size buffer, this controls
/// how much *additional* memory — beyond the buffer's capacity — may be
/// allocated to accommodate such lines.
///
/// The default is to allocate eagerly, with no limit.
#[derive(Clone, Copy, Debug)]
pub(crate) enum BufferAllocation {
    /// Keep growing the buffer until the next line fits in memory, or
    /// until available memory runs out.
    ///
    /// This is the default.
    Eager,
    /// Cap the additional memory at the given number of bytes. If a line
    /// needs more than that, stop reading and return an error.
    Error(usize),
}

impl Default for BufferAllocation {
    fn default() -> Self {
        Self::Eager
    }
}
/// Build the I/O error reported when a configured allocation limit has
/// been reached.
pub(crate) fn alloc_error(limit: usize) -> io::Error {
    io::Error::new(
        io::ErrorKind::Other,
        format!("configured allocation limit ({}) exceeded", limit),
    )
}
/// The behavior of binary detection in the line buffer.
///
/// Binary detection is the process of _heuristically_ identifying whether a
/// given chunk of data is binary or not, and then taking an action based on
/// the result of that heuristic. The motivation behind detecting binary data
/// is that binary data often indicates data that is undesirable to search
/// using textual patterns. Of course, there are many cases in which this
/// isn't true, which is why binary detection is disabled by default.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) enum BinaryDetection {
    /// No binary detection is performed. Data reported by the line buffer
    /// may contain arbitrary bytes.
    None,
    /// The given byte is searched in all contents read by the line buffer.
    /// If it occurs, then the data is considered binary and the line buffer
    /// acts as if it reached EOF. The line buffer guarantees that this byte
    /// will never be observable by callers.
    Quit(u8),
    /// The given byte is searched in all contents read by the line buffer.
    /// If it occurs, then it is replaced by the line terminator. The line
    /// buffer guarantees that this byte will never be observable by callers.
    Convert(u8),
}

impl Default for BinaryDetection {
    fn default() -> BinaryDetection {
        BinaryDetection::None
    }
}

impl BinaryDetection {
    /// Returns true if and only if the detection heuristic demands that
    /// the line buffer stop reading data once binary data is observed.
    fn is_quit(&self) -> bool {
        // Idiomatic form of the manual `match`-with-catch-all it replaces.
        matches!(*self, BinaryDetection::Quit(_))
    }
}
/// The configuration of a buffer. This contains options that are fixed once
/// a buffer has been constructed.
#[derive(Clone, Copy, Debug)]
struct Config {
    /// The number of bytes to attempt to read at a time. This is also the
    /// initial size of the buffer allocated by `LineBufferBuilder::build`.
    capacity: usize,
    /// The line terminator.
    lineterm: u8,
    /// The behavior for handling long lines.
    buffer_alloc: BufferAllocation,
    /// When set, the presence of the given byte indicates binary content.
    binary: BinaryDetection,
}
impl Default for Config {
    /// A 64KB capacity, `\n` terminator, eager allocation, and no binary
    /// detection.
    fn default() -> Self {
        Self {
            capacity: DEFAULT_BUFFER_CAPACITY,
            lineterm: b'\n',
            buffer_alloc: BufferAllocation::default(),
            binary: BinaryDetection::default(),
        }
    }
}
/// A builder for constructing line buffers.
#[derive(Clone, Debug, Default)]
pub(crate) struct LineBufferBuilder {
    // Options are fixed into a `Config` when `build` is called.
    config: Config,
}
impl LineBufferBuilder {
    /// Create a new builder with a default configuration.
    pub(crate) fn new() -> LineBufferBuilder {
        LineBufferBuilder { config: Config::default() }
    }

    /// Construct a fresh `LineBuffer` from this builder's configuration.
    pub(crate) fn build(&self) -> LineBuffer {
        LineBuffer {
            config: self.config,
            buf: vec![0; self.config.capacity],
            pos: 0,
            last_lineterm: 0,
            end: 0,
            absolute_byte_offset: 0,
            binary_byte_offset: None,
        }
    }

    /// Set the buffer's capacity.
    ///
    /// The capacity is both the amount of data held in memory and the size
    /// of the reads issued to the underlying reader. It has a reasonable
    /// default and probably shouldn't be changed without a specific reason.
    pub(crate) fn capacity(&mut self, capacity: usize) -> &mut LineBufferBuilder {
        self.config.capacity = capacity;
        self
    }

    /// Set the line terminator for the buffer. Defaults to `b'\n'`.
    ///
    /// The buffer rolls forward on line boundaries: after a read, the bytes
    /// past the final line terminator likely belong to an incomplete line,
    /// so they are withheld from callers. The terminator configured here is
    /// what determines that incomplete tail.
    pub(crate) fn line_terminator(&mut self, lineterm: u8) -> &mut LineBufferBuilder {
        self.config.lineterm = lineterm;
        self
    }

    /// Set the policy for allocating memory beyond the initial capacity in
    /// order to fit long lines.
    ///
    /// Line oriented search fundamentally requires that each line fit into
    /// memory. The default, `BufferAllocation::Eager`, grows the buffer as
    /// much as needed, bounded only by available memory.
    ///
    /// The limit applies to *additional* memory beyond the buffer's
    /// capacity, so a value of `0` is sensible: it guarantees the buffer
    /// never grows past its initial capacity.
    pub(crate) fn buffer_alloc(&mut self, behavior: BufferAllocation) -> &mut LineBufferBuilder {
        self.config.buffer_alloc = behavior;
        self
    }

    /// Configure binary detection. Disabled by default.
    ///
    /// Depending on the strategy, detecting binary content either makes the
    /// buffer report an early EOF or makes it clean the data in place. In
    /// general, binary detection should be viewed as an imperfect heuristic.
    pub(crate) fn binary_detection(&mut self, detection: BinaryDetection) -> &mut LineBufferBuilder {
        self.config.binary = detection;
        self
    }
}
/// A line buffer reader efficiently reads a line oriented buffer from an
/// arbitrary reader.
#[derive(Debug)]
pub(crate) struct LineBufferReader<'b, R> {
    /// The underlying reader that bytes are pulled from.
    rdr: R,
    /// The intermediate buffer; borrowed mutably so a single `LineBuffer`
    /// can be reused across readers (it is cleared in `new`).
    line_buffer: &'b mut LineBuffer,
}
impl<'b, R: io::Read> LineBufferReader<'b, R> {
    /// Create a new buffered reader over `rdr`, using `line_buffer` as the
    /// intermediate buffer. The line buffer is reset first; its binary
    /// detection configuration is left untouched.
    pub(crate) fn new(rdr: R, line_buffer: &'b mut LineBuffer) -> LineBufferReader<'b, R> {
        line_buffer.clear();
        LineBufferReader { rdr, line_buffer }
    }

    /// The absolute byte offset of the start of `buffer`'s contents,
    /// relative to the beginning of the underlying reader. This is not an
    /// offset into memory; it is typically used for reporting, or for
    /// counting the number of bytes searched.
    pub(crate) fn absolute_byte_offset(&self) -> u64 {
        self.line_buffer.absolute_byte_offset()
    }

    /// When binary data has been detected, returns the absolute byte offset
    /// at which it was first found.
    pub(crate) fn binary_byte_offset(&self) -> Option<u64> {
        self.line_buffer.binary_byte_offset()
    }

    /// Refill this buffer: discard the consumed prefix, then fill the freed
    /// space with new data from the reader.
    ///
    /// Returns `false` at EOF and `true` otherwise. (With binary detection
    /// set to `Quit`, the first occurrence of binary data behaves as EOF.)
    ///
    /// Errors from the underlying reader are forwarded, as is an error when
    /// the buffer would have to grow past its allocation limit.
    pub(crate) fn fill(&mut self) -> Result<bool, io::Error> {
        self.line_buffer.fill(&mut self.rdr)
    }

    /// The currently readable contents of this buffer.
    pub(crate) fn buffer(&self) -> &[u8] {
        self.line_buffer.buffer()
    }

    /// View the buffer as a `BStr`, for convenient equality checking in
    /// tests only.
    #[cfg(test)]
    fn bstr(&self) -> &bstr::BStr {
        self.buffer().as_bstr()
    }

    /// Mark `amt` bytes as consumed. `amt` must be less than or equal to
    /// the number of bytes returned by `buffer`.
    pub(crate) fn consume(&mut self, amt: usize) {
        self.line_buffer.consume(amt);
    }

    /// Consume everything currently buffered; `buffer` then returns an
    /// empty slice until the next `fill`. Shorthand for
    /// `consume(buffer.len())`.
    #[cfg(test)]
    fn consume_all(&mut self) {
        self.line_buffer.consume_all();
    }
}
/// A line buffer manages a (typically fixed) buffer for holding lines.
///
/// Callers should create line buffers sparingly and reuse them when
/// possible. Line buffers cannot be used directly, but instead must be used
/// via the LineBufferReader.
///
/// Invariant maintained throughout: `pos <= last_lineterm <= end <=
/// buf.len()`.
#[derive(Clone, Debug)]
pub(crate) struct LineBuffer {
    /// The configuration of this buffer.
    config: Config,
    /// The primary buffer with which to hold data.
    buf: Vec<u8>,
    /// The current position of this buffer. This is always a valid sliceable
    /// index into `buf`, and its maximum value is the length of `buf`.
    pos: usize,
    /// The end position of searchable content in this buffer. This is either
    /// set to just after the final line terminator in the buffer, or to just
    /// after the end of the last byte emitted by the reader when the reader
    /// has been exhausted.
    last_lineterm: usize,
    /// The end position of the buffer. This is always greater than or equal
    /// to last_lineterm. The bytes between last_lineterm and end, if any,
    /// always correspond to a partial line.
    end: usize,
    /// The absolute byte offset corresponding to `pos`. This is most
    /// typically not a valid index into addressable memory, but rather, an
    /// offset that is relative to all data that passes through a line buffer
    /// (since construction or since the last time `clear` was called).
    ///
    /// When the line buffer reaches EOF, this is set to the position just
    /// after the last byte read from the underlying reader. That is, it
    /// becomes the total count of bytes that have been read.
    absolute_byte_offset: u64,
    /// If binary data was found, this records the absolute byte offset at
    /// which it was first detected.
    binary_byte_offset: Option<u64>,
}
impl LineBuffer {
    /// Set the binary detection method used on this line buffer.
    ///
    /// This permits dynamically changing the binary detection strategy on
    /// an existing line buffer without needing to create a new one.
    pub(crate) fn set_binary_detection(&mut self, binary: BinaryDetection) {
        self.config.binary = binary;
    }

    /// Reset this buffer, such that it can be used with a new reader.
    fn clear(&mut self) {
        self.pos = 0;
        self.last_lineterm = 0;
        self.end = 0;
        self.absolute_byte_offset = 0;
        self.binary_byte_offset = None;
    }

    /// The absolute byte offset which corresponds to the starting offsets
    /// of the data returned by `buffer` relative to the beginning of the
    /// reader's contents. As such, this offset does not generally correspond
    /// to an offset in memory. It is typically used for reporting purposes,
    /// particularly in error messages.
    ///
    /// This is reset to `0` when `clear` is called.
    fn absolute_byte_offset(&self) -> u64 {
        self.absolute_byte_offset
    }

    /// If binary data was detected, then this returns the absolute byte
    /// offset at which binary data was initially found.
    fn binary_byte_offset(&self) -> Option<u64> {
        self.binary_byte_offset
    }

    /// Return the contents of this buffer.
    fn buffer(&self) -> &[u8] {
        &self.buf[self.pos..self.last_lineterm]
    }

    /// Return the contents of the free space beyond the end of the buffer
    /// as a mutable slice.
    fn free_buffer(&mut self) -> &mut [u8] {
        &mut self.buf[self.end..]
    }

    /// Consume the number of bytes provided. This must be less than or
    /// equal to the number of bytes returned by `buffer`.
    fn consume(&mut self, amt: usize) {
        assert!(amt <= self.buffer().len());
        self.pos += amt;
        self.absolute_byte_offset += amt as u64;
    }

    /// Consumes the remainder of the buffer. Subsequent calls to `buffer`
    /// are guaranteed to return an empty slice until the buffer is refilled.
    ///
    /// This is a convenience function for `consume(buffer.len())`.
    #[cfg(test)]
    fn consume_all(&mut self) {
        let amt = self.buffer().len();
        self.consume(amt);
    }

    /// Fill the contents of this buffer by discarding the part of the buffer
    /// that has been consumed. The free space created by discarding the
    /// consumed part of the buffer is then filled with new data from the
    /// given reader.
    ///
    /// Callers should provide the same reader to this line buffer in
    /// subsequent calls to fill. A different reader can only be used
    /// immediately following a call to `clear`.
    ///
    /// If EOF is reached, then `false` is returned. Otherwise, `true` is
    /// returned. (Note that if this line buffer's binary detection is set to
    /// `Quit`, then the presence of binary data will cause this buffer to
    /// behave as if it had seen EOF.)
    ///
    /// This forwards any errors returned by `rdr`, and will also return an
    /// error if the buffer must be expanded past its allocation limit, as
    /// governed by the buffer allocation strategy.
    fn fill<R: io::Read>(&mut self, mut rdr: R) -> Result<bool, io::Error> {
        // If the binary detection heuristic tells us to quit once binary data
        // has been observed, then we no longer read new data and reach EOF
        // once the current buffer has been consumed.
        if self.config.binary.is_quit() && self.binary_byte_offset.is_some() {
            return Ok(!self.buffer().is_empty());
        }

        self.roll();
        assert_eq!(self.pos, 0);
        loop {
            self.ensure_capacity()?;
            let readlen = rdr.read(self.free_buffer().as_bytes_mut())?;
            if readlen == 0 {
                // We're only done reading for good once the caller has
                // consumed everything.
                self.last_lineterm = self.end;
                return Ok(!self.buffer().is_empty());
            }

            // Get a mutable view into the bytes we've just read. These are
            // the bytes that we do binary detection on, and also the bytes we
            // search to find the last line terminator. We need a mutable slice
            // in the case of binary conversion.
            let oldend = self.end;
            self.end += readlen;
            let newbytes = &mut self.buf[oldend..self.end];

            // Binary detection.
            match self.config.binary {
                BinaryDetection::None => {} // nothing to do
                BinaryDetection::Quit(byte) => {
                    if let Some(i) = newbytes.find_byte(byte) {
                        // Truncate at the binary byte so callers never see it.
                        self.end = oldend + i;
                        self.last_lineterm = self.end;
                        self.binary_byte_offset =
                            Some(self.absolute_byte_offset + self.end as u64);
                        // If the first byte in our buffer is a binary byte,
                        // then our buffer is empty and we should report as
                        // such to the caller.
                        return Ok(self.pos < self.end);
                    }
                }
                BinaryDetection::Convert(byte) => {
                    if let Some(i) =
                        replace_bytes(newbytes, byte, self.config.lineterm)
                    {
                        // Record only the first binary offset.
                        if self.binary_byte_offset.is_none() {
                            self.binary_byte_offset = Some(
                                self.absolute_byte_offset
                                    + (oldend + i) as u64,
                            );
                        }
                    }
                }
            }

            // Update our `last_lineterm` positions if we read one.
            if let Some(i) = newbytes.rfind_byte(self.config.lineterm) {
                self.last_lineterm = oldend + i + 1;
                return Ok(true);
            }
            // At this point, if we couldn't find a line terminator, then we
            // don't have a complete line. Therefore, we try to read more!
        }
    }

    /// Roll the unconsumed parts of the buffer to the front.
    ///
    /// This operation is idempotent.
    ///
    /// After rolling, `last_lineterm` and `end` point to the same location,
    /// and `pos` is always set to `0`.
    fn roll(&mut self) {
        if self.pos == self.end {
            self.pos = 0;
            self.last_lineterm = 0;
            self.end = 0;
            return;
        }

        let roll_len = self.end - self.pos;
        self.buf.copy_within(self.pos..self.end, 0);
        self.pos = 0;
        self.last_lineterm = roll_len;
        self.end = roll_len;
    }

    /// Ensures that the internal buffer has a non-zero amount of free space
    /// in which to read more data. If there is no free space, then more is
    /// allocated. If the allocation must exceed the configured limit, then
    /// this returns an error.
    fn ensure_capacity(&mut self) -> Result<(), io::Error> {
        if !self.free_buffer().is_empty() {
            return Ok(());
        }
        // `len` is used for computing the next allocation size. The capacity
        // is permitted to start at `0`, so we make sure it's at least `1`.
        let len = std::cmp::max(1, self.buf.len());
        let additional = match self.config.buffer_alloc {
            BufferAllocation::Eager => len * 2,
            BufferAllocation::Error(limit) => {
                // `used` is how much additional memory (beyond the initial
                // capacity) has been allocated so far.
                let used = self.buf.len() - self.config.capacity;
                let n = std::cmp::min(len * 2, limit - used);
                if n == 0 {
                    return Err(alloc_error(self.config.capacity + limit));
                }
                n
            }
        };
        assert!(additional > 0);
        let newlen = self.buf.len() + additional;
        self.buf.resize(newlen, 0);
        assert!(!self.free_buffer().is_empty());
        Ok(())
    }
}
/// Replace every occurrence of `src` in `bytes` with `replacement`,
/// returning the offset of the first replacement if any was made.
fn replace_bytes(
    mut bytes: &mut [u8],
    src: u8,
    replacement: u8,
) -> Option<usize> {
    if src == replacement {
        return None;
    }
    let start = bytes.find_byte(src)?;
    bytes[start] = replacement;
    bytes = &mut bytes[start + 1..];
    while let Some(idx) = bytes.find_byte(src) {
        bytes[idx] = replacement;
        bytes = &mut bytes[idx + 1..];
        // Adjacent `src` bytes get a different strategy: binary data tends
        // to contain long runs of NUL bytes, and for those it is faster to
        // compare one byte at a time than to restart memchr (via
        // `find_byte`) for every byte in the run.
        while bytes.first() == Some(&src) {
            bytes[0] = replacement;
            bytes = &mut bytes[1..];
        }
    }
    Some(start)
}
#[cfg(test)]
mod tests {
use bstr::ByteVec;
use super::*;
// Standard test corpus. The trailing `\` escapes the final newline in the
// literal, so the text does not end with a line terminator.
const SHERLOCK: &'static str = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, success in the province of detective work must always
be, to a very large extent, the result of luck. Sherlock Holmes
can extract a clew from a wisp of straw or a flake of cigar ash;
but Doctor Watson has to have it taken out for him and dusted,
and exhibited clearly, with a label attached.\
";
/// Convenience: an owned `String` from a `&str`.
fn s(slice: &str) -> String {
    String::from(slice)
}
/// Run `replace_bytes` on a copy of `slice`, returning the resulting
/// string and the offset of the first replacement, if any.
fn replace_str(
    slice: &str,
    src: u8,
    replacement: u8,
) -> (String, Option<usize>) {
    let mut buf = Vec::from(slice);
    let first = replace_bytes(&mut buf, src, replacement);
    (buf.into_string().unwrap(), first)
}
#[test]
fn replace() {
    // Exercises `replace_bytes`, including no-op cases and runs of
    // adjacent source bytes.
    assert_eq!(replace_str("", b'b', b'z'), (s(""), None));
    assert_eq!(replace_str("a", b'a', b'a'), (s("a"), None));
    assert_eq!(replace_str("a", b'b', b'z'), (s("a"), None));
    assert_eq!(replace_str("abc", b'b', b'z'), (s("azc"), Some(1)));
    assert_eq!(replace_str("abb", b'b', b'z'), (s("azz"), Some(1)));
    assert_eq!(replace_str("aba", b'a', b'z'), (s("zbz"), Some(0)));
    assert_eq!(replace_str("bbb", b'b', b'z'), (s("zzz"), Some(0)));
    assert_eq!(replace_str("bac", b'b', b'z'), (s("zac"), Some(0)));
}
#[test]
fn buffer_basics1() {
    // A terminator-less final line ("maggie") is only surfaced once the
    // reader hits EOF.
    let bytes = "homer\nlisa\nmaggie";
    let mut linebuf = LineBufferBuilder::new().build();
    let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);

    assert!(rdr.buffer().is_empty());

    assert!(rdr.fill().unwrap());
    assert_eq!(rdr.bstr(), "homer\nlisa\n");
    assert_eq!(rdr.absolute_byte_offset(), 0);
    rdr.consume(5);
    assert_eq!(rdr.absolute_byte_offset(), 5);
    rdr.consume_all();
    assert_eq!(rdr.absolute_byte_offset(), 11);

    assert!(rdr.fill().unwrap());
    assert_eq!(rdr.bstr(), "maggie");
    rdr.consume_all();

    assert!(!rdr.fill().unwrap());
    assert_eq!(rdr.absolute_byte_offset(), bytes.len() as u64);
    assert_eq!(rdr.binary_byte_offset(), None);
}
#[test]
fn buffer_basics2() {
    // With a trailing terminator, the whole corpus is returned in one fill.
    let bytes = "homer\nlisa\nmaggie\n";
    let mut linebuf = LineBufferBuilder::new().build();
    let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);

    assert!(rdr.fill().unwrap());
    assert_eq!(rdr.bstr(), "homer\nlisa\nmaggie\n");
    rdr.consume_all();

    assert!(!rdr.fill().unwrap());
    assert_eq!(rdr.absolute_byte_offset(), bytes.len() as u64);
    assert_eq!(rdr.binary_byte_offset(), None);
}
#[test]
fn buffer_basics3() {
    // A corpus that is nothing but a single line terminator.
    let bytes = "\n";
    let mut linebuf = LineBufferBuilder::new().build();
    let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);

    assert!(rdr.fill().unwrap());
    assert_eq!(rdr.bstr(), "\n");
    rdr.consume_all();

    assert!(!rdr.fill().unwrap());
    assert_eq!(rdr.absolute_byte_offset(), bytes.len() as u64);
    assert_eq!(rdr.binary_byte_offset(), None);
}
#[test]
fn buffer_basics4() {
    // Two consecutive empty lines.
    let bytes = "\n\n";
    let mut linebuf = LineBufferBuilder::new().build();
    let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);

    assert!(rdr.fill().unwrap());
    assert_eq!(rdr.bstr(), "\n\n");
    rdr.consume_all();

    assert!(!rdr.fill().unwrap());
    assert_eq!(rdr.absolute_byte_offset(), bytes.len() as u64);
    assert_eq!(rdr.binary_byte_offset(), None);
}
#[test]
fn buffer_empty() {
    // An empty reader reports EOF on the very first fill.
    let bytes = "";
    let mut linebuf = LineBufferBuilder::new().build();
    let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);

    assert!(!rdr.fill().unwrap());
    assert_eq!(rdr.absolute_byte_offset(), bytes.len() as u64);
    assert_eq!(rdr.binary_byte_offset(), None);
}
#[test]
fn buffer_zero_capacity() {
    // A zero starting capacity still works: the buffer grows on demand.
    let bytes = "homer\nlisa\nmaggie";
    let mut linebuf = LineBufferBuilder::new().capacity(0).build();
    let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);

    while rdr.fill().unwrap() {
        rdr.consume_all();
    }
    assert_eq!(rdr.absolute_byte_offset(), bytes.len() as u64);
    assert_eq!(rdr.binary_byte_offset(), None);
}
#[test]
fn buffer_small_capacity() {
    // With a 1-byte capacity, repeated fills must still reconstruct the
    // entire corpus.
    let bytes = "homer\nlisa\nmaggie";
    let mut linebuf = LineBufferBuilder::new().capacity(1).build();
    let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);

    let mut got = vec![];
    while rdr.fill().unwrap() {
        got.push_str(rdr.buffer());
        rdr.consume_all();
    }
    assert_eq!(bytes, got.as_bstr());
    assert_eq!(rdr.absolute_byte_offset(), bytes.len() as u64);
    assert_eq!(rdr.binary_byte_offset(), None);
}
#[test]
fn buffer_limited_capacity1() {
    // A 5-byte allocation limit can hold "maggie" but not also read one
    // more byte to detect EOF, so the fill errors mid-way.
    let bytes = "homer\nlisa\nmaggie";
    let mut linebuf = LineBufferBuilder::new()
        .capacity(1)
        .buffer_alloc(BufferAllocation::Error(5))
        .build();
    let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);

    assert!(rdr.fill().unwrap());
    assert_eq!(rdr.bstr(), "homer\n");
    rdr.consume_all();

    assert!(rdr.fill().unwrap());
    assert_eq!(rdr.bstr(), "lisa\n");
    rdr.consume_all();

    // This returns an error because while we have just enough room to
    // store maggie in the buffer, we *don't* have enough room to read one
    // more byte, so we don't know whether we're at EOF or not, and
    // therefore must give up.
    assert!(rdr.fill().is_err());

    // We can mush on though!
    assert_eq!(rdr.bstr(), "m");
    rdr.consume_all();

    assert!(rdr.fill().unwrap());
    assert_eq!(rdr.bstr(), "aggie");
    rdr.consume_all();
    assert!(!rdr.fill().unwrap());
}
#[test]
fn buffer_limited_capacity2() {
let bytes = "homer\nlisa\nmaggie";
let mut linebuf = LineBufferBuilder::new()
.capacity(1)
.buffer_alloc(BufferAllocation::Error(6))
.build();
let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);
assert!(rdr.fill().unwrap());
assert_eq!(rdr.bstr(), "homer\n");
rdr.consume_all();
assert!(rdr.fill().unwrap());
assert_eq!(rdr.bstr(), "lisa\n");
rdr.consume_all();
// We have just enough space.
assert!(rdr.fill().unwrap());
assert_eq!(rdr.bstr(), "maggie");
rdr.consume_all();
assert!(!rdr.fill().unwrap());
}
#[test]
fn buffer_limited_capacity3() {
let bytes = "homer\nlisa\nmaggie";
let mut linebuf = LineBufferBuilder::new()
.capacity(1)
.buffer_alloc(BufferAllocation::Error(0))
.build();
let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);
assert!(rdr.fill().is_err());
assert_eq!(rdr.bstr(), "");
}
#[test]
fn buffer_binary_none() {
let bytes = "homer\nli\x00sa\nmaggie\n";
let mut linebuf = LineBufferBuilder::new().build();
let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);
assert!(rdr.buffer().is_empty());
assert!(rdr.fill().unwrap());
assert_eq!(rdr.bstr(), "homer\nli\x00sa\nmaggie\n");
rdr.consume_all();
assert!(!rdr.fill().unwrap());
assert_eq!(rdr.absolute_byte_offset(), bytes.len() as u64);
assert_eq!(rdr.binary_byte_offset(), None);
}
#[test]
fn buffer_binary_quit1() {
let bytes = "homer\nli\x00sa\nmaggie\n";
let mut linebuf = LineBufferBuilder::new()
.binary_detection(BinaryDetection::Quit(b'\x00'))
.build();
let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);
assert!(rdr.buffer().is_empty());
assert!(rdr.fill().unwrap());
assert_eq!(rdr.bstr(), "homer\nli");
rdr.consume_all();
assert!(!rdr.fill().unwrap());
assert_eq!(rdr.absolute_byte_offset(), 8);
assert_eq!(rdr.binary_byte_offset(), Some(8));
}
#[test]
fn buffer_binary_quit2() {
let bytes = "\x00homer\nlisa\nmaggie\n";
let mut linebuf = LineBufferBuilder::new()
.binary_detection(BinaryDetection::Quit(b'\x00'))
.build();
let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);
assert!(!rdr.fill().unwrap());
assert_eq!(rdr.bstr(), "");
assert_eq!(rdr.absolute_byte_offset(), 0);
assert_eq!(rdr.binary_byte_offset(), Some(0));
}
#[test]
fn buffer_binary_quit3() {
let bytes = "homer\nlisa\nmaggie\n\x00";
let mut linebuf = LineBufferBuilder::new()
.binary_detection(BinaryDetection::Quit(b'\x00'))
.build();
let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);
assert!(rdr.buffer().is_empty());
assert!(rdr.fill().unwrap());
assert_eq!(rdr.bstr(), "homer\nlisa\nmaggie\n");
rdr.consume_all();
assert!(!rdr.fill().unwrap());
assert_eq!(rdr.absolute_byte_offset(), bytes.len() as u64 - 1);
assert_eq!(rdr.binary_byte_offset(), Some(bytes.len() as u64 - 1));
}
#[test]
fn buffer_binary_quit4() {
let bytes = "homer\nlisa\nmaggie\x00\n";
let mut linebuf = LineBufferBuilder::new()
.binary_detection(BinaryDetection::Quit(b'\x00'))
.build();
let mut rdr = LineBufferReader::new(bytes.as_bytes(), &mut linebuf);
assert!(rdr.buffer().is_empty());
assert!(rdr.fill().unwrap());
assert_eq!(rdr.bstr(), "homer\nlisa\nmaggie");
rdr.consume_all();
assert!(!rdr.fill().unwrap());
assert_eq!(rdr.absolute_byte_offset(), bytes.len() as u64 - 2);
assert_eq!(rdr.binary_byte_offset(), Some(bytes.len() as u64 - 2));
}
#[test]
fn buffer_binary_quit5() {
let mut linebuf = LineBufferBuilder::new()
.binary_detection(BinaryDetection::Quit(b'u'))
.build();
let mut rdr = LineBufferReader::new(SHERLOCK.as_bytes(), &mut linebuf);
assert!(rdr.buffer().is_empty());
assert!(rdr.fill().unwrap());
assert_eq!(
rdr.bstr(),
"\
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, s\
"
);
rdr.consume_all();
assert!(!rdr.fill().unwrap());
assert_eq!(rdr.absolute_byte_offset(), 76);
assert_eq!(rdr.binary_byte_offset(), Some(76));
assert_eq!(SHERLOCK.as_bytes()[76], b'u');
}
#[test]
fn buffer_binary_convert1() {
let bytes = "homer\nli\x00sa\nmaggie\n";
let mut linebuf = LineBufferBuilder::new()
.binary_detection(BinaryDetection::Convert(b'\x00'))
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | true |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/searcher/src/searcher/mmap.rs | crates/searcher/src/searcher/mmap.rs | use std::{fs::File, path::Path};
use memmap::Mmap;
/// Controls the strategy used for determining when to use memory maps.
///
/// If a searcher is called in circumstances where it is possible to use memory
/// maps, and memory maps are enabled, then it will attempt to do so if it
/// believes it will make the search faster.
///
/// By default, memory maps are disabled.
#[derive(Clone, Debug)]
pub struct MmapChoice(MmapChoiceImpl);
#[derive(Clone, Debug)]
enum MmapChoiceImpl {
Auto,
Never,
}
impl Default for MmapChoice {
fn default() -> MmapChoice {
MmapChoice(MmapChoiceImpl::Never)
}
}
impl MmapChoice {
/// Use memory maps when they are believed to be advantageous.
///
/// The heuristics used to determine whether to use a memory map or not
/// may depend on many things, including but not limited to, file size
/// and platform.
///
/// If memory maps are unavailable or cannot be used for a specific input,
/// then normal OS read calls are used instead.
///
/// # Safety
///
/// This constructor is not safe because there is no obvious way to
/// encapsulate the safety of file backed memory maps on all platforms
/// without simultaneously negating some or all of their benefits.
///
/// The specific contract the caller is required to uphold isn't precise,
/// but it basically amounts to something like, "the caller guarantees that
/// the underlying file won't be mutated." This, of course, isn't feasible
/// in many environments. However, command line tools may still decide to
/// take the risk of, say, a `SIGBUS` occurring while attempting to read a
/// memory map.
pub unsafe fn auto() -> MmapChoice {
MmapChoice(MmapChoiceImpl::Auto)
}
/// Never use memory maps, no matter what. This is the default.
pub fn never() -> MmapChoice {
MmapChoice(MmapChoiceImpl::Never)
}
/// Return a memory map if memory maps are enabled and if creating a
/// memory from the given file succeeded and if memory maps are believed
/// to be advantageous for performance.
///
/// If this does attempt to open a memory map and it fails, then `None`
/// is returned and the corresponding error (along with the file path, if
/// present) is logged at the debug level.
pub(crate) fn open(
&self,
file: &File,
path: Option<&Path>,
) -> Option<Mmap> {
if !self.is_enabled() {
return None;
}
if cfg!(target_os = "macos") {
// I guess memory maps on macOS aren't great. Should re-evaluate.
return None;
}
// SAFETY: This is acceptable because the only way `MmapChoiceImpl` can
// be `Auto` is if the caller invoked the `auto` constructor, which
// is itself not safe. Thus, this is a propagation of the caller's
// assertion that using memory maps is safe.
match unsafe { Mmap::map(file) } {
Ok(mmap) => Some(mmap),
Err(err) => {
if let Some(path) = path {
log::debug!(
"{}: failed to open memory map: {}",
path.display(),
err
);
} else {
log::debug!("failed to open memory map: {}", err);
}
None
}
}
}
/// Whether this strategy may employ memory maps or not.
pub(crate) fn is_enabled(&self) -> bool {
match self.0 {
MmapChoiceImpl::Auto => true,
MmapChoiceImpl::Never => false,
}
}
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/searcher/src/searcher/core.rs | crates/searcher/src/searcher/core.rs | use bstr::ByteSlice;
use grep_matcher::{LineMatchKind, Matcher};
use crate::{
line_buffer::BinaryDetection,
lines::{self, LineStep},
searcher::{Config, Range, Searcher},
sink::{
Sink, SinkContext, SinkContextKind, SinkError, SinkFinish, SinkMatch,
},
};
enum FastMatchResult {
Continue,
Stop,
SwitchToSlow,
}
#[derive(Debug)]
pub(crate) struct Core<'s, M: 's, S> {
config: &'s Config,
matcher: M,
searcher: &'s Searcher,
sink: S,
binary: bool,
pos: usize,
absolute_byte_offset: u64,
binary_byte_offset: Option<usize>,
line_number: Option<u64>,
last_line_counted: usize,
last_line_visited: usize,
after_context_left: usize,
has_sunk: bool,
has_matched: bool,
count: u64,
}
impl<'s, M: Matcher, S: Sink> Core<'s, M, S> {
pub(crate) fn new(
searcher: &'s Searcher,
matcher: M,
sink: S,
binary: bool,
) -> Core<'s, M, S> {
let line_number =
if searcher.config.line_number { Some(1) } else { None };
let core = Core {
config: &searcher.config,
matcher,
searcher,
sink,
binary,
pos: 0,
absolute_byte_offset: 0,
binary_byte_offset: None,
line_number,
last_line_counted: 0,
last_line_visited: 0,
after_context_left: 0,
has_sunk: false,
has_matched: false,
count: 0,
};
if !core.searcher.multi_line_with_matcher(&core.matcher) {
if core.is_line_by_line_fast() {
log::trace!("searcher core: will use fast line searcher");
} else {
log::trace!("searcher core: will use slow line searcher");
}
}
core
}
pub(crate) fn pos(&self) -> usize {
self.pos
}
pub(crate) fn set_pos(&mut self, pos: usize) {
self.pos = pos;
}
fn count(&self) -> u64 {
self.count
}
fn increment_count(&mut self) {
self.count += 1;
}
pub(crate) fn binary_byte_offset(&self) -> Option<u64> {
self.binary_byte_offset.map(|offset| offset as u64)
}
pub(crate) fn matcher(&self) -> &M {
&self.matcher
}
pub(crate) fn matched(
&mut self,
buf: &[u8],
range: &Range,
) -> Result<bool, S::Error> {
self.sink_matched(buf, range)
}
pub(crate) fn binary_data(
&mut self,
binary_byte_offset: u64,
) -> Result<bool, S::Error> {
self.sink.binary_data(&self.searcher, binary_byte_offset)
}
fn is_match(&self, line: &[u8]) -> Result<bool, S::Error> {
// We need to strip the line terminator here to match the
// semantics of line-by-line searching. Namely, regexes
// like `(?m)^$` can match at the final position beyond a
// line terminator, which is non-sensical in line oriented
// matching.
let line = lines::without_terminator(line, self.config.line_term);
self.matcher.is_match(line).map_err(S::Error::error_message)
}
pub(crate) fn find(
&mut self,
slice: &[u8],
) -> Result<Option<Range>, S::Error> {
if self.has_exceeded_match_limit() {
return Ok(None);
}
match self.matcher().find(slice) {
Err(err) => Err(S::Error::error_message(err)),
Ok(None) => Ok(None),
Ok(Some(m)) => {
self.increment_count();
Ok(Some(m))
}
}
}
fn shortest_match(
&mut self,
slice: &[u8],
) -> Result<Option<usize>, S::Error> {
if self.has_exceeded_match_limit() {
return Ok(None);
}
match self.matcher.shortest_match(slice) {
Err(err) => return Err(S::Error::error_message(err)),
Ok(None) => return Ok(None),
Ok(Some(m)) => Ok(Some(m)),
}
}
pub(crate) fn begin(&mut self) -> Result<bool, S::Error> {
self.sink.begin(&self.searcher)
}
pub(crate) fn finish(
&mut self,
byte_count: u64,
binary_byte_offset: Option<u64>,
) -> Result<(), S::Error> {
self.sink.finish(
&self.searcher,
&SinkFinish { byte_count, binary_byte_offset },
)
}
pub(crate) fn match_by_line(
&mut self,
buf: &[u8],
) -> Result<bool, S::Error> {
if self.is_line_by_line_fast() {
match self.match_by_line_fast(buf)? {
FastMatchResult::SwitchToSlow => self.match_by_line_slow(buf),
FastMatchResult::Continue => Ok(true),
FastMatchResult::Stop => Ok(false),
}
} else {
self.match_by_line_slow(buf)
}
}
pub(crate) fn roll(&mut self, buf: &[u8]) -> usize {
let consumed = if self.config.max_context() == 0 {
buf.len()
} else {
// It might seem like all we need to care about here is just
// the "before context," but in order to sink the context
// separator (when before_context==0 and after_context>0), we
// need to know something about the position of the previous
// line visited, even if we're at the beginning of the buffer.
//
// ... however, we only need to find the N preceding lines based
// on before context. We can skip this (potentially costly, for
// large values of N) step when before_context==0.
let context_start = lines::preceding(
buf,
self.config.line_term.as_byte(),
self.config.before_context,
);
let consumed =
std::cmp::max(context_start, self.last_line_visited);
consumed
};
self.count_lines(buf, consumed);
self.absolute_byte_offset += consumed as u64;
self.last_line_counted = 0;
self.last_line_visited = 0;
self.set_pos(buf.len() - consumed);
consumed
}
pub(crate) fn detect_binary(
&mut self,
buf: &[u8],
range: &Range,
) -> Result<bool, S::Error> {
if self.binary_byte_offset.is_some() {
return Ok(self.config.binary.quit_byte().is_some());
}
let binary_byte = match self.config.binary.0 {
BinaryDetection::Quit(b) => b,
BinaryDetection::Convert(b) => b,
_ => return Ok(false),
};
if let Some(i) = buf[*range].find_byte(binary_byte) {
let offset = range.start() + i;
self.binary_byte_offset = Some(offset);
if !self.binary_data(offset as u64)? {
return Ok(true);
}
Ok(self.config.binary.quit_byte().is_some())
} else {
Ok(false)
}
}
pub(crate) fn before_context_by_line(
&mut self,
buf: &[u8],
upto: usize,
) -> Result<bool, S::Error> {
if self.config.before_context == 0 {
return Ok(true);
}
let range = Range::new(self.last_line_visited, upto);
if range.is_empty() {
return Ok(true);
}
let before_context_start = range.start()
+ lines::preceding(
&buf[range],
self.config.line_term.as_byte(),
self.config.before_context - 1,
);
let range = Range::new(before_context_start, range.end());
let mut stepper = LineStep::new(
self.config.line_term.as_byte(),
range.start(),
range.end(),
);
while let Some(line) = stepper.next_match(buf) {
if !self.sink_break_context(line.start())? {
return Ok(false);
}
if !self.sink_before_context(buf, &line)? {
return Ok(false);
}
}
Ok(true)
}
pub(crate) fn after_context_by_line(
&mut self,
buf: &[u8],
upto: usize,
) -> Result<bool, S::Error> {
if self.after_context_left == 0 {
return Ok(true);
}
let exceeded_match_limit = self.has_exceeded_match_limit();
let range = Range::new(self.last_line_visited, upto);
let mut stepper = LineStep::new(
self.config.line_term.as_byte(),
range.start(),
range.end(),
);
while let Some(line) = stepper.next_match(buf) {
if exceeded_match_limit
&& self.is_match(&buf[line])? != self.config.invert_match
{
let after_context_left = self.after_context_left;
self.set_pos(line.end());
if !self.sink_matched(buf, &line)? {
return Ok(false);
}
self.after_context_left = after_context_left - 1;
} else if !self.sink_after_context(buf, &line)? {
return Ok(false);
}
if self.after_context_left == 0 {
break;
}
}
Ok(true)
}
pub(crate) fn other_context_by_line(
&mut self,
buf: &[u8],
upto: usize,
) -> Result<bool, S::Error> {
let range = Range::new(self.last_line_visited, upto);
let mut stepper = LineStep::new(
self.config.line_term.as_byte(),
range.start(),
range.end(),
);
while let Some(line) = stepper.next_match(buf) {
if !self.sink_other_context(buf, &line)? {
return Ok(false);
}
}
Ok(true)
}
fn match_by_line_slow(&mut self, buf: &[u8]) -> Result<bool, S::Error> {
debug_assert!(!self.searcher.multi_line_with_matcher(&self.matcher));
let range = Range::new(self.pos(), buf.len());
let mut stepper = LineStep::new(
self.config.line_term.as_byte(),
range.start(),
range.end(),
);
while let Some(line) = stepper.next_match(buf) {
if self.has_exceeded_match_limit()
&& !self.config.passthru
&& self.after_context_left == 0
{
return Ok(false);
}
let matched = {
// Stripping the line terminator is necessary to prevent some
// classes of regexes from matching the empty position *after*
// the end of the line. For example, `(?m)^$` will match at
// position (2, 2) in the string `a\n`.
let slice = lines::without_terminator(
&buf[line],
self.config.line_term,
);
self.shortest_match(slice)?.is_some()
};
self.set_pos(line.end());
let success = matched != self.config.invert_match;
if success {
self.has_matched = true;
self.increment_count();
if !self.before_context_by_line(buf, line.start())? {
return Ok(false);
}
if !self.sink_matched(buf, &line)? {
return Ok(false);
}
} else if self.after_context_left >= 1 {
if !self.sink_after_context(buf, &line)? {
return Ok(false);
}
} else if self.config.passthru {
if !self.sink_other_context(buf, &line)? {
return Ok(false);
}
}
if self.config.stop_on_nonmatch && !success && self.has_matched {
return Ok(false);
}
}
Ok(true)
}
fn match_by_line_fast(
&mut self,
buf: &[u8],
) -> Result<FastMatchResult, S::Error> {
use FastMatchResult::*;
debug_assert!(!self.config.passthru);
while !buf[self.pos()..].is_empty() {
if self.config.stop_on_nonmatch && self.has_matched {
return Ok(SwitchToSlow);
}
if self.config.invert_match {
if !self.match_by_line_fast_invert(buf)? {
break;
}
} else if let Some(line) = self.find_by_line_fast(buf)? {
self.has_matched = true;
self.increment_count();
if self.config.max_context() > 0 {
if !self.after_context_by_line(buf, line.start())? {
return Ok(Stop);
}
if !self.before_context_by_line(buf, line.start())? {
return Ok(Stop);
}
}
self.set_pos(line.end());
if !self.sink_matched(buf, &line)? {
return Ok(Stop);
}
} else {
break;
}
}
if !self.after_context_by_line(buf, buf.len())? {
return Ok(Stop);
}
if self.has_exceeded_match_limit() && self.after_context_left == 0 {
return Ok(Stop);
}
self.set_pos(buf.len());
Ok(Continue)
}
#[inline(always)]
fn match_by_line_fast_invert(
&mut self,
buf: &[u8],
) -> Result<bool, S::Error> {
assert!(self.config.invert_match);
let invert_match = match self.find_by_line_fast(buf)? {
None => {
let range = Range::new(self.pos(), buf.len());
self.set_pos(range.end());
range
}
Some(line) => {
let range = Range::new(self.pos(), line.start());
self.set_pos(line.end());
range
}
};
if invert_match.is_empty() {
return Ok(true);
}
self.has_matched = true;
if !self.after_context_by_line(buf, invert_match.start())? {
return Ok(false);
}
if !self.before_context_by_line(buf, invert_match.start())? {
return Ok(false);
}
let mut stepper = LineStep::new(
self.config.line_term.as_byte(),
invert_match.start(),
invert_match.end(),
);
while let Some(line) = stepper.next_match(buf) {
self.increment_count();
if !self.sink_matched(buf, &line)? {
return Ok(false);
}
if self.has_exceeded_match_limit() {
return Ok(false);
}
}
Ok(true)
}
#[inline(always)]
fn find_by_line_fast(
&mut self,
buf: &[u8],
) -> Result<Option<Range>, S::Error> {
debug_assert!(!self.searcher.multi_line_with_matcher(&self.matcher));
debug_assert!(self.is_line_by_line_fast());
let mut pos = self.pos();
while !buf[pos..].is_empty() {
if self.has_exceeded_match_limit() {
return Ok(None);
}
match self.matcher.find_candidate_line(&buf[pos..]) {
Err(err) => return Err(S::Error::error_message(err)),
Ok(None) => return Ok(None),
Ok(Some(LineMatchKind::Confirmed(i))) => {
let line = lines::locate(
buf,
self.config.line_term.as_byte(),
Range::zero(i).offset(pos),
);
// If we matched beyond the end of the buffer, then we
// don't report this as a match.
if line.start() == buf.len() {
pos = buf.len();
continue;
}
return Ok(Some(line));
}
Ok(Some(LineMatchKind::Candidate(i))) => {
let line = lines::locate(
buf,
self.config.line_term.as_byte(),
Range::zero(i).offset(pos),
);
if self.is_match(&buf[line])? {
return Ok(Some(line));
}
pos = line.end();
}
}
}
Ok(None)
}
#[inline(always)]
fn sink_matched(
&mut self,
buf: &[u8],
range: &Range,
) -> Result<bool, S::Error> {
if self.binary && self.detect_binary(buf, range)? {
return Ok(false);
}
if !self.sink_break_context(range.start())? {
return Ok(false);
}
self.count_lines(buf, range.start());
let offset = self.absolute_byte_offset + range.start() as u64;
let linebuf = &buf[*range];
let keepgoing = self.sink.matched(
&self.searcher,
&SinkMatch {
line_term: self.config.line_term,
bytes: linebuf,
absolute_byte_offset: offset,
line_number: self.line_number,
buffer: buf,
bytes_range_in_buffer: range.start()..range.end(),
},
)?;
if !keepgoing {
return Ok(false);
}
self.last_line_visited = range.end();
self.after_context_left = self.config.after_context;
self.has_sunk = true;
Ok(true)
}
fn sink_before_context(
&mut self,
buf: &[u8],
range: &Range,
) -> Result<bool, S::Error> {
if self.binary && self.detect_binary(buf, range)? {
return Ok(false);
}
self.count_lines(buf, range.start());
let offset = self.absolute_byte_offset + range.start() as u64;
let keepgoing = self.sink.context(
&self.searcher,
&SinkContext {
#[cfg(test)]
line_term: self.config.line_term,
bytes: &buf[*range],
kind: SinkContextKind::Before,
absolute_byte_offset: offset,
line_number: self.line_number,
},
)?;
if !keepgoing {
return Ok(false);
}
self.last_line_visited = range.end();
self.has_sunk = true;
Ok(true)
}
fn sink_after_context(
&mut self,
buf: &[u8],
range: &Range,
) -> Result<bool, S::Error> {
assert!(self.after_context_left >= 1);
if self.binary && self.detect_binary(buf, range)? {
return Ok(false);
}
self.count_lines(buf, range.start());
let offset = self.absolute_byte_offset + range.start() as u64;
let keepgoing = self.sink.context(
&self.searcher,
&SinkContext {
#[cfg(test)]
line_term: self.config.line_term,
bytes: &buf[*range],
kind: SinkContextKind::After,
absolute_byte_offset: offset,
line_number: self.line_number,
},
)?;
if !keepgoing {
return Ok(false);
}
self.last_line_visited = range.end();
self.after_context_left -= 1;
self.has_sunk = true;
Ok(true)
}
fn sink_other_context(
&mut self,
buf: &[u8],
range: &Range,
) -> Result<bool, S::Error> {
if self.binary && self.detect_binary(buf, range)? {
return Ok(false);
}
self.count_lines(buf, range.start());
let offset = self.absolute_byte_offset + range.start() as u64;
let keepgoing = self.sink.context(
&self.searcher,
&SinkContext {
#[cfg(test)]
line_term: self.config.line_term,
bytes: &buf[*range],
kind: SinkContextKind::Other,
absolute_byte_offset: offset,
line_number: self.line_number,
},
)?;
if !keepgoing {
return Ok(false);
}
self.last_line_visited = range.end();
self.has_sunk = true;
Ok(true)
}
fn sink_break_context(
&mut self,
start_of_line: usize,
) -> Result<bool, S::Error> {
let is_gap = self.last_line_visited < start_of_line;
let any_context =
self.config.before_context > 0 || self.config.after_context > 0;
if !any_context || !self.has_sunk || !is_gap {
Ok(true)
} else {
self.sink.context_break(&self.searcher)
}
}
fn count_lines(&mut self, buf: &[u8], upto: usize) {
if let Some(ref mut line_number) = self.line_number {
if self.last_line_counted >= upto {
return;
}
let slice = &buf[self.last_line_counted..upto];
let count = lines::count(slice, self.config.line_term.as_byte());
*line_number += count;
self.last_line_counted = upto;
}
}
fn is_line_by_line_fast(&self) -> bool {
debug_assert!(!self.searcher.multi_line_with_matcher(&self.matcher));
if self.config.passthru {
return false;
}
if self.config.stop_on_nonmatch && self.has_matched {
return false;
}
if let Some(line_term) = self.matcher.line_terminator() {
// FIXME: This works around a bug in grep-regex where it does
// not set the line terminator of the regex itself, and thus
// line anchors like `(?m:^)` and `(?m:$)` will not match
// anything except for `\n`. So for now, we just disable the fast
// line-by-line searcher which requires the regex to be able to
// deal with line terminators correctly. The slow line-by-line
// searcher strips line terminators and thus absolves the regex
// engine from needing to care about whether they are `\n` or NUL.
if line_term.as_byte() == b'\x00' {
return false;
}
if line_term == self.config.line_term {
return true;
}
}
if let Some(non_matching) = self.matcher.non_matching_bytes() {
// If the line terminator is CRLF, we don't actually need to care
// whether the regex can match `\r` or not. Namely, a `\r` is
// neither necessary nor sufficient to terminate a line. A `\n` is
// always required.
if non_matching.contains(self.config.line_term.as_byte()) {
return true;
}
}
false
}
fn has_exceeded_match_limit(&self) -> bool {
self.config.max_matches.map_or(false, |limit| self.count() >= limit)
}
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/searcher/src/searcher/glue.rs | crates/searcher/src/searcher/glue.rs | use grep_matcher::Matcher;
use crate::{
line_buffer::{DEFAULT_BUFFER_CAPACITY, LineBufferReader},
lines::{self, LineStep},
searcher::{Config, Range, Searcher, core::Core},
sink::{Sink, SinkError},
};
#[derive(Debug)]
pub(crate) struct ReadByLine<'s, M, R, S> {
config: &'s Config,
core: Core<'s, M, S>,
rdr: LineBufferReader<'s, R>,
}
impl<'s, M, R, S> ReadByLine<'s, M, R, S>
where
M: Matcher,
R: std::io::Read,
S: Sink,
{
pub(crate) fn new(
searcher: &'s Searcher,
matcher: M,
read_from: LineBufferReader<'s, R>,
write_to: S,
) -> ReadByLine<'s, M, R, S> {
debug_assert!(!searcher.multi_line_with_matcher(&matcher));
ReadByLine {
config: &searcher.config,
core: Core::new(searcher, matcher, write_to, false),
rdr: read_from,
}
}
pub(crate) fn run(mut self) -> Result<(), S::Error> {
if self.core.begin()? {
while self.fill()? {
if !self.core.match_by_line(self.rdr.buffer())? {
self.consume_remaining();
break;
}
}
}
self.core.finish(
self.rdr.absolute_byte_offset(),
self.rdr.binary_byte_offset(),
)
}
fn consume_remaining(&mut self) {
let consumed = self.core.pos();
self.rdr.consume(consumed);
}
fn fill(&mut self) -> Result<bool, S::Error> {
assert!(self.rdr.buffer()[self.core.pos()..].is_empty());
let already_binary = self.rdr.binary_byte_offset().is_some();
let old_buf_len = self.rdr.buffer().len();
let consumed = self.core.roll(self.rdr.buffer());
self.rdr.consume(consumed);
let didread = match self.rdr.fill() {
Err(err) => return Err(S::Error::error_io(err)),
Ok(didread) => didread,
};
if !already_binary {
if let Some(offset) = self.rdr.binary_byte_offset() {
if !self.core.binary_data(offset)? {
return Ok(false);
}
}
}
if !didread || self.should_binary_quit() {
return Ok(false);
}
// If rolling the buffer didn't result in consuming anything and if
// re-filling the buffer didn't add any bytes, then the only thing in
// our buffer is leftover context, which we no longer need since there
// is nothing left to search. So forcefully quit.
if consumed == 0 && old_buf_len == self.rdr.buffer().len() {
self.rdr.consume(old_buf_len);
return Ok(false);
}
Ok(true)
}
fn should_binary_quit(&self) -> bool {
self.rdr.binary_byte_offset().is_some()
&& self.config.binary.quit_byte().is_some()
}
}
#[derive(Debug)]
pub(crate) struct SliceByLine<'s, M, S> {
core: Core<'s, M, S>,
slice: &'s [u8],
}
impl<'s, M: Matcher, S: Sink> SliceByLine<'s, M, S> {
pub(crate) fn new(
searcher: &'s Searcher,
matcher: M,
slice: &'s [u8],
write_to: S,
) -> SliceByLine<'s, M, S> {
debug_assert!(!searcher.multi_line_with_matcher(&matcher));
SliceByLine {
core: Core::new(searcher, matcher, write_to, true),
slice,
}
}
pub(crate) fn run(mut self) -> Result<(), S::Error> {
if self.core.begin()? {
let binary_upto =
std::cmp::min(self.slice.len(), DEFAULT_BUFFER_CAPACITY);
let binary_range = Range::new(0, binary_upto);
if !self.core.detect_binary(self.slice, &binary_range)? {
while !self.slice[self.core.pos()..].is_empty()
&& self.core.match_by_line(self.slice)?
{}
}
}
let byte_count = self.byte_count();
let binary_byte_offset = self.core.binary_byte_offset();
self.core.finish(byte_count, binary_byte_offset)
}
fn byte_count(&mut self) -> u64 {
match self.core.binary_byte_offset() {
Some(offset) if offset < self.core.pos() as u64 => offset,
_ => self.core.pos() as u64,
}
}
}
#[derive(Debug)]
pub(crate) struct MultiLine<'s, M, S> {
config: &'s Config,
core: Core<'s, M, S>,
slice: &'s [u8],
last_match: Option<Range>,
}
impl<'s, M: Matcher, S: Sink> MultiLine<'s, M, S> {
pub(crate) fn new(
searcher: &'s Searcher,
matcher: M,
slice: &'s [u8],
write_to: S,
) -> MultiLine<'s, M, S> {
debug_assert!(searcher.multi_line_with_matcher(&matcher));
MultiLine {
config: &searcher.config,
core: Core::new(searcher, matcher, write_to, true),
slice,
last_match: None,
}
}
pub(crate) fn run(mut self) -> Result<(), S::Error> {
if self.core.begin()? {
let binary_upto =
std::cmp::min(self.slice.len(), DEFAULT_BUFFER_CAPACITY);
let binary_range = Range::new(0, binary_upto);
if !self.core.detect_binary(self.slice, &binary_range)? {
let mut keepgoing = true;
while !self.slice[self.core.pos()..].is_empty() && keepgoing {
keepgoing = self.sink()?;
}
if keepgoing {
keepgoing = match self.last_match.take() {
None => true,
Some(last_match) => {
if self.sink_context(&last_match)? {
self.sink_matched(&last_match)?;
}
true
}
};
}
// Take care of any remaining context after the last match.
if keepgoing {
if self.config.passthru {
self.core.other_context_by_line(
self.slice,
self.slice.len(),
)?;
} else {
self.core.after_context_by_line(
self.slice,
self.slice.len(),
)?;
}
}
}
}
let byte_count = self.byte_count();
let binary_byte_offset = self.core.binary_byte_offset();
self.core.finish(byte_count, binary_byte_offset)
}
fn sink(&mut self) -> Result<bool, S::Error> {
if self.config.invert_match {
return self.sink_matched_inverted();
}
let mat = match self.find()? {
Some(range) => range,
None => {
self.core.set_pos(self.slice.len());
return Ok(true);
}
};
self.advance(&mat);
let line =
lines::locate(self.slice, self.config.line_term.as_byte(), mat);
// We delay sinking the match to make sure we group adjacent matches
// together in a single sink. Adjacent matches are distinct matches
// that start and end on the same line, respectively. This guarantees
// that a single line is never sinked more than once.
match self.last_match.take() {
None => {
self.last_match = Some(line);
Ok(true)
}
Some(last_match) => {
// If the lines in the previous match overlap with the lines
// in this match, then simply grow the match and move on. This
// happens when the next match begins on the same line that the
// last match ends on.
//
// Note that we do not technically require strict overlap here.
// Instead, we only require that the lines are adjacent. This
// provides larger blocks of lines to the printer, and results
// in overall better behavior with respect to how replacements
// are handled.
//
// See: https://github.com/BurntSushi/ripgrep/issues/1311
// And also the associated commit fixing #1311.
if last_match.end() >= line.start() {
self.last_match = Some(last_match.with_end(line.end()));
Ok(true)
} else {
self.last_match = Some(line);
if !self.sink_context(&last_match)? {
return Ok(false);
}
self.sink_matched(&last_match)
}
}
}
}
fn sink_matched_inverted(&mut self) -> Result<bool, S::Error> {
assert!(self.config.invert_match);
let invert_match = match self.find()? {
None => {
let range = Range::new(self.core.pos(), self.slice.len());
self.core.set_pos(range.end());
range
}
Some(mat) => {
let line = lines::locate(
self.slice,
self.config.line_term.as_byte(),
mat,
);
let range = Range::new(self.core.pos(), line.start());
self.advance(&line);
range
}
};
if invert_match.is_empty() {
return Ok(true);
}
if !self.sink_context(&invert_match)? {
return Ok(false);
}
let mut stepper = LineStep::new(
self.config.line_term.as_byte(),
invert_match.start(),
invert_match.end(),
);
while let Some(line) = stepper.next_match(self.slice) {
if !self.sink_matched(&line)? {
return Ok(false);
}
}
Ok(true)
}
fn sink_matched(&mut self, range: &Range) -> Result<bool, S::Error> {
if range.is_empty() {
// The only way we can produce an empty line for a match is if we
// match the position immediately following the last byte that we
// search, and where that last byte is also the line terminator. We
// never want to report that match, and we know we're done at that
// point anyway, so stop the search.
return Ok(false);
}
self.core.matched(self.slice, range)
}
fn sink_context(&mut self, range: &Range) -> Result<bool, S::Error> {
if self.config.passthru {
if !self.core.other_context_by_line(self.slice, range.start())? {
return Ok(false);
}
} else {
if !self.core.after_context_by_line(self.slice, range.start())? {
return Ok(false);
}
if !self.core.before_context_by_line(self.slice, range.start())? {
return Ok(false);
}
}
Ok(true)
}
fn find(&mut self) -> Result<Option<Range>, S::Error> {
self.core
.find(&self.slice[self.core.pos()..])
.map(|m| m.map(|m| m.offset(self.core.pos())))
}
/// Advance the search position based on the previous match.
///
/// If the previous match is zero width, then this advances the search
/// position one byte past the end of the match.
fn advance(&mut self, range: &Range) {
self.core.set_pos(range.end());
if range.is_empty() && self.core.pos() < self.slice.len() {
let newpos = self.core.pos() + 1;
self.core.set_pos(newpos);
}
}
fn byte_count(&mut self) -> u64 {
match self.core.binary_byte_offset() {
Some(offset) if offset < self.core.pos() as u64 => offset,
_ => self.core.pos() as u64,
}
}
}
#[cfg(test)]
mod tests {
use crate::{
searcher::{BinaryDetection, SearcherBuilder},
testutil::{KitchenSink, RegexMatcher, SearcherTester},
};
use super::*;
const SHERLOCK: &'static str = "\
For the Doctor Watsons of this world, as opposed to the Sherlock
Holmeses, success in the province of detective work must always
be, to a very large extent, the result of luck. Sherlock Holmes
can extract a clew from a wisp of straw or a flake of cigar ash;
but Doctor Watson has to have it taken out for him and dusted,
and exhibited clearly, with a label attached.\
";
const CODE: &'static str = "\
extern crate snap;
use std::io;
fn main() {
let stdin = io::stdin();
let stdout = io::stdout();
// Wrap the stdin reader in a Snappy reader.
let mut rdr = snap::Reader::new(stdin.lock());
let mut wtr = stdout.lock();
io::copy(&mut rdr, &mut wtr).expect(\"I/O operation failed\");
}
";
#[test]
fn basic1() {
let exp = "\
0:For the Doctor Watsons of this world, as opposed to the Sherlock
129:be, to a very large extent, the result of luck. Sherlock Holmes
byte count:366
";
SearcherTester::new(SHERLOCK, "Sherlock")
.line_number(false)
.expected_no_line_number(exp)
.test();
}
#[test]
fn basic2() {
let exp = "\nbyte count:366\n";
SearcherTester::new(SHERLOCK, "NADA")
.line_number(false)
.expected_no_line_number(exp)
.test();
}
#[test]
fn basic3() {
let exp = "\
0:For the Doctor Watsons of this world, as opposed to the Sherlock
65:Holmeses, success in the province of detective work must always
129:be, to a very large extent, the result of luck. Sherlock Holmes
193:can extract a clew from a wisp of straw or a flake of cigar ash;
258:but Doctor Watson has to have it taken out for him and dusted,
321:and exhibited clearly, with a label attached.
byte count:366
";
SearcherTester::new(SHERLOCK, "a")
.line_number(false)
.expected_no_line_number(exp)
.test();
}
#[test]
fn basic4() {
let haystack = "\
a
b
c
d
";
let byte_count = haystack.len();
let exp = format!("0:a\n\nbyte count:{}\n", byte_count);
SearcherTester::new(haystack, "a")
.line_number(false)
.expected_no_line_number(&exp)
.test();
}
#[test]
fn invert1() {
let exp = "\
65:Holmeses, success in the province of detective work must always
193:can extract a clew from a wisp of straw or a flake of cigar ash;
258:but Doctor Watson has to have it taken out for him and dusted,
321:and exhibited clearly, with a label attached.
byte count:366
";
SearcherTester::new(SHERLOCK, "Sherlock")
.line_number(false)
.invert_match(true)
.expected_no_line_number(exp)
.test();
}
#[test]
fn line_number1() {
let exp = "\
0:For the Doctor Watsons of this world, as opposed to the Sherlock
129:be, to a very large extent, the result of luck. Sherlock Holmes
byte count:366
";
let exp_line = "\
1:0:For the Doctor Watsons of this world, as opposed to the Sherlock
3:129:be, to a very large extent, the result of luck. Sherlock Holmes
byte count:366
";
SearcherTester::new(SHERLOCK, "Sherlock")
.expected_no_line_number(exp)
.expected_with_line_number(exp_line)
.test();
}
#[test]
fn line_number_invert1() {
let exp = "\
65:Holmeses, success in the province of detective work must always
193:can extract a clew from a wisp of straw or a flake of cigar ash;
258:but Doctor Watson has to have it taken out for him and dusted,
321:and exhibited clearly, with a label attached.
byte count:366
";
let exp_line = "\
2:65:Holmeses, success in the province of detective work must always
4:193:can extract a clew from a wisp of straw or a flake of cigar ash;
5:258:but Doctor Watson has to have it taken out for him and dusted,
6:321:and exhibited clearly, with a label attached.
byte count:366
";
SearcherTester::new(SHERLOCK, "Sherlock")
.invert_match(true)
.expected_no_line_number(exp)
.expected_with_line_number(exp_line)
.test();
}
#[test]
fn multi_line_overlap1() {
let haystack = "xxx\nabc\ndefxxxabc\ndefxxx\nxxx";
let byte_count = haystack.len();
let exp = format!(
"4:abc\n8:defxxxabc\n18:defxxx\n\nbyte count:{}\n",
byte_count
);
SearcherTester::new(haystack, "abc\ndef")
.by_line(false)
.line_number(false)
.expected_no_line_number(&exp)
.test();
}
#[test]
fn multi_line_overlap2() {
let haystack = "xxx\nabc\ndefabc\ndefxxx\nxxx";
let byte_count = haystack.len();
let exp = format!(
"4:abc\n8:defabc\n15:defxxx\n\nbyte count:{}\n",
byte_count
);
SearcherTester::new(haystack, "abc\ndef")
.by_line(false)
.line_number(false)
.expected_no_line_number(&exp)
.test();
}
#[test]
fn empty_line1() {
let exp = "\nbyte count:0\n";
SearcherTester::new("", r"^$")
.expected_no_line_number(exp)
.expected_with_line_number(exp)
.test();
}
#[test]
fn empty_line2() {
let exp = "0:\n\nbyte count:1\n";
let exp_line = "1:0:\n\nbyte count:1\n";
SearcherTester::new("\n", r"^$")
.expected_no_line_number(exp)
.expected_with_line_number(exp_line)
.test();
}
#[test]
fn empty_line3() {
let exp = "0:\n1:\n\nbyte count:2\n";
let exp_line = "1:0:\n2:1:\n\nbyte count:2\n";
SearcherTester::new("\n\n", r"^$")
.expected_no_line_number(exp)
.expected_with_line_number(exp_line)
.test();
}
#[test]
fn empty_line4() {
// See: https://github.com/BurntSushi/ripgrep/issues/441
let haystack = "\
a
b
c
d
";
let byte_count = haystack.len();
let exp = format!("4:\n7:\n8:\n\nbyte count:{}\n", byte_count);
let exp_line =
format!("3:4:\n5:7:\n6:8:\n\nbyte count:{}\n", byte_count);
SearcherTester::new(haystack, r"^$")
.expected_no_line_number(&exp)
.expected_with_line_number(&exp_line)
.test();
}
#[test]
fn empty_line5() {
// See: https://github.com/BurntSushi/ripgrep/issues/441
// This is like empty_line4, but lacks the trailing line terminator.
let haystack = "\
a
b
c
d";
let byte_count = haystack.len();
let exp = format!("4:\n7:\n8:\n\nbyte count:{}\n", byte_count);
let exp_line =
format!("3:4:\n5:7:\n6:8:\n\nbyte count:{}\n", byte_count);
SearcherTester::new(haystack, r"^$")
.expected_no_line_number(&exp)
.expected_with_line_number(&exp_line)
.test();
}
#[test]
fn empty_line6() {
// See: https://github.com/BurntSushi/ripgrep/issues/441
// This is like empty_line4, but includes an empty line at the end.
let haystack = "\
a
b
c
d
";
let byte_count = haystack.len();
let exp = format!("4:\n7:\n8:\n11:\n\nbyte count:{}\n", byte_count);
let exp_line =
format!("3:4:\n5:7:\n6:8:\n8:11:\n\nbyte count:{}\n", byte_count);
SearcherTester::new(haystack, r"^$")
.expected_no_line_number(&exp)
.expected_with_line_number(&exp_line)
.test();
}
#[test]
fn big1() {
let mut haystack = String::new();
haystack.push_str("a\n");
// Pick an arbitrary number above the capacity.
for _ in 0..(4 * (DEFAULT_BUFFER_CAPACITY + 7)) {
haystack.push_str("zzz\n");
}
haystack.push_str("a\n");
let byte_count = haystack.len();
let exp = format!("0:a\n1048690:a\n\nbyte count:{}\n", byte_count);
SearcherTester::new(&haystack, "a")
.line_number(false)
.expected_no_line_number(&exp)
.test();
}
#[test]
fn big_error_one_line() {
let mut haystack = String::new();
haystack.push_str("a\n");
// Pick an arbitrary number above the capacity.
for _ in 0..(4 * (DEFAULT_BUFFER_CAPACITY + 7)) {
haystack.push_str("zzz\n");
}
haystack.push_str("a\n");
let matcher = RegexMatcher::new("a");
let mut sink = KitchenSink::new();
let mut searcher = SearcherBuilder::new()
.heap_limit(Some(3)) // max line length is 4, one byte short
.build();
let result =
searcher.search_reader(&matcher, haystack.as_bytes(), &mut sink);
assert!(result.is_err());
}
#[test]
fn big_error_multi_line() {
let mut haystack = String::new();
haystack.push_str("a\n");
// Pick an arbitrary number above the capacity.
for _ in 0..(4 * (DEFAULT_BUFFER_CAPACITY + 7)) {
haystack.push_str("zzz\n");
}
haystack.push_str("a\n");
let matcher = RegexMatcher::new("a");
let mut sink = KitchenSink::new();
let mut searcher = SearcherBuilder::new()
.multi_line(true)
.heap_limit(Some(haystack.len())) // actually need one more byte
.build();
let result =
searcher.search_reader(&matcher, haystack.as_bytes(), &mut sink);
assert!(result.is_err());
}
#[test]
fn binary1() {
let haystack = "\x00a";
let exp = "\nbyte count:0\nbinary offset:0\n";
SearcherTester::new(haystack, "a")
.binary_detection(BinaryDetection::quit(0))
.line_number(false)
.expected_no_line_number(exp)
.test();
}
#[test]
fn binary2() {
let haystack = "a\x00";
let exp = "\nbyte count:0\nbinary offset:1\n";
SearcherTester::new(haystack, "a")
.binary_detection(BinaryDetection::quit(0))
.line_number(false)
.expected_no_line_number(exp)
.test();
}
#[test]
fn binary3() {
let mut haystack = String::new();
haystack.push_str("a\n");
for _ in 0..DEFAULT_BUFFER_CAPACITY {
haystack.push_str("zzz\n");
}
haystack.push_str("a\n");
haystack.push_str("zzz\n");
haystack.push_str("a\x00a\n");
haystack.push_str("zzz\n");
haystack.push_str("a\n");
// The line buffered searcher has slightly different semantics here.
// Namely, it will *always* detect binary data in the current buffer
// before searching it. Thus, the total number of bytes searched is
// smaller than below.
let exp = "0:a\n\nbyte count:262146\nbinary offset:262153\n";
// In contrast, the slice readers (for multi line as well) will only
// look for binary data in the initial chunk of bytes. After that
// point, it only looks for binary data in matches. Note though that
// the binary offset remains the same. (See the binary4 test for a case
// where the offset is explicitly different.)
let exp_slice =
"0:a\n262146:a\n\nbyte count:262153\nbinary offset:262153\n";
SearcherTester::new(&haystack, "a")
.binary_detection(BinaryDetection::quit(0))
.line_number(false)
.auto_heap_limit(false)
.expected_no_line_number(exp)
.expected_slice_no_line_number(exp_slice)
.test();
}
#[test]
fn binary4() {
let mut haystack = String::new();
haystack.push_str("a\n");
for _ in 0..DEFAULT_BUFFER_CAPACITY {
haystack.push_str("zzz\n");
}
haystack.push_str("a\n");
// The Read searcher will detect binary data here, but since this is
// beyond the initial buffer size and doesn't otherwise contain a
// match, the Slice reader won't detect the binary data until the next
// line (which is a match).
haystack.push_str("b\x00b\n");
haystack.push_str("a\x00a\n");
haystack.push_str("a\n");
let exp = "0:a\n\nbyte count:262146\nbinary offset:262149\n";
// The binary offset for the Slice readers corresponds to the binary
// data in `a\x00a\n` since the first line with binary data
// (`b\x00b\n`) isn't part of a match, and is therefore undetected.
let exp_slice =
"0:a\n262146:a\n\nbyte count:262153\nbinary offset:262153\n";
SearcherTester::new(&haystack, "a")
.binary_detection(BinaryDetection::quit(0))
.line_number(false)
.auto_heap_limit(false)
.expected_no_line_number(exp)
.expected_slice_no_line_number(exp_slice)
.test();
}
#[test]
fn passthru_sherlock1() {
let exp = "\
0:For the Doctor Watsons of this world, as opposed to the Sherlock
65-Holmeses, success in the province of detective work must always
129:be, to a very large extent, the result of luck. Sherlock Holmes
193-can extract a clew from a wisp of straw or a flake of cigar ash;
258-but Doctor Watson has to have it taken out for him and dusted,
321-and exhibited clearly, with a label attached.
byte count:366
";
SearcherTester::new(SHERLOCK, "Sherlock")
.passthru(true)
.line_number(false)
.expected_no_line_number(exp)
.test();
}
#[test]
fn passthru_sherlock_invert1() {
let exp = "\
0-For the Doctor Watsons of this world, as opposed to the Sherlock
65:Holmeses, success in the province of detective work must always
129-be, to a very large extent, the result of luck. Sherlock Holmes
193:can extract a clew from a wisp of straw or a flake of cigar ash;
258:but Doctor Watson has to have it taken out for him and dusted,
321:and exhibited clearly, with a label attached.
byte count:366
";
SearcherTester::new(SHERLOCK, "Sherlock")
.passthru(true)
.line_number(false)
.invert_match(true)
.expected_no_line_number(exp)
.test();
}
#[test]
fn context_sherlock1() {
let exp = "\
0:For the Doctor Watsons of this world, as opposed to the Sherlock
65-Holmeses, success in the province of detective work must always
129:be, to a very large extent, the result of luck. Sherlock Holmes
193-can extract a clew from a wisp of straw or a flake of cigar ash;
byte count:366
";
let exp_lines = "\
1:0:For the Doctor Watsons of this world, as opposed to the Sherlock
2-65-Holmeses, success in the province of detective work must always
3:129:be, to a very large extent, the result of luck. Sherlock Holmes
4-193-can extract a clew from a wisp of straw or a flake of cigar ash;
byte count:366
";
// before and after + line numbers
SearcherTester::new(SHERLOCK, "Sherlock")
.after_context(1)
.before_context(1)
.line_number(true)
.expected_no_line_number(exp)
.expected_with_line_number(exp_lines)
.test();
// after
SearcherTester::new(SHERLOCK, "Sherlock")
.after_context(1)
.line_number(false)
.expected_no_line_number(exp)
.test();
// before
let exp = "\
0:For the Doctor Watsons of this world, as opposed to the Sherlock
65-Holmeses, success in the province of detective work must always
129:be, to a very large extent, the result of luck. Sherlock Holmes
byte count:366
";
SearcherTester::new(SHERLOCK, "Sherlock")
.before_context(1)
.line_number(false)
.expected_no_line_number(exp)
.test();
}
#[test]
fn context_sherlock_invert1() {
let exp = "\
0-For the Doctor Watsons of this world, as opposed to the Sherlock
65:Holmeses, success in the province of detective work must always
129-be, to a very large extent, the result of luck. Sherlock Holmes
193:can extract a clew from a wisp of straw or a flake of cigar ash;
258:but Doctor Watson has to have it taken out for him and dusted,
321:and exhibited clearly, with a label attached.
byte count:366
";
let exp_lines = "\
1-0-For the Doctor Watsons of this world, as opposed to the Sherlock
2:65:Holmeses, success in the province of detective work must always
3-129-be, to a very large extent, the result of luck. Sherlock Holmes
4:193:can extract a clew from a wisp of straw or a flake of cigar ash;
5:258:but Doctor Watson has to have it taken out for him and dusted,
6:321:and exhibited clearly, with a label attached.
byte count:366
";
// before and after + line numbers
SearcherTester::new(SHERLOCK, "Sherlock")
.after_context(1)
.before_context(1)
.line_number(true)
.invert_match(true)
.expected_no_line_number(exp)
.expected_with_line_number(exp_lines)
.test();
// before
SearcherTester::new(SHERLOCK, "Sherlock")
.before_context(1)
.line_number(false)
.invert_match(true)
.expected_no_line_number(exp)
.test();
// after
let exp = "\
65:Holmeses, success in the province of detective work must always
129-be, to a very large extent, the result of luck. Sherlock Holmes
193:can extract a clew from a wisp of straw or a flake of cigar ash;
258:but Doctor Watson has to have it taken out for him and dusted,
321:and exhibited clearly, with a label attached.
byte count:366
";
SearcherTester::new(SHERLOCK, "Sherlock")
.after_context(1)
.line_number(false)
.invert_match(true)
.expected_no_line_number(exp)
.test();
}
#[test]
fn context_sherlock2() {
let exp = "\
65-Holmeses, success in the province of detective work must always
129:be, to a very large extent, the result of luck. Sherlock Holmes
193:can extract a clew from a wisp of straw or a flake of cigar ash;
258-but Doctor Watson has to have it taken out for him and dusted,
321:and exhibited clearly, with a label attached.
byte count:366
";
let exp_lines = "\
2-65-Holmeses, success in the province of detective work must always
3:129:be, to a very large extent, the result of luck. Sherlock Holmes
4:193:can extract a clew from a wisp of straw or a flake of cigar ash;
5-258-but Doctor Watson has to have it taken out for him and dusted,
6:321:and exhibited clearly, with a label attached.
byte count:366
";
// before + after + line numbers
SearcherTester::new(SHERLOCK, " a ")
.after_context(1)
.before_context(1)
.line_number(true)
.expected_no_line_number(exp)
.expected_with_line_number(exp_lines)
.test();
// before
SearcherTester::new(SHERLOCK, " a ")
.before_context(1)
.line_number(false)
.expected_no_line_number(exp)
.test();
// after
let exp = "\
129:be, to a very large extent, the result of luck. Sherlock Holmes
193:can extract a clew from a wisp of straw or a flake of cigar ash;
258-but Doctor Watson has to have it taken out for him and dusted,
321:and exhibited clearly, with a label attached.
byte count:366
";
SearcherTester::new(SHERLOCK, " a ")
.after_context(1)
.line_number(false)
.expected_no_line_number(exp)
.test();
}
#[test]
fn context_sherlock_invert2() {
let exp = "\
0:For the Doctor Watsons of this world, as opposed to the Sherlock
65:Holmeses, success in the province of detective work must always
129-be, to a very large extent, the result of luck. Sherlock Holmes
193-can extract a clew from a wisp of straw or a flake of cigar ash;
258:but Doctor Watson has to have it taken out for him and dusted,
321-and exhibited clearly, with a label attached.
byte count:366
";
let exp_lines = "\
1:0:For the Doctor Watsons of this world, as opposed to the Sherlock
2:65:Holmeses, success in the province of detective work must always
3-129-be, to a very large extent, the result of luck. Sherlock Holmes
4-193-can extract a clew from a wisp of straw or a flake of cigar ash;
5:258:but Doctor Watson has to have it taken out for him and dusted,
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | true |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/searcher/src/searcher/mod.rs | crates/searcher/src/searcher/mod.rs | use std::{
cell::RefCell,
cmp,
fs::File,
io::{self, Read},
path::Path,
};
use {
encoding_rs_io::DecodeReaderBytesBuilder,
grep_matcher::{LineTerminator, Match, Matcher},
};
use crate::{
line_buffer::{
self, BufferAllocation, DEFAULT_BUFFER_CAPACITY, LineBuffer,
LineBufferBuilder, LineBufferReader, alloc_error,
},
searcher::glue::{MultiLine, ReadByLine, SliceByLine},
sink::{Sink, SinkError},
};
pub use self::mmap::MmapChoice;
mod core;
mod glue;
mod mmap;
/// We use this type alias since we want the ergonomics of a matcher's `Match`
/// type, but in practice, we use it for arbitrary ranges, so give it a more
/// accurate name. This is only used in the searcher's internals.
type Range = Match;

/// The behavior of binary detection while searching.
///
/// Binary detection is the process of _heuristically_ identifying whether a
/// given chunk of data is binary or not, and then taking an action based on
/// the result of that heuristic. The motivation behind detecting binary data
/// is that binary data often indicates data that is undesirable to search
/// using textual patterns. Of course, there are many cases in which this isn't
/// true, which is why binary detection is disabled by default.
///
/// Unfortunately, binary detection works differently depending on the type of
/// search being executed:
///
/// 1. When performing a search using a fixed size buffer, binary detection is
///    applied to the buffer's contents as it is filled. Binary detection must
///    be applied to the buffer directly because binary files may not contain
///    line terminators, which could result in exorbitant memory usage.
/// 2. When performing a search using memory maps or by reading data off the
///    heap, then binary detection is only guaranteed to be applied to the
///    parts corresponding to a match. When `Quit` is enabled, then the first
///    few KB of the data are searched for binary data.
// Newtype over the line buffer's detection strategy so the policy can be
// configured without exposing the line buffer internals.
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct BinaryDetection(line_buffer::BinaryDetection);
impl BinaryDetection {
    /// No binary detection is performed. Data reported by the searcher may
    /// contain arbitrary bytes.
    ///
    /// This is the default.
    pub fn none() -> BinaryDetection {
        BinaryDetection(line_buffer::BinaryDetection::None)
    }

    /// Binary detection is performed by looking for the given byte.
    ///
    /// When searching is performed using a fixed size buffer, then the
    /// contents of that buffer are always searched for the presence of this
    /// byte. If it is found, then the underlying data is considered binary
    /// and the search stops as if it reached EOF.
    ///
    /// When searching is performed with the entire contents mapped into
    /// memory, then binary detection is more conservative. Namely, only a
    /// fixed sized region at the beginning of the contents are detected for
    /// binary data. As a compromise, any subsequent matching (or context)
    /// lines are also searched for binary data. If binary data is detected at
    /// any point, then the search stops as if it reached EOF.
    pub fn quit(binary_byte: u8) -> BinaryDetection {
        BinaryDetection(line_buffer::BinaryDetection::Quit(binary_byte))
    }

    /// Binary detection is performed by looking for the given byte, and
    /// replacing it with the line terminator configured on the searcher.
    /// (If the searcher is configured to use `CRLF` as the line terminator,
    /// then this byte is replaced by just `LF`.)
    ///
    /// When searching is performed using a fixed size buffer, then the
    /// contents of that buffer are always searched for the presence of this
    /// byte and replaced with the line terminator. In effect, the caller is
    /// guaranteed to never observe this byte while searching.
    ///
    /// When searching is performed with the entire contents mapped into
    /// memory, then this setting has no effect and is ignored.
    pub fn convert(binary_byte: u8) -> BinaryDetection {
        BinaryDetection(line_buffer::BinaryDetection::Convert(binary_byte))
    }

    /// If this binary detection uses the "quit" strategy, then this returns
    /// the byte that will cause a search to quit. In any other case, this
    /// returns `None`.
    pub fn quit_byte(&self) -> Option<u8> {
        if let line_buffer::BinaryDetection::Quit(b) = self.0 {
            Some(b)
        } else {
            None
        }
    }

    /// If this binary detection uses the "convert" strategy, then this returns
    /// the byte that will be replaced by the line terminator. In any other
    /// case, this returns `None`.
    pub fn convert_byte(&self) -> Option<u8> {
        if let line_buffer::BinaryDetection::Convert(b) = self.0 {
            Some(b)
        } else {
            None
        }
    }
}
/// An encoding to use when searching.
///
/// An encoding can be used to configure a [`SearcherBuilder`] to transcode
/// source data from an encoding to UTF-8 before searching.
///
/// An `Encoding` will always be cheap to clone.
// Wraps a `&'static` encoding reference, which is why cloning is trivial.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Encoding(&'static encoding_rs::Encoding);
impl Encoding {
/// Create a new encoding for the specified label.
///
/// The encoding label provided is mapped to an encoding via the set of
/// available choices specified in the
/// [Encoding Standard](https://encoding.spec.whatwg.org/#concept-encoding-get).
/// If the given label does not correspond to a valid encoding, then this
/// returns an error.
pub fn new(label: &str) -> Result<Encoding, ConfigError> {
let label = label.as_bytes();
match encoding_rs::Encoding::for_label_no_replacement(label) {
Some(encoding) => Ok(Encoding(encoding)),
None => {
Err(ConfigError::UnknownEncoding { label: label.to_vec() })
}
}
}
}
/// The internal configuration of a searcher. This is shared among several
/// search related types, but is only ever written to by the SearcherBuilder.
#[derive(Clone, Debug)]
pub struct Config {
    /// The line terminator to use.
    line_term: LineTerminator,
    /// Whether to invert matching.
    invert_match: bool,
    /// The number of lines after a match to include.
    after_context: usize,
    /// The number of lines before a match to include.
    before_context: usize,
    /// Whether to enable unbounded context or not.
    passthru: bool,
    /// Whether to count line numbers.
    line_number: bool,
    /// The maximum amount of heap memory to use.
    ///
    /// When not given, no explicit limit is enforced. When set to `0`, then
    /// only the memory map search strategy is available.
    heap_limit: Option<usize>,
    /// The memory map strategy.
    mmap: MmapChoice,
    /// The binary data detection strategy.
    binary: BinaryDetection,
    /// Whether to enable matching across multiple lines.
    multi_line: bool,
    /// An encoding that, when present, causes the searcher to transcode all
    /// input from the encoding to UTF-8.
    encoding: Option<Encoding>,
    /// Whether to do automatic transcoding based on a BOM or not.
    bom_sniffing: bool,
    /// Whether to stop searching when a non-matching line is found after a
    /// matching line.
    stop_on_nonmatch: bool,
    /// The maximum number of matches this searcher should emit.
    /// `None` means unlimited.
    max_matches: Option<u64>,
}

impl Default for Config {
    // Note: line numbers and BOM sniffing are ON by default; everything
    // else (context, inversion, multi-line, binary detection) is off.
    fn default() -> Config {
        Config {
            line_term: LineTerminator::default(),
            invert_match: false,
            after_context: 0,
            before_context: 0,
            passthru: false,
            line_number: true,
            heap_limit: None,
            mmap: MmapChoice::default(),
            binary: BinaryDetection::default(),
            multi_line: false,
            encoding: None,
            bom_sniffing: true,
            stop_on_nonmatch: false,
            max_matches: None,
        }
    }
}
impl Config {
    /// Return the maximal amount of lines needed to fulfill this
    /// configuration's context.
    ///
    /// If this returns `0`, then no context is ever needed.
    fn max_context(&self) -> usize {
        cmp::max(self.before_context, self.after_context)
    }

    /// Build a line buffer from this configuration.
    fn line_buffer(&self) -> LineBuffer {
        let mut builder = LineBufferBuilder::new();
        builder
            .line_terminator(self.line_term.as_byte())
            .binary_detection(self.binary.0);
        if let Some(limit) = self.heap_limit {
            // The initial capacity is at most the default; anything the
            // limit allows beyond that becomes the growth allowance, past
            // which allocation is an error.
            let capacity = cmp::min(limit, DEFAULT_BUFFER_CAPACITY);
            let additional = limit - capacity;
            builder
                .capacity(capacity)
                .buffer_alloc(BufferAllocation::Error(additional));
        }
        builder.build()
    }
}
/// An error that can occur when building a searcher.
///
/// This error occurs when a non-sensical configuration is present when trying
/// to construct a `Searcher` from a `SearcherBuilder`.
#[derive(Clone, Debug, Eq, PartialEq)]
#[non_exhaustive]
pub enum ConfigError {
    /// Indicates that the heap limit configuration prevents all possible
    /// search strategies from being used. For example, if the heap limit is
    /// set to 0 and memory map searching is disabled or unavailable.
    SearchUnavailable,
    /// Occurs when a matcher reports a line terminator that is different than
    /// the one configured in the searcher.
    MismatchedLineTerminators {
        /// The matcher's line terminator.
        matcher: LineTerminator,
        /// The searcher's line terminator.
        searcher: LineTerminator,
    },
    /// Occurs when no encoding could be found for a particular label.
    UnknownEncoding {
        /// The provided encoding label that could not be found.
        label: Vec<u8>,
    },
}
impl std::error::Error for ConfigError {}

// Human-readable rendering of configuration errors.
impl std::fmt::Display for ConfigError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match *self {
            ConfigError::SearchUnavailable => {
                write!(f, "grep config error: no available searchers")
            }
            ConfigError::MismatchedLineTerminators { matcher, searcher } => {
                write!(
                    f,
                    "grep config error: mismatched line terminators, \
                     matcher has {:?} but searcher has {:?}",
                    matcher, searcher
                )
            }
            ConfigError::UnknownEncoding { ref label } => write!(
                f,
                "grep config error: unknown encoding: {}",
                String::from_utf8_lossy(label),
            ),
        }
    }
}
/// A builder for configuring a searcher.
///
/// A search builder permits specifying the configuration of a searcher,
/// including options like whether to invert the search or to enable multi
/// line search.
///
/// Once a searcher has been built, it is beneficial to reuse that searcher
/// for multiple searches, if possible.
#[derive(Clone, Debug)]
pub struct SearcherBuilder {
    // The accumulated configuration; copied into the Searcher on build().
    config: Config,
}
impl Default for SearcherBuilder {
fn default() -> SearcherBuilder {
SearcherBuilder::new()
}
}
impl SearcherBuilder {
    /// Create a new searcher builder with a default configuration.
    pub fn new() -> SearcherBuilder {
        SearcherBuilder { config: Config::default() }
    }

    /// Build a searcher from this builder's configuration.
    pub fn build(&self) -> Searcher {
        let mut config = self.config.clone();
        if config.passthru {
            // Passthru subsumes explicit context, so zero out the counts.
            config.before_context = 0;
            config.after_context = 0;
        }

        let mut decode_builder = DecodeReaderBytesBuilder::new();
        decode_builder
            .encoding(self.config.encoding.as_ref().map(|e| e.0))
            .utf8_passthru(true)
            .strip_bom(self.config.bom_sniffing)
            .bom_override(true)
            .bom_sniffing(self.config.bom_sniffing);

        Searcher {
            config,
            decode_builder,
            // 8KB scratch buffer used when transcoding input to UTF-8.
            decode_buffer: RefCell::new(vec![0; 8 * (1 << 10)]),
            // NOTE(review): built from `self.config` rather than the locally
            // adjusted `config`; the two differ only in the context counts,
            // which `line_buffer()` does not read — confirm if that changes.
            line_buffer: RefCell::new(self.config.line_buffer()),
            multi_line_buffer: RefCell::new(vec![]),
        }
    }
    /// Set the line terminator that is used by the searcher.
    ///
    /// When using a searcher, if the matcher provided has a line terminator
    /// set, then it must be the same as this one. If they aren't, building
    /// a searcher will return an error.
    ///
    /// By default, this is set to `b'\n'`.
    pub fn line_terminator(
        &mut self,
        line_term: LineTerminator,
    ) -> &mut SearcherBuilder {
        self.config.line_term = line_term;
        self
    }

    /// Whether to invert matching, whereby lines that don't match are reported
    /// instead of reporting lines that do match.
    ///
    /// By default, this is disabled.
    pub fn invert_match(&mut self, yes: bool) -> &mut SearcherBuilder {
        self.config.invert_match = yes;
        self
    }

    /// Whether to count and include line numbers with matching lines.
    ///
    /// This is enabled by default. There is a small performance penalty
    /// associated with computing line numbers, so this can be disabled when
    /// this isn't desirable.
    pub fn line_number(&mut self, yes: bool) -> &mut SearcherBuilder {
        self.config.line_number = yes;
        self
    }

    /// Whether to enable multi line search or not.
    ///
    /// When multi line search is enabled, matches *may* match across multiple
    /// lines. Conversely, when multi line search is disabled, it is impossible
    /// for any match to span more than one line.
    ///
    /// **Warning:** multi line search requires having the entire contents to
    /// search mapped in memory at once. When searching files, memory maps
    /// will be used if possible and if they are enabled, which avoids using
    /// your program's heap. However, if memory maps cannot be used (e.g.,
    /// for searching streams like `stdin` or if transcoding is necessary),
    /// then the entire contents of the stream are read on to the heap before
    /// starting the search.
    ///
    /// This is disabled by default.
    pub fn multi_line(&mut self, yes: bool) -> &mut SearcherBuilder {
        self.config.multi_line = yes;
        self
    }

    /// Whether to include a fixed number of lines after every match.
    ///
    /// When this is set to a non-zero number, then the searcher will report
    /// `line_count` contextual lines after every match.
    ///
    /// This is set to `0` by default.
    pub fn after_context(
        &mut self,
        line_count: usize,
    ) -> &mut SearcherBuilder {
        self.config.after_context = line_count;
        self
    }

    /// Whether to include a fixed number of lines before every match.
    ///
    /// When this is set to a non-zero number, then the searcher will report
    /// `line_count` contextual lines before every match.
    ///
    /// This is set to `0` by default.
    pub fn before_context(
        &mut self,
        line_count: usize,
    ) -> &mut SearcherBuilder {
        self.config.before_context = line_count;
        self
    }

    /// Whether to enable the "passthru" feature or not.
    ///
    /// When passthru is enabled, it effectively treats all non-matching lines
    /// as contextual lines. In other words, enabling this is akin to
    /// requesting an unbounded number of before and after contextual lines.
    ///
    /// When passthru mode is enabled, any `before_context` or `after_context`
    /// settings are ignored by setting them to `0` (see `build`).
    ///
    /// This is disabled by default.
    pub fn passthru(&mut self, yes: bool) -> &mut SearcherBuilder {
        self.config.passthru = yes;
        self
    }

    /// Set an approximate limit on the amount of heap space used by a
    /// searcher.
    ///
    /// The heap limit is enforced in two scenarios:
    ///
    /// * When searching using a fixed size buffer, the heap limit controls
    ///   how big this buffer is allowed to be. Assuming contexts are disabled,
    ///   the minimum size of this buffer is the length (in bytes) of the
    ///   largest single line in the contents being searched. If any line
    ///   exceeds the heap limit, then an error will be returned.
    /// * When performing a multi line search, a fixed size buffer cannot be
    ///   used. Thus, the only choices are to read the entire contents on to
    ///   the heap, or use memory maps. In the former case, the heap limit set
    ///   here is enforced.
    ///
    /// If a heap limit is set to `0`, then no heap space is used. If there are
    /// no alternative strategies available for searching without heap space
    /// (e.g., memory maps are disabled), then the searcher will return an error
    /// immediately.
    ///
    /// By default, no limit is set.
    pub fn heap_limit(
        &mut self,
        bytes: Option<usize>,
    ) -> &mut SearcherBuilder {
        self.config.heap_limit = bytes;
        self
    }
/// Set the strategy to employ use of memory maps.
///
/// Currently, there are only two strategies that can be employed:
///
/// * **Automatic** - A searcher will use heuristics, including but not
/// limited to file size and platform, to determine whether to use memory
/// maps or not.
/// * **Never** - Memory maps will never be used. If multi line search is
/// enabled, then the entire contents will be read on to the heap before
/// searching begins.
///
/// The default behavior is **never**. Generally speaking, and perhaps
/// against conventional wisdom, memory maps don't necessarily enable
/// faster searching. For example, depending on the platform, using memory
/// maps while searching a large directory can actually be quite a bit
/// slower than using normal read calls because of the overhead of managing
/// the memory maps.
///
/// Memory maps can be faster in some cases however. On some platforms,
/// when searching a very large file that *is already in memory*, it can
/// be slightly faster to search it as a memory map instead of using
/// normal read calls.
///
/// Finally, memory maps have a somewhat complicated safety story in Rust.
/// If you aren't sure whether enabling memory maps is worth it, then just
/// don't bother with it.
///
/// **WARNING**: If your process is searching a file backed memory map
/// at the same time that file is truncated, then it's possible for the
/// process to terminate with a bus error.
pub fn memory_map(
&mut self,
strategy: MmapChoice,
) -> &mut SearcherBuilder {
self.config.mmap = strategy;
self
}
/// Set the binary detection strategy.
///
/// The binary detection strategy determines not only how the searcher
/// detects binary data, but how it responds to the presence of binary
/// data. See the [`BinaryDetection`] type for more information.
///
/// By default, binary detection is disabled.
pub fn binary_detection(
&mut self,
detection: BinaryDetection,
) -> &mut SearcherBuilder {
self.config.binary = detection;
self
}
/// Set the encoding used to read the source data before searching.
///
/// When an encoding is provided, then the source data is _unconditionally_
/// transcoded using the encoding, unless a BOM is present. If a BOM is
/// present, then the encoding indicated by the BOM is used instead. If the
/// transcoding process encounters an error, then bytes are replaced with
/// the Unicode replacement codepoint.
///
/// When no encoding is specified (the default), then BOM sniffing is
/// used (if it's enabled, which it is, by default) to determine whether
/// the source data is UTF-8 or UTF-16, and transcoding will be performed
/// automatically. If no BOM could be found, then the source data is
/// searched _as if_ it were UTF-8. However, so long as the source data is
/// at least ASCII compatible, then it is possible for a search to produce
/// useful results.
pub fn encoding(
&mut self,
encoding: Option<Encoding>,
) -> &mut SearcherBuilder {
self.config.encoding = encoding;
self
}
/// Enable automatic transcoding based on BOM sniffing.
///
/// When this is enabled and an explicit encoding is not set, then this
/// searcher will try to detect the encoding of the bytes being searched
/// by sniffing its byte-order mark (BOM). In particular, when this is
/// enabled, UTF-16 encoded files will be searched seamlessly.
///
/// When this is disabled and if an explicit encoding is not set, then
/// the bytes from the source stream will be passed through unchanged,
/// including its BOM, if one is present.
///
/// This is enabled by default.
pub fn bom_sniffing(&mut self, yes: bool) -> &mut SearcherBuilder {
self.config.bom_sniffing = yes;
self
}
/// Stop searching a file when a non-matching line is found after a
/// matching line.
///
/// This is useful for searching sorted files where it is expected that all
/// the matches will be on adjacent lines.
pub fn stop_on_nonmatch(
&mut self,
stop_on_nonmatch: bool,
) -> &mut SearcherBuilder {
self.config.stop_on_nonmatch = stop_on_nonmatch;
self
}
    /// Sets the maximum number of matches that should be emitted by this
    /// searcher.
    ///
    /// If multi line search is enabled and a match spans multiple lines, then
    /// that match is counted exactly once for the purposes of enforcing this
    /// limit, regardless of how many lines it spans.
    ///
    /// Note that `0` is a legal value. This will cause the searcher to
    /// immediately quit without searching anything.
    ///
    /// By default, no limit is set.
    #[inline]
    pub fn max_matches(&mut self, limit: Option<u64>) -> &mut SearcherBuilder {
        self.config.max_matches = limit;
        self
    }
}
/// A searcher executes searches over a haystack and writes results to a caller
/// provided sink.
///
/// Matches are detected via implementations of the `Matcher` trait, which must
/// be provided by the caller when executing a search.
///
/// When possible, a searcher should be reused.
#[derive(Clone, Debug)]
pub struct Searcher {
    /// The configuration for this searcher.
    ///
    /// We make most of these settings available to users of `Searcher` via
    /// public API methods, which can be queried in implementations of `Sink`
    /// if necessary.
    config: Config,
    /// A builder for constructing a streaming reader that transcodes source
    /// data according to either an explicitly specified encoding or via an
    /// automatically detected encoding via BOM sniffing.
    ///
    /// When no transcoding is needed, then the transcoder built will pass
    /// through the underlying bytes with no additional overhead.
    decode_builder: DecodeReaderBytesBuilder,
    /// A buffer that is used for transcoding scratch space.
    ///
    /// Wrapped in a `RefCell` for the same reason as `line_buffer` below.
    decode_buffer: RefCell<Vec<u8>>,
    /// A line buffer for use in line oriented searching.
    ///
    /// We wrap it in a RefCell to permit lending out borrows of `Searcher`
    /// to sinks. We still require a mutable borrow to execute a search, so
    /// we statically prevent callers from causing RefCell to panic at runtime
    /// due to a borrowing violation.
    line_buffer: RefCell<LineBuffer>,
    /// A buffer in which to store the contents of a reader when performing a
    /// multi line search. In particular, multi line searches cannot be
    /// performed incrementally, and need the entire haystack in memory at
    /// once.
    multi_line_buffer: RefCell<Vec<u8>>,
}
impl Searcher {
    /// Create a new searcher with a default configuration.
    ///
    /// This is equivalent to `SearcherBuilder::new().build()`.
    ///
    /// To configure the searcher (e.g., invert matching, enable memory maps,
    /// enable contexts, etc.), use the [`SearcherBuilder`].
    pub fn new() -> Searcher {
        SearcherBuilder::new().build()
    }
/// Execute a search over the file with the given path and write the
/// results to the given sink.
///
/// If memory maps are enabled and the searcher heuristically believes
/// memory maps will help the search run faster, then this will use
/// memory maps. For this reason, callers should prefer using this method
/// or `search_file` over the more generic `search_reader` when possible.
pub fn search_path<P, M, S>(
&mut self,
matcher: M,
path: P,
write_to: S,
) -> Result<(), S::Error>
where
P: AsRef<Path>,
M: Matcher,
S: Sink,
{
let path = path.as_ref();
let file = File::open(path).map_err(S::Error::error_io)?;
self.search_file_maybe_path(matcher, Some(path), &file, write_to)
}
/// Execute a search over a file and write the results to the given sink.
///
/// If memory maps are enabled and the searcher heuristically believes
/// memory maps will help the search run faster, then this will use
/// memory maps. For this reason, callers should prefer using this method
/// or `search_path` over the more generic `search_reader` when possible.
pub fn search_file<M, S>(
&mut self,
matcher: M,
file: &File,
write_to: S,
) -> Result<(), S::Error>
where
M: Matcher,
S: Sink,
{
self.search_file_maybe_path(matcher, None, file, write_to)
}
fn search_file_maybe_path<M, S>(
&mut self,
matcher: M,
path: Option<&Path>,
file: &File,
write_to: S,
) -> Result<(), S::Error>
where
M: Matcher,
S: Sink,
{
if let Some(mmap) = self.config.mmap.open(file, path) {
log::trace!("{:?}: searching via memory map", path);
return self.search_slice(matcher, &mmap, write_to);
}
// Fast path for multi-line searches of files when memory maps are not
// enabled. This pre-allocates a buffer roughly the size of the file,
// which isn't possible when searching an arbitrary std::io::Read.
if self.multi_line_with_matcher(&matcher) {
log::trace!(
"{:?}: reading entire file on to heap for mulitline",
path
);
self.fill_multi_line_buffer_from_file::<S>(file)?;
log::trace!("{:?}: searching via multiline strategy", path);
MultiLine::new(
self,
matcher,
&*self.multi_line_buffer.borrow(),
write_to,
)
.run()
} else {
log::trace!("{:?}: searching using generic reader", path);
self.search_reader(matcher, file, write_to)
}
}
    /// Execute a search over any implementation of `std::io::Read` and write
    /// the results to the given sink.
    ///
    /// When possible, this implementation will search the reader incrementally
    /// without reading it into memory. In some cases---for example, if multi
    /// line search is enabled---an incremental search isn't possible and the
    /// given reader is consumed completely and placed on the heap before
    /// searching begins. For this reason, when multi line search is enabled,
    /// one should try to use higher level APIs (e.g., searching by file or
    /// file path) so that memory maps can be used if they are available and
    /// enabled.
    pub fn search_reader<M, R, S>(
        &mut self,
        matcher: M,
        read_from: R,
        write_to: S,
    ) -> Result<(), S::Error>
    where
        M: Matcher,
        R: io::Read,
        S: Sink,
    {
        // Fail fast when the configuration cannot be searched at all, or
        // when the matcher's line terminator disagrees with the searcher's.
        self.check_config(&matcher).map_err(S::Error::error_config)?;
        // Scratch space for the transcoder. This borrow lasts for the rest
        // of the search; that's fine because every other buffer lives in its
        // own `RefCell`.
        let mut decode_buffer = self.decode_buffer.borrow_mut();
        let decoder = self
            .decode_builder
            .build_with_buffer(read_from, &mut *decode_buffer)
            .map_err(S::Error::error_io)?;
        if self.multi_line_with_matcher(&matcher) {
            log::trace!(
                "generic reader: reading everything to heap for multiline"
            );
            self.fill_multi_line_buffer_from_reader::<_, S>(decoder)?;
            log::trace!("generic reader: searching via multiline strategy");
            MultiLine::new(
                self,
                matcher,
                &*self.multi_line_buffer.borrow(),
                write_to,
            )
            .run()
        } else {
            let mut line_buffer = self.line_buffer.borrow_mut();
            let rdr = LineBufferReader::new(decoder, &mut *line_buffer);
            log::trace!("generic reader: searching via roll buffer strategy");
            ReadByLine::new(self, matcher, rdr, write_to).run()
        }
    }
/// Execute a search over the given slice and write the results to the
/// given sink.
pub fn search_slice<M, S>(
&mut self,
matcher: M,
slice: &[u8],
write_to: S,
) -> Result<(), S::Error>
where
M: Matcher,
S: Sink,
{
self.check_config(&matcher).map_err(S::Error::error_config)?;
// We can search the slice directly, unless we need to do transcoding.
if self.slice_needs_transcoding(slice) {
log::trace!(
"slice reader: needs transcoding, using generic reader"
);
return self.search_reader(matcher, slice, write_to);
}
if self.multi_line_with_matcher(&matcher) {
log::trace!("slice reader: searching via multiline strategy");
MultiLine::new(self, matcher, slice, write_to).run()
} else {
log::trace!("slice reader: searching via slice-by-line strategy");
SliceByLine::new(self, matcher, slice, write_to).run()
}
}
/// Set the binary detection method used on this searcher.
pub fn set_binary_detection(&mut self, detection: BinaryDetection) {
self.config.binary = detection.clone();
self.line_buffer.borrow_mut().set_binary_detection(detection.0);
}
/// Check that the searcher's configuration and the matcher are consistent
/// with each other.
fn check_config<M: Matcher>(&self, matcher: M) -> Result<(), ConfigError> {
if self.config.heap_limit == Some(0) && !self.config.mmap.is_enabled()
{
return Err(ConfigError::SearchUnavailable);
}
let matcher_line_term = match matcher.line_terminator() {
None => return Ok(()),
Some(line_term) => line_term,
};
if matcher_line_term != self.config.line_term {
return Err(ConfigError::MismatchedLineTerminators {
matcher: matcher_line_term,
searcher: self.config.line_term,
});
}
Ok(())
}
/// Returns true if and only if the given slice needs to be transcoded.
fn slice_needs_transcoding(&self, slice: &[u8]) -> bool {
self.config.encoding.is_some()
|| (self.config.bom_sniffing && slice_has_bom(slice))
}
}
/// The following methods permit querying the configuration of a searcher.
/// These can be useful in generic implementations of [`Sink`], where the
/// output may be tailored based on how the searcher is configured.
///
/// All of these are cheap accessors of the searcher's internal `Config`.
impl Searcher {
    /// Returns the line terminator used by this searcher.
    #[inline]
    pub fn line_terminator(&self) -> LineTerminator {
        self.config.line_term
    }
    /// Returns the type of binary detection configured on this searcher.
    #[inline]
    pub fn binary_detection(&self) -> &BinaryDetection {
        &self.config.binary
    }
    /// Returns true if and only if this searcher is configured to invert its
    /// search results. That is, matching lines are lines that do **not** match
    /// the searcher's matcher.
    #[inline]
    pub fn invert_match(&self) -> bool {
        self.config.invert_match
    }
    /// Returns true if and only if this searcher is configured to count line
    /// numbers.
    #[inline]
    pub fn line_number(&self) -> bool {
        self.config.line_number
    }
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | true |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/searcher/examples/search-stdin.rs | crates/searcher/examples/search-stdin.rs | use std::env;
use std::error::Error;
use std::io;
use std::process;
use grep_regex::RegexMatcher;
use grep_searcher::Searcher;
use grep_searcher::sinks::UTF8;
/// Entry point: run the example and exit non-zero on failure.
fn main() {
    match example() {
        Ok(()) => {}
        Err(err) => {
            eprintln!("{}", err);
            process::exit(1);
        }
    }
}
/// Search stdin for the pattern given as the first CLI argument, printing
/// each matching line prefixed with its line number.
///
/// Returns an error if no pattern was supplied, if the pattern fails to
/// compile or if the search itself fails.
fn example() -> Result<(), Box<dyn Error>> {
    let pattern = match env::args().nth(1) {
        Some(pattern) => pattern,
        None => {
            // No `format!` needed for a constant message: `&str` converts
            // into the boxed error type directly.
            return Err(From::from("Usage: search-stdin <pattern>"));
        }
    };
    let matcher = RegexMatcher::new(&pattern)?;
    Searcher::new().search_reader(
        &matcher,
        io::stdin(),
        UTF8(|lnum, line| {
            // `line` retains its terminator, so `print!` suffices.
            print!("{}:{}", lnum, line);
            Ok(true)
        }),
    )?;
    Ok(())
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/regex/src/config.rs | crates/regex/src/config.rs | use {
grep_matcher::{ByteSet, LineTerminator},
regex_automata::meta::Regex,
regex_syntax::{
ast,
hir::{self, Hir},
},
};
use crate::{
ast::AstAnalysis, ban, error::Error, non_matching::non_matching_bytes,
strip::strip_from_match,
};
/// Config represents the configuration of a regex matcher in this crate.
/// The configuration is itself a rough combination of the knobs found in
/// the `regex` crate itself, along with additional `grep-matcher` specific
/// options.
///
/// The configuration can be used to build a "configured" HIR expression. A
/// configured HIR expression is an HIR expression that is aware of the
/// configuration which generated it, and provides transformation on that HIR
/// such that the configuration is preserved.
#[derive(Clone, Debug)]
pub(crate) struct Config {
    // Case handling. See `Config::is_case_insensitive`.
    pub(crate) case_insensitive: bool,
    pub(crate) case_smart: bool,
    // Standard regex syntax/semantics flags, forwarded to the parser and
    // translator in `ConfiguredHIR::new`.
    pub(crate) multi_line: bool,
    pub(crate) dot_matches_new_line: bool,
    pub(crate) swap_greed: bool,
    pub(crate) ignore_whitespace: bool,
    pub(crate) unicode: bool,
    pub(crate) octal: bool,
    // Resource limits handed through to the regex compiler. See
    // `ConfiguredHIR::to_regex` and `ConfiguredHIR::new`.
    pub(crate) size_limit: usize,
    pub(crate) dfa_size_limit: usize,
    pub(crate) nest_limit: u32,
    // When set, patterns containing this terminator are either rejected or
    // stripped. See `Config::is_fixed_strings` and `strip_from_match`.
    pub(crate) line_terminator: Option<LineTerminator>,
    // When set, any pattern that *must* match this byte is rejected. See
    // `ban::check`.
    pub(crate) ban: Option<u8>,
    pub(crate) crlf: bool,
    pub(crate) word: bool,
    pub(crate) fixed_strings: bool,
    pub(crate) whole_line: bool,
}
impl Default for Config {
    fn default() -> Config {
        Config {
            // Matching semantics all start out disabled.
            case_insensitive: false,
            case_smart: false,
            multi_line: false,
            dot_matches_new_line: false,
            crlf: false,
            word: false,
            fixed_strings: false,
            whole_line: false,
            // Syntax options: Unicode on, everything else off.
            swap_greed: false,
            ignore_whitespace: false,
            unicode: true,
            octal: false,
            nest_limit: 250,
            // These size limits are much bigger than what's in the regex
            // crate by default.
            size_limit: 100 * (1 << 20),
            dfa_size_limit: 1000 * (1 << 20),
            // grep-specific knobs are unset by default.
            line_terminator: None,
            ban: None,
        }
    }
}
impl Config {
/// Use this configuration to build an HIR from the given patterns. The HIR
/// returned corresponds to a single regex that is an alternation of the
/// patterns given.
pub(crate) fn build_many<P: AsRef<str>>(
&self,
patterns: &[P],
) -> Result<ConfiguredHIR, Error> {
ConfiguredHIR::new(self.clone(), patterns)
}
/// Accounting for the `smart_case` config knob, return true if and only if
/// this pattern should be matched case insensitively.
fn is_case_insensitive(&self, analysis: &AstAnalysis) -> bool {
if self.case_insensitive {
return true;
}
if !self.case_smart {
return false;
}
analysis.any_literal() && !analysis.any_uppercase()
}
/// Returns whether the given patterns should be treated as "fixed strings"
/// literals. This is different from just querying the `fixed_strings` knob
/// in that if the knob is false, this will still return true in some cases
/// if the patterns are themselves indistinguishable from literals.
///
/// The main idea here is that if this returns true, then it is safe
/// to build an `regex_syntax::hir::Hir` value directly from the given
/// patterns as an alternation of `hir::Literal` values.
fn is_fixed_strings<P: AsRef<str>>(&self, patterns: &[P]) -> bool {
// When these are enabled, we really need to parse the patterns and
// let them go through the standard HIR translation process in order
// for case folding transforms to be applied.
if self.case_insensitive || self.case_smart {
return false;
}
// Even if whole_line or word is enabled, both of those things can
// be implemented by wrapping the Hir generated by an alternation of
// fixed string literals. So for here at least, we don't care about the
// word or whole_line settings.
if self.fixed_strings {
// ... but if any literal contains a line terminator, then we've
// got to bail out because this will ultimately result in an error.
if let Some(lineterm) = self.line_terminator {
for p in patterns.iter() {
if has_line_terminator(lineterm, p.as_ref()) {
return false;
}
}
}
return true;
}
// In this case, the only way we can hand construct the Hir is if none
// of the patterns contain meta characters. If they do, then we need to
// send them through the standard parsing/translation process.
for p in patterns.iter() {
let p = p.as_ref();
if p.chars().any(regex_syntax::is_meta_character) {
return false;
}
// Same deal as when fixed_strings is set above. If the pattern has
// a line terminator anywhere, then we need to bail out and let
// an error occur.
if let Some(lineterm) = self.line_terminator {
if has_line_terminator(lineterm, p) {
return false;
}
}
}
true
}
}
/// A "configured" HIR expression, which is aware of the configuration which
/// produced this HIR.
///
/// Since the configuration is tracked, values with this type can be
/// transformed into other HIR expressions (or regular expressions) in a way
/// that preserves the configuration. For example, the `fast_line_regex`
/// method will apply literal extraction to the inner HIR and use that to build
/// a new regex that matches the extracted literals in a way that is
/// consistent with the configuration that produced this HIR. For example, the
/// size limits set on the configured HIR will be propagated out to any
/// subsequently constructed HIR or regular expression.
#[derive(Clone, Debug)]
pub(crate) struct ConfiguredHIR {
    /// The configuration that produced `hir`.
    config: Config,
    /// The HIR for the alternation of all patterns, after any
    /// configuration-driven transformations.
    hir: Hir,
}
impl ConfiguredHIR {
    /// Parse the given patterns into a single HIR expression that represents
    /// an alternation of the patterns given.
    fn new<P: AsRef<str>>(
        config: Config,
        patterns: &[P],
    ) -> Result<ConfiguredHIR, Error> {
        // Fast path: when the patterns are indistinguishable from literals,
        // assemble the HIR directly without parsing or translating.
        let hir = if config.is_fixed_strings(patterns) {
            let mut alts = vec![];
            for p in patterns.iter() {
                alts.push(Hir::literal(p.as_ref().as_bytes()));
            }
            log::debug!(
                "assembling HIR from {} fixed string literals",
                alts.len()
            );
            let hir = Hir::alternation(alts);
            hir
        } else {
            // Each pattern is wrapped in a non-capturing group so that the
            // `|` joining below can't change any pattern's meaning.
            let mut alts = vec![];
            for p in patterns.iter() {
                alts.push(if config.fixed_strings {
                    format!("(?:{})", regex_syntax::escape(p.as_ref()))
                } else {
                    format!("(?:{})", p.as_ref())
                });
            }
            let pattern = alts.join("|");
            let ast = ast::parse::ParserBuilder::new()
                .nest_limit(config.nest_limit)
                .octal(config.octal)
                .ignore_whitespace(config.ignore_whitespace)
                .build()
                .parse(&pattern)
                .map_err(Error::generic)?;
            // The AST analysis feeds smart-case detection below.
            let analysis = AstAnalysis::from_ast(&ast);
            // `utf8(false)` permits expressions that match arbitrary bytes,
            // not just valid UTF-8.
            let mut hir = hir::translate::TranslatorBuilder::new()
                .utf8(false)
                .case_insensitive(config.is_case_insensitive(&analysis))
                .multi_line(config.multi_line)
                .dot_matches_new_line(config.dot_matches_new_line)
                .crlf(config.crlf)
                .swap_greed(config.swap_greed)
                .unicode(config.unicode)
                .build()
                .translate(&pattern, &ast)
                .map_err(Error::generic)?;
            if let Some(byte) = config.ban {
                ban::check(&hir, byte)?;
            }
            // We don't need to do this for the fixed-strings case above
            // because is_fixed_strings will return false if any pattern
            // contains a line terminator. Therefore, we don't need to strip
            // it.
            //
            // We go to some pains to avoid doing this in the fixed-strings
            // case because this can result in building a new HIR when ripgrep
            // is given a huge set of literals to search for. And this can
            // actually take a little time. It's not huge, but it's noticeable.
            hir = match config.line_terminator {
                None => hir,
                Some(line_term) => strip_from_match(hir, line_term)?,
            };
            hir
        };
        Ok(ConfiguredHIR { config, hir })
    }
    /// Return a reference to the underlying configuration.
    pub(crate) fn config(&self) -> &Config {
        &self.config
    }
    /// Return a reference to the underlying HIR.
    ///
    /// This borrows rather than clones; the HIR may be large.
    pub(crate) fn hir(&self) -> &Hir {
        &self.hir
    }
/// Convert this HIR to a regex that can be used for matching.
pub(crate) fn to_regex(&self) -> Result<Regex, Error> {
let meta = Regex::config()
.utf8_empty(false)
.nfa_size_limit(Some(self.config.size_limit))
// We don't expose a knob for this because the one-pass DFA is
// usually not a perf bottleneck for ripgrep. But we give it some
// extra room than the default.
.onepass_size_limit(Some(10 * (1 << 20)))
// Same deal here. The default limit for full DFAs is VERY small,
// but with ripgrep we can afford to spend a bit more time on
// building them I think.
.dfa_size_limit(Some(1 * (1 << 20)))
.dfa_state_limit(Some(1_000))
.hybrid_cache_capacity(self.config.dfa_size_limit);
Regex::builder()
.configure(meta)
.build_from_hir(&self.hir)
.map_err(Error::regex)
}
/// Compute the set of non-matching bytes for this HIR expression.
pub(crate) fn non_matching_bytes(&self) -> ByteSet {
non_matching_bytes(&self.hir)
}
    /// Returns the line terminator configured on this expression.
    ///
    /// When we have beginning/end anchors (NOT line anchors), the fast line
    /// searching path isn't quite correct. Or at least, doesn't match the slow
    /// path. Namely, the slow path strips line terminators while the fast path
    /// does not. Since '$' (when multi-line mode is disabled) doesn't match at
    /// line boundaries, the existence of a line terminator might cause it to
    /// not match when it otherwise would with the line terminator stripped.
    ///
    /// Since searching with text anchors is exceptionally rare in the context
    /// of line oriented searching (multi-line mode is basically always
    /// enabled), we just disable this optimization when there are text
    /// anchors. We disable it by not returning a line terminator, since
    /// without a line terminator, the fast search path can't be executed.
    ///
    /// Actually, the above is no longer quite correct. Later on, another
    /// optimization was added where if the line terminator was in the set of
    /// bytes that was guaranteed to never be part of a match, then the higher
    /// level search infrastructure assumes that the fast line-by-line search
    /// path can still be taken. This optimization applies when multi-line
    /// search (not multi-line mode) is enabled. In that case, there is no
    /// configured line terminator since the regex is permitted to match a
    /// line terminator. But if the regex is guaranteed to never match across
    /// multiple lines despite multi-line search being requested, we can still
    /// do the faster and more flexible line-by-line search. This is why the
    /// non-matching extraction routine removes `\n` when `\A` and `\z` are
    /// present even though that's not quite correct...
    ///
    /// See: <https://github.com/BurntSushi/ripgrep/issues/2260>
    pub(crate) fn line_terminator(&self) -> Option<LineTerminator> {
        if self.hir.properties().look_set().contains_anchor_haystack() {
            // Haystack anchors present: report no terminator so the fast
            // line-oriented path is disabled (see doc comment above).
            None
        } else {
            self.config.line_terminator
        }
    }
/// Turns this configured HIR into an equivalent one, but where it must
/// match at the start and end of a line.
pub(crate) fn into_whole_line(self) -> ConfiguredHIR {
let line_anchor_start = Hir::look(self.line_anchor_start());
let line_anchor_end = Hir::look(self.line_anchor_end());
let hir =
Hir::concat(vec![line_anchor_start, self.hir, line_anchor_end]);
ConfiguredHIR { config: self.config, hir }
}
/// Turns this configured HIR into an equivalent one, but where it must
/// match at word boundaries.
pub(crate) fn into_word(self) -> ConfiguredHIR {
let hir = Hir::concat(vec![
Hir::look(if self.config.unicode {
hir::Look::WordStartHalfUnicode
} else {
hir::Look::WordStartHalfAscii
}),
self.hir,
Hir::look(if self.config.unicode {
hir::Look::WordEndHalfUnicode
} else {
hir::Look::WordEndHalfAscii
}),
]);
ConfiguredHIR { config: self.config, hir }
}
/// Returns the "start line" anchor for this configuration.
fn line_anchor_start(&self) -> hir::Look {
if self.config.crlf {
hir::Look::StartCRLF
} else {
hir::Look::StartLF
}
}
/// Returns the "end line" anchor for this configuration.
fn line_anchor_end(&self) -> hir::Look {
if self.config.crlf { hir::Look::EndCRLF } else { hir::Look::EndLF }
}
}
/// Returns true if the given literal string contains any byte from the line
/// terminator given.
fn has_line_terminator(lineterm: LineTerminator, literal: &str) -> bool {
    let bytes = literal.as_bytes();
    if lineterm.is_crlf() {
        // CRLF terminators: either byte on its own counts.
        bytes.contains(&b'\r') || bytes.contains(&b'\n')
    } else {
        bytes.contains(&lineterm.as_byte())
    }
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/regex/src/ban.rs | crates/regex/src/ban.rs | use regex_syntax::hir::{
self, ClassBytesRange, ClassUnicodeRange, Hir, HirKind,
};
use crate::error::{Error, ErrorKind};
/// Returns an error when a sub-expression in `expr` must match `byte`.
///
/// A sub-expression "must" match the byte when it is a literal containing
/// it, or a character class whose sole member is that byte. Classes with
/// more than one member can always avoid the byte, so they are permitted.
pub(crate) fn check(expr: &Hir, byte: u8) -> Result<(), Error> {
    assert!(byte.is_ascii(), "ban byte must be ASCII");
    let ch = char::from(byte);
    let invalid = || Err(Error::new(ErrorKind::Banned(byte)));
    match *expr.kind() {
        HirKind::Empty => {}
        HirKind::Literal(hir::Literal(ref lit)) => {
            // `contains` instead of `find(..).is_some()`.
            if lit.contains(&byte) {
                return invalid();
            }
        }
        HirKind::Class(hir::Class::Unicode(ref cls)) => {
            // Only a singleton class is *required* to match the byte.
            if cls.ranges().iter().map(|r| r.len()).sum::<usize>() == 1 {
                let contains =
                    |r: &ClassUnicodeRange| r.start() <= ch && ch <= r.end();
                if cls.ranges().iter().any(contains) {
                    return invalid();
                }
            }
        }
        HirKind::Class(hir::Class::Bytes(ref cls)) => {
            if cls.ranges().iter().map(|r| r.len()).sum::<usize>() == 1 {
                let contains =
                    |r: &ClassBytesRange| r.start() <= byte && byte <= r.end();
                if cls.ranges().iter().any(contains) {
                    return invalid();
                }
            }
        }
        HirKind::Look(_) => {}
        HirKind::Repetition(ref x) => check(&x.sub, byte)?,
        HirKind::Capture(ref x) => check(&x.sub, byte)?,
        // Concatenations and alternations are checked identically: every
        // sub-expression is visited.
        HirKind::Concat(ref xs) | HirKind::Alternation(ref xs) => {
            for x in xs.iter() {
                check(x, byte)?;
            }
        }
    };
    Ok(())
}
#[cfg(test)]
mod tests {
    use regex_syntax::Parser;
    /// Returns true when the given pattern is detected to contain the given
    /// banned byte.
    fn check(pattern: &str, byte: u8) -> bool {
        let hir = Parser::new().parse(pattern).unwrap();
        super::check(&hir, byte).is_err()
    }
    #[test]
    fn various() {
        assert!(check(r"\x00", 0));
        assert!(check(r"a\x00", 0));
        assert!(check(r"\x00b", 0));
        assert!(check(r"a\x00b", 0));
        assert!(check(r"\x00|ab", 0));
        assert!(check(r"ab|\x00", 0));
        assert!(check(r"\x00?", 0));
        assert!(check(r"(\x00)", 0));
        assert!(check(r"[\x00]", 0));
        assert!(check(r"[^[^\x00]]", 0));
        // A class with more than one member might avoid the banned byte,
        // so these are not rejected.
        assert!(!check(r"[^\x00]", 0));
        assert!(!check(r"[\x00a]", 0));
    }
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/regex/src/ast.rs | crates/regex/src/ast.rs | use regex_syntax::ast::{self, Ast};
/// The results of analyzing AST of a regular expression (e.g., for supporting
/// smart case).
///
/// Both flags start out `false` and only ever flip to `true` as the AST is
/// traversed.
#[derive(Clone, Debug)]
pub(crate) struct AstAnalysis {
    /// True if and only if a literal uppercase character occurs in the regex.
    any_uppercase: bool,
    /// True if and only if the regex contains any literal at all.
    any_literal: bool,
}
impl AstAnalysis {
/// Returns a `AstAnalysis` value by doing analysis on the AST of `pattern`.
///
/// If `pattern` is not a valid regular expression, then `None` is
/// returned.
#[cfg(test)]
pub(crate) fn from_pattern(pattern: &str) -> Option<AstAnalysis> {
regex_syntax::ast::parse::Parser::new()
.parse(pattern)
.map(|ast| AstAnalysis::from_ast(&ast))
.ok()
}
/// Perform an AST analysis given the AST.
pub(crate) fn from_ast(ast: &Ast) -> AstAnalysis {
let mut analysis = AstAnalysis::new();
analysis.from_ast_impl(ast);
analysis
}
    /// Returns true if and only if a literal uppercase character occurs in
    /// the pattern.
    ///
    /// For example, a pattern like `\pL` contains no uppercase literals,
    /// even though `L` is uppercase and the `\pL` class contains uppercase
    /// characters.
    pub(crate) fn any_uppercase(&self) -> bool {
        self.any_uppercase
    }
    /// Returns true if and only if the regex contains any literal at all.
    ///
    /// For example, a pattern like `\pL` reports `false`, but a pattern like
    /// `\pLfoo` reports `true`.
    pub(crate) fn any_literal(&self) -> bool {
        self.any_literal
    }
    /// Creates a new `AstAnalysis` value with an initial configuration
    /// (both flags start out `false`).
    fn new() -> AstAnalysis {
        AstAnalysis { any_uppercase: false, any_literal: false }
    }
fn from_ast_impl(&mut self, ast: &Ast) {
if self.done() {
return;
}
match *ast {
Ast::Empty(_) => {}
Ast::Flags(_)
| Ast::Dot(_)
| Ast::Assertion(_)
| Ast::ClassUnicode(_)
| Ast::ClassPerl(_) => {}
Ast::Literal(ref x) => {
self.from_ast_literal(x);
}
Ast::ClassBracketed(ref x) => {
self.from_ast_class_set(&x.kind);
}
Ast::Repetition(ref x) => {
self.from_ast_impl(&x.ast);
}
Ast::Group(ref x) => {
self.from_ast_impl(&x.ast);
}
Ast::Alternation(ref alt) => {
for x in &alt.asts {
self.from_ast_impl(x);
}
}
Ast::Concat(ref alt) => {
for x in &alt.asts {
self.from_ast_impl(x);
}
}
}
}
fn from_ast_class_set(&mut self, ast: &ast::ClassSet) {
if self.done() {
return;
}
match *ast {
ast::ClassSet::Item(ref item) => {
self.from_ast_class_set_item(item);
}
ast::ClassSet::BinaryOp(ref x) => {
self.from_ast_class_set(&x.lhs);
self.from_ast_class_set(&x.rhs);
}
}
}
fn from_ast_class_set_item(&mut self, ast: &ast::ClassSetItem) {
if self.done() {
return;
}
match *ast {
ast::ClassSetItem::Empty(_)
| ast::ClassSetItem::Ascii(_)
| ast::ClassSetItem::Unicode(_)
| ast::ClassSetItem::Perl(_) => {}
ast::ClassSetItem::Literal(ref x) => {
self.from_ast_literal(x);
}
ast::ClassSetItem::Range(ref x) => {
self.from_ast_literal(&x.start);
self.from_ast_literal(&x.end);
}
ast::ClassSetItem::Bracketed(ref x) => {
self.from_ast_class_set(&x.kind);
}
ast::ClassSetItem::Union(ref union) => {
for x in &union.items {
self.from_ast_class_set_item(x);
}
}
}
}
fn from_ast_literal(&mut self, ast: &ast::Literal) {
self.any_literal = true;
self.any_uppercase = self.any_uppercase || ast.c.is_uppercase();
}
/// Returns true if and only if the attributes can never change no matter
/// what other AST it might see.
fn done(&self) -> bool {
self.any_uppercase && self.any_literal
}
}
#[cfg(test)]
mod tests {
    use super::*;

    fn analysis(pattern: &str) -> AstAnalysis {
        AstAnalysis::from_pattern(pattern).unwrap()
    }

    #[test]
    fn various() {
        let x = analysis("");
        assert!(!x.any_uppercase);
        assert!(!x.any_literal);
        let x = analysis("foo");
        assert!(!x.any_uppercase);
        assert!(x.any_literal);
        let x = analysis("Foo");
        assert!(x.any_uppercase);
        assert!(x.any_literal);
        let x = analysis("foO");
        assert!(x.any_uppercase);
        assert!(x.any_literal);
        // Escapes and named classes never count as uppercase literals, even
        // though the classes may contain uppercase characters.
        let x = analysis(r"foo\\");
        assert!(!x.any_uppercase);
        assert!(x.any_literal);
        let x = analysis(r"foo\w");
        assert!(!x.any_uppercase);
        assert!(x.any_literal);
        let x = analysis(r"foo\S");
        assert!(!x.any_uppercase);
        assert!(x.any_literal);
        let x = analysis(r"foo\p{Ll}");
        assert!(!x.any_uppercase);
        assert!(x.any_literal);
        let x = analysis(r"foo[a-z]");
        assert!(!x.any_uppercase);
        assert!(x.any_literal);
        // Explicit class ranges DO count: `A` and `Z` are written literally.
        let x = analysis(r"foo[A-Z]");
        assert!(x.any_uppercase);
        assert!(x.any_literal);
        let x = analysis(r"foo[\S\t]");
        assert!(!x.any_uppercase);
        assert!(x.any_literal);
        // `\\S` is an escaped backslash followed by a literal uppercase `S`.
        let x = analysis(r"foo\\S");
        assert!(x.any_uppercase);
        assert!(x.any_literal);
        let x = analysis(r"\p{Ll}");
        assert!(!x.any_uppercase);
        assert!(!x.any_literal);
        let x = analysis(r"aBc\w");
        assert!(x.any_uppercase);
        assert!(x.any_literal);
        // `\u0061` is the escape form of lowercase `a`.
        let x = analysis(r"a\u0061");
        assert!(!x.any_uppercase);
        assert!(x.any_literal);
    }
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/regex/src/lib.rs | crates/regex/src/lib.rs | /*!
An implementation of `grep-matcher`'s `Matcher` trait for Rust's regex engine.
*/
#![deny(missing_docs)]
pub use crate::{
error::{Error, ErrorKind},
matcher::{RegexCaptures, RegexMatcher, RegexMatcherBuilder},
};
mod ast;
mod ban;
mod config;
mod error;
mod literal;
mod matcher;
mod non_matching;
mod strip;
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/regex/src/strip.rs | crates/regex/src/strip.rs | use {
grep_matcher::LineTerminator,
regex_syntax::hir::{self, Hir, HirKind},
};
use crate::error::{Error, ErrorKind};
/// Return an HIR that is guaranteed to never match the given line terminator,
/// if possible.
///
/// If the transformation isn't possible, then an error is returned.
///
/// In general, if a literal line terminator occurs anywhere in the HIR, then
/// this will return an error. However, if the line terminator occurs within
/// a character class with at least one other character (that isn't also a line
/// terminator), then the line terminator is simply stripped from that class.
///
/// If the given line terminator is not ASCII, then this function returns an
/// error.
///
/// Note that as of regex 1.9, this routine could theoretically be implemented
/// without returning an error. Namely, for example, we could turn
/// `foo\nbar` into `foo[a&&b]bar`. That is, replace line terminator with a
/// sub-expression that can never match anything. Thus, ripgrep would accept
/// such regexes and just silently not match anything. Regex versions prior
/// to 1.8 don't support such constructs. I ended up deciding to leave the
/// existing behavior of returning an error instead. For example:
///
/// ```text
/// $ echo -n 'foo\nbar\n' | rg 'foo\nbar'
/// the literal '"\n"' is not allowed in a regex
///
/// Consider enabling multiline mode with the --multiline flag (or -U for short).
/// When multiline mode is enabled, new line characters can be matched.
/// ```
///
/// This looks like a good error message to me, and even suggests a flag that
/// the user can use instead.
pub(crate) fn strip_from_match(
    expr: Hir,
    line_term: LineTerminator,
) -> Result<Hir, Error> {
    // CRLF terminators require stripping both bytes; every other
    // configuration is a single ASCII byte.
    if !line_term.is_crlf() {
        return strip_from_match_ascii(expr, line_term.as_byte());
    }
    strip_from_match_ascii(expr, b'\r')
        .and_then(|stripped| strip_from_match_ascii(stripped, b'\n'))
}
/// The implementation of strip_from_match. The given byte must be ASCII.
/// This function returns an error otherwise. It also returns an error if
/// it couldn't remove `\n` from the given regex without leaving an empty
/// character class in its place.
fn strip_from_match_ascii(expr: Hir, byte: u8) -> Result<Hir, Error> {
if !byte.is_ascii() {
return Err(Error::new(ErrorKind::InvalidLineTerminator(byte)));
}
let ch = char::from(byte);
let invalid = || Err(Error::new(ErrorKind::NotAllowed(ch.to_string())));
Ok(match expr.into_kind() {
HirKind::Empty => Hir::empty(),
HirKind::Literal(hir::Literal(lit)) => {
if lit.iter().find(|&&b| b == byte).is_some() {
return invalid();
}
Hir::literal(lit)
}
HirKind::Class(hir::Class::Unicode(mut cls)) => {
if cls.ranges().is_empty() {
return Ok(Hir::class(hir::Class::Unicode(cls)));
}
let remove = hir::ClassUnicode::new(Some(
hir::ClassUnicodeRange::new(ch, ch),
));
cls.difference(&remove);
if cls.ranges().is_empty() {
return invalid();
}
Hir::class(hir::Class::Unicode(cls))
}
HirKind::Class(hir::Class::Bytes(mut cls)) => {
if cls.ranges().is_empty() {
return Ok(Hir::class(hir::Class::Bytes(cls)));
}
let remove = hir::ClassBytes::new(Some(
hir::ClassBytesRange::new(byte, byte),
));
cls.difference(&remove);
if cls.ranges().is_empty() {
return invalid();
}
Hir::class(hir::Class::Bytes(cls))
}
HirKind::Look(x) => Hir::look(x),
HirKind::Repetition(mut x) => {
x.sub = Box::new(strip_from_match_ascii(*x.sub, byte)?);
Hir::repetition(x)
}
HirKind::Capture(mut x) => {
x.sub = Box::new(strip_from_match_ascii(*x.sub, byte)?);
Hir::capture(x)
}
HirKind::Concat(xs) => {
let xs = xs
.into_iter()
.map(|e| strip_from_match_ascii(e, byte))
.collect::<Result<Vec<Hir>, Error>>()?;
Hir::concat(xs)
}
HirKind::Alternation(xs) => {
let xs = xs
.into_iter()
.map(|e| strip_from_match_ascii(e, byte))
.collect::<Result<Vec<Hir>, Error>>()?;
Hir::alternation(xs)
}
})
}
#[cfg(test)]
mod tests {
    use regex_syntax::Parser;

    use super::{LineTerminator, strip_from_match};
    use crate::error::Error;

    fn roundtrip(pattern: &str, byte: u8) -> String {
        roundtrip_line_term(pattern, LineTerminator::byte(byte)).unwrap()
    }

    fn roundtrip_crlf(pattern: &str) -> String {
        roundtrip_line_term(pattern, LineTerminator::crlf()).unwrap()
    }

    fn roundtrip_err(pattern: &str, byte: u8) -> Result<String, Error> {
        roundtrip_line_term(pattern, LineTerminator::byte(byte))
    }

    // Parses `pattern`, strips the line terminator and renders the resulting
    // HIR back to its string form for easy comparison.
    fn roundtrip_line_term(
        pattern: &str,
        line_term: LineTerminator,
    ) -> Result<String, Error> {
        let expr1 = Parser::new().parse(pattern).unwrap();
        let expr2 = strip_from_match(expr1, line_term)?;
        Ok(expr2.to_string())
    }

    #[test]
    fn various() {
        assert_eq!(roundtrip(r"[a\n]", b'\n'), "a");
        assert_eq!(roundtrip(r"[a\n]", b'a'), "\n");
        assert_eq!(roundtrip_crlf(r"[a\n]"), "a");
        assert_eq!(roundtrip_crlf(r"[a\r]"), "a");
        assert_eq!(roundtrip_crlf(r"[a\r\n]"), "a");
        assert_eq!(roundtrip(r"(?-u)\s", b'a'), r"(?-u:[\x09-\x0D\x20])");
        assert_eq!(roundtrip(r"(?-u)\s", b'\n'), r"(?-u:[\x09\x0B-\x0D\x20])");
        // A literal line terminator is rejected regardless of which escape
        // form was used to spell it.
        assert!(roundtrip_err(r"\n", b'\n').is_err());
        assert!(roundtrip_err(r"abc\n", b'\n').is_err());
        assert!(roundtrip_err(r"\nabc", b'\n').is_err());
        assert!(roundtrip_err(r"abc\nxyz", b'\n').is_err());
        assert!(roundtrip_err(r"\x0A", b'\n').is_err());
        assert!(roundtrip_err(r"\u000A", b'\n').is_err());
        assert!(roundtrip_err(r"\U0000000A", b'\n').is_err());
        assert!(roundtrip_err(r"\u{A}", b'\n').is_err());
        assert!(roundtrip_err("\n", b'\n').is_err());
    }
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/regex/src/error.rs | crates/regex/src/error.rs | /// An error that can occur in this crate.
///
/// Generally, this error corresponds to problems building a regular
/// expression, whether it's in parsing, compilation or a problem with
/// guaranteeing a configured optimization.
#[derive(Clone, Debug)]
pub struct Error {
    /// The specific category of failure, exposed via `Error::kind`.
    kind: ErrorKind,
}
impl Error {
    /// Creates a new error of the given kind.
    pub(crate) fn new(kind: ErrorKind) -> Error {
        Error { kind }
    }

    /// Converts a meta-regex build error into this crate's error type.
    ///
    /// Size-limit failures get a dedicated message. If the build error wraps
    /// a syntax error, that inner error is stringified instead of the outer
    /// build error; otherwise the build error itself is stringified.
    pub(crate) fn regex(err: regex_automata::meta::BuildError) -> Error {
        if let Some(size_limit) = err.size_limit() {
            let kind = ErrorKind::Regex(format!(
                "compiled regex exceeds size limit of {size_limit}",
            ));
            Error { kind }
        } else if let Some(ref err) = err.syntax_error() {
            // Note: `err` is shadowed here, so this stringifies the inner
            // *syntax* error, not the whole build error.
            Error::generic(err)
        } else {
            Error::generic(err)
        }
    }

    /// Wraps any error by stringifying it into `ErrorKind::Regex`.
    pub(crate) fn generic<E: std::error::Error>(err: E) -> Error {
        Error { kind: ErrorKind::Regex(err.to_string()) }
    }

    /// Return the kind of this error.
    pub fn kind(&self) -> &ErrorKind {
        &self.kind
    }
}
/// The kind of an error that can occur.
///
/// This enum is `#[non_exhaustive]`: new kinds may be added in the future,
/// so downstream matches must include a wildcard arm.
#[derive(Clone, Debug)]
#[non_exhaustive]
pub enum ErrorKind {
    /// An error that occurred as a result of parsing a regular expression.
    /// This can be a syntax error or an error that results from attempting to
    /// compile a regular expression that is too big.
    ///
    /// The string here is the underlying error converted to a string.
    Regex(String),
    /// An error that occurs when a building a regex that isn't permitted to
    /// match a line terminator. In general, building the regex will do its
    /// best to make matching a line terminator impossible (e.g., by removing
    /// `\n` from the `\s` character class), but if the regex contains a
    /// `\n` literal, then there is no reasonable choice that can be made and
    /// therefore an error is reported.
    ///
    /// The string is the literal sequence found in the regex that is not
    /// allowed.
    NotAllowed(String),
    /// This error occurs when a non-ASCII line terminator was provided.
    ///
    /// The invalid byte is included in this error.
    InvalidLineTerminator(u8),
    /// Occurs when a banned byte was found in a pattern.
    Banned(u8),
}
impl std::error::Error for Error {}

impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // `as_bstr` gives a printable rendering of a (possibly non-UTF-8)
        // byte, which is what we want for raw line terminator bytes.
        use bstr::ByteSlice;

        match self.kind {
            ErrorKind::Regex(ref msg) => f.write_str(msg),
            ErrorKind::NotAllowed(ref lit) => {
                write!(f, "the literal {lit:?} is not allowed in a regex")
            }
            ErrorKind::InvalidLineTerminator(byte) => {
                let printable = [byte];
                write!(
                    f,
                    "line terminators must be ASCII, but {:?} is not",
                    printable.as_bstr(),
                )
            }
            ErrorKind::Banned(byte) => {
                let printable = [byte];
                write!(
                    f,
                    "pattern contains {:?} but it is impossible to match",
                    printable.as_bstr(),
                )
            }
        }
    }
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/regex/src/literal.rs | crates/regex/src/literal.rs | use {
regex_automata::meta::Regex,
regex_syntax::hir::{
self, Hir,
literal::{Literal, Seq},
},
};
use crate::{config::ConfiguredHIR, error::Error};
/// A type that encapsulates "inner" literal extraction from a regex.
///
/// It uses a huge pile of heuristics to try to pluck out literals from a regex
/// that are in turn used to build a simpler regex that is more amenable to
/// optimization.
///
/// The main idea underlying the validity of this technique is the fact
/// that ripgrep searches individuals lines and not across lines. (Unless
/// -U/--multiline is enabled.) Namely, we can pluck literals out of the regex,
/// search for them, find the bounds of the line in which that literal occurs
/// and then run the original regex on only that line. This overall works
/// really really well in throughput oriented searches because it potentially
/// allows ripgrep to spend a lot more time in a fast vectorized routine for
/// finding literals as opposed to the (much) slower regex engine.
///
/// This optimization was far more important in the old days, but since then,
/// Rust's regex engine has actually grown its own (albeit limited) support for
/// inner literal optimizations. So this technique doesn't apply as much as it
/// used to.
///
/// A good example of a regex where this particular extractor helps is
/// `\s+(Sherlock|[A-Z]atso[a-z]|Moriarty)\s+`. The `[A-Z]` before the `atso`
/// in particular is what inhibits the regex engine's own inner literal
/// optimizations from kicking in. This particular regex also did not have any
/// inner literals extracted in the old implementation (ripgrep <=13). So this
/// particular implementation represents a strict improvement from both the old
/// implementation and from the regex engine's own optimizations. (Which could
/// in theory be improved still.)
#[derive(Clone, Debug)]
pub(crate) struct InnerLiterals {
    /// The extracted literal sequence. An infinite sequence means "no usable
    /// literals" and causes `one_regex` to return `None`.
    seq: Seq,
}
impl InnerLiterals {
    /// Create a set of inner literals from the given HIR expression.
    ///
    /// If no line terminator was configured, then this always declines to
    /// extract literals because the inner literal optimization may not be
    /// valid.
    ///
    /// Note that this requires the actual regex that will be used for a search
    /// because it will query some state about the compiled regex. That state
    /// may influence inner literal extraction.
    pub(crate) fn new(chir: &ConfiguredHIR, re: &Regex) -> InnerLiterals {
        // If there's no line terminator, then the inner literal optimization
        // at this level is not valid.
        if chir.config().line_terminator.is_none() {
            log::trace!(
                "skipping inner literal extraction, \
                 no line terminator is set"
            );
            return InnerLiterals::none();
        }
        // If we believe the regex is already accelerated, then just let
        // the regex engine do its thing. We'll skip the inner literal
        // optimization.
        //
        // ... but only if the regex doesn't have any Unicode word boundaries.
        // If it does, there's enough of a chance of the regex engine falling
        // back to a slower engine that it's worth trying our own inner literal
        // optimization.
        if re.is_accelerated() {
            if !chir.hir().properties().look_set().contains_word_unicode() {
                log::trace!(
                    "skipping inner literal extraction, \
                     existing regex is believed to already be accelerated",
                );
                return InnerLiterals::none();
            }
        }
        // In this case, we pretty much know that the regex engine will handle
        // it as best as possible, even if it isn't reported as accelerated.
        if chir.hir().properties().is_alternation_literal() {
            log::trace!(
                "skipping inner literal extraction, \
                 found alternation of literals, deferring to regex engine",
            );
            return InnerLiterals::none();
        }
        let seq = Extractor::new().extract_untagged(chir.hir());
        InnerLiterals { seq }
    }

    /// Returns an infinite set of inner literals, such that it can never
    /// produce a matcher.
    pub(crate) fn none() -> InnerLiterals {
        InnerLiterals { seq: Seq::infinite() }
    }

    /// If it is deemed advantageous to do so (via various suspicious
    /// heuristics), this will return a single regular expression pattern that
    /// matches a subset of the language matched by the regular expression that
    /// generated these literal sets. The idea here is that the pattern
    /// returned by this method is much cheaper to search for. i.e., It is
    /// usually a single literal or an alternation of literals.
    pub(crate) fn one_regex(&self) -> Result<Option<Regex>, Error> {
        // An infinite sequence has no concrete literals to search for.
        let Some(lits) = self.seq.literals() else { return Ok(None) };
        if lits.is_empty() {
            return Ok(None);
        }
        // The prefilter is a plain alternation of the extracted literals.
        let mut alts = vec![];
        for lit in lits.iter() {
            alts.push(Hir::literal(lit.as_bytes()));
        }
        let hir = Hir::alternation(alts);
        log::debug!("extracted fast line regex: {:?}", hir.to_string());
        let re = Regex::builder()
            .configure(Regex::config().utf8_empty(false))
            .build_from_hir(&hir)
            .map_err(Error::regex)?;
        Ok(Some(re))
    }
}
/// An inner literal extractor.
///
/// This is a somewhat stripped down version of the extractor from
/// regex-syntax. The main difference is that we try to identify a "best" set
/// of required literals while traversing the HIR.
#[derive(Debug)]
struct Extractor {
    /// Maximum number of characters/bytes a single character class may
    /// expand into before extraction gives up on it (see
    /// `class_over_limit_*`).
    limit_class: usize,
    /// Maximum number of times a bounded repetition is unrolled.
    limit_repeat: usize,
    /// Maximum length of any single extracted literal; longer literals are
    /// truncated via `keep_first_bytes`.
    limit_literal_len: usize,
    /// Maximum total number of literals in an extracted sequence.
    limit_total: usize,
}
impl Extractor {
    /// Create a new inner literal extractor with a default configuration.
    fn new() -> Extractor {
        Extractor {
            limit_class: 10,
            limit_repeat: 10,
            limit_literal_len: 100,
            limit_total: 64,
        }
    }

    /// Execute the extractor at the top-level and return an untagged sequence
    /// of literals.
    fn extract_untagged(&self, hir: &Hir) -> Seq {
        let mut seq = self.extract(hir);
        log::trace!("extracted inner literals: {:?}", seq.seq);
        seq.seq.optimize_for_prefix_by_preference();
        log::trace!(
            "extracted inner literals after optimization: {:?}",
            seq.seq
        );
        // A sequence that fails the "good" heuristic is replaced with an
        // infinite one, which downstream means "no prefilter."
        if !seq.is_good() {
            log::trace!(
                "throwing away inner literals because they might be slow"
            );
            seq.make_infinite();
        }
        seq.seq
    }

    /// Execute the extractor and return a sequence of literals.
    fn extract(&self, hir: &Hir) -> TSeq {
        use regex_syntax::hir::HirKind::*;

        match *hir.kind() {
            // Zero-width constructs contribute a single exact empty literal.
            Empty | Look(_) => TSeq::singleton(self::Literal::exact(vec![])),
            Literal(hir::Literal(ref bytes)) => {
                let mut seq =
                    TSeq::singleton(self::Literal::exact(bytes.to_vec()));
                self.enforce_literal_len(&mut seq);
                seq
            }
            Class(hir::Class::Unicode(ref cls)) => {
                self.extract_class_unicode(cls)
            }
            Class(hir::Class::Bytes(ref cls)) => self.extract_class_bytes(cls),
            Repetition(ref rep) => self.extract_repetition(rep),
            Capture(hir::Capture { ref sub, .. }) => self.extract(sub),
            Concat(ref hirs) => self.extract_concat(hirs.iter()),
            Alternation(ref hirs) => self.extract_alternation(hirs.iter()),
        }
    }

    /// Extract a sequence from the given concatenation. Sequences from each of
    /// the child HIR expressions are combined via cross product.
    ///
    /// This short circuits once the cross product turns into a sequence
    /// containing only inexact literals.
    fn extract_concat<'a, I: Iterator<Item = &'a Hir>>(&self, it: I) -> TSeq {
        let mut seq = TSeq::singleton(self::Literal::exact(vec![]));
        let mut prev: Option<TSeq> = None;
        for hir in it {
            // If every element in the sequence is inexact, then a cross
            // product will always be a no-op. Thus, there is nothing else we
            // can add to it and can quit early. Note that this also includes
            // infinite sequences.
            if seq.is_inexact() {
                // If a concatenation has an empty sequence anywhere, then
                // it's impossible for the concatenantion to ever match. So we
                // can just quit now.
                if seq.is_empty() {
                    return seq;
                }
                if seq.is_really_good() {
                    return seq;
                }
                // Bank the current candidate and start extracting a fresh
                // sequence from the remainder of the concatenation; `choose`
                // picks the better of the two at the end.
                prev = Some(match prev {
                    None => seq,
                    Some(prev) => prev.choose(seq),
                });
                seq = TSeq::singleton(self::Literal::exact(vec![]));
                seq.make_not_prefix();
            }
            // Note that 'cross' also dispatches based on whether we're
            // extracting prefixes or suffixes.
            seq = self.cross(seq, self.extract(hir));
        }
        if let Some(prev) = prev { prev.choose(seq) } else { seq }
    }

    /// Extract a sequence from the given alternation.
    ///
    /// This short circuits once the union turns into an infinite sequence.
    fn extract_alternation<'a, I: Iterator<Item = &'a Hir>>(
        &self,
        it: I,
    ) -> TSeq {
        let mut seq = TSeq::empty();
        for hir in it {
            // Once our 'seq' is infinite, every subsequent union
            // operation on it will itself always result in an
            // infinite sequence. Thus, it can never change and we can
            // short-circuit.
            if !seq.is_finite() {
                break;
            }
            seq = self.union(seq, &mut self.extract(hir));
        }
        seq
    }

    /// Extract a sequence of literals from the given repetition. We do our
    /// best, Some examples:
    ///
    ///   'a*'    => [inexact(a), exact("")]
    ///   'a*?'   => [exact(""), inexact(a)]
    ///   'a+'    => [inexact(a)]
    ///   'a{3}'  => [exact(aaa)]
    ///   'a{3,5} => [inexact(aaa)]
    ///
    /// The key here really is making sure we get the 'inexact' vs 'exact'
    /// attributes correct on each of the literals we add. For example, the
    /// fact that 'a*' gives us an inexact 'a' and an exact empty string means
    /// that a regex like 'ab*c' will result in [inexact(ab), exact(ac)]
    /// literals being extracted, which might actually be a better prefilter
    /// than just 'a'.
    fn extract_repetition(&self, rep: &hir::Repetition) -> TSeq {
        let mut subseq = self.extract(&rep.sub);
        match *rep {
            hir::Repetition { min: 0, max, greedy, .. } => {
                // When 'max=1', we can retain exactness, since 'a?' is
                // equivalent to 'a|'. Similarly below, 'a??' is equivalent to
                // '|a'.
                if max != Some(1) {
                    subseq.make_inexact();
                }
                let mut empty = TSeq::singleton(Literal::exact(vec![]));
                if !greedy {
                    std::mem::swap(&mut subseq, &mut empty);
                }
                self.union(subseq, &mut empty)
            }
            hir::Repetition { min, max: Some(max), .. } if min == max => {
                assert!(min > 0); // handled above
                let limit =
                    u32::try_from(self.limit_repeat).unwrap_or(u32::MAX);
                let mut seq = TSeq::singleton(Literal::exact(vec![]));
                // Unroll the repetition up to the configured limit; once the
                // sequence is inexact, further crossing is a no-op.
                for _ in 0..std::cmp::min(min, limit) {
                    if seq.is_inexact() {
                        break;
                    }
                    seq = self.cross(seq, subseq.clone());
                }
                // A truncated unroll is only a prefix of the real match.
                if usize::try_from(min).is_err() || min > limit {
                    seq.make_inexact();
                }
                seq
            }
            hir::Repetition { min, max: Some(max), .. } if min < max => {
                assert!(min > 0); // handled above
                let limit =
                    u32::try_from(self.limit_repeat).unwrap_or(u32::MAX);
                let mut seq = TSeq::singleton(Literal::exact(vec![]));
                for _ in 0..std::cmp::min(min, limit) {
                    if seq.is_inexact() {
                        break;
                    }
                    seq = self.cross(seq, subseq.clone());
                }
                // 'min < max' means the repetition can always match more, so
                // the unrolled minimum is never exact.
                seq.make_inexact();
                seq
            }
            hir::Repetition { .. } => {
                subseq.make_inexact();
                subseq
            }
        }
    }

    /// Convert the given Unicode class into a sequence of literals if the
    /// class is small enough. If the class is too big, return an infinite
    /// sequence.
    fn extract_class_unicode(&self, cls: &hir::ClassUnicode) -> TSeq {
        if self.class_over_limit_unicode(cls) {
            return TSeq::infinite();
        }
        let mut seq = TSeq::empty();
        for r in cls.iter() {
            for ch in r.start()..=r.end() {
                seq.push(Literal::from(ch));
            }
        }
        self.enforce_literal_len(&mut seq);
        seq
    }

    /// Convert the given byte class into a sequence of literals if the class
    /// is small enough. If the class is too big, return an infinite sequence.
    fn extract_class_bytes(&self, cls: &hir::ClassBytes) -> TSeq {
        if self.class_over_limit_bytes(cls) {
            return TSeq::infinite();
        }
        let mut seq = TSeq::empty();
        for r in cls.iter() {
            for b in r.start()..=r.end() {
                seq.push(Literal::from(b));
            }
        }
        self.enforce_literal_len(&mut seq);
        seq
    }

    /// Returns true if the given Unicode class exceeds the configured limits
    /// on this extractor.
    fn class_over_limit_unicode(&self, cls: &hir::ClassUnicode) -> bool {
        let mut count = 0;
        for r in cls.iter() {
            if count > self.limit_class {
                return true;
            }
            count += r.len();
        }
        count > self.limit_class
    }

    /// Returns true if the given byte class exceeds the configured limits on
    /// this extractor.
    fn class_over_limit_bytes(&self, cls: &hir::ClassBytes) -> bool {
        let mut count = 0;
        for r in cls.iter() {
            if count > self.limit_class {
                return true;
            }
            count += r.len();
        }
        count > self.limit_class
    }

    /// Compute the cross product of the two sequences if the result would be
    /// within configured limits. Otherwise, make `seq2` infinite and cross the
    /// infinite sequence with `seq1`.
    fn cross(&self, mut seq1: TSeq, mut seq2: TSeq) -> TSeq {
        // A non-prefix sequence can't be appended to seq1; pick the better of
        // the two candidates instead.
        if !seq2.prefix {
            return seq1.choose(seq2);
        }
        if seq1
            .max_cross_len(&seq2)
            .map_or(false, |len| len > self.limit_total)
        {
            seq2.make_infinite();
        }
        seq1.cross_forward(&mut seq2);
        assert!(seq1.len().map_or(true, |x| x <= self.limit_total));
        self.enforce_literal_len(&mut seq1);
        seq1
    }

    /// Union the two sequences if the result would be within configured
    /// limits. Otherwise, make `seq2` infinite and union the infinite sequence
    /// with `seq1`.
    fn union(&self, mut seq1: TSeq, seq2: &mut TSeq) -> TSeq {
        if seq1.max_union_len(seq2).map_or(false, |len| len > self.limit_total)
        {
            // We try to trim our literal sequences to see if we can make
            // room for more literals. The idea is that we'd rather trim down
            // literals already in our sequence if it means we can add a few
            // more and retain a finite sequence. Otherwise, we'll union with
            // an infinite sequence and that infects everything and effectively
            // stops literal extraction in its tracks.
            //
            // We do we keep 4 bytes here? Well, it's a bit of an abstraction
            // leakage. Downstream, the literals may wind up getting fed to
            // the Teddy algorithm, which supports searching literals up to
            // length 4. So that's why we pick that number here. Arguably this
            // should be a tuneable parameter, but it seems a little tricky to
            // describe. And I'm still unsure if this is the right way to go
            // about culling literal sequences.
            seq1.keep_first_bytes(4);
            seq2.keep_first_bytes(4);
            seq1.dedup();
            seq2.dedup();
            if seq1
                .max_union_len(seq2)
                .map_or(false, |len| len > self.limit_total)
            {
                seq2.make_infinite();
            }
        }
        seq1.union(seq2);
        assert!(seq1.len().map_or(true, |x| x <= self.limit_total));
        // The union is only a prefix sequence if both operands were.
        seq1.prefix = seq1.prefix && seq2.prefix;
        seq1
    }

    /// Applies the literal length limit to the given sequence. If none of the
    /// literals in the sequence exceed the limit, then this is a no-op.
    fn enforce_literal_len(&self, seq: &mut TSeq) {
        seq.keep_first_bytes(self.limit_literal_len);
    }
}
/// A "tagged" literal sequence: a `Seq` from regex-syntax paired with a flag
/// recording whether the sequence is still a prefix of the expression it was
/// extracted from.
#[derive(Clone, Debug)]
struct TSeq {
    /// The underlying literal sequence.
    seq: Seq,
    /// When false, this sequence may not be crossed as the suffix of another
    /// sequence (see `make_not_prefix` and the assert in `cross_forward`).
    prefix: bool,
}
#[allow(dead_code)]
impl TSeq {
    // The methods in this first group are thin forwarders to the
    // corresponding APIs on the underlying `Seq`.

    fn empty() -> TSeq {
        TSeq { seq: Seq::empty(), prefix: true }
    }

    fn infinite() -> TSeq {
        TSeq { seq: Seq::infinite(), prefix: true }
    }

    fn singleton(lit: Literal) -> TSeq {
        TSeq { seq: Seq::singleton(lit), prefix: true }
    }

    fn new<I, B>(it: I) -> TSeq
    where
        I: IntoIterator<Item = B>,
        B: AsRef<[u8]>,
    {
        TSeq { seq: Seq::new(it), prefix: true }
    }

    fn literals(&self) -> Option<&[Literal]> {
        self.seq.literals()
    }

    fn push(&mut self, lit: Literal) {
        self.seq.push(lit);
    }

    fn make_inexact(&mut self) {
        self.seq.make_inexact();
    }

    fn make_infinite(&mut self) {
        self.seq.make_infinite();
    }

    fn cross_forward(&mut self, other: &mut TSeq) {
        // Only prefix sequences may be appended; callers must check.
        assert!(other.prefix);
        self.seq.cross_forward(&mut other.seq);
    }

    fn union(&mut self, other: &mut TSeq) {
        self.seq.union(&mut other.seq);
    }

    fn dedup(&mut self) {
        self.seq.dedup();
    }

    fn sort(&mut self) {
        self.seq.sort();
    }

    fn keep_first_bytes(&mut self, len: usize) {
        self.seq.keep_first_bytes(len);
    }

    fn is_finite(&self) -> bool {
        self.seq.is_finite()
    }

    fn is_empty(&self) -> bool {
        self.seq.is_empty()
    }

    fn len(&self) -> Option<usize> {
        self.seq.len()
    }

    fn is_exact(&self) -> bool {
        self.seq.is_exact()
    }

    fn is_inexact(&self) -> bool {
        self.seq.is_inexact()
    }

    fn max_union_len(&self, other: &TSeq) -> Option<usize> {
        self.seq.max_union_len(&other.seq)
    }

    fn max_cross_len(&self, other: &TSeq) -> Option<usize> {
        assert!(other.prefix);
        self.seq.max_cross_len(&other.seq)
    }

    fn min_literal_len(&self) -> Option<usize> {
        self.seq.min_literal_len()
    }

    fn max_literal_len(&self) -> Option<usize> {
        self.seq.max_literal_len()
    }

    // Below are methods specific to a TSeq that aren't just forwarding calls
    // to a Seq method.

    /// Tags this sequence as "not a prefix." When this happens, this sequence
    /// can't be crossed as a suffix of another sequence.
    fn make_not_prefix(&mut self) {
        self.prefix = false;
    }

    /// Returns true if it's believed that the sequence given is "good" for
    /// acceleration. This is useful for determining whether a sequence of
    /// literals has any shot of being fast.
    fn is_good(&self) -> bool {
        if self.has_poisonous_literal() {
            return false;
        }
        let Some(min) = self.min_literal_len() else { return false };
        let Some(len) = self.len() else { return false };
        // If we have some very short literals, then let's require that our
        // sequence is itself very small.
        if min <= 1 {
            return len <= 3;
        }
        min >= 2 && len <= 64
    }

    /// Returns true if it's believed that the sequence given is "really
    /// good" for acceleration. This is useful for short circuiting literal
    /// extraction.
    fn is_really_good(&self) -> bool {
        if self.has_poisonous_literal() {
            return false;
        }
        let Some(min) = self.min_literal_len() else { return false };
        let Some(len) = self.len() else { return false };
        min >= 3 && len <= 8
    }

    /// Returns true if the given sequence contains a poisonous literal.
    fn has_poisonous_literal(&self) -> bool {
        let Some(lits) = self.literals() else { return false };
        lits.iter().any(is_poisonous)
    }

    /// Compare the two sequences and return the one that is believed to be
    /// best according to a hodge podge of heuristics.
    fn choose(self, other: TSeq) -> TSeq {
        let (mut seq1, mut seq2) = (self, other);
        // Whichever one we pick, by virtue of picking one, we choose
        // to not take the other. So we must consider the result inexact.
        seq1.make_inexact();
        seq2.make_inexact();
        if !seq1.is_finite() {
            return seq2;
        } else if !seq2.is_finite() {
            return seq1;
        }
        if seq1.has_poisonous_literal() {
            return seq2;
        } else if seq2.has_poisonous_literal() {
            return seq1;
        }
        // Prefer the sequence with the longer minimum literal.
        let Some(min1) = seq1.min_literal_len() else { return seq2 };
        let Some(min2) = seq2.min_literal_len() else { return seq1 };
        if min1 < min2 {
            return seq2;
        } else if min2 < min1 {
            return seq1;
        }
        // OK because we know both sequences are finite, otherwise they wouldn't
        // have a minimum literal length.
        let len1 = seq1.len().unwrap();
        let len2 = seq2.len().unwrap();
        // All else equal, fewer literals is better for a prefilter.
        if len1 < len2 {
            return seq2;
        } else if len2 < len1 {
            return seq1;
        }
        // We could do extra stuff like looking at a background frequency
        // distribution of bytes and picking the one that looks more rare, but for
        // now we just pick one.
        seq1
    }
}
impl FromIterator<Literal> for TSeq {
    /// Builds a `TSeq` from an iterator of literals; like the other
    /// constructors, the result starts out tagged as a prefix sequence.
    fn from_iter<T: IntoIterator<Item = Literal>>(it: T) -> TSeq {
        let seq = Seq::from_iter(it);
        TSeq { seq, prefix: true }
    }
}
/// Returns true if it is believed that this literal is likely to match very
/// frequently, and is thus not a good candidate for a prefilter.
fn is_poisonous(lit: &Literal) -> bool {
    use regex_syntax::hir::literal::rank;

    // Empty literals and single bytes with a very high background frequency
    // rank match far too often to make a useful prefilter.
    match lit.as_bytes() {
        [] => true,
        [byte] => rank(*byte) >= 250,
        _ => false,
    }
}
#[cfg(test)]
mod tests {
use super::*;
fn e(pattern: impl AsRef<str>) -> Seq {
let pattern = pattern.as_ref();
let hir = regex_syntax::ParserBuilder::new()
.utf8(false)
.build()
.parse(pattern)
.unwrap();
Extractor::new().extract_untagged(&hir)
}
#[allow(non_snake_case)]
fn E(x: &str) -> Literal {
Literal::exact(x.as_bytes())
}
#[allow(non_snake_case)]
fn I(x: &str) -> Literal {
Literal::inexact(x.as_bytes())
}
fn seq<I: IntoIterator<Item = Literal>>(it: I) -> Seq {
Seq::from_iter(it)
}
fn inexact<I>(it: I) -> Seq
where
I: IntoIterator<Item = Literal>,
{
Seq::from_iter(it)
}
fn exact<B: AsRef<[u8]>, I: IntoIterator<Item = B>>(it: I) -> Seq {
Seq::new(it)
}
// A grab bag of extraction cases: exact literals, inexactness introduced by
// surrounding character classes, literal fusion across groups and bounded
// repetitions.
#[test]
fn various() {
    assert_eq!(e(r"foo"), seq([E("foo")]));
    assert_eq!(e(r"[a-z]foo[a-z]"), seq([I("foo")]));
    assert_eq!(e(r"[a-z](foo)(bar)[a-z]"), seq([I("foobar")]));
    assert_eq!(e(r"[a-z]([a-z]foo)(bar[a-z])[a-z]"), seq([I("foo")]));
    assert_eq!(e(r"[a-z]([a-z]foo)([a-z]foo)[a-z]"), seq([I("foo")]));
    assert_eq!(e(r"(\d{1,3}\.){3}\d{1,3}"), seq([I(".")]));
    assert_eq!(e(r"[a-z]([a-z]foo){3}[a-z]"), seq([I("foo")]));
    assert_eq!(e(r"[a-z](foo[a-z]){3}[a-z]"), seq([I("foo")]));
    assert_eq!(e(r"[a-z]([a-z]foo[a-z]){3}[a-z]"), seq([I("foo")]));
    assert_eq!(
        e(r"[a-z]([a-z]foo){3}(bar[a-z]){3}[a-z]"),
        seq([I("foo")])
    );
}
// These test that some of our suspicious heuristics try to "pick better
// literals."
#[test]
fn heuristics() {
    // Here, the first literals we stumble across are {ab, cd, ef}. But we
    // keep going and our heuristics decide that {hiya} is better. (And it
    // should be, since it's just one literal and it's longer.)
    assert_eq!(e(r"[a-z]+(ab|cd|ef)[a-z]+hiya[a-z]+"), seq([I("hiya")]));
    // But here, the first alternation becomes "good enough" that literal
    // extraction short circuits early. {hiya} is probably still a better
    // choice here, but {abc, def, ghi} is not bad.
    assert_eq!(
        e(r"[a-z]+(abc|def|ghi)[a-z]+hiya[a-z]+"),
        seq([I("abc"), I("def"), I("ghi")])
    );
}
// Extraction from plain literals, including case-insensitive expansions
// (ASCII and Unicode) and multi-byte UTF-8 literals.
#[test]
fn literal() {
    assert_eq!(exact(["a"]), e("a"));
    assert_eq!(exact(["aaaaa"]), e("aaaaa"));
    assert_eq!(exact(["A", "a"]), e("(?i-u)a"));
    assert_eq!(exact(["AB", "Ab", "aB", "ab"]), e("(?i-u)ab"));
    assert_eq!(exact(["abC", "abc"]), e("ab(?i-u)c"));
    // `\xFF` with Unicode disabled is a raw byte; no useful literal comes
    // out, so the sequence is infinite.
    assert_eq!(Seq::infinite(), e(r"(?-u:\xFF)"));
    assert_eq!(exact([b"Z"]), e(r"Z"));
    assert_eq!(exact(["☃"]), e("☃"));
    assert_eq!(exact(["☃"]), e("(?i)☃"));
    assert_eq!(exact(["☃☃☃☃☃"]), e("☃☃☃☃☃"));
    assert_eq!(exact(["Δ"]), e("Δ"));
    assert_eq!(exact(["δ"]), e("δ"));
    assert_eq!(exact(["Δ", "δ"]), e("(?i)Δ"));
    assert_eq!(exact(["Δ", "δ"]), e("(?i)δ"));
    // 's' has three case-insensitive variants: S, s and the long s (ſ).
    assert_eq!(exact(["S", "s", "ſ"]), e("(?i)S"));
    assert_eq!(exact(["S", "s", "ſ"]), e("(?i)s"));
    assert_eq!(exact(["S", "s", "ſ"]), e("(?i)ſ"));
    let letters = "ͱͳͷΐάέήίΰαβγδεζηθικλμνξοπρςστυφχψωϊϋ";
    assert_eq!(exact([letters]), e(letters));
}
// Extraction from character classes: each class member becomes its own
// literal, cross-multiplied with surrounding literals.
#[test]
fn class() {
    assert_eq!(exact(["a", "b", "c"]), e("[abc]"));
    assert_eq!(exact(["a1b", "a2b", "a3b"]), e("a[123]b"));
    assert_eq!(exact(["δ", "ε"]), e("[εδ]"));
    assert_eq!(exact(["Δ", "Ε", "δ", "ε", "ϵ"]), e(r"(?i)[εδ]"));
}
// Look-around assertions are zero-width, so they are dropped from the
// extracted literals regardless of where they appear.
#[test]
fn look() {
    // Assertions in the middle of a concatenation.
    assert_eq!(exact(["ab"]), e(r"a\Ab"));
    assert_eq!(exact(["ab"]), e(r"a\zb"));
    assert_eq!(exact(["ab"]), e(r"a(?m:^)b"));
    assert_eq!(exact(["ab"]), e(r"a(?m:$)b"));
    assert_eq!(exact(["ab"]), e(r"a\bb"));
    assert_eq!(exact(["ab"]), e(r"a\Bb"));
    assert_eq!(exact(["ab"]), e(r"a(?-u:\b)b"));
    assert_eq!(exact(["ab"]), e(r"a(?-u:\B)b"));
    // Assertions at the start.
    assert_eq!(exact(["ab"]), e(r"^ab"));
    assert_eq!(exact(["ab"]), e(r"$ab"));
    assert_eq!(exact(["ab"]), e(r"(?m:^)ab"));
    assert_eq!(exact(["ab"]), e(r"(?m:$)ab"));
    assert_eq!(exact(["ab"]), e(r"\bab"));
    assert_eq!(exact(["ab"]), e(r"\Bab"));
    assert_eq!(exact(["ab"]), e(r"(?-u:\b)ab"));
    assert_eq!(exact(["ab"]), e(r"(?-u:\B)ab"));
    // Assertions at the end.
    assert_eq!(exact(["ab"]), e(r"ab^"));
    assert_eq!(exact(["ab"]), e(r"ab$"));
    assert_eq!(exact(["ab"]), e(r"ab(?m:^)"));
    assert_eq!(exact(["ab"]), e(r"ab(?m:$)"));
    assert_eq!(exact(["ab"]), e(r"ab\b"));
    assert_eq!(exact(["ab"]), e(r"ab\B"));
    assert_eq!(exact(["ab"]), e(r"ab(?-u:\b)"));
    assert_eq!(exact(["ab"]), e(r"ab(?-u:\B)"));
    // Mixed with repetition.
    assert_eq!(seq([I("aZ"), E("ab")]), e(r"^aZ*b"));
}
// Extraction through repetition operators: optional/zero-width repetitions
// yield infinite sequences, `+`/bounded repetitions yield (in)exact
// literals, and greediness affects the order of extracted alternatives.
#[test]
fn repetition() {
    assert_eq!(Seq::infinite(), e(r"a?"));
    assert_eq!(Seq::infinite(), e(r"a??"));
    assert_eq!(Seq::infinite(), e(r"a*"));
    assert_eq!(Seq::infinite(), e(r"a*?"));
    assert_eq!(inexact([I("a")]), e(r"a+"));
    assert_eq!(inexact([I("a")]), e(r"(a+)+"));
    assert_eq!(exact(["ab"]), e(r"aZ{0}b"));
    assert_eq!(exact(["aZb", "ab"]), e(r"aZ?b"));
    // Lazy `??` prefers the shorter alternative first.
    assert_eq!(exact(["ab", "aZb"]), e(r"aZ??b"));
    assert_eq!(inexact([I("aZ"), E("ab")]), e(r"aZ*b"));
    assert_eq!(inexact([E("ab"), I("aZ")]), e(r"aZ*?b"));
    assert_eq!(inexact([I("aZ")]), e(r"aZ+b"));
    assert_eq!(inexact([I("aZ")]), e(r"aZ+?b"));
    assert_eq!(exact(["aZZb"]), e(r"aZ{2}b"));
    assert_eq!(inexact([I("aZZ")]), e(r"aZ{2,3}b"));
    assert_eq!(Seq::infinite(), e(r"(abc)?"));
    assert_eq!(Seq::infinite(), e(r"(abc)??"));
    assert_eq!(inexact([I("a"), E("b")]), e(r"a*b"));
    assert_eq!(inexact([E("b"), I("a")]), e(r"a*?b"));
    assert_eq!(inexact([I("ab")]), e(r"ab+"));
    assert_eq!(inexact([I("a"), I("b")]), e(r"a*b+"));
    assert_eq!(inexact([I("a"), I("b"), E("c")]), e(r"a*b*c"));
    assert_eq!(inexact([I("a"), I("b"), E("c")]), e(r"(a+)?(b+)?c"));
    assert_eq!(inexact([I("a"), I("b"), E("c")]), e(r"(a+|)(b+|)c"));
    // A few more similarish but not identical regexes. These may have a
    // similar problem as above.
    assert_eq!(Seq::infinite(), e(r"a*b*c*"));
    assert_eq!(inexact([I("a"), I("b"), I("c")]), e(r"a*b*c+"));
    assert_eq!(inexact([I("a"), I("b")]), e(r"a*b+c"));
    assert_eq!(inexact([I("a"), I("b")]), e(r"a*b+c*"));
    assert_eq!(inexact([I("ab"), E("a")]), e(r"ab*"));
    assert_eq!(inexact([I("ab"), E("ac")]), e(r"ab*c"));
    assert_eq!(inexact([I("ab")]), e(r"ab+"));
    assert_eq!(inexact([I("ab")]), e(r"ab+c"));
    assert_eq!(inexact([I("z"), E("azb")]), e(r"z*azb"));
    // Bounded repetition of a class expands the full cross product.
    let expected =
        exact(["aaa", "aab", "aba", "abb", "baa", "bab", "bba", "bbb"]);
    assert_eq!(expected, e(r"[ab]{3}"));
    let expected = inexact([
        I("aaa"),
        I("aab"),
        I("aba"),
        I("abb"),
        I("baa"),
        I("bab"),
        I("bba"),
        I("bbb"),
    ]);
    assert_eq!(expected, e(r"[ab]{3,4}"));
}
// Extraction across concatenations, including empty groups and the empty
// character class `[a&&b]` (which can never match).
#[test]
fn concat() {
    assert_eq!(exact(["abcxyz"]), e(r"abc()xyz"));
    assert_eq!(exact(["abcxyz"]), e(r"(abc)(xyz)"));
    assert_eq!(exact(["abcmnoxyz"]), e(r"abc()mno()xyz"));
    // `[a&&b]` is an empty class, so the whole concatenation can't match
    // and no useful literals are produced.
    assert_eq!(Seq::infinite(), e(r"abc[a&&b]xyz"));
    // With `*`, the empty class can match zero times, so the rest fuses.
    assert_eq!(exact(["abcxyz"]), e(r"abc[a&&b]*xyz"));
}
// Extraction from alternations: branch literals are unioned, and
// cross-multiplied when alternations are concatenated.
#[test]
fn alternation() {
    assert_eq!(exact(["abc", "mno", "xyz"]), e(r"abc|mno|xyz"));
    assert_eq!(
        inexact([E("abc"), I("mZ"), E("mo"), E("xyz")]),
        e(r"abc|mZ*o|xyz")
    );
    assert_eq!(exact(["abc", "xyz"]), e(r"abc|M[a&&b]N|xyz"));
    assert_eq!(exact(["abc", "MN", "xyz"]), e(r"abc|M[a&&b]*N|xyz"));
    assert_eq!(exact(["aaa"]), e(r"(?:|aa)aaa"));
    assert_eq!(Seq::infinite(), e(r"(?:|aa)(?:aaa)*"));
    assert_eq!(Seq::infinite(), e(r"(?:|aa)(?:aaa)*?"));
    assert_eq!(Seq::infinite(), e(r"a|b*"));
    assert_eq!(inexact([E("a"), I("b")]), e(r"a|b+"));
    assert_eq!(inexact([I("a"), E("b"), E("c")]), e(r"a*b|c"));
    assert_eq!(Seq::infinite(), e(r"a|(?:b|c*)"));
    assert_eq!(inexact([I("a"), I("b"), E("c")]), e(r"(a|b)*c|(a|ab)*c"));
    assert_eq!(
        exact(["abef", "abgh", "cdef", "cdgh"]),
        e(r"(ab|cd)(ef|gh)")
    );
    assert_eq!(
        exact([
            "abefij", "abefkl", "abghij", "abghkl", "cdefij", "cdefkl",
            "cdghij", "cdghkl",
        ]),
        e(r"(ab|cd)(ef|gh)(ij|kl)")
    );
}
#[test]
fn impossible() {
// N.B. The extractor in this module "optimizes" the sequence and makes
// it infinite if it isn't "good." An empty sequence (generated by a
// concatenantion containing an expression that can never match) is
// considered "not good." Since infinite sequences are not actionably
// and disable optimizations, this winds up being okay.
//
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | true |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/regex/src/matcher.rs | crates/regex/src/matcher.rs | use {
grep_matcher::{
ByteSet, Captures, LineMatchKind, LineTerminator, Match, Matcher,
NoError,
},
regex_automata::{
Input, PatternID, meta::Regex,
util::captures::Captures as AutomataCaptures,
},
};
use crate::{config::Config, error::Error, literal::InnerLiterals};
/// A builder for constructing a `Matcher` using regular expressions.
///
/// This builder re-exports many of the same options found on the regex crate's
/// builder, in addition to a few other options such as smart case, word
/// matching and the ability to set a line terminator which may enable certain
/// types of optimizations.
///
/// The syntax supported is documented as part of the regex crate:
/// <https://docs.rs/regex/#syntax>.
#[derive(Clone, Debug)]
pub struct RegexMatcherBuilder {
    /// The accumulated configuration consumed by `build`/`build_many`.
    config: Config,
}
impl Default for RegexMatcherBuilder {
fn default() -> RegexMatcherBuilder {
RegexMatcherBuilder::new()
}
}
impl RegexMatcherBuilder {
/// Create a new builder for configuring a regex matcher.
pub fn new() -> RegexMatcherBuilder {
RegexMatcherBuilder { config: Config::default() }
}
/// Build a new matcher using the current configuration for the provided
/// pattern.
///
/// The syntax supported is documented as part of the regex crate:
/// <https://docs.rs/regex/#syntax>.
pub fn build(&self, pattern: &str) -> Result<RegexMatcher, Error> {
self.build_many(&[pattern])
}
/// Build a new matcher using the current configuration for the provided
/// patterns. The resulting matcher behaves as if all of the patterns
/// given are joined together into a single alternation. That is, it
/// reports matches where at least one of the given patterns matches.
pub fn build_many<P: AsRef<str>>(
&self,
patterns: &[P],
) -> Result<RegexMatcher, Error> {
let mut chir = self.config.build_many(patterns)?;
// 'whole_line' is a strict subset of 'word', so when it is enabled,
// we don't need to both with any specific to word matching.
if chir.config().whole_line {
chir = chir.into_whole_line();
} else if chir.config().word {
chir = chir.into_word();
}
let regex = chir.to_regex()?;
log::trace!("final regex: {:?}", chir.hir().to_string());
let non_matching_bytes = chir.non_matching_bytes();
// If we can pick out some literals from the regex, then we might be
// able to build a faster regex that quickly identifies candidate
// matching lines. The regex engine will do what it can on its own, but
// we can specifically do a little more when a line terminator is set.
// For example, for a regex like `\w+foo\w+`, we can look for `foo`,
// and when a match is found, look for the line containing `foo` and
// then run the original regex on only that line. (In this case, the
// regex engine is likely to handle this case for us since it's so
// simple, but the idea applies.)
let fast_line_regex = InnerLiterals::new(&chir, ®ex).one_regex()?;
// We override the line terminator in case the configured HIR doesn't
// support it.
let mut config = self.config.clone();
config.line_terminator = chir.line_terminator();
Ok(RegexMatcher { config, regex, fast_line_regex, non_matching_bytes })
}
/// Build a new matcher from a plain alternation of literals.
///
/// Depending on the configuration set by the builder, this may be able to
/// build a matcher substantially faster than by joining the patterns with
/// a `|` and calling `build`.
pub fn build_literals<B: AsRef<str>>(
&self,
literals: &[B],
) -> Result<RegexMatcher, Error> {
self.build_many(literals)
}
/// Set the value for the case insensitive (`i`) flag.
///
/// When enabled, letters in the pattern will match both upper case and
/// lower case variants.
pub fn case_insensitive(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.config.case_insensitive = yes;
self
}
/// Whether to enable "smart case" or not.
///
/// When smart case is enabled, the builder will automatically enable
/// case insensitive matching based on how the pattern is written. Namely,
/// case insensitive mode is enabled when both of the following things
/// are true:
///
/// 1. The pattern contains at least one literal character. For example,
/// `a\w` contains a literal (`a`) but `\w` does not.
/// 2. Of the literals in the pattern, none of them are considered to be
/// uppercase according to Unicode. For example, `foo\pL` has no
/// uppercase literals but `Foo\pL` does.
pub fn case_smart(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.config.case_smart = yes;
self
}
/// Set the value for the multi-line matching (`m`) flag.
///
/// When enabled, `^` matches the beginning of lines and `$` matches the
/// end of lines.
///
/// By default, they match beginning/end of the input.
pub fn multi_line(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.config.multi_line = yes;
self
}
/// Set the value for the any character (`s`) flag, where in `.` matches
/// anything when `s` is set and matches anything except for new line when
/// it is not set (the default).
///
/// N.B. "matches anything" means "any byte" when Unicode is disabled and
/// means "any valid UTF-8 encoding of any Unicode scalar value" when
/// Unicode is enabled.
pub fn dot_matches_new_line(
&mut self,
yes: bool,
) -> &mut RegexMatcherBuilder {
self.config.dot_matches_new_line = yes;
self
}
/// Set the value for the greedy swap (`U`) flag.
///
/// When enabled, a pattern like `a*` is lazy (tries to find shortest
/// match) and `a*?` is greedy (tries to find longest match).
///
/// By default, `a*` is greedy and `a*?` is lazy.
pub fn swap_greed(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.config.swap_greed = yes;
self
}
/// Set the value for the ignore whitespace (`x`) flag.
///
/// When enabled, whitespace such as new lines and spaces will be ignored
/// between expressions of the pattern, and `#` can be used to start a
/// comment until the next new line.
pub fn ignore_whitespace(
&mut self,
yes: bool,
) -> &mut RegexMatcherBuilder {
self.config.ignore_whitespace = yes;
self
}
/// Set the value for the Unicode (`u`) flag.
///
/// Enabled by default. When disabled, character classes such as `\w` only
/// match ASCII word characters instead of all Unicode word characters.
pub fn unicode(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.config.unicode = yes;
self
}
/// Whether to support octal syntax or not.
///
/// Octal syntax is a little-known way of uttering Unicode codepoints in
/// a regular expression. For example, `a`, `\x61`, `\u0061` and
/// `\141` are all equivalent regular expressions, where the last example
/// shows octal syntax.
///
/// While supporting octal syntax isn't in and of itself a problem, it does
/// make good error messages harder. That is, in PCRE based regex engines,
/// syntax like `\0` invokes a backreference, which is explicitly
/// unsupported in Rust's regex engine. However, many users expect it to
/// be supported. Therefore, when octal support is disabled, the error
/// message will explicitly mention that backreferences aren't supported.
///
/// Octal syntax is disabled by default.
pub fn octal(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.config.octal = yes;
self
}
/// Set the approximate size limit of the compiled regular expression.
///
/// This roughly corresponds to the number of bytes occupied by a single
/// compiled program. If the program exceeds this number, then a
/// compilation error is returned.
pub fn size_limit(&mut self, bytes: usize) -> &mut RegexMatcherBuilder {
self.config.size_limit = bytes;
self
}
/// Set the approximate size of the cache used by the DFA.
///
/// This roughly corresponds to the number of bytes that the DFA will
/// use while searching.
///
/// Note that this is a *per thread* limit. There is no way to set a global
/// limit. In particular, if a regex is used from multiple threads
/// simultaneously, then each thread may use up to the number of bytes
/// specified here.
pub fn dfa_size_limit(
&mut self,
bytes: usize,
) -> &mut RegexMatcherBuilder {
self.config.dfa_size_limit = bytes;
self
}
/// Set the nesting limit for this parser.
///
/// The nesting limit controls how deep the abstract syntax tree is allowed
/// to be. If the AST exceeds the given limit (e.g., with too many nested
/// groups), then an error is returned by the parser.
///
/// The purpose of this limit is to act as a heuristic to prevent stack
/// overflow for consumers that do structural induction on an `Ast` using
/// explicit recursion. While this crate never does this (instead using
/// constant stack space and moving the call stack to the heap), other
/// crates may.
///
/// This limit is not checked until the entire Ast is parsed. Therefore,
/// if callers want to put a limit on the amount of heap space used, then
/// they should impose a limit on the length, in bytes, of the concrete
/// pattern string. In particular, this is viable since this parser
/// implementation will limit itself to heap space proportional to the
/// length of the pattern string.
///
/// Note that a nest limit of `0` will return a nest limit error for most
/// patterns but not all. For example, a nest limit of `0` permits `a` but
/// not `ab`, since `ab` requires a concatenation, which results in a nest
/// depth of `1`. In general, a nest limit is not something that manifests
/// in an obvious way in the concrete syntax, therefore, it should not be
/// used in a granular way.
pub fn nest_limit(&mut self, limit: u32) -> &mut RegexMatcherBuilder {
self.config.nest_limit = limit;
self
}
/// Set an ASCII line terminator for the matcher.
///
/// The purpose of setting a line terminator is to enable a certain class
/// of optimizations that can make line oriented searching faster. Namely,
/// when a line terminator is enabled, then the builder will guarantee that
/// the resulting matcher will never be capable of producing a match that
/// contains the line terminator. Because of this guarantee, users of the
/// resulting matcher do not need to slowly execute a search line by line
/// for line oriented search.
///
/// If the aforementioned guarantee about not matching a line terminator
/// cannot be made because of how the pattern was written, then the builder
/// will return an error when attempting to construct the matcher. For
/// example, the pattern `a\sb` will be transformed such that it can never
/// match `a\nb` (when `\n` is the line terminator), but the pattern `a\nb`
/// will result in an error since the `\n` cannot be easily removed without
/// changing the fundamental intent of the pattern.
///
/// If the given line terminator isn't an ASCII byte (`<=127`), then the
/// builder will return an error when constructing the matcher.
pub fn line_terminator(
&mut self,
line_term: Option<u8>,
) -> &mut RegexMatcherBuilder {
self.config.line_terminator = line_term.map(LineTerminator::byte);
self
}
/// Ban a byte from occurring in a regular expression pattern.
///
/// If this byte is found in the regex pattern, then an error will be
/// returned at construction time.
///
/// This is useful when binary detection is enabled. Callers will likely
/// want to ban the same byte that is used to detect binary data, i.e.,
/// the NUL byte. The reason for this is that when binary detection is
/// enabled, it's impossible to match a NUL byte because binary detection
/// will either quit when one is found, or will convert NUL bytes to line
/// terminators to avoid exorbitant heap usage.
pub fn ban_byte(&mut self, byte: Option<u8>) -> &mut RegexMatcherBuilder {
self.config.ban = byte;
self
}
/// Set the line terminator to `\r\n` and enable CRLF matching for `$` in
/// regex patterns.
///
/// This method sets two distinct settings:
///
/// 1. It causes the line terminator for the matcher to be `\r\n`. Namely,
/// this prevents the matcher from ever producing a match that contains
/// a `\r` or `\n`.
/// 2. It enables CRLF mode for `^` and `$`. This means that line anchors
/// will treat both `\r` and `\n` as line terminators, but will never
/// match between a `\r` and `\n`.
///
/// Note that if you do not wish to set the line terminator but would
/// still like `$` to match `\r\n` line terminators, then it is valid to
/// call `crlf(true)` followed by `line_terminator(None)`. Ordering is
/// important, since `crlf` sets the line terminator, but `line_terminator`
/// does not touch the `crlf` setting.
pub fn crlf(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
if yes {
self.config.line_terminator = Some(LineTerminator::crlf());
} else {
self.config.line_terminator = None;
}
self.config.crlf = yes;
self
}
/// Require that all matches occur on word boundaries.
///
/// Enabling this option is subtly different than putting `\b` assertions
/// on both sides of your pattern. In particular, a `\b` assertion requires
/// that one side of it match a word character while the other match a
/// non-word character. This option, in contrast, merely requires that
/// one side match a non-word character.
///
/// For example, `\b-2\b` will not match `foo -2 bar` since `-` is not a
/// word character. However, `-2` with this `word` option enabled will
/// match the `-2` in `foo -2 bar`.
pub fn word(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.config.word = yes;
self
}
/// Whether the patterns should be treated as literal strings or not. When
/// this is active, all characters, including ones that would normally be
/// special regex meta characters, are matched literally.
pub fn fixed_strings(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.config.fixed_strings = yes;
self
}
/// Whether each pattern should match the entire line or not. This is
/// equivalent to surrounding the pattern with `(?m:^)` and `(?m:$)`.
pub fn whole_line(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.config.whole_line = yes;
self
}
}
/// An implementation of the `Matcher` trait using Rust's standard regex
/// library.
#[derive(Clone, Debug)]
pub struct RegexMatcher {
    /// The configuration specified by the caller.
    config: Config,
    /// The regular expression compiled from the pattern provided by the
    /// caller.
    regex: Regex,
    /// A regex that never reports false negatives but may report false
    /// positives. It is believed to be capable of being matched more quickly
    /// than `regex`. Typically, this is a single literal or an alternation
    /// of literals.
    fast_line_regex: Option<Regex>,
    /// A set of bytes that will never appear in a match.
    non_matching_bytes: ByteSet,
}
impl RegexMatcher {
    /// Create a new matcher from the given pattern using the default
    /// configuration.
    pub fn new(pattern: &str) -> Result<RegexMatcher, Error> {
        RegexMatcherBuilder::new().build(pattern)
    }

    /// Create a new matcher from the given pattern using the default
    /// configuration, but matches lines terminated by `\n`.
    ///
    /// This is meant to be a convenience constructor for
    /// using a `RegexMatcherBuilder` and setting its
    /// [`line_terminator`](RegexMatcherBuilder::line_terminator) to
    /// `\n`. The purpose of using this constructor is to permit special
    /// optimizations that help speed up line oriented search. These types of
    /// optimizations are only appropriate when matches span no more than one
    /// line. For this reason, this constructor will return an error if the
    /// given pattern contains a literal `\n`. Other uses of `\n` (such as in
    /// `\s`) are removed transparently.
    pub fn new_line_matcher(pattern: &str) -> Result<RegexMatcher, Error> {
        RegexMatcherBuilder::new().line_terminator(Some(b'\n')).build(pattern)
    }
}
// This implementation just dispatches on the internal matcher impl except
// for the line terminator optimization, which is possibly executed via
// `fast_line_regex`.
impl Matcher for RegexMatcher {
    type Captures = RegexCaptures;
    type Error = NoError;

    // Finds the leftmost match beginning at or after `at`.
    #[inline]
    fn find_at(
        &self,
        haystack: &[u8],
        at: usize,
    ) -> Result<Option<Match>, NoError> {
        let input = Input::new(haystack).span(at..haystack.len());
        Ok(self.regex.find(input).map(|m| Match::new(m.start(), m.end())))
    }

    // Allocates a fresh capture slot group sized for this regex.
    #[inline]
    fn new_captures(&self) -> Result<RegexCaptures, NoError> {
        Ok(RegexCaptures::new(self.regex.create_captures()))
    }

    #[inline]
    fn capture_count(&self) -> usize {
        self.regex.captures_len()
    }

    // Resolves a capture group name to its index for pattern 0. (All
    // patterns are treated as one alternation, so only pattern 0 is used.)
    #[inline]
    fn capture_index(&self, name: &str) -> Option<usize> {
        self.regex.group_info().to_index(PatternID::ZERO, name)
    }

    // Iterates over all non-overlapping matches, stopping early if the
    // callback returns `Ok(false)` or an error.
    #[inline]
    fn try_find_iter<F, E>(
        &self,
        haystack: &[u8],
        mut matched: F,
    ) -> Result<Result<(), E>, NoError>
    where
        F: FnMut(Match) -> Result<bool, E>,
    {
        for m in self.regex.find_iter(haystack) {
            match matched(Match::new(m.start(), m.end())) {
                Ok(true) => continue,
                Ok(false) => return Ok(Ok(())),
                Err(err) => return Ok(Err(err)),
            }
        }
        Ok(Ok(()))
    }

    // Runs a full capturing search beginning at or after `at`, writing the
    // group offsets into `caps`. Returns whether a match was found.
    #[inline]
    fn captures_at(
        &self,
        haystack: &[u8],
        at: usize,
        caps: &mut RegexCaptures,
    ) -> Result<bool, NoError> {
        let input = Input::new(haystack).span(at..haystack.len());
        let caps = caps.captures_mut();
        self.regex.search_captures(&input, caps);
        Ok(caps.is_match())
    }

    // Returns the end offset of a match without computing its start, which
    // can be cheaper than a full `find`.
    #[inline]
    fn shortest_match_at(
        &self,
        haystack: &[u8],
        at: usize,
    ) -> Result<Option<usize>, NoError> {
        let input = Input::new(haystack).span(at..haystack.len());
        Ok(self.regex.search_half(&input).map(|hm| hm.offset()))
    }

    #[inline]
    fn non_matching_bytes(&self) -> Option<&ByteSet> {
        Some(&self.non_matching_bytes)
    }

    #[inline]
    fn line_terminator(&self) -> Option<LineTerminator> {
        self.config.line_terminator
    }

    // Finds a line that might contain a match. When a `fast_line_regex` is
    // available, it is used to report a (possibly false positive) candidate;
    // otherwise a real search is done and the result is confirmed.
    #[inline]
    fn find_candidate_line(
        &self,
        haystack: &[u8],
    ) -> Result<Option<LineMatchKind>, NoError> {
        Ok(match self.fast_line_regex {
            Some(ref regex) => {
                let input = Input::new(haystack);
                regex
                    .search_half(&input)
                    .map(|hm| LineMatchKind::Candidate(hm.offset()))
            }
            None => {
                self.shortest_match(haystack)?.map(LineMatchKind::Confirmed)
            }
        })
    }
}
/// Represents the match offsets of each capturing group in a match.
///
/// The first, or `0`th capture group, always corresponds to the entire match
/// and is guaranteed to be present when a match occurs. The next capture
/// group, at index `1`, corresponds to the first capturing group in the regex,
/// ordered by the position at which the left opening parenthesis occurs.
///
/// Note that not all capturing groups are guaranteed to be present in a match.
/// For example, in the regex, `(?P<foo>\w)|(?P<bar>\W)`, only one of `foo`
/// or `bar` will ever be set in any given match.
///
/// In order to access a capture group by name, you'll need to first find the
/// index of the group using the corresponding matcher's `capture_index`
/// method, and then use that index with `RegexCaptures::get`.
#[derive(Clone, Debug)]
pub struct RegexCaptures {
    /// Where the captures are stored. This wraps the capture state from the
    /// underlying `regex-automata` engine.
    caps: AutomataCaptures,
}
impl Captures for RegexCaptures {
    /// Returns the total number of capture groups, including group 0.
    #[inline]
    fn len(&self) -> usize {
        self.caps.group_info().all_group_len()
    }

    /// Returns the offsets for capture group `i`, if that group participated
    /// in the most recent match.
    #[inline]
    fn get(&self, i: usize) -> Option<Match> {
        let span = self.caps.get_group(i)?;
        Some(Match::new(span.start, span.end))
    }
}
impl RegexCaptures {
    /// Wraps the given automata capture state.
    #[inline]
    pub(crate) fn new(caps: AutomataCaptures) -> RegexCaptures {
        RegexCaptures { caps }
    }

    /// Returns a mutable reference to the underlying capture state, suitable
    /// for passing to the regex engine's capturing search.
    #[inline]
    pub(crate) fn captures_mut(&mut self) -> &mut AutomataCaptures {
        &mut self.caps
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Test that enabling word matches does the right thing and demonstrate
    // the difference between it and surrounding the regex in `\b`.
    #[test]
    fn word() {
        let matcher =
            RegexMatcherBuilder::new().word(true).build(r"-2").unwrap();
        assert!(matcher.is_match(b"abc -2 foo").unwrap());

        // `\b-2\b` fails here because `-` is not a word character, so the
        // left-hand `\b` cannot be satisfied.
        let matcher =
            RegexMatcherBuilder::new().word(false).build(r"\b-2\b").unwrap();
        assert!(!matcher.is_match(b"abc -2 foo").unwrap());
    }

    // Test that enabling a line terminator prevents it from matching through
    // said line terminator.
    #[test]
    fn line_terminator() {
        // This works, because there's no line terminator specified.
        let matcher = RegexMatcherBuilder::new().build(r"abc\sxyz").unwrap();
        assert!(matcher.is_match(b"abc\nxyz").unwrap());

        // This doesn't.
        let matcher = RegexMatcherBuilder::new()
            .line_terminator(Some(b'\n'))
            .build(r"abc\sxyz")
            .unwrap();
        assert!(!matcher.is_match(b"abc\nxyz").unwrap());
    }

    // Ensure that the builder returns an error if a line terminator is set
    // and the regex could not be modified to remove a line terminator.
    #[test]
    fn line_terminator_error() {
        assert!(
            RegexMatcherBuilder::new()
                .line_terminator(Some(b'\n'))
                .build(r"a\nz")
                .is_err()
        )
    }

    // Test that enabling CRLF permits `$` to match at the end of a line.
    #[test]
    fn line_terminator_crlf() {
        // Test normal use of `$` with a `\n` line terminator.
        let matcher = RegexMatcherBuilder::new()
            .multi_line(true)
            .build(r"abc$")
            .unwrap();
        assert!(matcher.is_match(b"abc\n").unwrap());

        // Test that `$` doesn't match at `\r\n` boundary normally.
        let matcher = RegexMatcherBuilder::new()
            .multi_line(true)
            .build(r"abc$")
            .unwrap();
        assert!(!matcher.is_match(b"abc\r\n").unwrap());

        // Now check the CRLF handling.
        let matcher = RegexMatcherBuilder::new()
            .multi_line(true)
            .crlf(true)
            .build(r"abc$")
            .unwrap();
        assert!(matcher.is_match(b"abc\r\n").unwrap());
    }

    // Test that smart case works: an all-lowercase pattern is case
    // insensitive, while a pattern with any uppercase literal is not.
    #[test]
    fn case_smart() {
        let matcher =
            RegexMatcherBuilder::new().case_smart(true).build(r"abc").unwrap();
        assert!(matcher.is_match(b"ABC").unwrap());

        let matcher =
            RegexMatcherBuilder::new().case_smart(true).build(r"aBc").unwrap();
        assert!(!matcher.is_match(b"ABC").unwrap());
    }

    // Test that finding candidate lines works as expected.
    // FIXME: Re-enable this test once inner literal extraction works.
    #[test]
    #[ignore]
    fn candidate_lines() {
        fn is_confirmed(m: LineMatchKind) -> bool {
            match m {
                LineMatchKind::Confirmed(_) => true,
                _ => false,
            }
        }
        fn is_candidate(m: LineMatchKind) -> bool {
            match m {
                LineMatchKind::Candidate(_) => true,
                _ => false,
            }
        }

        // With no line terminator set, we can't employ any optimizations,
        // so we get a confirmed match.
        let matcher = RegexMatcherBuilder::new().build(r"\wfoo\s").unwrap();
        let m = matcher.find_candidate_line(b"afoo ").unwrap().unwrap();
        assert!(is_confirmed(m));

        // With a line terminator and a regex specially crafted to have an
        // easy-to-detect inner literal, we can apply an optimization that
        // quickly finds candidate matches.
        let matcher = RegexMatcherBuilder::new()
            .line_terminator(Some(b'\n'))
            .build(r"\wfoo\s")
            .unwrap();
        let m = matcher.find_candidate_line(b"afoo ").unwrap().unwrap();
        assert!(is_candidate(m));
    }
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/regex/src/non_matching.rs | crates/regex/src/non_matching.rs | use {
grep_matcher::ByteSet,
regex_syntax::{
hir::{self, Hir, HirKind, Look},
utf8::Utf8Sequences,
},
};
/// Return a confirmed set of non-matching bytes from the given expression.
pub(crate) fn non_matching_bytes(expr: &Hir) -> ByteSet {
    // Start from the full byte set and carve out every byte the expression
    // could possibly match.
    let mut bytes = ByteSet::full();
    remove_matching_bytes(expr, &mut bytes);
    bytes
}
/// Remove any bytes from the given set that can occur in a match produced by
/// the given expression.
fn remove_matching_bytes(expr: &Hir, set: &mut ByteSet) {
    match *expr.kind() {
        // Empty expressions and word-boundary assertions are zero-width and
        // consume no bytes, so they remove nothing.
        HirKind::Empty
        | HirKind::Look(Look::WordAscii | Look::WordAsciiNegate)
        | HirKind::Look(Look::WordUnicode | Look::WordUnicodeNegate)
        | HirKind::Look(Look::WordStartAscii | Look::WordStartUnicode)
        | HirKind::Look(Look::WordEndAscii | Look::WordEndUnicode)
        | HirKind::Look(
            Look::WordStartHalfAscii | Look::WordStartHalfUnicode,
        )
        | HirKind::Look(Look::WordEndHalfAscii | Look::WordEndHalfUnicode) => {
        }
        HirKind::Look(Look::Start | Look::End) => {
            // FIXME: This is wrong, but not doing this leads to incorrect
            // results because of how anchored searches are implemented in
            // the 'grep-searcher' crate.
            set.remove(b'\n');
        }
        HirKind::Look(Look::StartLF | Look::EndLF) => {
            set.remove(b'\n');
        }
        HirKind::Look(Look::StartCRLF | Look::EndCRLF) => {
            set.remove(b'\r');
            set.remove(b'\n');
        }
        // Every byte of a literal can appear in a match.
        HirKind::Literal(hir::Literal(ref lit)) => {
            for &b in lit.iter() {
                set.remove(b);
            }
        }
        HirKind::Class(hir::Class::Unicode(ref cls)) => {
            for range in cls.iter() {
                // This is presumably faster than encoding every codepoint
                // to UTF-8 and then removing those bytes from the set.
                for seq in Utf8Sequences::new(range.start(), range.end()) {
                    for byte_range in seq.as_slice() {
                        set.remove_all(byte_range.start, byte_range.end);
                    }
                }
            }
        }
        HirKind::Class(hir::Class::Bytes(ref cls)) => {
            for range in cls.iter() {
                set.remove_all(range.start(), range.end());
            }
        }
        // Recurse into sub-expressions; any byte matchable by a child is
        // matchable by the parent.
        HirKind::Repetition(ref x) => {
            remove_matching_bytes(&x.sub, set);
        }
        HirKind::Capture(ref x) => {
            remove_matching_bytes(&x.sub, set);
        }
        HirKind::Concat(ref xs) => {
            for x in xs {
                remove_matching_bytes(x, set);
            }
        }
        HirKind::Alternation(ref xs) => {
            for x in xs {
                remove_matching_bytes(x, set);
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use {grep_matcher::ByteSet, regex_syntax::ParserBuilder};

    use super::non_matching_bytes;

    /// Parses `pattern` (allowing non-UTF-8) and computes its set of
    /// non-matching bytes.
    fn extract(pattern: &str) -> ByteSet {
        let expr =
            ParserBuilder::new().utf8(false).build().parse(pattern).unwrap();
        non_matching_bytes(&expr)
    }

    /// Converts a `ByteSet` into the sorted list of bytes it contains.
    fn sparse(set: &ByteSet) -> Vec<u8> {
        let mut sparse_set = vec![];
        for b in (0..256).map(|b| b as u8) {
            if set.contains(b) {
                sparse_set.push(b);
            }
        }
        sparse_set
    }

    /// Returns every byte in `0..=255` except those listed in `except`.
    fn sparse_except(except: &[u8]) -> Vec<u8> {
        let mut except_set = vec![false; 256];
        for &b in except {
            except_set[b as usize] = true;
        }
        let mut set = vec![];
        for b in (0..256).map(|b| b as u8) {
            if !except_set[b as usize] {
                set.push(b);
            }
        }
        set
    }

    #[test]
    fn dot() {
        // `.` (Unicode) matches everything except `\n` and bytes that can
        // never appear in valid UTF-8.
        assert_eq!(
            sparse(&extract(".")),
            vec![
                b'\n', 192, 193, 245, 246, 247, 248, 249, 250, 251, 252, 253,
                254, 255,
            ]
        );
        assert_eq!(
            sparse(&extract("(?s).")),
            vec![
                192, 193, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
                255,
            ]
        );
        assert_eq!(sparse(&extract("(?-u).")), vec![b'\n']);
        assert_eq!(sparse(&extract("(?s-u).")), vec![]);
    }

    #[test]
    fn literal() {
        assert_eq!(sparse(&extract("a")), sparse_except(&[b'a']));
        assert_eq!(sparse(&extract("☃")), sparse_except(&[0xE2, 0x98, 0x83]));
        // With Unicode enabled, `\xFF` is the codepoint U+00FF, whose UTF-8
        // encoding is the two bytes 0xC3 0xBF.
        assert_eq!(sparse(&extract(r"\xFF")), sparse_except(&[0xC3, 0xBF]));
        assert_eq!(sparse(&extract(r"(?-u)\xFF")), sparse_except(&[0xFF]));
    }

    #[test]
    fn anchor() {
        // FIXME: The first four tests below should correspond to a full set
        // of bytes for the non-matching bytes I think.
        assert_eq!(sparse(&extract(r"^")), sparse_except(&[b'\n']));
        assert_eq!(sparse(&extract(r"$")), sparse_except(&[b'\n']));
        assert_eq!(sparse(&extract(r"\A")), sparse_except(&[b'\n']));
        assert_eq!(sparse(&extract(r"\z")), sparse_except(&[b'\n']));
        assert_eq!(sparse(&extract(r"(?m)^")), sparse_except(&[b'\n']));
        assert_eq!(sparse(&extract(r"(?m)$")), sparse_except(&[b'\n']));
    }
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/grep/src/lib.rs | crates/grep/src/lib.rs | /*!
ripgrep, as a library.
This library is intended to provide a high level facade to the crates that
make up ripgrep's core searching routines. However, there is no high level
documentation available yet guiding users on how to fit all of the pieces
together.
Every public API item in the constituent crates is documented, but examples
are sparse.
A cookbook and a guide are planned.
*/
pub extern crate grep_cli as cli;
pub extern crate grep_matcher as matcher;
#[cfg(feature = "pcre2")]
pub extern crate grep_pcre2 as pcre2;
pub extern crate grep_printer as printer;
pub extern crate grep_regex as regex;
pub extern crate grep_searcher as searcher;
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/grep/examples/simplegrep.rs | crates/grep/examples/simplegrep.rs | use std::{env, error::Error, ffi::OsString, io::IsTerminal, process};
use {
grep::{
cli,
printer::{ColorSpecs, StandardBuilder},
regex::RegexMatcher,
searcher::{BinaryDetection, SearcherBuilder},
},
termcolor::ColorChoice,
walkdir::WalkDir,
};
/// Entry point: run the search and exit non-zero on failure.
fn main() {
    match try_main() {
        Ok(()) => {}
        Err(err) => {
            eprintln!("{}", err);
            process::exit(1);
        }
    }
}
/// Parse command-line arguments and kick off the search.
///
/// Usage is `simplegrep <pattern> [<path> ...]`; when no paths are given,
/// the current directory (`./`) is searched.
fn try_main() -> Result<(), Box<dyn Error>> {
    let mut args = env::args_os().collect::<Vec<OsString>>();
    match args.len() {
        // Need at least a pattern.
        0 | 1 => Err("Usage: simplegrep <pattern> [<path> ...]".into()),
        len => {
            if len == 2 {
                // No explicit paths: default to the current directory.
                args.push(OsString::from("./"));
            }
            search(cli::pattern_from_os(&args[1])?, &args[2..])
        }
    }
}
/// Searches every file under each of `paths` for `pattern`, printing
/// matching lines to stdout (with colors when stdout is a terminal).
///
/// Directory traversal errors and per-file search errors are reported on
/// stderr but do not abort the overall search.
fn search(pattern: &str, paths: &[OsString]) -> Result<(), Box<dyn Error>> {
    // `pattern` is already a `&str`; passing `&pattern` would create a
    // needless `&&str` that only works via deref coercion.
    let matcher = RegexMatcher::new_line_matcher(pattern)?;
    let mut searcher = SearcherBuilder::new()
        // Treat files containing NUL bytes as binary and skip them.
        .binary_detection(BinaryDetection::quit(b'\x00'))
        .line_number(false)
        .build();
    let mut printer = StandardBuilder::new()
        .color_specs(ColorSpecs::default_with_color())
        .build(cli::stdout(if std::io::stdout().is_terminal() {
            ColorChoice::Auto
        } else {
            ColorChoice::Never
        }));
    for path in paths {
        for result in WalkDir::new(path) {
            let dent = match result {
                Ok(dent) => dent,
                Err(err) => {
                    // Report the traversal error and keep walking.
                    eprintln!("{}", err);
                    continue;
                }
            };
            if !dent.file_type().is_file() {
                continue;
            }
            let result = searcher.search_path(
                &matcher,
                dent.path(),
                printer.sink_with_path(&matcher, dent.path()),
            );
            if let Err(err) = result {
                // Report the per-file search error and keep going.
                eprintln!("{}: {}", dent.path().display(), err);
            }
        }
    }
    Ok(())
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/ignore/src/gitignore.rs | crates/ignore/src/gitignore.rs | /*!
The gitignore module provides a way to match globs from a gitignore file
against file paths.
Note that this module implements the specification as described in the
`gitignore` man page from scratch. That is, this module does *not* shell out to
the `git` command line tool.
*/
use std::{
fs::File,
io::{BufRead, BufReader, Read},
path::{Path, PathBuf},
sync::Arc,
};
use {
globset::{Candidate, GlobBuilder, GlobSet, GlobSetBuilder},
regex_automata::util::pool::Pool,
};
use crate::{
Error, Match, PartialErrorBuilder,
pathutil::{is_file_name, strip_prefix},
};
/// Glob represents a single glob in a gitignore file.
///
/// This is used to report information about the highest precedent glob that
/// matched in one or more gitignore files.
#[derive(Clone, Debug)]
pub struct Glob {
    /// The file path that this glob was extracted from.
    from: Option<PathBuf>,
    /// The original glob string.
    original: String,
    /// The actual glob string used to convert to a regex.
    actual: String,
    /// Whether this is a whitelisted glob or not.
    ///
    /// A whitelisted glob is one that begins with `!` and re-includes paths
    /// that would otherwise be ignored.
    is_whitelist: bool,
    /// Whether this glob should only match directories or not.
    ///
    /// Corresponds to globs that end with a `/` in the gitignore file.
    is_only_dir: bool,
}
impl Glob {
    /// Returns the file path that defined this glob.
    pub fn from(&self) -> Option<&Path> {
        self.from.as_deref()
    }
    /// The original glob as it was defined in a gitignore file.
    pub fn original(&self) -> &str {
        &self.original
    }
    /// The actual glob that was compiled to respect gitignore
    /// semantics.
    pub fn actual(&self) -> &str {
        &self.actual
    }
    /// Whether this was a whitelisted glob or not.
    pub fn is_whitelist(&self) -> bool {
        self.is_whitelist
    }
    /// Whether this glob must match a directory or not.
    pub fn is_only_dir(&self) -> bool {
        self.is_only_dir
    }
    /// Returns true if and only if this glob is either exactly `**` or
    /// begins with a `**/` prefix.
    fn has_doublestar_prefix(&self) -> bool {
        self.actual == "**" || self.actual.starts_with("**/")
    }
}
/// Gitignore is a matcher for the globs in one or more gitignore files
/// in the same directory.
#[derive(Clone, Debug)]
pub struct Gitignore {
    /// The compiled set of all globs, used to match candidate paths.
    set: GlobSet,
    /// The directory containing the gitignore file(s); matching is done
    /// relative to this path.
    root: PathBuf,
    /// Every glob added, in order. Indices reported by `set` refer into
    /// this vector.
    globs: Vec<Glob>,
    /// The number of ignore (non-whitelist) globs.
    num_ignores: u64,
    /// The number of whitelist (`!`-prefixed) globs.
    num_whitelists: u64,
    /// A pool of scratch buffers for collecting match indices without
    /// allocating on every match call. This is `None` only for the empty
    /// matcher.
    matches: Option<Arc<Pool<Vec<usize>>>>,
}
impl Gitignore {
    /// Creates a new gitignore matcher from the gitignore file path given.
    ///
    /// If it's desirable to include multiple gitignore files in a single
    /// matcher, or read gitignore globs from a different source, then
    /// use `GitignoreBuilder`.
    ///
    /// This always returns a valid matcher, even if it's empty. In particular,
    /// a Gitignore file can be partially valid, e.g., when one glob is invalid
    /// but the rest aren't.
    ///
    /// Note that I/O errors are ignored. For more granular control over
    /// errors, use `GitignoreBuilder`.
    pub fn new<P: AsRef<Path>>(
        gitignore_path: P,
    ) -> (Gitignore, Option<Error>) {
        let path = gitignore_path.as_ref();
        // Globs are matched relative to the directory containing the
        // gitignore file, so the parent directory becomes the root.
        let parent = path.parent().unwrap_or(Path::new("/"));
        let mut builder = GitignoreBuilder::new(parent);
        let mut errs = PartialErrorBuilder::default();
        // I/O errors (e.g., the file not existing) are deliberately dropped.
        errs.maybe_push_ignore_io(builder.add(path));
        match builder.build() {
            Ok(gi) => (gi, errs.into_error_option()),
            Err(err) => {
                errs.push(err);
                (Gitignore::empty(), errs.into_error_option())
            }
        }
    }
    /// Creates a new gitignore matcher from the global ignore file, if one
    /// exists.
    ///
    /// The global config file path is specified by git's `core.excludesFile`
    /// config option.
    ///
    /// Git's config file location is `$HOME/.gitconfig`. If `$HOME/.gitconfig`
    /// does not exist or does not specify `core.excludesFile`, then
    /// `$XDG_CONFIG_HOME/git/ignore` is read. If `$XDG_CONFIG_HOME` is not
    /// set or is empty, then `$HOME/.config/git/ignore` is used instead.
    pub fn global() -> (Gitignore, Option<Error>) {
        match std::env::current_dir() {
            Ok(cwd) => GitignoreBuilder::new(cwd).build_global(),
            Err(err) => (Gitignore::empty(), Some(err.into())),
        }
    }
    /// Creates a new empty gitignore matcher that never matches anything.
    ///
    /// Its path is empty.
    pub fn empty() -> Gitignore {
        Gitignore {
            set: GlobSet::empty(),
            root: PathBuf::from(""),
            globs: vec![],
            num_ignores: 0,
            num_whitelists: 0,
            matches: None,
        }
    }
    /// Returns the directory containing this gitignore matcher.
    ///
    /// All matches are done relative to this path.
    pub fn path(&self) -> &Path {
        &*self.root
    }
    /// Returns true if and only if this gitignore has zero globs, and
    /// therefore never matches any file path.
    pub fn is_empty(&self) -> bool {
        self.set.is_empty()
    }
    /// Returns the total number of globs, which should be equivalent to
    /// `num_ignores + num_whitelists`.
    pub fn len(&self) -> usize {
        self.set.len()
    }
    /// Returns the total number of ignore globs.
    pub fn num_ignores(&self) -> u64 {
        self.num_ignores
    }
    /// Returns the total number of whitelisted globs.
    pub fn num_whitelists(&self) -> u64 {
        self.num_whitelists
    }
    /// Returns whether the given path (file or directory) matched a pattern in
    /// this gitignore matcher.
    ///
    /// `is_dir` should be true if the path refers to a directory and false
    /// otherwise.
    ///
    /// The given path is matched relative to the path given when building
    /// the matcher. Specifically, before matching `path`, its prefix (as
    /// determined by a common suffix of the directory containing this
    /// gitignore) is stripped. If there is no common suffix/prefix overlap,
    /// then `path` is assumed to be relative to this matcher.
    pub fn matched<P: AsRef<Path>>(
        &self,
        path: P,
        is_dir: bool,
    ) -> Match<&Glob> {
        if self.is_empty() {
            return Match::None;
        }
        self.matched_stripped(self.strip(path.as_ref()), is_dir)
    }
    /// Returns whether the given path (file or directory, and expected to be
    /// under the root) or any of its parent directories (up to the root)
    /// matched a pattern in this gitignore matcher.
    ///
    /// NOTE: This method is more expensive than walking the directory hierarchy
    /// top-to-bottom and matching the entries. But, is easier to use in cases
    /// when a list of paths are available without a hierarchy.
    ///
    /// `is_dir` should be true if the path refers to a directory and false
    /// otherwise.
    ///
    /// The given path is matched relative to the path given when building
    /// the matcher. Specifically, before matching `path`, its prefix (as
    /// determined by a common suffix of the directory containing this
    /// gitignore) is stripped. If there is no common suffix/prefix overlap,
    /// then `path` is assumed to be relative to this matcher.
    ///
    /// # Panics
    ///
    /// This method panics if the given file path is not under the root path
    /// of this matcher.
    pub fn matched_path_or_any_parents<P: AsRef<Path>>(
        &self,
        path: P,
        is_dir: bool,
    ) -> Match<&Glob> {
        if self.is_empty() {
            return Match::None;
        }
        let mut path = self.strip(path.as_ref());
        assert!(!path.has_root(), "path is expected to be under the root");
        // Try the path itself first, then each ancestor in turn. Note that
        // every ancestor is matched as a directory (is_dir == true).
        match self.matched_stripped(path, is_dir) {
            Match::None => (), // walk up
            a_match => return a_match,
        }
        while let Some(parent) = path.parent() {
            match self.matched_stripped(parent, /* is_dir */ true) {
                Match::None => path = parent, // walk up
                a_match => return a_match,
            }
        }
        Match::None
    }
    /// Like matched, but takes a path that has already been stripped.
    fn matched_stripped<P: AsRef<Path>>(
        &self,
        path: P,
        is_dir: bool,
    ) -> Match<&Glob> {
        if self.is_empty() {
            return Match::None;
        }
        let path = path.as_ref();
        // Grab a scratch buffer from the pool so a fresh Vec isn't
        // allocated on every call. `matches` is always `Some` here: only
        // the empty matcher has `None`, and it returns early above.
        let mut matches = self.matches.as_ref().unwrap().get();
        let candidate = Candidate::new(path);
        self.set.matches_candidate_into(&candidate, &mut *matches);
        // Iterate in reverse so that globs defined later take precedence
        // over globs defined earlier.
        for &i in matches.iter().rev() {
            let glob = &self.globs[i];
            if !glob.is_only_dir() || is_dir {
                return if glob.is_whitelist() {
                    Match::Whitelist(glob)
                } else {
                    Match::Ignore(glob)
                };
            }
        }
        Match::None
    }
    /// Strips the given path such that it's suitable for matching with this
    /// gitignore matcher.
    fn strip<'a, P: 'a + AsRef<Path> + ?Sized>(
        &'a self,
        path: &'a P,
    ) -> &'a Path {
        let mut path = path.as_ref();
        // A leading ./ is completely superfluous. We also strip it from
        // our gitignore root path, so we need to strip it from our candidate
        // path too.
        if let Some(p) = strip_prefix("./", path) {
            path = p;
        }
        // Strip any common prefix between the candidate path and the root
        // of the gitignore, to make sure we get relative matching right.
        // BUT, a file name might not have any directory components to it,
        // in which case, we don't want to accidentally strip any part of the
        // file name.
        //
        // As an additional special case, if the root is just `.`, then we
        // shouldn't try to strip anything, e.g., when path begins with a `.`.
        if self.root != Path::new(".") && !is_file_name(path) {
            if let Some(p) = strip_prefix(&self.root, path) {
                path = p;
                // If we're left with a leading slash, get rid of it.
                if let Some(p) = strip_prefix("/", path) {
                    path = p;
                }
            }
        }
        path
    }
}
/// Builds a matcher for a single set of globs from a .gitignore file.
#[derive(Clone, Debug)]
pub struct GitignoreBuilder {
    /// The underlying glob set builder that each parsed glob is added to.
    builder: GlobSetBuilder,
    /// The directory that all globs are matched relative to.
    root: PathBuf,
    /// Every glob added so far, in order.
    globs: Vec<Glob>,
    /// Whether globs added after this is set are matched case insensitively.
    case_insensitive: bool,
    /// Whether an unclosed `[` in a glob is treated literally rather than
    /// as a parse error.
    allow_unclosed_class: bool,
}
impl GitignoreBuilder {
    /// Create a new builder for a gitignore file.
    ///
    /// The path given should be the path at which the globs for this gitignore
    /// file should be matched. Note that paths are always matched relative
    /// to the root path given here. Generally, the root path should correspond
    /// to the *directory* containing a `.gitignore` file.
    pub fn new<P: AsRef<Path>>(root: P) -> GitignoreBuilder {
        let root = root.as_ref();
        GitignoreBuilder {
            builder: GlobSetBuilder::new(),
            // A leading ./ is stripped here to mirror the stripping done on
            // candidate paths in `Gitignore::strip`.
            root: strip_prefix("./", root).unwrap_or(root).to_path_buf(),
            globs: vec![],
            case_insensitive: false,
            allow_unclosed_class: true,
        }
    }
    /// Builds a new matcher from the globs added so far.
    ///
    /// Once a matcher is built, no new globs can be added to it.
    pub fn build(&self) -> Result<Gitignore, Error> {
        let nignore = self.globs.iter().filter(|g| !g.is_whitelist()).count();
        let nwhite = self.globs.iter().filter(|g| g.is_whitelist()).count();
        let set = self
            .builder
            .build()
            .map_err(|err| Error::Glob { glob: None, err: err.to_string() })?;
        Ok(Gitignore {
            set,
            root: self.root.clone(),
            globs: self.globs.clone(),
            num_ignores: nignore as u64,
            num_whitelists: nwhite as u64,
            matches: Some(Arc::new(Pool::new(|| vec![]))),
        })
    }
    /// Build a global gitignore matcher using the configuration in this
    /// builder.
    ///
    /// This consumes ownership of the builder unlike `build` because it
    /// must mutate the builder to add the global gitignore globs.
    ///
    /// Note that this ignores the path given to this builder's constructor
    /// and instead derives the path automatically from git's global
    /// configuration.
    pub fn build_global(mut self) -> (Gitignore, Option<Error>) {
        match gitconfig_excludes_path() {
            None => (Gitignore::empty(), None),
            Some(path) => {
                if !path.is_file() {
                    (Gitignore::empty(), None)
                } else {
                    let mut errs = PartialErrorBuilder::default();
                    errs.maybe_push_ignore_io(self.add(path));
                    match self.build() {
                        Ok(gi) => (gi, errs.into_error_option()),
                        Err(err) => {
                            errs.push(err);
                            (Gitignore::empty(), errs.into_error_option())
                        }
                    }
                }
            }
        }
    }
    /// Add each glob from the file path given.
    ///
    /// The file given should be formatted as a `gitignore` file.
    ///
    /// Note that partial errors can be returned. For example, if there was
    /// a problem adding one glob, an error for that will be returned, but
    /// all other valid globs will still be added.
    pub fn add<P: AsRef<Path>>(&mut self, path: P) -> Option<Error> {
        let path = path.as_ref();
        let file = match File::open(path) {
            Err(err) => return Some(Error::Io(err).with_path(path)),
            Ok(file) => file,
        };
        log::debug!("opened gitignore file: {}", path.display());
        let rdr = BufReader::new(file);
        let mut errs = PartialErrorBuilder::default();
        for (i, line) in rdr.lines().enumerate() {
            let lineno = (i + 1) as u64;
            let line = match line {
                Ok(line) => line,
                Err(err) => {
                    errs.push(Error::Io(err).tagged(path, lineno));
                    break;
                }
            };
            // Match Git's handling of .gitignore files that begin with the Unicode BOM
            const UTF8_BOM: &str = "\u{feff}";
            let line =
                if i == 0 { line.trim_start_matches(UTF8_BOM) } else { &line };
            if let Err(err) = self.add_line(Some(path.to_path_buf()), &line) {
                errs.push(err.tagged(path, lineno));
            }
        }
        errs.into_error_option()
    }
    /// Add each glob line from the string given.
    ///
    /// If this string came from a particular `gitignore` file, then its path
    /// should be provided here.
    ///
    /// The string given should be formatted as a `gitignore` file.
    #[cfg(test)]
    fn add_str(
        &mut self,
        from: Option<PathBuf>,
        gitignore: &str,
    ) -> Result<&mut GitignoreBuilder, Error> {
        for line in gitignore.lines() {
            self.add_line(from.clone(), line)?;
        }
        Ok(self)
    }
    /// Add a line from a gitignore file to this builder.
    ///
    /// If this line came from a particular `gitignore` file, then its path
    /// should be provided here.
    ///
    /// If the line could not be parsed as a glob, then an error is returned.
    pub fn add_line(
        &mut self,
        from: Option<PathBuf>,
        mut line: &str,
    ) -> Result<&mut GitignoreBuilder, Error> {
        // Lines starting with `#` are comments.
        if line.starts_with("#") {
            return Ok(self);
        }
        // A line ending with a backslash-escaped space keeps that trailing
        // space; otherwise, trailing whitespace is insignificant.
        if !line.ends_with("\\ ") {
            line = line.trim_end();
        }
        if line.is_empty() {
            return Ok(self);
        }
        let mut glob = Glob {
            from,
            original: line.to_string(),
            actual: String::new(),
            is_whitelist: false,
            is_only_dir: false,
        };
        let mut is_absolute = false;
        if line.starts_with("\\!") || line.starts_with("\\#") {
            // An escaped `!` or `#` is matched literally, not treated as a
            // whitelist marker or comment.
            line = &line[1..];
            is_absolute = line.starts_with('/');
        } else {
            if line.starts_with("!") {
                glob.is_whitelist = true;
                line = &line[1..];
            }
            if line.starts_with("/") {
                // `man gitignore` says that if a glob starts with a slash,
                // then the glob can only match the beginning of a path
                // (relative to the location of gitignore). We achieve this by
                // simply banning wildcards from matching /.
                line = &line[1..];
                is_absolute = true;
            }
        }
        // If it ends with a slash, then this should only match directories,
        // but the slash should otherwise not be used while globbing.
        if line.as_bytes().last() == Some(&b'/') {
            glob.is_only_dir = true;
            line = &line[..line.len() - 1];
            // If the slash was escaped, then remove the escape.
            // See: https://github.com/BurntSushi/ripgrep/issues/2236
            if line.as_bytes().last() == Some(&b'\\') {
                line = &line[..line.len() - 1];
            }
        }
        glob.actual = line.to_string();
        // If there is a literal slash, then this is a glob that must match the
        // entire path name. Otherwise, we should let it match anywhere, so use
        // a **/ prefix.
        if !is_absolute && !line.chars().any(|c| c == '/') {
            // ... but only if we don't already have a **/ prefix.
            if !glob.has_doublestar_prefix() {
                glob.actual = format!("**/{}", glob.actual);
            }
        }
        // If the glob ends with `/**`, then we should only match everything
        // inside a directory, but not the directory itself. Standard globs
        // will match the directory. So we add `/*` to force the issue.
        if glob.actual.ends_with("/**") {
            glob.actual = format!("{}/*", glob.actual);
        }
        let parsed = GlobBuilder::new(&glob.actual)
            .literal_separator(true)
            .case_insensitive(self.case_insensitive)
            .backslash_escape(true)
            .allow_unclosed_class(self.allow_unclosed_class)
            .build()
            .map_err(|err| Error::Glob {
                glob: Some(glob.original.clone()),
                err: err.kind().to_string(),
            })?;
        self.builder.add(parsed);
        self.globs.push(glob);
        Ok(self)
    }
    /// Toggle whether the globs should be matched case insensitively or not.
    ///
    /// When this option is changed, only globs added after the change will be
    /// affected.
    ///
    /// This is disabled by default.
    pub fn case_insensitive(
        &mut self,
        yes: bool,
    ) -> Result<&mut GitignoreBuilder, Error> {
        // TODO: This should not return a `Result`. Fix this in the next semver
        // release.
        self.case_insensitive = yes;
        Ok(self)
    }
    /// Toggle whether unclosed character classes are allowed. When allowed,
    /// a `[` without a matching `]` is treated literally instead of resulting
    /// in a parse error.
    ///
    /// For example, if this is set then the glob `[abc` will be treated as the
    /// literal string `[abc` instead of returning an error.
    ///
    /// By default, this is true in order to match established `gitignore`
    /// semantics. Generally speaking, enabling this leads to worse failure
    /// modes since the glob parser becomes more permissive. You might want to
    /// enable this when compatibility (e.g., with POSIX glob implementations)
    /// is more important than good error messages.
    pub fn allow_unclosed_class(
        &mut self,
        yes: bool,
    ) -> &mut GitignoreBuilder {
        self.allow_unclosed_class = yes;
        self
    }
}
/// Return the file path of the current environment's global gitignore file.
///
/// Note that the file path returned may not exist.
pub fn gitconfig_excludes_path() -> Option<PathBuf> {
// git supports $HOME/.gitconfig and $XDG_CONFIG_HOME/git/config. Notably,
// both can be active at the same time, where $HOME/.gitconfig takes
// precedent. So if $HOME/.gitconfig defines a `core.excludesFile`, then
// we're done.
match gitconfig_home_contents().and_then(|x| parse_excludes_file(&x)) {
Some(path) => return Some(path),
None => {}
}
match gitconfig_xdg_contents().and_then(|x| parse_excludes_file(&x)) {
Some(path) => return Some(path),
None => {}
}
excludes_file_default()
}
/// Returns the file contents of git's global config file, if one exists, in
/// the user's home directory.
fn gitconfig_home_contents() -> Option<Vec<u8>> {
    // Any failure along the way (no home directory, file missing or
    // unreadable) simply yields `None`.
    let home = home_dir()?;
    let file = File::open(home.join(".gitconfig")).ok()?;
    let mut rdr = BufReader::new(file);
    let mut contents = vec![];
    rdr.read_to_end(&mut contents).ok()?;
    Some(contents)
}
/// Returns the file contents of git's global config file, if one exists, in
/// the user's XDG_CONFIG_HOME directory.
fn gitconfig_xdg_contents() -> Option<Vec<u8>> {
    // A set-but-empty $XDG_CONFIG_HOME is treated the same as an unset one:
    // fall back to $HOME/.config.
    let config_dir = match std::env::var_os("XDG_CONFIG_HOME") {
        Some(x) if !x.is_empty() => PathBuf::from(x),
        _ => home_dir()?.join(".config"),
    };
    let file = File::open(config_dir.join("git/config")).ok()?;
    let mut rdr = BufReader::new(file);
    let mut contents = vec![];
    rdr.read_to_end(&mut contents).ok()?;
    Some(contents)
}
/// Returns the default file path for a global .gitignore file.
///
/// Specifically, this respects XDG_CONFIG_HOME.
fn excludes_file_default() -> Option<PathBuf> {
    // A set-but-empty $XDG_CONFIG_HOME falls back to $HOME/.config, just
    // like an unset one.
    let config_dir = match std::env::var_os("XDG_CONFIG_HOME") {
        Some(x) if !x.is_empty() => PathBuf::from(x),
        _ => home_dir()?.join(".config"),
    };
    Some(config_dir.join("git/ignore"))
}
/// Extract git's `core.excludesfile` config setting from the raw file contents
/// given.
fn parse_excludes_file(data: &[u8]) -> Option<PathBuf> {
    use std::sync::OnceLock;
    use regex_automata::{meta::Regex, util::syntax};
    // N.B. This is the lazy approach, and isn't technically correct, but
    // probably works in more circumstances. I guess we would ideally have
    // a full INI parser. Yuck.
    //
    // The regex is compiled once and cached for the life of the process.
    // Flags: `i` = case insensitive, `m` = `^`/`$` match at line boundaries,
    // `-u` = disable Unicode mode (the config bytes may not be valid UTF-8).
    // Capture group 1 holds the (optionally quoted) file path value.
    static RE: OnceLock<Regex> = OnceLock::new();
    let re = RE.get_or_init(|| {
        Regex::builder()
            .configure(Regex::config().utf8_empty(false))
            .syntax(syntax::Config::new().utf8(false))
            .build(r#"(?im-u)^\s*excludesfile\s*=\s*"?\s*(\S+?)\s*"?\s*$"#)
            .unwrap()
    });
    // We don't care about amortizing allocs here I think. This should only
    // be called ~once per traversal or so? (Although it's not guaranteed...)
    let mut caps = re.create_captures();
    re.captures(data, &mut caps);
    let span = caps.get_group(1)?;
    let candidate = &data[span];
    // Non-UTF-8 values yield `None`; otherwise expand a `~` to $HOME.
    std::str::from_utf8(candidate).ok().map(|s| PathBuf::from(expand_tilde(s)))
}
/// Expands a leading `~` in the given file path to the value of `$HOME`.
///
/// Only a tilde at the very start of the path is expanded; a `~` elsewhere
/// in the path (e.g. `/foo/~bar`) is left intact, since such paths are
/// legitimate and should not be rewritten. If the home directory cannot be
/// determined, the path is returned unchanged.
fn expand_tilde(path: &str) -> String {
    let Some(home) = home_dir() else { return path.to_string() };
    match path.strip_prefix('~') {
        Some(rest) => format!("{}{}", home.to_string_lossy(), rest),
        None => path.to_string(),
    }
}
/// Returns the location of the user's home directory.
fn home_dir() -> Option<PathBuf> {
    // We're fine with using std::env::home_dir for now. Its bugs are, IMO,
    // pretty minor corner cases. The `allow` below silences the deprecation
    // warning on that function.
    #![allow(deprecated)]
    std::env::home_dir()
}
#[cfg(test)]
mod tests {
    use std::path::Path;
    use super::{Gitignore, GitignoreBuilder};
    /// Builds a `Gitignore` matcher rooted at `root` from a literal
    /// gitignore file body, panicking on any parse error.
    fn gi_from_str<P: AsRef<Path>>(root: P, s: &str) -> Gitignore {
        let mut builder = GitignoreBuilder::new(root);
        builder.add_str(None, s).unwrap();
        builder.build().unwrap()
    }
    // Generates a test asserting that `$path` IS ignored by the gitignore
    // body `$gi` rooted at `$root`. `$is_dir` defaults to false.
    macro_rules! ignored {
        ($name:ident, $root:expr, $gi:expr, $path:expr) => {
            ignored!($name, $root, $gi, $path, false);
        };
        ($name:ident, $root:expr, $gi:expr, $path:expr, $is_dir:expr) => {
            #[test]
            fn $name() {
                let gi = gi_from_str($root, $gi);
                assert!(gi.matched($path, $is_dir).is_ignore());
            }
        };
    }
    // Generates a test asserting that `$path` is NOT ignored by the
    // gitignore body `$gi` rooted at `$root`. `$is_dir` defaults to false.
    macro_rules! not_ignored {
        ($name:ident, $root:expr, $gi:expr, $path:expr) => {
            not_ignored!($name, $root, $gi, $path, false);
        };
        ($name:ident, $root:expr, $gi:expr, $path:expr, $is_dir:expr) => {
            #[test]
            fn $name() {
                let gi = gi_from_str($root, $gi);
                assert!(!gi.matched($path, $is_dir).is_ignore());
            }
        };
    }
    // A representative absolute root shared by most of the table tests.
    const ROOT: &'static str = "/home/foobar/rust/rg";
    ignored!(ig1, ROOT, "months", "months");
    ignored!(ig2, ROOT, "*.lock", "Cargo.lock");
    ignored!(ig3, ROOT, "*.rs", "src/main.rs");
    ignored!(ig4, ROOT, "src/*.rs", "src/main.rs");
    ignored!(ig5, ROOT, "/*.c", "cat-file.c");
    ignored!(ig6, ROOT, "/src/*.rs", "src/main.rs");
    ignored!(ig7, ROOT, "!src/main.rs\n*.rs", "src/main.rs");
    ignored!(ig8, ROOT, "foo/", "foo", true);
    ignored!(ig9, ROOT, "**/foo", "foo");
    ignored!(ig10, ROOT, "**/foo", "src/foo");
    ignored!(ig11, ROOT, "**/foo/**", "src/foo/bar");
    ignored!(ig12, ROOT, "**/foo/**", "wat/src/foo/bar/baz");
    ignored!(ig13, ROOT, "**/foo/bar", "foo/bar");
    ignored!(ig14, ROOT, "**/foo/bar", "src/foo/bar");
    ignored!(ig15, ROOT, "abc/**", "abc/x");
    ignored!(ig16, ROOT, "abc/**", "abc/x/y");
    ignored!(ig17, ROOT, "abc/**", "abc/x/y/z");
    ignored!(ig18, ROOT, "a/**/b", "a/b");
    ignored!(ig19, ROOT, "a/**/b", "a/x/b");
    ignored!(ig20, ROOT, "a/**/b", "a/x/y/b");
    ignored!(ig21, ROOT, r"\!xy", "!xy");
    ignored!(ig22, ROOT, r"\#foo", "#foo");
    ignored!(ig23, ROOT, "foo", "./foo");
    ignored!(ig24, ROOT, "target", "grep/target");
    ignored!(ig25, ROOT, "Cargo.lock", "./tabwriter-bin/Cargo.lock");
    ignored!(ig26, ROOT, "/foo/bar/baz", "./foo/bar/baz");
    ignored!(ig27, ROOT, "foo/", "xyz/foo", true);
    ignored!(ig28, "./src", "/llvm/", "./src/llvm", true);
    ignored!(ig29, ROOT, "node_modules/ ", "node_modules", true);
    ignored!(ig30, ROOT, "**/", "foo/bar", true);
    ignored!(ig31, ROOT, "path1/*", "path1/foo");
    ignored!(ig32, ROOT, ".a/b", ".a/b");
    ignored!(ig33, "./", ".a/b", ".a/b");
    ignored!(ig34, ".", ".a/b", ".a/b");
    ignored!(ig35, "./.", ".a/b", ".a/b");
    ignored!(ig36, "././", ".a/b", ".a/b");
    ignored!(ig37, "././.", ".a/b", ".a/b");
    ignored!(ig38, ROOT, "\\[", "[");
    ignored!(ig39, ROOT, "\\?", "?");
    ignored!(ig40, ROOT, "\\*", "*");
    ignored!(ig41, ROOT, "\\a", "a");
    ignored!(ig42, ROOT, "s*.rs", "sfoo.rs");
    ignored!(ig43, ROOT, "**", "foo.rs");
    ignored!(ig44, ROOT, "**/**/*", "a/foo.rs");
    not_ignored!(ignot1, ROOT, "amonths", "months");
    not_ignored!(ignot2, ROOT, "monthsa", "months");
    not_ignored!(ignot3, ROOT, "/src/*.rs", "src/grep/src/main.rs");
    not_ignored!(ignot4, ROOT, "/*.c", "mozilla-sha1/sha1.c");
    not_ignored!(ignot5, ROOT, "/src/*.rs", "src/grep/src/main.rs");
    not_ignored!(ignot6, ROOT, "*.rs\n!src/main.rs", "src/main.rs");
    not_ignored!(ignot7, ROOT, "foo/", "foo", false);
    not_ignored!(ignot8, ROOT, "**/foo/**", "wat/src/afoo/bar/baz");
    not_ignored!(ignot9, ROOT, "**/foo/**", "wat/src/fooa/bar/baz");
    not_ignored!(ignot10, ROOT, "**/foo/bar", "foo/src/bar");
    not_ignored!(ignot11, ROOT, "#foo", "#foo");
    not_ignored!(ignot12, ROOT, "\n\n\n", "foo");
    not_ignored!(ignot13, ROOT, "foo/**", "foo", true);
    not_ignored!(
        ignot14,
        "./third_party/protobuf",
        "m4/ltoptions.m4",
        "./third_party/protobuf/csharp/src/packages/repositories.config"
    );
    not_ignored!(ignot15, ROOT, "!/bar", "foo/bar");
    not_ignored!(ignot16, ROOT, "*\n!**/", "foo", true);
    not_ignored!(ignot17, ROOT, "src/*.rs", "src/grep/src/main.rs");
    not_ignored!(ignot18, ROOT, "path1/*", "path2/path1/foo");
    not_ignored!(ignot19, ROOT, "s*.rs", "src/foo.rs");
    // Helper: a gitconfig body as raw bytes, as `parse_excludes_file`
    // expects.
    fn bytes(s: &str) -> Vec<u8> {
        s.to_string().into_bytes()
    }
    // Helper: render a path as a `String` for equality assertions.
    fn path_string<P: AsRef<Path>>(path: P) -> String {
        path.as_ref().to_str().unwrap().to_string()
    }
    #[test]
    fn parse_excludes_file1() {
        let data = bytes("[core]\nexcludesFile = /foo/bar");
        let got = super::parse_excludes_file(&data).unwrap();
        assert_eq!(path_string(got), "/foo/bar");
    }
    #[test]
    fn parse_excludes_file2() {
        let data = bytes("[core]\nexcludesFile = ~/foo/bar");
        let got = super::parse_excludes_file(&data).unwrap();
        assert_eq!(path_string(got), super::expand_tilde("~/foo/bar"));
    }
    #[test]
    fn parse_excludes_file3() {
        let data = bytes("[core]\nexcludeFile = /foo/bar");
        assert!(super::parse_excludes_file(&data).is_none());
    }
    #[test]
    fn parse_excludes_file4() {
        let data = bytes("[core]\nexcludesFile = \"~/foo/bar\"");
        let got = super::parse_excludes_file(&data);
        assert_eq!(
            path_string(got.unwrap()),
            super::expand_tilde("~/foo/bar")
        );
    }
    #[test]
    fn parse_excludes_file5() {
        let data = bytes("[core]\nexcludesFile = \" \"~/foo/bar \" \"");
        assert!(super::parse_excludes_file(&data).is_none());
    }
    // See: https://github.com/BurntSushi/ripgrep/issues/106
    #[test]
    fn regression_106() {
        gi_from_str("/", " ");
    }
    #[test]
    fn case_insensitive() {
        let gi = GitignoreBuilder::new(ROOT)
            .case_insensitive(true)
            .unwrap()
            .add_str(None, "*.html")
            .unwrap()
            .build()
            .unwrap();
        assert!(gi.matched("foo.html", false).is_ignore());
        assert!(gi.matched("foo.HTML", false).is_ignore());
        assert!(!gi.matched("foo.htm", false).is_ignore());
        assert!(!gi.matched("foo.HTM", false).is_ignore());
    }
    ignored!(cs1, ROOT, "*.html", "foo.html");
    not_ignored!(cs2, ROOT, "*.html", "foo.HTML");
    not_ignored!(cs3, ROOT, "*.html", "foo.htm");
    not_ignored!(cs4, ROOT, "*.html", "foo.HTM");
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/ignore/src/lib.rs | crates/ignore/src/lib.rs | /*!
The ignore crate provides a fast recursive directory iterator that respects
various filters such as globs, file types and `.gitignore` files. The precise
matching rules and precedence is explained in the documentation for
`WalkBuilder`.
Secondarily, this crate exposes gitignore and file type matchers for use cases
that demand more fine-grained control.
# Example
This example shows the most basic usage of this crate. This code will
recursively traverse the current directory while automatically filtering out
files and directories according to ignore globs found in files like
`.ignore` and `.gitignore`:
```rust,no_run
use ignore::Walk;
for result in Walk::new("./") {
// Each item yielded by the iterator is either a directory entry or an
// error, so either print the path or the error.
match result {
Ok(entry) => println!("{}", entry.path().display()),
Err(err) => println!("ERROR: {}", err),
}
}
```
# Example: advanced
By default, the recursive directory iterator will ignore hidden files and
directories. This can be disabled by building the iterator with `WalkBuilder`:
```rust,no_run
use ignore::WalkBuilder;
for result in WalkBuilder::new("./").hidden(false).build() {
println!("{:?}", result);
}
```
See the documentation for `WalkBuilder` for many other options.
*/
#![deny(missing_docs)]
use std::path::{Path, PathBuf};
pub use crate::walk::{
DirEntry, ParallelVisitor, ParallelVisitorBuilder, Walk, WalkBuilder,
WalkParallel, WalkState,
};
mod default_types;
mod dir;
pub mod gitignore;
pub mod overrides;
mod pathutil;
pub mod types;
mod walk;
/// Represents an error that can occur when parsing a gitignore file.
#[derive(Debug)]
pub enum Error {
/// A collection of "soft" errors. These occur when adding an ignore
/// file partially succeeded.
Partial(Vec<Error>),
/// An error associated with a specific line number.
WithLineNumber {
/// The line number.
line: u64,
/// The underlying error.
err: Box<Error>,
},
/// An error associated with a particular file path.
WithPath {
/// The file path.
path: PathBuf,
/// The underlying error.
err: Box<Error>,
},
/// An error associated with a particular directory depth when recursively
/// walking a directory.
WithDepth {
/// The directory depth.
depth: usize,
/// The underlying error.
err: Box<Error>,
},
/// An error that occurs when a file loop is detected when traversing
/// symbolic links.
Loop {
/// The ancestor file path in the loop.
ancestor: PathBuf,
/// The child file path in the loop.
child: PathBuf,
},
/// An error that occurs when doing I/O, such as reading an ignore file.
Io(std::io::Error),
/// An error that occurs when trying to parse a glob.
Glob {
/// The original glob that caused this error. This glob, when
/// available, always corresponds to the glob provided by an end user.
/// e.g., It is the glob as written in a `.gitignore` file.
///
/// (This glob may be distinct from the glob that is actually
/// compiled, after accounting for `gitignore` semantics.)
glob: Option<String>,
/// The underlying glob error as a string.
err: String,
},
/// A type selection for a file type that is not defined.
UnrecognizedFileType(String),
/// A user specified file type definition could not be parsed.
InvalidDefinition,
}
impl Clone for Error {
fn clone(&self) -> Error {
match *self {
Error::Partial(ref errs) => Error::Partial(errs.clone()),
Error::WithLineNumber { line, ref err } => {
Error::WithLineNumber { line, err: err.clone() }
}
Error::WithPath { ref path, ref err } => {
Error::WithPath { path: path.clone(), err: err.clone() }
}
Error::WithDepth { depth, ref err } => {
Error::WithDepth { depth, err: err.clone() }
}
Error::Loop { ref ancestor, ref child } => Error::Loop {
ancestor: ancestor.clone(),
child: child.clone(),
},
Error::Io(ref err) => match err.raw_os_error() {
Some(e) => Error::Io(std::io::Error::from_raw_os_error(e)),
None => {
Error::Io(std::io::Error::new(err.kind(), err.to_string()))
}
},
Error::Glob { ref glob, ref err } => {
Error::Glob { glob: glob.clone(), err: err.clone() }
}
Error::UnrecognizedFileType(ref err) => {
Error::UnrecognizedFileType(err.clone())
}
Error::InvalidDefinition => Error::InvalidDefinition,
}
}
}
impl Error {
/// Returns true if this is a partial error.
///
/// A partial error occurs when only some operations failed while others
/// may have succeeded. For example, an ignore file may contain an invalid
/// glob among otherwise valid globs.
pub fn is_partial(&self) -> bool {
match *self {
Error::Partial(_) => true,
Error::WithLineNumber { ref err, .. } => err.is_partial(),
Error::WithPath { ref err, .. } => err.is_partial(),
Error::WithDepth { ref err, .. } => err.is_partial(),
_ => false,
}
}
/// Returns true if this error is exclusively an I/O error.
pub fn is_io(&self) -> bool {
match *self {
Error::Partial(ref errs) => errs.len() == 1 && errs[0].is_io(),
Error::WithLineNumber { ref err, .. } => err.is_io(),
Error::WithPath { ref err, .. } => err.is_io(),
Error::WithDepth { ref err, .. } => err.is_io(),
Error::Loop { .. } => false,
Error::Io(_) => true,
Error::Glob { .. } => false,
Error::UnrecognizedFileType(_) => false,
Error::InvalidDefinition => false,
}
}
/// Inspect the original [`std::io::Error`] if there is one.
///
/// [`None`] is returned if the [`Error`] doesn't correspond to an
/// [`std::io::Error`]. This might happen, for example, when the error was
/// produced because a cycle was found in the directory tree while
/// following symbolic links.
///
/// This method returns a borrowed value that is bound to the lifetime of the [`Error`]. To
/// obtain an owned value, the [`into_io_error`] can be used instead.
///
/// > This is the original [`std::io::Error`] and is _not_ the same as
/// > [`impl From<Error> for std::io::Error`][impl] which contains
/// > additional context about the error.
///
/// [`None`]: https://doc.rust-lang.org/stable/std/option/enum.Option.html#variant.None
/// [`std::io::Error`]: https://doc.rust-lang.org/stable/std/io/struct.Error.html
/// [`From`]: https://doc.rust-lang.org/stable/std/convert/trait.From.html
/// [`Error`]: struct.Error.html
/// [`into_io_error`]: struct.Error.html#method.into_io_error
/// [impl]: struct.Error.html#impl-From%3CError%3E
pub fn io_error(&self) -> Option<&std::io::Error> {
match *self {
Error::Partial(ref errs) => {
if errs.len() == 1 {
errs[0].io_error()
} else {
None
}
}
Error::WithLineNumber { ref err, .. } => err.io_error(),
Error::WithPath { ref err, .. } => err.io_error(),
Error::WithDepth { ref err, .. } => err.io_error(),
Error::Loop { .. } => None,
Error::Io(ref err) => Some(err),
Error::Glob { .. } => None,
Error::UnrecognizedFileType(_) => None,
Error::InvalidDefinition => None,
}
}
/// Similar to [`io_error`] except consumes self to convert to the original
/// [`std::io::Error`] if one exists.
///
/// [`io_error`]: struct.Error.html#method.io_error
/// [`std::io::Error`]: https://doc.rust-lang.org/stable/std/io/struct.Error.html
pub fn into_io_error(self) -> Option<std::io::Error> {
match self {
Error::Partial(mut errs) => {
if errs.len() == 1 {
errs.remove(0).into_io_error()
} else {
None
}
}
Error::WithLineNumber { err, .. } => err.into_io_error(),
Error::WithPath { err, .. } => err.into_io_error(),
Error::WithDepth { err, .. } => err.into_io_error(),
Error::Loop { .. } => None,
Error::Io(err) => Some(err),
Error::Glob { .. } => None,
Error::UnrecognizedFileType(_) => None,
Error::InvalidDefinition => None,
}
}
/// Returns a depth associated with recursively walking a directory (if
/// this error was generated from a recursive directory iterator).
pub fn depth(&self) -> Option<usize> {
match *self {
Error::WithPath { ref err, .. } => err.depth(),
Error::WithDepth { depth, .. } => Some(depth),
_ => None,
}
}
/// Turn an error into a tagged error with the given file path.
fn with_path<P: AsRef<Path>>(self, path: P) -> Error {
Error::WithPath {
path: path.as_ref().to_path_buf(),
err: Box::new(self),
}
}
/// Turn an error into a tagged error with the given depth.
fn with_depth(self, depth: usize) -> Error {
Error::WithDepth { depth, err: Box::new(self) }
}
/// Turn an error into a tagged error with the given file path and line
/// number. If path is empty, then it is omitted from the error.
fn tagged<P: AsRef<Path>>(self, path: P, lineno: u64) -> Error {
let errline =
Error::WithLineNumber { line: lineno, err: Box::new(self) };
if path.as_ref().as_os_str().is_empty() {
return errline;
}
errline.with_path(path)
}
/// Build an error from a walkdir error.
fn from_walkdir(err: walkdir::Error) -> Error {
let depth = err.depth();
if let (Some(anc), Some(child)) = (err.loop_ancestor(), err.path()) {
return Error::WithDepth {
depth,
err: Box::new(Error::Loop {
ancestor: anc.to_path_buf(),
child: child.to_path_buf(),
}),
};
}
let path = err.path().map(|p| p.to_path_buf());
let mut ig_err = Error::Io(std::io::Error::from(err));
if let Some(path) = path {
ig_err = Error::WithPath { path, err: Box::new(ig_err) };
}
ig_err
}
}
impl std::error::Error for Error {
#[allow(deprecated)]
fn description(&self) -> &str {
match *self {
Error::Partial(_) => "partial error",
Error::WithLineNumber { ref err, .. } => err.description(),
Error::WithPath { ref err, .. } => err.description(),
Error::WithDepth { ref err, .. } => err.description(),
Error::Loop { .. } => "file system loop found",
Error::Io(ref err) => err.description(),
Error::Glob { ref err, .. } => err,
Error::UnrecognizedFileType(_) => "unrecognized file type",
Error::InvalidDefinition => "invalid definition",
}
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match *self {
Error::Partial(ref errs) => {
let msgs: Vec<String> =
errs.iter().map(|err| err.to_string()).collect();
write!(f, "{}", msgs.join("\n"))
}
Error::WithLineNumber { line, ref err } => {
write!(f, "line {}: {}", line, err)
}
Error::WithPath { ref path, ref err } => {
write!(f, "{}: {}", path.display(), err)
}
Error::WithDepth { ref err, .. } => err.fmt(f),
Error::Loop { ref ancestor, ref child } => write!(
f,
"File system loop found: \
{} points to an ancestor {}",
child.display(),
ancestor.display()
),
Error::Io(ref err) => err.fmt(f),
Error::Glob { glob: None, ref err } => write!(f, "{}", err),
Error::Glob { glob: Some(ref glob), ref err } => {
write!(f, "error parsing glob '{}': {}", glob, err)
}
Error::UnrecognizedFileType(ref ty) => {
write!(f, "unrecognized file type: {}", ty)
}
Error::InvalidDefinition => write!(
f,
"invalid definition (format is type:glob, e.g., \
html:*.html)"
),
}
}
}
impl From<std::io::Error> for Error {
fn from(err: std::io::Error) -> Error {
Error::Io(err)
}
}
#[derive(Debug, Default)]
struct PartialErrorBuilder(Vec<Error>);
impl PartialErrorBuilder {
fn push(&mut self, err: Error) {
self.0.push(err);
}
fn push_ignore_io(&mut self, err: Error) {
if !err.is_io() {
self.push(err);
}
}
fn maybe_push(&mut self, err: Option<Error>) {
if let Some(err) = err {
self.push(err);
}
}
fn maybe_push_ignore_io(&mut self, err: Option<Error>) {
if let Some(err) = err {
self.push_ignore_io(err);
}
}
fn into_error_option(mut self) -> Option<Error> {
if self.0.is_empty() {
None
} else if self.0.len() == 1 {
Some(self.0.pop().unwrap())
} else {
Some(Error::Partial(self.0))
}
}
}
/// The result of a glob match.
///
/// The type parameter `T` typically refers to a type that provides more
/// information about a particular match. For example, it might identify
/// the specific gitignore file and the specific glob pattern that caused
/// the match.
#[derive(Clone, Debug)]
pub enum Match<T> {
/// The path didn't match any glob.
None,
/// The highest precedent glob matched indicates the path should be
/// ignored.
Ignore(T),
/// The highest precedent glob matched indicates the path should be
/// whitelisted.
Whitelist(T),
}
impl<T> Match<T> {
/// Returns true if the match result didn't match any globs.
pub fn is_none(&self) -> bool {
match *self {
Match::None => true,
Match::Ignore(_) | Match::Whitelist(_) => false,
}
}
/// Returns true if the match result implies the path should be ignored.
pub fn is_ignore(&self) -> bool {
match *self {
Match::Ignore(_) => true,
Match::None | Match::Whitelist(_) => false,
}
}
/// Returns true if the match result implies the path should be
/// whitelisted.
pub fn is_whitelist(&self) -> bool {
match *self {
Match::Whitelist(_) => true,
Match::None | Match::Ignore(_) => false,
}
}
/// Inverts the match so that `Ignore` becomes `Whitelist` and
/// `Whitelist` becomes `Ignore`. A non-match remains the same.
pub fn invert(self) -> Match<T> {
match self {
Match::None => Match::None,
Match::Ignore(t) => Match::Whitelist(t),
Match::Whitelist(t) => Match::Ignore(t),
}
}
/// Return the value inside this match if it exists.
pub fn inner(&self) -> Option<&T> {
match *self {
Match::None => None,
Match::Ignore(ref t) => Some(t),
Match::Whitelist(ref t) => Some(t),
}
}
/// Apply the given function to the value inside this match.
///
/// If the match has no value, then return the match unchanged.
pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Match<U> {
match self {
Match::None => Match::None,
Match::Ignore(t) => Match::Ignore(f(t)),
Match::Whitelist(t) => Match::Whitelist(f(t)),
}
}
/// Return the match if it is not none. Otherwise, return other.
pub fn or(self, other: Self) -> Self {
if self.is_none() { other } else { self }
}
}
#[cfg(test)]
mod tests {
use std::{
env, fs,
path::{Path, PathBuf},
};
/// A convenient result type alias.
pub(crate) type Result<T> =
std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
macro_rules! err {
($($tt:tt)*) => {
Box::<dyn std::error::Error + Send + Sync>::from(format!($($tt)*))
}
}
/// A simple wrapper for creating a temporary directory that is
/// automatically deleted when it's dropped.
///
/// We use this in lieu of tempfile because tempfile brings in too many
/// dependencies.
#[derive(Debug)]
pub struct TempDir(PathBuf);
impl Drop for TempDir {
fn drop(&mut self) {
fs::remove_dir_all(&self.0).unwrap();
}
}
impl TempDir {
/// Create a new empty temporary directory under the system's configured
/// temporary directory.
pub fn new() -> Result<TempDir> {
use std::sync::atomic::{AtomicUsize, Ordering};
static TRIES: usize = 100;
static COUNTER: AtomicUsize = AtomicUsize::new(0);
let tmpdir = env::temp_dir();
for _ in 0..TRIES {
let count = COUNTER.fetch_add(1, Ordering::Relaxed);
let path = tmpdir.join("rust-ignore").join(count.to_string());
if path.is_dir() {
continue;
}
fs::create_dir_all(&path).map_err(|e| {
err!("failed to create {}: {}", path.display(), e)
})?;
return Ok(TempDir(path));
}
Err(err!("failed to create temp dir after {} tries", TRIES))
}
/// Return the underlying path to this temporary directory.
pub fn path(&self) -> &Path {
&self.0
}
}
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/ignore/src/pathutil.rs | crates/ignore/src/pathutil.rs | use std::{ffi::OsStr, path::Path};
use crate::walk::DirEntry;
/// Returns true if and only if this entry is considered to be hidden.
///
/// This only returns true if the base name of the path starts with a `.`.
///
/// On Unix, this implements a more optimized check.
#[cfg(unix)]
pub(crate) fn is_hidden(dent: &DirEntry) -> bool {
use std::os::unix::ffi::OsStrExt;
if let Some(name) = file_name(dent.path()) {
name.as_bytes().get(0) == Some(&b'.')
} else {
false
}
}
/// Returns true if and only if this entry is considered to be hidden.
///
/// On Windows, this returns true if one of the following is true:
///
/// * The base name of the path starts with a `.`.
/// * The file attributes have the `HIDDEN` property set.
#[cfg(windows)]
pub(crate) fn is_hidden(dent: &DirEntry) -> bool {
use std::os::windows::fs::MetadataExt;
use winapi_util::file;
// This looks like we're doing an extra stat call, but on Windows, the
// directory traverser reuses the metadata retrieved from each directory
// entry and stores it on the DirEntry itself. So this is "free."
if let Ok(md) = dent.metadata() {
if file::is_hidden(md.file_attributes() as u64) {
return true;
}
}
if let Some(name) = file_name(dent.path()) {
name.to_str().map(|s| s.starts_with(".")).unwrap_or(false)
} else {
false
}
}
/// Returns true if and only if this entry is considered to be hidden.
///
/// This only returns true if the base name of the path starts with a `.`.
#[cfg(not(any(unix, windows)))]
pub(crate) fn is_hidden(dent: &DirEntry) -> bool {
if let Some(name) = file_name(dent.path()) {
name.to_str().map(|s| s.starts_with(".")).unwrap_or(false)
} else {
false
}
}
/// Strip `prefix` from the `path` and return the remainder.
///
/// If `path` doesn't have a prefix `prefix`, then return `None`.
#[cfg(unix)]
pub(crate) fn strip_prefix<'a, P: AsRef<Path> + ?Sized>(
prefix: &'a P,
path: &'a Path,
) -> Option<&'a Path> {
use std::os::unix::ffi::OsStrExt;
let prefix = prefix.as_ref().as_os_str().as_bytes();
let path = path.as_os_str().as_bytes();
if prefix.len() > path.len() || prefix != &path[0..prefix.len()] {
None
} else {
Some(&Path::new(OsStr::from_bytes(&path[prefix.len()..])))
}
}
/// Strip `prefix` from the `path` and return the remainder.
///
/// If `path` doesn't have a prefix `prefix`, then return `None`.
#[cfg(not(unix))]
pub(crate) fn strip_prefix<'a, P: AsRef<Path> + ?Sized>(
prefix: &'a P,
path: &'a Path,
) -> Option<&'a Path> {
path.strip_prefix(prefix).ok()
}
/// Returns true if this file path is just a file name. i.e., Its parent is
/// the empty string.
#[cfg(unix)]
pub(crate) fn is_file_name<P: AsRef<Path>>(path: P) -> bool {
use std::os::unix::ffi::OsStrExt;
use memchr::memchr;
let path = path.as_ref().as_os_str().as_bytes();
memchr(b'/', path).is_none()
}
/// Returns true if this file path is just a file name. i.e., Its parent is
/// the empty string.
#[cfg(not(unix))]
pub(crate) fn is_file_name<P: AsRef<Path>>(path: P) -> bool {
path.as_ref().parent().map(|p| p.as_os_str().is_empty()).unwrap_or(false)
}
/// The final component of the path, if it is a normal file.
///
/// If the path terminates in ., .., or consists solely of a root of prefix,
/// file_name will return None.
#[cfg(unix)]
pub(crate) fn file_name<'a, P: AsRef<Path> + ?Sized>(
path: &'a P,
) -> Option<&'a OsStr> {
use memchr::memrchr;
use std::os::unix::ffi::OsStrExt;
let path = path.as_ref().as_os_str().as_bytes();
if path.is_empty() {
return None;
} else if path.len() == 1 && path[0] == b'.' {
return None;
} else if path.last() == Some(&b'.') {
return None;
} else if path.len() >= 2 && &path[path.len() - 2..] == &b".."[..] {
return None;
}
let last_slash = memrchr(b'/', path).map(|i| i + 1).unwrap_or(0);
Some(OsStr::from_bytes(&path[last_slash..]))
}
/// The final component of the path, if it is a normal file.
///
/// If the path terminates in ., .., or consists solely of a root of prefix,
/// file_name will return None.
#[cfg(not(unix))]
pub(crate) fn file_name<'a, P: AsRef<Path> + ?Sized>(
path: &'a P,
) -> Option<&'a OsStr> {
path.as_ref().file_name()
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/ignore/src/overrides.rs | crates/ignore/src/overrides.rs | /*!
The overrides module provides a way to specify a set of override globs.
This provides functionality similar to `--include` or `--exclude` in command
line tools.
*/
use std::path::Path;
use crate::{
Error, Match,
gitignore::{self, Gitignore, GitignoreBuilder},
};
/// Glob represents a single glob in an override matcher.
///
/// This is used to report information about the highest precedent glob
/// that matched.
///
/// Note that not all matches necessarily correspond to a specific glob. For
/// example, if there are one or more whitelist globs and a file path doesn't
/// match any glob in the set, then the file path is considered to be ignored.
///
/// The lifetime `'a` refers to the lifetime of the matcher that produced
/// this glob.
#[derive(Clone, Debug)]
#[allow(dead_code)]
pub struct Glob<'a>(GlobInner<'a>);
#[derive(Clone, Debug)]
#[allow(dead_code)]
enum GlobInner<'a> {
/// No glob matched, but the file path should still be ignored.
UnmatchedIgnore,
/// A glob matched.
Matched(&'a gitignore::Glob),
}
impl<'a> Glob<'a> {
fn unmatched() -> Glob<'a> {
Glob(GlobInner::UnmatchedIgnore)
}
}
/// Manages a set of overrides provided explicitly by the end user.
#[derive(Clone, Debug)]
pub struct Override(Gitignore);
impl Override {
/// Returns an empty matcher that never matches any file path.
pub fn empty() -> Override {
Override(Gitignore::empty())
}
/// Returns the directory of this override set.
///
/// All matches are done relative to this path.
pub fn path(&self) -> &Path {
self.0.path()
}
/// Returns true if and only if this matcher is empty.
///
/// When a matcher is empty, it will never match any file path.
pub fn is_empty(&self) -> bool {
self.0.is_empty()
}
/// Returns the total number of ignore globs.
pub fn num_ignores(&self) -> u64 {
self.0.num_whitelists()
}
/// Returns the total number of whitelisted globs.
pub fn num_whitelists(&self) -> u64 {
self.0.num_ignores()
}
/// Returns whether the given file path matched a pattern in this override
/// matcher.
///
/// `is_dir` should be true if the path refers to a directory and false
/// otherwise.
///
/// If there are no overrides, then this always returns `Match::None`.
///
/// If there is at least one whitelist override and `is_dir` is false, then
/// this never returns `Match::None`, since non-matches are interpreted as
/// ignored.
///
/// The given path is matched to the globs relative to the path given
/// when building the override matcher. Specifically, before matching
/// `path`, its prefix (as determined by a common suffix of the directory
/// given) is stripped. If there is no common suffix/prefix overlap, then
/// `path` is assumed to reside in the same directory as the root path for
/// this set of overrides.
pub fn matched<'a, P: AsRef<Path>>(
&'a self,
path: P,
is_dir: bool,
) -> Match<Glob<'a>> {
if self.is_empty() {
return Match::None;
}
let mat = self.0.matched(path, is_dir).invert();
if mat.is_none() && self.num_whitelists() > 0 && !is_dir {
return Match::Ignore(Glob::unmatched());
}
mat.map(move |giglob| Glob(GlobInner::Matched(giglob)))
}
}
/// Builds a matcher for a set of glob overrides.
#[derive(Clone, Debug)]
pub struct OverrideBuilder {
builder: GitignoreBuilder,
}
impl OverrideBuilder {
/// Create a new override builder.
///
/// Matching is done relative to the directory path provided.
pub fn new<P: AsRef<Path>>(path: P) -> OverrideBuilder {
let mut builder = GitignoreBuilder::new(path);
builder.allow_unclosed_class(false);
OverrideBuilder { builder }
}
/// Builds a new override matcher from the globs added so far.
///
/// Once a matcher is built, no new globs can be added to it.
pub fn build(&self) -> Result<Override, Error> {
Ok(Override(self.builder.build()?))
}
/// Add a glob to the set of overrides.
///
/// Globs provided here have precisely the same semantics as a single
/// line in a `gitignore` file, where the meaning of `!` is inverted:
/// namely, `!` at the beginning of a glob will ignore a file. Without `!`,
/// all matches of the glob provided are treated as whitelist matches.
pub fn add(&mut self, glob: &str) -> Result<&mut OverrideBuilder, Error> {
self.builder.add_line(None, glob)?;
Ok(self)
}
/// Toggle whether the globs should be matched case insensitively or not.
///
/// When this option is changed, only globs added after the change will be
/// affected.
///
/// This is disabled by default.
pub fn case_insensitive(
&mut self,
yes: bool,
) -> Result<&mut OverrideBuilder, Error> {
// TODO: This should not return a `Result`. Fix this in the next semver
// release.
self.builder.case_insensitive(yes)?;
Ok(self)
}
/// Toggle whether unclosed character classes are allowed. When allowed,
/// a `[` without a matching `]` is treated literally instead of resulting
/// in a parse error.
///
/// For example, if this is set then the glob `[abc` will be treated as the
/// literal string `[abc` instead of returning an error.
///
/// By default, this is false. Generally speaking, enabling this leads to
/// worse failure modes since the glob parser becomes more permissive. You
/// might want to enable this when compatibility (e.g., with POSIX glob
/// implementations) is more important than good error messages.
///
/// This default is different from the default for [`Gitignore`]. Namely,
/// [`Gitignore`] is intended to match git's behavior as-is. But this
/// abstraction for "override" globs does not necessarily conform to any
/// other known specification and instead prioritizes better error
/// messages.
pub fn allow_unclosed_class(&mut self, yes: bool) -> &mut OverrideBuilder {
self.builder.allow_unclosed_class(yes);
self
}
}
#[cfg(test)]
mod tests {
use super::{Override, OverrideBuilder};
const ROOT: &'static str = "/home/andrew/foo";
fn ov(globs: &[&str]) -> Override {
let mut builder = OverrideBuilder::new(ROOT);
for glob in globs {
builder.add(glob).unwrap();
}
builder.build().unwrap()
}
#[test]
fn empty() {
let ov = ov(&[]);
assert!(ov.matched("a.foo", false).is_none());
assert!(ov.matched("a", false).is_none());
assert!(ov.matched("", false).is_none());
}
#[test]
fn simple() {
let ov = ov(&["*.foo", "!*.bar"]);
assert!(ov.matched("a.foo", false).is_whitelist());
assert!(ov.matched("a.foo", true).is_whitelist());
assert!(ov.matched("a.rs", false).is_ignore());
assert!(ov.matched("a.rs", true).is_none());
assert!(ov.matched("a.bar", false).is_ignore());
assert!(ov.matched("a.bar", true).is_ignore());
}
#[test]
fn only_ignores() {
let ov = ov(&["!*.bar"]);
assert!(ov.matched("a.rs", false).is_none());
assert!(ov.matched("a.rs", true).is_none());
assert!(ov.matched("a.bar", false).is_ignore());
assert!(ov.matched("a.bar", true).is_ignore());
}
#[test]
fn precedence() {
let ov = ov(&["*.foo", "!*.bar.foo"]);
assert!(ov.matched("a.foo", false).is_whitelist());
assert!(ov.matched("a.baz", false).is_ignore());
assert!(ov.matched("a.bar.foo", false).is_ignore());
}
#[test]
fn gitignore() {
let ov = ov(&["/foo", "bar/*.rs", "baz/**"]);
assert!(ov.matched("bar/lib.rs", false).is_whitelist());
assert!(ov.matched("bar/wat/lib.rs", false).is_ignore());
assert!(ov.matched("wat/bar/lib.rs", false).is_ignore());
assert!(ov.matched("foo", false).is_whitelist());
assert!(ov.matched("wat/foo", false).is_ignore());
assert!(ov.matched("baz", false).is_ignore());
assert!(ov.matched("baz/a", false).is_whitelist());
assert!(ov.matched("baz/a/b", false).is_whitelist());
}
#[test]
fn allow_directories() {
// This tests that directories are NOT ignored when they are unmatched.
let ov = ov(&["*.rs"]);
assert!(ov.matched("foo.rs", false).is_whitelist());
assert!(ov.matched("foo.c", false).is_ignore());
assert!(ov.matched("foo", false).is_ignore());
assert!(ov.matched("foo", true).is_none());
assert!(ov.matched("src/foo.rs", false).is_whitelist());
assert!(ov.matched("src/foo.c", false).is_ignore());
assert!(ov.matched("src/foo", false).is_ignore());
assert!(ov.matched("src/foo", true).is_none());
}
#[test]
fn absolute_path() {
let ov = ov(&["!/bar"]);
assert!(ov.matched("./foo/bar", false).is_none());
}
#[test]
fn case_insensitive() {
let ov = OverrideBuilder::new(ROOT)
.case_insensitive(true)
.unwrap()
.add("*.html")
.unwrap()
.build()
.unwrap();
assert!(ov.matched("foo.html", false).is_whitelist());
assert!(ov.matched("foo.HTML", false).is_whitelist());
assert!(ov.matched("foo.htm", false).is_ignore());
assert!(ov.matched("foo.HTM", false).is_ignore());
}
#[test]
fn default_case_sensitive() {
let ov =
OverrideBuilder::new(ROOT).add("*.html").unwrap().build().unwrap();
assert!(ov.matched("foo.html", false).is_whitelist());
assert!(ov.matched("foo.HTML", false).is_ignore());
assert!(ov.matched("foo.htm", false).is_ignore());
assert!(ov.matched("foo.HTM", false).is_ignore());
}
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/ignore/src/walk.rs | crates/ignore/src/walk.rs | use std::{
cmp::Ordering,
ffi::OsStr,
fs::{self, FileType, Metadata},
io,
path::{Path, PathBuf},
sync::atomic::{AtomicBool, AtomicUsize, Ordering as AtomicOrdering},
sync::{Arc, OnceLock},
};
use {
crossbeam_deque::{Stealer, Worker as Deque},
same_file::Handle,
walkdir::WalkDir,
};
use crate::{
Error, PartialErrorBuilder,
dir::{Ignore, IgnoreBuilder},
gitignore::GitignoreBuilder,
overrides::Override,
types::Types,
};
/// A directory entry with a possible error attached.
///
/// The error typically refers to a problem parsing ignore files in a
/// particular directory.
#[derive(Clone, Debug)]
pub struct DirEntry {
dent: DirEntryInner,
err: Option<Error>,
}
impl DirEntry {
/// The full path that this entry represents.
pub fn path(&self) -> &Path {
self.dent.path()
}
/// The full path that this entry represents.
/// Analogous to [`DirEntry::path`], but moves ownership of the path.
pub fn into_path(self) -> PathBuf {
self.dent.into_path()
}
/// Whether this entry corresponds to a symbolic link or not.
pub fn path_is_symlink(&self) -> bool {
self.dent.path_is_symlink()
}
/// Returns true if and only if this entry corresponds to stdin.
///
/// i.e., The entry has depth 0 and its file name is `-`.
pub fn is_stdin(&self) -> bool {
self.dent.is_stdin()
}
/// Return the metadata for the file that this entry points to.
pub fn metadata(&self) -> Result<Metadata, Error> {
self.dent.metadata()
}
/// Return the file type for the file that this entry points to.
///
/// This entry doesn't have a file type if it corresponds to stdin.
pub fn file_type(&self) -> Option<FileType> {
self.dent.file_type()
}
/// Return the file name of this entry.
///
/// If this entry has no file name (e.g., `/`), then the full path is
/// returned.
pub fn file_name(&self) -> &OsStr {
self.dent.file_name()
}
/// Returns the depth at which this entry was created relative to the root.
pub fn depth(&self) -> usize {
self.dent.depth()
}
/// Returns the underlying inode number if one exists.
///
/// If this entry doesn't have an inode number, then `None` is returned.
#[cfg(unix)]
pub fn ino(&self) -> Option<u64> {
self.dent.ino()
}
/// Returns an error, if one exists, associated with processing this entry.
///
/// An example of an error is one that occurred while parsing an ignore
/// file. Errors related to traversing a directory tree itself are reported
/// as part of yielding the directory entry, and not with this method.
pub fn error(&self) -> Option<&Error> {
self.err.as_ref()
}
/// Returns true if and only if this entry points to a directory.
pub(crate) fn is_dir(&self) -> bool {
self.dent.is_dir()
}
fn new_stdin() -> DirEntry {
DirEntry { dent: DirEntryInner::Stdin, err: None }
}
fn new_walkdir(dent: walkdir::DirEntry, err: Option<Error>) -> DirEntry {
DirEntry { dent: DirEntryInner::Walkdir(dent), err }
}
fn new_raw(dent: DirEntryRaw, err: Option<Error>) -> DirEntry {
DirEntry { dent: DirEntryInner::Raw(dent), err }
}
}
/// DirEntryInner is the implementation of DirEntry.
///
/// It specifically represents three distinct sources of directory entries:
///
/// 1. From the walkdir crate.
/// 2. Special entries that represent things like stdin.
/// 3. From a path.
///
/// Specifically, (3) has to essentially re-create the DirEntry implementation
/// from WalkDir.
#[derive(Clone, Debug)]
enum DirEntryInner {
Stdin,
Walkdir(walkdir::DirEntry),
Raw(DirEntryRaw),
}
impl DirEntryInner {
fn path(&self) -> &Path {
use self::DirEntryInner::*;
match *self {
Stdin => Path::new("<stdin>"),
Walkdir(ref x) => x.path(),
Raw(ref x) => x.path(),
}
}
fn into_path(self) -> PathBuf {
use self::DirEntryInner::*;
match self {
Stdin => PathBuf::from("<stdin>"),
Walkdir(x) => x.into_path(),
Raw(x) => x.into_path(),
}
}
fn path_is_symlink(&self) -> bool {
use self::DirEntryInner::*;
match *self {
Stdin => false,
Walkdir(ref x) => x.path_is_symlink(),
Raw(ref x) => x.path_is_symlink(),
}
}
fn is_stdin(&self) -> bool {
match *self {
DirEntryInner::Stdin => true,
_ => false,
}
}
fn metadata(&self) -> Result<Metadata, Error> {
use self::DirEntryInner::*;
match *self {
Stdin => {
let err = Error::Io(io::Error::new(
io::ErrorKind::Other,
"<stdin> has no metadata",
));
Err(err.with_path("<stdin>"))
}
Walkdir(ref x) => x.metadata().map_err(|err| {
Error::Io(io::Error::from(err)).with_path(x.path())
}),
Raw(ref x) => x.metadata(),
}
}
fn file_type(&self) -> Option<FileType> {
use self::DirEntryInner::*;
match *self {
Stdin => None,
Walkdir(ref x) => Some(x.file_type()),
Raw(ref x) => Some(x.file_type()),
}
}
fn file_name(&self) -> &OsStr {
use self::DirEntryInner::*;
match *self {
Stdin => OsStr::new("<stdin>"),
Walkdir(ref x) => x.file_name(),
Raw(ref x) => x.file_name(),
}
}
fn depth(&self) -> usize {
use self::DirEntryInner::*;
match *self {
Stdin => 0,
Walkdir(ref x) => x.depth(),
Raw(ref x) => x.depth(),
}
}
#[cfg(unix)]
fn ino(&self) -> Option<u64> {
use self::DirEntryInner::*;
use walkdir::DirEntryExt;
match *self {
Stdin => None,
Walkdir(ref x) => Some(x.ino()),
Raw(ref x) => Some(x.ino()),
}
}
/// Returns true if and only if this entry points to a directory.
fn is_dir(&self) -> bool {
self.file_type().map(|ft| ft.is_dir()).unwrap_or(false)
}
}
/// DirEntryRaw is essentially copied from the walkdir crate so that we can
/// build `DirEntry`s from whole cloth in the parallel iterator.
///
/// (`Debug` is implemented by hand below rather than derived.)
#[derive(Clone)]
struct DirEntryRaw {
    /// The path as reported by the `fs::ReadDir` iterator (even if it's a
    /// symbolic link).
    path: PathBuf,
    /// The file type. Necessary for recursive iteration, so store it.
    ty: FileType,
    /// Is set when this entry was created from a symbolic link and the user
    /// expects the iterator to follow symbolic links.
    follow_link: bool,
    /// The depth at which this entry was generated relative to the root.
    depth: usize,
    /// The underlying inode number (Unix only).
    #[cfg(unix)]
    ino: u64,
    /// The underlying metadata (Windows only). We store this on Windows
    /// because this comes for free while reading a directory.
    #[cfg(windows)]
    metadata: fs::Metadata,
}
impl std::fmt::Debug for DirEntryRaw {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // The `ty` field (and the Windows-only `metadata` field) are left
        // out: this impl was originally written when `FileType` had no
        // `Debug` impl, and the remaining fields have been sufficient for
        // diagnostics. ---AG
        f.debug_struct("DirEntryRaw")
            .field("path", &self.path)
            .field("follow_link", &self.follow_link)
            .field("depth", &self.depth)
            .finish()
    }
}
impl DirEntryRaw {
    /// Returns the path for this entry.
    fn path(&self) -> &Path {
        &self.path
    }
    /// Consumes this entry and returns its path.
    fn into_path(self) -> PathBuf {
        self.path
    }
    /// Returns true if this entry is a symbolic link, or was created while
    /// following one.
    fn path_is_symlink(&self) -> bool {
        self.ty.is_symlink() || self.follow_link
    }
    /// Returns metadata for this entry, dispatching to the
    /// platform-specific implementation.
    fn metadata(&self) -> Result<Metadata, Error> {
        self.metadata_internal()
    }
    #[cfg(windows)]
    fn metadata_internal(&self) -> Result<fs::Metadata, Error> {
        // On Windows the metadata was captured when the entry was created,
        // so it only needs to be re-read when following a symlink.
        if self.follow_link {
            fs::metadata(&self.path)
        } else {
            Ok(self.metadata.clone())
        }
        .map_err(|err| Error::Io(io::Error::from(err)).with_path(&self.path))
    }
    #[cfg(not(windows))]
    fn metadata_internal(&self) -> Result<fs::Metadata, Error> {
        // `fs::metadata` follows symlinks, while `fs::symlink_metadata`
        // reports on the link itself.
        if self.follow_link {
            fs::metadata(&self.path)
        } else {
            fs::symlink_metadata(&self.path)
        }
        .map_err(|err| Error::Io(io::Error::from(err)).with_path(&self.path))
    }
    /// Returns the file type of this entry.
    fn file_type(&self) -> FileType {
        self.ty
    }
    /// Returns the base name of this entry's path, or the whole path when
    /// it has no base name (e.g. `/`).
    fn file_name(&self) -> &OsStr {
        self.path.file_name().unwrap_or_else(|| self.path.as_os_str())
    }
    /// Returns the depth at which this entry was generated relative to the
    /// root of the traversal.
    fn depth(&self) -> usize {
        self.depth
    }
    /// Returns the underlying inode number (Unix only).
    #[cfg(unix)]
    fn ino(&self) -> u64 {
        self.ino
    }
    /// Builds an entry from a `fs::DirEntry`, recording the given depth.
    /// Errors are tagged with the entry's path and depth.
    fn from_entry(
        depth: usize,
        ent: &fs::DirEntry,
    ) -> Result<DirEntryRaw, Error> {
        let ty = ent.file_type().map_err(|err| {
            let err = Error::Io(io::Error::from(err)).with_path(ent.path());
            Error::WithDepth { depth, err: Box::new(err) }
        })?;
        DirEntryRaw::from_entry_os(depth, ent, ty)
    }
    #[cfg(windows)]
    fn from_entry_os(
        depth: usize,
        ent: &fs::DirEntry,
        ty: fs::FileType,
    ) -> Result<DirEntryRaw, Error> {
        // Capture metadata now: on Windows it comes for free while reading
        // the directory.
        let md = ent.metadata().map_err(|err| {
            let err = Error::Io(io::Error::from(err)).with_path(ent.path());
            Error::WithDepth { depth, err: Box::new(err) }
        })?;
        Ok(DirEntryRaw {
            path: ent.path(),
            ty,
            follow_link: false,
            depth,
            metadata: md,
        })
    }
    #[cfg(unix)]
    fn from_entry_os(
        depth: usize,
        ent: &fs::DirEntry,
        ty: fs::FileType,
    ) -> Result<DirEntryRaw, Error> {
        use std::os::unix::fs::DirEntryExt;
        Ok(DirEntryRaw {
            path: ent.path(),
            ty,
            follow_link: false,
            depth,
            ino: ent.ino(),
        })
    }
    // Placeholder implementation to allow compiling on non-standard platforms
    // (e.g. wasm32).
    #[cfg(not(any(windows, unix)))]
    fn from_entry_os(
        depth: usize,
        ent: &fs::DirEntry,
        ty: fs::FileType,
    ) -> Result<DirEntryRaw, Error> {
        Err(Error::Io(io::Error::new(
            io::ErrorKind::Other,
            "unsupported platform",
        )))
    }
    /// Builds an entry directly from a path by stat-ing it. `link`
    /// indicates whether the path was reached via a followed symlink.
    #[cfg(windows)]
    fn from_path(
        depth: usize,
        pb: PathBuf,
        link: bool,
    ) -> Result<DirEntryRaw, Error> {
        let md =
            fs::metadata(&pb).map_err(|err| Error::Io(err).with_path(&pb))?;
        Ok(DirEntryRaw {
            path: pb,
            ty: md.file_type(),
            follow_link: link,
            depth,
            metadata: md,
        })
    }
    /// Builds an entry directly from a path by stat-ing it. `link`
    /// indicates whether the path was reached via a followed symlink.
    #[cfg(unix)]
    fn from_path(
        depth: usize,
        pb: PathBuf,
        link: bool,
    ) -> Result<DirEntryRaw, Error> {
        use std::os::unix::fs::MetadataExt;
        let md =
            fs::metadata(&pb).map_err(|err| Error::Io(err).with_path(&pb))?;
        Ok(DirEntryRaw {
            path: pb,
            ty: md.file_type(),
            follow_link: link,
            depth,
            ino: md.ino(),
        })
    }
    // Placeholder implementation to allow compiling on non-standard platforms
    // (e.g. wasm32).
    #[cfg(not(any(windows, unix)))]
    fn from_path(
        depth: usize,
        pb: PathBuf,
        link: bool,
    ) -> Result<DirEntryRaw, Error> {
        Err(Error::Io(io::Error::new(
            io::ErrorKind::Other,
            "unsupported platform",
        )))
    }
}
/// WalkBuilder builds a recursive directory iterator.
///
/// The builder supports a large number of configurable options. This includes
/// specific glob overrides, file type matching, toggling whether hidden
/// files are ignored or not, and of course, support for respecting gitignore
/// files.
///
/// By default, all ignore files found are respected. This includes `.ignore`,
/// `.gitignore`, `.git/info/exclude` and even your global gitignore
/// globs, usually found in `$XDG_CONFIG_HOME/git/ignore`.
///
/// Some standard recursive directory options are also supported, such as
/// limiting the recursive depth or whether to follow symbolic links (disabled
/// by default).
///
/// # Ignore rules
///
/// There are many rules that influence whether a particular file or directory
/// is skipped by this iterator. Those rules are documented here. Note that
/// the rules assume a default configuration.
///
/// * First, glob overrides are checked. If a path matches a glob override,
/// then matching stops. The path is then only skipped if the glob that matched
/// the path is an ignore glob. (An override glob is a whitelist glob unless it
/// starts with a `!`, in which case it is an ignore glob.)
/// * Second, ignore files are checked. Ignore files currently only come from
/// git ignore files (`.gitignore`, `.git/info/exclude` and the configured
/// global gitignore file), plain `.ignore` files, which have the same format
/// as gitignore files, or explicitly added ignore files. The precedence order
/// is: `.ignore`, `.gitignore`, `.git/info/exclude`, global gitignore and
/// finally explicitly added ignore files. Note that precedence between
/// different types of ignore files is not impacted by the directory hierarchy;
/// any `.ignore` file overrides all `.gitignore` files. Within each precedence
/// level, more nested ignore files have a higher precedence than less nested
/// ignore files.
/// * Third, if the previous step yields an ignore match, then all matching
/// is stopped and the path is skipped. If it yields a whitelist match, then
/// matching continues. A whitelist match can be overridden by a later matcher.
/// * Fourth, unless the path is a directory, the file type matcher is run on
/// the path. As above, if it yields an ignore match, then all matching is
/// stopped and the path is skipped. If it yields a whitelist match, then
/// matching continues.
/// * Fifth, if the path hasn't been whitelisted and it is hidden, then the
/// path is skipped.
/// * Sixth, unless the path is a directory, the size of the file is compared
/// against the max filesize limit. If it exceeds the limit, it is skipped.
/// * Seventh, if the path has made it this far then it is yielded in the
/// iterator.
#[derive(Clone)]
pub struct WalkBuilder {
    /// The root paths to traverse.
    paths: Vec<PathBuf>,
    /// Builder for the ignore matcher applied during traversal.
    ig_builder: IgnoreBuilder,
    /// Maximum recursion depth, if any.
    max_depth: Option<usize>,
    /// Minimum depth below which entries are not yielded, if any.
    min_depth: Option<usize>,
    /// Skip files whose size exceeds this limit, if set.
    max_filesize: Option<u64>,
    /// Whether to follow symbolic links (disabled by default).
    follow_links: bool,
    /// Whether to stay on the root path's file system.
    same_file_system: bool,
    /// Optional comparator for ordering entries within a directory.
    sorter: Option<Sorter>,
    /// Thread count for the parallel walker (0 = choose heuristically).
    threads: usize,
    /// A file handle whose entries should be skipped (set by `skip_stdout`).
    skip: Option<Arc<Handle>>,
    /// Optional caller-supplied entry predicate (set by `filter_entry`).
    filter: Option<Filter>,
    /// The directory that gitignores should be interpreted relative to.
    ///
    /// Usually this is the directory containing the gitignore file. But in
    /// some cases, like for global gitignores or for gitignores specified
    /// explicitly, this should generally be set to the current working
    /// directory. This is only used for global gitignores or "explicit"
    /// gitignores.
    ///
    /// When `None`, the CWD is fetched from `std::env::current_dir()`. If
    /// that fails, then global gitignores are ignored (an error is logged).
    global_gitignores_relative_to:
        OnceLock<Result<PathBuf, Arc<std::io::Error>>>,
}
/// The ways entries within a single directory may be compared for sorting.
#[derive(Clone)]
enum Sorter {
    /// Compare entries by their base file name only.
    ByName(Arc<dyn Fn(&OsStr, &OsStr) -> Ordering + Send + Sync + 'static>),
    /// Compare entries by their full path.
    ByPath(Arc<dyn Fn(&Path, &Path) -> Ordering + Send + Sync + 'static>),
}
/// A caller-supplied predicate for filtering directory entries.
#[derive(Clone)]
struct Filter(Arc<dyn Fn(&DirEntry) -> bool + Send + Sync + 'static>);
impl std::fmt::Debug for WalkBuilder {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // `sorter` and `filter` hold closures, which have no useful `Debug`
        // representation, so they are rendered as opaque placeholders.
        f.debug_struct("WalkBuilder")
            .field("paths", &self.paths)
            .field("ig_builder", &self.ig_builder)
            .field("max_depth", &self.max_depth)
            .field("min_depth", &self.min_depth)
            .field("max_filesize", &self.max_filesize)
            .field("follow_links", &self.follow_links)
            .field("same_file_system", &self.same_file_system)
            .field("sorter", &"<...>")
            .field("threads", &self.threads)
            .field("skip", &self.skip)
            .field("filter", &"<...>")
            .field(
                "global_gitignores_relative_to",
                &self.global_gitignores_relative_to,
            )
            .finish()
    }
}
impl WalkBuilder {
/// Create a new builder for a recursive directory iterator for the
/// directory given.
///
/// Note that if you want to traverse multiple different directories, it
/// is better to call `add` on this builder than to create multiple
/// `Walk` values.
pub fn new<P: AsRef<Path>>(path: P) -> WalkBuilder {
WalkBuilder {
paths: vec![path.as_ref().to_path_buf()],
ig_builder: IgnoreBuilder::new(),
max_depth: None,
min_depth: None,
max_filesize: None,
follow_links: false,
same_file_system: false,
sorter: None,
threads: 0,
skip: None,
filter: None,
global_gitignores_relative_to: OnceLock::new(),
}
}
    /// Build a new `Walk` iterator.
    pub fn build(&self) -> Walk {
        let follow_links = self.follow_links;
        let max_depth = self.max_depth;
        let min_depth = self.min_depth;
        let sorter = self.sorter.clone();
        let its = self
            .paths
            .iter()
            .map(move |p| {
                // `-` is the conventional name for stdin; it gets no
                // underlying directory walker.
                if p == Path::new("-") {
                    (p.to_path_buf(), None)
                } else {
                    let mut wd = WalkDir::new(p);
                    // Explicitly named files are always followed even if
                    // `follow_links` is off — presumably so that a symlink
                    // given directly as a root is still searched.
                    wd = wd.follow_links(follow_links || p.is_file());
                    wd = wd.same_file_system(self.same_file_system);
                    if let Some(max_depth) = max_depth {
                        wd = wd.max_depth(max_depth);
                    }
                    if let Some(min_depth) = min_depth {
                        wd = wd.min_depth(min_depth);
                    }
                    if let Some(ref sorter) = sorter {
                        match sorter.clone() {
                            Sorter::ByName(cmp) => {
                                wd = wd.sort_by(move |a, b| {
                                    cmp(a.file_name(), b.file_name())
                                });
                            }
                            Sorter::ByPath(cmp) => {
                                wd = wd.sort_by(move |a, b| {
                                    cmp(a.path(), b.path())
                                });
                            }
                        }
                    }
                    (p.to_path_buf(), Some(WalkEventIter::from(wd)))
                }
            })
            .collect::<Vec<_>>()
            .into_iter();
        // Build the root ignore matcher relative to the CWD when it is
        // known; otherwise fall back to a matcher without a CWD.
        let ig_root = self
            .get_or_set_current_dir()
            .map(|cwd| self.ig_builder.build_with_cwd(Some(cwd.to_path_buf())))
            .unwrap_or_else(|| self.ig_builder.build())
        Walk {
            its,
            it: None,
            ig_root: ig_root.clone(),
            ig: ig_root.clone(),
            max_filesize: self.max_filesize,
            skip: self.skip.clone(),
            filter: self.filter.clone(),
        }
    }
/// Build a new `WalkParallel` iterator.
///
/// Note that this *doesn't* return something that implements `Iterator`.
/// Instead, the returned value must be run with a closure. e.g.,
/// `builder.build_parallel().run(|| |path| { println!("{path:?}"); WalkState::Continue })`.
pub fn build_parallel(&self) -> WalkParallel {
let ig_root = self
.get_or_set_current_dir()
.map(|cwd| self.ig_builder.build_with_cwd(Some(cwd.to_path_buf())))
.unwrap_or_else(|| self.ig_builder.build());
WalkParallel {
paths: self.paths.clone().into_iter(),
ig_root,
max_depth: self.max_depth,
min_depth: self.min_depth,
max_filesize: self.max_filesize,
follow_links: self.follow_links,
same_file_system: self.same_file_system,
threads: self.threads,
skip: self.skip.clone(),
filter: self.filter.clone(),
}
}
/// Add a file path to the iterator.
///
/// Each additional file path added is traversed recursively. This should
/// be preferred over building multiple `Walk` iterators since this
/// enables reusing resources across iteration.
pub fn add<P: AsRef<Path>>(&mut self, path: P) -> &mut WalkBuilder {
self.paths.push(path.as_ref().to_path_buf());
self
}
/// The maximum depth to recurse.
///
/// The default, `None`, imposes no depth restriction.
pub fn max_depth(&mut self, depth: Option<usize>) -> &mut WalkBuilder {
self.max_depth = depth;
if self.min_depth.is_some()
&& self.max_depth.is_some()
&& self.max_depth < self.min_depth
{
self.max_depth = self.min_depth;
}
self
}
/// The minimum depth to recurse.
///
/// The default, `None`, imposes no minimum depth restriction.
pub fn min_depth(&mut self, depth: Option<usize>) -> &mut WalkBuilder {
self.min_depth = depth;
if self.max_depth.is_some()
&& self.min_depth.is_some()
&& self.min_depth > self.max_depth
{
self.min_depth = self.max_depth;
}
self
}
    /// Whether to follow symbolic links or not.
    ///
    /// This is disabled by default.
    pub fn follow_links(&mut self, yes: bool) -> &mut WalkBuilder {
        self.follow_links = yes;
        self
    }
    /// Whether to ignore files above the specified limit.
    ///
    /// `None` (the default) imposes no limit; directories are never subject
    /// to this limit.
    pub fn max_filesize(&mut self, filesize: Option<u64>) -> &mut WalkBuilder {
        self.max_filesize = filesize;
        self
    }
    /// The number of threads to use for traversal.
    ///
    /// Note that this only has an effect when using `build_parallel`.
    ///
    /// The default setting is `0`, which chooses the number of threads
    /// automatically using heuristics.
    pub fn threads(&mut self, n: usize) -> &mut WalkBuilder {
        self.threads = n;
        self
    }
    /// Add a global ignore file to the matcher.
    ///
    /// This has lower precedence than all other sources of ignore rules.
    ///
    /// # Errors
    ///
    /// If there was a problem adding the ignore file, then an error is
    /// returned. Note that the error may indicate *partial* failure. For
    /// example, if an ignore file contains an invalid glob, all other globs
    /// are still applied.
    ///
    /// An error will also occur if this walker could not get the current
    /// working directory (and `WalkBuilder::current_dir` isn't set).
    pub fn add_ignore<P: AsRef<Path>>(&mut self, path: P) -> Option<Error> {
        let path = path.as_ref();
        // Explicitly added ignore files are interpreted relative to the
        // CWD, so without one the file cannot be applied meaningfully.
        let Some(cwd) = self.get_or_set_current_dir() else {
            let err = std::io::Error::other(format!(
                "CWD is not known, ignoring global gitignore {}",
                path.display()
            ));
            return Some(err.into());
        };
        let mut builder = GitignoreBuilder::new(cwd);
        let mut errs = PartialErrorBuilder::default();
        errs.maybe_push(builder.add(path));
        match builder.build() {
            Ok(gi) => {
                self.ig_builder.add_ignore(gi);
            }
            Err(err) => {
                errs.push(err);
            }
        }
        // `None` means no errors occurred; otherwise the (possibly partial)
        // failure is surfaced to the caller.
        errs.into_error_option()
    }
    /// Add a custom ignore file name
    ///
    /// These ignore files have higher precedence than all other ignore files.
    ///
    /// When specifying multiple names, earlier names have lower precedence than
    /// later names.
    pub fn add_custom_ignore_filename<S: AsRef<OsStr>>(
        &mut self,
        file_name: S,
    ) -> &mut WalkBuilder {
        self.ig_builder.add_custom_ignore_filename(file_name);
        self
    }
    /// Add an override matcher.
    ///
    /// By default, no override matcher is used.
    ///
    /// This overrides any previous setting.
    pub fn overrides(&mut self, overrides: Override) -> &mut WalkBuilder {
        self.ig_builder.overrides(overrides);
        self
    }
    /// Add a file type matcher.
    ///
    /// By default, no file type matcher is used.
    ///
    /// This overrides any previous setting.
    pub fn types(&mut self, types: Types) -> &mut WalkBuilder {
        self.ig_builder.types(types);
        self
    }
/// Enables all the standard ignore filters.
///
/// This toggles, as a group, all the filters that are enabled by default:
///
/// - [hidden()](#method.hidden)
/// - [parents()](#method.parents)
/// - [ignore()](#method.ignore)
/// - [git_ignore()](#method.git_ignore)
/// - [git_global()](#method.git_global)
/// - [git_exclude()](#method.git_exclude)
///
/// They may still be toggled individually after calling this function.
///
/// This is (by definition) enabled by default.
pub fn standard_filters(&mut self, yes: bool) -> &mut WalkBuilder {
self.hidden(yes)
.parents(yes)
.ignore(yes)
.git_ignore(yes)
.git_global(yes)
.git_exclude(yes)
}
    /// Enables ignoring hidden files.
    ///
    /// This is enabled by default.
    pub fn hidden(&mut self, yes: bool) -> &mut WalkBuilder {
        // All of the toggles below simply delegate to the underlying
        // ignore matcher builder.
        self.ig_builder.hidden(yes);
        self
    }
    /// Enables reading ignore files from parent directories.
    ///
    /// If this is enabled, then .gitignore files in parent directories of each
    /// file path given are respected. Otherwise, they are ignored.
    ///
    /// This is enabled by default.
    pub fn parents(&mut self, yes: bool) -> &mut WalkBuilder {
        self.ig_builder.parents(yes);
        self
    }
    /// Enables reading `.ignore` files.
    ///
    /// `.ignore` files have the same semantics as `gitignore` files and are
    /// supported by search tools such as ripgrep and The Silver Searcher.
    ///
    /// This is enabled by default.
    pub fn ignore(&mut self, yes: bool) -> &mut WalkBuilder {
        self.ig_builder.ignore(yes);
        self
    }
    /// Enables reading a global gitignore file, whose path is specified in
    /// git's `core.excludesFile` config option.
    ///
    /// Git's config file location is `$HOME/.gitconfig`. If `$HOME/.gitconfig`
    /// does not exist or does not specify `core.excludesFile`, then
    /// `$XDG_CONFIG_HOME/git/ignore` is read. If `$XDG_CONFIG_HOME` is not
    /// set or is empty, then `$HOME/.config/git/ignore` is used instead.
    ///
    /// This is enabled by default.
    pub fn git_global(&mut self, yes: bool) -> &mut WalkBuilder {
        self.ig_builder.git_global(yes);
        self
    }
    /// Enables reading `.gitignore` files.
    ///
    /// `.gitignore` files have match semantics as described in the `gitignore`
    /// man page.
    ///
    /// This is enabled by default.
    pub fn git_ignore(&mut self, yes: bool) -> &mut WalkBuilder {
        self.ig_builder.git_ignore(yes);
        self
    }
    /// Enables reading `.git/info/exclude` files.
    ///
    /// `.git/info/exclude` files have match semantics as described in the
    /// `gitignore` man page.
    ///
    /// This is enabled by default.
    pub fn git_exclude(&mut self, yes: bool) -> &mut WalkBuilder {
        self.ig_builder.git_exclude(yes);
        self
    }
    /// Whether a git repository is required to apply git-related ignore
    /// rules (global rules, .gitignore and local exclude rules).
    ///
    /// When disabled, git-related ignore rules are applied even when searching
    /// outside a git repository.
    ///
    /// In particular, if this is `false` then `.gitignore` files will be read
    /// from parent directories above the git root directory containing `.git`,
    /// which is different from the git behavior.
    pub fn require_git(&mut self, yes: bool) -> &mut WalkBuilder {
        self.ig_builder.require_git(yes);
        self
    }
    /// Process ignore files case insensitively
    ///
    /// This is disabled by default.
    pub fn ignore_case_insensitive(&mut self, yes: bool) -> &mut WalkBuilder {
        self.ig_builder.ignore_case_insensitive(yes);
        self
    }
    /// Set a function for sorting directory entries by their path.
    ///
    /// If a compare function is set, the resulting iterator will return all
    /// paths in sorted order. The compare function will be called to compare
    /// entries from the same directory.
    ///
    /// This is like `sort_by_file_name`, except the comparator accepts
    /// a `&Path` instead of the base file name, which permits it to sort by
    /// more criteria.
    ///
    /// This method will override any previous sorter set by this method or
    /// by `sort_by_file_name`.
    ///
    /// Note that this is not used in the parallel iterator.
    pub fn sort_by_file_path<F>(&mut self, cmp: F) -> &mut WalkBuilder
    where
        F: Fn(&Path, &Path) -> Ordering + Send + Sync + 'static,
    {
        // Stored behind an `Arc` so `build` can cheaply clone the
        // comparator into each underlying `WalkDir`.
        self.sorter = Some(Sorter::ByPath(Arc::new(cmp)));
        self
    }
    /// Set a function for sorting directory entries by file name.
    ///
    /// If a compare function is set, the resulting iterator will return all
    /// paths in sorted order. The compare function will be called to compare
    /// names from entries from the same directory using only the name of the
    /// entry.
    ///
    /// This method will override any previous sorter set by this method or
    /// by `sort_by_file_path`.
    ///
    /// Note that this is not used in the parallel iterator.
    pub fn sort_by_file_name<F>(&mut self, cmp: F) -> &mut WalkBuilder
    where
        F: Fn(&OsStr, &OsStr) -> Ordering + Send + Sync + 'static,
    {
        self.sorter = Some(Sorter::ByName(Arc::new(cmp)));
        self
    }
    /// Do not cross file system boundaries.
    ///
    /// When this option is enabled, directory traversal will not descend into
    /// directories that are on a different file system from the root path.
    ///
    /// Currently, this option is only supported on Unix and Windows. If this
    /// option is used on an unsupported platform, then directory traversal
    /// will immediately return an error and will not yield any entries.
    pub fn same_file_system(&mut self, yes: bool) -> &mut WalkBuilder {
        self.same_file_system = yes;
        self
    }
    /// Do not yield directory entries that are believed to correspond to
    /// stdout.
    ///
    /// This is useful when a command is invoked via shell redirection to a
    /// file that is also being read. For example, `grep -r foo ./ > results`
    /// might end up trying to search `results` even though it is also writing
    /// to it, which could cause an unbounded feedback loop. Setting this
    /// option prevents this from happening by skipping over the `results`
    /// file.
    ///
    /// This is disabled by default.
    pub fn skip_stdout(&mut self, yes: bool) -> &mut WalkBuilder {
        if yes {
            // NOTE(review): `stdout_handle()` presumably returns `None`
            // when a handle for stdout can't be resolved, in which case
            // nothing is skipped — confirm against its definition.
            self.skip = stdout_handle().map(Arc::new);
        } else {
            self.skip = None;
        }
        self
    }
    /// Yields only entries which satisfy the given predicate and skips
    /// descending into directories that do not satisfy the given predicate.
    ///
    /// The predicate is applied to all entries. If the predicate is
    /// true, iteration carries on as normal. If the predicate is false, the
    /// entry is ignored and if it is a directory, it is not descended into.
    ///
    /// Note that the errors for reading entries that may not satisfy the
    /// predicate will still be yielded.
    ///
    /// Note also that only one filter predicate can be applied to a
    /// `WalkBuilder`. Calling this subsequent times overrides previous filter
    /// predicates.
    pub fn filter_entry<P>(&mut self, filter: P) -> &mut WalkBuilder
    where
        P: Fn(&DirEntry) -> bool + Send + Sync + 'static,
    {
        // Only one predicate is stored; a later call replaces earlier ones.
        self.filter = Some(Filter(Arc::new(filter)));
        self
    }
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | true |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/ignore/src/types.rs | crates/ignore/src/types.rs | /*!
The types module provides a way of associating globs on file names to file
types.
This can be used to match specific types of files. For example, among
the default file types provided, the Rust file type is defined to be `*.rs`
with name `rust`. Similarly, the C file type is defined to be `*.{c,h}` with
name `c`.
Note that the set of default types may change over time.
# Example
This shows how to create and use a simple file type matcher using the default
file types defined in this crate.
```
use ignore::types::TypesBuilder;
let mut builder = TypesBuilder::new();
builder.add_defaults();
builder.select("rust");
let matcher = builder.build().unwrap();
assert!(matcher.matched("foo.rs", false).is_whitelist());
assert!(matcher.matched("foo.c", false).is_ignore());
```
# Example: negation
This is like the previous example, but shows how negating a file type works.
That is, this will let us match file paths that *don't* correspond to a
particular file type.
```
use ignore::types::TypesBuilder;
let mut builder = TypesBuilder::new();
builder.add_defaults();
builder.negate("c");
let matcher = builder.build().unwrap();
assert!(matcher.matched("foo.rs", false).is_none());
assert!(matcher.matched("foo.c", false).is_ignore());
```
# Example: custom file type definitions
This shows how to extend this library's default file type definitions
with your own.
```
use ignore::types::TypesBuilder;
let mut builder = TypesBuilder::new();
builder.add_defaults();
builder.add("foo", "*.foo");
// Another way of adding a file type definition.
// This is useful when accepting input from an end user.
builder.add_def("bar:*.bar");
// Note: we only select `foo`, not `bar`.
builder.select("foo");
let matcher = builder.build().unwrap();
assert!(matcher.matched("x.foo", false).is_whitelist());
// This is ignored because we only selected the `foo` file type.
assert!(matcher.matched("x.bar", false).is_ignore());
```
We can also add file type definitions based on other definitions.
```
use ignore::types::TypesBuilder;
let mut builder = TypesBuilder::new();
builder.add_defaults();
builder.add("foo", "*.foo");
builder.add_def("bar:include:foo,cpp");
builder.select("bar");
let matcher = builder.build().unwrap();
assert!(matcher.matched("x.foo", false).is_whitelist());
assert!(matcher.matched("y.cpp", false).is_whitelist());
```
*/
use std::{collections::HashMap, path::Path, sync::Arc};
use {
globset::{GlobBuilder, GlobSet, GlobSetBuilder},
regex_automata::util::pool::Pool,
};
use crate::{Error, Match, default_types::DEFAULT_TYPES, pathutil::file_name};
/// Glob represents a single glob in a set of file type definitions.
///
/// There may be more than one glob for a particular file type.
///
/// This is used to report information about the highest precedent glob
/// that matched.
///
/// Note that not all matches necessarily correspond to a specific glob.
/// For example, if there are one or more selections and a file path doesn't
/// match any of those selections, then the file path is considered to be
/// ignored.
///
/// The lifetime `'a` refers to the lifetime of the underlying file type
/// definition, which corresponds to the lifetime of the file type matcher.
#[derive(Clone, Debug)]
pub struct Glob<'a>(GlobInner<'a>);
/// The private representation of a `Glob`. Wrapped in a newtype so this
/// enum stays out of the public API.
#[derive(Clone, Debug)]
enum GlobInner<'a> {
    /// No glob matched, but the file path should still be ignored.
    UnmatchedIgnore,
    /// A glob matched.
    Matched {
        /// The file type definition which provided the glob.
        def: &'a FileTypeDef,
    },
}
impl<'a> Glob<'a> {
    /// Creates a glob that represents an ignore decision without any
    /// specific matching glob behind it.
    fn unmatched() -> Glob<'a> {
        Glob(GlobInner::UnmatchedIgnore)
    }

    /// Return the file type definition that matched, if one exists. A file type
    /// definition always exists when a specific definition matches a file
    /// path.
    pub fn file_type_def(&self) -> Option<&FileTypeDef> {
        if let GlobInner::Matched { def } = self.0 {
            Some(def)
        } else {
            None
        }
    }
}
/// A single file type definition.
///
/// File type definitions can be retrieved in aggregate from a file type
/// matcher. A file type definition is also reported when it is responsible
/// for a match.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FileTypeDef {
    name: String,
    globs: Vec<String>,
}

impl FileTypeDef {
    /// Return the name of this file type.
    pub fn name(&self) -> &str {
        self.name.as_str()
    }

    /// Return the globs used to recognize this file type.
    pub fn globs(&self) -> &[String] {
        self.globs.as_slice()
    }
}
/// Types is a file type matcher.
#[derive(Clone, Debug)]
pub struct Types {
    /// All of the file type definitions, sorted lexicographically by name.
    defs: Vec<FileTypeDef>,
    /// All of the selections made by the user.
    selections: Vec<Selection<FileTypeDef>>,
    /// Whether there is at least one Selection::Select in our selections.
    /// When this is true, a Match::None is converted to Match::Ignore.
    has_selected: bool,
    /// A mapping from glob index in the set to two indices. The first is an
    /// index into `selections` and the second is an index into the
    /// corresponding file type definition's list of globs.
    glob_to_selection: Vec<(usize, usize)>,
    /// The set of all glob selections, used for actual matching.
    set: GlobSet,
    /// Temporary storage for globs that match. Pooled so that `matched`
    /// can be called through `&self` without allocating a fresh buffer on
    /// every call.
    matches: Arc<Pool<Vec<usize>>>,
}
/// Indicates the type of a selection for a particular file type.
#[derive(Clone, Debug)]
enum Selection<T> {
    /// A file type that was explicitly selected (whitelisted).
    Select(String, T),
    /// A file type that was explicitly negated (ignored).
    Negate(String, T),
}
impl<T> Selection<T> {
    /// Returns true if and only if this selection is a negation.
    fn is_negated(&self) -> bool {
        matches!(*self, Selection::Negate(..))
    }
    /// Returns the name of the file type this selection refers to.
    fn name(&self) -> &str {
        match *self {
            Selection::Select(ref name, _) => name,
            Selection::Negate(ref name, _) => name,
        }
    }
    /// Transforms the inner value of this selection while preserving its
    /// name and polarity.
    fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Selection<U> {
        match self {
            Selection::Select(name, inner) => {
                Selection::Select(name, f(inner))
            }
            Selection::Negate(name, inner) => {
                Selection::Negate(name, f(inner))
            }
        }
    }
    /// Returns a reference to the inner value of this selection.
    fn inner(&self) -> &T {
        match *self {
            Selection::Select(_, ref inner) => inner,
            Selection::Negate(_, ref inner) => inner,
        }
    }
}
impl Types {
    /// Creates a new file type matcher that never matches any path and
    /// contains no file type definitions.
    pub fn empty() -> Types {
        Types {
            defs: vec![],
            selections: vec![],
            has_selected: false,
            glob_to_selection: vec![],
            set: GlobSetBuilder::new().build().unwrap(),
            matches: Arc::new(Pool::new(|| vec![])),
        }
    }
    /// Returns true if and only if this matcher has zero selections.
    pub fn is_empty(&self) -> bool {
        self.selections.is_empty()
    }
    /// Returns the number of selections used in this matcher.
    pub fn len(&self) -> usize {
        self.selections.len()
    }
    /// Return the set of current file type definitions.
    ///
    /// Definitions and globs are sorted.
    pub fn definitions(&self) -> &[FileTypeDef] {
        &self.defs
    }
    /// Returns a match for the given path against this file type matcher.
    ///
    /// The path is considered whitelisted if it matches a selected file type.
    /// The path is considered ignored if it matches a negated file type.
    /// If at least one file type is selected and `path` doesn't match, then
    /// the path is also considered ignored.
    pub fn matched<'a, P: AsRef<Path>>(
        &'a self,
        path: P,
        is_dir: bool,
    ) -> Match<Glob<'a>> {
        // File types don't apply to directories, and we can't do anything
        // if our glob set is empty.
        if is_dir || self.set.is_empty() {
            return Match::None;
        }
        // We only want to match against the file name, so extract it.
        // If one doesn't exist, then we can't match it.
        let name = match file_name(path.as_ref()) {
            Some(name) => name,
            None if self.has_selected => {
                return Match::Ignore(Glob::unmatched());
            }
            None => {
                return Match::None;
            }
        };
        // Grab a scratch buffer from the pool to collect matching glob
        // indices into.
        let mut matches = self.matches.get();
        self.set.matches_into(name, &mut *matches);
        // The highest precedent match is the last one.
        if let Some(&i) = matches.last() {
            // Map the glob index back to the selection that produced it.
            let (isel, _) = self.glob_to_selection[i];
            let sel = &self.selections[isel];
            let glob = Glob(GlobInner::Matched { def: sel.inner() });
            return if sel.is_negated() {
                Match::Ignore(glob)
            } else {
                Match::Whitelist(glob)
            };
        }
        // No glob matched. If anything was selected, the path is ignored;
        // otherwise this matcher expresses no opinion.
        if self.has_selected {
            Match::Ignore(Glob::unmatched())
        } else {
            Match::None
        }
    }
}
/// TypesBuilder builds a type matcher from a set of file type definitions and
/// a set of file type selections.
pub struct TypesBuilder {
    /// All defined file types, keyed by name.
    types: HashMap<String, FileTypeDef>,
    /// The selections (and negations) made so far, in order.
    selections: Vec<Selection<()>>,
}
impl TypesBuilder {
/// Create a new builder for a file type matcher.
///
/// The builder contains *no* type definitions to start with. A set
/// of default type definitions can be added with `add_defaults`, and
/// additional type definitions can be added with `select` and `negate`.
pub fn new() -> TypesBuilder {
TypesBuilder { types: HashMap::new(), selections: vec![] }
}
    /// Build the current set of file type definitions *and* selections into
    /// a file type matcher.
    pub fn build(&self) -> Result<Types, Error> {
        let defs = self.definitions();
        let has_selected = self.selections.iter().any(|s| !s.is_negated());
        let mut selections = vec![];
        let mut glob_to_selection = vec![];
        let mut build_set = GlobSetBuilder::new();
        for (isel, selection) in self.selections.iter().enumerate() {
            // Every selection must refer to a known file type definition.
            let def = match self.types.get(selection.name()) {
                Some(def) => def.clone(),
                None => {
                    let name = selection.name().to_string();
                    return Err(Error::UnrecognizedFileType(name));
                }
            };
            // Record, for each glob added to the set, which selection (and
            // which glob within that selection's definition) produced it,
            // so `Types::matched` can map a glob match back to its source.
            for (iglob, glob) in def.globs.iter().enumerate() {
                build_set.add(
                    GlobBuilder::new(glob)
                        .literal_separator(true)
                        .build()
                        .map_err(|err| Error::Glob {
                            glob: Some(glob.to_string()),
                            err: err.kind().to_string(),
                        })?,
                );
                glob_to_selection.push((isel, iglob));
            }
            selections.push(selection.clone().map(move |_| def));
        }
        let set = build_set
            .build()
            .map_err(|err| Error::Glob { glob: None, err: err.to_string() })?;
        Ok(Types {
            defs,
            selections,
            has_selected,
            glob_to_selection,
            set,
            matches: Arc::new(Pool::new(|| vec![])),
        })
    }
/// Return the set of current file type definitions.
///
/// Definitions and globs are sorted.
pub fn definitions(&self) -> Vec<FileTypeDef> {
let mut defs = vec![];
for def in self.types.values() {
let mut def = def.clone();
def.globs.sort();
defs.push(def);
}
defs.sort_by(|def1, def2| def1.name().cmp(def2.name()));
defs
}
/// Select the file type given by `name`.
///
/// If `name` is `all`, then all file types currently defined are selected.
pub fn select(&mut self, name: &str) -> &mut TypesBuilder {
if name == "all" {
for name in self.types.keys() {
self.selections.push(Selection::Select(name.to_string(), ()));
}
} else {
self.selections.push(Selection::Select(name.to_string(), ()));
}
self
}
/// Ignore the file type given by `name`.
///
/// If `name` is `all`, then all file types currently defined are negated.
pub fn negate(&mut self, name: &str) -> &mut TypesBuilder {
if name == "all" {
for name in self.types.keys() {
self.selections.push(Selection::Negate(name.to_string(), ()));
}
} else {
self.selections.push(Selection::Negate(name.to_string(), ()));
}
self
}
/// Clear any file type definitions for the type name given.
pub fn clear(&mut self, name: &str) -> &mut TypesBuilder {
self.types.remove(name);
self
}
/// Add a new file type definition. `name` can be arbitrary and `pat`
/// should be a glob recognizing file paths belonging to the `name` type.
///
/// If `name` is `all` or otherwise contains any character that is not a
/// Unicode letter or number, then an error is returned.
pub fn add(&mut self, name: &str, glob: &str) -> Result<(), Error> {
if name == "all" || !name.chars().all(|c| c.is_alphanumeric()) {
return Err(Error::InvalidDefinition);
}
let (key, glob) = (name.to_string(), glob.to_string());
self.types
.entry(key)
.or_insert_with(|| FileTypeDef {
name: name.to_string(),
globs: vec![],
})
.globs
.push(glob);
Ok(())
}
/// Add a new file type definition specified in string form. There are two
/// valid formats:
/// 1. `{name}:{glob}`. This defines a 'root' definition that associates the
/// given name with the given glob.
/// 2. `{name}:include:{comma-separated list of already defined names}.
/// This defines an 'include' definition that associates the given name
/// with the definitions of the given existing types.
/// Names may not include any characters that are not
/// Unicode letters or numbers.
pub fn add_def(&mut self, def: &str) -> Result<(), Error> {
let parts: Vec<&str> = def.split(':').collect();
match parts.len() {
2 => {
let name = parts[0];
let glob = parts[1];
if name.is_empty() || glob.is_empty() {
return Err(Error::InvalidDefinition);
}
self.add(name, glob)
}
3 => {
let name = parts[0];
let types_string = parts[2];
if name.is_empty()
|| parts[1] != "include"
|| types_string.is_empty()
{
return Err(Error::InvalidDefinition);
}
let types = types_string.split(',');
// Check ahead of time to ensure that all types specified are
// present and fail fast if not.
if types.clone().any(|t| !self.types.contains_key(t)) {
return Err(Error::InvalidDefinition);
}
for type_name in types {
let globs =
self.types.get(type_name).unwrap().globs.clone();
for glob in globs {
self.add(name, &glob)?;
}
}
Ok(())
}
_ => Err(Error::InvalidDefinition),
}
}
/// Add a set of default file type definitions.
pub fn add_defaults(&mut self) -> &mut TypesBuilder {
static MSG: &'static str = "adding a default type should never fail";
for &(names, exts) in DEFAULT_TYPES {
for name in names {
for ext in exts {
self.add(name, ext).expect(MSG);
}
}
}
self
}
}
#[cfg(test)]
mod tests {
use super::TypesBuilder;
macro_rules! matched {
($name:ident, $types:expr, $sel:expr, $selnot:expr,
$path:expr) => {
matched!($name, $types, $sel, $selnot, $path, true);
};
(not, $name:ident, $types:expr, $sel:expr, $selnot:expr,
$path:expr) => {
matched!($name, $types, $sel, $selnot, $path, false);
};
($name:ident, $types:expr, $sel:expr, $selnot:expr,
$path:expr, $matched:expr) => {
#[test]
fn $name() {
let mut btypes = TypesBuilder::new();
for tydef in $types {
btypes.add_def(tydef).unwrap();
}
for sel in $sel {
btypes.select(sel);
}
for selnot in $selnot {
btypes.negate(selnot);
}
let types = btypes.build().unwrap();
let mat = types.matched($path, false);
assert_eq!($matched, !mat.is_ignore());
}
};
}
fn types() -> Vec<&'static str> {
vec![
"html:*.html",
"html:*.htm",
"rust:*.rs",
"js:*.js",
"py:*.py",
"python:*.py",
"foo:*.{rs,foo}",
"combo:include:html,rust",
]
}
matched!(match1, types(), vec!["rust"], vec![], "lib.rs");
matched!(match2, types(), vec!["html"], vec![], "index.html");
matched!(match3, types(), vec!["html"], vec![], "index.htm");
matched!(match4, types(), vec!["html", "rust"], vec![], "main.rs");
matched!(match5, types(), vec![], vec![], "index.html");
matched!(match6, types(), vec![], vec!["rust"], "index.html");
matched!(match7, types(), vec!["foo"], vec!["rust"], "main.foo");
matched!(match8, types(), vec!["combo"], vec![], "index.html");
matched!(match9, types(), vec!["combo"], vec![], "lib.rs");
matched!(match10, types(), vec!["py"], vec![], "main.py");
matched!(match11, types(), vec!["python"], vec![], "main.py");
matched!(not, matchnot1, types(), vec!["rust"], vec![], "index.html");
matched!(not, matchnot2, types(), vec![], vec!["rust"], "main.rs");
matched!(not, matchnot3, types(), vec!["foo"], vec!["rust"], "main.rs");
matched!(not, matchnot4, types(), vec!["rust"], vec!["foo"], "main.rs");
matched!(not, matchnot5, types(), vec!["rust"], vec!["foo"], "main.foo");
matched!(not, matchnot6, types(), vec!["combo"], vec![], "leftpad.js");
matched!(not, matchnot7, types(), vec!["py"], vec![], "index.html");
matched!(not, matchnot8, types(), vec!["python"], vec![], "doc.md");
#[test]
fn test_invalid_defs() {
let mut btypes = TypesBuilder::new();
for tydef in types() {
btypes.add_def(tydef).unwrap();
}
// Preserve the original definitions for later comparison.
let original_defs = btypes.definitions();
let bad_defs = vec![
// Reference to type that does not exist
"combo:include:html,qwerty",
// Bad format
"combo:foobar:html,rust",
"",
];
for def in bad_defs {
assert!(btypes.add_def(def).is_err());
// Ensure that nothing changed, even if some of the includes were valid.
assert_eq!(btypes.definitions(), original_defs);
}
}
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/ignore/src/dir.rs | crates/ignore/src/dir.rs | // This module provides a data structure, `Ignore`, that connects "directory
// traversal" with "ignore matchers." Specifically, it knows about gitignore
// semantics and precedence, and is organized based on directory hierarchy.
// Namely, every matcher logically corresponds to ignore rules from a single
// directory, and points to the matcher for its corresponding parent directory.
// In this sense, `Ignore` is a *persistent* data structure.
//
// This design was specifically chosen to make it possible to use this data
// structure in a parallel directory iterator.
//
// My initial intention was to expose this module as part of this crate's
// public API, but I think the data structure's public API is too complicated
// with non-obvious failure modes. Alas, such things haven't been documented
// well.
use std::{
collections::HashMap,
ffi::{OsStr, OsString},
fs::{File, FileType},
io::{self, BufRead},
path::{Path, PathBuf},
sync::{Arc, RwLock, Weak},
};
use crate::{
gitignore::{self, Gitignore, GitignoreBuilder},
overrides::{self, Override},
pathutil::{is_hidden, strip_prefix},
types::{self, Types},
walk::DirEntry,
{Error, Match, PartialErrorBuilder},
};
/// IgnoreMatch represents information about where a match came from when using
/// the `Ignore` matcher.
#[derive(Clone, Debug)]
#[allow(dead_code)]
pub(crate) struct IgnoreMatch<'a>(IgnoreMatchInner<'a>);
/// IgnoreMatchInner describes precisely where the match information came from.
/// This is private to allow expansion to more matchers in the future.
#[derive(Clone, Debug)]
#[allow(dead_code)]
enum IgnoreMatchInner<'a> {
Override(overrides::Glob<'a>),
Gitignore(&'a gitignore::Glob),
Types(types::Glob<'a>),
Hidden,
}
impl<'a> IgnoreMatch<'a> {
fn overrides(x: overrides::Glob<'a>) -> IgnoreMatch<'a> {
IgnoreMatch(IgnoreMatchInner::Override(x))
}
fn gitignore(x: &'a gitignore::Glob) -> IgnoreMatch<'a> {
IgnoreMatch(IgnoreMatchInner::Gitignore(x))
}
fn types(x: types::Glob<'a>) -> IgnoreMatch<'a> {
IgnoreMatch(IgnoreMatchInner::Types(x))
}
fn hidden() -> IgnoreMatch<'static> {
IgnoreMatch(IgnoreMatchInner::Hidden)
}
}
/// Options for the ignore matcher, shared between the matcher itself and the
/// builder.
#[derive(Clone, Copy, Debug)]
struct IgnoreOptions {
/// Whether to ignore hidden file paths or not.
hidden: bool,
/// Whether to read .ignore files.
ignore: bool,
/// Whether to respect any ignore files in parent directories.
parents: bool,
/// Whether to read git's global gitignore file.
git_global: bool,
/// Whether to read .gitignore files.
git_ignore: bool,
/// Whether to read .git/info/exclude files.
git_exclude: bool,
/// Whether to ignore files case insensitively
ignore_case_insensitive: bool,
/// Whether a git repository must be present in order to apply any
/// git-related ignore rules.
require_git: bool,
}
/// Ignore is a matcher useful for recursively walking one or more directories.
#[derive(Clone, Debug)]
pub(crate) struct Ignore(Arc<IgnoreInner>);
#[derive(Clone, Debug)]
struct IgnoreInner {
/// A map of all existing directories that have already been
/// compiled into matchers.
///
/// Note that this is never used during matching, only when adding new
/// parent directory matchers. This avoids needing to rebuild glob sets for
/// parent directories if many paths are being searched.
compiled: Arc<RwLock<HashMap<OsString, Weak<IgnoreInner>>>>,
/// The path to the directory that this matcher was built from.
dir: PathBuf,
/// An override matcher (default is empty).
overrides: Arc<Override>,
/// A file type matcher.
types: Arc<Types>,
/// The parent directory to match next.
///
/// If this is the root directory or there are otherwise no more
/// directories to match, then `parent` is `None`.
parent: Option<Ignore>,
/// Whether this is an absolute parent matcher, as added by add_parent.
is_absolute_parent: bool,
/// The absolute base path of this matcher. Populated only if parent
/// directories are added.
absolute_base: Option<Arc<PathBuf>>,
/// The directory that gitignores should be interpreted relative to.
///
/// Usually this is the directory containing the gitignore file. But in
/// some cases, like for global gitignores or for gitignores specified
/// explicitly, this should generally be set to the current working
/// directory. This is only used for global gitignores or "explicit"
/// gitignores.
///
/// When `None`, this means the CWD could not be determined or is unknown.
/// In this case, global gitignore files are ignored because they otherwise
/// cannot be matched correctly.
global_gitignores_relative_to: Option<PathBuf>,
/// Explicit global ignore matchers specified by the caller.
explicit_ignores: Arc<Vec<Gitignore>>,
/// Ignore files used in addition to `.ignore`
custom_ignore_filenames: Arc<Vec<OsString>>,
/// The matcher for custom ignore files
custom_ignore_matcher: Gitignore,
/// The matcher for .ignore files.
ignore_matcher: Gitignore,
/// A global gitignore matcher, usually from $XDG_CONFIG_HOME/git/ignore.
git_global_matcher: Arc<Gitignore>,
/// The matcher for .gitignore files.
git_ignore_matcher: Gitignore,
/// Special matcher for `.git/info/exclude` files.
git_exclude_matcher: Gitignore,
/// Whether this directory contains a .git sub-directory.
has_git: bool,
/// Ignore config.
opts: IgnoreOptions,
}
impl Ignore {
/// Return the directory path of this matcher.
pub(crate) fn path(&self) -> &Path {
&self.0.dir
}
/// Return true if this matcher has no parent.
pub(crate) fn is_root(&self) -> bool {
self.0.parent.is_none()
}
/// Returns true if this matcher was added via the `add_parents` method.
pub(crate) fn is_absolute_parent(&self) -> bool {
self.0.is_absolute_parent
}
/// Return this matcher's parent, if one exists.
pub(crate) fn parent(&self) -> Option<Ignore> {
self.0.parent.clone()
}
/// Create a new `Ignore` matcher with the parent directories of `dir`.
///
/// Note that this can only be called on an `Ignore` matcher with no
/// parents (i.e., `is_root` returns `true`). This will panic otherwise.
pub(crate) fn add_parents<P: AsRef<Path>>(
&self,
path: P,
) -> (Ignore, Option<Error>) {
if !self.0.opts.parents
&& !self.0.opts.git_ignore
&& !self.0.opts.git_exclude
&& !self.0.opts.git_global
{
// If we never need info from parent directories, then don't do
// anything.
return (self.clone(), None);
}
if !self.is_root() {
panic!("Ignore::add_parents called on non-root matcher");
}
let absolute_base = match path.as_ref().canonicalize() {
Ok(path) => Arc::new(path),
Err(_) => {
// There's not much we can do here, so just return our
// existing matcher. We drop the error to be consistent
// with our general pattern of ignoring I/O errors when
// processing ignore files.
return (self.clone(), None);
}
};
// List of parents, from child to root.
let mut parents = vec![];
let mut path = &**absolute_base;
while let Some(parent) = path.parent() {
parents.push(parent);
path = parent;
}
let mut errs = PartialErrorBuilder::default();
let mut ig = self.clone();
for parent in parents.into_iter().rev() {
let mut compiled = self.0.compiled.write().unwrap();
if let Some(weak) = compiled.get(parent.as_os_str()) {
if let Some(prebuilt) = weak.upgrade() {
ig = Ignore(prebuilt);
continue;
}
}
let (mut igtmp, err) = ig.add_child_path(parent);
errs.maybe_push(err);
igtmp.is_absolute_parent = true;
igtmp.absolute_base = Some(absolute_base.clone());
igtmp.has_git =
if self.0.opts.require_git && self.0.opts.git_ignore {
parent.join(".git").exists() || parent.join(".jj").exists()
} else {
false
};
let ig_arc = Arc::new(igtmp);
ig = Ignore(ig_arc.clone());
compiled.insert(
parent.as_os_str().to_os_string(),
Arc::downgrade(&ig_arc),
);
}
(ig, errs.into_error_option())
}
/// Create a new `Ignore` matcher for the given child directory.
///
/// Since building the matcher may require reading from multiple
/// files, it's possible that this method partially succeeds. Therefore,
/// a matcher is always returned (which may match nothing) and an error is
/// returned if it exists.
///
/// Note that all I/O errors are completely ignored.
pub(crate) fn add_child<P: AsRef<Path>>(
&self,
dir: P,
) -> (Ignore, Option<Error>) {
let (ig, err) = self.add_child_path(dir.as_ref());
(Ignore(Arc::new(ig)), err)
}
/// Like add_child, but takes a full path and returns an IgnoreInner.
fn add_child_path(&self, dir: &Path) -> (IgnoreInner, Option<Error>) {
let check_vcs_dir = self.0.opts.require_git
&& (self.0.opts.git_ignore || self.0.opts.git_exclude);
let git_type = if check_vcs_dir {
dir.join(".git").metadata().ok().map(|md| md.file_type())
} else {
None
};
let has_git =
check_vcs_dir && (git_type.is_some() || dir.join(".jj").exists());
let mut errs = PartialErrorBuilder::default();
let custom_ig_matcher = if self.0.custom_ignore_filenames.is_empty() {
Gitignore::empty()
} else {
let (m, err) = create_gitignore(
&dir,
&dir,
&self.0.custom_ignore_filenames,
self.0.opts.ignore_case_insensitive,
);
errs.maybe_push(err);
m
};
let ig_matcher = if !self.0.opts.ignore {
Gitignore::empty()
} else {
let (m, err) = create_gitignore(
&dir,
&dir,
&[".ignore"],
self.0.opts.ignore_case_insensitive,
);
errs.maybe_push(err);
m
};
let gi_matcher = if !self.0.opts.git_ignore {
Gitignore::empty()
} else {
let (m, err) = create_gitignore(
&dir,
&dir,
&[".gitignore"],
self.0.opts.ignore_case_insensitive,
);
errs.maybe_push(err);
m
};
let gi_exclude_matcher = if !self.0.opts.git_exclude {
Gitignore::empty()
} else {
match resolve_git_commondir(dir, git_type) {
Ok(git_dir) => {
let (m, err) = create_gitignore(
&dir,
&git_dir,
&["info/exclude"],
self.0.opts.ignore_case_insensitive,
);
errs.maybe_push(err);
m
}
Err(err) => {
errs.maybe_push(err);
Gitignore::empty()
}
}
};
let ig = IgnoreInner {
compiled: self.0.compiled.clone(),
dir: dir.to_path_buf(),
overrides: self.0.overrides.clone(),
types: self.0.types.clone(),
parent: Some(self.clone()),
is_absolute_parent: false,
absolute_base: self.0.absolute_base.clone(),
global_gitignores_relative_to: self
.0
.global_gitignores_relative_to
.clone(),
explicit_ignores: self.0.explicit_ignores.clone(),
custom_ignore_filenames: self.0.custom_ignore_filenames.clone(),
custom_ignore_matcher: custom_ig_matcher,
ignore_matcher: ig_matcher,
git_global_matcher: self.0.git_global_matcher.clone(),
git_ignore_matcher: gi_matcher,
git_exclude_matcher: gi_exclude_matcher,
has_git,
opts: self.0.opts,
};
(ig, errs.into_error_option())
}
/// Returns true if at least one type of ignore rule should be matched.
fn has_any_ignore_rules(&self) -> bool {
let opts = self.0.opts;
let has_custom_ignore_files =
!self.0.custom_ignore_filenames.is_empty();
let has_explicit_ignores = !self.0.explicit_ignores.is_empty();
opts.ignore
|| opts.git_global
|| opts.git_ignore
|| opts.git_exclude
|| has_custom_ignore_files
|| has_explicit_ignores
}
/// Like `matched`, but works with a directory entry instead.
pub(crate) fn matched_dir_entry<'a>(
&'a self,
dent: &DirEntry,
) -> Match<IgnoreMatch<'a>> {
let m = self.matched(dent.path(), dent.is_dir());
if m.is_none() && self.0.opts.hidden && is_hidden(dent) {
return Match::Ignore(IgnoreMatch::hidden());
}
m
}
/// Returns a match indicating whether the given file path should be
/// ignored or not.
///
/// The match contains information about its origin.
fn matched<'a, P: AsRef<Path>>(
&'a self,
path: P,
is_dir: bool,
) -> Match<IgnoreMatch<'a>> {
// We need to be careful with our path. If it has a leading ./, then
// strip it because it causes nothing but trouble.
let mut path = path.as_ref();
if let Some(p) = strip_prefix("./", path) {
path = p;
}
// Match against the override patterns. If an override matches
// regardless of whether it's whitelist/ignore, then we quit and
// return that result immediately. Overrides have the highest
// precedence.
if !self.0.overrides.is_empty() {
let mat = self
.0
.overrides
.matched(path, is_dir)
.map(IgnoreMatch::overrides);
if !mat.is_none() {
return mat;
}
}
let mut whitelisted = Match::None;
if self.has_any_ignore_rules() {
let mat = self.matched_ignore(path, is_dir);
if mat.is_ignore() {
return mat;
} else if mat.is_whitelist() {
whitelisted = mat;
}
}
if !self.0.types.is_empty() {
let mat =
self.0.types.matched(path, is_dir).map(IgnoreMatch::types);
if mat.is_ignore() {
return mat;
} else if mat.is_whitelist() {
whitelisted = mat;
}
}
whitelisted
}
/// Performs matching only on the ignore files for this directory and
/// all parent directories.
fn matched_ignore<'a>(
&'a self,
path: &Path,
is_dir: bool,
) -> Match<IgnoreMatch<'a>> {
let (
mut m_custom_ignore,
mut m_ignore,
mut m_gi,
mut m_gi_exclude,
mut m_explicit,
) = (Match::None, Match::None, Match::None, Match::None, Match::None);
let any_git =
!self.0.opts.require_git || self.parents().any(|ig| ig.0.has_git);
let mut saw_git = false;
for ig in self.parents().take_while(|ig| !ig.0.is_absolute_parent) {
if m_custom_ignore.is_none() {
m_custom_ignore =
ig.0.custom_ignore_matcher
.matched(path, is_dir)
.map(IgnoreMatch::gitignore);
}
if m_ignore.is_none() {
m_ignore =
ig.0.ignore_matcher
.matched(path, is_dir)
.map(IgnoreMatch::gitignore);
}
if any_git && !saw_git && m_gi.is_none() {
m_gi =
ig.0.git_ignore_matcher
.matched(path, is_dir)
.map(IgnoreMatch::gitignore);
}
if any_git && !saw_git && m_gi_exclude.is_none() {
m_gi_exclude =
ig.0.git_exclude_matcher
.matched(path, is_dir)
.map(IgnoreMatch::gitignore);
}
saw_git = saw_git || ig.0.has_git;
}
if self.0.opts.parents {
if let Some(abs_parent_path) = self.absolute_base() {
// What we want to do here is take the absolute base path of
// this directory and join it with the path we're searching.
// The main issue we want to avoid is accidentally duplicating
// directory components, so we try to strip any common prefix
// off of `path`. Overall, this seems a little ham-fisted, but
// it does fix a nasty bug. It should do fine until we overhaul
// this crate.
let path = abs_parent_path.join(
self.parents()
.take_while(|ig| !ig.0.is_absolute_parent)
.last()
.map_or(path, |ig| {
// This is a weird special case when ripgrep users
// search with just a `.`, as some tools do
// automatically (like consult). In this case, if
// we don't bail out now, the code below will strip
// a leading `.` from `path`, which might mangle
// a hidden file name!
if ig.0.dir.as_path() == Path::new(".") {
return path;
}
let without_dot_slash =
strip_if_is_prefix("./", ig.0.dir.as_path());
let relative_base =
strip_if_is_prefix(without_dot_slash, path);
strip_if_is_prefix("/", relative_base)
}),
);
for ig in
self.parents().skip_while(|ig| !ig.0.is_absolute_parent)
{
if m_custom_ignore.is_none() {
m_custom_ignore =
ig.0.custom_ignore_matcher
.matched(&path, is_dir)
.map(IgnoreMatch::gitignore);
}
if m_ignore.is_none() {
m_ignore =
ig.0.ignore_matcher
.matched(&path, is_dir)
.map(IgnoreMatch::gitignore);
}
if any_git && !saw_git && m_gi.is_none() {
m_gi =
ig.0.git_ignore_matcher
.matched(&path, is_dir)
.map(IgnoreMatch::gitignore);
}
if any_git && !saw_git && m_gi_exclude.is_none() {
m_gi_exclude =
ig.0.git_exclude_matcher
.matched(&path, is_dir)
.map(IgnoreMatch::gitignore);
}
saw_git = saw_git || ig.0.has_git;
}
}
}
for gi in self.0.explicit_ignores.iter().rev() {
if !m_explicit.is_none() {
break;
}
m_explicit = gi.matched(&path, is_dir).map(IgnoreMatch::gitignore);
}
let m_global = if any_git {
self.0
.git_global_matcher
.matched(&path, is_dir)
.map(IgnoreMatch::gitignore)
} else {
Match::None
};
m_custom_ignore
.or(m_ignore)
.or(m_gi)
.or(m_gi_exclude)
.or(m_global)
.or(m_explicit)
}
/// Returns an iterator over parent ignore matchers, including this one.
pub(crate) fn parents(&self) -> Parents<'_> {
Parents(Some(self))
}
/// Returns the first absolute path of the first absolute parent, if
/// one exists.
fn absolute_base(&self) -> Option<&Path> {
self.0.absolute_base.as_ref().map(|p| &***p)
}
}
/// An iterator over all parents of an ignore matcher, including itself.
///
/// The lifetime `'a` refers to the lifetime of the initial `Ignore` matcher.
pub(crate) struct Parents<'a>(Option<&'a Ignore>);
impl<'a> Iterator for Parents<'a> {
type Item = &'a Ignore;
fn next(&mut self) -> Option<&'a Ignore> {
match self.0.take() {
None => None,
Some(ig) => {
self.0 = ig.0.parent.as_ref();
Some(ig)
}
}
}
}
/// A builder for creating an Ignore matcher.
#[derive(Clone, Debug)]
pub(crate) struct IgnoreBuilder {
/// The root directory path for this ignore matcher.
dir: PathBuf,
/// An override matcher (default is empty).
overrides: Arc<Override>,
/// A type matcher (default is empty).
types: Arc<Types>,
/// Explicit global ignore matchers.
explicit_ignores: Vec<Gitignore>,
/// Ignore files in addition to .ignore.
custom_ignore_filenames: Vec<OsString>,
/// The directory that gitignores should be interpreted relative to.
///
/// Usually this is the directory containing the gitignore file. But in
/// some cases, like for global gitignores or for gitignores specified
/// explicitly, this should generally be set to the current working
/// directory. This is only used for global gitignores or "explicit"
/// gitignores.
///
/// When `None`, global gitignores are ignored.
global_gitignores_relative_to: Option<PathBuf>,
/// Ignore config.
opts: IgnoreOptions,
}
impl IgnoreBuilder {
/// Create a new builder for an `Ignore` matcher.
///
/// It is likely a bug to use this without also calling `current_dir()`
/// outside of tests. This isn't made mandatory because this is an internal
/// abstraction and it's annoying to update tests.
pub(crate) fn new() -> IgnoreBuilder {
IgnoreBuilder {
dir: Path::new("").to_path_buf(),
overrides: Arc::new(Override::empty()),
types: Arc::new(Types::empty()),
explicit_ignores: vec![],
custom_ignore_filenames: vec![],
global_gitignores_relative_to: None,
opts: IgnoreOptions {
hidden: true,
ignore: true,
parents: true,
git_global: true,
git_ignore: true,
git_exclude: true,
ignore_case_insensitive: false,
require_git: true,
},
}
}
/// Builds a new `Ignore` matcher.
///
/// The matcher returned won't match anything until ignore rules from
/// directories are added to it.
pub(crate) fn build(&self) -> Ignore {
self.build_with_cwd(None)
}
/// Builds a new `Ignore` matcher using the given CWD directory.
///
/// The matcher returned won't match anything until ignore rules from
/// directories are added to it.
pub(crate) fn build_with_cwd(&self, cwd: Option<PathBuf>) -> Ignore {
let global_gitignores_relative_to =
cwd.or_else(|| self.global_gitignores_relative_to.clone());
let git_global_matcher = if !self.opts.git_global {
Gitignore::empty()
} else if let Some(ref cwd) = global_gitignores_relative_to {
let mut builder = GitignoreBuilder::new(cwd);
builder
.case_insensitive(self.opts.ignore_case_insensitive)
.unwrap();
let (gi, err) = builder.build_global();
if let Some(err) = err {
log::debug!("{}", err);
}
gi
} else {
log::debug!(
"ignoring global gitignore file because CWD is not known"
);
Gitignore::empty()
};
Ignore(Arc::new(IgnoreInner {
compiled: Arc::new(RwLock::new(HashMap::new())),
dir: self.dir.clone(),
overrides: self.overrides.clone(),
types: self.types.clone(),
parent: None,
is_absolute_parent: true,
absolute_base: None,
global_gitignores_relative_to,
explicit_ignores: Arc::new(self.explicit_ignores.clone()),
custom_ignore_filenames: Arc::new(
self.custom_ignore_filenames.clone(),
),
custom_ignore_matcher: Gitignore::empty(),
ignore_matcher: Gitignore::empty(),
git_global_matcher: Arc::new(git_global_matcher),
git_ignore_matcher: Gitignore::empty(),
git_exclude_matcher: Gitignore::empty(),
has_git: false,
opts: self.opts,
}))
}
/// Set the current directory used for matching global gitignores.
pub(crate) fn current_dir(
&mut self,
cwd: impl Into<PathBuf>,
) -> &mut IgnoreBuilder {
self.global_gitignores_relative_to = Some(cwd.into());
self
}
/// Add an override matcher.
///
/// By default, no override matcher is used.
///
/// This overrides any previous setting.
pub(crate) fn overrides(
&mut self,
overrides: Override,
) -> &mut IgnoreBuilder {
self.overrides = Arc::new(overrides);
self
}
/// Add a file type matcher.
///
/// By default, no file type matcher is used.
///
/// This overrides any previous setting.
pub(crate) fn types(&mut self, types: Types) -> &mut IgnoreBuilder {
self.types = Arc::new(types);
self
}
/// Adds a new global ignore matcher from the ignore file path given.
pub(crate) fn add_ignore(&mut self, ig: Gitignore) -> &mut IgnoreBuilder {
self.explicit_ignores.push(ig);
self
}
/// Add a custom ignore file name
///
/// These ignore files have higher precedence than all other ignore files.
///
/// When specifying multiple names, earlier names have lower precedence than
/// later names.
pub(crate) fn add_custom_ignore_filename<S: AsRef<OsStr>>(
&mut self,
file_name: S,
) -> &mut IgnoreBuilder {
self.custom_ignore_filenames.push(file_name.as_ref().to_os_string());
self
}
/// Enables ignoring hidden files.
///
/// This is enabled by default.
pub(crate) fn hidden(&mut self, yes: bool) -> &mut IgnoreBuilder {
self.opts.hidden = yes;
self
}
/// Enables reading `.ignore` files.
///
/// `.ignore` files have the same semantics as `gitignore` files and are
/// supported by search tools such as ripgrep and The Silver Searcher.
///
/// This is enabled by default.
pub(crate) fn ignore(&mut self, yes: bool) -> &mut IgnoreBuilder {
self.opts.ignore = yes;
self
}
/// Enables reading ignore files from parent directories.
///
/// If this is enabled, then .gitignore files in parent directories of each
/// file path given are respected. Otherwise, they are ignored.
///
/// This is enabled by default.
pub(crate) fn parents(&mut self, yes: bool) -> &mut IgnoreBuilder {
self.opts.parents = yes;
self
}
/// Add a global gitignore matcher.
///
/// Its precedence is lower than both normal `.gitignore` files and
/// `.git/info/exclude` files.
///
/// This overwrites any previous global gitignore setting.
///
/// This is enabled by default.
pub(crate) fn git_global(&mut self, yes: bool) -> &mut IgnoreBuilder {
self.opts.git_global = yes;
self
}
/// Enables reading `.gitignore` files.
///
/// `.gitignore` files have match semantics as described in the `gitignore`
/// man page.
///
/// This is enabled by default.
pub(crate) fn git_ignore(&mut self, yes: bool) -> &mut IgnoreBuilder {
self.opts.git_ignore = yes;
self
}
/// Enables reading `.git/info/exclude` files.
///
/// `.git/info/exclude` files have match semantics as described in the
/// `gitignore` man page.
///
/// This is enabled by default.
pub(crate) fn git_exclude(&mut self, yes: bool) -> &mut IgnoreBuilder {
self.opts.git_exclude = yes;
self
}
/// Whether a git repository is required to apply git-related ignore
/// rules (global rules, .gitignore and local exclude rules).
///
/// When disabled, git-related ignore rules are applied even when searching
/// outside a git repository.
pub(crate) fn require_git(&mut self, yes: bool) -> &mut IgnoreBuilder {
self.opts.require_git = yes;
self
}
/// Process ignore files case insensitively
///
/// This is disabled by default.
pub(crate) fn ignore_case_insensitive(
&mut self,
yes: bool,
) -> &mut IgnoreBuilder {
self.opts.ignore_case_insensitive = yes;
self
}
}
/// Creates a new gitignore matcher for the directory given.
///
/// The matcher is meant to match files below `dir`.
/// Ignore globs are extracted from each of the file names relative to
/// `dir_for_ignorefile` in the order given (earlier names have lower
/// precedence than later names).
///
/// I/O errors are ignored.
pub(crate) fn create_gitignore<T: AsRef<OsStr>>(
dir: &Path,
dir_for_ignorefile: &Path,
names: &[T],
case_insensitive: bool,
) -> (Gitignore, Option<Error>) {
let mut builder = GitignoreBuilder::new(dir);
let mut errs = PartialErrorBuilder::default();
builder.case_insensitive(case_insensitive).unwrap();
for name in names {
let gipath = dir_for_ignorefile.join(name.as_ref());
// This check is not necessary, but is added for performance. Namely,
// a simple stat call checking for existence can often be just a bit
// quicker than actually trying to open a file. Since the number of
// directories without ignore files likely greatly exceeds the number
// with ignore files, this check generally makes sense.
//
// However, until demonstrated otherwise, we speculatively do not do
// this on Windows since Windows is notorious for having slow file
// system operations. Namely, it's not clear whether this analysis
// makes sense on Windows.
//
// For more details: https://github.com/BurntSushi/ripgrep/pull/1381
if cfg!(windows) || gipath.exists() {
errs.maybe_push_ignore_io(builder.add(gipath));
}
}
let gi = match builder.build() {
Ok(gi) => gi,
Err(err) => {
errs.push(err);
GitignoreBuilder::new(dir).build().unwrap()
}
};
(gi, errs.into_error_option())
}
/// Find the GIT_COMMON_DIR for the given git worktree.
///
/// This is the directory that may contain a private ignore file
/// "info/exclude". Unlike git, this function does *not* read environment
/// variables GIT_DIR and GIT_COMMON_DIR, because it is not clear how to use
/// them when multiple repositories are searched.
///
/// Some I/O errors are ignored.
fn resolve_git_commondir(
dir: &Path,
git_type: Option<FileType>,
) -> Result<PathBuf, Option<Error>> {
let git_dir_path = || dir.join(".git");
let git_dir = git_dir_path();
if !git_type.map_or(false, |ft| ft.is_file()) {
return Ok(git_dir);
}
let file = match File::open(git_dir) {
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | true |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/ignore/src/default_types.rs | crates/ignore/src/default_types.rs | /// This list represents the default file types that ripgrep ships with. In
/// general, any file format is fair game, although it should generally be
/// limited to reasonably popular open formats. For other cases, you can add
/// types to each invocation of ripgrep with the '--type-add' flag.
///
/// If you would like to add or improve this list, please file a PR:
/// <https://github.com/BurntSushi/ripgrep>.
///
/// Please try to keep this list sorted lexicographically and wrapped to 79
/// columns (inclusive).
// NOTE: entries must remain sorted lexicographically by their *first* alias;
// the `default_types_are_sorted` test below enforces this invariant.
#[rustfmt::skip]
pub(crate) const DEFAULT_TYPES: &[(&[&str], &[&str])] = &[
    (&["ada"], &["*.adb", "*.ads"]),
    (&["agda"], &["*.agda", "*.lagda"]),
    (&["aidl"], &["*.aidl"]),
    (&["alire"], &["alire.toml"]),
    (&["amake"], &["*.mk", "*.bp"]),
    (&["asciidoc"], &["*.adoc", "*.asc", "*.asciidoc"]),
    (&["asm"], &["*.asm", "*.s", "*.S"]),
    (&["asp"], &[
        "*.aspx", "*.aspx.cs", "*.aspx.vb", "*.ascx", "*.ascx.cs",
        "*.ascx.vb", "*.asp"
    ]),
    (&["ats"], &["*.ats", "*.dats", "*.sats", "*.hats"]),
    (&["avro"], &["*.avdl", "*.avpr", "*.avsc"]),
    (&["awk"], &["*.awk"]),
    (&["bat", "batch"], &["*.bat"]),
    (&["bazel"], &[
        "*.bazel", "*.bzl", "*.BUILD", "*.bazelrc", "BUILD", "MODULE.bazel",
        "WORKSPACE", "WORKSPACE.bazel", "WORKSPACE.bzlmod",
    ]),
    (&["bitbake"], &["*.bb", "*.bbappend", "*.bbclass", "*.conf", "*.inc"]),
    (&["boxlang"], &["*.bx", "*.bxm", "*.bxs"]),
    (&["brotli"], &["*.br"]),
    (&["buildstream"], &["*.bst"]),
    (&["bzip2"], &["*.bz2", "*.tbz2"]),
    // Globs use character classes, e.g. `*.[chH]` matches .c, .h and .H.
    (&["c"], &["*.[chH]", "*.[chH].in", "*.cats"]),
    (&["cabal"], &["*.cabal"]),
    (&["candid"], &["*.did"]),
    (&["carp"], &["*.carp"]),
    (&["cbor"], &["*.cbor"]),
    (&["ceylon"], &["*.ceylon"]),
    (&["cfml"], &["*.cfc", "*.cfm"]),
    (&["clojure"], &["*.clj", "*.cljc", "*.cljs", "*.cljx"]),
    (&["cmake"], &["*.cmake", "CMakeLists.txt"]),
    (&["cmd"], &["*.bat", "*.cmd"]),
    (&["cml"], &["*.cml"]),
    (&["coffeescript"], &["*.coffee"]),
    (&["config"], &["*.cfg", "*.conf", "*.config", "*.ini"]),
    (&["coq"], &["*.v"]),
    (&["cpp"], &[
        "*.[ChH]", "*.cc", "*.[ch]pp", "*.[ch]xx", "*.hh", "*.inl",
        "*.[ChH].in", "*.cc.in", "*.[ch]pp.in", "*.[ch]xx.in", "*.hh.in",
    ]),
    (&["creole"], &["*.creole"]),
    (&["crystal"], &["Projectfile", "*.cr", "*.ecr", "shard.yml"]),
    (&["cs"], &["*.cs"]),
    (&["csharp"], &["*.cs"]),
    (&["cshtml"], &["*.cshtml"]),
    (&["csproj"], &["*.csproj"]),
    (&["css"], &["*.css", "*.scss"]),
    (&["csv"], &["*.csv"]),
    (&["cuda"], &["*.cu", "*.cuh"]),
    (&["cython"], &["*.pyx", "*.pxi", "*.pxd"]),
    (&["d"], &["*.d"]),
    (&["dart"], &["*.dart"]),
    (&["devicetree"], &["*.dts", "*.dtsi", "*.dtso"]),
    (&["dhall"], &["*.dhall"]),
    (&["diff"], &["*.patch", "*.diff"]),
    (&["dita"], &["*.dita", "*.ditamap", "*.ditaval"]),
    (&["docker"], &["*Dockerfile*"]),
    (&["dockercompose"], &["docker-compose.yml", "docker-compose.*.yml"]),
    (&["dts"], &["*.dts", "*.dtsi"]),
    (&["dvc"], &["Dvcfile", "*.dvc"]),
    (&["ebuild"], &["*.ebuild", "*.eclass"]),
    (&["edn"], &["*.edn"]),
    (&["elisp"], &["*.el"]),
    (&["elixir"], &["*.ex", "*.eex", "*.exs", "*.heex", "*.leex", "*.livemd"]),
    (&["elm"], &["*.elm"]),
    (&["erb"], &["*.erb"]),
    (&["erlang"], &["*.erl", "*.hrl"]),
    (&["fennel"], &["*.fnl"]),
    (&["fidl"], &["*.fidl"]),
    (&["fish"], &["*.fish"]),
    (&["flatbuffers"], &["*.fbs"]),
    (&["fortran"], &[
        "*.f", "*.F", "*.f77", "*.F77", "*.pfo",
        "*.f90", "*.F90", "*.f95", "*.F95",
    ]),
    (&["fsharp"], &["*.fs", "*.fsx", "*.fsi"]),
    (&["fut"], &["*.fut"]),
    (&["gap"], &["*.g", "*.gap", "*.gi", "*.gd", "*.tst"]),
    (&["gdscript"], &["*.gd"]),
    (&["gleam"], &["*.gleam"]),
    (&["gn"], &["*.gn", "*.gni"]),
    (&["go"], &["*.go"]),
    (&["gprbuild"], &["*.gpr"]),
    (&["gradle"], &[
        "*.gradle", "*.gradle.kts", "gradle.properties", "gradle-wrapper.*",
        "gradlew", "gradlew.bat",
    ]),
    (&["graphql"], &["*.graphql", "*.graphqls"]),
    (&["groovy"], &["*.groovy", "*.gradle"]),
    (&["gzip"], &["*.gz", "*.tgz"]),
    (&["h"], &["*.h", "*.hh", "*.hpp"]),
    (&["haml"], &["*.haml"]),
    (&["hare"], &["*.ha"]),
    (&["haskell"], &["*.hs", "*.lhs", "*.cpphs", "*.c2hs", "*.hsc"]),
    (&["hbs"], &["*.hbs"]),
    (&["hs"], &["*.hs", "*.lhs"]),
    (&["html"], &["*.htm", "*.html", "*.ejs"]),
    (&["hy"], &["*.hy"]),
    (&["idris"], &["*.idr", "*.lidr"]),
    (&["janet"], &["*.janet"]),
    (&["java"], &["*.java", "*.jsp", "*.jspx", "*.properties"]),
    (&["jinja"], &["*.j2", "*.jinja", "*.jinja2"]),
    (&["jl"], &["*.jl"]),
    (&["js"], &["*.js", "*.jsx", "*.vue", "*.cjs", "*.mjs"]),
    (&["json"], &["*.json", "composer.lock", "*.sarif"]),
    (&["jsonl"], &["*.jsonl"]),
    (&["julia"], &["*.jl"]),
    (&["jupyter"], &["*.ipynb", "*.jpynb"]),
    (&["k"], &["*.k"]),
    (&["kconfig"], &["Kconfig", "Kconfig.*"]),
    (&["kotlin"], &["*.kt", "*.kts"]),
    (&["lean"], &["*.lean"]),
    (&["less"], &["*.less"]),
    (&["license"], &[
        // General
        "COPYING", "COPYING[.-]*",
        "COPYRIGHT", "COPYRIGHT[.-]*",
        "EULA", "EULA[.-]*",
        "licen[cs]e", "licen[cs]e.*",
        "LICEN[CS]E", "LICEN[CS]E[.-]*", "*[.-]LICEN[CS]E*",
        "NOTICE", "NOTICE[.-]*",
        "PATENTS", "PATENTS[.-]*",
        "UNLICEN[CS]E", "UNLICEN[CS]E[.-]*",
        // GPL (gpl.txt, etc.)
        "agpl[.-]*",
        "gpl[.-]*",
        "lgpl[.-]*",
        // Other license-specific (APACHE-2.0.txt, etc.)
        "AGPL-*[0-9]*",
        "APACHE-*[0-9]*",
        "BSD-*[0-9]*",
        "CC-BY-*",
        "GFDL-*[0-9]*",
        "GNU-*[0-9]*",
        "GPL-*[0-9]*",
        "LGPL-*[0-9]*",
        "MIT-*[0-9]*",
        "MPL-*[0-9]*",
        "OFL-*[0-9]*",
    ]),
    (&["lilypond"], &["*.ly", "*.ily"]),
    (&["lisp"], &["*.el", "*.jl", "*.lisp", "*.lsp", "*.sc", "*.scm"]),
    (&["llvm"], &["*.ll"]),
    (&["lock"], &["*.lock", "package-lock.json"]),
    (&["log"], &["*.log"]),
    (&["lua"], &["*.lua"]),
    (&["lz4"], &["*.lz4"]),
    (&["lzma"], &["*.lzma"]),
    (&["m4"], &["*.ac", "*.m4"]),
    (&["make"], &[
        "[Gg][Nn][Uu]makefile", "[Mm]akefile",
        "[Gg][Nn][Uu]makefile.am", "[Mm]akefile.am",
        "[Gg][Nn][Uu]makefile.in", "[Mm]akefile.in",
        "Makefile.*",
        "*.mk", "*.mak"
    ]),
    (&["mako"], &["*.mako", "*.mao"]),
    (&["man"], &["*.[0-9lnpx]", "*.[0-9][cEFMmpSx]"]),
    (&["markdown", "md"], &[
        "*.markdown",
        "*.md",
        "*.mdown",
        "*.mdwn",
        "*.mkd",
        "*.mkdn",
        "*.mdx",
    ]),
    (&["matlab"], &["*.m"]),
    (&["meson"], &["meson.build", "meson_options.txt", "meson.options"]),
    (&["minified"], &["*.min.html", "*.min.css", "*.min.js"]),
    (&["mint"], &["*.mint"]),
    (&["mk"], &["mkfile"]),
    (&["ml"], &["*.ml"]),
    (&["motoko"], &["*.mo"]),
    (&["msbuild"], &[
        "*.csproj", "*.fsproj", "*.vcxproj", "*.proj", "*.props", "*.targets",
        "*.sln", "*.slnf"
    ]),
    (&["nim"], &["*.nim", "*.nimf", "*.nimble", "*.nims"]),
    (&["nix"], &["*.nix"]),
    (&["objc"], &["*.h", "*.m"]),
    (&["objcpp"], &["*.h", "*.mm"]),
    (&["ocaml"], &["*.ml", "*.mli", "*.mll", "*.mly"]),
    (&["org"], &["*.org", "*.org_archive"]),
    (&["pants"], &["BUILD"]),
    (&["pascal"], &["*.pas", "*.dpr", "*.lpr", "*.pp", "*.inc"]),
    (&["pdf"], &["*.pdf"]),
    (&["perl"], &["*.perl", "*.pl", "*.PL", "*.plh", "*.plx", "*.pm", "*.t"]),
    (&["php"], &[
        // note that PHP 6 doesn't exist
        // See: https://wiki.php.net/rfc/php6
        "*.php", "*.php3", "*.php4", "*.php5", "*.php7", "*.php8",
        "*.pht", "*.phtml"
    ]),
    (&["po"], &["*.po"]),
    (&["pod"], &["*.pod"]),
    (&["postscript"], &["*.eps", "*.ps"]),
    (&["prolog"], &["*.pl", "*.pro", "*.prolog", "*.P"]),
    (&["protobuf"], &["*.proto"]),
    (&["ps"], &["*.cdxml", "*.ps1", "*.ps1xml", "*.psd1", "*.psm1"]),
    (&["puppet"], &["*.epp", "*.erb", "*.pp", "*.rb"]),
    (&["purs"], &["*.purs"]),
    (&["py", "python"], &["*.py", "*.pyi"]),
    (&["qmake"], &["*.pro", "*.pri", "*.prf"]),
    (&["qml"], &["*.qml"]),
    (&["qrc"], &["*.qrc"]),
    (&["qui"], &["*.ui"]),
    (&["r"], &["*.R", "*.r", "*.Rmd", "*.rmd", "*.Rnw", "*.rnw"]),
    (&["racket"], &["*.rkt"]),
    (&["raku"], &[
        "*.raku", "*.rakumod", "*.rakudoc", "*.rakutest",
        "*.p6", "*.pl6", "*.pm6"
    ]),
    (&["rdoc"], &["*.rdoc"]),
    (&["readme"], &["README*", "*README"]),
    (&["reasonml"], &["*.re", "*.rei"]),
    (&["red"], &["*.r", "*.red", "*.reds"]),
    (&["rescript"], &["*.res", "*.resi"]),
    (&["robot"], &["*.robot"]),
    (&["rst"], &["*.rst"]),
    (&["ruby"], &[
        // Idiomatic files
        "config.ru", "Gemfile", ".irbrc", "Rakefile",
        // Extensions
        "*.gemspec", "*.rb", "*.rbw", "*.rake"
    ]),
    (&["rust"], &["*.rs"]),
    (&["sass"], &["*.sass", "*.scss"]),
    (&["scala"], &["*.scala", "*.sbt"]),
    (&["scdoc"], &["*.scd", "*.scdoc"]),
    (&["seed7"], &["*.sd7", "*.s7i"]),
    (&["sh"], &[
        // Portable/misc. init files
        ".env", ".login", ".logout", ".profile", "profile",
        // bash-specific init files
        ".bash_login", "bash_login",
        ".bash_logout", "bash_logout",
        ".bash_profile", "bash_profile",
        ".bashrc", "bashrc", "*.bashrc",
        // csh-specific init files
        ".cshrc", "*.cshrc",
        // ksh-specific init files
        ".kshrc", "*.kshrc",
        // tcsh-specific init files
        ".tcshrc",
        // zsh-specific init files
        ".zshenv", "zshenv",
        ".zlogin", "zlogin",
        ".zlogout", "zlogout",
        ".zprofile", "zprofile",
        ".zshrc", "zshrc",
        // Extensions
        "*.bash", "*.csh", "*.env", "*.ksh", "*.sh", "*.tcsh", "*.zsh",
    ]),
    (&["slim"], &["*.skim", "*.slim", "*.slime"]),
    (&["smarty"], &["*.tpl"]),
    (&["sml"], &["*.sml", "*.sig"]),
    (&["solidity"], &["*.sol"]),
    (&["soy"], &["*.soy"]),
    (&["spark"], &["*.spark"]),
    (&["spec"], &["*.spec"]),
    (&["sql"], &["*.sql", "*.psql"]),
    (&["ssa"], &["*.ssa"]),
    (&["stylus"], &["*.styl"]),
    (&["sv"], &["*.v", "*.vg", "*.sv", "*.svh", "*.h"]),
    (&["svelte"], &["*.svelte", "*.svelte.ts"]),
    (&["svg"], &["*.svg"]),
    (&["swift"], &["*.swift"]),
    (&["swig"], &["*.def", "*.i"]),
    (&["systemd"], &[
        "*.automount", "*.conf", "*.device", "*.link", "*.mount", "*.path",
        "*.scope", "*.service", "*.slice", "*.socket", "*.swap", "*.target",
        "*.timer",
    ]),
    (&["taskpaper"], &["*.taskpaper"]),
    (&["tcl"], &["*.tcl"]),
    (&["tex"], &["*.tex", "*.ltx", "*.cls", "*.sty", "*.bib", "*.dtx", "*.ins"]),
    (&["texinfo"], &["*.texi"]),
    (&["textile"], &["*.textile"]),
    (&["tf"], &[
        "*.tf", "*.tf.json", "*.tfvars", "*.tfvars.json",
        "*.terraformrc", "terraform.rc", "*.tfrc", "*.terraform.lock.hcl",
    ]),
    (&["thrift"], &["*.thrift"]),
    (&["toml"], &["*.toml", "Cargo.lock"]),
    (&["ts", "typescript"], &["*.ts", "*.tsx", "*.cts", "*.mts"]),
    (&["twig"], &["*.twig"]),
    (&["txt"], &["*.txt"]),
    (&["typoscript"], &["*.typoscript", "*.ts"]),
    (&["typst"], &["*.typ"]),
    (&["usd"], &["*.usd", "*.usda", "*.usdc"]),
    (&["v"], &["*.v", "*.vsh"]),
    (&["vala"], &["*.vala"]),
    (&["vb"], &["*.vb"]),
    (&["vcl"], &["*.vcl"]),
    (&["verilog"], &["*.v", "*.vh", "*.sv", "*.svh"]),
    (&["vhdl"], &["*.vhd", "*.vhdl"]),
    (&["vim"], &[
        "*.vim", ".vimrc", ".gvimrc", "vimrc", "gvimrc", "_vimrc", "_gvimrc",
    ]),
    (&["vimscript"], &[
        "*.vim", ".vimrc", ".gvimrc", "vimrc", "gvimrc", "_vimrc", "_gvimrc",
    ]),
    (&["vue"], &["*.vue"]),
    (&["webidl"], &["*.idl", "*.webidl", "*.widl"]),
    (&["wgsl"], &["*.wgsl"]),
    (&["wiki"], &["*.mediawiki", "*.wiki"]),
    (&["xml"], &[
        "*.xml", "*.xml.dist", "*.dtd", "*.xsl", "*.xslt", "*.xsd", "*.xjb",
        "*.rng", "*.sch", "*.xhtml",
    ]),
    (&["xz"], &["*.xz", "*.txz"]),
    (&["yacc"], &["*.y"]),
    (&["yaml"], &["*.yaml", "*.yml"]),
    (&["yang"], &["*.yang"]),
    (&["z"], &["*.Z"]),
    (&["zig"], &["*.zig"]),
    (&["zsh"], &[
        ".zshenv", "zshenv",
        ".zlogin", "zlogin",
        ".zlogout", "zlogout",
        ".zprofile", "zprofile",
        ".zshrc", "zshrc",
        "*.zsh",
    ]),
    (&["zstd"], &["*.zst", "*.zstd"]),
];
#[cfg(test)]
mod tests {
    use super::DEFAULT_TYPES;

    /// Verifies that `DEFAULT_TYPES` is sorted lexicographically (strictly
    /// increasing) by the first alias of every entry.
    #[test]
    fn default_types_are_sorted() {
        // Gather the canonical (first) alias of every type, then check
        // every adjacent pair for strict ordering.
        let names: Vec<&str> =
            DEFAULT_TYPES.iter().map(|(aliases, _)| aliases[0]).collect();
        for pair in names.windows(2) {
            let (prev, curr) = (pair[0], pair[1]);
            assert!(
                curr > prev,
                r#""{}" should be sorted before "{}" in `DEFAULT_TYPES`"#,
                curr,
                prev
            );
        }
    }
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/ignore/tests/gitignore_skip_bom.rs | crates/ignore/tests/gitignore_skip_bom.rs | use ignore::gitignore::GitignoreBuilder;
/// Path to the gitignore fixture that starts with a UTF-8 byte-order mark.
const IGNORE_FILE: &'static str = "tests/gitignore_skip_bom.gitignore";
/// A Byte-Order Mark (BOM) at the beginning of a gitignore file must be
/// skipped, matching Git's own behavior; otherwise the first pattern would
/// be corrupted by the BOM bytes.
///
/// Ref: <https://github.com/BurntSushi/ripgrep/issues/2177>
#[test]
fn gitignore_skip_bom() {
    let mut builder = GitignoreBuilder::new("ROOT");
    assert!(
        builder.add(IGNORE_FILE).is_none(),
        "failed to open gitignore file"
    );
    let gitignore = builder.build().unwrap();
    // The fixture's first (and only) pattern must still match despite
    // the leading BOM.
    assert!(gitignore.matched("ignore/this/path", false).is_ignore());
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/ignore/tests/gitignore_matched_path_or_any_parents_tests.rs | crates/ignore/tests/gitignore_matched_path_or_any_parents_tests.rs | use std::path::Path;
use ignore::gitignore::{Gitignore, GitignoreBuilder};
/// Path to the gitignore fixture shared by every test in this file.
const IGNORE_FILE: &'static str =
    "tests/gitignore_matched_path_or_any_parents_tests.gitignore";
/// Builds the matcher used by all tests below from the fixture file,
/// rooted at the synthetic directory "ROOT". Panics if the fixture cannot
/// be opened or parsed.
fn get_gitignore() -> Gitignore {
    let mut builder = GitignoreBuilder::new("ROOT");
    assert!(
        builder.add(IGNORE_FILE).is_none(),
        "failed to open gitignore file"
    );
    builder.build().unwrap()
}
/// Paths outside of the matcher's root are a programmer error and must
/// panic rather than silently mis-match.
#[test]
#[should_panic(expected = "path is expected to be under the root")]
fn test_path_should_be_under_root() {
    let gitignore = get_gitignore();
    // "/tmp/some_file" is not under "ROOT", so this call must panic.
    gitignore
        .matched_path_or_any_parents(Path::new("/tmp/some_file"), false);
    // Unreachable when the expected panic fires; fails the test otherwise.
    assert!(false);
}
/// Files directly under the root: only the `file_root_{00,10,30}` names
/// match a fixture pattern; all other suffixes are unmatched.
#[test]
fn test_files_in_root() {
    let gitignore = get_gitignore();
    let m = |path: &str| {
        gitignore.matched_path_or_any_parents(Path::new(path), false)
    };
    // Suffixes the fixture ignores.
    for sfx in ["00", "10", "30"] {
        assert!(m(&format!("ROOT/file_root_{}", sfx)).is_ignore());
    }
    // Every other suffix yields no match.
    for sfx in [
        "01", "02", "03", "11", "12", "13", "20", "21", "22", "23", "31",
        "32", "33",
    ] {
        assert!(m(&format!("ROOT/file_root_{}", sfx)).is_none());
    }
}
/// Files one level below the root (under `parent_dir`): only the
/// `file_deep_{00,20,30}` names match; all other suffixes are unmatched.
#[test]
fn test_files_in_deep() {
    let gitignore = get_gitignore();
    let m = |path: &str| {
        gitignore.matched_path_or_any_parents(Path::new(path), false)
    };
    // Suffixes the fixture ignores.
    for sfx in ["00", "20", "30"] {
        assert!(m(&format!("ROOT/parent_dir/file_deep_{}", sfx)).is_ignore());
    }
    // Every other suffix yields no match.
    for sfx in [
        "01", "02", "03", "10", "11", "12", "13", "21", "22", "23", "31",
        "32", "33",
    ] {
        assert!(m(&format!("ROOT/parent_dir/file_deep_{}", sfx)).is_none());
    }
}
/// Directories directly under the root. For each `dir_root_NN` we probe
/// four paths: the directory itself, a file inside it, a child directory,
/// and a file inside that child directory.
#[test]
fn test_dirs_in_root() {
    let gitignore = get_gitignore();
    let m = |path: &str, is_dir: bool| {
        gitignore.matched_path_or_any_parents(Path::new(path), is_dir)
    };
    // These directories match a pattern themselves, so the directory and
    // everything beneath it is ignored.
    for sfx in ["00", "01", "10", "11", "30", "31"] {
        let dir = format!("ROOT/dir_root_{}", sfx);
        assert!(m(&dir, true).is_ignore());
        assert!(m(&format!("{}/file", dir), false).is_ignore());
        assert!(m(&format!("{}/child_dir", dir), true).is_ignore());
        assert!(m(&format!("{}/child_dir/file", dir), false).is_ignore());
    }
    // For these, the directory itself does not match, but all of its
    // contents do.
    for sfx in ["02", "03", "12", "13", "32", "33"] {
        let dir = format!("ROOT/dir_root_{}", sfx);
        assert!(m(&dir, true).is_none());
        assert!(m(&format!("{}/file", dir), false).is_ignore());
        assert!(m(&format!("{}/child_dir", dir), true).is_ignore());
        assert!(m(&format!("{}/child_dir/file", dir), false).is_ignore());
    }
    // Nothing under these directories matches at all.
    for sfx in ["20", "21", "22", "23"] {
        let dir = format!("ROOT/dir_root_{}", sfx);
        assert!(m(&dir, true).is_none());
        assert!(m(&format!("{}/file", dir), false).is_none());
        assert!(m(&format!("{}/child_dir", dir), true).is_none());
        assert!(m(&format!("{}/child_dir/file", dir), false).is_none());
    }
}
/// Directories one level below the root (under `parent_dir`). Same four
/// probes per `dir_deep_NN` as in `test_dirs_in_root`.
#[test]
fn test_dirs_in_deep() {
    let gitignore = get_gitignore();
    let m = |path: &str, is_dir: bool| {
        gitignore.matched_path_or_any_parents(Path::new(path), is_dir)
    };
    // These directories match a pattern themselves, so the directory and
    // everything beneath it is ignored.
    for sfx in ["00", "01", "20", "21", "30", "31"] {
        let dir = format!("ROOT/parent_dir/dir_deep_{}", sfx);
        assert!(m(&dir, true).is_ignore());
        assert!(m(&format!("{}/file", dir), false).is_ignore());
        assert!(m(&format!("{}/child_dir", dir), true).is_ignore());
        assert!(m(&format!("{}/child_dir/file", dir), false).is_ignore());
    }
    // For these, the directory itself does not match, but all of its
    // contents do.
    for sfx in ["22", "23", "32", "33"] {
        let dir = format!("ROOT/parent_dir/dir_deep_{}", sfx);
        assert!(m(&dir, true).is_none());
        assert!(m(&format!("{}/file", dir), false).is_ignore());
        assert!(m(&format!("{}/child_dir", dir), true).is_ignore());
        assert!(m(&format!("{}/child_dir/file", dir), false).is_ignore());
    }
    // Nothing under these directories matches at all.
    for sfx in ["02", "03", "10", "11", "12", "13"] {
        let dir = format!("ROOT/parent_dir/dir_deep_{}", sfx);
        assert!(m(&dir, true).is_none());
        assert!(m(&format!("{}/file", dir), false).is_none());
        assert!(m(&format!("{}/child_dir", dir), true).is_none());
        assert!(m(&format!("{}/child_dir/file", dir), false).is_none());
    }
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/ignore/examples/walk.rs | crates/ignore/examples/walk.rs | use std::{env, io::Write, path::Path};
use {bstr::ByteVec, ignore::WalkBuilder, walkdir::WalkDir};
/// Walks the directory named on the command line and prints every entry's
/// path, one per line, choosing a strategy from the first argument:
///
/// * `<dir>` — single-threaded `ignore::WalkBuilder` walk (the default).
/// * `parallel <dir>` — multi-threaded `ignore` walk with 6 threads.
/// * `walkdir <dir>` — plain `walkdir` walk.
///
/// Panics if the expected arguments are missing or a walk entry errors.
fn main() {
    let mut path = env::args().nth(1).unwrap();
    let mut parallel = false;
    let mut simple = false;
    // All walkers funnel entries through a bounded channel to a single
    // printer thread, so walking and printing overlap.
    let (tx, rx) = crossbeam_channel::bounded::<DirEntry>(100);
    if path == "parallel" {
        path = env::args().nth(2).unwrap();
        parallel = true;
    } else if path == "walkdir" {
        path = env::args().nth(2).unwrap();
        simple = true;
    }
    // Dedicated printer: drains the channel until every sender is dropped.
    let stdout_thread = std::thread::spawn(move || {
        let mut stdout = std::io::BufWriter::new(std::io::stdout());
        for dent in rx {
            stdout.write_all(&Vec::from_path_lossy(dent.path())).unwrap();
            stdout.write_all(b"\n").unwrap();
        }
    });
    if parallel {
        let walker = WalkBuilder::new(path).threads(6).build_parallel();
        walker.run(|| {
            // Each worker gets its own clone of the sender.
            let tx = tx.clone();
            Box::new(move |result| {
                use ignore::WalkState::*;
                tx.send(DirEntry::Y(result.unwrap())).unwrap();
                Continue
            })
        });
    } else if simple {
        let walker = WalkDir::new(path);
        for result in walker {
            tx.send(DirEntry::X(result.unwrap())).unwrap();
        }
    } else {
        let walker = WalkBuilder::new(path).build();
        for result in walker {
            tx.send(DirEntry::Y(result.unwrap())).unwrap();
        }
    }
    // Dropping the last sender closes the channel, which lets the printer
    // thread's `for` loop finish before we join it.
    drop(tx);
    stdout_thread.join().unwrap();
}
/// A directory entry produced by either of the two walker crates used by
/// this example.
enum DirEntry {
    /// An entry from the plain `walkdir` crate.
    X(walkdir::DirEntry),
    /// An entry from the `ignore` crate's walkers.
    Y(ignore::DirEntry),
}
impl DirEntry {
    /// Returns the entry's path, regardless of which walker produced it.
    fn path(&self) -> &Path {
        match self {
            DirEntry::X(dent) => dent.path(),
            DirEntry::Y(dent) => dent.path(),
        }
    }
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
BurntSushi/ripgrep | https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/core/messages.rs | crates/core/messages.rs | /*!
This module defines some macros and some light shared mutable state.
This state is responsible for keeping track of whether we should emit certain
kinds of messages to the user (such as errors) that are distinct from the
standard "debug" or "trace" log messages. This state is specifically set at
startup time when CLI arguments are parsed and then never changed.
The other state tracked here is whether ripgrep experienced an error
condition. Aside from errors associated with invalid CLI arguments, ripgrep
generally does not abort when an error occurs (e.g., if reading a file failed).
But when an error does occur, it will alter ripgrep's exit status. Thus, when
an error message is emitted via `err_message`, then a global flag is toggled
indicating that at least one error occurred. When ripgrep exits, this flag is
consulted to determine what the exit status ought to be.
*/
use std::sync::atomic::{AtomicBool, Ordering};
/// When false, "messages" will not be printed.
static MESSAGES: AtomicBool = AtomicBool::new(false);
/// When false, "messages" related to ignore rules will not be printed.
static IGNORE_MESSAGES: AtomicBool = AtomicBool::new(false);
/// Flipped to true when an error message is printed.
///
/// Consulted when ripgrep exits to determine the exit status (see the
/// module docs above).
static ERRORED: AtomicBool = AtomicBool::new(false);
/// Like eprintln, but locks stdout to prevent interleaving lines.
///
/// This locks stdout, not stderr, even though this prints to stderr. This
/// avoids the appearance of interleaving output when stdout and stderr both
/// correspond to a tty.
#[macro_export]
macro_rules! eprintln_locked {
    ($($tt:tt)*) => {{
        {
            use std::io::Write;
            // This is a bit of an abstraction violation because we explicitly
            // lock stdout before printing to stderr. This avoids interleaving
            // lines within ripgrep because `search_parallel` uses `termcolor`,
            // which accesses the same stdout lock when writing lines.
            let stdout = std::io::stdout().lock();
            let mut stderr = std::io::stderr().lock();
            // We specifically ignore any errors here. One plausible error we
            // can get in some cases is a broken pipe error. And when that
            // occurs, we should exit gracefully. Otherwise, just abort with
            // an error code because there isn't much else we can do.
            //
            // See: https://github.com/BurntSushi/ripgrep/issues/1966
            if let Err(err) = write!(stderr, "rg: ") {
                if err.kind() == std::io::ErrorKind::BrokenPipe {
                    std::process::exit(0);
                } else {
                    std::process::exit(2);
                }
            }
            if let Err(err) = writeln!(stderr, $($tt)*) {
                if err.kind() == std::io::ErrorKind::BrokenPipe {
                    std::process::exit(0);
                } else {
                    std::process::exit(2);
                }
            }
            // Explicitly hold the stdout lock until all stderr output for
            // this message is written.
            drop(stdout);
        }
    }}
}
/// Emit a non-fatal error message, unless messages were disabled.
///
/// When messages are disabled, the only cost is the atomic load inside
/// `crate::messages::messages()`.
#[macro_export]
macro_rules! message {
    ($($tt:tt)*) => {
        if crate::messages::messages() {
            eprintln_locked!($($tt)*);
        }
    }
}
/// Like message, but sets ripgrep's "errored" flag, which controls the exit
/// status.
///
/// Note that the flag is set unconditionally, even when message printing
/// has been disabled.
#[macro_export]
macro_rules! err_message {
    ($($tt:tt)*) => {
        crate::messages::set_errored();
        message!($($tt)*);
    }
}
/// Emit a non-fatal ignore-related error message (like a parse error), unless
/// ignore-messages were disabled.
///
/// Both the global message switch and the ignore-specific switch must be
/// enabled for anything to be printed.
#[macro_export]
macro_rules! ignore_message {
    ($($tt:tt)*) => {
        if crate::messages::messages() && crate::messages::ignore_messages() {
            eprintln_locked!($($tt)*);
        }
    }
}
/// Returns true if and only if messages should be shown.
pub(crate) fn messages() -> bool {
    // Relaxed is sufficient: per the module docs, this flag is set once at
    // startup and never changed afterwards.
    MESSAGES.load(Ordering::Relaxed)
}
/// Set whether messages should be shown or not.
///
/// By default, they are not shown.
///
/// Per the module docs, this is called once at startup during CLI parsing.
pub(crate) fn set_messages(yes: bool) {
    MESSAGES.store(yes, Ordering::Relaxed)
}
/// Returns true if and only if "ignore" related messages should be shown.
pub(crate) fn ignore_messages() -> bool {
    // Relaxed is sufficient: this flag is set once at startup.
    IGNORE_MESSAGES.load(Ordering::Relaxed)
}
/// Set whether "ignore" related messages should be shown or not.
///
/// By default, they are not shown.
///
/// Note that this is overridden if `messages` is disabled. Namely, if
/// `messages` is disabled, then "ignore" messages are never shown, regardless
/// of this setting.
pub(crate) fn set_ignore_messages(yes: bool) {
    IGNORE_MESSAGES.store(yes, Ordering::Relaxed)
}
/// Returns true if and only if ripgrep came across a non-fatal error.
///
/// Consulted at exit time to determine the exit status (see module docs).
pub(crate) fn errored() -> bool {
    ERRORED.load(Ordering::Relaxed)
}
/// Indicate that ripgrep has come across a non-fatal error.
///
/// Callers should not use this directly. Instead, it is called automatically
/// via the `err_message` macro.
pub(crate) fn set_errored() {
    // One-way flag: once set, it is never cleared.
    ERRORED.store(true, Ordering::Relaxed);
}
| rust | Unlicense | 0a88cccd5188074de96f54a4b6b44a63971ac157 | 2026-01-04T15:31:58.730867Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.