repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/core/logger.rs
crates/core/logger.rs
/*! Defines a super simple logger that works with the `log` crate. We don't do anything fancy. We just need basic log levels and the ability to print to stderr. We therefore avoid bringing in extra dependencies just for this functionality. */ use log::Log; /// The simplest possible logger that logs to stderr. /// /// This logger does no filtering. Instead, it relies on the `log` crates /// filtering via its global max_level setting. #[derive(Debug)] pub(crate) struct Logger(()); /// A singleton used as the target for an implementation of the `Log` trait. const LOGGER: &'static Logger = &Logger(()); impl Logger { /// Create a new logger that logs to stderr and initialize it as the /// global logger. If there was a problem setting the logger, then an /// error is returned. pub(crate) fn init() -> Result<(), log::SetLoggerError> { log::set_logger(LOGGER) } } impl Log for Logger { fn enabled(&self, _: &log::Metadata<'_>) -> bool { // We set the log level via log::set_max_level, so we don't need to // implement filtering here. true } fn log(&self, record: &log::Record<'_>) { match (record.file(), record.line()) { (Some(file), Some(line)) => { eprintln_locked!( "{}|{}|{}:{}: {}", record.level(), record.target(), file, line, record.args() ); } (Some(file), None) => { eprintln_locked!( "{}|{}|{}: {}", record.level(), record.target(), file, record.args() ); } _ => { eprintln_locked!( "{}|{}: {}", record.level(), record.target(), record.args() ); } } } fn flush(&self) { // We use eprintln_locked! which is flushed on every call. } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/core/haystack.rs
crates/core/haystack.rs
/*! Defines a builder for haystacks. A "haystack" represents something we want to search. It encapsulates the logic for whether a haystack ought to be searched or not, separate from the standard ignore rules and other filtering logic. Effectively, a haystack wraps a directory entry and adds some light application level logic around it. */ use std::path::Path; /// A builder for constructing things to search over. #[derive(Clone, Debug)] pub(crate) struct HaystackBuilder { strip_dot_prefix: bool, } impl HaystackBuilder { /// Return a new haystack builder with a default configuration. pub(crate) fn new() -> HaystackBuilder { HaystackBuilder { strip_dot_prefix: false } } /// Create a new haystack from a possibly missing directory entry. /// /// If the directory entry isn't present, then the corresponding error is /// logged if messages have been configured. Otherwise, if the directory /// entry is deemed searchable, then it is returned as a haystack. pub(crate) fn build_from_result( &self, result: Result<ignore::DirEntry, ignore::Error>, ) -> Option<Haystack> { match result { Ok(dent) => self.build(dent), Err(err) => { err_message!("{err}"); None } } } /// Create a new haystack using this builder's configuration. /// /// If a directory entry could not be created or should otherwise not be /// searched, then this returns `None` after emitting any relevant log /// messages. fn build(&self, dent: ignore::DirEntry) -> Option<Haystack> { let hay = Haystack { dent, strip_dot_prefix: self.strip_dot_prefix }; if let Some(err) = hay.dent.error() { ignore_message!("{err}"); } // If this entry was explicitly provided by an end user, then we always // want to search it. if hay.is_explicit() { return Some(hay); } // At this point, we only want to search something if it's explicitly a // file. This omits symlinks. (If ripgrep was configured to follow // symlinks, then they have already been followed by the directory // traversal.) 
if hay.is_file() { return Some(hay); } // We got nothing. Emit a debug message, but only if this isn't a // directory. Otherwise, emitting messages for directories is just // noisy. if !hay.is_dir() { log::debug!( "ignoring {}: failed to pass haystack filter: \ file type: {:?}, metadata: {:?}", hay.dent.path().display(), hay.dent.file_type(), hay.dent.metadata() ); } None } /// When enabled, if the haystack's file path starts with `./` then it is /// stripped. /// /// This is useful when implicitly searching the current working directory. pub(crate) fn strip_dot_prefix( &mut self, yes: bool, ) -> &mut HaystackBuilder { self.strip_dot_prefix = yes; self } } /// A haystack is a thing we want to search. /// /// Generally, a haystack is either a file or stdin. #[derive(Clone, Debug)] pub(crate) struct Haystack { dent: ignore::DirEntry, strip_dot_prefix: bool, } impl Haystack { /// Return the file path corresponding to this haystack. /// /// If this haystack corresponds to stdin, then a special `<stdin>` path /// is returned instead. pub(crate) fn path(&self) -> &Path { if self.strip_dot_prefix && self.dent.path().starts_with("./") { self.dent.path().strip_prefix("./").unwrap() } else { self.dent.path() } } /// Returns true if and only if this entry corresponds to stdin. pub(crate) fn is_stdin(&self) -> bool { self.dent.is_stdin() } /// Returns true if and only if this entry corresponds to a haystack to /// search that was explicitly supplied by an end user. /// /// Generally, this corresponds to either stdin or an explicit file path /// argument. e.g., in `rg foo some-file ./some-dir/`, `some-file` is /// an explicit haystack, but, e.g., `./some-dir/some-other-file` is not. /// /// However, note that ripgrep does not see through shell globbing. e.g., /// in `rg foo ./some-dir/*`, `./some-dir/some-other-file` will be treated /// as an explicit haystack. pub(crate) fn is_explicit(&self) -> bool { // stdin is obvious. 
When an entry has a depth of 0, that means it // was explicitly provided to our directory iterator, which means it // was in turn explicitly provided by the end user. The !is_dir check // means that we want to search files even if their symlinks, again, // because they were explicitly provided. (And we never want to try // to search a directory.) self.is_stdin() || (self.dent.depth() == 0 && !self.is_dir()) } /// Returns true if and only if this haystack points to a directory after /// following symbolic links. fn is_dir(&self) -> bool { let ft = match self.dent.file_type() { None => return false, Some(ft) => ft, }; if ft.is_dir() { return true; } // If this is a symlink, then we want to follow it to determine // whether it's a directory or not. self.dent.path_is_symlink() && self.dent.path().is_dir() } /// Returns true if and only if this haystack points to a file. fn is_file(&self) -> bool { self.dent.file_type().map_or(false, |ft| ft.is_file()) } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/core/search.rs
crates/core/search.rs
/*! Defines a very high level "search worker" abstraction. A search worker manages the high level interaction points between the matcher (i.e., which regex engine is used), the searcher (i.e., how data is actually read and matched using the regex engine) and the printer. For example, the search worker is where things like preprocessors or decompression happens. */ use std::{io, path::Path}; use {grep::matcher::Matcher, termcolor::WriteColor}; /// The configuration for the search worker. /// /// Among a few other things, the configuration primarily controls the way we /// show search results to users at a very high level. #[derive(Clone, Debug)] struct Config { preprocessor: Option<std::path::PathBuf>, preprocessor_globs: ignore::overrides::Override, search_zip: bool, binary_implicit: grep::searcher::BinaryDetection, binary_explicit: grep::searcher::BinaryDetection, } impl Default for Config { fn default() -> Config { Config { preprocessor: None, preprocessor_globs: ignore::overrides::Override::empty(), search_zip: false, binary_implicit: grep::searcher::BinaryDetection::none(), binary_explicit: grep::searcher::BinaryDetection::none(), } } } /// A builder for configuring and constructing a search worker. #[derive(Clone, Debug)] pub(crate) struct SearchWorkerBuilder { config: Config, command_builder: grep::cli::CommandReaderBuilder, } impl Default for SearchWorkerBuilder { fn default() -> SearchWorkerBuilder { SearchWorkerBuilder::new() } } impl SearchWorkerBuilder { /// Create a new builder for configuring and constructing a search worker. pub(crate) fn new() -> SearchWorkerBuilder { let mut command_builder = grep::cli::CommandReaderBuilder::new(); command_builder.async_stderr(true); SearchWorkerBuilder { config: Config::default(), command_builder } } /// Create a new search worker using the given searcher, matcher and /// printer. 
pub(crate) fn build<W: WriteColor>( &self, matcher: PatternMatcher, searcher: grep::searcher::Searcher, printer: Printer<W>, ) -> SearchWorker<W> { let config = self.config.clone(); let command_builder = self.command_builder.clone(); let decomp_builder = config.search_zip.then(|| { let mut decomp_builder = grep::cli::DecompressionReaderBuilder::new(); decomp_builder.async_stderr(true); decomp_builder }); SearchWorker { config, command_builder, decomp_builder, matcher, searcher, printer, } } /// Set the path to a preprocessor command. /// /// When this is set, instead of searching files directly, the given /// command will be run with the file path as the first argument, and the /// output of that command will be searched instead. pub(crate) fn preprocessor( &mut self, cmd: Option<std::path::PathBuf>, ) -> anyhow::Result<&mut SearchWorkerBuilder> { if let Some(ref prog) = cmd { let bin = grep::cli::resolve_binary(prog)?; self.config.preprocessor = Some(bin); } else { self.config.preprocessor = None; } Ok(self) } /// Set the globs for determining which files should be run through the /// preprocessor. By default, with no globs and a preprocessor specified, /// every file is run through the preprocessor. pub(crate) fn preprocessor_globs( &mut self, globs: ignore::overrides::Override, ) -> &mut SearchWorkerBuilder { self.config.preprocessor_globs = globs; self } /// Enable the decompression and searching of common compressed files. /// /// When enabled, if a particular file path is recognized as a compressed /// file, then it is decompressed before searching. /// /// Note that if a preprocessor command is set, then it overrides this /// setting. pub(crate) fn search_zip( &mut self, yes: bool, ) -> &mut SearchWorkerBuilder { self.config.search_zip = yes; self } /// Set the binary detection that should be used when searching files /// found via a recursive directory search. 
/// /// Generally, this binary detection may be /// `grep::searcher::BinaryDetection::quit` if we want to skip binary files /// completely. /// /// By default, no binary detection is performed. pub(crate) fn binary_detection_implicit( &mut self, detection: grep::searcher::BinaryDetection, ) -> &mut SearchWorkerBuilder { self.config.binary_implicit = detection; self } /// Set the binary detection that should be used when searching files /// explicitly supplied by an end user. /// /// Generally, this binary detection should NOT be /// `grep::searcher::BinaryDetection::quit`, since we never want to /// automatically filter files supplied by the end user. /// /// By default, no binary detection is performed. pub(crate) fn binary_detection_explicit( &mut self, detection: grep::searcher::BinaryDetection, ) -> &mut SearchWorkerBuilder { self.config.binary_explicit = detection; self } } /// The result of executing a search. /// /// Generally speaking, the "result" of a search is sent to a printer, which /// writes results to an underlying writer such as stdout or a file. However, /// every search also has some aggregate statistics or meta data that may be /// useful to higher level routines. #[derive(Clone, Debug, Default)] pub(crate) struct SearchResult { has_match: bool, stats: Option<grep::printer::Stats>, } impl SearchResult { /// Whether the search found a match or not. pub(crate) fn has_match(&self) -> bool { self.has_match } /// Return aggregate search statistics for a single search, if available. /// /// It can be expensive to compute statistics, so these are only present /// if explicitly enabled in the printer provided by the caller. pub(crate) fn stats(&self) -> Option<&grep::printer::Stats> { self.stats.as_ref() } } /// The pattern matcher used by a search worker. #[derive(Clone, Debug)] pub(crate) enum PatternMatcher { RustRegex(grep::regex::RegexMatcher), #[cfg(feature = "pcre2")] PCRE2(grep::pcre2::RegexMatcher), } /// The printer used by a search worker. 
/// /// The `W` type parameter refers to the type of the underlying writer. #[derive(Clone, Debug)] pub(crate) enum Printer<W> { /// Use the standard printer, which supports the classic grep-like format. Standard(grep::printer::Standard<W>), /// Use the summary printer, which supports aggregate displays of search /// results. Summary(grep::printer::Summary<W>), /// A JSON printer, which emits results in the JSON Lines format. JSON(grep::printer::JSON<W>), } impl<W: WriteColor> Printer<W> { /// Return a mutable reference to the underlying printer's writer. pub(crate) fn get_mut(&mut self) -> &mut W { match *self { Printer::Standard(ref mut p) => p.get_mut(), Printer::Summary(ref mut p) => p.get_mut(), Printer::JSON(ref mut p) => p.get_mut(), } } } /// A worker for executing searches. /// /// It is intended for a single worker to execute many searches, and is /// generally intended to be used from a single thread. When searching using /// multiple threads, it is better to create a new worker for each thread. #[derive(Clone, Debug)] pub(crate) struct SearchWorker<W> { config: Config, command_builder: grep::cli::CommandReaderBuilder, /// This is `None` when `search_zip` is not enabled, since in this case it /// can never be used. We do this because building the reader can sometimes /// do non-trivial work (like resolving the paths of decompression binaries /// on Windows). decomp_builder: Option<grep::cli::DecompressionReaderBuilder>, matcher: PatternMatcher, searcher: grep::searcher::Searcher, printer: Printer<W>, } impl<W: WriteColor> SearchWorker<W> { /// Execute a search over the given haystack. 
pub(crate) fn search( &mut self, haystack: &crate::haystack::Haystack, ) -> io::Result<SearchResult> { let bin = if haystack.is_explicit() { self.config.binary_explicit.clone() } else { self.config.binary_implicit.clone() }; let path = haystack.path(); log::trace!("{}: binary detection: {:?}", path.display(), bin); self.searcher.set_binary_detection(bin); if haystack.is_stdin() { self.search_reader(path, &mut io::stdin().lock()) } else if self.should_preprocess(path) { self.search_preprocessor(path) } else if self.should_decompress(path) { self.search_decompress(path) } else { self.search_path(path) } } /// Return a mutable reference to the underlying printer. pub(crate) fn printer(&mut self) -> &mut Printer<W> { &mut self.printer } /// Returns true if and only if the given file path should be /// decompressed before searching. fn should_decompress(&self, path: &Path) -> bool { self.decomp_builder.as_ref().is_some_and(|decomp_builder| { decomp_builder.get_matcher().has_command(path) }) } /// Returns true if and only if the given file path should be run through /// the preprocessor. fn should_preprocess(&self, path: &Path) -> bool { if !self.config.preprocessor.is_some() { return false; } if self.config.preprocessor_globs.is_empty() { return true; } !self.config.preprocessor_globs.matched(path, false).is_ignore() } /// Search the given file path by first asking the preprocessor for the /// data to search instead of opening the path directly. 
fn search_preprocessor( &mut self, path: &Path, ) -> io::Result<SearchResult> { use std::{fs::File, process::Stdio}; let bin = self.config.preprocessor.as_ref().unwrap(); let mut cmd = std::process::Command::new(bin); cmd.arg(path).stdin(Stdio::from(File::open(path)?)); let mut rdr = self.command_builder.build(&mut cmd).map_err(|err| { io::Error::new( io::ErrorKind::Other, format!( "preprocessor command could not start: '{cmd:?}': {err}", ), ) })?; let result = self.search_reader(path, &mut rdr).map_err(|err| { io::Error::new( io::ErrorKind::Other, format!("preprocessor command failed: '{cmd:?}': {err}"), ) }); let close_result = rdr.close(); let search_result = result?; close_result?; Ok(search_result) } /// Attempt to decompress the data at the given file path and search the /// result. If the given file path isn't recognized as a compressed file, /// then search it without doing any decompression. fn search_decompress(&mut self, path: &Path) -> io::Result<SearchResult> { let Some(ref decomp_builder) = self.decomp_builder else { return self.search_path(path); }; let mut rdr = decomp_builder.build(path)?; let result = self.search_reader(path, &mut rdr); let close_result = rdr.close(); let search_result = result?; close_result?; Ok(search_result) } /// Search the contents of the given file path. fn search_path(&mut self, path: &Path) -> io::Result<SearchResult> { use self::PatternMatcher::*; let (searcher, printer) = (&mut self.searcher, &mut self.printer); match self.matcher { RustRegex(ref m) => search_path(m, searcher, printer, path), #[cfg(feature = "pcre2")] PCRE2(ref m) => search_path(m, searcher, printer, path), } } /// Executes a search on the given reader, which may or may not correspond /// directly to the contents of the given file path. Instead, the reader /// may actually cause something else to be searched (for example, when /// a preprocessor is set or when decompression is enabled). In those /// cases, the file path is used for visual purposes only. 
/// /// Generally speaking, this method should only be used when there is no /// other choice. Searching via `search_path` provides more opportunities /// for optimizations (such as memory maps). fn search_reader<R: io::Read>( &mut self, path: &Path, rdr: &mut R, ) -> io::Result<SearchResult> { use self::PatternMatcher::*; let (searcher, printer) = (&mut self.searcher, &mut self.printer); match self.matcher { RustRegex(ref m) => search_reader(m, searcher, printer, path, rdr), #[cfg(feature = "pcre2")] PCRE2(ref m) => search_reader(m, searcher, printer, path, rdr), } } } /// Search the contents of the given file path using the given matcher, /// searcher and printer. fn search_path<M: Matcher, W: WriteColor>( matcher: M, searcher: &mut grep::searcher::Searcher, printer: &mut Printer<W>, path: &Path, ) -> io::Result<SearchResult> { match *printer { Printer::Standard(ref mut p) => { let mut sink = p.sink_with_path(&matcher, path); searcher.search_path(&matcher, path, &mut sink)?; Ok(SearchResult { has_match: sink.has_match(), stats: sink.stats().map(|s| s.clone()), }) } Printer::Summary(ref mut p) => { let mut sink = p.sink_with_path(&matcher, path); searcher.search_path(&matcher, path, &mut sink)?; Ok(SearchResult { has_match: sink.has_match(), stats: sink.stats().map(|s| s.clone()), }) } Printer::JSON(ref mut p) => { let mut sink = p.sink_with_path(&matcher, path); searcher.search_path(&matcher, path, &mut sink)?; Ok(SearchResult { has_match: sink.has_match(), stats: Some(sink.stats().clone()), }) } } } /// Search the contents of the given reader using the given matcher, searcher /// and printer. 
fn search_reader<M: Matcher, R: io::Read, W: WriteColor>( matcher: M, searcher: &mut grep::searcher::Searcher, printer: &mut Printer<W>, path: &Path, mut rdr: R, ) -> io::Result<SearchResult> { match *printer { Printer::Standard(ref mut p) => { let mut sink = p.sink_with_path(&matcher, path); searcher.search_reader(&matcher, &mut rdr, &mut sink)?; Ok(SearchResult { has_match: sink.has_match(), stats: sink.stats().map(|s| s.clone()), }) } Printer::Summary(ref mut p) => { let mut sink = p.sink_with_path(&matcher, path); searcher.search_reader(&matcher, &mut rdr, &mut sink)?; Ok(SearchResult { has_match: sink.has_match(), stats: sink.stats().map(|s| s.clone()), }) } Printer::JSON(ref mut p) => { let mut sink = p.sink_with_path(&matcher, path); searcher.search_reader(&matcher, &mut rdr, &mut sink)?; Ok(SearchResult { has_match: sink.has_match(), stats: Some(sink.stats().clone()), }) } } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/core/main.rs
crates/core/main.rs
/*! The main entry point into ripgrep. */ use std::{io::Write, process::ExitCode}; use ignore::WalkState; use crate::flags::{HiArgs, SearchMode}; #[macro_use] mod messages; mod flags; mod haystack; mod logger; mod search; // Since Rust no longer uses jemalloc by default, ripgrep will, by default, // use the system allocator. On Linux, this would normally be glibc's // allocator, which is pretty good. In particular, ripgrep does not have a // particularly allocation heavy workload, so there really isn't much // difference (for ripgrep's purposes) between glibc's allocator and jemalloc. // // However, when ripgrep is built with musl, this means ripgrep will use musl's // allocator, which appears to be substantially worse. (musl's goal is not to // have the fastest version of everything. Its goal is to be small and amenable // to static compilation.) Even though ripgrep isn't particularly allocation // heavy, musl's allocator appears to slow down ripgrep quite a bit. Therefore, // when building with musl, we use jemalloc. // // We don't unconditionally use jemalloc because it can be nice to use the // system's default allocator by default. Moreover, jemalloc seems to increase // compilation times by a bit. // // Moreover, we only do this on 64-bit systems since jemalloc doesn't support // i686. #[cfg(all(target_env = "musl", target_pointer_width = "64"))] #[global_allocator] static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; /// Then, as it was, then again it will be. fn main() -> ExitCode { match run(flags::parse()) { Ok(code) => code, Err(err) => { // Look for a broken pipe error. In this case, we generally want // to exit "gracefully" with a success exit code. This matches // existing Unix convention. We need to handle this explicitly // since the Rust runtime doesn't ask for PIPE signals, and thus // we get an I/O error instead. 
Traditional C Unix applications // quit by getting a PIPE signal that they don't handle, and thus // the unhandled signal causes the process to unceremoniously // terminate. for cause in err.chain() { if let Some(ioerr) = cause.downcast_ref::<std::io::Error>() { if ioerr.kind() == std::io::ErrorKind::BrokenPipe { return ExitCode::from(0); } } } eprintln_locked!("{:#}", err); ExitCode::from(2) } } } /// The main entry point for ripgrep. /// /// The given parse result determines ripgrep's behavior. The parse /// result should be the result of parsing CLI arguments in a low level /// representation, and then followed by an attempt to convert them into a /// higher level representation. The higher level representation has some nicer /// abstractions, for example, instead of representing the `-g/--glob` flag /// as a `Vec<String>` (as in the low level representation), the globs are /// converted into a single matcher. fn run(result: crate::flags::ParseResult<HiArgs>) -> anyhow::Result<ExitCode> { use crate::flags::{Mode, ParseResult}; let args = match result { ParseResult::Err(err) => return Err(err), ParseResult::Special(mode) => return special(mode), ParseResult::Ok(args) => args, }; let matched = match args.mode() { Mode::Search(_) if !args.matches_possible() => false, Mode::Search(mode) if args.threads() == 1 => search(&args, mode)?, Mode::Search(mode) => search_parallel(&args, mode)?, Mode::Files if args.threads() == 1 => files(&args)?, Mode::Files => files_parallel(&args)?, Mode::Types => return types(&args), Mode::Generate(mode) => return generate(mode), }; Ok(if matched && (args.quiet() || !messages::errored()) { ExitCode::from(0) } else if messages::errored() { ExitCode::from(2) } else { ExitCode::from(1) }) } /// The top-level entry point for single-threaded search. /// /// This recursively steps through the file list (current directory by default) /// and searches each file sequentially. 
fn search(args: &HiArgs, mode: SearchMode) -> anyhow::Result<bool> { let started_at = std::time::Instant::now(); let haystack_builder = args.haystack_builder(); let unsorted = args .walk_builder()? .build() .filter_map(|result| haystack_builder.build_from_result(result)); let haystacks = args.sort(unsorted); let mut matched = false; let mut searched = false; let mut stats = args.stats(); let mut searcher = args.search_worker( args.matcher()?, args.searcher()?, args.printer(mode, args.stdout()), )?; for haystack in haystacks { searched = true; let search_result = match searcher.search(&haystack) { Ok(search_result) => search_result, // A broken pipe means graceful termination. Err(err) if err.kind() == std::io::ErrorKind::BrokenPipe => break, Err(err) => { err_message!("{}: {}", haystack.path().display(), err); continue; } }; matched = matched || search_result.has_match(); if let Some(ref mut stats) = stats { *stats += search_result.stats().unwrap(); } if matched && args.quit_after_match() { break; } } if args.has_implicit_path() && !searched { eprint_nothing_searched(); } if let Some(ref stats) = stats { let wtr = searcher.printer().get_mut(); let _ = print_stats(mode, stats, started_at, wtr); } Ok(matched) } /// The top-level entry point for multi-threaded search. /// /// The parallelism is itself achieved by the recursive directory traversal. /// All we need to do is feed it a worker for performing a search on each file. /// /// Requesting a sorted output from ripgrep (such as with `--sort path`) will /// automatically disable parallelism and hence sorting is not handled here. 
fn search_parallel(args: &HiArgs, mode: SearchMode) -> anyhow::Result<bool> { use std::sync::atomic::{AtomicBool, Ordering}; let started_at = std::time::Instant::now(); let haystack_builder = args.haystack_builder(); let bufwtr = args.buffer_writer(); let stats = args.stats().map(std::sync::Mutex::new); let matched = AtomicBool::new(false); let searched = AtomicBool::new(false); let mut searcher = args.search_worker( args.matcher()?, args.searcher()?, args.printer(mode, bufwtr.buffer()), )?; args.walk_builder()?.build_parallel().run(|| { let bufwtr = &bufwtr; let stats = &stats; let matched = &matched; let searched = &searched; let haystack_builder = &haystack_builder; let mut searcher = searcher.clone(); Box::new(move |result| { let haystack = match haystack_builder.build_from_result(result) { Some(haystack) => haystack, None => return WalkState::Continue, }; searched.store(true, Ordering::SeqCst); searcher.printer().get_mut().clear(); let search_result = match searcher.search(&haystack) { Ok(search_result) => search_result, Err(err) => { err_message!("{}: {}", haystack.path().display(), err); return WalkState::Continue; } }; if search_result.has_match() { matched.store(true, Ordering::SeqCst); } if let Some(ref locked_stats) = *stats { let mut stats = locked_stats.lock().unwrap(); *stats += search_result.stats().unwrap(); } if let Err(err) = bufwtr.print(searcher.printer().get_mut()) { // A broken pipe means graceful termination. if err.kind() == std::io::ErrorKind::BrokenPipe { return WalkState::Quit; } // Otherwise, we continue on our merry way. 
err_message!("{}: {}", haystack.path().display(), err); } if matched.load(Ordering::SeqCst) && args.quit_after_match() { WalkState::Quit } else { WalkState::Continue } }) }); if args.has_implicit_path() && !searched.load(Ordering::SeqCst) { eprint_nothing_searched(); } if let Some(ref locked_stats) = stats { let stats = locked_stats.lock().unwrap(); let mut wtr = searcher.printer().get_mut(); let _ = print_stats(mode, &stats, started_at, &mut wtr); let _ = bufwtr.print(&mut wtr); } Ok(matched.load(Ordering::SeqCst)) } /// The top-level entry point for file listing without searching. /// /// This recursively steps through the file list (current directory by default) /// and prints each path sequentially using a single thread. fn files(args: &HiArgs) -> anyhow::Result<bool> { let haystack_builder = args.haystack_builder(); let unsorted = args .walk_builder()? .build() .filter_map(|result| haystack_builder.build_from_result(result)); let haystacks = args.sort(unsorted); let mut matched = false; let mut path_printer = args.path_printer_builder().build(args.stdout()); for haystack in haystacks { matched = true; if args.quit_after_match() { break; } if let Err(err) = path_printer.write(haystack.path()) { // A broken pipe means graceful termination. if err.kind() == std::io::ErrorKind::BrokenPipe { break; } // Otherwise, we have some other error that's preventing us from // writing to stdout, so we should bubble it up. return Err(err.into()); } } Ok(matched) } /// The top-level entry point for multi-threaded file listing without /// searching. /// /// This recursively steps through the file list (current directory by default) /// and prints each path sequentially using multiple threads. /// /// Requesting a sorted output from ripgrep (such as with `--sort path`) will /// automatically disable parallelism and hence sorting is not handled here. 
fn files_parallel(args: &HiArgs) -> anyhow::Result<bool> { use std::{ sync::{ atomic::{AtomicBool, Ordering}, mpsc, }, thread, }; let haystack_builder = args.haystack_builder(); let mut path_printer = args.path_printer_builder().build(args.stdout()); let matched = AtomicBool::new(false); let (tx, rx) = mpsc::channel::<crate::haystack::Haystack>(); // We spawn a single printing thread to make sure we don't tear writes. // We use a channel here under the presumption that it's probably faster // than using a mutex in the worker threads below, but this has never been // seriously litigated. let print_thread = thread::spawn(move || -> std::io::Result<()> { for haystack in rx.iter() { path_printer.write(haystack.path())?; } Ok(()) }); args.walk_builder()?.build_parallel().run(|| { let haystack_builder = &haystack_builder; let matched = &matched; let tx = tx.clone(); Box::new(move |result| { let haystack = match haystack_builder.build_from_result(result) { Some(haystack) => haystack, None => return WalkState::Continue, }; matched.store(true, Ordering::SeqCst); if args.quit_after_match() { WalkState::Quit } else { match tx.send(haystack) { Ok(_) => WalkState::Continue, Err(_) => WalkState::Quit, } } }) }); drop(tx); if let Err(err) = print_thread.join().unwrap() { // A broken pipe means graceful termination, so fall through. // Otherwise, something bad happened while writing to stdout, so bubble // it up. if err.kind() != std::io::ErrorKind::BrokenPipe { return Err(err.into()); } } Ok(matched.load(Ordering::SeqCst)) } /// The top-level entry point for `--type-list`. 
fn types(args: &HiArgs) -> anyhow::Result<ExitCode> { let mut count = 0; let mut stdout = args.stdout(); for def in args.types().definitions() { count += 1; stdout.write_all(def.name().as_bytes())?; stdout.write_all(b": ")?; let mut first = true; for glob in def.globs() { if !first { stdout.write_all(b", ")?; } stdout.write_all(glob.as_bytes())?; first = false; } stdout.write_all(b"\n")?; } Ok(ExitCode::from(if count == 0 { 1 } else { 0 })) } /// Implements ripgrep's "generate" modes. /// /// These modes correspond to generating some kind of ancillary data related /// to ripgrep. At present, this includes ripgrep's man page (in roff format) /// and supported shell completions. fn generate(mode: crate::flags::GenerateMode) -> anyhow::Result<ExitCode> { use crate::flags::GenerateMode; let output = match mode { GenerateMode::Man => flags::generate_man_page(), GenerateMode::CompleteBash => flags::generate_complete_bash(), GenerateMode::CompleteZsh => flags::generate_complete_zsh(), GenerateMode::CompleteFish => flags::generate_complete_fish(), GenerateMode::CompletePowerShell => { flags::generate_complete_powershell() } }; writeln!(std::io::stdout(), "{}", output.trim_end())?; Ok(ExitCode::from(0)) } /// Implements ripgrep's "special" modes. /// /// A special mode is one that generally short-circuits most (not all) of /// ripgrep's initialization logic and skips right to this routine. The /// special modes essentially consist of printing help and version output. The /// idea behind the short circuiting is to ensure there is as little as possible /// (within reason) that would prevent ripgrep from emitting help output. /// /// For example, part of the initialization logic that is skipped (among /// other things) is accessing the current working directory. If that fails, /// ripgrep emits an error. We don't want to emit an error if it fails and /// the user requested version or help information. 
fn special(mode: crate::flags::SpecialMode) -> anyhow::Result<ExitCode> { use crate::flags::SpecialMode; let mut exit = ExitCode::from(0); let output = match mode { SpecialMode::HelpShort => flags::generate_help_short(), SpecialMode::HelpLong => flags::generate_help_long(), SpecialMode::VersionShort => flags::generate_version_short(), SpecialMode::VersionLong => flags::generate_version_long(), // --pcre2-version is a little special because it emits an error // exit code if this build of ripgrep doesn't support PCRE2. SpecialMode::VersionPCRE2 => { let (output, available) = flags::generate_version_pcre2(); if !available { exit = ExitCode::from(1); } output } }; writeln!(std::io::stdout(), "{}", output.trim_end())?; Ok(exit) } /// Prints a heuristic error messages when nothing is searched. /// /// This can happen if an applicable ignore file has one or more rules that /// are too broad and cause ripgrep to ignore everything. /// /// We only show this error message when the user does *not* provide an /// explicit path to search. This is because the message can otherwise be /// noisy, e.g., when it is intended that there is nothing to search. fn eprint_nothing_searched() { err_message!( "No files were searched, which means ripgrep probably \ applied a filter you didn't expect.\n\ Running with --debug will show why files are being skipped." ); } /// Prints the statistics given to the writer given. /// /// The search mode given determines whether the stats should be printed in /// a plain text format or in a JSON format. /// /// The `started` time should be the time at which ripgrep started working. /// /// If an error occurs while writing, then writing stops and the error is /// returned. Note that callers should probably ignore this errror, since /// whether stats fail to print or not generally shouldn't cause ripgrep to /// enter into an "error" state. And usually the only way for this to fail is /// if writing to stdout itself fails. 
fn print_stats<W: Write>( mode: SearchMode, stats: &grep::printer::Stats, started: std::time::Instant, mut wtr: W, ) -> std::io::Result<()> { let elapsed = std::time::Instant::now().duration_since(started); if matches!(mode, SearchMode::JSON) { // We specifically match the format laid out by the JSON printer in // the grep-printer crate. We simply "extend" it with the 'summary' // message type. serde_json::to_writer( &mut wtr, &serde_json::json!({ "type": "summary", "data": { "stats": stats, "elapsed_total": { "secs": elapsed.as_secs(), "nanos": elapsed.subsec_nanos(), "human": format!("{:0.6}s", elapsed.as_secs_f64()), }, } }), )?; write!(wtr, "\n") } else { write!( wtr, " {matches} matches {lines} matched lines {searches_with_match} files contained matches {searches} files searched {bytes_printed} bytes printed {bytes_searched} bytes searched {search_time:0.6} seconds spent searching {process_time:0.6} seconds total ", matches = stats.matches(), lines = stats.matched_lines(), searches_with_match = stats.searches_with_match(), searches = stats.searches(), bytes_printed = stats.bytes_printed(), bytes_searched = stats.bytes_searched(), search_time = stats.elapsed().as_secs_f64(), process_time = elapsed.as_secs_f64(), ) } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/core/flags/config.rs
crates/core/flags/config.rs
/*! This module provides routines for reading ripgrep config "rc" files. The primary output of these routines is a sequence of arguments, where each argument corresponds precisely to one shell argument. */ use std::{ ffi::OsString, path::{Path, PathBuf}, }; use bstr::{ByteSlice, io::BufReadExt}; /// Return a sequence of arguments derived from ripgrep rc configuration files. pub fn args() -> Vec<OsString> { let config_path = match std::env::var_os("RIPGREP_CONFIG_PATH") { None => return vec![], Some(config_path) => { if config_path.is_empty() { return vec![]; } PathBuf::from(config_path) } }; let (args, errs) = match parse(&config_path) { Ok((args, errs)) => (args, errs), Err(err) => { message!( "failed to read the file specified in RIPGREP_CONFIG_PATH: {}", err ); return vec![]; } }; if !errs.is_empty() { for err in errs { message!("{}:{}", config_path.display(), err); } } log::debug!( "{}: arguments loaded from config file: {:?}", config_path.display(), args ); args } /// Parse a single ripgrep rc file from the given path. /// /// On success, this returns a set of shell arguments, in order, that should /// be pre-pended to the arguments given to ripgrep at the command line. /// /// If the file could not be read, then an error is returned. If there was /// a problem parsing one or more lines in the file, then errors are returned /// for each line in addition to successfully parsed arguments. fn parse<P: AsRef<Path>>( path: P, ) -> anyhow::Result<(Vec<OsString>, Vec<anyhow::Error>)> { let path = path.as_ref(); match std::fs::File::open(&path) { Ok(file) => parse_reader(file), Err(err) => anyhow::bail!("{}: {}", path.display(), err), } } /// Parse a single ripgrep rc file from the given reader. /// /// Callers should not provided a buffered reader, as this routine will use its /// own buffer internally. /// /// On success, this returns a set of shell arguments, in order, that should /// be pre-pended to the arguments given to ripgrep at the command line. 
/// /// If the reader could not be read, then an error is returned. If there was a /// problem parsing one or more lines, then errors are returned for each line /// in addition to successfully parsed arguments. fn parse_reader<R: std::io::Read>( rdr: R, ) -> anyhow::Result<(Vec<OsString>, Vec<anyhow::Error>)> { let mut bufrdr = std::io::BufReader::new(rdr); let (mut args, mut errs) = (vec![], vec![]); let mut line_number = 0; bufrdr.for_byte_line_with_terminator(|line| { line_number += 1; let line = line.trim(); if line.is_empty() || line[0] == b'#' { return Ok(true); } match line.to_os_str() { Ok(osstr) => { args.push(osstr.to_os_string()); } Err(err) => { errs.push(anyhow::anyhow!("{line_number}: {err}")); } } Ok(true) })?; Ok((args, errs)) } #[cfg(test)] mod tests { use super::parse_reader; use std::ffi::OsString; #[test] fn basic() { let (args, errs) = parse_reader( &b"\ # Test --context=0 --smart-case -u # --bar --foo "[..], ) .unwrap(); assert!(errs.is_empty()); let args: Vec<String> = args.into_iter().map(|s| s.into_string().unwrap()).collect(); assert_eq!(args, vec!["--context=0", "--smart-case", "-u", "--foo",]); } // We test that we can handle invalid UTF-8 on Unix-like systems. #[test] #[cfg(unix)] fn error() { use std::os::unix::ffi::OsStringExt; let (args, errs) = parse_reader( &b"\ quux foo\xFFbar baz "[..], ) .unwrap(); assert!(errs.is_empty()); assert_eq!( args, vec![ OsString::from("quux"), OsString::from_vec(b"foo\xFFbar".to_vec()), OsString::from("baz"), ] ); } // ... but test that invalid UTF-8 fails on Windows. #[test] #[cfg(not(unix))] fn error() { let (args, errs) = parse_reader( &b"\ quux foo\xFFbar baz "[..], ) .unwrap(); assert_eq!(errs.len(), 1); assert_eq!(args, vec![OsString::from("quux"), OsString::from("baz"),]); } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/core/flags/parse.rs
crates/core/flags/parse.rs
/*! Parses command line arguments into a structured and typed representation. */ use std::{borrow::Cow, collections::BTreeSet, ffi::OsString}; use anyhow::Context; use crate::flags::{ Flag, FlagValue, defs::FLAGS, hiargs::HiArgs, lowargs::{LoggingMode, LowArgs, SpecialMode}, }; /// The result of parsing CLI arguments. /// /// This is basically a `anyhow::Result<T>`, but with one extra variant that is /// inhabited whenever ripgrep should execute a "special" mode. That is, when a /// user provides the `-h/--help` or `-V/--version` flags. /// /// This special variant exists to allow CLI parsing to short circuit as /// quickly as is reasonable. For example, it lets CLI parsing avoid reading /// ripgrep's configuration and converting low level arguments into a higher /// level representation. #[derive(Debug)] pub(crate) enum ParseResult<T> { Special(SpecialMode), Ok(T), Err(anyhow::Error), } impl<T> ParseResult<T> { /// If this result is `Ok`, then apply `then` to it. Otherwise, return this /// result unchanged. fn and_then<U>( self, mut then: impl FnMut(T) -> ParseResult<U>, ) -> ParseResult<U> { match self { ParseResult::Special(mode) => ParseResult::Special(mode), ParseResult::Ok(t) => then(t), ParseResult::Err(err) => ParseResult::Err(err), } } } /// Parse CLI arguments and convert then to their high level representation. pub(crate) fn parse() -> ParseResult<HiArgs> { parse_low().and_then(|low| match HiArgs::from_low_args(low) { Ok(hi) => ParseResult::Ok(hi), Err(err) => ParseResult::Err(err), }) } /// Parse CLI arguments only into their low level representation. /// /// This takes configuration into account. That is, it will try to read /// `RIPGREP_CONFIG_PATH` and prepend any arguments found there to the /// arguments passed to this process. /// /// This will also set one-time global state flags, such as the log level and /// whether messages should be printed. 
fn parse_low() -> ParseResult<LowArgs> { if let Err(err) = crate::logger::Logger::init() { let err = anyhow::anyhow!("failed to initialize logger: {err}"); return ParseResult::Err(err); } let parser = Parser::new(); let mut low = LowArgs::default(); if let Err(err) = parser.parse(std::env::args_os().skip(1), &mut low) { return ParseResult::Err(err); } // Even though we haven't parsed the config file yet (assuming it exists), // we can still use the arguments given on the CLI to setup ripgrep's // logging preferences. Even if the config file changes them in some way, // it's really the best we can do. This way, for example, folks can pass // `--trace` and see any messages logged during config file parsing. set_log_levels(&low); // Before we try to take configuration into account, we can bail early // if a special mode was enabled. This is basically only for version and // help output which shouldn't be impacted by extra configuration. if let Some(special) = low.special.take() { return ParseResult::Special(special); } // If the end user says no config, then respect it. if low.no_config { log::debug!("not reading config files because --no-config is present"); return ParseResult::Ok(low); } // Look for arguments from a config file. If we got nothing (whether the // file is empty or RIPGREP_CONFIG_PATH wasn't set), then we don't need // to re-parse. let config_args = crate::flags::config::args(); if config_args.is_empty() { log::debug!("no extra arguments found from configuration file"); return ParseResult::Ok(low); } // The final arguments are just the arguments from the CLI appending to // the end of the config arguments. let mut final_args = config_args; final_args.extend(std::env::args_os().skip(1)); // Now do the CLI parsing dance again. let mut low = LowArgs::default(); if let Err(err) = parser.parse(final_args.into_iter(), &mut low) { return ParseResult::Err(err); } // Reset the message and logging levels, since they could have changed. 
set_log_levels(&low); ParseResult::Ok(low) } /// Sets global state flags that control logging based on low-level arguments. fn set_log_levels(low: &LowArgs) { crate::messages::set_messages(!low.no_messages); crate::messages::set_ignore_messages(!low.no_ignore_messages); match low.logging { Some(LoggingMode::Trace) => { log::set_max_level(log::LevelFilter::Trace) } Some(LoggingMode::Debug) => { log::set_max_level(log::LevelFilter::Debug) } None => log::set_max_level(log::LevelFilter::Warn), } } /// Parse the sequence of CLI arguments given a low level typed set of /// arguments. /// /// This is exposed for testing that the correct low-level arguments are parsed /// from a CLI. It just runs the parser once over the CLI arguments. It doesn't /// setup logging or read from a config file. /// /// This assumes the iterator given does *not* begin with the binary name. #[cfg(test)] pub(crate) fn parse_low_raw( rawargs: impl IntoIterator<Item = impl Into<OsString>>, ) -> anyhow::Result<LowArgs> { let mut args = LowArgs::default(); Parser::new().parse(rawargs, &mut args)?; Ok(args) } /// Return the metadata for the flag of the given name. pub(super) fn lookup(name: &str) -> Option<&'static dyn Flag> { // N.B. Creating a new parser might look expensive, but it only builds // the lookup trie exactly once. That is, we get a `&'static Parser` from // `Parser::new()`. match Parser::new().find_long(name) { FlagLookup::Match(&FlagInfo { flag, .. }) => Some(flag), _ => None, } } /// A parser for turning a sequence of command line arguments into a more /// strictly typed set of arguments. #[derive(Debug)] struct Parser { /// A single map that contains all possible flag names. This includes /// short and long names, aliases and negations. This maps those names to /// indices into `info`. map: FlagMap, /// A map from IDs returned by the `map` to the corresponding flag /// information. info: Vec<FlagInfo>, } impl Parser { /// Create a new parser. 
/// /// This always creates the same parser and only does it once. Callers may /// call this repeatedly, and the parser will only be built once. fn new() -> &'static Parser { use std::sync::OnceLock; // Since a parser's state is immutable and completely determined by // FLAGS, and since FLAGS is a constant, we can initialize it exactly // once. static P: OnceLock<Parser> = OnceLock::new(); P.get_or_init(|| { let mut infos = vec![]; for &flag in FLAGS.iter() { infos.push(FlagInfo { flag, name: Ok(flag.name_long()), kind: FlagInfoKind::Standard, }); for alias in flag.aliases() { infos.push(FlagInfo { flag, name: Ok(alias), kind: FlagInfoKind::Alias, }); } if let Some(byte) = flag.name_short() { infos.push(FlagInfo { flag, name: Err(byte), kind: FlagInfoKind::Standard, }); } if let Some(name) = flag.name_negated() { infos.push(FlagInfo { flag, name: Ok(name), kind: FlagInfoKind::Negated, }); } } let map = FlagMap::new(&infos); Parser { map, info: infos } }) } /// Parse the given CLI arguments into a low level representation. /// /// The iterator given should *not* start with the binary name. fn parse<I, O>(&self, rawargs: I, args: &mut LowArgs) -> anyhow::Result<()> where I: IntoIterator<Item = O>, O: Into<OsString>, { let mut p = lexopt::Parser::from_args(rawargs); while let Some(arg) = p.next().context("invalid CLI arguments")? { let lookup = match arg { lexopt::Arg::Value(value) => { args.positional.push(value); continue; } lexopt::Arg::Short(ch) if ch == 'h' => { // Special case -h/--help since behavior is different // based on whether short or long flag is given. args.special = Some(SpecialMode::HelpShort); continue; } lexopt::Arg::Short(ch) if ch == 'V' => { // Special case -V/--version since behavior is different // based on whether short or long flag is given. 
args.special = Some(SpecialMode::VersionShort); continue; } lexopt::Arg::Short(ch) => self.find_short(ch), lexopt::Arg::Long(name) if name == "help" => { // Special case -h/--help since behavior is different // based on whether short or long flag is given. args.special = Some(SpecialMode::HelpLong); continue; } lexopt::Arg::Long(name) if name == "version" => { // Special case -V/--version since behavior is different // based on whether short or long flag is given. args.special = Some(SpecialMode::VersionLong); continue; } lexopt::Arg::Long(name) => self.find_long(name), }; let mat = match lookup { FlagLookup::Match(mat) => mat, FlagLookup::UnrecognizedShort(name) => { anyhow::bail!("unrecognized flag -{name}") } FlagLookup::UnrecognizedLong(name) => { let mut msg = format!("unrecognized flag --{name}"); if let Some(suggest_msg) = suggest(&name) { msg = format!("{msg}\n\n{suggest_msg}"); } anyhow::bail!("{msg}") } }; let value = if matches!(mat.kind, FlagInfoKind::Negated) { // Negated flags are always switches, even if the non-negated // flag is not. For example, --context-separator accepts a // value, but --no-context-separator does not. FlagValue::Switch(false) } else if mat.flag.is_switch() { FlagValue::Switch(true) } else { FlagValue::Value(p.value().with_context(|| { format!("missing value for flag {mat}") })?) }; mat.flag .update(value, args) .with_context(|| format!("error parsing flag {mat}"))?; } Ok(()) } /// Look for a flag by its short name. fn find_short(&self, ch: char) -> FlagLookup<'_> { if !ch.is_ascii() { return FlagLookup::UnrecognizedShort(ch); } let byte = u8::try_from(ch).unwrap(); let Some(index) = self.map.find(&[byte]) else { return FlagLookup::UnrecognizedShort(ch); }; FlagLookup::Match(&self.info[index]) } /// Look for a flag by its long name. /// /// This also works for aliases and negated names. 
fn find_long(&self, name: &str) -> FlagLookup<'_> { let Some(index) = self.map.find(name.as_bytes()) else { return FlagLookup::UnrecognizedLong(name.to_string()); }; FlagLookup::Match(&self.info[index]) } } /// The result of looking up a flag name. #[derive(Debug)] enum FlagLookup<'a> { /// Lookup found a match and the metadata for the flag is attached. Match(&'a FlagInfo), /// The given short name is unrecognized. UnrecognizedShort(char), /// The given long name is unrecognized. UnrecognizedLong(String), } /// The info about a flag associated with a flag's ID in the flag map. #[derive(Debug)] struct FlagInfo { /// The flag object and its associated metadata. flag: &'static dyn Flag, /// The actual name that is stored in the Aho-Corasick automaton. When this /// is a byte, it corresponds to a short single character ASCII flag. The /// actual pattern that's in the Aho-Corasick automaton is just the single /// byte. name: Result<&'static str, u8>, /// The type of flag that is stored for the corresponding Aho-Corasick /// pattern. kind: FlagInfoKind, } /// The kind of flag that is being matched. #[derive(Debug)] enum FlagInfoKind { /// A standard flag, e.g., --passthru. Standard, /// A negation of a standard flag, e.g., --no-multiline. Negated, /// An alias for a standard flag, e.g., --passthrough. Alias, } impl std::fmt::Display for FlagInfo { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self.name { Ok(long) => write!(f, "--{long}"), Err(short) => write!(f, "-{short}", short = char::from(short)), } } } /// A map from flag names (short, long, negated and aliases) to their ID. /// /// Once an ID is known, it can be used to look up a flag's metadata in the /// parser's internal state. #[derive(Debug)] struct FlagMap { map: std::collections::HashMap<Vec<u8>, usize>, } impl FlagMap { /// Create a new map of flags for the given flag information. /// /// The index of each flag info corresponds to its ID. 
fn new(infos: &[FlagInfo]) -> FlagMap { let mut map = std::collections::HashMap::with_capacity(infos.len()); for (i, info) in infos.iter().enumerate() { match info.name { Ok(name) => { assert_eq!(None, map.insert(name.as_bytes().to_vec(), i)); } Err(byte) => { assert_eq!(None, map.insert(vec![byte], i)); } } } FlagMap { map } } /// Look for a match of `name` in the given Aho-Corasick automaton. /// /// This only returns a match if the one found has a length equivalent to /// the length of the name given. fn find(&self, name: &[u8]) -> Option<usize> { self.map.get(name).copied() } } /// Possibly return a message suggesting flags similar in the name to the one /// given. /// /// The one given should be a flag given by the user (without the leading /// dashes) that was unrecognized. This attempts to find existing flags that /// are similar to the one given. fn suggest(unrecognized: &str) -> Option<String> { let similars = find_similar_names(unrecognized); if similars.is_empty() { return None; } let list = similars .into_iter() .map(|name| format!("--{name}")) .collect::<Vec<String>>() .join(", "); Some(format!("similar flags that are available: {list}")) } /// Return a sequence of names similar to the unrecognized name given. fn find_similar_names(unrecognized: &str) -> Vec<&'static str> { // The jaccard similarity threshold at which we consider two flag names // similar enough that it's worth suggesting it to the end user. // // This value was determined by some ad hoc experimentation. It might need // further tweaking. 
const THRESHOLD: f64 = 0.4; let mut similar = vec![]; let bow_given = ngrams(unrecognized); for &flag in FLAGS.iter() { let name = flag.name_long(); let bow = ngrams(name); if jaccard_index(&bow_given, &bow) >= THRESHOLD { similar.push(name); } if let Some(name) = flag.name_negated() { let bow = ngrams(name); if jaccard_index(&bow_given, &bow) >= THRESHOLD { similar.push(name); } } for name in flag.aliases() { let bow = ngrams(name); if jaccard_index(&bow_given, &bow) >= THRESHOLD { similar.push(name); } } } similar } /// A "bag of words" is a set of ngrams. type BagOfWords<'a> = BTreeSet<Cow<'a, [u8]>>; /// Returns the jaccard index (a measure of similarity) between sets of ngrams. fn jaccard_index(ngrams1: &BagOfWords<'_>, ngrams2: &BagOfWords<'_>) -> f64 { let union = u32::try_from(ngrams1.union(ngrams2).count()) .expect("fewer than u32::MAX flags"); let intersection = u32::try_from(ngrams1.intersection(ngrams2).count()) .expect("fewer than u32::MAX flags"); f64::from(intersection) / f64::from(union) } /// Returns all 3-grams in the slice given. /// /// If the slice doesn't contain a 3-gram, then one is artificially created by /// padding it out with a character that will never appear in a flag name. fn ngrams(flag_name: &str) -> BagOfWords<'_> { // We only allow ASCII flag names, so we can just use bytes. let slice = flag_name.as_bytes(); let seq: Vec<Cow<[u8]>> = match slice.len() { 0 => vec![Cow::Owned(b"!!!".to_vec())], 1 => vec![Cow::Owned(vec![slice[0], b'!', b'!'])], 2 => vec![Cow::Owned(vec![slice[0], slice[1], b'!'])], _ => slice.windows(3).map(Cow::Borrowed).collect(), }; BTreeSet::from_iter(seq) }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/core/flags/defs.rs
crates/core/flags/defs.rs
/*! Defines all of the flags available in ripgrep. Each flag corresponds to a unit struct with a corresponding implementation of `Flag`. Note that each implementation of `Flag` might actually have many possible manifestations of the same "flag." That is, each implementation of `Flag` can have the following flags available to an end user of ripgrep: * The long flag name. * An optional short flag name. * An optional negated long flag name. * An arbitrarily long list of aliases. The idea is that even though there are multiple flags that a user can type, one implementation of `Flag` corresponds to a single _logical_ flag inside of ripgrep. For example, `-E`, `--encoding` and `--no-encoding` all manipulate the same encoding state in ripgrep. */ use std::{path::PathBuf, sync::LazyLock}; use {anyhow::Context as AnyhowContext, bstr::ByteVec}; use crate::flags::{ Category, Flag, FlagValue, lowargs::{ BinaryMode, BoundaryMode, BufferMode, CaseMode, ColorChoice, ContextMode, EncodingMode, EngineChoice, GenerateMode, LoggingMode, LowArgs, MmapMode, Mode, PatternSource, SearchMode, SortMode, SortModeKind, SpecialMode, TypeChange, }, }; #[cfg(test)] use crate::flags::parse::parse_low_raw; use super::CompletionType; /// A list of all flags in ripgrep via implementations of `Flag`. /// /// The order of these flags matter. It determines the order of the flags in /// the generated documentation (`-h`, `--help` and the man page) within each /// category. (This is why the deprecated flags are last.) pub(super) const FLAGS: &[&dyn Flag] = &[ // -e/--regexp and -f/--file should come before anything else in the // same category. 
&Regexp, &File, &AfterContext, &BeforeContext, &Binary, &BlockBuffered, &ByteOffset, &CaseSensitive, &Color, &Colors, &Column, &Context, &ContextSeparator, &Count, &CountMatches, &Crlf, &Debug, &DfaSizeLimit, &Encoding, &Engine, &FieldContextSeparator, &FieldMatchSeparator, &Files, &FilesWithMatches, &FilesWithoutMatch, &FixedStrings, &Follow, &Generate, &Glob, &GlobCaseInsensitive, &Heading, &Help, &Hidden, &HostnameBin, &HyperlinkFormat, &IGlob, &IgnoreCase, &IgnoreFile, &IgnoreFileCaseInsensitive, &IncludeZero, &InvertMatch, &JSON, &LineBuffered, &LineNumber, &LineNumberNo, &LineRegexp, &MaxColumns, &MaxColumnsPreview, &MaxCount, &MaxDepth, &MaxFilesize, &Mmap, &Multiline, &MultilineDotall, &NoConfig, &NoIgnore, &NoIgnoreDot, &NoIgnoreExclude, &NoIgnoreFiles, &NoIgnoreGlobal, &NoIgnoreMessages, &NoIgnoreParent, &NoIgnoreVcs, &NoMessages, &NoRequireGit, &NoUnicode, &Null, &NullData, &OneFileSystem, &OnlyMatching, &PathSeparator, &Passthru, &PCRE2, &PCRE2Version, &Pre, &PreGlob, &Pretty, &Quiet, &RegexSizeLimit, &Replace, &SearchZip, &SmartCase, &Sort, &Sortr, &Stats, &StopOnNonmatch, &Text, &Threads, &Trace, &Trim, &Type, &TypeNot, &TypeAdd, &TypeClear, &TypeList, &Unrestricted, &Version, &Vimgrep, &WithFilename, &WithFilenameNo, &WordRegexp, // DEPRECATED (make them show up last in their respective categories) &AutoHybridRegex, &NoPcre2Unicode, &SortFiles, ]; /// -A/--after-context #[derive(Debug)] struct AfterContext; impl Flag for AfterContext { fn is_switch(&self) -> bool { false } fn name_short(&self) -> Option<u8> { Some(b'A') } fn name_long(&self) -> &'static str { "after-context" } fn doc_variable(&self) -> Option<&'static str> { Some("NUM") } fn doc_category(&self) -> Category { Category::Output } fn doc_short(&self) -> &'static str { "Show NUM lines after each match." } fn doc_long(&self) -> &'static str { r" Show \fINUM\fP lines after each match. .sp This overrides the \flag{passthru} flag and partially overrides the \flag{context} flag. 
" } fn update(&self, v: FlagValue, args: &mut LowArgs) -> anyhow::Result<()> { args.context.set_after(convert::usize(&v.unwrap_value())?); Ok(()) } } #[cfg(test)] #[test] fn test_after_context() { let mkctx = |lines| { let mut mode = ContextMode::default(); mode.set_after(lines); mode }; let args = parse_low_raw(None::<&str>).unwrap(); assert_eq!(ContextMode::default(), args.context); let args = parse_low_raw(["--after-context", "5"]).unwrap(); assert_eq!(mkctx(5), args.context); let args = parse_low_raw(["--after-context=5"]).unwrap(); assert_eq!(mkctx(5), args.context); let args = parse_low_raw(["-A", "5"]).unwrap(); assert_eq!(mkctx(5), args.context); let args = parse_low_raw(["-A5"]).unwrap(); assert_eq!(mkctx(5), args.context); let args = parse_low_raw(["-A5", "-A10"]).unwrap(); assert_eq!(mkctx(10), args.context); let args = parse_low_raw(["-A5", "-A0"]).unwrap(); assert_eq!(mkctx(0), args.context); let args = parse_low_raw(["-A5", "--passthru"]).unwrap(); assert_eq!(ContextMode::Passthru, args.context); let args = parse_low_raw(["--passthru", "-A5"]).unwrap(); assert_eq!(mkctx(5), args.context); let n = usize::MAX.to_string(); let args = parse_low_raw(["--after-context", n.as_str()]).unwrap(); assert_eq!(mkctx(usize::MAX), args.context); #[cfg(target_pointer_width = "64")] { let n = (u128::from(u64::MAX) + 1).to_string(); let result = parse_low_raw(["--after-context", n.as_str()]); assert!(result.is_err(), "{result:?}"); } } /// --auto-hybrid-regex #[derive(Debug)] struct AutoHybridRegex; impl Flag for AutoHybridRegex { fn is_switch(&self) -> bool { true } fn name_long(&self) -> &'static str { "auto-hybrid-regex" } fn name_negated(&self) -> Option<&'static str> { Some("no-auto-hybrid-regex") } fn doc_category(&self) -> Category { Category::Search } fn doc_short(&self) -> &'static str { "(DEPRECATED) Use PCRE2 if appropriate." } fn doc_long(&self) -> &'static str { r" DEPRECATED. Use \flag{engine} instead. 
.sp When this flag is used, ripgrep will dynamically choose between supported regex engines depending on the features used in a pattern. When ripgrep chooses a regex engine, it applies that choice for every regex provided to ripgrep (e.g., via multiple \flag{regexp} or \flag{file} flags). .sp As an example of how this flag might behave, ripgrep will attempt to use its default finite automata based regex engine whenever the pattern can be successfully compiled with that regex engine. If PCRE2 is enabled and if the pattern given could not be compiled with the default regex engine, then PCRE2 will be automatically used for searching. If PCRE2 isn't available, then this flag has no effect because there is only one regex engine to choose from. .sp In the future, ripgrep may adjust its heuristics for how it decides which regex engine to use. In general, the heuristics will be limited to a static analysis of the patterns, and not to any specific runtime behavior observed while searching files. .sp The primary downside of using this flag is that it may not always be obvious which regex engine ripgrep uses, and thus, the match semantics or performance profile of ripgrep may subtly and unexpectedly change. However, in many cases, all regex engines will agree on what constitutes a match and it can be nice to transparently support more advanced regex features like look-around and backreferences without explicitly needing to enable them. 
" } fn update(&self, v: FlagValue, args: &mut LowArgs) -> anyhow::Result<()> { let mode = if v.unwrap_switch() { EngineChoice::Auto } else { EngineChoice::Default }; args.engine = mode; Ok(()) } } #[cfg(test)] #[test] fn test_auto_hybrid_regex() { let args = parse_low_raw(None::<&str>).unwrap(); assert_eq!(EngineChoice::Default, args.engine); let args = parse_low_raw(["--auto-hybrid-regex"]).unwrap(); assert_eq!(EngineChoice::Auto, args.engine); let args = parse_low_raw(["--auto-hybrid-regex", "--no-auto-hybrid-regex"]) .unwrap(); assert_eq!(EngineChoice::Default, args.engine); let args = parse_low_raw(["--no-auto-hybrid-regex", "--auto-hybrid-regex"]) .unwrap(); assert_eq!(EngineChoice::Auto, args.engine); let args = parse_low_raw(["--auto-hybrid-regex", "-P"]).unwrap(); assert_eq!(EngineChoice::PCRE2, args.engine); let args = parse_low_raw(["-P", "--auto-hybrid-regex"]).unwrap(); assert_eq!(EngineChoice::Auto, args.engine); let args = parse_low_raw(["--engine=auto", "--auto-hybrid-regex"]).unwrap(); assert_eq!(EngineChoice::Auto, args.engine); let args = parse_low_raw(["--engine=default", "--auto-hybrid-regex"]).unwrap(); assert_eq!(EngineChoice::Auto, args.engine); let args = parse_low_raw(["--auto-hybrid-regex", "--engine=default"]).unwrap(); assert_eq!(EngineChoice::Default, args.engine); } /// -B/--before-context #[derive(Debug)] struct BeforeContext; impl Flag for BeforeContext { fn is_switch(&self) -> bool { false } fn name_short(&self) -> Option<u8> { Some(b'B') } fn name_long(&self) -> &'static str { "before-context" } fn doc_variable(&self) -> Option<&'static str> { Some("NUM") } fn doc_category(&self) -> Category { Category::Output } fn doc_short(&self) -> &'static str { "Show NUM lines before each match." } fn doc_long(&self) -> &'static str { r" Show \fINUM\fP lines before each match. .sp This overrides the \flag{passthru} flag and partially overrides the \flag{context} flag. 
" } fn update(&self, v: FlagValue, args: &mut LowArgs) -> anyhow::Result<()> { args.context.set_before(convert::usize(&v.unwrap_value())?); Ok(()) } } #[cfg(test)] #[test] fn test_before_context() { let mkctx = |lines| { let mut mode = ContextMode::default(); mode.set_before(lines); mode }; let args = parse_low_raw(None::<&str>).unwrap(); assert_eq!(ContextMode::default(), args.context); let args = parse_low_raw(["--before-context", "5"]).unwrap(); assert_eq!(mkctx(5), args.context); let args = parse_low_raw(["--before-context=5"]).unwrap(); assert_eq!(mkctx(5), args.context); let args = parse_low_raw(["-B", "5"]).unwrap(); assert_eq!(mkctx(5), args.context); let args = parse_low_raw(["-B5"]).unwrap(); assert_eq!(mkctx(5), args.context); let args = parse_low_raw(["-B5", "-B10"]).unwrap(); assert_eq!(mkctx(10), args.context); let args = parse_low_raw(["-B5", "-B0"]).unwrap(); assert_eq!(mkctx(0), args.context); let args = parse_low_raw(["-B5", "--passthru"]).unwrap(); assert_eq!(ContextMode::Passthru, args.context); let args = parse_low_raw(["--passthru", "-B5"]).unwrap(); assert_eq!(mkctx(5), args.context); let n = usize::MAX.to_string(); let args = parse_low_raw(["--before-context", n.as_str()]).unwrap(); assert_eq!(mkctx(usize::MAX), args.context); #[cfg(target_pointer_width = "64")] { let n = (u128::from(u64::MAX) + 1).to_string(); let result = parse_low_raw(["--before-context", n.as_str()]); assert!(result.is_err(), "{result:?}"); } } /// --binary #[derive(Debug)] struct Binary; impl Flag for Binary { fn is_switch(&self) -> bool { true } fn name_long(&self) -> &'static str { "binary" } fn name_negated(&self) -> Option<&'static str> { Some("no-binary") } fn doc_category(&self) -> Category { Category::Filter } fn doc_short(&self) -> &'static str { "Search binary files." } fn doc_long(&self) -> &'static str { r" Enabling this flag will cause ripgrep to search binary files. 
By default, ripgrep attempts to automatically skip binary files in order to improve the relevance of results and make the search faster. .sp Binary files are heuristically detected based on whether they contain a \fBNUL\fP byte or not. By default (without this flag set), once a \fBNUL\fP byte is seen, ripgrep will stop searching the file. Usually, \fBNUL\fP bytes occur in the beginning of most binary files. If a \fBNUL\fP byte occurs after a match, then ripgrep will not print the match, stop searching that file, and emit a warning that some matches are being suppressed. .sp In contrast, when this flag is provided, ripgrep will continue searching a file even if a \fBNUL\fP byte is found. In particular, if a \fBNUL\fP byte is found then ripgrep will continue searching until either a match is found or the end of the file is reached, whichever comes sooner. If a match is found, then ripgrep will stop and print a warning saying that the search stopped prematurely. .sp If you want ripgrep to search a file without any special \fBNUL\fP byte handling at all (and potentially print binary data to stdout), then you should use the \flag{text} flag. .sp The \flag{binary} flag is a flag for controlling ripgrep's automatic filtering mechanism. As such, it does not need to be used when searching a file explicitly or when searching stdin. That is, it is only applicable when recursively searching a directory. .sp When the \flag{unrestricted} flag is provided for a third time, then this flag is automatically enabled. .sp This flag overrides the \flag{text} flag. 
" } fn update(&self, v: FlagValue, args: &mut LowArgs) -> anyhow::Result<()> { args.binary = if v.unwrap_switch() { BinaryMode::SearchAndSuppress } else { BinaryMode::Auto }; Ok(()) } } #[cfg(test)] #[test] fn test_binary() { let args = parse_low_raw(None::<&str>).unwrap(); assert_eq!(BinaryMode::Auto, args.binary); let args = parse_low_raw(["--binary"]).unwrap(); assert_eq!(BinaryMode::SearchAndSuppress, args.binary); let args = parse_low_raw(["--binary", "--no-binary"]).unwrap(); assert_eq!(BinaryMode::Auto, args.binary); let args = parse_low_raw(["--no-binary", "--binary"]).unwrap(); assert_eq!(BinaryMode::SearchAndSuppress, args.binary); let args = parse_low_raw(["--binary", "-a"]).unwrap(); assert_eq!(BinaryMode::AsText, args.binary); let args = parse_low_raw(["-a", "--binary"]).unwrap(); assert_eq!(BinaryMode::SearchAndSuppress, args.binary); let args = parse_low_raw(["-a", "--no-binary"]).unwrap(); assert_eq!(BinaryMode::Auto, args.binary); } /// --block-buffered #[derive(Debug)] struct BlockBuffered; impl Flag for BlockBuffered { fn is_switch(&self) -> bool { true } fn name_long(&self) -> &'static str { "block-buffered" } fn name_negated(&self) -> Option<&'static str> { Some("no-block-buffered") } fn doc_category(&self) -> Category { Category::Output } fn doc_short(&self) -> &'static str { "Force block buffering." } fn doc_long(&self) -> &'static str { r" When enabled, ripgrep will use block buffering. That is, whenever a matching line is found, it will be written to an in-memory buffer and will not be written to stdout until the buffer reaches a certain size. This is the default when ripgrep's stdout is redirected to a pipeline or a file. When ripgrep's stdout is connected to a tty, line buffering will be used by default. Forcing block buffering can be useful when dumping a large amount of contents to a tty. .sp This overrides the \flag{line-buffered} flag. 
" } fn update(&self, v: FlagValue, args: &mut LowArgs) -> anyhow::Result<()> { args.buffer = if v.unwrap_switch() { BufferMode::Block } else { BufferMode::Auto }; Ok(()) } } #[cfg(test)] #[test] fn test_block_buffered() { let args = parse_low_raw(None::<&str>).unwrap(); assert_eq!(BufferMode::Auto, args.buffer); let args = parse_low_raw(["--block-buffered"]).unwrap(); assert_eq!(BufferMode::Block, args.buffer); let args = parse_low_raw(["--block-buffered", "--no-block-buffered"]).unwrap(); assert_eq!(BufferMode::Auto, args.buffer); let args = parse_low_raw(["--block-buffered", "--line-buffered"]).unwrap(); assert_eq!(BufferMode::Line, args.buffer); } /// --byte-offset #[derive(Debug)] struct ByteOffset; impl Flag for ByteOffset { fn is_switch(&self) -> bool { true } fn name_short(&self) -> Option<u8> { Some(b'b') } fn name_long(&self) -> &'static str { "byte-offset" } fn name_negated(&self) -> Option<&'static str> { Some("no-byte-offset") } fn doc_category(&self) -> Category { Category::Output } fn doc_short(&self) -> &'static str { "Print the byte offset for each matching line." } fn doc_long(&self) -> &'static str { r" Print the 0-based byte offset within the input file before each line of output. If \flag{only-matching} is specified, print the offset of the matched text itself. .sp If ripgrep does transcoding, then the byte offset is in terms of the result of transcoding and not the original data. This applies similarly to other transformations on the data, such as decompression or a \flag{pre} filter. 
" } fn update(&self, v: FlagValue, args: &mut LowArgs) -> anyhow::Result<()> { args.byte_offset = v.unwrap_switch(); Ok(()) } } #[cfg(test)] #[test] fn test_byte_offset() { let args = parse_low_raw(None::<&str>).unwrap(); assert_eq!(false, args.byte_offset); let args = parse_low_raw(["--byte-offset"]).unwrap(); assert_eq!(true, args.byte_offset); let args = parse_low_raw(["-b"]).unwrap(); assert_eq!(true, args.byte_offset); let args = parse_low_raw(["--byte-offset", "--no-byte-offset"]).unwrap(); assert_eq!(false, args.byte_offset); let args = parse_low_raw(["--no-byte-offset", "-b"]).unwrap(); assert_eq!(true, args.byte_offset); } /// -s/--case-sensitive #[derive(Debug)] struct CaseSensitive; impl Flag for CaseSensitive { fn is_switch(&self) -> bool { true } fn name_short(&self) -> Option<u8> { Some(b's') } fn name_long(&self) -> &'static str { "case-sensitive" } fn doc_category(&self) -> Category { Category::Search } fn doc_short(&self) -> &'static str { r"Search case sensitively (default)." } fn doc_long(&self) -> &'static str { r" Execute the search case sensitively. This is the default mode. .sp This is a global option that applies to all patterns given to ripgrep. Individual patterns can still be matched case insensitively by using inline regex flags. For example, \fB(?i)abc\fP will match \fBabc\fP case insensitively even when this flag is used. .sp This flag overrides the \flag{ignore-case} and \flag{smart-case} flags. 
" } fn update(&self, v: FlagValue, args: &mut LowArgs) -> anyhow::Result<()> { assert!(v.unwrap_switch(), "flag has no negation"); args.case = CaseMode::Sensitive; Ok(()) } } #[cfg(test)] #[test] fn test_case_sensitive() { let args = parse_low_raw(None::<&str>).unwrap(); assert_eq!(CaseMode::Sensitive, args.case); let args = parse_low_raw(["--case-sensitive"]).unwrap(); assert_eq!(CaseMode::Sensitive, args.case); let args = parse_low_raw(["-s"]).unwrap(); assert_eq!(CaseMode::Sensitive, args.case); } /// --color #[derive(Debug)] struct Color; impl Flag for Color { fn is_switch(&self) -> bool { false } fn name_long(&self) -> &'static str { "color" } fn doc_variable(&self) -> Option<&'static str> { Some("WHEN") } fn doc_category(&self) -> Category { Category::Output } fn doc_short(&self) -> &'static str { "When to use color." } fn doc_long(&self) -> &'static str { r" This flag controls when to use colors. The default setting is \fBauto\fP, which means ripgrep will try to guess when to use colors. For example, if ripgrep is printing to a tty, then it will use colors, but if it is redirected to a file or a pipe, then it will suppress color output. .sp ripgrep will suppress color output by default in some other circumstances as well. These include, but are not limited to: .sp .IP \(bu 3n When the \fBTERM\fP environment variable is not set or set to \fBdumb\fP. .sp .IP \(bu 3n When the \fBNO_COLOR\fP environment variable is set (regardless of value). .sp .IP \(bu 3n When flags that imply no use for colors are given. For example, \flag{vimgrep} and \flag{json}. . .PP The possible values for this flag are: .sp .IP \fBnever\fP 10n Colors will never be used. .sp .IP \fBauto\fP 10n The default. ripgrep tries to be smart. .sp .IP \fBalways\fP 10n Colors will always be used regardless of where output is sent. .sp .IP \fBansi\fP 10n Like 'always', but emits ANSI escapes (even in a Windows console). . .PP This flag also controls whether hyperlinks are emitted. 
For example, when a hyperlink format is specified, hyperlinks won't be used when color is suppressed. If one wants to emit hyperlinks but no colors, then one must use the \flag{colors} flag to manually set all color styles to \fBnone\fP: .sp .EX \-\-colors 'path:none' \\ \-\-colors 'line:none' \\ \-\-colors 'column:none' \\ \-\-colors 'match:none' \\ \-\-colors 'highlight:none' .EE .sp " } fn doc_choices(&self) -> &'static [&'static str] { &["never", "auto", "always", "ansi"] } fn update(&self, v: FlagValue, args: &mut LowArgs) -> anyhow::Result<()> { args.color = match convert::str(&v.unwrap_value())? { "never" => ColorChoice::Never, "auto" => ColorChoice::Auto, "always" => ColorChoice::Always, "ansi" => ColorChoice::Ansi, unk => anyhow::bail!("choice '{unk}' is unrecognized"), }; Ok(()) } } #[cfg(test)] #[test] fn test_color() { let args = parse_low_raw(None::<&str>).unwrap(); assert_eq!(ColorChoice::Auto, args.color); let args = parse_low_raw(["--color", "never"]).unwrap(); assert_eq!(ColorChoice::Never, args.color); let args = parse_low_raw(["--color", "auto"]).unwrap(); assert_eq!(ColorChoice::Auto, args.color); let args = parse_low_raw(["--color", "always"]).unwrap(); assert_eq!(ColorChoice::Always, args.color); let args = parse_low_raw(["--color", "ansi"]).unwrap(); assert_eq!(ColorChoice::Ansi, args.color); let args = parse_low_raw(["--color=never"]).unwrap(); assert_eq!(ColorChoice::Never, args.color); let args = parse_low_raw(["--color", "always", "--color", "never"]).unwrap(); assert_eq!(ColorChoice::Never, args.color); let args = parse_low_raw(["--color", "never", "--color", "always"]).unwrap(); assert_eq!(ColorChoice::Always, args.color); let result = parse_low_raw(["--color", "foofoo"]); assert!(result.is_err(), "{result:?}"); let result = parse_low_raw(["--color", "Always"]); assert!(result.is_err(), "{result:?}"); } /// --colors #[derive(Debug)] struct Colors; impl Flag for Colors { fn is_switch(&self) -> bool { false } fn name_long(&self) -> 
&'static str { "colors" } fn doc_variable(&self) -> Option<&'static str> { Some("COLOR_SPEC") } fn doc_category(&self) -> Category { Category::Output } fn doc_short(&self) -> &'static str { "Configure color settings and styles." } fn doc_long(&self) -> &'static str { r#" This flag specifies color settings for use in the output. This flag may be provided multiple times. Settings are applied iteratively. Pre-existing color labels are limited to one of eight choices: \fBred\fP, \fBblue\fP, \fBgreen\fP, \fBcyan\fP, \fBmagenta\fP, \fByellow\fP, \fBwhite\fP and \fBblack\fP. Styles are limited to \fBnobold\fP, \fBbold\fP, \fBnointense\fP, \fBintense\fP, \fBnounderline\fP, \fBunderline\fP, \fBnoitalic\fP or \fBitalic\fP. .sp The format of the flag is \fB{\fP\fItype\fP\fB}:{\fP\fIattribute\fP\fB}:{\fP\fIvalue\fP\fB}\fP. \fItype\fP should be one of \fBpath\fP, \fBline\fP, \fBcolumn\fP, \fBhighlight\fP or \fBmatch\fP. \fIattribute\fP can be \fBfg\fP, \fBbg\fP or \fBstyle\fP. \fIvalue\fP is either a color (for \fBfg\fP and \fBbg\fP) or a text style. A special format, \fB{\fP\fItype\fP\fB}:none\fP, will clear all color settings for \fItype\fP. .sp For example, the following command will change the match color to magenta and the background color for line numbers to yellow: .sp .EX rg \-\-colors 'match:fg:magenta' \-\-colors 'line:bg:yellow' .EE .sp Another example, the following command will "highlight" the non-matching text in matching lines: .sp .EX rg \-\-colors 'highlight:bg:yellow' \-\-colors 'highlight:fg:black' .EE .sp The "highlight" color type is particularly useful for contrasting matching lines with surrounding context printed by the \flag{before-context}, \flag{after-context}, \flag{context} or \flag{passthru} flags. .sp Extended colors can be used for \fIvalue\fP when the tty supports ANSI color sequences. These are specified as either \fIx\fP (256-color) or .IB x , x , x (24-bit truecolor) where \fIx\fP is a number between \fB0\fP and \fB255\fP inclusive. 
\fIx\fP may be given as a normal decimal number or a hexadecimal number, which is prefixed by \fB0x\fP. .sp For example, the following command will change the match background color to that represented by the rgb value (0,128,255): .sp .EX rg \-\-colors 'match:bg:0,128,255' .EE .sp or, equivalently, .sp .EX rg \-\-colors 'match:bg:0x0,0x80,0xFF' .EE .sp Note that the \fBintense\fP and \fBnointense\fP styles will have no effect when used alongside these extended color codes. "# } fn update(&self, v: FlagValue, args: &mut LowArgs) -> anyhow::Result<()> { let v = v.unwrap_value(); let v = convert::str(&v)?; args.colors.push(v.parse()?); Ok(()) } } #[cfg(test)] #[test] fn test_colors() { let args = parse_low_raw(None::<&str>).unwrap(); assert!(args.colors.is_empty()); let args = parse_low_raw(["--colors", "match:fg:magenta"]).unwrap(); assert_eq!(args.colors, vec!["match:fg:magenta".parse().unwrap()]); let args = parse_low_raw([ "--colors", "match:fg:magenta", "--colors", "line:bg:yellow", ]) .unwrap(); assert_eq!( args.colors, vec![ "match:fg:magenta".parse().unwrap(), "line:bg:yellow".parse().unwrap() ] ); let args = parse_low_raw(["--colors", "highlight:bg:240"]).unwrap(); assert_eq!(args.colors, vec!["highlight:bg:240".parse().unwrap()]); let args = parse_low_raw([ "--colors", "match:fg:magenta", "--colors", "highlight:bg:blue", ]) .unwrap(); assert_eq!( args.colors, vec![ "match:fg:magenta".parse().unwrap(), "highlight:bg:blue".parse().unwrap() ] ); } /// --column #[derive(Debug)] struct Column; impl Flag for Column { fn is_switch(&self) -> bool { true } fn name_long(&self) -> &'static str { "column" } fn name_negated(&self) -> Option<&'static str> { Some("no-column") } fn doc_category(&self) -> Category { Category::Output } fn doc_short(&self) -> &'static str { "Show column numbers." } fn doc_long(&self) -> &'static str { r" Show column numbers (1-based). This only shows the column numbers for the first match on each line. 
This does not try to account for Unicode. One byte is equal to one column. This implies \flag{line-number}. .sp When \flag{only-matching} is used, then the column numbers written correspond to the start of each match. " } fn update(&self, v: FlagValue, args: &mut LowArgs) -> anyhow::Result<()> { args.column = Some(v.unwrap_switch()); Ok(()) } } #[cfg(test)] #[test] fn test_column() { let args = parse_low_raw(None::<&str>).unwrap(); assert_eq!(None, args.column); let args = parse_low_raw(["--column"]).unwrap(); assert_eq!(Some(true), args.column); let args = parse_low_raw(["--column", "--no-column"]).unwrap(); assert_eq!(Some(false), args.column); let args = parse_low_raw(["--no-column", "--column"]).unwrap(); assert_eq!(Some(true), args.column); } /// -C/--context #[derive(Debug)] struct Context; impl Flag for Context { fn is_switch(&self) -> bool { false } fn name_short(&self) -> Option<u8> { Some(b'C') } fn name_long(&self) -> &'static str { "context" } fn doc_variable(&self) -> Option<&'static str> { Some("NUM") } fn doc_category(&self) -> Category { Category::Output } fn doc_short(&self) -> &'static str { r"Show NUM lines before and after each match." } fn doc_long(&self) -> &'static str { r" Show \fINUM\fP lines before and after each match. This is equivalent to providing both the \flag{before-context} and \flag{after-context} flags with the same value. .sp This overrides the \flag{passthru} flag. The \flag{after-context} and \flag{before-context} flags both partially override this flag, regardless of the order. For example, \fB\-A2 \-C1\fP is equivalent to \fB\-A2 \-B1\fP. 
" } fn update(&self, v: FlagValue, args: &mut LowArgs) -> anyhow::Result<()> { args.context.set_both(convert::usize(&v.unwrap_value())?); Ok(()) } } #[cfg(test)] #[test] fn test_context() { let mkctx = |lines| { let mut mode = ContextMode::default(); mode.set_both(lines); mode }; let args = parse_low_raw(None::<&str>).unwrap(); assert_eq!(ContextMode::default(), args.context); let args = parse_low_raw(["--context", "5"]).unwrap(); assert_eq!(mkctx(5), args.context); let args = parse_low_raw(["--context=5"]).unwrap(); assert_eq!(mkctx(5), args.context); let args = parse_low_raw(["-C", "5"]).unwrap(); assert_eq!(mkctx(5), args.context); let args = parse_low_raw(["-C5"]).unwrap(); assert_eq!(mkctx(5), args.context); let args = parse_low_raw(["-C5", "-C10"]).unwrap(); assert_eq!(mkctx(10), args.context); let args = parse_low_raw(["-C5", "-C0"]).unwrap(); assert_eq!(mkctx(0), args.context); let args = parse_low_raw(["-C5", "--passthru"]).unwrap(); assert_eq!(ContextMode::Passthru, args.context); let args = parse_low_raw(["--passthru", "-C5"]).unwrap(); assert_eq!(mkctx(5), args.context); let n = usize::MAX.to_string(); let args = parse_low_raw(["--context", n.as_str()]).unwrap(); assert_eq!(mkctx(usize::MAX), args.context); #[cfg(target_pointer_width = "64")] { let n = (u128::from(u64::MAX) + 1).to_string(); let result = parse_low_raw(["--context", n.as_str()]); assert!(result.is_err(), "{result:?}"); } // Test the interaction between -A/-B and -C. Basically, -A/-B always // partially overrides -C, regardless of where they appear relative to // each other. This behavior is also how GNU grep works, and it also makes // logical sense to me: -A/-B are the more specific flags. 
let args = parse_low_raw(["-A1", "-C5"]).unwrap(); let mut mode = ContextMode::default(); mode.set_after(1); mode.set_both(5); assert_eq!(mode, args.context); assert_eq!((5, 1), args.context.get_limited()); let args = parse_low_raw(["-B1", "-C5"]).unwrap(); let mut mode = ContextMode::default(); mode.set_before(1); mode.set_both(5); assert_eq!(mode, args.context); assert_eq!((1, 5), args.context.get_limited()); let args = parse_low_raw(["-A1", "-B2", "-C5"]).unwrap(); let mut mode = ContextMode::default(); mode.set_before(2); mode.set_after(1); mode.set_both(5); assert_eq!(mode, args.context); assert_eq!((2, 1), args.context.get_limited()); // These next three are like the ones above, but with -C before -A/-B. This
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
true
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/core/flags/mod.rs
crates/core/flags/mod.rs
/*! Defines ripgrep's command line interface. This modules deals with everything involving ripgrep's flags and positional arguments. This includes generating shell completions, `--help` output and even ripgrep's man page. It's also responsible for parsing and validating every flag (including reading ripgrep's config file), and manages the contact points between these flags and ripgrep's cast of supporting libraries. For example, once [`HiArgs`] has been created, it knows how to create a multi threaded recursive directory traverser. */ use std::{ ffi::OsString, fmt::Debug, panic::{RefUnwindSafe, UnwindSafe}, }; pub(crate) use crate::flags::{ complete::{ bash::generate as generate_complete_bash, fish::generate as generate_complete_fish, powershell::generate as generate_complete_powershell, zsh::generate as generate_complete_zsh, }, doc::{ help::{ generate_long as generate_help_long, generate_short as generate_help_short, }, man::generate as generate_man_page, version::{ generate_long as generate_version_long, generate_pcre2 as generate_version_pcre2, generate_short as generate_version_short, }, }, hiargs::HiArgs, lowargs::{GenerateMode, Mode, SearchMode, SpecialMode}, parse::{ParseResult, parse}, }; mod complete; mod config; mod defs; mod doc; mod hiargs; mod lowargs; mod parse; /// A trait that encapsulates the definition of an optional flag for ripgrep. /// /// This trait is meant to be used via dynamic dispatch. Namely, the `defs` /// module provides a single global slice of `&dyn Flag` values correspondings /// to all of the flags in ripgrep. /// /// ripgrep's required positional arguments are handled by the parser and by /// the conversion from low-level arguments to high level arguments. Namely, /// all of ripgrep's positional arguments are treated as file paths, except /// in certain circumstances where the first argument is treated as a regex /// pattern. 
/// /// Note that each implementation of this trait requires a long flag name, /// but can also optionally have a short version and even a negation flag. /// For example, the `-E/--encoding` flag accepts a value, but it also has a /// `--no-encoding` negation flag for reverting back to "automatic" encoding /// detection. All three of `-E`, `--encoding` and `--no-encoding` are provided /// by a single implementation of this trait. /// /// ripgrep only supports flags that are switches or flags that accept a single /// value. Flags that accept multiple values are an unsupported abberation. trait Flag: Debug + Send + Sync + UnwindSafe + RefUnwindSafe + 'static { /// Returns true if this flag is a switch. When a flag is a switch, the /// CLI parser will not look for a value after the flag is seen. fn is_switch(&self) -> bool; /// A short single byte name for this flag. This returns `None` by default, /// which signifies that the flag has no short name. /// /// The byte returned must be an ASCII codepoint that is a `.` or is /// alpha-numeric. fn name_short(&self) -> Option<u8> { None } /// Returns the long name of this flag. All flags must have a "long" name. /// /// The long name must be at least 2 bytes, and all of its bytes must be /// ASCII codepoints that are either `-` or alpha-numeric. fn name_long(&self) -> &'static str; /// Returns a list of aliases for this flag. /// /// The aliases must follow the same rules as `Flag::name_long`. /// /// By default, an empty slice is returned. fn aliases(&self) -> &'static [&'static str] { &[] } /// Returns a negated name for this flag. The negation of a flag is /// intended to have the opposite meaning of a flag or to otherwise turn /// something "off" or revert it to its default behavior. /// /// Negated flags are not listed in their own section in the `-h/--help` /// output or man page. Instead, they are automatically mentioned at the /// end of the documentation section of the flag they negated. 
/// /// The aliases must follow the same rules as `Flag::name_long`. /// /// By default, a flag has no negation and this returns `None`. fn name_negated(&self) -> Option<&'static str> { None } /// Returns the variable name describing the type of value this flag /// accepts. This should always be set for non-switch flags and never set /// for switch flags. /// /// For example, the `--max-count` flag has its variable name set to `NUM`. /// /// The convention is to capitalize variable names. /// /// By default this returns `None`. fn doc_variable(&self) -> Option<&'static str> { None } /// Returns the category of this flag. /// /// Every flag must have a single category. Categories are used to organize /// flags in the generated documentation. fn doc_category(&self) -> Category; /// A (very) short documentation string describing what this flag does. /// /// This may sacrifice "proper English" in order to be as terse as /// possible. Generally, we try to ensure that `rg -h` doesn't have any /// lines that exceed 79 columns. fn doc_short(&self) -> &'static str; /// A (possibly very) longer documentation string describing in full /// detail what this flag does. This should be in mandoc/mdoc format. fn doc_long(&self) -> &'static str; /// If this is a non-switch flag that accepts a small set of specific /// values, then this should list them. /// /// This returns an empty slice by default. fn doc_choices(&self) -> &'static [&'static str] { &[] } fn completion_type(&self) -> CompletionType { CompletionType::Other } /// Given the parsed value (which might just be a switch), this should /// update the state in `args` based on the value given for this flag. /// /// This may update state for other flags as appropriate. /// /// The `-V/--version` and `-h/--help` flags are treated specially in the /// parser and should do nothing here. /// /// By convention, implementations should generally not try to "do" /// anything other than validate the value given. 
For example, the /// implementation for `--hostname-bin` should not try to resolve the /// hostname to use by running the binary provided. That should be saved /// for a later step. This convention is used to ensure that getting the /// low-level arguments is as reliable and quick as possible. It also /// ensures that "doing something" occurs a minimal number of times. For /// example, by avoiding trying to find the hostname here, we can do it /// once later no matter how many times `--hostname-bin` is provided. /// /// Implementations should not include the flag name in the error message /// returned. The flag name is included automatically by the parser. fn update( &self, value: FlagValue, args: &mut crate::flags::lowargs::LowArgs, ) -> anyhow::Result<()>; } /// The category that a flag belongs to. /// /// Categories are used to organize flags into "logical" groups in the /// generated documentation. #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] enum Category { /// Flags related to how ripgrep reads its input. Its "input" generally /// consists of the patterns it is trying to match and the haystacks it is /// trying to search. Input, /// Flags related to the operation of the search itself. For example, /// whether case insensitive matching is enabled. Search, /// Flags related to how ripgrep filters haystacks. For example, whether /// to respect gitignore files or not. Filter, /// Flags related to how ripgrep shows its search results. For example, /// whether to show line numbers or not. Output, /// Flags related to changing ripgrep's output at a more fundamental level. /// For example, flags like `--count` suppress printing of individual /// lines, and instead just print the total count of matches for each file /// searched. OutputModes, /// Flags related to logging behavior such as emitting non-fatal error /// messages or printing search statistics. Logging, /// Other behaviors not related to ripgrep's core functionality. 
For /// example, printing the file type globbing rules, or printing the list /// of files ripgrep would search without actually searching them. OtherBehaviors, } impl Category { /// Returns a string representation of this category. /// /// This string is the name of the variable used in various templates for /// generated documentation. This name can be used for interpolation. fn as_str(&self) -> &'static str { match *self { Category::Input => "input", Category::Search => "search", Category::Filter => "filter", Category::Output => "output", Category::OutputModes => "output-modes", Category::Logging => "logging", Category::OtherBehaviors => "other-behaviors", } } } /// The kind of argument a flag accepts, to be used for shell completions. #[derive(Clone, Copy, Debug)] enum CompletionType { /// No special category. is_switch() and doc_choices() may apply. Other, /// A path to a file. Filename, /// A command in $PATH. Executable, /// The name of a file type, as used by e.g. --type. Filetype, /// The name of an encoding_rs encoding, as used by --encoding. Encoding, } /// Represents a value parsed from the command line. /// /// This doesn't include the corresponding flag, but values come in one of /// two forms: a switch (on or off) or an arbitrary value. /// /// Note that the CLI doesn't directly support negated switches. For example, /// you can'd do anything like `-n=false` or any of that nonsense. Instead, /// the CLI parser knows about which flag names are negations and which aren't /// (courtesy of the `Flag` trait). If a flag given is known as a negation, /// then a `FlagValue::Switch(false)` value is passed into `Flag::update`. #[derive(Debug)] enum FlagValue { /// A flag that is either on or off. Switch(bool), /// A flag that comes with an arbitrary user value. Value(OsString), } impl FlagValue { /// Return the yes or no value of this switch. /// /// If this flag value is not a switch, then this panics. 
/// /// This is useful when writing the implementation of `Flag::update`. /// namely, callers usually know whether a switch or a value is expected. /// If a flag is something different, then it indicates a bug, and thus a /// panic is acceptable. fn unwrap_switch(self) -> bool { match self { FlagValue::Switch(yes) => yes, FlagValue::Value(_) => { unreachable!("got flag value but expected switch") } } } /// Return the user provided value of this flag. /// /// If this flag is a switch, then this panics. /// /// This is useful when writing the implementation of `Flag::update`. /// namely, callers usually know whether a switch or a value is expected. /// If a flag is something different, then it indicates a bug, and thus a /// panic is acceptable. fn unwrap_value(self) -> OsString { match self { FlagValue::Switch(_) => { unreachable!("got switch but expected flag value") } FlagValue::Value(v) => v, } } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/core/flags/lowargs.rs
crates/core/flags/lowargs.rs
/*! Provides the definition of low level arguments from CLI flags. */ use std::{ ffi::{OsStr, OsString}, path::PathBuf, }; use { bstr::{BString, ByteVec}, grep::printer::{HyperlinkFormat, UserColorSpec}, }; /// A collection of "low level" arguments. /// /// The "low level" here is meant to constrain this type to be as close to the /// actual CLI flags and arguments as possible. Namely, other than some /// convenience types to help validate flag values and deal with overrides /// between flags, these low level arguments do not contain any higher level /// abstractions. /// /// Another self-imposed constraint is that populating low level arguments /// should not require anything other than validating what the user has /// provided. For example, low level arguments should not contain a /// `HyperlinkConfig`, since in order to get a full configuration, one needs to /// discover the hostname of the current system (which might require running a /// binary or a syscall). /// /// Low level arguments are populated by the parser directly via the `update` /// method on the corresponding implementation of the `Flag` trait. #[derive(Debug, Default)] pub(crate) struct LowArgs { // Essential arguments. pub(crate) special: Option<SpecialMode>, pub(crate) mode: Mode, pub(crate) positional: Vec<OsString>, pub(crate) patterns: Vec<PatternSource>, // Everything else, sorted lexicographically. 
pub(crate) binary: BinaryMode, pub(crate) boundary: Option<BoundaryMode>, pub(crate) buffer: BufferMode, pub(crate) byte_offset: bool, pub(crate) case: CaseMode, pub(crate) color: ColorChoice, pub(crate) colors: Vec<UserColorSpec>, pub(crate) column: Option<bool>, pub(crate) context: ContextMode, pub(crate) context_separator: ContextSeparator, pub(crate) crlf: bool, pub(crate) dfa_size_limit: Option<usize>, pub(crate) encoding: EncodingMode, pub(crate) engine: EngineChoice, pub(crate) field_context_separator: FieldContextSeparator, pub(crate) field_match_separator: FieldMatchSeparator, pub(crate) fixed_strings: bool, pub(crate) follow: bool, pub(crate) glob_case_insensitive: bool, pub(crate) globs: Vec<String>, pub(crate) heading: Option<bool>, pub(crate) hidden: bool, pub(crate) hostname_bin: Option<PathBuf>, pub(crate) hyperlink_format: HyperlinkFormat, pub(crate) iglobs: Vec<String>, pub(crate) ignore_file: Vec<PathBuf>, pub(crate) ignore_file_case_insensitive: bool, pub(crate) include_zero: bool, pub(crate) invert_match: bool, pub(crate) line_number: Option<bool>, pub(crate) logging: Option<LoggingMode>, pub(crate) max_columns: Option<u64>, pub(crate) max_columns_preview: bool, pub(crate) max_count: Option<u64>, pub(crate) max_depth: Option<usize>, pub(crate) max_filesize: Option<u64>, pub(crate) mmap: MmapMode, pub(crate) multiline: bool, pub(crate) multiline_dotall: bool, pub(crate) no_config: bool, pub(crate) no_ignore_dot: bool, pub(crate) no_ignore_exclude: bool, pub(crate) no_ignore_files: bool, pub(crate) no_ignore_global: bool, pub(crate) no_ignore_messages: bool, pub(crate) no_ignore_parent: bool, pub(crate) no_ignore_vcs: bool, pub(crate) no_messages: bool, pub(crate) no_require_git: bool, pub(crate) no_unicode: bool, pub(crate) null: bool, pub(crate) null_data: bool, pub(crate) one_file_system: bool, pub(crate) only_matching: bool, pub(crate) path_separator: Option<u8>, pub(crate) pre: Option<PathBuf>, pub(crate) pre_glob: Vec<String>, pub(crate) 
quiet: bool, pub(crate) regex_size_limit: Option<usize>, pub(crate) replace: Option<BString>, pub(crate) search_zip: bool, pub(crate) sort: Option<SortMode>, pub(crate) stats: bool, pub(crate) stop_on_nonmatch: bool, pub(crate) threads: Option<usize>, pub(crate) trim: bool, pub(crate) type_changes: Vec<TypeChange>, pub(crate) unrestricted: usize, pub(crate) vimgrep: bool, pub(crate) with_filename: Option<bool>, } /// A "special" mode that supercedes everything else. /// /// When one of these modes is present, it overrides everything else and causes /// ripgrep to short-circuit. In particular, we avoid converting low-level /// argument types into higher level arguments types that can fail for various /// reasons related to the environment. (Parsing the low-level arguments can /// fail too, but usually not in a way that can't be worked around by removing /// the corresponding arguments from the CLI command.) This is overall a hedge /// to ensure that version and help information are basically always available. #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub(crate) enum SpecialMode { /// Show a condensed version of "help" output. Generally speaking, this /// shows each flag and an extremely terse description of that flag on /// a single line. This corresponds to the `-h` flag. HelpShort, /// Shows a very verbose version of the "help" output. The docs for some /// flags will be paragraphs long. This corresponds to the `--help` flag. HelpLong, /// Show condensed version information. e.g., `ripgrep x.y.z`. VersionShort, /// Show verbose version information. Includes "short" information as well /// as features included in the build. VersionLong, /// Show PCRE2's version information, or an error if this version of /// ripgrep wasn't compiled with PCRE2 support. VersionPCRE2, } /// The overall mode that ripgrep should operate in. /// /// If ripgrep were designed without the legacy of grep, these would probably /// be sub-commands? 
Perhaps not, since they aren't as frequently used. /// /// The point of putting these in one enum is that they are all mutually /// exclusive and override one another. /// /// Note that -h/--help and -V/--version are not included in this because /// they always overrides everything else, regardless of where it appears /// in the command line. They are treated as "special" modes that short-circuit /// ripgrep's usual flow. #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub(crate) enum Mode { /// ripgrep will execute a search of some kind. Search(SearchMode), /// Show the files that *would* be searched, but don't actually search /// them. Files, /// List all file type definitions configured, including the default file /// types and any additional file types added to the command line. Types, /// Generate various things like the man page and completion files. Generate(GenerateMode), } impl Default for Mode { fn default() -> Mode { Mode::Search(SearchMode::Standard) } } impl Mode { /// Update this mode to the new mode while implementing various override /// semantics. For example, a search mode cannot override a non-search /// mode. pub(crate) fn update(&mut self, new: Mode) { match *self { // If we're in a search mode, then anything can override it. Mode::Search(_) => *self = new, _ => { // Once we're in a non-search mode, other non-search modes // can override it. But search modes cannot. So for example, // `--files -l` will still be Mode::Files. if !matches!(*self, Mode::Search(_)) { *self = new; } } } } } /// The kind of search that ripgrep is going to perform. #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub(crate) enum SearchMode { /// The default standard mode of operation. ripgrep looks for matches and /// prints them when found. /// /// There is no specific flag for this mode since it's the default. But /// some of the modes below, like JSON, have negation flags like --no-json /// that let you revert back to this default mode. 
Standard, /// Show files containing at least one match. FilesWithMatches, /// Show files that don't contain any matches. FilesWithoutMatch, /// Show files containing at least one match and the number of matching /// lines. Count, /// Show files containing at least one match and the total number of /// matches. CountMatches, /// Print matches in a JSON lines format. JSON, } /// The thing to generate via the --generate flag. #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub(crate) enum GenerateMode { /// Generate the raw roff used for the man page. Man, /// Completions for bash. CompleteBash, /// Completions for zsh. CompleteZsh, /// Completions for fish. CompleteFish, /// Completions for PowerShell. CompletePowerShell, } /// Indicates how ripgrep should treat binary data. #[derive(Debug, Default, Eq, PartialEq)] pub(crate) enum BinaryMode { /// Automatically determine the binary mode to use. Essentially, when /// a file is searched explicitly, then it will be searched using the /// `SearchAndSuppress` strategy. Otherwise, it will be searched in a way /// that attempts to skip binary files as much as possible. That is, once /// a file is classified as binary, searching will immediately stop. #[default] Auto, /// Search files even when they have binary data, but if a match is found, /// suppress it and emit a warning. /// /// In this mode, `NUL` bytes are replaced with line terminators. This is /// a heuristic meant to reduce heap memory usage, since true binary data /// isn't line oriented. If one attempts to treat such data as line /// oriented, then one may wind up with impractically large lines. For /// example, many binary files contain very long runs of NUL bytes. SearchAndSuppress, /// Treat all files as if they were plain text. There's no skipping and no /// replacement of `NUL` bytes with line terminators. AsText, } /// Indicates what kind of boundary mode to use (line or word). 
#[derive(Debug, Eq, PartialEq)] pub(crate) enum BoundaryMode { /// Only allow matches when surrounded by line bounaries. Line, /// Only allow matches when surrounded by word bounaries. Word, } /// Indicates the buffer mode that ripgrep should use when printing output. /// /// The default is `Auto`. #[derive(Debug, Default, Eq, PartialEq)] pub(crate) enum BufferMode { /// Select the buffer mode, 'line' or 'block', automatically based on /// whether stdout is connected to a tty. #[default] Auto, /// Flush the output buffer whenever a line terminator is seen. /// /// This is useful when wants to see search results more immediately, /// for example, with `tail -f`. Line, /// Flush the output buffer whenever it reaches some fixed size. The size /// is usually big enough to hold many lines. /// /// This is useful for maximum performance, particularly when printing /// lots of results. Block, } /// Indicates the case mode for how to interpret all patterns given to ripgrep. /// /// The default is `Sensitive`. #[derive(Debug, Default, Eq, PartialEq)] pub(crate) enum CaseMode { /// Patterns are matched case sensitively. i.e., `a` does not match `A`. #[default] Sensitive, /// Patterns are matched case insensitively. i.e., `a` does match `A`. Insensitive, /// Patterns are automatically matched case insensitively only when they /// consist of all lowercase literal characters. For example, the pattern /// `a` will match `A` but `A` will not match `a`. Smart, } /// Indicates whether ripgrep should include color/hyperlinks in its output. /// /// The default is `Auto`. #[derive(Debug, Default, Eq, PartialEq)] pub(crate) enum ColorChoice { /// Color and hyperlinks will never be used. Never, /// Color and hyperlinks will be used only when stdout is connected to a /// tty. #[default] Auto, /// Color will always be used. Always, /// Color will always be used and only ANSI escapes will be used. /// /// This only makes sense in the context of legacy Windows console APIs. 
/// At time of writing, ripgrep will try to use the legacy console APIs /// if ANSI coloring isn't believed to be possible. This option will force /// ripgrep to use ANSI coloring. Ansi, } impl ColorChoice { /// Convert this color choice to the corresponding termcolor type. pub(crate) fn to_termcolor(&self) -> termcolor::ColorChoice { match *self { ColorChoice::Never => termcolor::ColorChoice::Never, ColorChoice::Auto => termcolor::ColorChoice::Auto, ColorChoice::Always => termcolor::ColorChoice::Always, ColorChoice::Ansi => termcolor::ColorChoice::AlwaysAnsi, } } } /// Indicates the line context options ripgrep should use for output. /// /// The default is no context at all. #[derive(Debug, Eq, PartialEq)] pub(crate) enum ContextMode { /// All lines will be printed. That is, the context is unbounded. Passthru, /// Only show a certain number of lines before and after each match. Limited(ContextModeLimited), } impl Default for ContextMode { fn default() -> ContextMode { ContextMode::Limited(ContextModeLimited::default()) } } impl ContextMode { /// Set the "before" context. /// /// If this was set to "passthru" context, then it is overridden in favor /// of limited context with the given value for "before" and `0` for /// "after." pub(crate) fn set_before(&mut self, lines: usize) { match *self { ContextMode::Passthru => { *self = ContextMode::Limited(ContextModeLimited { before: Some(lines), after: None, both: None, }) } ContextMode::Limited(ContextModeLimited { ref mut before, .. }) => *before = Some(lines), } } /// Set the "after" context. /// /// If this was set to "passthru" context, then it is overridden in favor /// of limited context with the given value for "after" and `0` for /// "before." pub(crate) fn set_after(&mut self, lines: usize) { match *self { ContextMode::Passthru => { *self = ContextMode::Limited(ContextModeLimited { before: None, after: Some(lines), both: None, }) } ContextMode::Limited(ContextModeLimited { ref mut after, .. 
}) => *after = Some(lines), } } /// Set the "both" context. /// /// If this was set to "passthru" context, then it is overridden in favor /// of limited context with the given value for "both" and `None` for /// "before" and "after". pub(crate) fn set_both(&mut self, lines: usize) { match *self { ContextMode::Passthru => { *self = ContextMode::Limited(ContextModeLimited { before: None, after: None, both: Some(lines), }) } ContextMode::Limited(ContextModeLimited { ref mut both, .. }) => *both = Some(lines), } } /// A convenience function for use in tests that returns the limited /// context. If this mode isn't limited, then it panics. #[cfg(test)] pub(crate) fn get_limited(&self) -> (usize, usize) { match *self { ContextMode::Passthru => unreachable!("context mode is passthru"), ContextMode::Limited(ref limited) => limited.get(), } } } /// A context mode for a finite number of lines. /// /// Namely, this indicates that a specific number of lines (possibly zero) /// should be shown before and/or after each matching line. /// /// Note that there is a subtle difference between `Some(0)` and `None`. In the /// former case, it happens when `0` is given explicitly, where as `None` is /// the default value and occurs when no value is specified. /// /// `both` is only set by the -C/--context flag. The reason why we don't just /// set before = after = --context is because the before and after context /// settings always take precedent over the -C/--context setting, regardless of /// order. Thus, we need to keep track of them separately. #[derive(Debug, Default, Eq, PartialEq)] pub(crate) struct ContextModeLimited { before: Option<usize>, after: Option<usize>, both: Option<usize>, } impl ContextModeLimited { /// Returns the specific number of contextual lines that should be shown /// around each match. This takes proper precedent into account, i.e., /// that `before` and `after` both partially override `both` in all cases. /// /// By default, this returns `(0, 0)`. 
pub(crate) fn get(&self) -> (usize, usize) { let (mut before, mut after) = self.both.map(|lines| (lines, lines)).unwrap_or((0, 0)); // --before and --after always override --context, regardless // of where they appear relative to each other. if let Some(lines) = self.before { before = lines; } if let Some(lines) = self.after { after = lines; } (before, after) } } /// Represents the separator to use between non-contiguous sections of /// contextual lines. /// /// The default is `--`. #[derive(Clone, Debug, Eq, PartialEq)] pub(crate) struct ContextSeparator(Option<BString>); impl Default for ContextSeparator { fn default() -> ContextSeparator { ContextSeparator(Some(BString::from("--"))) } } impl ContextSeparator { /// Create a new context separator from the user provided argument. This /// handles unescaping. pub(crate) fn new(os: &OsStr) -> anyhow::Result<ContextSeparator> { let Some(string) = os.to_str() else { anyhow::bail!( "separator must be valid UTF-8 (use escape sequences \ to provide a separator that is not valid UTF-8)" ) }; Ok(ContextSeparator(Some(Vec::unescape_bytes(string).into()))) } /// Creates a new separator that intructs the printer to disable contextual /// separators entirely. pub(crate) fn disabled() -> ContextSeparator { ContextSeparator(None) } /// Return the raw bytes of this separator. /// /// If context separators were disabled, then this returns `None`. /// /// Note that this may return a `Some` variant with zero bytes. pub(crate) fn into_bytes(self) -> Option<Vec<u8>> { self.0.map(|sep| sep.into()) } } /// The encoding mode the searcher will use. /// /// The default is `Auto`. #[derive(Debug, Default, Eq, PartialEq)] pub(crate) enum EncodingMode { /// Use only BOM sniffing to auto-detect an encoding. #[default] Auto, /// Use an explicit encoding forcefully, but let BOM sniffing override it. Some(grep::searcher::Encoding), /// Use no explicit encoding and disable all BOM sniffing. 
This will /// always result in searching the raw bytes, regardless of their /// true encoding. Disabled, } /// The regex engine to use. /// /// The default is `Default`. #[derive(Debug, Default, Eq, PartialEq)] pub(crate) enum EngineChoice { /// Uses the default regex engine: Rust's `regex` crate. /// /// (Well, technically it uses `regex-automata`, but `regex-automata` is /// the implementation of the `regex` crate.) #[default] Default, /// Dynamically select the right engine to use. /// /// This works by trying to use the default engine, and if the pattern does /// not compile, it switches over to the PCRE2 engine if it's available. Auto, /// Uses the PCRE2 regex engine if it's available. PCRE2, } /// The field context separator to use to between metadata for each contextual /// line. /// /// The default is `-`. #[derive(Clone, Debug, Eq, PartialEq)] pub(crate) struct FieldContextSeparator(BString); impl Default for FieldContextSeparator { fn default() -> FieldContextSeparator { FieldContextSeparator(BString::from("-")) } } impl FieldContextSeparator { /// Create a new separator from the given argument value provided by the /// user. Unescaping it automatically handled. pub(crate) fn new(os: &OsStr) -> anyhow::Result<FieldContextSeparator> { let Some(string) = os.to_str() else { anyhow::bail!( "separator must be valid UTF-8 (use escape sequences \ to provide a separator that is not valid UTF-8)" ) }; Ok(FieldContextSeparator(Vec::unescape_bytes(string).into())) } /// Return the raw bytes of this separator. /// /// Note that this may return an empty `Vec`. pub(crate) fn into_bytes(self) -> Vec<u8> { self.0.into() } } /// The field match separator to use to between metadata for each matching /// line. /// /// The default is `:`. 
#[derive(Clone, Debug, Eq, PartialEq)] pub(crate) struct FieldMatchSeparator(BString); impl Default for FieldMatchSeparator { fn default() -> FieldMatchSeparator { FieldMatchSeparator(BString::from(":")) } } impl FieldMatchSeparator { /// Create a new separator from the given argument value provided by the /// user. Unescaping it automatically handled. pub(crate) fn new(os: &OsStr) -> anyhow::Result<FieldMatchSeparator> { let Some(string) = os.to_str() else { anyhow::bail!( "separator must be valid UTF-8 (use escape sequences \ to provide a separator that is not valid UTF-8)" ) }; Ok(FieldMatchSeparator(Vec::unescape_bytes(string).into())) } /// Return the raw bytes of this separator. /// /// Note that this may return an empty `Vec`. pub(crate) fn into_bytes(self) -> Vec<u8> { self.0.into() } } /// The type of logging to do. `Debug` emits some details while `Trace` emits /// much more. #[derive(Debug, Eq, PartialEq)] pub(crate) enum LoggingMode { Debug, Trace, } /// Indicates when to use memory maps. /// /// The default is `Auto`. #[derive(Debug, Default, Eq, PartialEq)] pub(crate) enum MmapMode { /// This instructs ripgrep to use heuristics for selecting when to and not /// to use memory maps for searching. #[default] Auto, /// This instructs ripgrep to always try memory maps when possible. (Memory /// maps are not possible to use in all circumstances, for example, for /// virtual files.) AlwaysTryMmap, /// Never use memory maps under any circumstances. This includes even /// when multi-line search is enabled where ripgrep will read the entire /// contents of a file on to the heap before searching it. Never, } /// Represents a source of patterns that ripgrep should search for. /// /// The reason to unify these is so that we can retain the order of `-f/--flag` /// and `-e/--regexp` flags relative to one another. #[derive(Debug, Eq, PartialEq)] pub(crate) enum PatternSource { /// Comes from the `-e/--regexp` flag. Regexp(String), /// Comes from the `-f/--file` flag. 
File(PathBuf), } /// The sort criteria, if present. #[derive(Debug, Eq, PartialEq)] pub(crate) struct SortMode { /// Whether to reverse the sort criteria (i.e., descending order). pub(crate) reverse: bool, /// The actual sorting criteria. pub(crate) kind: SortModeKind, } /// The criteria to use for sorting. #[derive(Debug, Eq, PartialEq)] pub(crate) enum SortModeKind { /// Sort by path. Path, /// Sort by last modified time. LastModified, /// Sort by last accessed time. LastAccessed, /// Sort by creation time. Created, } impl SortMode { /// Checks whether the selected sort mode is supported. If it isn't, an /// error (hopefully explaining why) is returned. pub(crate) fn supported(&self) -> anyhow::Result<()> { match self.kind { SortModeKind::Path => Ok(()), SortModeKind::LastModified => { let md = std::env::current_exe() .and_then(|p| p.metadata()) .and_then(|md| md.modified()); let Err(err) = md else { return Ok(()) }; anyhow::bail!( "sorting by last modified isn't supported: {err}" ); } SortModeKind::LastAccessed => { let md = std::env::current_exe() .and_then(|p| p.metadata()) .and_then(|md| md.accessed()); let Err(err) = md else { return Ok(()) }; anyhow::bail!( "sorting by last accessed isn't supported: {err}" ); } SortModeKind::Created => { let md = std::env::current_exe() .and_then(|p| p.metadata()) .and_then(|md| md.created()); let Err(err) = md else { return Ok(()) }; anyhow::bail!( "sorting by creation time isn't supported: {err}" ); } } } } /// A single instance of either a change or a selection of one ripgrep's /// file types. #[derive(Debug, Eq, PartialEq)] pub(crate) enum TypeChange { /// Clear the given type from ripgrep. Clear { name: String }, /// Add the given type definition (name and glob) to ripgrep. Add { def: String }, /// Select the given type for filtering. Select { name: String }, /// Select the given type for filtering but negate it. Negate { name: String }, }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/core/flags/hiargs.rs
crates/core/flags/hiargs.rs
/*! Provides the definition of high level arguments from CLI flags. */ use std::{ collections::HashSet, path::{Path, PathBuf}, }; use { bstr::BString, grep::printer::{ColorSpecs, SummaryKind}, }; use crate::{ flags::lowargs::{ BinaryMode, BoundaryMode, BufferMode, CaseMode, ColorChoice, ContextMode, ContextSeparator, EncodingMode, EngineChoice, FieldContextSeparator, FieldMatchSeparator, LowArgs, MmapMode, Mode, PatternSource, SearchMode, SortMode, SortModeKind, TypeChange, }, haystack::{Haystack, HaystackBuilder}, search::{PatternMatcher, Printer, SearchWorker, SearchWorkerBuilder}, }; /// A high level representation of CLI arguments. /// /// The distinction between low and high level arguments is somewhat arbitrary /// and wishy washy. The main idea here is that high level arguments generally /// require all of CLI parsing to be finished. For example, one cannot /// construct a glob matcher until all of the glob patterns are known. /// /// So while low level arguments are collected during parsing itself, high /// level arguments aren't created until parsing has completely finished. 
#[derive(Debug)] pub(crate) struct HiArgs { binary: BinaryDetection, boundary: Option<BoundaryMode>, buffer: BufferMode, byte_offset: bool, case: CaseMode, color: ColorChoice, colors: grep::printer::ColorSpecs, column: bool, context: ContextMode, context_separator: ContextSeparator, crlf: bool, cwd: PathBuf, dfa_size_limit: Option<usize>, encoding: EncodingMode, engine: EngineChoice, field_context_separator: FieldContextSeparator, field_match_separator: FieldMatchSeparator, file_separator: Option<Vec<u8>>, fixed_strings: bool, follow: bool, globs: ignore::overrides::Override, heading: bool, hidden: bool, hyperlink_config: grep::printer::HyperlinkConfig, ignore_file_case_insensitive: bool, ignore_file: Vec<PathBuf>, include_zero: bool, invert_match: bool, is_terminal_stdout: bool, line_number: bool, max_columns: Option<u64>, max_columns_preview: bool, max_count: Option<u64>, max_depth: Option<usize>, max_filesize: Option<u64>, mmap_choice: grep::searcher::MmapChoice, mode: Mode, multiline: bool, multiline_dotall: bool, no_ignore_dot: bool, no_ignore_exclude: bool, no_ignore_files: bool, no_ignore_global: bool, no_ignore_parent: bool, no_ignore_vcs: bool, no_require_git: bool, no_unicode: bool, null_data: bool, one_file_system: bool, only_matching: bool, path_separator: Option<u8>, paths: Paths, path_terminator: Option<u8>, patterns: Patterns, pre: Option<PathBuf>, pre_globs: ignore::overrides::Override, quiet: bool, quit_after_match: bool, regex_size_limit: Option<usize>, replace: Option<BString>, search_zip: bool, sort: Option<SortMode>, stats: Option<grep::printer::Stats>, stop_on_nonmatch: bool, threads: usize, trim: bool, types: ignore::types::Types, vimgrep: bool, with_filename: bool, } impl HiArgs { /// Convert low level arguments into high level arguments. /// /// This process can fail for a variety of reasons. For example, invalid /// globs or some kind of environment issue. 
pub(crate) fn from_low_args(mut low: LowArgs) -> anyhow::Result<HiArgs> { // Callers should not be trying to convert low-level arguments when // a short-circuiting special mode is present. assert_eq!(None, low.special, "special mode demands short-circuiting"); // If the sorting mode isn't supported, then we bail loudly. I'm not // sure if this is the right thing to do. We could silently "not sort" // as well. If we wanted to go that route, then we could just set // `low.sort = None` if `supported()` returns an error. if let Some(ref sort) = low.sort { sort.supported()?; } // We modify the mode in-place on `low` so that subsequent conversions // see the correct mode. match low.mode { Mode::Search(ref mut mode) => match *mode { // treat `-v --count-matches` as `-v --count` SearchMode::CountMatches if low.invert_match => { *mode = SearchMode::Count; } // treat `-o --count` as `--count-matches` SearchMode::Count if low.only_matching => { *mode = SearchMode::CountMatches; } _ => {} }, _ => {} } let mut state = State::new()?; let patterns = Patterns::from_low_args(&mut state, &mut low)?; let paths = Paths::from_low_args(&mut state, &patterns, &mut low)?; let binary = BinaryDetection::from_low_args(&state, &low); let colors = take_color_specs(&mut state, &mut low); let hyperlink_config = take_hyperlink_config(&mut state, &mut low)?; let stats = stats(&low); let types = types(&low)?; let globs = globs(&state, &low)?; let pre_globs = preprocessor_globs(&state, &low)?; let color = match low.color { ColorChoice::Auto if !state.is_terminal_stdout => { ColorChoice::Never } _ => low.color, }; let column = low.column.unwrap_or(low.vimgrep); let heading = match low.heading { None => !low.vimgrep && state.is_terminal_stdout, Some(false) => false, Some(true) => !low.vimgrep, }; let path_terminator = if low.null { Some(b'\x00') } else { None }; let quit_after_match = stats.is_none() && low.quiet; let threads = if low.sort.is_some() || paths.is_one_file { 1 } else if let Some(threads) 
= low.threads { threads } else { std::thread::available_parallelism().map_or(1, |n| n.get()).min(12) }; log::debug!("using {threads} thread(s)"); let with_filename = low .with_filename .unwrap_or_else(|| low.vimgrep || !paths.is_one_file); let file_separator = match low.mode { Mode::Search(SearchMode::Standard) => { if heading { Some(b"".to_vec()) } else if let ContextMode::Limited(ref limited) = low.context { let (before, after) = limited.get(); if before > 0 || after > 0 { low.context_separator.clone().into_bytes() } else { None } } else { None } } _ => None, }; let line_number = low.line_number.unwrap_or_else(|| { if low.quiet { return false; } let Mode::Search(ref search_mode) = low.mode else { return false }; match *search_mode { SearchMode::FilesWithMatches | SearchMode::FilesWithoutMatch | SearchMode::Count | SearchMode::CountMatches => return false, SearchMode::JSON => return true, SearchMode::Standard => { // A few things can imply counting line numbers. In // particular, we generally want to show line numbers by // default when printing to a tty for human consumption, // except for one interesting case: when we're only // searching stdin. This makes pipelines work as expected. (state.is_terminal_stdout && !paths.is_only_stdin()) || column || low.vimgrep } } }); let mmap_choice = { // SAFETY: Memory maps are difficult to impossible to encapsulate // safely in a portable way that doesn't simultaneously negate some // of the benfits of using memory maps. For ripgrep's use, we never // mutate a memory map and generally never store the contents of // memory map in a data structure that depends on immutability. // Generally speaking, the worst thing that can happen is a SIGBUS // (if the underlying file is truncated while reading it), which // will cause ripgrep to abort. This reasoning should be treated as // suspect. 
let maybe = unsafe { grep::searcher::MmapChoice::auto() }; let never = grep::searcher::MmapChoice::never(); match low.mmap { MmapMode::Auto => { if paths.paths.len() <= 10 && paths.paths.iter().all(|p| p.is_file()) { // If we're only searching a few paths and all of them // are files, then memory maps are probably faster. maybe } else { never } } MmapMode::AlwaysTryMmap => maybe, MmapMode::Never => never, } }; Ok(HiArgs { mode: low.mode, patterns, paths, binary, boundary: low.boundary, buffer: low.buffer, byte_offset: low.byte_offset, case: low.case, color, colors, column, context: low.context, context_separator: low.context_separator, crlf: low.crlf, cwd: state.cwd, dfa_size_limit: low.dfa_size_limit, encoding: low.encoding, engine: low.engine, field_context_separator: low.field_context_separator, field_match_separator: low.field_match_separator, file_separator, fixed_strings: low.fixed_strings, follow: low.follow, heading, hidden: low.hidden, hyperlink_config, ignore_file: low.ignore_file, ignore_file_case_insensitive: low.ignore_file_case_insensitive, include_zero: low.include_zero, invert_match: low.invert_match, is_terminal_stdout: state.is_terminal_stdout, line_number, max_columns: low.max_columns, max_columns_preview: low.max_columns_preview, max_count: low.max_count, max_depth: low.max_depth, max_filesize: low.max_filesize, mmap_choice, multiline: low.multiline, multiline_dotall: low.multiline_dotall, no_ignore_dot: low.no_ignore_dot, no_ignore_exclude: low.no_ignore_exclude, no_ignore_files: low.no_ignore_files, no_ignore_global: low.no_ignore_global, no_ignore_parent: low.no_ignore_parent, no_ignore_vcs: low.no_ignore_vcs, no_require_git: low.no_require_git, no_unicode: low.no_unicode, null_data: low.null_data, one_file_system: low.one_file_system, only_matching: low.only_matching, globs, path_separator: low.path_separator, path_terminator, pre: low.pre, pre_globs, quiet: low.quiet, quit_after_match, regex_size_limit: low.regex_size_limit, replace: 
low.replace, search_zip: low.search_zip, sort: low.sort, stats, stop_on_nonmatch: low.stop_on_nonmatch, threads, trim: low.trim, types, vimgrep: low.vimgrep, with_filename, }) } /// Returns a writer for printing buffers to stdout. /// /// This is intended to be used from multiple threads. Namely, a buffer /// writer can create new buffers that are sent to threads. Threads can /// then independently write to the buffers. Once a unit of work is /// complete, a buffer can be given to the buffer writer to write to /// stdout. pub(crate) fn buffer_writer(&self) -> termcolor::BufferWriter { let mut wtr = termcolor::BufferWriter::stdout(self.color.to_termcolor()); wtr.separator(self.file_separator.clone()); wtr } /// Returns true when ripgrep had to guess to search the current working /// directory. That is, it's true when ripgrep is called without any file /// paths or directories to search. /// /// Other than changing how file paths are printed (i.e., without the /// leading `./`), it's also useful to know for diagnostic reasons. For /// example, ripgrep will print an error message when nothing is searched /// since it's possible the ignore rules in play are too aggressive. But /// this warning is only emitted when ripgrep was called without any /// explicit file paths since otherwise the warning would likely be too /// aggressive. pub(crate) fn has_implicit_path(&self) -> bool { self.paths.has_implicit_path } /// Return a properly configured builder for constructing haystacks. /// /// The builder can be used to turn a directory entry (from the `ignore` /// crate) into something that can be searched. pub(crate) fn haystack_builder(&self) -> HaystackBuilder { let mut builder = HaystackBuilder::new(); builder.strip_dot_prefix(self.paths.has_implicit_path); builder } /// Return the matcher that should be used for searching using the engine /// choice made by the user. 
/// /// If there was a problem building the matcher (e.g., a syntax error), /// then this returns an error. pub(crate) fn matcher(&self) -> anyhow::Result<PatternMatcher> { match self.engine { EngineChoice::Default => match self.matcher_rust() { Ok(m) => Ok(m), Err(err) => { anyhow::bail!(suggest_other_engine(err.to_string())); } }, EngineChoice::PCRE2 => Ok(self.matcher_pcre2()?), EngineChoice::Auto => { let rust_err = match self.matcher_rust() { Ok(m) => return Ok(m), Err(err) => err, }; log::debug!( "error building Rust regex in hybrid mode:\n{rust_err}", ); let pcre_err = match self.matcher_pcre2() { Ok(m) => return Ok(m), Err(err) => err, }; let divider = "~".repeat(79); anyhow::bail!( "regex could not be compiled with either the default \ regex engine or with PCRE2.\n\n\ default regex engine error:\n\ {divider}\n\ {rust_err}\n\ {divider}\n\n\ PCRE2 regex engine error:\n{pcre_err}", ); } } } /// Build a matcher using PCRE2. /// /// If there was a problem building the matcher (such as a regex syntax /// error), then an error is returned. /// /// If the `pcre2` feature is not enabled then this always returns an /// error. fn matcher_pcre2(&self) -> anyhow::Result<PatternMatcher> { #[cfg(feature = "pcre2")] { let mut builder = grep::pcre2::RegexMatcherBuilder::new(); builder.multi_line(true).fixed_strings(self.fixed_strings); match self.case { CaseMode::Sensitive => builder.caseless(false), CaseMode::Insensitive => builder.caseless(true), CaseMode::Smart => builder.case_smart(true), }; if let Some(ref boundary) = self.boundary { match *boundary { BoundaryMode::Line => builder.whole_line(true), BoundaryMode::Word => builder.word(true), }; } // For whatever reason, the JIT craps out during regex compilation with // a "no more memory" error on 32 bit systems. So don't use it there. if cfg!(target_pointer_width = "64") { builder .jit_if_available(true) // The PCRE2 docs say that 32KB is the default, and that 1MB // should be big enough for anything. 
But let's crank it to // 10MB. .max_jit_stack_size(Some(10 * (1 << 20))); } if !self.no_unicode { builder.utf(true).ucp(true); } if self.multiline { builder.dotall(self.multiline_dotall); } if self.crlf { builder.crlf(true); } let m = builder.build_many(&self.patterns.patterns)?; Ok(PatternMatcher::PCRE2(m)) } #[cfg(not(feature = "pcre2"))] { Err(anyhow::anyhow!( "PCRE2 is not available in this build of ripgrep" )) } } /// Build a matcher using Rust's regex engine. /// /// If there was a problem building the matcher (such as a regex syntax /// error), then an error is returned. fn matcher_rust(&self) -> anyhow::Result<PatternMatcher> { let mut builder = grep::regex::RegexMatcherBuilder::new(); builder .multi_line(true) .unicode(!self.no_unicode) .octal(false) .fixed_strings(self.fixed_strings); match self.case { CaseMode::Sensitive => builder.case_insensitive(false), CaseMode::Insensitive => builder.case_insensitive(true), CaseMode::Smart => builder.case_smart(true), }; if let Some(ref boundary) = self.boundary { match *boundary { BoundaryMode::Line => builder.whole_line(true), BoundaryMode::Word => builder.word(true), }; } if self.multiline { builder.dot_matches_new_line(self.multiline_dotall); if self.crlf { builder.crlf(true).line_terminator(None); } } else { builder.line_terminator(Some(b'\n')).dot_matches_new_line(false); if self.crlf { builder.crlf(true); } // We don't need to set this in multiline mode since multiline // matchers don't use optimizations related to line terminators. // Moreover, a multiline regex used with --null-data should // be allowed to match NUL bytes explicitly, which this would // otherwise forbid. 
if self.null_data { builder.line_terminator(Some(b'\x00')); } } if let Some(limit) = self.regex_size_limit { builder.size_limit(limit); } if let Some(limit) = self.dfa_size_limit { builder.dfa_size_limit(limit); } if !self.binary.is_none() { builder.ban_byte(Some(b'\x00')); } let m = match builder.build_many(&self.patterns.patterns) { Ok(m) => m, Err(err) => { anyhow::bail!(suggest_text(suggest_multiline(err.to_string()))) } }; Ok(PatternMatcher::RustRegex(m)) } /// Returns true if some non-zero number of matches is believed to be /// possible. /// /// When this returns false, it is impossible for ripgrep to ever report /// a match. pub(crate) fn matches_possible(&self) -> bool { if self.patterns.patterns.is_empty() && !self.invert_match { return false; } if self.max_count == Some(0) { return false; } true } /// Returns the "mode" that ripgrep should operate in. /// /// This is generally useful for determining what action ripgrep should /// take. The main mode is of course to "search," but there are other /// non-search modes such as `--type-list` and `--files`. pub(crate) fn mode(&self) -> Mode { self.mode } /// Returns a builder for constructing a "path printer." /// /// This is useful for the `--files` mode in ripgrep, where the printer /// just needs to emit paths and not need to worry about the functionality /// of searching. pub(crate) fn path_printer_builder( &self, ) -> grep::printer::PathPrinterBuilder { let mut builder = grep::printer::PathPrinterBuilder::new(); builder .color_specs(self.colors.clone()) .hyperlink(self.hyperlink_config.clone()) .separator(self.path_separator.clone()) .terminator(self.path_terminator.unwrap_or(b'\n')); builder } /// Returns a printer for the given search mode. /// /// This chooses which printer to build (JSON, summary or standard) based /// on the search mode given. 
pub(crate) fn printer<W: termcolor::WriteColor>( &self, search_mode: SearchMode, wtr: W, ) -> Printer<W> { let summary_kind = if self.quiet { match search_mode { SearchMode::FilesWithMatches | SearchMode::Count | SearchMode::CountMatches | SearchMode::JSON | SearchMode::Standard => SummaryKind::QuietWithMatch, SearchMode::FilesWithoutMatch => { SummaryKind::QuietWithoutMatch } } } else { match search_mode { SearchMode::FilesWithMatches => SummaryKind::PathWithMatch, SearchMode::FilesWithoutMatch => SummaryKind::PathWithoutMatch, SearchMode::Count => SummaryKind::Count, SearchMode::CountMatches => SummaryKind::CountMatches, SearchMode::JSON => { return Printer::JSON(self.printer_json(wtr)); } SearchMode::Standard => { return Printer::Standard(self.printer_standard(wtr)); } } }; Printer::Summary(self.printer_summary(wtr, summary_kind)) } /// Builds a JSON printer. fn printer_json<W: std::io::Write>( &self, wtr: W, ) -> grep::printer::JSON<W> { grep::printer::JSONBuilder::new() .pretty(false) .always_begin_end(false) .replacement(self.replace.clone().map(|r| r.into())) .build(wtr) } /// Builds a "standard" grep printer where matches are printed as plain /// text lines. 
fn printer_standard<W: termcolor::WriteColor>( &self, wtr: W, ) -> grep::printer::Standard<W> { let mut builder = grep::printer::StandardBuilder::new(); builder .byte_offset(self.byte_offset) .color_specs(self.colors.clone()) .column(self.column) .heading(self.heading) .hyperlink(self.hyperlink_config.clone()) .max_columns_preview(self.max_columns_preview) .max_columns(self.max_columns) .only_matching(self.only_matching) .path(self.with_filename) .path_terminator(self.path_terminator.clone()) .per_match_one_line(true) .per_match(self.vimgrep) .replacement(self.replace.clone().map(|r| r.into())) .separator_context(self.context_separator.clone().into_bytes()) .separator_field_context( self.field_context_separator.clone().into_bytes(), ) .separator_field_match( self.field_match_separator.clone().into_bytes(), ) .separator_path(self.path_separator.clone()) .stats(self.stats.is_some()) .trim_ascii(self.trim); // When doing multi-threaded searching, the buffer writer is // responsible for writing separators since it is the only thing that // knows whether something has been printed or not. But for the single // threaded case, we don't use a buffer writer and thus can let the // printer own this. if self.threads == 1 { builder.separator_search(self.file_separator.clone()); } builder.build(wtr) } /// Builds a "summary" printer where search results are aggregated on a /// file-by-file basis. fn printer_summary<W: termcolor::WriteColor>( &self, wtr: W, kind: SummaryKind, ) -> grep::printer::Summary<W> { grep::printer::SummaryBuilder::new() .color_specs(self.colors.clone()) .exclude_zero(!self.include_zero) .hyperlink(self.hyperlink_config.clone()) .kind(kind) .path(self.with_filename) .path_terminator(self.path_terminator.clone()) .separator_field(b":".to_vec()) .separator_path(self.path_separator.clone()) .stats(self.stats.is_some()) .build(wtr) } /// Returns true if ripgrep should operate in "quiet" mode. 
/// /// Generally speaking, quiet mode means that ripgrep should not print /// anything to stdout. There are some exceptions. For example, when the /// user has provided `--stats`, then ripgrep will print statistics to /// stdout. pub(crate) fn quiet(&self) -> bool { self.quiet } /// Returns true when ripgrep should stop searching after a single match is /// found. /// /// This is useful for example when quiet mode is enabled. In that case, /// users generally can't tell the difference in behavior between a search /// that finds all matches and a search that only finds one of them. (An /// exception here is if `--stats` is given, then `quit_after_match` will /// always return false since the user expects ripgrep to find everything.) pub(crate) fn quit_after_match(&self) -> bool { self.quit_after_match } /// Build a worker for executing searches. /// /// Search results are found using the given matcher and written to the /// given printer. pub(crate) fn search_worker<W: termcolor::WriteColor>( &self, matcher: PatternMatcher, searcher: grep::searcher::Searcher, printer: Printer<W>, ) -> anyhow::Result<SearchWorker<W>> { let mut builder = SearchWorkerBuilder::new(); builder .preprocessor(self.pre.clone())? .preprocessor_globs(self.pre_globs.clone()) .search_zip(self.search_zip) .binary_detection_explicit(self.binary.explicit.clone()) .binary_detection_implicit(self.binary.implicit.clone()); Ok(builder.build(matcher, searcher, printer)) } /// Build a searcher from the command line parameters. 
pub(crate) fn searcher(&self) -> anyhow::Result<grep::searcher::Searcher> { let line_term = if self.crlf { grep::matcher::LineTerminator::crlf() } else if self.null_data { grep::matcher::LineTerminator::byte(b'\x00') } else { grep::matcher::LineTerminator::byte(b'\n') }; let mut builder = grep::searcher::SearcherBuilder::new(); builder .max_matches(self.max_count) .line_terminator(line_term) .invert_match(self.invert_match) .line_number(self.line_number) .multi_line(self.multiline) .memory_map(self.mmap_choice.clone()) .stop_on_nonmatch(self.stop_on_nonmatch); match self.context { ContextMode::Passthru => { builder.passthru(true); } ContextMode::Limited(ref limited) => { let (before, after) = limited.get(); builder.before_context(before); builder.after_context(after); } } match self.encoding { EncodingMode::Auto => {} // default for the searcher EncodingMode::Some(ref enc) => { builder.encoding(Some(enc.clone())); } EncodingMode::Disabled => { builder.bom_sniffing(false); } } Ok(builder.build()) } /// Given an iterator of haystacks, sort them if necessary. /// /// When sorting is necessary, this will collect the entire iterator into /// memory, sort them and then return a new iterator. When sorting is not /// necessary, then the iterator given is returned as is without collecting /// it into memory. /// /// Once special case is when sorting by path in ascending order has been /// requested. In this case, the iterator given is returned as is without /// any additional sorting. This is done because `walk_builder()` will sort /// the iterator it yields during directory traversal, so no additional /// sorting is needed. 
pub(crate) fn sort<'a, I>( &self, haystacks: I, ) -> Box<dyn Iterator<Item = Haystack> + 'a> where I: Iterator<Item = Haystack> + 'a, { use std::{cmp::Ordering, fs::Metadata, io, time::SystemTime}; fn attach_timestamps( haystacks: impl Iterator<Item = Haystack>, get: impl Fn(&Metadata) -> io::Result<SystemTime>, ) -> impl Iterator<Item = (Haystack, Option<SystemTime>)> { haystacks.map(move |s| { let time = s.path().metadata().and_then(|m| get(&m)).ok(); (s, time) }) } let Some(ref sort) = self.sort else { return Box::new(haystacks) }; let mut with_timestamps: Vec<_> = match sort.kind { SortModeKind::Path if !sort.reverse => return Box::new(haystacks), SortModeKind::Path => { let mut haystacks = haystacks.collect::<Vec<Haystack>>(); haystacks.sort_by(|ref h1, ref h2| { h1.path().cmp(h2.path()).reverse() }); return Box::new(haystacks.into_iter()); } SortModeKind::LastModified => { attach_timestamps(haystacks, |md| md.modified()).collect() } SortModeKind::LastAccessed => { attach_timestamps(haystacks, |md| md.accessed()).collect() } SortModeKind::Created => { attach_timestamps(haystacks, |md| md.created()).collect() } }; with_timestamps.sort_by(|(_, t1), (_, t2)| { let ordering = match (*t1, *t2) { // Both have metadata, do the obvious thing. (Some(t1), Some(t2)) => t1.cmp(&t2), // Things that error should appear later (when ascending). (Some(_), None) => Ordering::Less, // Things that error should appear later (when ascending). (None, Some(_)) => Ordering::Greater, // When both error, we can't distinguish, so treat as equal. (None, None) => Ordering::Equal, }; if sort.reverse { ordering.reverse() } else { ordering } }); Box::new(with_timestamps.into_iter().map(|(s, _)| s)) } /// Returns a stats object if the user requested that ripgrep keep track /// of various metrics during a search. /// /// When this returns `None`, then callers may assume that the user did /// not request statistics. 
pub(crate) fn stats(&self) -> Option<grep::printer::Stats> { self.stats.clone() } /// Returns a color-enabled writer for stdout. /// /// The writer returned is also configured to do either line or block /// buffering, based on either explicit configuration from the user via CLI /// flags, or automatically based on whether stdout is connected to a tty. pub(crate) fn stdout(&self) -> grep::cli::StandardStream { let color = self.color.to_termcolor(); match self.buffer { BufferMode::Auto => { if self.is_terminal_stdout { grep::cli::stdout_buffered_line(color) } else {
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
true
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/core/flags/complete/bash.rs
crates/core/flags/complete/bash.rs
/*! Provides completions for ripgrep's CLI for the bash shell. */ use crate::flags::defs::FLAGS; const TEMPLATE_FULL: &'static str = " _rg() { local i cur prev opts cmds COMPREPLY=() cur=\"${COMP_WORDS[COMP_CWORD]}\" prev=\"${COMP_WORDS[COMP_CWORD-1]}\" cmd=\"\" opts=\"\" for i in ${COMP_WORDS[@]}; do case \"${i}\" in rg) cmd=\"rg\" ;; *) ;; esac done case \"${cmd}\" in rg) opts=\"!OPTS!\" if [[ ${cur} == -* || ${COMP_CWORD} -eq 1 ]] ; then COMPREPLY=($(compgen -W \"${opts}\" -- \"${cur}\")) return 0 fi case \"${prev}\" in !CASES! esac COMPREPLY=($(compgen -W \"${opts}\" -- \"${cur}\")) return 0 ;; esac } complete -F _rg -o bashdefault -o default rg "; const TEMPLATE_CASE: &'static str = " !FLAG!) COMPREPLY=($(compgen -f \"${cur}\")) return 0 ;; "; const TEMPLATE_CASE_CHOICES: &'static str = " !FLAG!) COMPREPLY=($(compgen -W \"!CHOICES!\" -- \"${cur}\")) return 0 ;; "; /// Generate completions for Bash. /// /// Note that these completions are based on what was produced for ripgrep <=13 /// using Clap 2.x. Improvements on this are welcome. 
pub(crate) fn generate() -> String { let mut opts = String::new(); for flag in FLAGS.iter() { opts.push_str("--"); opts.push_str(flag.name_long()); opts.push(' '); if let Some(short) = flag.name_short() { opts.push('-'); opts.push(char::from(short)); opts.push(' '); } if let Some(name) = flag.name_negated() { opts.push_str("--"); opts.push_str(name); opts.push(' '); } } opts.push_str("<PATTERN> <PATH>..."); let mut cases = String::new(); for flag in FLAGS.iter() { let template = if !flag.doc_choices().is_empty() { let choices = flag.doc_choices().join(" "); TEMPLATE_CASE_CHOICES.trim_end().replace("!CHOICES!", &choices) } else { TEMPLATE_CASE.trim_end().to_string() }; let name = format!("--{}", flag.name_long()); cases.push_str(&template.replace("!FLAG!", &name)); if let Some(short) = flag.name_short() { let name = format!("-{}", char::from(short)); cases.push_str(&template.replace("!FLAG!", &name)); } if let Some(negated) = flag.name_negated() { let name = format!("--{negated}"); cases.push_str(&template.replace("!FLAG!", &name)); } } TEMPLATE_FULL .replace("!OPTS!", &opts) .replace("!CASES!", &cases) .trim_start() .to_string() }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/core/flags/complete/zsh.rs
crates/core/flags/complete/zsh.rs
/*! Provides completions for ripgrep's CLI for the zsh shell. Unlike completion short for other shells (at time of writing), zsh's completions for ripgrep are maintained by hand. This is because: 1. They are lovingly written by an expert in such things. 2. Are much higher in quality than the ones below that are auto-generated. Namely, the zsh completions take application level context about flag compatibility into account. 3. There is a CI script that fails if a new flag is added to ripgrep that isn't included in the zsh completions. 4. There is a wealth of documentation in the zsh script explaining how it works and how it can be extended. In principle, I'd be open to maintaining any completion script by hand so long as it meets criteria 3 and 4 above. */ /// Generate completions for zsh. pub(crate) fn generate() -> String { let hyperlink_alias_descriptions = grep::printer::hyperlink_aliases() .iter() .map(|alias| { format!(r#" {}:"{}""#, alias.name(), alias.description()) }) .collect::<Vec<String>>() .join("\n"); include_str!("rg.zsh") .replace("!ENCODINGS!", super::ENCODINGS.trim_end()) .replace("!HYPERLINK_ALIASES!", &hyperlink_alias_descriptions) }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/core/flags/complete/mod.rs
crates/core/flags/complete/mod.rs
/*! Modules for generating completions for various shells. */ static ENCODINGS: &'static str = include_str!("encodings.sh"); pub(super) mod bash; pub(super) mod fish; pub(super) mod powershell; pub(super) mod zsh;
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/core/flags/complete/fish.rs
crates/core/flags/complete/fish.rs
/*! Provides completions for ripgrep's CLI for the fish shell. */ use crate::flags::{CompletionType, defs::FLAGS}; const TEMPLATE: &'static str = "complete -c rg !SHORT! -l !LONG! -d '!DOC!'"; const TEMPLATE_NEGATED: &'static str = "complete -c rg -l !NEGATED! -n '__rg_contains_opt !LONG! !SHORT!' -d '!DOC!'\n"; /// Generate completions for Fish. /// /// Reference: <https://fishshell.com/docs/current/completions.html> pub(crate) fn generate() -> String { let mut out = String::new(); out.push_str(include_str!("prelude.fish")); out.push('\n'); for flag in FLAGS.iter() { let short = match flag.name_short() { None => "".to_string(), Some(byte) => format!("-s {}", char::from(byte)), }; let long = flag.name_long(); let doc = flag.doc_short().replace("'", "\\'"); let mut completion = TEMPLATE .replace("!SHORT!", &short) .replace("!LONG!", &long) .replace("!DOC!", &doc); match flag.completion_type() { CompletionType::Filename => { completion.push_str(" -r -F"); } CompletionType::Executable => { completion.push_str(" -r -f -a '(__fish_complete_command)'"); } CompletionType::Filetype => { completion.push_str( " -r -f -a '(rg --type-list | string replace : \\t)'", ); } CompletionType::Encoding => { completion.push_str(" -r -f -a '"); completion.push_str(super::ENCODINGS); completion.push_str("'"); } CompletionType::Other if !flag.doc_choices().is_empty() => { completion.push_str(" -r -f -a '"); completion.push_str(&flag.doc_choices().join(" ")); completion.push_str("'"); } CompletionType::Other if !flag.is_switch() => { completion.push_str(" -r -f"); } CompletionType::Other => (), } completion.push('\n'); out.push_str(&completion); if let Some(negated) = flag.name_negated() { let short = match flag.name_short() { None => "".to_string(), Some(byte) => char::from(byte).to_string(), }; out.push_str( &TEMPLATE_NEGATED .replace("!NEGATED!", &negated) .replace("!SHORT!", &short) .replace("!LONG!", &long) .replace("!DOC!", &doc), ); } } out }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/core/flags/complete/powershell.rs
crates/core/flags/complete/powershell.rs
/*! Provides completions for ripgrep's CLI for PowerShell. */ use crate::flags::defs::FLAGS; const TEMPLATE: &'static str = " using namespace System.Management.Automation using namespace System.Management.Automation.Language Register-ArgumentCompleter -Native -CommandName 'rg' -ScriptBlock { param($wordToComplete, $commandAst, $cursorPosition) $commandElements = $commandAst.CommandElements $command = @( 'rg' for ($i = 1; $i -lt $commandElements.Count; $i++) { $element = $commandElements[$i] if ($element -isnot [StringConstantExpressionAst] -or $element.StringConstantType -ne [StringConstantType]::BareWord -or $element.Value.StartsWith('-')) { break } $element.Value }) -join ';' $completions = @(switch ($command) { 'rg' { !FLAGS! } }) $completions.Where{ $_.CompletionText -like \"$wordToComplete*\" } | Sort-Object -Property ListItemText } "; const TEMPLATE_FLAG: &'static str = "[CompletionResult]::new('!DASH_NAME!', '!NAME!', [CompletionResultType]::ParameterName, '!DOC!')"; /// Generate completions for PowerShell. /// /// Note that these completions are based on what was produced for ripgrep <=13 /// using Clap 2.x. Improvements on this are welcome. 
pub(crate) fn generate() -> String { let mut flags = String::new(); for (i, flag) in FLAGS.iter().enumerate() { let doc = flag.doc_short().replace("'", "''"); let dash_name = format!("--{}", flag.name_long()); let name = flag.name_long(); if i > 0 { flags.push('\n'); } flags.push_str(" "); flags.push_str( &TEMPLATE_FLAG .replace("!DASH_NAME!", &dash_name) .replace("!NAME!", &name) .replace("!DOC!", &doc), ); if let Some(byte) = flag.name_short() { let dash_name = format!("-{}", char::from(byte)); let name = char::from(byte).to_string(); flags.push_str("\n "); flags.push_str( &TEMPLATE_FLAG .replace("!DASH_NAME!", &dash_name) .replace("!NAME!", &name) .replace("!DOC!", &doc), ); } if let Some(negated) = flag.name_negated() { let dash_name = format!("--{negated}"); flags.push_str("\n "); flags.push_str( &TEMPLATE_FLAG .replace("!DASH_NAME!", &dash_name) .replace("!NAME!", &negated) .replace("!DOC!", &doc), ); } } TEMPLATE.trim_start().replace("!FLAGS!", &flags) }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/core/flags/doc/version.rs
crates/core/flags/doc/version.rs
/*! Provides routines for generating version strings. Version strings can be just the digits, an overall short one-line description or something more verbose that includes things like CPU target feature support. */ use std::fmt::Write; /// Generates just the numerical part of the version of ripgrep. /// /// This includes the git revision hash. pub(crate) fn generate_digits() -> String { let semver = option_env!("CARGO_PKG_VERSION").unwrap_or("N/A"); match option_env!("RIPGREP_BUILD_GIT_HASH") { None => semver.to_string(), Some(hash) => format!("{semver} (rev {hash})"), } } /// Generates a short version string of the form `ripgrep x.y.z`. pub(crate) fn generate_short() -> String { let digits = generate_digits(); format!("ripgrep {digits}") } /// Generates a longer multi-line version string. /// /// This includes not only the version of ripgrep but some other information /// about its build. For example, SIMD support and PCRE2 support. pub(crate) fn generate_long() -> String { let (compile, runtime) = (compile_cpu_features(), runtime_cpu_features()); let mut out = String::new(); writeln!(out, "{}", generate_short()).unwrap(); writeln!(out).unwrap(); writeln!(out, "features:{}", features().join(",")).unwrap(); if !compile.is_empty() { writeln!(out, "simd(compile):{}", compile.join(",")).unwrap(); } if !runtime.is_empty() { writeln!(out, "simd(runtime):{}", runtime.join(",")).unwrap(); } let (pcre2_version, _) = generate_pcre2(); writeln!(out, "\n{pcre2_version}").unwrap(); out } /// Generates multi-line version string with PCRE2 information. /// /// This also returns whether PCRE2 is actually available in this build of /// ripgrep. 
pub(crate) fn generate_pcre2() -> (String, bool) { let mut out = String::new(); #[cfg(feature = "pcre2")] { use grep::pcre2; let (major, minor) = pcre2::version(); write!(out, "PCRE2 {}.{} is available", major, minor).unwrap(); if cfg!(target_pointer_width = "64") && pcre2::is_jit_available() { writeln!(out, " (JIT is available)").unwrap(); } else { writeln!(out, " (JIT is unavailable)").unwrap(); } (out, true) } #[cfg(not(feature = "pcre2"))] { writeln!(out, "PCRE2 is not available in this build of ripgrep.") .unwrap(); (out, false) } } /// Returns the relevant SIMD features supported by the CPU at runtime. /// /// This is kind of a dirty violation of abstraction, since it assumes /// knowledge about what specific SIMD features are being used by various /// components. fn runtime_cpu_features() -> Vec<String> { #[cfg(target_arch = "x86_64")] { let mut features = vec![]; let sse2 = is_x86_feature_detected!("sse2"); features.push(format!("{sign}SSE2", sign = sign(sse2))); let ssse3 = is_x86_feature_detected!("ssse3"); features.push(format!("{sign}SSSE3", sign = sign(ssse3))); let avx2 = is_x86_feature_detected!("avx2"); features.push(format!("{sign}AVX2", sign = sign(avx2))); features } #[cfg(target_arch = "aarch64")] { let mut features = vec![]; // memchr and aho-corasick only use NEON when it is available at // compile time. This isn't strictly necessary, but NEON is supposed // to be available for all aarch64 targets. If this isn't true, please // file an issue at https://github.com/BurntSushi/memchr. let neon = cfg!(target_feature = "neon"); features.push(format!("{sign}NEON", sign = sign(neon))); features } #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] { vec![] } } /// Returns the SIMD features supported while compiling ripgrep. /// /// In essence, any features listed here are required to run ripgrep correctly. 
/// /// This is kind of a dirty violation of abstraction, since it assumes /// knowledge about what specific SIMD features are being used by various /// components. /// /// An easy way to enable everything available on your current CPU is to /// compile ripgrep with `RUSTFLAGS="-C target-cpu=native"`. But note that /// the binary produced by this will not be portable. fn compile_cpu_features() -> Vec<String> { #[cfg(target_arch = "x86_64")] { let mut features = vec![]; let sse2 = cfg!(target_feature = "sse2"); features.push(format!("{sign}SSE2", sign = sign(sse2))); let ssse3 = cfg!(target_feature = "ssse3"); features.push(format!("{sign}SSSE3", sign = sign(ssse3))); let avx2 = cfg!(target_feature = "avx2"); features.push(format!("{sign}AVX2", sign = sign(avx2))); features } #[cfg(target_arch = "aarch64")] { let mut features = vec![]; let neon = cfg!(target_feature = "neon"); features.push(format!("{sign}NEON", sign = sign(neon))); features } #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] { vec![] } } /// Returns a list of "features" supported (or not) by this build of ripgrpe. fn features() -> Vec<String> { let mut features = vec![]; let pcre2 = cfg!(feature = "pcre2"); features.push(format!("{sign}pcre2", sign = sign(pcre2))); features } /// Returns `+` when `enabled` is `true` and `-` otherwise. fn sign(enabled: bool) -> &'static str { if enabled { "+" } else { "-" } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/core/flags/doc/man.rs
crates/core/flags/doc/man.rs
/*! Provides routines for generating ripgrep's man page in `roff` format. */ use std::{collections::BTreeMap, fmt::Write}; use crate::flags::{Flag, defs::FLAGS, doc::version}; const TEMPLATE: &'static str = include_str!("template.rg.1"); /// Wraps `std::write!` and asserts there is no failure. /// /// We only write to `String` in this module. macro_rules! write { ($($tt:tt)*) => { std::write!($($tt)*).unwrap(); } } /// Wraps `std::writeln!` and asserts there is no failure. /// /// We only write to `String` in this module. macro_rules! writeln { ($($tt:tt)*) => { std::writeln!($($tt)*).unwrap(); } } /// Returns a `roff` formatted string corresponding to ripgrep's entire man /// page. pub(crate) fn generate() -> String { let mut cats = BTreeMap::new(); for flag in FLAGS.iter().copied() { let mut cat = cats.entry(flag.doc_category()).or_insert(String::new()); if !cat.is_empty() { writeln!(cat, ".sp"); } generate_flag(flag, &mut cat); } let mut out = TEMPLATE.replace("!!VERSION!!", &version::generate_digits()); for (cat, value) in cats.iter() { let var = format!("!!{name}!!", name = cat.as_str()); out = out.replace(&var, value); } out } /// Writes `roff` formatted documentation for `flag` to `out`. fn generate_flag(flag: &'static dyn Flag, out: &mut String) { if let Some(byte) = flag.name_short() { let name = char::from(byte); write!(out, r"\fB\-{name}\fP"); if let Some(var) = flag.doc_variable() { write!(out, r" \fI{var}\fP"); } write!(out, r", "); } let name = flag.name_long().replace("-", r"\-"); write!(out, r"\fB\-\-{name}\fP"); if let Some(var) = flag.doc_variable() { write!(out, r"=\fI{var}\fP"); } write!(out, "\n"); writeln!(out, ".RS 4"); let doc = flag.doc_long().trim(); // Convert \flag{foo} into something nicer. 
let doc = super::render_custom_markup(doc, "flag", |name, out| { let Some(flag) = crate::flags::parse::lookup(name) else { unreachable!(r"found unrecognized \flag{{{name}}} in roff docs") }; out.push_str(r"\fB"); if let Some(name) = flag.name_short() { write!(out, r"\-{}/", char::from(name)); } write!(out, r"\-\-{}", flag.name_long().replace("-", r"\-")); out.push_str(r"\fP"); }); // Convert \flag-negate{foo} into something nicer. let doc = super::render_custom_markup(&doc, "flag-negate", |name, out| { let Some(flag) = crate::flags::parse::lookup(name) else { unreachable!( r"found unrecognized \flag-negate{{{name}}} in roff docs" ) }; let Some(name) = flag.name_negated() else { let long = flag.name_long(); unreachable!( "found \\flag-negate{{{long}}} in roff docs but \ {long} does not have a negation" ); }; out.push_str(r"\fB"); write!(out, r"\-\-{name}"); out.push_str(r"\fP"); }); writeln!(out, "{doc}"); if let Some(negated) = flag.name_negated() { // Flags that can be negated that aren't switches, like // --context-separator, are somewhat weird. Because of that, the docs // for those flags should discuss the semantics of negation explicitly. // But for switches, the behavior is always the same. if flag.is_switch() { writeln!(out, ".sp"); writeln!( out, r"This flag can be disabled with \fB\-\-{negated}\fP." ); } } writeln!(out, ".RE"); }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/core/flags/doc/help.rs
crates/core/flags/doc/help.rs
/*! Provides routines for generating ripgrep's "short" and "long" help documentation. The short version is used when the `-h` flag is given, while the long version is used when the `--help` flag is given. */ use std::{collections::BTreeMap, fmt::Write}; use crate::flags::{Category, Flag, defs::FLAGS, doc::version}; const TEMPLATE_SHORT: &'static str = include_str!("template.short.help"); const TEMPLATE_LONG: &'static str = include_str!("template.long.help"); /// Wraps `std::write!` and asserts there is no failure. /// /// We only write to `String` in this module. macro_rules! write { ($($tt:tt)*) => { std::write!($($tt)*).unwrap(); } } /// Generate short documentation, i.e., for `-h`. pub(crate) fn generate_short() -> String { let mut cats: BTreeMap<Category, (Vec<String>, Vec<String>)> = BTreeMap::new(); let (mut maxcol1, mut maxcol2) = (0, 0); for flag in FLAGS.iter().copied() { let columns = cats.entry(flag.doc_category()).or_insert((vec![], vec![])); let (col1, col2) = generate_short_flag(flag); maxcol1 = maxcol1.max(col1.len()); maxcol2 = maxcol2.max(col2.len()); columns.0.push(col1); columns.1.push(col2); } let mut out = TEMPLATE_SHORT.replace("!!VERSION!!", &version::generate_digits()); for (cat, (col1, col2)) in cats.iter() { let var = format!("!!{name}!!", name = cat.as_str()); let val = format_short_columns(col1, col2, maxcol1, maxcol2); out = out.replace(&var, &val); } out } /// Generate short for a single flag. /// /// The first element corresponds to the flag name while the second element /// corresponds to the documentation string. fn generate_short_flag(flag: &dyn Flag) -> (String, String) { let (mut col1, mut col2) = (String::new(), String::new()); // Some of the variable names are fine for longer form // docs, but they make the succinct short help very noisy. // So just shorten some of them. 
let var = flag.doc_variable().map(|s| { let mut s = s.to_string(); s = s.replace("SEPARATOR", "SEP"); s = s.replace("REPLACEMENT", "TEXT"); s = s.replace("NUM+SUFFIX?", "NUM"); s }); // Generate the first column, the flag name. if let Some(byte) = flag.name_short() { let name = char::from(byte); write!(col1, r"-{name}"); write!(col1, r", "); } write!(col1, r"--{name}", name = flag.name_long()); if let Some(var) = var.as_ref() { write!(col1, r"={var}"); } // And now the second column, with the description. write!(col2, "{}", flag.doc_short()); (col1, col2) } /// Write two columns of documentation. /// /// `maxcol1` should be the maximum length (in bytes) of the first column, /// while `maxcol2` should be the maximum length (in bytes) of the second /// column. fn format_short_columns( col1: &[String], col2: &[String], maxcol1: usize, _maxcol2: usize, ) -> String { assert_eq!(col1.len(), col2.len(), "columns must have equal length"); const PAD: usize = 2; let mut out = String::new(); for (i, (c1, c2)) in col1.iter().zip(col2.iter()).enumerate() { if i > 0 { write!(out, "\n"); } let pad = maxcol1 - c1.len() + PAD; write!(out, " "); write!(out, "{c1}"); write!(out, "{}", " ".repeat(pad)); write!(out, "{c2}"); } out } /// Generate long documentation, i.e., for `--help`. pub(crate) fn generate_long() -> String { let mut cats = BTreeMap::new(); for flag in FLAGS.iter().copied() { let mut cat = cats.entry(flag.doc_category()).or_insert(String::new()); if !cat.is_empty() { write!(cat, "\n\n"); } generate_long_flag(flag, &mut cat); } let mut out = TEMPLATE_LONG.replace("!!VERSION!!", &version::generate_digits()); for (cat, value) in cats.iter() { let var = format!("!!{name}!!", name = cat.as_str()); out = out.replace(&var, value); } out } /// Write generated documentation for `flag` to `out`. 
fn generate_long_flag(flag: &dyn Flag, out: &mut String) { if let Some(byte) = flag.name_short() { let name = char::from(byte); write!(out, r" -{name}"); if let Some(var) = flag.doc_variable() { write!(out, r" {var}"); } write!(out, r", "); } else { write!(out, r" "); } let name = flag.name_long(); write!(out, r"--{name}"); if let Some(var) = flag.doc_variable() { write!(out, r"={var}"); } write!(out, "\n"); let doc = flag.doc_long().trim(); let doc = super::render_custom_markup(doc, "flag", |name, out| { let Some(flag) = crate::flags::parse::lookup(name) else { unreachable!(r"found unrecognized \flag{{{name}}} in --help docs") }; if let Some(name) = flag.name_short() { write!(out, r"-{}/", char::from(name)); } write!(out, r"--{}", flag.name_long()); }); let doc = super::render_custom_markup(&doc, "flag-negate", |name, out| { let Some(flag) = crate::flags::parse::lookup(name) else { unreachable!( r"found unrecognized \flag-negate{{{name}}} in --help docs" ) }; let Some(name) = flag.name_negated() else { let long = flag.name_long(); unreachable!( "found \\flag-negate{{{long}}} in --help docs but \ {long} does not have a negation" ); }; write!(out, r"--{name}"); }); let mut cleaned = remove_roff(&doc); if let Some(negated) = flag.name_negated() { // Flags that can be negated that aren't switches, like // --context-separator, are somewhat weird. Because of that, the docs // for those flags should discuss the semantics of negation explicitly. // But for switches, the behavior is always the same. if flag.is_switch() { write!(cleaned, "\n\nThis flag can be disabled with --{negated}."); } } let indent = " ".repeat(8); let wrapopts = textwrap::Options::new(71) // Normally I'd be fine with breaking at hyphens, but ripgrep's docs // includes a lot of flag names, and they in turn contain hyphens. // Breaking flag names across lines is not great. 
.word_splitter(textwrap::WordSplitter::NoHyphenation); for (i, paragraph) in cleaned.split("\n\n").enumerate() { if i > 0 { write!(out, "\n\n"); } let mut new = paragraph.to_string(); if paragraph.lines().all(|line| line.starts_with(" ")) { // Re-indent but don't refill so as to preserve line breaks // in code/shell example snippets. new = textwrap::indent(&new, &indent); } else { new = new.replace("\n", " "); new = textwrap::refill(&new, &wrapopts); new = textwrap::indent(&new, &indent); } write!(out, "{}", new.trim_end()); } } /// Removes roff syntax from `v` such that the result is approximately plain /// text readable. /// /// This is basically a mish mash of heuristics based on the specific roff used /// in the docs for the flags in this tool. If new kinds of roff are used in /// the docs, then this may need to be updated to handle them. fn remove_roff(v: &str) -> String { let mut lines = vec![]; for line in v.trim().lines() { assert!(!line.is_empty(), "roff should have no empty lines"); if line.starts_with(".") { if line.starts_with(".IP ") { let item_label = line .split(" ") .nth(1) .expect("first argument to .IP") .replace(r"\(bu", r"•") .replace(r"\fB", "") .replace(r"\fP", ":"); lines.push(format!("{item_label}")); } else if line.starts_with(".IB ") || line.starts_with(".BI ") { let pieces = line .split_whitespace() .skip(1) .collect::<Vec<_>>() .concat(); lines.push(format!("{pieces}")); } else if line.starts_with(".sp") || line.starts_with(".PP") || line.starts_with(".TP") { lines.push("".to_string()); } } else if line.starts_with(r"\fB") && line.ends_with(r"\fP") { let line = line.replace(r"\fB", "").replace(r"\fP", ""); lines.push(format!("{line}:")); } else { lines.push(line.to_string()); } } // Squash multiple adjacent paragraph breaks into one. lines.dedup_by(|l1, l2| l1.is_empty() && l2.is_empty()); lines .join("\n") .replace(r"\fB", "") .replace(r"\fI", "") .replace(r"\fP", "") .replace(r"\-", "-") .replace(r"\\", r"\") }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/core/flags/doc/mod.rs
crates/core/flags/doc/mod.rs
/*! Modules for generating documentation for ripgrep's flags. */ pub(crate) mod help; pub(crate) mod man; pub(crate) mod version; /// Searches for `\tag{...}` occurrences in `doc` and calls `replacement` for /// each such tag found. /// /// The first argument given to `replacement` is the tag value, `...`. The /// second argument is the buffer that accumulates the full replacement text. /// /// Since this function is only intended to be used on doc strings written into /// the program source code, callers should panic in `replacement` if there are /// any errors or unexpected circumstances. fn render_custom_markup( mut doc: &str, tag: &str, mut replacement: impl FnMut(&str, &mut String), ) -> String { let mut out = String::with_capacity(doc.len()); let tag_prefix = format!(r"\{tag}{{"); while let Some(offset) = doc.find(&tag_prefix) { out.push_str(&doc[..offset]); let start = offset + tag_prefix.len(); let Some(end) = doc[start..].find('}').map(|i| start + i) else { unreachable!(r"found {tag_prefix} without closing }}"); }; let name = &doc[start..end]; replacement(name, &mut out); doc = &doc[end + 1..]; } out.push_str(doc); out }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/cli/src/lib.rs
crates/cli/src/lib.rs
/*! This crate provides common routines used in command line applications, with a focus on routines useful for search oriented applications. As a utility library, there is no central type or function. However, a key focus of this crate is to improve failure modes and provide user friendly error messages when things go wrong. To the best extent possible, everything in this crate works on Windows, macOS and Linux. # Standard I/O [`is_readable_stdin`] determines whether stdin can be usefully read from. It is useful when writing an application that changes behavior based on whether the application was invoked with data on stdin. For example, `rg foo` might recursively search the current working directory for occurrences of `foo`, but `rg foo < file` might only search the contents of `file`. # Coloring and buffering The [`stdout`], [`stdout_buffered_block`] and [`stdout_buffered_line`] routines are alternative constructors for [`StandardStream`]. A `StandardStream` implements `termcolor::WriteColor`, which provides a way to emit colors to terminals. Its key use is the encapsulation of buffering style. Namely, `stdout` will return a line buffered `StandardStream` if and only if stdout is connected to a tty, and will otherwise return a block buffered `StandardStream`. Line buffering is important for use with a tty because it typically decreases the latency at which the end user sees output. Block buffering is used otherwise because it is faster, and redirecting stdout to a file typically doesn't benefit from the decreased latency that line buffering provides. The `stdout_buffered_block` and `stdout_buffered_line` can be used to explicitly set the buffering strategy regardless of whether stdout is connected to a tty or not. # Escaping The [`escape`](crate::escape()), [`escape_os`], [`unescape`] and [`unescape_os`] routines provide a user friendly way of dealing with UTF-8 encoded strings that can express arbitrary bytes. 
For example, you might want to accept a string containing arbitrary bytes as a command line argument, but most interactive shells make such strings difficult to type. Instead, we can ask users to use escape sequences. For example, `a\xFFz` is itself a valid UTF-8 string corresponding to the following bytes: ```ignore [b'a', b'\\', b'x', b'F', b'F', b'z'] ``` However, we can interpret `\xFF` as an escape sequence with the `unescape`/`unescape_os` routines, which will yield ```ignore [b'a', b'\xFF', b'z'] ``` instead. For example: ``` use grep_cli::unescape; // Note the use of a raw string! assert_eq!(vec![b'a', b'\xFF', b'z'], unescape(r"a\xFFz")); ``` The `escape`/`escape_os` routines provide the reverse transformation, which makes it easy to show user friendly error messages involving arbitrary bytes. # Building patterns Typically, regular expression patterns must be valid UTF-8. However, command line arguments aren't guaranteed to be valid UTF-8. Unfortunately, the standard library's UTF-8 conversion functions from `OsStr`s do not provide good error messages. However, the [`pattern_from_bytes`] and [`pattern_from_os`] do, including reporting exactly where the first invalid UTF-8 byte is seen. Additionally, it can be useful to read patterns from a file while reporting good error messages that include line numbers. The [`patterns_from_path`], [`patterns_from_reader`] and [`patterns_from_stdin`] routines do just that. If any pattern is found that is invalid UTF-8, then the error includes the file path (if available) along with the line number and the byte offset at which the first invalid UTF-8 byte was observed. # Read process output Sometimes a command line application needs to execute other processes and read its stdout in a streaming fashion. The [`CommandReader`] provides this functionality with an explicit goal of improving failure modes. 
In particular, if the process exits with an error code, then stderr is read and converted into a normal Rust error to show to end users. This makes the underlying failure modes explicit and gives more information to end users for debugging the problem. As a special case, [`DecompressionReader`] provides a way to decompress arbitrary files by matching their file extensions up with corresponding decompression programs (such as `gzip` and `xz`). This is useful as a means of performing simplistic decompression in a portable manner without binding to specific compression libraries. This does come with some overhead though, so if you need to decompress lots of small files, this may not be an appropriate convenience to use. Each reader has a corresponding builder for additional configuration, such as whether to read stderr asynchronously in order to avoid deadlock (which is enabled by default). # Miscellaneous parsing The [`parse_human_readable_size`] routine parses strings like `2M` and converts them to the corresponding number of bytes (`2 * 1<<20` in this case). If an invalid size is found, then a good error message is crafted that typically tells the user how to fix the problem. */ #![deny(missing_docs)] mod decompress; mod escape; mod hostname; mod human; mod pattern; mod process; mod wtr; pub use crate::{ decompress::{ DecompressionMatcher, DecompressionMatcherBuilder, DecompressionReader, DecompressionReaderBuilder, resolve_binary, }, escape::{escape, escape_os, unescape, unescape_os}, hostname::hostname, human::{ParseSizeError, parse_human_readable_size}, pattern::{ InvalidPatternError, pattern_from_bytes, pattern_from_os, patterns_from_path, patterns_from_reader, patterns_from_stdin, }, process::{CommandError, CommandReader, CommandReaderBuilder}, wtr::{ StandardStream, stdout, stdout_buffered_block, stdout_buffered_line, }, }; /// Returns true if and only if stdin is believed to be readable. 
/// /// When stdin is readable, command line programs may choose to behave /// differently than when stdin is not readable. For example, `command foo` /// might search the current directory for occurrences of `foo` where as /// `command foo < some-file` or `cat some-file | command foo` might instead /// only search stdin for occurrences of `foo`. /// /// Note that this isn't perfect and essentially corresponds to a heuristic. /// When things are unclear (such as if an error occurs during introspection to /// determine whether stdin is readable), this prefers to return `false`. That /// means it's possible for an end user to pipe something into your program and /// have this return `false` and thus potentially lead to ignoring the user's /// stdin data. While not ideal, this is perhaps better than falsely assuming /// stdin is readable, which would result in blocking forever on reading stdin. /// Regardless, commands should always provide explicit fallbacks to override /// behavior. For example, `rg foo -` will explicitly search stdin and `rg foo /// ./` will explicitly search the current working directory. 
pub fn is_readable_stdin() -> bool { use std::io::IsTerminal; #[cfg(unix)] fn imp() -> bool { use std::{ fs::File, os::{fd::AsFd, unix::fs::FileTypeExt}, }; let stdin = std::io::stdin(); let fd = match stdin.as_fd().try_clone_to_owned() { Ok(fd) => fd, Err(err) => { log::debug!( "for heuristic stdin detection on Unix, \ could not clone stdin file descriptor \ (thus assuming stdin is not readable): {err}", ); return false; } }; let file = File::from(fd); let md = match file.metadata() { Ok(md) => md, Err(err) => { log::debug!( "for heuristic stdin detection on Unix, \ could not get file metadata for stdin \ (thus assuming stdin is not readable): {err}", ); return false; } }; let ft = md.file_type(); let is_file = ft.is_file(); let is_fifo = ft.is_fifo(); let is_socket = ft.is_socket(); let is_readable = is_file || is_fifo || is_socket; log::debug!( "for heuristic stdin detection on Unix, \ found that \ is_file={is_file}, is_fifo={is_fifo} and is_socket={is_socket}, \ and thus concluded that is_stdin_readable={is_readable}", ); is_readable } #[cfg(windows)] fn imp() -> bool { let stdin = winapi_util::HandleRef::stdin(); let typ = match winapi_util::file::typ(stdin) { Ok(typ) => typ, Err(err) => { log::debug!( "for heuristic stdin detection on Windows, \ could not get file type of stdin \ (thus assuming stdin is not readable): {err}", ); return false; } }; let is_disk = typ.is_disk(); let is_pipe = typ.is_pipe(); let is_readable = is_disk || is_pipe; log::debug!( "for heuristic stdin detection on Windows, \ found that is_disk={is_disk} and is_pipe={is_pipe}, \ and thus concluded that is_stdin_readable={is_readable}", ); is_readable } #[cfg(not(any(unix, windows)))] fn imp() -> bool { log::debug!("on non-{{Unix,Windows}}, assuming stdin is not readable"); false } !std::io::stdin().is_terminal() && imp() } /// Returns true if and only if stdin is believed to be connected to a tty /// or a console. 
/// /// Note that this is now just a wrapper around /// [`std::io::IsTerminal`](https://doc.rust-lang.org/std/io/trait.IsTerminal.html). /// Callers should prefer using the `IsTerminal` trait directly. This routine /// is deprecated and will be removed in the next semver incompatible release. #[deprecated(since = "0.1.10", note = "use std::io::IsTerminal instead")] pub fn is_tty_stdin() -> bool { use std::io::IsTerminal; std::io::stdin().is_terminal() } /// Returns true if and only if stdout is believed to be connected to a tty /// or a console. /// /// This is useful for when you want your command line program to produce /// different output depending on whether it's printing directly to a user's /// terminal or whether it's being redirected somewhere else. For example, /// implementations of `ls` will often show one item per line when stdout is /// redirected, but will condensed output when printing to a tty. /// /// Note that this is now just a wrapper around /// [`std::io::IsTerminal`](https://doc.rust-lang.org/std/io/trait.IsTerminal.html). /// Callers should prefer using the `IsTerminal` trait directly. This routine /// is deprecated and will be removed in the next semver incompatible release. #[deprecated(since = "0.1.10", note = "use std::io::IsTerminal instead")] pub fn is_tty_stdout() -> bool { use std::io::IsTerminal; std::io::stdout().is_terminal() } /// Returns true if and only if stderr is believed to be connected to a tty /// or a console. /// /// Note that this is now just a wrapper around /// [`std::io::IsTerminal`](https://doc.rust-lang.org/std/io/trait.IsTerminal.html). /// Callers should prefer using the `IsTerminal` trait directly. This routine /// is deprecated and will be removed in the next semver incompatible release. #[deprecated(since = "0.1.10", note = "use std::io::IsTerminal instead")] pub fn is_tty_stderr() -> bool { use std::io::IsTerminal; std::io::stderr().is_terminal() }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/cli/src/process.rs
crates/cli/src/process.rs
use std::{ io::{self, Read}, process, }; /// An error that can occur while running a command and reading its output. /// /// This error can be seamlessly converted to an `io::Error` via a `From` /// implementation. #[derive(Debug)] pub struct CommandError { kind: CommandErrorKind, } #[derive(Debug)] enum CommandErrorKind { Io(io::Error), Stderr(Vec<u8>), } impl CommandError { /// Create an error from an I/O error. pub(crate) fn io(ioerr: io::Error) -> CommandError { CommandError { kind: CommandErrorKind::Io(ioerr) } } /// Create an error from the contents of stderr (which may be empty). pub(crate) fn stderr(bytes: Vec<u8>) -> CommandError { CommandError { kind: CommandErrorKind::Stderr(bytes) } } /// Returns true if and only if this error has empty data from stderr. pub(crate) fn is_empty(&self) -> bool { match self.kind { CommandErrorKind::Stderr(ref bytes) => bytes.is_empty(), _ => false, } } } impl std::error::Error for CommandError {} impl std::fmt::Display for CommandError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.kind { CommandErrorKind::Io(ref e) => e.fmt(f), CommandErrorKind::Stderr(ref bytes) => { let msg = String::from_utf8_lossy(bytes); if msg.trim().is_empty() { write!(f, "<stderr is empty>") } else { let div = "-".repeat(79); write!( f, "\n{div}\n{msg}\n{div}", div = div, msg = msg.trim() ) } } } } } impl From<io::Error> for CommandError { fn from(ioerr: io::Error) -> CommandError { CommandError { kind: CommandErrorKind::Io(ioerr) } } } impl From<CommandError> for io::Error { fn from(cmderr: CommandError) -> io::Error { match cmderr.kind { CommandErrorKind::Io(ioerr) => ioerr, CommandErrorKind::Stderr(_) => { io::Error::new(io::ErrorKind::Other, cmderr) } } } } /// Configures and builds a streaming reader for process output. #[derive(Clone, Debug, Default)] pub struct CommandReaderBuilder { async_stderr: bool, } impl CommandReaderBuilder { /// Create a new builder with the default configuration. 
pub fn new() -> CommandReaderBuilder { CommandReaderBuilder::default() } /// Build a new streaming reader for the given command's output. /// /// The caller should set everything that's required on the given command /// before building a reader, such as its arguments, environment and /// current working directory. Settings such as the stdout and stderr (but /// not stdin) pipes will be overridden so that they can be controlled by /// the reader. /// /// If there was a problem spawning the given command, then its error is /// returned. pub fn build( &self, command: &mut process::Command, ) -> Result<CommandReader, CommandError> { let mut child = command .stdout(process::Stdio::piped()) .stderr(process::Stdio::piped()) .spawn()?; let stderr = if self.async_stderr { StderrReader::r#async(child.stderr.take().unwrap()) } else { StderrReader::sync(child.stderr.take().unwrap()) }; Ok(CommandReader { child, stderr, eof: false }) } /// When enabled, the reader will asynchronously read the contents of the /// command's stderr output. When disabled, stderr is only read after the /// stdout stream has been exhausted (or if the process quits with an error /// code). /// /// Note that when enabled, this may require launching an additional /// thread in order to read stderr. This is done so that the process being /// executed is never blocked from writing to stdout or stderr. If this is /// disabled, then it is possible for the process to fill up the stderr /// buffer and deadlock. /// /// This is enabled by default. pub fn async_stderr(&mut self, yes: bool) -> &mut CommandReaderBuilder { self.async_stderr = yes; self } } /// A streaming reader for a command's output. /// /// The purpose of this reader is to provide an easy way to execute processes /// whose stdout is read in a streaming way while also making the processes' /// stderr available when the process fails with an exit code. 
This makes it /// possible to execute processes while surfacing the underlying failure mode /// in the case of an error. /// /// Moreover, by default, this reader will asynchronously read the processes' /// stderr. This prevents subtle deadlocking bugs for noisy processes that /// write a lot to stderr. Currently, the entire contents of stderr is read /// on to the heap. /// /// # Example /// /// This example shows how to invoke `gzip` to decompress the contents of a /// file. If the `gzip` command reports a failing exit status, then its stderr /// is returned as an error. /// /// ```no_run /// use std::{io::Read, process::Command}; /// /// use grep_cli::CommandReader; /// /// let mut cmd = Command::new("gzip"); /// cmd.arg("-d").arg("-c").arg("/usr/share/man/man1/ls.1.gz"); /// /// let mut rdr = CommandReader::new(&mut cmd)?; /// let mut contents = vec![]; /// rdr.read_to_end(&mut contents)?; /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Debug)] pub struct CommandReader { child: process::Child, stderr: StderrReader, /// This is set to true once 'read' returns zero bytes. When this isn't /// set and we close the reader, then we anticipate a pipe error when /// reaping the child process and silence it. eof: bool, } impl CommandReader { /// Create a new streaming reader for the given command using the default /// configuration. /// /// The caller should set everything that's required on the given command /// before building a reader, such as its arguments, environment and /// current working directory. Settings such as the stdout and stderr (but /// not stdin) pipes will be overridden so that they can be controlled by /// the reader. /// /// If there was a problem spawning the given command, then its error is /// returned. /// /// If the caller requires additional configuration for the reader /// returned, then use [`CommandReaderBuilder`]. 
pub fn new( cmd: &mut process::Command, ) -> Result<CommandReader, CommandError> { CommandReaderBuilder::new().build(cmd) } /// Closes the CommandReader, freeing any resources used by its underlying /// child process. If the child process exits with a nonzero exit code, the /// returned Err value will include its stderr. /// /// `close` is idempotent, meaning it can be safely called multiple times. /// The first call closes the CommandReader and any subsequent calls do /// nothing. /// /// This method should be called after partially reading a file to prevent /// resource leakage. However there is no need to call `close` explicitly /// if your code always calls `read` to EOF, as `read` takes care of /// calling `close` in this case. /// /// `close` is also called in `drop` as a last line of defense against /// resource leakage. Any error from the child process is then printed as a /// warning to stderr. This can be avoided by explicitly calling `close` /// before the CommandReader is dropped. pub fn close(&mut self) -> io::Result<()> { // Dropping stdout closes the underlying file descriptor, which should // cause a well-behaved child process to exit. If child.stdout is None // we assume that close() has already been called and do nothing. let stdout = match self.child.stdout.take() { None => return Ok(()), Some(stdout) => stdout, }; drop(stdout); if self.child.wait()?.success() { Ok(()) } else { let err = self.stderr.read_to_end(); // In the specific case where we haven't consumed the full data // from the child process, then closing stdout above results in // a pipe signal being thrown in most cases. But I don't think // there is any reliable and portable way of detecting it. Instead, // if we know we haven't hit EOF (so we anticipate a broken pipe // error) and if stderr otherwise doesn't have anything on it, then // we assume total success. 
if !self.eof && err.is_empty() { return Ok(()); } Err(io::Error::from(err)) } } } impl Drop for CommandReader { fn drop(&mut self) { if let Err(error) = self.close() { log::warn!("{}", error); } } } impl io::Read for CommandReader { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { let stdout = match self.child.stdout { None => return Ok(0), Some(ref mut stdout) => stdout, }; let nread = stdout.read(buf)?; if nread == 0 { self.eof = true; self.close().map(|_| 0) } else { Ok(nread) } } } /// A reader that encapsulates the asynchronous or synchronous reading of /// stderr. #[derive(Debug)] enum StderrReader { Async(Option<std::thread::JoinHandle<CommandError>>), Sync(process::ChildStderr), } impl StderrReader { /// Create a reader for stderr that reads contents asynchronously. fn r#async(mut stderr: process::ChildStderr) -> StderrReader { let handle = std::thread::spawn(move || stderr_to_command_error(&mut stderr)); StderrReader::Async(Some(handle)) } /// Create a reader for stderr that reads contents synchronously. fn sync(stderr: process::ChildStderr) -> StderrReader { StderrReader::Sync(stderr) } /// Consumes all of stderr on to the heap and returns it as an error. /// /// If there was a problem reading stderr itself, then this returns an I/O /// command error. fn read_to_end(&mut self) -> CommandError { match *self { StderrReader::Async(ref mut handle) => { let handle = handle .take() .expect("read_to_end cannot be called more than once"); handle.join().expect("stderr reading thread does not panic") } StderrReader::Sync(ref mut stderr) => { stderr_to_command_error(stderr) } } } } fn stderr_to_command_error(stderr: &mut process::ChildStderr) -> CommandError { let mut bytes = vec![]; match stderr.read_to_end(&mut bytes) { Ok(_) => CommandError::stderr(bytes), Err(err) => CommandError::io(err), } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/cli/src/pattern.rs
crates/cli/src/pattern.rs
use std::{ffi::OsStr, io, path::Path}; use bstr::io::BufReadExt; use crate::escape::{escape, escape_os}; /// An error that occurs when a pattern could not be converted to valid UTF-8. /// /// The purpose of this error is to give a more targeted failure mode for /// patterns written by end users that are not valid UTF-8. #[derive(Clone, Debug, Eq, PartialEq)] pub struct InvalidPatternError { original: String, valid_up_to: usize, } impl InvalidPatternError { /// Returns the index in the given string up to which valid UTF-8 was /// verified. pub fn valid_up_to(&self) -> usize { self.valid_up_to } } impl std::error::Error for InvalidPatternError {} impl std::fmt::Display for InvalidPatternError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "found invalid UTF-8 in pattern at byte offset {}: {} \ (disable Unicode mode and use hex escape sequences to match \ arbitrary bytes in a pattern, e.g., '(?-u)\\xFF')", self.valid_up_to, self.original, ) } } impl From<InvalidPatternError> for io::Error { fn from(paterr: InvalidPatternError) -> io::Error { io::Error::new(io::ErrorKind::Other, paterr) } } /// Convert an OS string into a regular expression pattern. /// /// This conversion fails if the given pattern is not valid UTF-8, in which /// case, a targeted error with more information about where the invalid UTF-8 /// occurs is given. The error also suggests the use of hex escape sequences, /// which are supported by many regex engines. pub fn pattern_from_os(pattern: &OsStr) -> Result<&str, InvalidPatternError> { pattern.to_str().ok_or_else(|| { let valid_up_to = pattern .to_string_lossy() .find('\u{FFFD}') .expect("a Unicode replacement codepoint for invalid UTF-8"); InvalidPatternError { original: escape_os(pattern), valid_up_to } }) } /// Convert arbitrary bytes into a regular expression pattern. 
/// /// This conversion fails if the given pattern is not valid UTF-8, in which /// case, a targeted error with more information about where the invalid UTF-8 /// occurs is given. The error also suggests the use of hex escape sequences, /// which are supported by many regex engines. pub fn pattern_from_bytes( pattern: &[u8], ) -> Result<&str, InvalidPatternError> { std::str::from_utf8(pattern).map_err(|err| InvalidPatternError { original: escape(pattern), valid_up_to: err.valid_up_to(), }) } /// Read patterns from a file path, one per line. /// /// If there was a problem reading or if any of the patterns contain invalid /// UTF-8, then an error is returned. If there was a problem with a specific /// pattern, then the error message will include the line number and the file /// path. pub fn patterns_from_path<P: AsRef<Path>>(path: P) -> io::Result<Vec<String>> { let path = path.as_ref(); let file = std::fs::File::open(path).map_err(|err| { io::Error::new( io::ErrorKind::Other, format!("{}: {}", path.display(), err), ) })?; patterns_from_reader(file).map_err(|err| { io::Error::new( io::ErrorKind::Other, format!("{}:{}", path.display(), err), ) }) } /// Read patterns from stdin, one per line. /// /// If there was a problem reading or if any of the patterns contain invalid /// UTF-8, then an error is returned. If there was a problem with a specific /// pattern, then the error message will include the line number and the fact /// that it came from stdin. pub fn patterns_from_stdin() -> io::Result<Vec<String>> { let stdin = io::stdin(); let locked = stdin.lock(); patterns_from_reader(locked).map_err(|err| { io::Error::new(io::ErrorKind::Other, format!("<stdin>:{}", err)) }) } /// Read patterns from any reader, one per line. /// /// If there was a problem reading or if any of the patterns contain invalid /// UTF-8, then an error is returned. If there was a problem with a specific /// pattern, then the error message will include the line number. 
/// /// Note that this routine uses its own internal buffer, so the caller should /// not provide their own buffered reader if possible. /// /// # Example /// /// This shows how to parse patterns, one per line. /// /// ``` /// use grep_cli::patterns_from_reader; /// /// let patterns = "\ /// foo /// bar\\s+foo /// [a-z]{3} /// "; /// /// assert_eq!(patterns_from_reader(patterns.as_bytes())?, vec![ /// r"foo", /// r"bar\s+foo", /// r"[a-z]{3}", /// ]); /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` pub fn patterns_from_reader<R: io::Read>(rdr: R) -> io::Result<Vec<String>> { let mut patterns = vec![]; let mut line_number = 0; io::BufReader::new(rdr).for_byte_line(|line| { line_number += 1; match pattern_from_bytes(line) { Ok(pattern) => { patterns.push(pattern.to_string()); Ok(true) } Err(err) => Err(io::Error::new( io::ErrorKind::Other, format!("{}: {}", line_number, err), )), } })?; Ok(patterns) } #[cfg(test)] mod tests { use super::*; #[test] fn bytes() { let pat = b"abc\xFFxyz"; let err = pattern_from_bytes(pat).unwrap_err(); assert_eq!(3, err.valid_up_to()); } #[test] #[cfg(unix)] fn os() { use std::ffi::OsStr; use std::os::unix::ffi::OsStrExt; let pat = OsStr::from_bytes(b"abc\xFFxyz"); let err = pattern_from_os(pat).unwrap_err(); assert_eq!(3, err.valid_up_to()); } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/cli/src/hostname.rs
crates/cli/src/hostname.rs
use std::{ffi::OsString, io}; /// Returns the hostname of the current system. /// /// It is unusual, although technically possible, for this routine to return /// an error. It is difficult to list out the error conditions, but one such /// possibility is platform support. /// /// # Platform specific behavior /// /// On Windows, this currently uses the "physical DNS hostname" computer name. /// This may change in the future. /// /// On Unix, this returns the result of the `gethostname` function from the /// `libc` linked into the program. pub fn hostname() -> io::Result<OsString> { #[cfg(windows)] { use winapi_util::sysinfo::{ComputerNameKind, get_computer_name}; get_computer_name(ComputerNameKind::PhysicalDnsHostname) } #[cfg(unix)] { gethostname() } #[cfg(not(any(windows, unix)))] { Err(io::Error::new( io::ErrorKind::Other, "hostname could not be found on unsupported platform", )) } } #[cfg(unix)] fn gethostname() -> io::Result<OsString> { use std::os::unix::ffi::OsStringExt; // SAFETY: There don't appear to be any safety requirements for calling // sysconf. let limit = unsafe { libc::sysconf(libc::_SC_HOST_NAME_MAX) }; if limit == -1 { // It is in theory possible for sysconf to return -1 for a limit but // *not* set errno, in which case, io::Error::last_os_error is // indeterminate. But untangling that is super annoying because std // doesn't expose any unix-specific APIs for inspecting the errno. (We // could do it ourselves, but it just doesn't seem worth doing?) return Err(io::Error::last_os_error()); } let Ok(maxlen) = usize::try_from(limit) else { let msg = format!("host name max limit ({}) overflowed usize", limit); return Err(io::Error::new(io::ErrorKind::Other, msg)); }; // maxlen here includes the NUL terminator. let mut buf = vec![0; maxlen]; // SAFETY: The pointer we give is valid as it is derived directly from a // Vec. Similarly, `maxlen` is the length of our Vec, and is thus valid // to write to. 
let rc = unsafe { libc::gethostname(buf.as_mut_ptr().cast::<libc::c_char>(), maxlen) }; if rc == -1 { return Err(io::Error::last_os_error()); } // POSIX says that if the hostname is bigger than `maxlen`, then it may // write a truncate name back that is not necessarily NUL terminated (wtf, // lol). So if we can't find a NUL terminator, then just give up. let Some(zeropos) = buf.iter().position(|&b| b == 0) else { let msg = "could not find NUL terminator in hostname"; return Err(io::Error::new(io::ErrorKind::Other, msg)); }; buf.truncate(zeropos); buf.shrink_to_fit(); Ok(OsString::from_vec(buf)) } #[cfg(test)] mod tests { use super::*; #[test] fn print_hostname() { println!("{:?}", hostname().unwrap()); } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/cli/src/wtr.rs
crates/cli/src/wtr.rs
use std::io::{self, IsTerminal}; use termcolor::HyperlinkSpec; /// A writer that supports coloring with either line or block buffering. #[derive(Debug)] pub struct StandardStream(StandardStreamKind); /// Returns a possibly buffered writer to stdout for the given color choice. /// /// The writer returned is either line buffered or block buffered. The decision /// between these two is made automatically based on whether a tty is attached /// to stdout or not. If a tty is attached, then line buffering is used. /// Otherwise, block buffering is used. In general, block buffering is more /// efficient, but may increase the time it takes for the end user to see the /// first bits of output. /// /// If you need more fine grained control over the buffering mode, then use one /// of `stdout_buffered_line` or `stdout_buffered_block`. /// /// The color choice given is passed along to the underlying writer. To /// completely disable colors in all cases, use `ColorChoice::Never`. pub fn stdout(color_choice: termcolor::ColorChoice) -> StandardStream { if std::io::stdout().is_terminal() { stdout_buffered_line(color_choice) } else { stdout_buffered_block(color_choice) } } /// Returns a line buffered writer to stdout for the given color choice. /// /// This writer is useful when printing results directly to a tty such that /// users see output as soon as it's written. The downside of this approach /// is that it can be slower, especially when there is a lot of output. /// /// You might consider using [`stdout`] instead, which chooses the buffering /// strategy automatically based on whether stdout is connected to a tty. pub fn stdout_buffered_line( color_choice: termcolor::ColorChoice, ) -> StandardStream { let out = termcolor::StandardStream::stdout(color_choice); StandardStream(StandardStreamKind::LineBuffered(out)) } /// Returns a block buffered writer to stdout for the given color choice. 
/// /// This writer is useful when printing results to a file since it amortizes /// the cost of writing data. The downside of this approach is that it can /// increase the latency of display output when writing to a tty. /// /// You might consider using [`stdout`] instead, which chooses the buffering /// strategy automatically based on whether stdout is connected to a tty. pub fn stdout_buffered_block( color_choice: termcolor::ColorChoice, ) -> StandardStream { let out = termcolor::BufferedStandardStream::stdout(color_choice); StandardStream(StandardStreamKind::BlockBuffered(out)) } #[derive(Debug)] enum StandardStreamKind { LineBuffered(termcolor::StandardStream), BlockBuffered(termcolor::BufferedStandardStream), } impl io::Write for StandardStream { #[inline] fn write(&mut self, buf: &[u8]) -> io::Result<usize> { use self::StandardStreamKind::*; match self.0 { LineBuffered(ref mut w) => w.write(buf), BlockBuffered(ref mut w) => w.write(buf), } } #[inline] fn flush(&mut self) -> io::Result<()> { use self::StandardStreamKind::*; match self.0 { LineBuffered(ref mut w) => w.flush(), BlockBuffered(ref mut w) => w.flush(), } } } impl termcolor::WriteColor for StandardStream { #[inline] fn supports_color(&self) -> bool { use self::StandardStreamKind::*; match self.0 { LineBuffered(ref w) => w.supports_color(), BlockBuffered(ref w) => w.supports_color(), } } #[inline] fn supports_hyperlinks(&self) -> bool { use self::StandardStreamKind::*; match self.0 { LineBuffered(ref w) => w.supports_hyperlinks(), BlockBuffered(ref w) => w.supports_hyperlinks(), } } #[inline] fn set_color(&mut self, spec: &termcolor::ColorSpec) -> io::Result<()> { use self::StandardStreamKind::*; match self.0 { LineBuffered(ref mut w) => w.set_color(spec), BlockBuffered(ref mut w) => w.set_color(spec), } } #[inline] fn set_hyperlink(&mut self, link: &HyperlinkSpec) -> io::Result<()> { use self::StandardStreamKind::*; match self.0 { LineBuffered(ref mut w) => w.set_hyperlink(link), BlockBuffered(ref 
mut w) => w.set_hyperlink(link), } } #[inline] fn reset(&mut self) -> io::Result<()> { use self::StandardStreamKind::*; match self.0 { LineBuffered(ref mut w) => w.reset(), BlockBuffered(ref mut w) => w.reset(), } } #[inline] fn is_synchronous(&self) -> bool { use self::StandardStreamKind::*; match self.0 { LineBuffered(ref w) => w.is_synchronous(), BlockBuffered(ref w) => w.is_synchronous(), } } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/cli/src/human.rs
crates/cli/src/human.rs
/// An error that occurs when parsing a human readable size description. /// /// This error provides an end user friendly message describing why the /// description couldn't be parsed and what the expected format is. #[derive(Clone, Debug, Eq, PartialEq)] pub struct ParseSizeError { original: String, kind: ParseSizeErrorKind, } #[derive(Clone, Debug, Eq, PartialEq)] enum ParseSizeErrorKind { InvalidFormat, InvalidInt(std::num::ParseIntError), Overflow, } impl ParseSizeError { fn format(original: &str) -> ParseSizeError { ParseSizeError { original: original.to_string(), kind: ParseSizeErrorKind::InvalidFormat, } } fn int(original: &str, err: std::num::ParseIntError) -> ParseSizeError { ParseSizeError { original: original.to_string(), kind: ParseSizeErrorKind::InvalidInt(err), } } fn overflow(original: &str) -> ParseSizeError { ParseSizeError { original: original.to_string(), kind: ParseSizeErrorKind::Overflow, } } } impl std::error::Error for ParseSizeError {} impl std::fmt::Display for ParseSizeError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { use self::ParseSizeErrorKind::*; match self.kind { InvalidFormat => write!( f, "invalid format for size '{}', which should be a non-empty \ sequence of digits followed by an optional 'K', 'M' or 'G' \ suffix", self.original ), InvalidInt(ref err) => write!( f, "invalid integer found in size '{}': {}", self.original, err ), Overflow => write!(f, "size too big in '{}'", self.original), } } } impl From<ParseSizeError> for std::io::Error { fn from(size_err: ParseSizeError) -> std::io::Error { std::io::Error::new(std::io::ErrorKind::Other, size_err) } } /// Parse a human readable size like `2M` into a corresponding number of bytes. /// /// Supported size suffixes are `K` (for kilobyte), `M` (for megabyte) and `G` /// (for gigabyte). If a size suffix is missing, then the size is interpreted /// as bytes. If the size is too big to fit into a `u64`, then this returns an /// error. 
/// /// Additional suffixes may be added over time. pub fn parse_human_readable_size(size: &str) -> Result<u64, ParseSizeError> { let digits_end = size.as_bytes().iter().take_while(|&b| b.is_ascii_digit()).count(); let digits = &size[..digits_end]; if digits.is_empty() { return Err(ParseSizeError::format(size)); } let value = digits.parse::<u64>().map_err(|e| ParseSizeError::int(size, e))?; let suffix = &size[digits_end..]; if suffix.is_empty() { return Ok(value); } let bytes = match suffix { "K" => value.checked_mul(1 << 10), "M" => value.checked_mul(1 << 20), "G" => value.checked_mul(1 << 30), _ => return Err(ParseSizeError::format(size)), }; bytes.ok_or_else(|| ParseSizeError::overflow(size)) } #[cfg(test)] mod tests { use super::*; #[test] fn suffix_none() { let x = parse_human_readable_size("123").unwrap(); assert_eq!(123, x); } #[test] fn suffix_k() { let x = parse_human_readable_size("123K").unwrap(); assert_eq!(123 * (1 << 10), x); } #[test] fn suffix_m() { let x = parse_human_readable_size("123M").unwrap(); assert_eq!(123 * (1 << 20), x); } #[test] fn suffix_g() { let x = parse_human_readable_size("123G").unwrap(); assert_eq!(123 * (1 << 30), x); } #[test] fn invalid_empty() { assert!(parse_human_readable_size("").is_err()); } #[test] fn invalid_non_digit() { assert!(parse_human_readable_size("a").is_err()); } #[test] fn invalid_overflow() { assert!(parse_human_readable_size("9999999999999999G").is_err()); } #[test] fn invalid_suffix() { assert!(parse_human_readable_size("123T").is_err()); } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/cli/src/decompress.rs
crates/cli/src/decompress.rs
use std::{ ffi::{OsStr, OsString}, fs::File, io, path::{Path, PathBuf}, process::Command, }; use globset::{Glob, GlobSet, GlobSetBuilder}; use crate::process::{CommandError, CommandReader, CommandReaderBuilder}; /// A builder for a matcher that determines which files get decompressed. #[derive(Clone, Debug)] pub struct DecompressionMatcherBuilder { /// The commands for each matching glob. commands: Vec<DecompressionCommand>, /// Whether to include the default matching rules. defaults: bool, } /// A representation of a single command for decompressing data /// out-of-process. #[derive(Clone, Debug)] struct DecompressionCommand { /// The glob that matches this command. glob: String, /// The command or binary name. bin: PathBuf, /// The arguments to invoke with the command. args: Vec<OsString>, } impl Default for DecompressionMatcherBuilder { fn default() -> DecompressionMatcherBuilder { DecompressionMatcherBuilder::new() } } impl DecompressionMatcherBuilder { /// Create a new builder for configuring a decompression matcher. pub fn new() -> DecompressionMatcherBuilder { DecompressionMatcherBuilder { commands: vec![], defaults: true } } /// Build a matcher for determining how to decompress files. /// /// If there was a problem compiling the matcher, then an error is /// returned. 
pub fn build(&self) -> Result<DecompressionMatcher, CommandError> { let defaults = if !self.defaults { vec![] } else { default_decompression_commands() }; let mut glob_builder = GlobSetBuilder::new(); let mut commands = vec![]; for decomp_cmd in defaults.iter().chain(&self.commands) { let glob = Glob::new(&decomp_cmd.glob).map_err(|err| { CommandError::io(io::Error::new(io::ErrorKind::Other, err)) })?; glob_builder.add(glob); commands.push(decomp_cmd.clone()); } let globs = glob_builder.build().map_err(|err| { CommandError::io(io::Error::new(io::ErrorKind::Other, err)) })?; Ok(DecompressionMatcher { globs, commands }) } /// When enabled, the default matching rules will be compiled into this /// matcher before any other associations. When disabled, only the /// rules explicitly given to this builder will be used. /// /// This is enabled by default. pub fn defaults(&mut self, yes: bool) -> &mut DecompressionMatcherBuilder { self.defaults = yes; self } /// Associates a glob with a command to decompress files matching the glob. /// /// If multiple globs match the same file, then the most recently added /// glob takes precedence. /// /// The syntax for the glob is documented in the /// [`globset` crate](https://docs.rs/globset/#syntax). /// /// The `program` given is resolved with respect to `PATH` and turned /// into an absolute path internally before being executed by the current /// platform. Notably, on Windows, this avoids a security problem where /// passing a relative path to `CreateProcess` will automatically search /// the current directory for a matching program. If the program could /// not be resolved, then it is silently ignored and the association is /// dropped. For this reason, callers should prefer `try_associate`. 
pub fn associate<P, I, A>( &mut self, glob: &str, program: P, args: I, ) -> &mut DecompressionMatcherBuilder where P: AsRef<OsStr>, I: IntoIterator<Item = A>, A: AsRef<OsStr>, { let _ = self.try_associate(glob, program, args); self } /// Associates a glob with a command to decompress files matching the glob. /// /// If multiple globs match the same file, then the most recently added /// glob takes precedence. /// /// The syntax for the glob is documented in the /// [`globset` crate](https://docs.rs/globset/#syntax). /// /// The `program` given is resolved with respect to `PATH` and turned /// into an absolute path internally before being executed by the current /// platform. Notably, on Windows, this avoids a security problem where /// passing a relative path to `CreateProcess` will automatically search /// the current directory for a matching program. If the program could not /// be resolved, then an error is returned. pub fn try_associate<P, I, A>( &mut self, glob: &str, program: P, args: I, ) -> Result<&mut DecompressionMatcherBuilder, CommandError> where P: AsRef<OsStr>, I: IntoIterator<Item = A>, A: AsRef<OsStr>, { let glob = glob.to_string(); let bin = try_resolve_binary(Path::new(program.as_ref()))?; let args = args.into_iter().map(|a| a.as_ref().to_os_string()).collect(); self.commands.push(DecompressionCommand { glob, bin, args }); Ok(self) } } /// A matcher for determining how to decompress files. #[derive(Clone, Debug)] pub struct DecompressionMatcher { /// The set of globs to match. Each glob has a corresponding entry in /// `commands`. When a glob matches, the corresponding command should be /// used to perform out-of-process decompression. globs: GlobSet, /// The commands for each matching glob. commands: Vec<DecompressionCommand>, } impl Default for DecompressionMatcher { fn default() -> DecompressionMatcher { DecompressionMatcher::new() } } impl DecompressionMatcher { /// Create a new matcher with default rules. 
/// /// To add more matching rules, build a matcher with /// [`DecompressionMatcherBuilder`]. pub fn new() -> DecompressionMatcher { DecompressionMatcherBuilder::new() .build() .expect("built-in matching rules should always compile") } /// Return a pre-built command based on the given file path that can /// decompress its contents. If no such decompressor is known, then this /// returns `None`. /// /// If there are multiple possible commands matching the given path, then /// the command added last takes precedence. pub fn command<P: AsRef<Path>>(&self, path: P) -> Option<Command> { if let Some(i) = self.globs.matches(path).into_iter().next_back() { let decomp_cmd = &self.commands[i]; let mut cmd = Command::new(&decomp_cmd.bin); cmd.args(&decomp_cmd.args); return Some(cmd); } None } /// Returns true if and only if the given file path has at least one /// matching command to perform decompression on. pub fn has_command<P: AsRef<Path>>(&self, path: P) -> bool { self.globs.is_match(path) } } /// Configures and builds a streaming reader for decompressing data. #[derive(Clone, Debug, Default)] pub struct DecompressionReaderBuilder { matcher: DecompressionMatcher, command_builder: CommandReaderBuilder, } impl DecompressionReaderBuilder { /// Create a new builder with the default configuration. pub fn new() -> DecompressionReaderBuilder { DecompressionReaderBuilder::default() } /// Build a new streaming reader for decompressing data. /// /// If decompression is done out-of-process and if there was a problem /// spawning the process, then its error is logged at the debug level and a /// passthru reader is returned that does no decompression. This behavior /// typically occurs when the given file path matches a decompression /// command, but is executing in an environment where the decompression /// command is not available. 
/// /// If the given file path could not be matched with a decompression /// strategy, then a passthru reader is returned that does no /// decompression. pub fn build<P: AsRef<Path>>( &self, path: P, ) -> Result<DecompressionReader, CommandError> { let path = path.as_ref(); let Some(mut cmd) = self.matcher.command(path) else { return DecompressionReader::new_passthru(path); }; cmd.arg(path); match self.command_builder.build(&mut cmd) { Ok(cmd_reader) => Ok(DecompressionReader { rdr: Ok(cmd_reader) }), Err(err) => { log::debug!( "{}: error spawning command '{:?}': {} \ (falling back to uncompressed reader)", path.display(), cmd, err, ); DecompressionReader::new_passthru(path) } } } /// Set the matcher to use to look up the decompression command for each /// file path. /// /// A set of sensible rules is enabled by default. Setting this will /// completely replace the current rules. pub fn matcher( &mut self, matcher: DecompressionMatcher, ) -> &mut DecompressionReaderBuilder { self.matcher = matcher; self } /// Get the underlying matcher currently used by this builder. pub fn get_matcher(&self) -> &DecompressionMatcher { &self.matcher } /// When enabled, the reader will asynchronously read the contents of the /// command's stderr output. When disabled, stderr is only read after the /// stdout stream has been exhausted (or if the process quits with an error /// code). /// /// Note that when enabled, this may require launching an additional /// thread in order to read stderr. This is done so that the process being /// executed is never blocked from writing to stdout or stderr. If this is /// disabled, then it is possible for the process to fill up the stderr /// buffer and deadlock. /// /// This is enabled by default. pub fn async_stderr( &mut self, yes: bool, ) -> &mut DecompressionReaderBuilder { self.command_builder.async_stderr(yes); self } } /// A streaming reader for decompressing the contents of a file. 
/// /// The purpose of this reader is to provide a seamless way to decompress the /// contents of file using existing tools in the current environment. This is /// meant to be an alternative to using decompression libraries in favor of the /// simplicity and portability of using external commands such as `gzip` and /// `xz`. This does impose the overhead of spawning a process, so other means /// for performing decompression should be sought if this overhead isn't /// acceptable. /// /// A decompression reader comes with a default set of matching rules that are /// meant to associate file paths with the corresponding command to use to /// decompress them. For example, a glob like `*.gz` matches gzip compressed /// files with the command `gzip -d -c`. If a file path does not match any /// existing rules, or if it matches a rule whose command does not exist in the /// current environment, then the decompression reader passes through the /// contents of the underlying file without doing any decompression. /// /// The default matching rules are probably good enough for most cases, and if /// they require revision, pull requests are welcome. In cases where they must /// be changed or extended, they can be customized through the use of /// [`DecompressionMatcherBuilder`] and [`DecompressionReaderBuilder`]. /// /// By default, this reader will asynchronously read the processes' stderr. /// This prevents subtle deadlocking bugs for noisy processes that write a lot /// to stderr. Currently, the entire contents of stderr is read on to the heap. /// /// # Example /// /// This example shows how to read the decompressed contents of a file without /// needing to explicitly choose the decompression command to run. /// /// Note that if you need to decompress multiple files, it is better to use /// `DecompressionReaderBuilder`, which will amortize the cost of compiling the /// matcher. 
/// /// ```no_run /// use std::{io::Read, process::Command}; /// /// use grep_cli::DecompressionReader; /// /// let mut rdr = DecompressionReader::new("/usr/share/man/man1/ls.1.gz")?; /// let mut contents = vec![]; /// rdr.read_to_end(&mut contents)?; /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Debug)] pub struct DecompressionReader { rdr: Result<CommandReader, File>, } impl DecompressionReader { /// Build a new streaming reader for decompressing data. /// /// If decompression is done out-of-process and if there was a problem /// spawning the process, then its error is returned. /// /// If the given file path could not be matched with a decompression /// strategy, then a passthru reader is returned that does no /// decompression. /// /// This uses the default matching rules for determining how to decompress /// the given file. To change those matching rules, use /// [`DecompressionReaderBuilder`] and [`DecompressionMatcherBuilder`]. /// /// When creating readers for many paths. it is better to use the builder /// since it will amortize the cost of constructing the matcher. pub fn new<P: AsRef<Path>>( path: P, ) -> Result<DecompressionReader, CommandError> { DecompressionReaderBuilder::new().build(path) } /// Creates a new "passthru" decompression reader that reads from the file /// corresponding to the given path without doing decompression and without /// executing another process. fn new_passthru(path: &Path) -> Result<DecompressionReader, CommandError> { let file = File::open(path)?; Ok(DecompressionReader { rdr: Err(file) }) } /// Closes this reader, freeing any resources used by its underlying child /// process, if one was used. If the child process exits with a nonzero /// exit code, the returned Err value will include its stderr. /// /// `close` is idempotent, meaning it can be safely called multiple times. /// The first call closes the CommandReader and any subsequent calls do /// nothing. 
/// /// This method should be called after partially reading a file to prevent /// resource leakage. However there is no need to call `close` explicitly /// if your code always calls `read` to EOF, as `read` takes care of /// calling `close` in this case. /// /// `close` is also called in `drop` as a last line of defense against /// resource leakage. Any error from the child process is then printed as a /// warning to stderr. This can be avoided by explicitly calling `close` /// before the CommandReader is dropped. pub fn close(&mut self) -> io::Result<()> { match self.rdr { Ok(ref mut rdr) => rdr.close(), Err(_) => Ok(()), } } } impl io::Read for DecompressionReader { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { match self.rdr { Ok(ref mut rdr) => rdr.read(buf), Err(ref mut rdr) => rdr.read(buf), } } } /// Resolves a path to a program to a path by searching for the program in /// `PATH`. /// /// If the program could not be resolved, then an error is returned. /// /// The purpose of doing this instead of passing the path to the program /// directly to Command::new is that Command::new will hand relative paths /// to CreateProcess on Windows, which will implicitly search the current /// working directory for the executable. This could be undesirable for /// security reasons. e.g., running ripgrep with the -z/--search-zip flag on an /// untrusted directory tree could result in arbitrary programs executing on /// Windows. /// /// Note that this could still return a relative path if PATH contains a /// relative path. We permit this since it is assumed that the user has set /// this explicitly, and thus, desires this behavior. /// /// # Platform behavior /// /// On non-Windows, this is a no-op. pub fn resolve_binary<P: AsRef<Path>>( prog: P, ) -> Result<PathBuf, CommandError> { if !cfg!(windows) { return Ok(prog.as_ref().to_path_buf()); } try_resolve_binary(prog) } /// Resolves a path to a program to a path by searching for the program in /// `PATH`. 
/// /// If the program could not be resolved, then an error is returned. /// /// The purpose of doing this instead of passing the path to the program /// directly to Command::new is that Command::new will hand relative paths /// to CreateProcess on Windows, which will implicitly search the current /// working directory for the executable. This could be undesirable for /// security reasons. e.g., running ripgrep with the -z/--search-zip flag on an /// untrusted directory tree could result in arbitrary programs executing on /// Windows. /// /// Note that this could still return a relative path if PATH contains a /// relative path. We permit this since it is assumed that the user has set /// this explicitly, and thus, desires this behavior. /// /// If `check_exists` is false or the path is already an absolute path this /// will return immediately. fn try_resolve_binary<P: AsRef<Path>>( prog: P, ) -> Result<PathBuf, CommandError> { use std::env; fn is_exe(path: &Path) -> bool { let Ok(md) = path.metadata() else { return false }; !md.is_dir() } let prog = prog.as_ref(); if prog.is_absolute() { return Ok(prog.to_path_buf()); } let Some(syspaths) = env::var_os("PATH") else { let msg = "system PATH environment variable not found"; return Err(CommandError::io(io::Error::new( io::ErrorKind::Other, msg, ))); }; for syspath in env::split_paths(&syspaths) { if syspath.as_os_str().is_empty() { continue; } let abs_prog = syspath.join(prog); if is_exe(&abs_prog) { return Ok(abs_prog.to_path_buf()); } if abs_prog.extension().is_none() { for extension in ["com", "exe"] { let abs_prog = abs_prog.with_extension(extension); if is_exe(&abs_prog) { return Ok(abs_prog.to_path_buf()); } } } } let msg = format!("{}: could not find executable in PATH", prog.display()); return Err(CommandError::io(io::Error::new(io::ErrorKind::Other, msg))); } fn default_decompression_commands() -> Vec<DecompressionCommand> { const ARGS_GZIP: &[&str] = &["gzip", "-d", "-c"]; const ARGS_BZIP: &[&str] = 
&["bzip2", "-d", "-c"]; const ARGS_XZ: &[&str] = &["xz", "-d", "-c"]; const ARGS_LZ4: &[&str] = &["lz4", "-d", "-c"]; const ARGS_LZMA: &[&str] = &["xz", "--format=lzma", "-d", "-c"]; const ARGS_BROTLI: &[&str] = &["brotli", "-d", "-c"]; const ARGS_ZSTD: &[&str] = &["zstd", "-q", "-d", "-c"]; const ARGS_UNCOMPRESS: &[&str] = &["uncompress", "-c"]; fn add(glob: &str, args: &[&str], cmds: &mut Vec<DecompressionCommand>) { let bin = match resolve_binary(Path::new(args[0])) { Ok(bin) => bin, Err(err) => { log::debug!("{}", err); return; } }; cmds.push(DecompressionCommand { glob: glob.to_string(), bin, args: args .iter() .skip(1) .map(|s| OsStr::new(s).to_os_string()) .collect(), }); } let mut cmds = vec![]; add("*.gz", ARGS_GZIP, &mut cmds); add("*.tgz", ARGS_GZIP, &mut cmds); add("*.bz2", ARGS_BZIP, &mut cmds); add("*.tbz2", ARGS_BZIP, &mut cmds); add("*.xz", ARGS_XZ, &mut cmds); add("*.txz", ARGS_XZ, &mut cmds); add("*.lz4", ARGS_LZ4, &mut cmds); add("*.lzma", ARGS_LZMA, &mut cmds); add("*.br", ARGS_BROTLI, &mut cmds); add("*.zst", ARGS_ZSTD, &mut cmds); add("*.zstd", ARGS_ZSTD, &mut cmds); add("*.Z", ARGS_UNCOMPRESS, &mut cmds); cmds }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/cli/src/escape.rs
crates/cli/src/escape.rs
use std::ffi::OsStr; use bstr::{ByteSlice, ByteVec}; /// Escapes arbitrary bytes into a human readable string. /// /// This converts `\t`, `\r` and `\n` into their escaped forms. It also /// converts the non-printable subset of ASCII in addition to invalid UTF-8 /// bytes to hexadecimal escape sequences. Everything else is left as is. /// /// The dual of this routine is [`unescape`]. /// /// # Example /// /// This example shows how to convert a byte string that contains a `\n` and /// invalid UTF-8 bytes into a `String`. /// /// Pay special attention to the use of raw strings. That is, `r"\n"` is /// equivalent to `"\\n"`. /// /// ``` /// use grep_cli::escape; /// /// assert_eq!(r"foo\nbar\xFFbaz", escape(b"foo\nbar\xFFbaz")); /// ``` pub fn escape(bytes: &[u8]) -> String { bytes.escape_bytes().to_string() } /// Escapes an OS string into a human readable string. /// /// This is like [`escape`], but accepts an OS string. pub fn escape_os(string: &OsStr) -> String { escape(Vec::from_os_str_lossy(string).as_bytes()) } /// Unescapes a string. /// /// It supports a limited set of escape sequences: /// /// * `\t`, `\r` and `\n` are mapped to their corresponding ASCII bytes. /// * `\xZZ` hexadecimal escapes are mapped to their byte. /// /// Everything else is left as is, including non-hexadecimal escapes like /// `\xGG`. /// /// This is useful when it is desirable for a command line argument to be /// capable of specifying arbitrary bytes or otherwise make it easier to /// specify non-printable characters. /// /// The dual of this routine is [`escape`]. /// /// # Example /// /// This example shows how to convert an escaped string (which is valid UTF-8) /// into a corresponding sequence of bytes. Each escape sequence is mapped to /// its bytes, which may include invalid UTF-8. /// /// Pay special attention to the use of raw strings. That is, `r"\n"` is /// equivalent to `"\\n"`. 
/// /// ``` /// use grep_cli::unescape; /// /// assert_eq!(&b"foo\nbar\xFFbaz"[..], &*unescape(r"foo\nbar\xFFbaz")); /// ``` pub fn unescape(s: &str) -> Vec<u8> { Vec::unescape_bytes(s) } /// Unescapes an OS string. /// /// This is like [`unescape`], but accepts an OS string. /// /// Note that this first lossily decodes the given OS string as UTF-8. That /// is, an escaped string (the thing given) should be valid UTF-8. pub fn unescape_os(string: &OsStr) -> Vec<u8> { unescape(&string.to_string_lossy()) } #[cfg(test)] mod tests { use super::{escape, unescape}; fn b(bytes: &'static [u8]) -> Vec<u8> { bytes.to_vec() } #[test] fn empty() { assert_eq!(b(b""), unescape(r"")); assert_eq!(r"", escape(b"")); } #[test] fn backslash() { assert_eq!(b(b"\\"), unescape(r"\\")); assert_eq!(r"\\", escape(b"\\")); } #[test] fn nul() { assert_eq!(b(b"\x00"), unescape(r"\x00")); assert_eq!(b(b"\x00"), unescape(r"\0")); assert_eq!(r"\0", escape(b"\x00")); } #[test] fn nl() { assert_eq!(b(b"\n"), unescape(r"\n")); assert_eq!(r"\n", escape(b"\n")); } #[test] fn tab() { assert_eq!(b(b"\t"), unescape(r"\t")); assert_eq!(r"\t", escape(b"\t")); } #[test] fn carriage() { assert_eq!(b(b"\r"), unescape(r"\r")); assert_eq!(r"\r", escape(b"\r")); } #[test] fn nothing_simple() { assert_eq!(b(b"\\a"), unescape(r"\a")); assert_eq!(b(b"\\a"), unescape(r"\\a")); assert_eq!(r"\\a", escape(b"\\a")); } #[test] fn nothing_hex0() { assert_eq!(b(b"\\x"), unescape(r"\x")); assert_eq!(b(b"\\x"), unescape(r"\\x")); assert_eq!(r"\\x", escape(b"\\x")); } #[test] fn nothing_hex1() { assert_eq!(b(b"\\xz"), unescape(r"\xz")); assert_eq!(b(b"\\xz"), unescape(r"\\xz")); assert_eq!(r"\\xz", escape(b"\\xz")); } #[test] fn nothing_hex2() { assert_eq!(b(b"\\xzz"), unescape(r"\xzz")); assert_eq!(b(b"\\xzz"), unescape(r"\\xzz")); assert_eq!(r"\\xzz", escape(b"\\xzz")); } #[test] fn invalid_utf8() { assert_eq!(r"\xFF", escape(b"\xFF")); assert_eq!(r"a\xFFb", escape(b"a\xFFb")); } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/globset/src/serde_impl.rs
crates/globset/src/serde_impl.rs
use serde::{ de::{Error, SeqAccess, Visitor}, {Deserialize, Deserializer, Serialize, Serializer}, }; use crate::{Glob, GlobSet, GlobSetBuilder}; impl Serialize for Glob { fn serialize<S: Serializer>( &self, serializer: S, ) -> Result<S::Ok, S::Error> { serializer.serialize_str(self.glob()) } } struct GlobVisitor; impl<'de> Visitor<'de> for GlobVisitor { type Value = Glob; fn expecting( &self, formatter: &mut std::fmt::Formatter, ) -> std::fmt::Result { formatter.write_str("a glob pattern") } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: Error, { Glob::new(v).map_err(serde::de::Error::custom) } } impl<'de> Deserialize<'de> for Glob { fn deserialize<D: Deserializer<'de>>( deserializer: D, ) -> Result<Self, D::Error> { deserializer.deserialize_str(GlobVisitor) } } struct GlobSetVisitor; impl<'de> Visitor<'de> for GlobSetVisitor { type Value = GlobSet; fn expecting( &self, formatter: &mut std::fmt::Formatter, ) -> std::fmt::Result { formatter.write_str("an array of glob patterns") } fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error> where A: SeqAccess<'de>, { let mut builder = GlobSetBuilder::new(); while let Some(glob) = seq.next_element()? 
{ builder.add(glob); } builder.build().map_err(serde::de::Error::custom) } } impl<'de> Deserialize<'de> for GlobSet { fn deserialize<D: Deserializer<'de>>( deserializer: D, ) -> Result<Self, D::Error> { deserializer.deserialize_seq(GlobSetVisitor) } } #[cfg(test)] mod tests { use std::collections::HashMap; use crate::{Glob, GlobSet}; #[test] fn glob_deserialize_borrowed() { let string = r#"{"markdown": "*.md"}"#; let map: HashMap<String, Glob> = serde_json::from_str(&string).unwrap(); assert_eq!(map["markdown"], Glob::new("*.md").unwrap()); } #[test] fn glob_deserialize_owned() { let string = r#"{"markdown": "*.md"}"#; let v: serde_json::Value = serde_json::from_str(&string).unwrap(); let map: HashMap<String, Glob> = serde_json::from_value(v).unwrap(); assert_eq!(map["markdown"], Glob::new("*.md").unwrap()); } #[test] fn glob_deserialize_error() { let string = r#"{"error": "["}"#; let map = serde_json::from_str::<HashMap<String, Glob>>(&string); assert!(map.is_err()); } #[test] fn glob_json_works() { let test_glob = Glob::new("src/**/*.rs").unwrap(); let ser = serde_json::to_string(&test_glob).unwrap(); assert_eq!(ser, "\"src/**/*.rs\""); let de: Glob = serde_json::from_str(&ser).unwrap(); assert_eq!(test_glob, de); } #[test] fn glob_set_deserialize() { let j = r#" ["src/**/*.rs", "README.md"] "#; let set: GlobSet = serde_json::from_str(j).unwrap(); assert!(set.is_match("src/lib.rs")); assert!(!set.is_match("Cargo.lock")); } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/globset/src/lib.rs
crates/globset/src/lib.rs
/*! The globset crate provides cross platform single glob and glob set matching. Glob set matching is the process of matching one or more glob patterns against a single candidate path simultaneously, and returning all of the globs that matched. For example, given this set of globs: * `*.rs` * `src/lib.rs` * `src/**/foo.rs` and a path `src/bar/baz/foo.rs`, then the set would report the first and third globs as matching. # Example: one glob This example shows how to match a single glob against a single file path. ``` use globset::Glob; let glob = Glob::new("*.rs")?.compile_matcher(); assert!(glob.is_match("foo.rs")); assert!(glob.is_match("foo/bar.rs")); assert!(!glob.is_match("Cargo.toml")); # Ok::<(), Box<dyn std::error::Error>>(()) ``` # Example: configuring a glob matcher This example shows how to use a `GlobBuilder` to configure aspects of match semantics. In this example, we prevent wildcards from matching path separators. ``` use globset::GlobBuilder; let glob = GlobBuilder::new("*.rs") .literal_separator(true).build()?.compile_matcher(); assert!(glob.is_match("foo.rs")); assert!(!glob.is_match("foo/bar.rs")); // no longer matches assert!(!glob.is_match("Cargo.toml")); # Ok::<(), Box<dyn std::error::Error>>(()) ``` # Example: match multiple globs at once This example shows how to match multiple glob patterns at once. ``` use globset::{Glob, GlobSetBuilder}; let mut builder = GlobSetBuilder::new(); // A GlobBuilder can be used to configure each glob's match semantics // independently. builder.add(Glob::new("*.rs")?); builder.add(Glob::new("src/lib.rs")?); builder.add(Glob::new("src/**/foo.rs")?); let set = builder.build()?; assert_eq!(set.matches("src/bar/baz/foo.rs"), vec![0, 2]); # Ok::<(), Box<dyn std::error::Error>>(()) ``` # Syntax Standard Unix-style glob syntax is supported: * `?` matches any single character. (If the `literal_separator` option is enabled, then `?` can never match a path separator.) * `*` matches zero or more characters. 
(If the `literal_separator` option is enabled, then `*` can never match a path separator.) * `**` recursively matches directories but are only legal in three situations. First, if the glob starts with <code>\*\*&#x2F;</code>, then it matches all directories. For example, <code>\*\*&#x2F;foo</code> matches `foo` and `bar/foo` but not `foo/bar`. Secondly, if the glob ends with <code>&#x2F;\*\*</code>, then it matches all sub-entries. For example, <code>foo&#x2F;\*\*</code> matches `foo/a` and `foo/a/b`, but not `foo`. Thirdly, if the glob contains <code>&#x2F;\*\*&#x2F;</code> anywhere within the pattern, then it matches zero or more directories. Using `**` anywhere else is illegal (N.B. the glob `**` is allowed and means "match everything"). * `{a,b}` matches `a` or `b` where `a` and `b` are arbitrary glob patterns. (N.B. Nesting `{...}` is not currently allowed.) * `[ab]` matches `a` or `b` where `a` and `b` are characters. Use `[!ab]` to match any character except for `a` and `b`. * Metacharacters such as `*` and `?` can be escaped with character class notation. e.g., `[*]` matches `*`. * When backslash escapes are enabled, a backslash (`\`) will escape all meta characters in a glob. If it precedes a non-meta character, then the slash is ignored. A `\\` will match a literal `\\`. Note that this mode is only enabled on Unix platforms by default, but can be enabled on any platform via the `backslash_escape` setting on `Glob`. A `GlobBuilder` can be used to prevent wildcards from matching path separators, or to enable case insensitive matching. # Crate Features This crate includes optional features that can be enabled if necessary. These features are not required but may be useful depending on the use case. The following features are available: * **arbitrary** - Enabling this feature introduces a public dependency on the [`arbitrary`](https://crates.io/crates/arbitrary) crate. Namely, it implements the `Arbitrary` trait from that crate for the [`Glob`] type. 
This feature is disabled by default. */ #![deny(missing_docs)] use std::{ borrow::Cow, panic::{RefUnwindSafe, UnwindSafe}, path::Path, sync::Arc, }; use { aho_corasick::AhoCorasick, bstr::{B, ByteSlice, ByteVec}, regex_automata::{ PatternSet, meta::Regex, util::pool::{Pool, PoolGuard}, }, }; use crate::{ glob::MatchStrategy, pathutil::{file_name, file_name_ext, normalize_path}, }; pub use crate::glob::{Glob, GlobBuilder, GlobMatcher}; mod fnv; mod glob; mod pathutil; #[cfg(feature = "serde1")] mod serde_impl; #[cfg(feature = "log")] macro_rules! debug { ($($token:tt)*) => (::log::debug!($($token)*);) } #[cfg(not(feature = "log"))] macro_rules! debug { ($($token:tt)*) => {}; } /// Represents an error that can occur when parsing a glob pattern. #[derive(Clone, Debug, Eq, PartialEq)] pub struct Error { /// The original glob provided by the caller. glob: Option<String>, /// The kind of error. kind: ErrorKind, } /// The kind of error that can occur when parsing a glob pattern. #[derive(Clone, Debug, Eq, PartialEq)] #[non_exhaustive] pub enum ErrorKind { /// **DEPRECATED**. /// /// This error used to occur for consistency with git's glob specification, /// but the specification now accepts all uses of `**`. When `**` does not /// appear adjacent to a path separator or at the beginning/end of a glob, /// it is now treated as two consecutive `*` patterns. As such, this error /// is no longer used. InvalidRecursive, /// Occurs when a character class (e.g., `[abc]`) is not closed. UnclosedClass, /// Occurs when a range in a character (e.g., `[a-z]`) is invalid. For /// example, if the range starts with a lexicographically larger character /// than it ends with. InvalidRange(char, char), /// Occurs when a `}` is found without a matching `{`. UnopenedAlternates, /// Occurs when a `{` is found without a matching `}`. UnclosedAlternates, /// **DEPRECATED**. 
/// /// This error used to occur when an alternating group was nested inside /// another alternating group, e.g., `{{a,b},{c,d}}`. However, this is now /// supported and as such this error cannot occur. NestedAlternates, /// Occurs when an unescaped '\' is found at the end of a glob. DanglingEscape, /// An error associated with parsing or compiling a regex. Regex(String), } impl std::error::Error for Error { fn description(&self) -> &str { self.kind.description() } } impl Error { /// Return the glob that caused this error, if one exists. pub fn glob(&self) -> Option<&str> { self.glob.as_ref().map(|s| &**s) } /// Return the kind of this error. pub fn kind(&self) -> &ErrorKind { &self.kind } } impl ErrorKind { fn description(&self) -> &str { match *self { ErrorKind::InvalidRecursive => { "invalid use of **; must be one path component" } ErrorKind::UnclosedClass => { "unclosed character class; missing ']'" } ErrorKind::InvalidRange(_, _) => "invalid character range", ErrorKind::UnopenedAlternates => { "unopened alternate group; missing '{' \ (maybe escape '}' with '[}]'?)" } ErrorKind::UnclosedAlternates => { "unclosed alternate group; missing '}' \ (maybe escape '{' with '[{]'?)" } ErrorKind::NestedAlternates => { "nested alternate groups are not allowed" } ErrorKind::DanglingEscape => "dangling '\\'", ErrorKind::Regex(ref err) => err, } } } impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.glob { None => self.kind.fmt(f), Some(ref glob) => { write!(f, "error parsing glob '{}': {}", glob, self.kind) } } } } impl std::fmt::Display for ErrorKind { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match *self { ErrorKind::InvalidRecursive | ErrorKind::UnclosedClass | ErrorKind::UnopenedAlternates | ErrorKind::UnclosedAlternates | ErrorKind::NestedAlternates | ErrorKind::DanglingEscape | ErrorKind::Regex(_) => write!(f, "{}", self.description()), ErrorKind::InvalidRange(s, e) => { 
write!(f, "invalid range; '{}' > '{}'", s, e) } } } } fn new_regex(pat: &str) -> Result<Regex, Error> { let syntax = regex_automata::util::syntax::Config::new() .utf8(false) .dot_matches_new_line(true); let config = Regex::config() .utf8_empty(false) .nfa_size_limit(Some(10 * (1 << 20))) .hybrid_cache_capacity(10 * (1 << 20)); Regex::builder().syntax(syntax).configure(config).build(pat).map_err( |err| Error { glob: Some(pat.to_string()), kind: ErrorKind::Regex(err.to_string()), }, ) } fn new_regex_set(pats: Vec<String>) -> Result<Regex, Error> { let syntax = regex_automata::util::syntax::Config::new() .utf8(false) .dot_matches_new_line(true); let config = Regex::config() .match_kind(regex_automata::MatchKind::All) .utf8_empty(false) .nfa_size_limit(Some(10 * (1 << 20))) .hybrid_cache_capacity(10 * (1 << 20)); Regex::builder() .syntax(syntax) .configure(config) .build_many(&pats) .map_err(|err| Error { glob: None, kind: ErrorKind::Regex(err.to_string()), }) } /// GlobSet represents a group of globs that can be matched together in a /// single pass. #[derive(Clone, Debug)] pub struct GlobSet { len: usize, strats: Vec<GlobSetMatchStrategy>, } impl GlobSet { /// Create a new [`GlobSetBuilder`]. A `GlobSetBuilder` can be used to add /// new patterns. Once all patterns have been added, `build` should be /// called to produce a `GlobSet`, which can then be used for matching. #[inline] pub fn builder() -> GlobSetBuilder { GlobSetBuilder::new() } /// Create an empty `GlobSet`. An empty set matches nothing. #[inline] pub const fn empty() -> GlobSet { GlobSet { len: 0, strats: vec![] } } /// Returns true if this set is empty, and therefore matches nothing. #[inline] pub fn is_empty(&self) -> bool { self.len == 0 } /// Returns the number of globs in this set. #[inline] pub fn len(&self) -> usize { self.len } /// Returns true if any glob in this set matches the path given. 
pub fn is_match<P: AsRef<Path>>(&self, path: P) -> bool { self.is_match_candidate(&Candidate::new(path.as_ref())) } /// Returns true if any glob in this set matches the path given. /// /// This takes a Candidate as input, which can be used to amortize the /// cost of preparing a path for matching. pub fn is_match_candidate(&self, path: &Candidate<'_>) -> bool { if self.is_empty() { return false; } for strat in &self.strats { if strat.is_match(path) { return true; } } false } /// Returns true if all globs in this set match the path given. /// /// This will return true if the set of globs is empty, as in that case all /// `0` of the globs will match. /// /// ``` /// use globset::{Glob, GlobSetBuilder}; /// /// let mut builder = GlobSetBuilder::new(); /// builder.add(Glob::new("src/*").unwrap()); /// builder.add(Glob::new("**/*.rs").unwrap()); /// let set = builder.build().unwrap(); /// /// assert!(set.matches_all("src/foo.rs")); /// assert!(!set.matches_all("src/bar.c")); /// assert!(!set.matches_all("test.rs")); /// ``` pub fn matches_all<P: AsRef<Path>>(&self, path: P) -> bool { self.matches_all_candidate(&Candidate::new(path.as_ref())) } /// Returns ture if all globs in this set match the path given. /// /// This takes a Candidate as input, which can be used to amortize the cost /// of peparing a path for matching. /// /// This will return true if the set of globs is empty, as in that case all /// `0` of the globs will match. pub fn matches_all_candidate(&self, path: &Candidate<'_>) -> bool { for strat in &self.strats { if !strat.is_match(path) { return false; } } true } /// Returns the sequence number of every glob pattern that matches the /// given path. pub fn matches<P: AsRef<Path>>(&self, path: P) -> Vec<usize> { self.matches_candidate(&Candidate::new(path.as_ref())) } /// Returns the sequence number of every glob pattern that matches the /// given path. 
/// /// This takes a Candidate as input, which can be used to amortize the /// cost of preparing a path for matching. pub fn matches_candidate(&self, path: &Candidate<'_>) -> Vec<usize> { let mut into = vec![]; if self.is_empty() { return into; } self.matches_candidate_into(path, &mut into); into } /// Adds the sequence number of every glob pattern that matches the given /// path to the vec given. /// /// `into` is cleared before matching begins, and contains the set of /// sequence numbers (in ascending order) after matching ends. If no globs /// were matched, then `into` will be empty. pub fn matches_into<P: AsRef<Path>>( &self, path: P, into: &mut Vec<usize>, ) { self.matches_candidate_into(&Candidate::new(path.as_ref()), into); } /// Adds the sequence number of every glob pattern that matches the given /// path to the vec given. /// /// `into` is cleared before matching begins, and contains the set of /// sequence numbers (in ascending order) after matching ends. If no globs /// were matched, then `into` will be empty. /// /// This takes a Candidate as input, which can be used to amortize the /// cost of preparing a path for matching. pub fn matches_candidate_into( &self, path: &Candidate<'_>, into: &mut Vec<usize>, ) { into.clear(); if self.is_empty() { return; } for strat in &self.strats { strat.matches_into(path, into); } into.sort(); into.dedup(); } /// Builds a new matcher from a collection of Glob patterns. /// /// Once a matcher is built, no new patterns can be added to it. 
pub fn new<I, G>(globs: I) -> Result<GlobSet, Error> where I: IntoIterator<Item = G>, G: AsRef<Glob>, { let mut it = globs.into_iter().peekable(); if it.peek().is_none() { return Ok(GlobSet::empty()); } let mut len = 0; let mut lits = LiteralStrategy::new(); let mut base_lits = BasenameLiteralStrategy::new(); let mut exts = ExtensionStrategy::new(); let mut prefixes = MultiStrategyBuilder::new(); let mut suffixes = MultiStrategyBuilder::new(); let mut required_exts = RequiredExtensionStrategyBuilder::new(); let mut regexes = MultiStrategyBuilder::new(); for (i, p) in it.enumerate() { len += 1; let p = p.as_ref(); match MatchStrategy::new(p) { MatchStrategy::Literal(lit) => { lits.add(i, lit); } MatchStrategy::BasenameLiteral(lit) => { base_lits.add(i, lit); } MatchStrategy::Extension(ext) => { exts.add(i, ext); } MatchStrategy::Prefix(prefix) => { prefixes.add(i, prefix); } MatchStrategy::Suffix { suffix, component } => { if component { lits.add(i, suffix[1..].to_string()); } suffixes.add(i, suffix); } MatchStrategy::RequiredExtension(ext) => { required_exts.add(i, ext, p.regex().to_owned()); } MatchStrategy::Regex => { debug!( "glob `{:?}` converted to regex: `{:?}`", p, p.regex() ); regexes.add(i, p.regex().to_owned()); } } } debug!( "built glob set; {} literals, {} basenames, {} extensions, \ {} prefixes, {} suffixes, {} required extensions, {} regexes", lits.0.len(), base_lits.0.len(), exts.0.len(), prefixes.literals.len(), suffixes.literals.len(), required_exts.0.len(), regexes.literals.len() ); let mut strats = Vec::with_capacity(7); // Only add strategies that are populated if !exts.0.is_empty() { strats.push(GlobSetMatchStrategy::Extension(exts)); } if !base_lits.0.is_empty() { strats.push(GlobSetMatchStrategy::BasenameLiteral(base_lits)); } if !lits.0.is_empty() { strats.push(GlobSetMatchStrategy::Literal(lits)); } if !suffixes.is_empty() { strats.push(GlobSetMatchStrategy::Suffix(suffixes.suffix())); } if !prefixes.is_empty() { 
strats.push(GlobSetMatchStrategy::Prefix(prefixes.prefix())); } if !required_exts.0.is_empty() { strats.push(GlobSetMatchStrategy::RequiredExtension( required_exts.build()?, )); } if !regexes.is_empty() { strats.push(GlobSetMatchStrategy::Regex(regexes.regex_set()?)); } Ok(GlobSet { len, strats }) } } impl Default for GlobSet { /// Create a default empty GlobSet. fn default() -> Self { GlobSet::empty() } } /// GlobSetBuilder builds a group of patterns that can be used to /// simultaneously match a file path. #[derive(Clone, Debug)] pub struct GlobSetBuilder { pats: Vec<Glob>, } impl GlobSetBuilder { /// Create a new `GlobSetBuilder`. A `GlobSetBuilder` can be used to add new /// patterns. Once all patterns have been added, `build` should be called /// to produce a [`GlobSet`], which can then be used for matching. pub fn new() -> GlobSetBuilder { GlobSetBuilder { pats: vec![] } } /// Builds a new matcher from all of the glob patterns added so far. /// /// Once a matcher is built, no new patterns can be added to it. pub fn build(&self) -> Result<GlobSet, Error> { GlobSet::new(self.pats.iter()) } /// Add a new pattern to this set. pub fn add(&mut self, pat: Glob) -> &mut GlobSetBuilder { self.pats.push(pat); self } } /// A candidate path for matching. /// /// All glob matching in this crate operates on `Candidate` values. /// Constructing candidates has a very small cost associated with it, so /// callers may find it beneficial to amortize that cost when matching a single /// path against multiple globs or sets of globs. 
#[derive(Clone)] pub struct Candidate<'a> { path: Cow<'a, [u8]>, basename: Cow<'a, [u8]>, ext: Cow<'a, [u8]>, } impl<'a> std::fmt::Debug for Candidate<'a> { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { f.debug_struct("Candidate") .field("path", &self.path.as_bstr()) .field("basename", &self.basename.as_bstr()) .field("ext", &self.ext.as_bstr()) .finish() } } impl<'a> Candidate<'a> { /// Create a new candidate for matching from the given path. pub fn new<P: AsRef<Path> + ?Sized>(path: &'a P) -> Candidate<'a> { Self::from_cow(Vec::from_path_lossy(path.as_ref())) } /// Create a new candidate for matching from the given path as a sequence /// of bytes. /// /// Generally speaking, this routine expects the bytes to be /// _conventionally_ UTF-8. It is legal for the byte sequence to contain /// invalid UTF-8. However, if the bytes are in some other encoding that /// isn't ASCII compatible (for example, UTF-16), then the results of /// matching are unspecified. pub fn from_bytes<P: AsRef<[u8]> + ?Sized>(path: &'a P) -> Candidate<'a> { Self::from_cow(Cow::Borrowed(path.as_ref())) } fn from_cow(path: Cow<'a, [u8]>) -> Candidate<'a> { let path = normalize_path(path); let basename = file_name(&path).unwrap_or(Cow::Borrowed(B(""))); let ext = file_name_ext(&basename).unwrap_or(Cow::Borrowed(B(""))); Candidate { path, basename, ext } } fn path_prefix(&self, max: usize) -> &[u8] { if self.path.len() <= max { &*self.path } else { &self.path[..max] } } fn path_suffix(&self, max: usize) -> &[u8] { if self.path.len() <= max { &*self.path } else { &self.path[self.path.len() - max..] 
} } } #[derive(Clone, Debug)] enum GlobSetMatchStrategy { Literal(LiteralStrategy), BasenameLiteral(BasenameLiteralStrategy), Extension(ExtensionStrategy), Prefix(PrefixStrategy), Suffix(SuffixStrategy), RequiredExtension(RequiredExtensionStrategy), Regex(RegexSetStrategy), } impl GlobSetMatchStrategy { fn is_match(&self, candidate: &Candidate<'_>) -> bool { use self::GlobSetMatchStrategy::*; match *self { Literal(ref s) => s.is_match(candidate), BasenameLiteral(ref s) => s.is_match(candidate), Extension(ref s) => s.is_match(candidate), Prefix(ref s) => s.is_match(candidate), Suffix(ref s) => s.is_match(candidate), RequiredExtension(ref s) => s.is_match(candidate), Regex(ref s) => s.is_match(candidate), } } fn matches_into( &self, candidate: &Candidate<'_>, matches: &mut Vec<usize>, ) { use self::GlobSetMatchStrategy::*; match *self { Literal(ref s) => s.matches_into(candidate, matches), BasenameLiteral(ref s) => s.matches_into(candidate, matches), Extension(ref s) => s.matches_into(candidate, matches), Prefix(ref s) => s.matches_into(candidate, matches), Suffix(ref s) => s.matches_into(candidate, matches), RequiredExtension(ref s) => s.matches_into(candidate, matches), Regex(ref s) => s.matches_into(candidate, matches), } } } #[derive(Clone, Debug)] struct LiteralStrategy(fnv::HashMap<Vec<u8>, Vec<usize>>); impl LiteralStrategy { fn new() -> LiteralStrategy { LiteralStrategy(fnv::HashMap::default()) } fn add(&mut self, global_index: usize, lit: String) { self.0.entry(lit.into_bytes()).or_insert(vec![]).push(global_index); } fn is_match(&self, candidate: &Candidate<'_>) -> bool { self.0.contains_key(candidate.path.as_bytes()) } #[inline(never)] fn matches_into( &self, candidate: &Candidate<'_>, matches: &mut Vec<usize>, ) { if let Some(hits) = self.0.get(candidate.path.as_bytes()) { matches.extend(hits); } } } #[derive(Clone, Debug)] struct BasenameLiteralStrategy(fnv::HashMap<Vec<u8>, Vec<usize>>); impl BasenameLiteralStrategy { fn new() -> BasenameLiteralStrategy 
{ BasenameLiteralStrategy(fnv::HashMap::default()) } fn add(&mut self, global_index: usize, lit: String) { self.0.entry(lit.into_bytes()).or_insert(vec![]).push(global_index); } fn is_match(&self, candidate: &Candidate<'_>) -> bool { if candidate.basename.is_empty() { return false; } self.0.contains_key(candidate.basename.as_bytes()) } #[inline(never)] fn matches_into( &self, candidate: &Candidate<'_>, matches: &mut Vec<usize>, ) { if candidate.basename.is_empty() { return; } if let Some(hits) = self.0.get(candidate.basename.as_bytes()) { matches.extend(hits); } } } #[derive(Clone, Debug)] struct ExtensionStrategy(fnv::HashMap<Vec<u8>, Vec<usize>>); impl ExtensionStrategy { fn new() -> ExtensionStrategy { ExtensionStrategy(fnv::HashMap::default()) } fn add(&mut self, global_index: usize, ext: String) { self.0.entry(ext.into_bytes()).or_insert(vec![]).push(global_index); } fn is_match(&self, candidate: &Candidate<'_>) -> bool { if candidate.ext.is_empty() { return false; } self.0.contains_key(candidate.ext.as_bytes()) } #[inline(never)] fn matches_into( &self, candidate: &Candidate<'_>, matches: &mut Vec<usize>, ) { if candidate.ext.is_empty() { return; } if let Some(hits) = self.0.get(candidate.ext.as_bytes()) { matches.extend(hits); } } } #[derive(Clone, Debug)] struct PrefixStrategy { matcher: AhoCorasick, map: Vec<usize>, longest: usize, } impl PrefixStrategy { fn is_match(&self, candidate: &Candidate<'_>) -> bool { let path = candidate.path_prefix(self.longest); for m in self.matcher.find_overlapping_iter(path) { if m.start() == 0 { return true; } } false } fn matches_into( &self, candidate: &Candidate<'_>, matches: &mut Vec<usize>, ) { let path = candidate.path_prefix(self.longest); for m in self.matcher.find_overlapping_iter(path) { if m.start() == 0 { matches.push(self.map[m.pattern()]); } } } } #[derive(Clone, Debug)] struct SuffixStrategy { matcher: AhoCorasick, map: Vec<usize>, longest: usize, } impl SuffixStrategy { fn is_match(&self, candidate: 
&Candidate<'_>) -> bool { let path = candidate.path_suffix(self.longest); for m in self.matcher.find_overlapping_iter(path) { if m.end() == path.len() { return true; } } false } fn matches_into( &self, candidate: &Candidate<'_>, matches: &mut Vec<usize>, ) { let path = candidate.path_suffix(self.longest); for m in self.matcher.find_overlapping_iter(path) { if m.end() == path.len() { matches.push(self.map[m.pattern()]); } } } } #[derive(Clone, Debug)] struct RequiredExtensionStrategy(fnv::HashMap<Vec<u8>, Vec<(usize, Regex)>>); impl RequiredExtensionStrategy { fn is_match(&self, candidate: &Candidate<'_>) -> bool { if candidate.ext.is_empty() { return false; } match self.0.get(candidate.ext.as_bytes()) { None => false, Some(regexes) => { for &(_, ref re) in regexes { if re.is_match(candidate.path.as_bytes()) { return true; } } false } } } #[inline(never)] fn matches_into( &self, candidate: &Candidate<'_>, matches: &mut Vec<usize>, ) { if candidate.ext.is_empty() { return; } if let Some(regexes) = self.0.get(candidate.ext.as_bytes()) { for &(global_index, ref re) in regexes { if re.is_match(candidate.path.as_bytes()) { matches.push(global_index); } } } } } #[derive(Clone, Debug)] struct RegexSetStrategy { matcher: Regex, map: Vec<usize>, // We use a pool of PatternSets to hopefully allocating a fresh one on each // call. // // TODO: In the next semver breaking release, we should drop this pool and // expose an opaque type that wraps PatternSet. Then callers can provide // it to `matches_into` directly. Callers might still want to use a pool // or similar to amortize allocation, but that matches the status quo and // absolves us of needing to do it here. 
patset: Arc<Pool<PatternSet, PatternSetPoolFn>>, } type PatternSetPoolFn = Box<dyn Fn() -> PatternSet + Send + Sync + UnwindSafe + RefUnwindSafe>; impl RegexSetStrategy { fn is_match(&self, candidate: &Candidate<'_>) -> bool { self.matcher.is_match(candidate.path.as_bytes()) } fn matches_into( &self, candidate: &Candidate<'_>, matches: &mut Vec<usize>, ) { let input = regex_automata::Input::new(candidate.path.as_bytes()); let mut patset = self.patset.get(); patset.clear(); self.matcher.which_overlapping_matches(&input, &mut patset); for i in patset.iter() { matches.push(self.map[i]); } PoolGuard::put(patset); } } #[derive(Clone, Debug)] struct MultiStrategyBuilder { literals: Vec<String>, map: Vec<usize>, longest: usize, } impl MultiStrategyBuilder { fn new() -> MultiStrategyBuilder { MultiStrategyBuilder { literals: vec![], map: vec![], longest: 0 } } fn add(&mut self, global_index: usize, literal: String) { if literal.len() > self.longest { self.longest = literal.len(); } self.map.push(global_index); self.literals.push(literal); } fn prefix(self) -> PrefixStrategy { PrefixStrategy { matcher: AhoCorasick::new(&self.literals).unwrap(), map: self.map, longest: self.longest, } } fn suffix(self) -> SuffixStrategy { SuffixStrategy { matcher: AhoCorasick::new(&self.literals).unwrap(), map: self.map, longest: self.longest, } } fn regex_set(self) -> Result<RegexSetStrategy, Error> { let matcher = new_regex_set(self.literals)?; let pattern_len = matcher.pattern_len(); let create: PatternSetPoolFn = Box::new(move || PatternSet::new(pattern_len)); Ok(RegexSetStrategy { matcher, map: self.map, patset: Arc::new(Pool::new(create)), }) } fn is_empty(&self) -> bool { self.literals.is_empty() } } #[derive(Clone, Debug)] struct RequiredExtensionStrategyBuilder( fnv::HashMap<Vec<u8>, Vec<(usize, String)>>, ); impl RequiredExtensionStrategyBuilder { fn new() -> RequiredExtensionStrategyBuilder { RequiredExtensionStrategyBuilder(fnv::HashMap::default()) } fn add(&mut self, 
global_index: usize, ext: String, regex: String) { self.0 .entry(ext.into_bytes()) .or_insert(vec![]) .push((global_index, regex)); } fn build(self) -> Result<RequiredExtensionStrategy, Error> { let mut exts = fnv::HashMap::default(); for (ext, regexes) in self.0.into_iter() { exts.insert(ext.clone(), vec![]); for (global_index, regex) in regexes { let compiled = new_regex(&regex)?; exts.get_mut(&ext).unwrap().push((global_index, compiled)); } } Ok(RequiredExtensionStrategy(exts)) } } /// Escape meta-characters within the given glob pattern. /// /// The escaping works by surrounding meta-characters with brackets. For /// example, `*` becomes `[*]`. /// /// # Example /// /// ``` /// use globset::escape; /// /// assert_eq!(escape("foo*bar"), "foo[*]bar"); /// assert_eq!(escape("foo?bar"), "foo[?]bar"); /// assert_eq!(escape("foo[bar"), "foo[[]bar"); /// assert_eq!(escape("foo]bar"), "foo[]]bar"); /// assert_eq!(escape("foo{bar"), "foo[{]bar"); /// assert_eq!(escape("foo}bar"), "foo[}]bar"); /// ``` pub fn escape(s: &str) -> String { let mut escaped = String::with_capacity(s.len()); for c in s.chars() { match c { // note that ! does not need escaping because it is only special // inside brackets
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
true
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/globset/src/pathutil.rs
crates/globset/src/pathutil.rs
use std::borrow::Cow; use bstr::{ByteSlice, ByteVec}; /// The final component of the path, if it is a normal file. /// /// If the path terminates in `..`, or consists solely of a root of prefix, /// file_name will return `None`. pub(crate) fn file_name<'a>(path: &Cow<'a, [u8]>) -> Option<Cow<'a, [u8]>> { if path.is_empty() { return None; } let last_slash = path.rfind_byte(b'/').map(|i| i + 1).unwrap_or(0); let got = match *path { Cow::Borrowed(path) => Cow::Borrowed(&path[last_slash..]), Cow::Owned(ref path) => { let mut path = path.clone(); path.drain_bytes(..last_slash); Cow::Owned(path) } }; if got == &b".."[..] { return None; } Some(got) } /// Return a file extension given a path's file name. /// /// Note that this does NOT match the semantics of std::path::Path::extension. /// Namely, the extension includes the `.` and matching is otherwise more /// liberal. Specifically, the extension is: /// /// * None, if the file name given is empty; /// * None, if there is no embedded `.`; /// * Otherwise, the portion of the file name starting with the final `.`. /// /// e.g., A file name of `.rs` has an extension `.rs`. /// /// N.B. This is done to make certain glob match optimizations easier. Namely, /// a pattern like `*.rs` is obviously trying to match files with a `rs` /// extension, but it also matches files like `.rs`, which doesn't have an /// extension according to std::path::Path::extension. pub(crate) fn file_name_ext<'a>( name: &Cow<'a, [u8]>, ) -> Option<Cow<'a, [u8]>> { if name.is_empty() { return None; } let last_dot_at = match name.rfind_byte(b'.') { None => return None, Some(i) => i, }; Some(match *name { Cow::Borrowed(name) => Cow::Borrowed(&name[last_dot_at..]), Cow::Owned(ref name) => { let mut name = name.clone(); name.drain_bytes(..last_dot_at); Cow::Owned(name) } }) } /// Normalizes a path to use `/` as a separator everywhere, even on platforms /// that recognize other characters as separators. 
#[cfg(unix)] pub(crate) fn normalize_path(path: Cow<'_, [u8]>) -> Cow<'_, [u8]> { // UNIX only uses /, so we're good. path } /// Normalizes a path to use `/` as a separator everywhere, even on platforms /// that recognize other characters as separators. #[cfg(not(unix))] pub(crate) fn normalize_path(mut path: Cow<[u8]>) -> Cow<[u8]> { use std::path::is_separator; for i in 0..path.len() { if path[i] == b'/' || !is_separator(char::from(path[i])) { continue; } path.to_mut()[i] = b'/'; } path } #[cfg(test)] mod tests { use std::borrow::Cow; use bstr::{B, ByteVec}; use super::{file_name_ext, normalize_path}; macro_rules! ext { ($name:ident, $file_name:expr, $ext:expr) => { #[test] fn $name() { let bs = Vec::from($file_name); let got = file_name_ext(&Cow::Owned(bs)); assert_eq!($ext.map(|s| Cow::Borrowed(B(s))), got); } }; } ext!(ext1, "foo.rs", Some(".rs")); ext!(ext2, ".rs", Some(".rs")); ext!(ext3, "..rs", Some(".rs")); ext!(ext4, "", None::<&str>); ext!(ext5, "foo", None::<&str>); macro_rules! normalize { ($name:ident, $path:expr, $expected:expr) => { #[test] fn $name() { let bs = Vec::from_slice($path); let got = normalize_path(Cow::Owned(bs)); assert_eq!($expected.to_vec(), got.into_owned()); } }; } normalize!(normal1, b"foo", b"foo"); normalize!(normal2, b"foo/bar", b"foo/bar"); #[cfg(unix)] normalize!(normal3, b"foo\\bar", b"foo\\bar"); #[cfg(not(unix))] normalize!(normal3, b"foo\\bar", b"foo/bar"); #[cfg(unix)] normalize!(normal4, b"foo\\bar/baz", b"foo\\bar/baz"); #[cfg(not(unix))] normalize!(normal4, b"foo\\bar/baz", b"foo/bar/baz"); }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/globset/src/glob.rs
crates/globset/src/glob.rs
use std::fmt::Write; use std::path::{Path, is_separator}; use regex_automata::meta::Regex; use crate::{Candidate, Error, ErrorKind, new_regex}; /// Describes a matching strategy for a particular pattern. /// /// This provides a way to more quickly determine whether a pattern matches /// a particular file path in a way that scales with a large number of /// patterns. For example, if many patterns are of the form `*.ext`, then it's /// possible to test whether any of those patterns matches by looking up a /// file path's extension in a hash table. #[derive(Clone, Debug, Eq, PartialEq)] pub(crate) enum MatchStrategy { /// A pattern matches if and only if the entire file path matches this /// literal string. Literal(String), /// A pattern matches if and only if the file path's basename matches this /// literal string. BasenameLiteral(String), /// A pattern matches if and only if the file path's extension matches this /// literal string. Extension(String), /// A pattern matches if and only if this prefix literal is a prefix of the /// candidate file path. Prefix(String), /// A pattern matches if and only if this prefix literal is a prefix of the /// candidate file path. /// /// An exception: if `component` is true, then `suffix` must appear at the /// beginning of a file path or immediately following a `/`. Suffix { /// The actual suffix. suffix: String, /// Whether this must start at the beginning of a path component. component: bool, }, /// A pattern matches only if the given extension matches the file path's /// extension. Note that this is a necessary but NOT sufficient criterion. /// Namely, if the extension matches, then a full regex search is still /// required. RequiredExtension(String), /// A regex needs to be used for matching. Regex, } impl MatchStrategy { /// Returns a matching strategy for the given pattern. 
pub(crate) fn new(pat: &Glob) -> MatchStrategy { if let Some(lit) = pat.basename_literal() { MatchStrategy::BasenameLiteral(lit) } else if let Some(lit) = pat.literal() { MatchStrategy::Literal(lit) } else if let Some(ext) = pat.ext() { MatchStrategy::Extension(ext) } else if let Some(prefix) = pat.prefix() { MatchStrategy::Prefix(prefix) } else if let Some((suffix, component)) = pat.suffix() { MatchStrategy::Suffix { suffix, component } } else if let Some(ext) = pat.required_ext() { MatchStrategy::RequiredExtension(ext) } else { MatchStrategy::Regex } } } /// Glob represents a successfully parsed shell glob pattern. /// /// It cannot be used directly to match file paths, but it can be converted /// to a regular expression string or a matcher. #[derive(Clone, Eq)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] pub struct Glob { glob: String, re: String, opts: GlobOptions, tokens: Tokens, } impl AsRef<Glob> for Glob { fn as_ref(&self) -> &Glob { self } } impl PartialEq for Glob { fn eq(&self, other: &Glob) -> bool { self.glob == other.glob && self.opts == other.opts } } impl std::hash::Hash for Glob { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.glob.hash(state); self.opts.hash(state); } } impl std::fmt::Debug for Glob { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { if f.alternate() { f.debug_struct("Glob") .field("glob", &self.glob) .field("re", &self.re) .field("opts", &self.opts) .field("tokens", &self.tokens) .finish() } else { f.debug_tuple("Glob").field(&self.glob).finish() } } } impl std::fmt::Display for Glob { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { self.glob.fmt(f) } } impl std::str::FromStr for Glob { type Err = Error; fn from_str(glob: &str) -> Result<Self, Self::Err> { Self::new(glob) } } /// A matcher for a single pattern. #[derive(Clone, Debug)] pub struct GlobMatcher { /// The underlying pattern. pat: Glob, /// The pattern, as a compiled regex. 
re: Regex, } impl GlobMatcher { /// Tests whether the given path matches this pattern or not. pub fn is_match<P: AsRef<Path>>(&self, path: P) -> bool { self.is_match_candidate(&Candidate::new(path.as_ref())) } /// Tests whether the given path matches this pattern or not. pub fn is_match_candidate(&self, path: &Candidate<'_>) -> bool { self.re.is_match(&path.path) } /// Returns the `Glob` used to compile this matcher. pub fn glob(&self) -> &Glob { &self.pat } } /// A strategic matcher for a single pattern. #[cfg(test)] #[derive(Clone, Debug)] struct GlobStrategic { /// The match strategy to use. strategy: MatchStrategy, /// The pattern, as a compiled regex. re: Regex, } #[cfg(test)] impl GlobStrategic { /// Tests whether the given path matches this pattern or not. fn is_match<P: AsRef<Path>>(&self, path: P) -> bool { self.is_match_candidate(&Candidate::new(path.as_ref())) } /// Tests whether the given path matches this pattern or not. fn is_match_candidate(&self, candidate: &Candidate<'_>) -> bool { let byte_path = &*candidate.path; match self.strategy { MatchStrategy::Literal(ref lit) => lit.as_bytes() == byte_path, MatchStrategy::BasenameLiteral(ref lit) => { lit.as_bytes() == &*candidate.basename } MatchStrategy::Extension(ref ext) => { ext.as_bytes() == &*candidate.ext } MatchStrategy::Prefix(ref pre) => { starts_with(pre.as_bytes(), byte_path) } MatchStrategy::Suffix { ref suffix, component } => { if component && byte_path == &suffix.as_bytes()[1..] { return true; } ends_with(suffix.as_bytes(), byte_path) } MatchStrategy::RequiredExtension(ref ext) => { let ext = ext.as_bytes(); &*candidate.ext == ext && self.re.is_match(byte_path) } MatchStrategy::Regex => self.re.is_match(byte_path), } } } /// A builder for a pattern. /// /// This builder enables configuring the match semantics of a pattern. For /// example, one can make matching case insensitive. /// /// The lifetime `'a` refers to the lifetime of the pattern string. 
#[derive(Clone, Debug)] pub struct GlobBuilder<'a> { /// The glob pattern to compile. glob: &'a str, /// Options for the pattern. opts: GlobOptions, } #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] struct GlobOptions { /// Whether to match case insensitively. case_insensitive: bool, /// Whether to require a literal separator to match a separator in a file /// path. e.g., when enabled, `*` won't match `/`. literal_separator: bool, /// Whether or not to use `\` to escape special characters. /// e.g., when enabled, `\*` will match a literal `*`. backslash_escape: bool, /// Whether or not an empty case in an alternate will be removed. /// e.g., when enabled, `{,a}` will match "" and "a". empty_alternates: bool, /// Whether or not an unclosed character class is allowed. When an unclosed /// character class is found, the opening `[` is treated as a literal `[`. /// When this isn't enabled, an opening `[` without a corresponding `]` is /// treated as an error. allow_unclosed_class: bool, } impl GlobOptions { fn default() -> GlobOptions { GlobOptions { case_insensitive: false, literal_separator: false, backslash_escape: !is_separator('\\'), empty_alternates: false, allow_unclosed_class: false, } } } #[derive(Clone, Debug, Default, Eq, PartialEq)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] struct Tokens(Vec<Token>); impl std::ops::Deref for Tokens { type Target = Vec<Token>; fn deref(&self) -> &Vec<Token> { &self.0 } } impl std::ops::DerefMut for Tokens { fn deref_mut(&mut self) -> &mut Vec<Token> { &mut self.0 } } #[derive(Clone, Debug, Eq, PartialEq)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] enum Token { Literal(char), Any, ZeroOrMore, RecursivePrefix, RecursiveSuffix, RecursiveZeroOrMore, Class { negated: bool, ranges: Vec<(char, char)> }, Alternates(Vec<Tokens>), } impl Glob { /// Builds a new pattern with default options. 
pub fn new(glob: &str) -> Result<Glob, Error> { GlobBuilder::new(glob).build() } /// Returns a matcher for this pattern. pub fn compile_matcher(&self) -> GlobMatcher { let re = new_regex(&self.re).expect("regex compilation shouldn't fail"); GlobMatcher { pat: self.clone(), re } } /// Returns a strategic matcher. /// /// This isn't exposed because it's not clear whether it's actually /// faster than just running a regex for a *single* pattern. If it /// is faster, then GlobMatcher should do it automatically. #[cfg(test)] fn compile_strategic_matcher(&self) -> GlobStrategic { let strategy = MatchStrategy::new(self); let re = new_regex(&self.re).expect("regex compilation shouldn't fail"); GlobStrategic { strategy, re } } /// Returns the original glob pattern used to build this pattern. pub fn glob(&self) -> &str { &self.glob } /// Returns the regular expression string for this glob. /// /// Note that regular expressions for globs are intended to be matched on /// arbitrary bytes (`&[u8]`) instead of Unicode strings (`&str`). In /// particular, globs are frequently used on file paths, where there is no /// general guarantee that file paths are themselves valid UTF-8. As a /// result, callers will need to ensure that they are using a regex API /// that can match on arbitrary bytes. For example, the /// [`regex`](https://crates.io/regex) /// crate's /// [`Regex`](https://docs.rs/regex/*/regex/struct.Regex.html) /// API is not suitable for this since it matches on `&str`, but its /// [`bytes::Regex`](https://docs.rs/regex/*/regex/bytes/struct.Regex.html) /// API is suitable for this. pub fn regex(&self) -> &str { &self.re } /// Returns the pattern as a literal if and only if the pattern must match /// an entire path exactly. /// /// The basic format of these patterns is `{literal}`. 
fn literal(&self) -> Option<String> { if self.opts.case_insensitive { return None; } let mut lit = String::new(); for t in &*self.tokens { let Token::Literal(c) = *t else { return None }; lit.push(c); } if lit.is_empty() { None } else { Some(lit) } } /// Returns an extension if this pattern matches a file path if and only /// if the file path has the extension returned. /// /// Note that this extension returned differs from the extension that /// std::path::Path::extension returns. Namely, this extension includes /// the '.'. Also, paths like `.rs` are considered to have an extension /// of `.rs`. fn ext(&self) -> Option<String> { if self.opts.case_insensitive { return None; } let start = match *self.tokens.get(0)? { Token::RecursivePrefix => 1, _ => 0, }; match *self.tokens.get(start)? { Token::ZeroOrMore => { // If there was no recursive prefix, then we only permit // `*` if `*` can match a `/`. For example, if `*` can't // match `/`, then `*.c` doesn't match `foo/bar.c`. if start == 0 && self.opts.literal_separator { return None; } } _ => return None, } match *self.tokens.get(start + 1)? { Token::Literal('.') => {} _ => return None, } let mut lit = ".".to_string(); for t in self.tokens[start + 2..].iter() { match *t { Token::Literal('.') | Token::Literal('/') => return None, Token::Literal(c) => lit.push(c), _ => return None, } } if lit.is_empty() { None } else { Some(lit) } } /// This is like `ext`, but returns an extension even if it isn't sufficient /// to imply a match. Namely, if an extension is returned, then it is /// necessary but not sufficient for a match. fn required_ext(&self) -> Option<String> { if self.opts.case_insensitive { return None; } // We don't care at all about the beginning of this pattern. All we // need to check for is if it ends with a literal of the form `.ext`. 
let mut ext: Vec<char> = vec![]; // built in reverse for t in self.tokens.iter().rev() { match *t { Token::Literal('/') => return None, Token::Literal(c) => { ext.push(c); if c == '.' { break; } } _ => return None, } } if ext.last() != Some(&'.') { None } else { ext.reverse(); Some(ext.into_iter().collect()) } } /// Returns a literal prefix of this pattern if the entire pattern matches /// if the literal prefix matches. fn prefix(&self) -> Option<String> { if self.opts.case_insensitive { return None; } let (end, need_sep) = match *self.tokens.last()? { Token::ZeroOrMore => { if self.opts.literal_separator { // If a trailing `*` can't match a `/`, then we can't // assume a match of the prefix corresponds to a match // of the overall pattern. e.g., `foo/*` with // `literal_separator` enabled matches `foo/bar` but not // `foo/bar/baz`, even though `foo/bar/baz` has a `foo/` // literal prefix. return None; } (self.tokens.len() - 1, false) } Token::RecursiveSuffix => (self.tokens.len() - 1, true), _ => (self.tokens.len(), false), }; let mut lit = String::new(); for t in &self.tokens[0..end] { let Token::Literal(c) = *t else { return None }; lit.push(c); } if need_sep { lit.push('/'); } if lit.is_empty() { None } else { Some(lit) } } /// Returns a literal suffix of this pattern if the entire pattern matches /// if the literal suffix matches. /// /// If a literal suffix is returned and it must match either the entire /// file path or be preceded by a `/`, then also return true. This happens /// with a pattern like `**/foo/bar`. Namely, this pattern matches /// `foo/bar` and `baz/foo/bar`, but not `foofoo/bar`. In this case, the /// suffix returned is `/foo/bar` (but should match the entire path /// `foo/bar`). /// /// When this returns true, the suffix literal is guaranteed to start with /// a `/`. fn suffix(&self) -> Option<(String, bool)> { if self.opts.case_insensitive { return None; } let mut lit = String::new(); let (start, entire) = match *self.tokens.get(0)? 
{ Token::RecursivePrefix => { // We only care if this follows a path component if the next // token is a literal. if let Some(&Token::Literal(_)) = self.tokens.get(1) { lit.push('/'); (1, true) } else { (1, false) } } _ => (0, false), }; let start = match *self.tokens.get(start)? { Token::ZeroOrMore => { // If literal_separator is enabled, then a `*` can't // necessarily match everything, so reporting a suffix match // as a match of the pattern would be a false positive. if self.opts.literal_separator { return None; } start + 1 } _ => start, }; for t in &self.tokens[start..] { let Token::Literal(c) = *t else { return None }; lit.push(c); } if lit.is_empty() || lit == "/" { None } else { Some((lit, entire)) } } /// If this pattern only needs to inspect the basename of a file path, /// then the tokens corresponding to only the basename match are returned. /// /// For example, given a pattern of `**/*.foo`, only the tokens /// corresponding to `*.foo` are returned. /// /// Note that this will return None if any match of the basename tokens /// doesn't correspond to a match of the entire pattern. For example, the /// glob `foo` only matches when a file path has a basename of `foo`, but /// doesn't *always* match when a file path has a basename of `foo`. e.g., /// `foo` doesn't match `abc/foo`. fn basename_tokens(&self) -> Option<&[Token]> { if self.opts.case_insensitive { return None; } let start = match *self.tokens.get(0)? { Token::RecursivePrefix => 1, _ => { // With nothing to gobble up the parent portion of a path, // we can't assume that matching on only the basename is // correct. return None; } }; if self.tokens[start..].is_empty() { return None; } for t in self.tokens[start..].iter() { match *t { Token::Literal('/') => return None, Token::Literal(_) => {} // OK Token::Any | Token::ZeroOrMore => { if !self.opts.literal_separator { // In this case, `*` and `?` can match a path // separator, which means this could reach outside // the basename. 
return None; } } Token::RecursivePrefix | Token::RecursiveSuffix | Token::RecursiveZeroOrMore => { return None; } Token::Class { .. } | Token::Alternates(..) => { // We *could* be a little smarter here, but either one // of these is going to prevent our literal optimizations // anyway, so give up. return None; } } } Some(&self.tokens[start..]) } /// Returns the pattern as a literal if and only if the pattern exclusively /// matches the basename of a file path *and* is a literal. /// /// The basic format of these patterns is `**/{literal}`, where `{literal}` /// does not contain a path separator. fn basename_literal(&self) -> Option<String> { let tokens = self.basename_tokens()?; let mut lit = String::new(); for t in tokens { let Token::Literal(c) = *t else { return None }; lit.push(c); } Some(lit) } } impl<'a> GlobBuilder<'a> { /// Create a new builder for the pattern given. /// /// The pattern is not compiled until `build` is called. pub fn new(glob: &'a str) -> GlobBuilder<'a> { GlobBuilder { glob, opts: GlobOptions::default() } } /// Parses and builds the pattern. pub fn build(&self) -> Result<Glob, Error> { let mut p = Parser { glob: &self.glob, alternates_stack: Vec::new(), branches: vec![Tokens::default()], chars: self.glob.chars().peekable(), prev: None, cur: None, found_unclosed_class: false, opts: &self.opts, }; p.parse()?; if p.branches.is_empty() { // OK because of how the the branches/alternate_stack are managed. // If we end up here, then there *must* be a bug in the parser // somewhere. unreachable!() } else if p.branches.len() > 1 { Err(Error { glob: Some(self.glob.to_string()), kind: ErrorKind::UnclosedAlternates, }) } else { let tokens = p.branches.pop().unwrap(); Ok(Glob { glob: self.glob.to_string(), re: tokens.to_regex_with(&self.opts), opts: self.opts, tokens, }) } } /// Toggle whether the pattern matches case insensitively or not. /// /// This is disabled by default. 
pub fn case_insensitive(&mut self, yes: bool) -> &mut GlobBuilder<'a> { self.opts.case_insensitive = yes; self } /// Toggle whether a literal `/` is required to match a path separator. /// /// By default this is false: `*` and `?` will match `/`. pub fn literal_separator(&mut self, yes: bool) -> &mut GlobBuilder<'a> { self.opts.literal_separator = yes; self } /// When enabled, a back slash (`\`) may be used to escape /// special characters in a glob pattern. Additionally, this will /// prevent `\` from being interpreted as a path separator on all /// platforms. /// /// This is enabled by default on platforms where `\` is not a /// path separator and disabled by default on platforms where `\` /// is a path separator. pub fn backslash_escape(&mut self, yes: bool) -> &mut GlobBuilder<'a> { self.opts.backslash_escape = yes; self } /// Toggle whether an empty pattern in a list of alternates is accepted. /// /// For example, if this is set then the glob `foo{,.txt}` will match both /// `foo` and `foo.txt`. /// /// By default this is false. pub fn empty_alternates(&mut self, yes: bool) -> &mut GlobBuilder<'a> { self.opts.empty_alternates = yes; self } /// Toggle whether unclosed character classes are allowed. When allowed, /// a `[` without a matching `]` is treated literally instead of resulting /// in a parse error. /// /// For example, if this is set then the glob `[abc` will be treated as the /// literal string `[abc` instead of returning an error. /// /// By default, this is false. Generally speaking, enabling this leads to /// worse failure modes since the glob parser becomes more permissive. You /// might want to enable this when compatibility (e.g., with POSIX glob /// implementations) is more important than good error messages. 
pub fn allow_unclosed_class(&mut self, yes: bool) -> &mut GlobBuilder<'a> { self.opts.allow_unclosed_class = yes; self } } impl Tokens { /// Convert this pattern to a string that is guaranteed to be a valid /// regular expression and will represent the matching semantics of this /// glob pattern and the options given. fn to_regex_with(&self, options: &GlobOptions) -> String { let mut re = String::new(); re.push_str("(?-u)"); if options.case_insensitive { re.push_str("(?i)"); } re.push('^'); // Special case. If the entire glob is just `**`, then it should match // everything. if self.len() == 1 && self[0] == Token::RecursivePrefix { re.push_str(".*"); re.push('$'); return re; } self.tokens_to_regex(options, &self, &mut re); re.push('$'); re } fn tokens_to_regex( &self, options: &GlobOptions, tokens: &[Token], re: &mut String, ) { for tok in tokens.iter() { match *tok { Token::Literal(c) => { re.push_str(&char_to_escaped_literal(c)); } Token::Any => { if options.literal_separator { re.push_str("[^/]"); } else { re.push_str("."); } } Token::ZeroOrMore => { if options.literal_separator { re.push_str("[^/]*"); } else { re.push_str(".*"); } } Token::RecursivePrefix => { re.push_str("(?:/?|.*/)"); } Token::RecursiveSuffix => { re.push_str("/.*"); } Token::RecursiveZeroOrMore => { re.push_str("(?:/|/.*/)"); } Token::Class { negated, ref ranges } => { re.push('['); if negated { re.push('^'); } for r in ranges { if r.0 == r.1 { // Not strictly necessary, but nicer to look at. 
re.push_str(&char_to_escaped_literal(r.0)); } else { re.push_str(&char_to_escaped_literal(r.0)); re.push('-'); re.push_str(&char_to_escaped_literal(r.1)); } } re.push(']'); } Token::Alternates(ref patterns) => { let mut parts = vec![]; for pat in patterns { let mut altre = String::new(); self.tokens_to_regex(options, &pat, &mut altre); if !altre.is_empty() || options.empty_alternates { parts.push(altre); } } // It is possible to have an empty set in which case the // resulting alternation '()' would be an error. if !parts.is_empty() { re.push_str("(?:"); re.push_str(&parts.join("|")); re.push(')'); } } } } } } /// Convert a Unicode scalar value to an escaped string suitable for use as /// a literal in a non-Unicode regex. fn char_to_escaped_literal(c: char) -> String { let mut buf = [0; 4]; let bytes = c.encode_utf8(&mut buf).as_bytes(); bytes_to_escaped_literal(bytes) } /// Converts an arbitrary sequence of bytes to a UTF-8 string. All non-ASCII /// code units are converted to their escaped form. fn bytes_to_escaped_literal(bs: &[u8]) -> String { let mut s = String::with_capacity(bs.len()); for &b in bs { if b <= 0x7F { regex_syntax::escape_into( char::from(b).encode_utf8(&mut [0; 4]), &mut s, ); } else { write!(&mut s, "\\x{:02x}", b).unwrap(); } } s } struct Parser<'a> { /// The glob to parse. glob: &'a str, /// Marks the index in `stack` where the alternation started. alternates_stack: Vec<usize>, /// The set of active alternation branches being parsed. /// Tokens are added to the end of the last one. branches: Vec<Tokens>, /// A character iterator over the glob pattern to parse. chars: std::iter::Peekable<std::str::Chars<'a>>, /// The previous character seen. prev: Option<char>, /// The current character. cur: Option<char>, /// Whether we failed to find a closing `]` for a character /// class. This can only be true when `GlobOptions::allow_unclosed_class` /// is enabled. When enabled, it is impossible to ever parse another /// character class with this glob. 
That's because classes cannot be /// nested *and* the only way this happens is when there is never a `]`. /// /// We track this state so that we don't end up spending quadratic time /// trying to parse something like `[[[[[[[[[[[[[[[[[[[[[[[...`. found_unclosed_class: bool, /// Glob options, which may influence parsing. opts: &'a GlobOptions, } impl<'a> Parser<'a> { fn error(&self, kind: ErrorKind) -> Error { Error { glob: Some(self.glob.to_string()), kind } } fn parse(&mut self) -> Result<(), Error> { while let Some(c) = self.bump() { match c { '?' => self.push_token(Token::Any)?, '*' => self.parse_star()?, '[' if !self.found_unclosed_class => self.parse_class()?, '{' => self.push_alternate()?, '}' => self.pop_alternate()?, ',' => self.parse_comma()?, '\\' => self.parse_backslash()?, c => self.push_token(Token::Literal(c))?, } } Ok(()) } fn push_alternate(&mut self) -> Result<(), Error> { self.alternates_stack.push(self.branches.len()); self.branches.push(Tokens::default()); Ok(()) } fn pop_alternate(&mut self) -> Result<(), Error> { let Some(start) = self.alternates_stack.pop() else { return Err(self.error(ErrorKind::UnopenedAlternates)); }; assert!(start <= self.branches.len()); let alts = Token::Alternates(self.branches.drain(start..).collect()); self.push_token(alts)?; Ok(()) } fn push_token(&mut self, tok: Token) -> Result<(), Error> { if let Some(ref mut pat) = self.branches.last_mut() { return Ok(pat.push(tok)); } Err(self.error(ErrorKind::UnopenedAlternates)) } fn pop_token(&mut self) -> Result<Token, Error> { if let Some(ref mut pat) = self.branches.last_mut() { return Ok(pat.pop().unwrap()); } Err(self.error(ErrorKind::UnopenedAlternates)) } fn have_tokens(&self) -> Result<bool, Error> { match self.branches.last() { None => Err(self.error(ErrorKind::UnopenedAlternates)), Some(ref pat) => Ok(!pat.is_empty()), } } fn parse_comma(&mut self) -> Result<(), Error> { // If we aren't inside a group alternation, then don't // treat commas specially. 
Otherwise, we need to start // a new alternate branch. if self.alternates_stack.is_empty() { self.push_token(Token::Literal(',')) } else { Ok(self.branches.push(Tokens::default())) } } fn parse_backslash(&mut self) -> Result<(), Error> { if self.opts.backslash_escape { match self.bump() { None => Err(self.error(ErrorKind::DanglingEscape)), Some(c) => self.push_token(Token::Literal(c)), } } else if is_separator('\\') { // Normalize all patterns to use / as a separator. self.push_token(Token::Literal('/')) } else { self.push_token(Token::Literal('\\')) } } fn parse_star(&mut self) -> Result<(), Error> { let prev = self.prev; if self.peek() != Some('*') { self.push_token(Token::ZeroOrMore)?; return Ok(()); } assert!(self.bump() == Some('*')); if !self.have_tokens()? { if !self.peek().map_or(true, is_separator) { self.push_token(Token::ZeroOrMore)?; self.push_token(Token::ZeroOrMore)?; } else { self.push_token(Token::RecursivePrefix)?; assert!(self.bump().map_or(true, is_separator)); } return Ok(()); } if !prev.map(is_separator).unwrap_or(false) { if self.branches.len() <= 1
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
true
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/globset/src/fnv.rs
crates/globset/src/fnv.rs
/// A convenience alias for creating a hash map with an FNV hasher. pub(crate) type HashMap<K, V> = std::collections::HashMap<K, V, std::hash::BuildHasherDefault<Hasher>>; /// A hasher that implements the Fowler–Noll–Vo (FNV) hash. pub(crate) struct Hasher(u64); impl Hasher { const OFFSET_BASIS: u64 = 0xcbf29ce484222325; const PRIME: u64 = 0x100000001b3; } impl Default for Hasher { fn default() -> Hasher { Hasher(Hasher::OFFSET_BASIS) } } impl std::hash::Hasher for Hasher { fn finish(&self) -> u64 { self.0 } fn write(&mut self, bytes: &[u8]) { for &byte in bytes.iter() { self.0 = self.0 ^ u64::from(byte); self.0 = self.0.wrapping_mul(Hasher::PRIME); } } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/globset/benches/bench.rs
crates/globset/benches/bench.rs
/*! This module benchmarks the glob implementation. For benchmarks on the ripgrep tool itself, see the benchsuite directory. */ #![feature(test)] extern crate test; use globset::{Candidate, Glob, GlobMatcher, GlobSet, GlobSetBuilder}; const EXT: &'static str = "some/a/bigger/path/to/the/crazy/needle.txt"; const EXT_PAT: &'static str = "*.txt"; const SHORT: &'static str = "some/needle.txt"; const SHORT_PAT: &'static str = "some/**/needle.txt"; const LONG: &'static str = "some/a/bigger/path/to/the/crazy/needle.txt"; const LONG_PAT: &'static str = "some/**/needle.txt"; fn new_glob(pat: &str) -> glob::Pattern { glob::Pattern::new(pat).unwrap() } fn new_reglob(pat: &str) -> GlobMatcher { Glob::new(pat).unwrap().compile_matcher() } fn new_reglob_many(pats: &[&str]) -> GlobSet { let mut builder = GlobSetBuilder::new(); for pat in pats { builder.add(Glob::new(pat).unwrap()); } builder.build().unwrap() } #[bench] fn ext_glob(b: &mut test::Bencher) { let pat = new_glob(EXT_PAT); b.iter(|| assert!(pat.matches(EXT))); } #[bench] fn ext_regex(b: &mut test::Bencher) { let set = new_reglob(EXT_PAT); let cand = Candidate::new(EXT); b.iter(|| assert!(set.is_match_candidate(&cand))); } #[bench] fn short_glob(b: &mut test::Bencher) { let pat = new_glob(SHORT_PAT); b.iter(|| assert!(pat.matches(SHORT))); } #[bench] fn short_regex(b: &mut test::Bencher) { let set = new_reglob(SHORT_PAT); let cand = Candidate::new(SHORT); b.iter(|| assert!(set.is_match_candidate(&cand))); } #[bench] fn long_glob(b: &mut test::Bencher) { let pat = new_glob(LONG_PAT); b.iter(|| assert!(pat.matches(LONG))); } #[bench] fn long_regex(b: &mut test::Bencher) { let set = new_reglob(LONG_PAT); let cand = Candidate::new(LONG); b.iter(|| assert!(set.is_match_candidate(&cand))); } const MANY_SHORT_GLOBS: &'static [&'static str] = &[ // Taken from a random .gitignore on my system. 
".*.swp", "tags", "target", "*.lock", "tmp", "*.csv", "*.fst", "*-got", "*.csv.idx", "words", "98m*", "dict", "test", "months", ]; const MANY_SHORT_SEARCH: &'static str = "98m-blah.csv.idx"; #[bench] fn many_short_glob(b: &mut test::Bencher) { let pats: Vec<_> = MANY_SHORT_GLOBS.iter().map(|&s| new_glob(s)).collect(); b.iter(|| { let mut count = 0; for pat in &pats { if pat.matches(MANY_SHORT_SEARCH) { count += 1; } } assert_eq!(2, count); }) } #[bench] fn many_short_regex_set(b: &mut test::Bencher) { let set = new_reglob_many(MANY_SHORT_GLOBS); b.iter(|| assert_eq!(2, set.matches(MANY_SHORT_SEARCH).iter().count())); }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/printer/src/stats.rs
crates/printer/src/stats.rs
use std::{ ops::{Add, AddAssign}, time::Duration, }; use crate::util::NiceDuration; /// Summary statistics produced at the end of a search. /// /// When statistics are reported by a printer, they correspond to all searches /// executed with that printer. #[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct Stats { elapsed: NiceDuration, searches: u64, searches_with_match: u64, bytes_searched: u64, bytes_printed: u64, matched_lines: u64, matches: u64, } impl Stats { /// Return a new value for tracking aggregate statistics across searches. /// /// All statistics are set to `0`. pub fn new() -> Stats { Stats::default() } /// Return the total amount of time elapsed. pub fn elapsed(&self) -> Duration { self.elapsed.0 } /// Return the total number of searches executed. pub fn searches(&self) -> u64 { self.searches } /// Return the total number of searches that found at least one match. pub fn searches_with_match(&self) -> u64 { self.searches_with_match } /// Return the total number of bytes searched. pub fn bytes_searched(&self) -> u64 { self.bytes_searched } /// Return the total number of bytes printed. pub fn bytes_printed(&self) -> u64 { self.bytes_printed } /// Return the total number of lines that participated in a match. /// /// When matches may contain multiple lines then this includes every line /// that is part of every match. pub fn matched_lines(&self) -> u64 { self.matched_lines } /// Return the total number of matches. /// /// There may be multiple matches per line. pub fn matches(&self) -> u64 { self.matches } /// Add to the elapsed time. pub fn add_elapsed(&mut self, duration: Duration) { self.elapsed.0 += duration; } /// Add to the number of searches executed. pub fn add_searches(&mut self, n: u64) { self.searches += n; } /// Add to the number of searches that found at least one match. pub fn add_searches_with_match(&mut self, n: u64) { self.searches_with_match += n; } /// Add to the total number of bytes searched. 
pub fn add_bytes_searched(&mut self, n: u64) { self.bytes_searched += n; } /// Add to the total number of bytes printed. pub fn add_bytes_printed(&mut self, n: u64) { self.bytes_printed += n; } /// Add to the total number of lines that participated in a match. pub fn add_matched_lines(&mut self, n: u64) { self.matched_lines += n; } /// Add to the total number of matches. pub fn add_matches(&mut self, n: u64) { self.matches += n; } } impl Add for Stats { type Output = Stats; fn add(self, rhs: Stats) -> Stats { self + &rhs } } impl<'a> Add<&'a Stats> for Stats { type Output = Stats; fn add(self, rhs: &'a Stats) -> Stats { Stats { elapsed: NiceDuration(self.elapsed.0 + rhs.elapsed.0), searches: self.searches + rhs.searches, searches_with_match: self.searches_with_match + rhs.searches_with_match, bytes_searched: self.bytes_searched + rhs.bytes_searched, bytes_printed: self.bytes_printed + rhs.bytes_printed, matched_lines: self.matched_lines + rhs.matched_lines, matches: self.matches + rhs.matches, } } } impl AddAssign for Stats { fn add_assign(&mut self, rhs: Stats) { *self += &rhs; } } impl<'a> AddAssign<&'a Stats> for Stats { fn add_assign(&mut self, rhs: &'a Stats) { self.elapsed.0 += rhs.elapsed.0; self.searches += rhs.searches; self.searches_with_match += rhs.searches_with_match; self.bytes_searched += rhs.bytes_searched; self.bytes_printed += rhs.bytes_printed; self.matched_lines += rhs.matched_lines; self.matches += rhs.matches; } } #[cfg(feature = "serde")] impl serde::Serialize for Stats { fn serialize<S: serde::Serializer>( &self, s: S, ) -> Result<S::Ok, S::Error> { use serde::ser::SerializeStruct; let mut state = s.serialize_struct("Stats", 7)?; state.serialize_field("elapsed", &self.elapsed)?; state.serialize_field("searches", &self.searches)?; state.serialize_field( "searches_with_match", &self.searches_with_match, )?; state.serialize_field("bytes_searched", &self.bytes_searched)?; state.serialize_field("bytes_printed", &self.bytes_printed)?; 
state.serialize_field("matched_lines", &self.matched_lines)?; state.serialize_field("matches", &self.matches)?; state.end() } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/printer/src/path.rs
crates/printer/src/path.rs
use std::{io, path::Path}; use termcolor::WriteColor; use crate::{ color::ColorSpecs, hyperlink::{self, HyperlinkConfig}, util::PrinterPath, }; /// A configuration for describing how paths should be written. #[derive(Clone, Debug)] struct Config { colors: ColorSpecs, hyperlink: HyperlinkConfig, separator: Option<u8>, terminator: u8, } impl Default for Config { fn default() -> Config { Config { colors: ColorSpecs::default(), hyperlink: HyperlinkConfig::default(), separator: None, terminator: b'\n', } } } /// A builder for a printer that emits file paths. #[derive(Clone, Debug)] pub struct PathPrinterBuilder { config: Config, } impl PathPrinterBuilder { /// Return a new path printer builder with a default configuration. pub fn new() -> PathPrinterBuilder { PathPrinterBuilder { config: Config::default() } } /// Create a new path printer with the current configuration that writes /// paths to the given writer. pub fn build<W: WriteColor>(&self, wtr: W) -> PathPrinter<W> { let interpolator = hyperlink::Interpolator::new(&self.config.hyperlink); PathPrinter { config: self.config.clone(), wtr, interpolator } } /// Set the user color specifications to use for coloring in this printer. /// /// A [`UserColorSpec`](crate::UserColorSpec) can be constructed from /// a string in accordance with the color specification format. See /// the `UserColorSpec` type documentation for more details on the /// format. A [`ColorSpecs`] can then be generated from zero or more /// `UserColorSpec`s. /// /// Regardless of the color specifications provided here, whether color /// is actually used or not is determined by the implementation of /// `WriteColor` provided to `build`. For example, if `termcolor::NoColor` /// is provided to `build`, then no color will ever be printed regardless /// of the color specifications provided here. /// /// This completely overrides any previous color specifications. This does /// not add to any previously provided color specifications on this /// builder. 
/// /// The default color specifications provide no styling. pub fn color_specs( &mut self, specs: ColorSpecs, ) -> &mut PathPrinterBuilder { self.config.colors = specs; self } /// Set the configuration to use for hyperlinks output by this printer. /// /// Regardless of the hyperlink format provided here, whether hyperlinks /// are actually used or not is determined by the implementation of /// `WriteColor` provided to `build`. For example, if `termcolor::NoColor` /// is provided to `build`, then no hyperlinks will ever be printed /// regardless of the format provided here. /// /// This completely overrides any previous hyperlink format. /// /// The default configuration results in not emitting any hyperlinks. pub fn hyperlink( &mut self, config: HyperlinkConfig, ) -> &mut PathPrinterBuilder { self.config.hyperlink = config; self } /// Set the path separator used when printing file paths. /// /// Typically, printing is done by emitting the file path as is. However, /// this setting provides the ability to use a different path separator /// from what the current environment has configured. /// /// A typical use for this option is to permit cygwin users on Windows to /// set the path separator to `/` instead of using the system default of /// `\`. /// /// This is disabled by default. pub fn separator(&mut self, sep: Option<u8>) -> &mut PathPrinterBuilder { self.config.separator = sep; self } /// Set the path terminator used. /// /// The path terminator is a byte that is printed after every file path /// emitted by this printer. /// /// The default path terminator is `\n`. pub fn terminator(&mut self, terminator: u8) -> &mut PathPrinterBuilder { self.config.terminator = terminator; self } } /// A printer file paths, with optional color and hyperlink support. /// /// This printer is very similar to [`Summary`](crate::Summary) in that it /// principally only emits file paths. 
The main difference is that this printer /// doesn't actually execute any search via a `Sink` implementation, and instead /// just provides a way for the caller to print paths. /// /// A caller could just print the paths themselves, but this printer handles /// a few details: /// /// * It can normalize path separators. /// * It permits configuring the terminator. /// * It allows setting the color configuration in a way that is consistent /// with the other printers in this crate. /// * It allows setting the hyperlink format in a way that is consistent /// with the other printers in this crate. #[derive(Debug)] pub struct PathPrinter<W> { config: Config, wtr: W, interpolator: hyperlink::Interpolator, } impl<W: WriteColor> PathPrinter<W> { /// Write the given path to the underlying writer. pub fn write(&mut self, path: &Path) -> io::Result<()> { let ppath = PrinterPath::new(path.as_ref()) .with_separator(self.config.separator); if !self.wtr.supports_color() { self.wtr.write_all(ppath.as_bytes())?; } else { let status = self.start_hyperlink(&ppath)?; self.wtr.set_color(self.config.colors.path())?; self.wtr.write_all(ppath.as_bytes())?; self.wtr.reset()?; self.interpolator.finish(status, &mut self.wtr)?; } self.wtr.write_all(&[self.config.terminator]) } /// Starts a hyperlink span when applicable. fn start_hyperlink( &mut self, path: &PrinterPath, ) -> io::Result<hyperlink::InterpolatorStatus> { let Some(hyperpath) = path.as_hyperlink() else { return Ok(hyperlink::InterpolatorStatus::inactive()); }; let values = hyperlink::Values::new(hyperpath); self.interpolator.begin(&values, &mut self.wtr) } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/printer/src/lib.rs
crates/printer/src/lib.rs
/*! This crate provides featureful and fast printers that interoperate with the [`grep-searcher`](https://docs.rs/grep-searcher) crate. # Brief overview The [`Standard`] printer shows results in a human readable format, and is modeled after the formats used by standard grep-like tools. Features include, but are not limited to, cross platform terminal coloring, search & replace, multi-line result handling and reporting summary statistics. The [`JSON`] printer shows results in a machine readable format. To facilitate a stream of search results, the format uses [JSON Lines](https://jsonlines.org/) by emitting a series of messages as search results are found. The [`Summary`] printer shows *aggregate* results for a single search in a human readable format, and is modeled after similar formats found in standard grep-like tools. This printer is useful for showing the total number of matches and/or printing file paths that either contain or don't contain matches. # Example This example shows how to create a "standard" printer and execute a search. ``` use { grep_regex::RegexMatcher, grep_printer::Standard, grep_searcher::Searcher, }; const SHERLOCK: &'static [u8] = b"\ For the Doctor Watsons of this world, as opposed to the Sherlock Holmeses, success in the province of detective work must always be, to a very large extent, the result of luck. Sherlock Holmes can extract a clew from a wisp of straw or a flake of cigar ash; but Doctor Watson has to have it taken out for him and dusted, and exhibited clearly, with a label attached. "; let matcher = RegexMatcher::new(r"Sherlock")?; let mut printer = Standard::new_no_color(vec![]); Searcher::new().search_slice(&matcher, SHERLOCK, printer.sink(&matcher))?; // into_inner gives us back the underlying writer we provided to // new_no_color, which is wrapped in a termcolor::NoColor. Thus, a second // into_inner gives us back the actual buffer. 
let output = String::from_utf8(printer.into_inner().into_inner())?; let expected = "\ 1:For the Doctor Watsons of this world, as opposed to the Sherlock 3:be, to a very large extent, the result of luck. Sherlock Holmes "; assert_eq!(output, expected); # Ok::<(), Box<dyn std::error::Error>>(()) ``` */ #![deny(missing_docs)] #![cfg_attr(docsrs, feature(doc_cfg))] pub use crate::{ color::{ColorError, ColorSpecs, UserColorSpec, default_color_specs}, hyperlink::{ HyperlinkAlias, HyperlinkConfig, HyperlinkEnvironment, HyperlinkFormat, HyperlinkFormatError, hyperlink_aliases, }, path::{PathPrinter, PathPrinterBuilder}, standard::{Standard, StandardBuilder, StandardSink}, stats::Stats, summary::{Summary, SummaryBuilder, SummaryKind, SummarySink}, }; #[cfg(feature = "serde")] pub use crate::json::{JSON, JSONBuilder, JSONSink}; // The maximum number of bytes to execute a search to account for look-ahead. // // This is an unfortunate kludge since PCRE2 doesn't provide a way to search // a substring of some input while accounting for look-ahead. In theory, we // could refactor the various 'grep' interfaces to account for it, but it would // be a large change. So for now, we just let PCRE2 go looking a bit for a // match without searching the entire rest of the contents. // // Note that this kludge is only active in multi-line mode. const MAX_LOOK_AHEAD: usize = 128; #[macro_use] mod macros; mod color; mod counter; mod hyperlink; #[cfg(feature = "serde")] mod json; #[cfg(feature = "serde")] mod jsont; mod path; mod standard; mod stats; mod summary; mod util;
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/printer/src/standard.rs
crates/printer/src/standard.rs
use std::{ cell::{Cell, RefCell}, cmp, io::{self, Write}, path::Path, sync::Arc, time::Instant, }; use { bstr::ByteSlice, grep_matcher::{Match, Matcher}, grep_searcher::{ LineStep, Searcher, Sink, SinkContext, SinkFinish, SinkMatch, }, termcolor::{ColorSpec, NoColor, WriteColor}, }; use crate::{ color::ColorSpecs, counter::CounterWriter, hyperlink::{self, HyperlinkConfig}, stats::Stats, util::{ DecimalFormatter, PrinterPath, Replacer, Sunk, find_iter_at_in_context, trim_ascii_prefix, trim_line_terminator, }, }; /// The configuration for the standard printer. /// /// This is manipulated by the StandardBuilder and then referenced by the /// actual implementation. Once a printer is build, the configuration is frozen /// and cannot changed. #[derive(Debug, Clone)] struct Config { colors: ColorSpecs, hyperlink: HyperlinkConfig, stats: bool, heading: bool, path: bool, only_matching: bool, per_match: bool, per_match_one_line: bool, replacement: Arc<Option<Vec<u8>>>, max_columns: Option<u64>, max_columns_preview: bool, column: bool, byte_offset: bool, trim_ascii: bool, separator_search: Arc<Option<Vec<u8>>>, separator_context: Arc<Option<Vec<u8>>>, separator_field_match: Arc<Vec<u8>>, separator_field_context: Arc<Vec<u8>>, separator_path: Option<u8>, path_terminator: Option<u8>, } impl Default for Config { fn default() -> Config { Config { colors: ColorSpecs::default(), hyperlink: HyperlinkConfig::default(), stats: false, heading: false, path: true, only_matching: false, per_match: false, per_match_one_line: false, replacement: Arc::new(None), max_columns: None, max_columns_preview: false, column: false, byte_offset: false, trim_ascii: false, separator_search: Arc::new(None), separator_context: Arc::new(Some(b"--".to_vec())), separator_field_match: Arc::new(b":".to_vec()), separator_field_context: Arc::new(b"-".to_vec()), separator_path: None, path_terminator: None, } } } /// A builder for the "standard" grep-like printer. 
/// /// The builder permits configuring how the printer behaves. Configurable /// behavior includes, but is not limited to, limiting the number of matches, /// tweaking separators, executing pattern replacements, recording statistics /// and setting colors. /// /// Some configuration options, such as the display of line numbers or /// contextual lines, are drawn directly from the /// `grep_searcher::Searcher`'s configuration. /// /// Once a `Standard` printer is built, its configuration cannot be changed. #[derive(Clone, Debug)] pub struct StandardBuilder { config: Config, } impl StandardBuilder { /// Return a new builder for configuring the standard printer. pub fn new() -> StandardBuilder { StandardBuilder { config: Config::default() } } /// Build a printer using any implementation of `termcolor::WriteColor`. /// /// The implementation of `WriteColor` used here controls whether colors /// are used or not when colors have been configured using the /// `color_specs` method. /// /// For maximum portability, callers should generally use either /// `termcolor::StandardStream` or `termcolor::BufferedStandardStream` /// where appropriate, which will automatically enable colors on Windows /// when possible. /// /// However, callers may also provide an arbitrary writer using the /// `termcolor::Ansi` or `termcolor::NoColor` wrappers, which always enable /// colors via ANSI escapes or always disable colors, respectively. /// /// As a convenience, callers may use `build_no_color` to automatically /// select the `termcolor::NoColor` wrapper to avoid needing to import /// from `termcolor` explicitly. pub fn build<W: WriteColor>(&self, wtr: W) -> Standard<W> { Standard { config: self.config.clone(), wtr: RefCell::new(CounterWriter::new(wtr)), matches: vec![], } } /// Build a printer from any implementation of `io::Write` and never emit /// any colors, regardless of the user color specification settings. 
/// /// This is a convenience routine for /// `StandardBuilder::build(termcolor::NoColor::new(wtr))`. pub fn build_no_color<W: io::Write>( &self, wtr: W, ) -> Standard<NoColor<W>> { self.build(NoColor::new(wtr)) } /// Set the user color specifications to use for coloring in this printer. /// /// A [`UserColorSpec`](crate::UserColorSpec) can be constructed from /// a string in accordance with the color specification format. See /// the `UserColorSpec` type documentation for more details on the /// format. A [`ColorSpecs`] can then be generated from zero or more /// `UserColorSpec`s. /// /// Regardless of the color specifications provided here, whether color /// is actually used or not is determined by the implementation of /// `WriteColor` provided to `build`. For example, if `termcolor::NoColor` /// is provided to `build`, then no color will ever be printed regardless /// of the color specifications provided here. /// /// This completely overrides any previous color specifications. This does /// not add to any previously provided color specifications on this /// builder. pub fn color_specs(&mut self, specs: ColorSpecs) -> &mut StandardBuilder { self.config.colors = specs; self } /// Set the configuration to use for hyperlinks output by this printer. /// /// Regardless of the hyperlink format provided here, whether hyperlinks /// are actually used or not is determined by the implementation of /// `WriteColor` provided to `build`. For example, if `termcolor::NoColor` /// is provided to `build`, then no hyperlinks will ever be printed /// regardless of the format provided here. /// /// This completely overrides any previous hyperlink format. /// /// The default configuration results in not emitting any hyperlinks. pub fn hyperlink( &mut self, config: HyperlinkConfig, ) -> &mut StandardBuilder { self.config.hyperlink = config; self } /// Enable the gathering of various aggregate statistics. 
/// /// When this is enabled (it's disabled by default), statistics will be /// gathered for all uses of `Standard` printer returned by `build`, /// including but not limited to, the total number of matches, the total /// number of bytes searched and the total number of bytes printed. /// /// Aggregate statistics can be accessed via the sink's /// [`StandardSink::stats`] method. /// /// When this is enabled, this printer may need to do extra work in order /// to compute certain statistics, which could cause the search to take /// longer. /// /// For a complete description of available statistics, see [`Stats`]. pub fn stats(&mut self, yes: bool) -> &mut StandardBuilder { self.config.stats = yes; self } /// Enable the use of "headings" in the printer. /// /// When this is enabled, and if a file path has been given to the printer, /// then the file path will be printed once on its own line before showing /// any matches. If the heading is not the first thing emitted by the /// printer, then a line terminator is printed before the heading. /// /// By default, this option is disabled. When disabled, the printer will /// not show any heading and will instead print the file path (if one is /// given) on the same line as each matching (or context) line. pub fn heading(&mut self, yes: bool) -> &mut StandardBuilder { self.config.heading = yes; self } /// When enabled, if a path was given to the printer, then it is shown in /// the output (either as a heading or as a prefix to each matching line). /// When disabled, then no paths are ever included in the output even when /// a path is provided to the printer. /// /// This is enabled by default. pub fn path(&mut self, yes: bool) -> &mut StandardBuilder { self.config.path = yes; self } /// Only print the specific matches instead of the entire line containing /// each match. Each match is printed on its own line. 
When multi line /// search is enabled, then matches spanning multiple lines are printed /// such that only the matching portions of each line are shown. pub fn only_matching(&mut self, yes: bool) -> &mut StandardBuilder { self.config.only_matching = yes; self } /// Print at least one line for every match. /// /// This is similar to the `only_matching` option, except the entire line /// is printed for each match. This is typically useful in conjunction with /// the `column` option, which will show the starting column number for /// every match on every line. /// /// When multi-line mode is enabled, each match is printed, including every /// line in the match. As with single line matches, if a line contains /// multiple matches (even if only partially), then that line is printed /// once for each match it participates in, assuming it's the first line in /// that match. In multi-line mode, column numbers only indicate the start /// of a match. Subsequent lines in a multi-line match always have a column /// number of `1`. /// /// When a match contains multiple lines, enabling `per_match_one_line` /// will cause only the first line each in match to be printed. pub fn per_match(&mut self, yes: bool) -> &mut StandardBuilder { self.config.per_match = yes; self } /// Print at most one line per match when `per_match` is enabled. /// /// By default, every line in each match found is printed when `per_match` /// is enabled. However, this is sometimes undesirable, e.g., when you /// only ever want one line per match. /// /// This is only applicable when multi-line matching is enabled, since /// otherwise, matches are guaranteed to span one line. /// /// This is disabled by default. pub fn per_match_one_line(&mut self, yes: bool) -> &mut StandardBuilder { self.config.per_match_one_line = yes; self } /// Set the bytes that will be used to replace each occurrence of a match /// found. 
/// /// The replacement bytes given may include references to capturing groups, /// which may either be in index form (e.g., `$2`) or can reference named /// capturing groups if present in the original pattern (e.g., `$foo`). /// /// For documentation on the full format, please see the `Capture` trait's /// `interpolate` method in the /// [grep-printer](https://docs.rs/grep-printer) crate. pub fn replacement( &mut self, replacement: Option<Vec<u8>>, ) -> &mut StandardBuilder { self.config.replacement = Arc::new(replacement); self } /// Set the maximum number of columns allowed for each line printed. A /// single column is heuristically defined as a single byte. /// /// If a line is found which exceeds this maximum, then it is replaced /// with a message indicating that the line has been omitted. /// /// The default is to not specify a limit, in which each matching or /// contextual line is printed regardless of how long it is. pub fn max_columns(&mut self, limit: Option<u64>) -> &mut StandardBuilder { self.config.max_columns = limit; self } /// When enabled, if a line is found to be over the configured maximum /// column limit (measured in terms of bytes), then a preview of the long /// line will be printed instead. /// /// The preview will correspond to the first `N` *grapheme clusters* of /// the line, where `N` is the limit configured by `max_columns`. /// /// If no limit is set, then enabling this has no effect. /// /// This is disabled by default. pub fn max_columns_preview(&mut self, yes: bool) -> &mut StandardBuilder { self.config.max_columns_preview = yes; self } /// Print the column number of the first match in a line. /// /// This option is convenient for use with `per_match` which will print a /// line for every match along with the starting offset for that match. /// /// Column numbers are computed in terms of bytes from the start of the /// line being printed. /// /// This is disabled by default. 
pub fn column(&mut self, yes: bool) -> &mut StandardBuilder { self.config.column = yes; self } /// Print the absolute byte offset of the beginning of each line printed. /// /// The absolute byte offset starts from the beginning of each search and /// is zero based. /// /// If the `only_matching` option is set, then this will print the absolute /// byte offset of the beginning of each match. pub fn byte_offset(&mut self, yes: bool) -> &mut StandardBuilder { self.config.byte_offset = yes; self } /// When enabled, all lines will have prefix ASCII whitespace trimmed /// before being written. /// /// This is disabled by default. pub fn trim_ascii(&mut self, yes: bool) -> &mut StandardBuilder { self.config.trim_ascii = yes; self } /// Set the separator used between sets of search results. /// /// When this is set, then it will be printed on its own line immediately /// before the results for a single search if and only if a previous search /// had already printed results. In effect, this permits showing a divider /// between sets of search results that does not appear at the beginning /// or end of all search results. /// /// To reproduce the classic grep format, this is typically set to `--` /// (the same as the context separator) if and only if contextual lines /// have been requested, but disabled otherwise. /// /// By default, this is disabled. pub fn separator_search( &mut self, sep: Option<Vec<u8>>, ) -> &mut StandardBuilder { self.config.separator_search = Arc::new(sep); self } /// Set the separator used between discontiguous runs of search context, /// but only when the searcher is configured to report contextual lines. /// /// The separator is always printed on its own line, even if it's empty. /// /// If no separator is set, then nothing is printed when a context break /// occurs. /// /// By default, this is set to `--`. 
pub fn separator_context( &mut self, sep: Option<Vec<u8>>, ) -> &mut StandardBuilder { self.config.separator_context = Arc::new(sep); self } /// Set the separator used between fields emitted for matching lines. /// /// For example, when the searcher has line numbers enabled, this printer /// will print the line number before each matching line. The bytes given /// here will be written after the line number but before the matching /// line. /// /// By default, this is set to `:`. pub fn separator_field_match( &mut self, sep: Vec<u8>, ) -> &mut StandardBuilder { self.config.separator_field_match = Arc::new(sep); self } /// Set the separator used between fields emitted for context lines. /// /// For example, when the searcher has line numbers enabled, this printer /// will print the line number before each context line. The bytes given /// here will be written after the line number but before the context /// line. /// /// By default, this is set to `-`. pub fn separator_field_context( &mut self, sep: Vec<u8>, ) -> &mut StandardBuilder { self.config.separator_field_context = Arc::new(sep); self } /// Set the path separator used when printing file paths. /// /// When a printer is configured with a file path, and when a match is /// found, that file path will be printed (either as a heading or as a /// prefix to each matching or contextual line, depending on other /// configuration settings). Typically, printing is done by emitting the /// file path as is. However, this setting provides the ability to use a /// different path separator from what the current environment has /// configured. /// /// A typical use for this option is to permit cygwin users on Windows to /// set the path separator to `/` instead of using the system default of /// `\`. pub fn separator_path(&mut self, sep: Option<u8>) -> &mut StandardBuilder { self.config.separator_path = sep; self } /// Set the path terminator used. 
/// /// The path terminator is a byte that is printed after every file path /// emitted by this printer. /// /// If no path terminator is set (the default), then paths are terminated /// by either new lines (for when `heading` is enabled) or the match or /// context field separators (e.g., `:` or `-`). pub fn path_terminator( &mut self, terminator: Option<u8>, ) -> &mut StandardBuilder { self.config.path_terminator = terminator; self } } /// The standard printer, which implements grep-like formatting, including /// color support. /// /// A default printer can be created with either of the `Standard::new` or /// `Standard::new_no_color` constructors. However, there are a considerable /// number of options that configure this printer's output. Those options can /// be configured using [`StandardBuilder`]. /// /// This type is generic over `W`, which represents any implementation /// of the `termcolor::WriteColor` trait. If colors are not desired, /// then the `new_no_color` constructor can be used, or, alternatively, /// the `termcolor::NoColor` adapter can be used to wrap any `io::Write` /// implementation without enabling any colors. #[derive(Clone, Debug)] pub struct Standard<W> { config: Config, wtr: RefCell<CounterWriter<W>>, matches: Vec<Match>, } impl<W: WriteColor> Standard<W> { /// Return a standard printer with a default configuration that writes /// matches to the given writer. /// /// The writer should be an implementation of `termcolor::WriteColor` /// and not just a bare implementation of `io::Write`. To use a normal /// `io::Write` implementation (simultaneously sacrificing colors), use /// the `new_no_color` constructor. pub fn new(wtr: W) -> Standard<W> { StandardBuilder::new().build(wtr) } } impl<W: io::Write> Standard<NoColor<W>> { /// Return a standard printer with a default configuration that writes /// matches to the given writer. /// /// The writer can be any implementation of `io::Write`. 
With this /// constructor, the printer will never emit colors. pub fn new_no_color(wtr: W) -> Standard<NoColor<W>> { StandardBuilder::new().build_no_color(wtr) } } impl<W: WriteColor> Standard<W> { /// Return an implementation of `Sink` for the standard printer. /// /// This does not associate the printer with a file path, which means this /// implementation will never print a file path along with the matches. pub fn sink<'s, M: Matcher>( &'s mut self, matcher: M, ) -> StandardSink<'static, 's, M, W> { let interpolator = hyperlink::Interpolator::new(&self.config.hyperlink); let stats = if self.config.stats { Some(Stats::new()) } else { None }; let needs_match_granularity = self.needs_match_granularity(); StandardSink { matcher, standard: self, replacer: Replacer::new(), interpolator, path: None, start_time: Instant::now(), match_count: 0, binary_byte_offset: None, stats, needs_match_granularity, } } /// Return an implementation of `Sink` associated with a file path. /// /// When the printer is associated with a path, then it may, depending on /// its configuration, print the path along with the matches found. pub fn sink_with_path<'p, 's, M, P>( &'s mut self, matcher: M, path: &'p P, ) -> StandardSink<'p, 's, M, W> where M: Matcher, P: ?Sized + AsRef<Path>, { if !self.config.path { return self.sink(matcher); } let interpolator = hyperlink::Interpolator::new(&self.config.hyperlink); let stats = if self.config.stats { Some(Stats::new()) } else { None }; let ppath = PrinterPath::new(path.as_ref()) .with_separator(self.config.separator_path); let needs_match_granularity = self.needs_match_granularity(); StandardSink { matcher, standard: self, replacer: Replacer::new(), interpolator, path: Some(ppath), start_time: Instant::now(), match_count: 0, binary_byte_offset: None, stats, needs_match_granularity, } } /// Returns true if and only if the configuration of the printer requires /// us to find each individual match in the lines reported by the searcher. 
/// /// We care about this distinction because finding each individual match /// costs more, so we only do it when we need to. fn needs_match_granularity(&self) -> bool { let supports_color = self.wtr.borrow().supports_color(); let match_colored = !self.config.colors.matched().is_none(); // Coloring requires identifying each individual match. (supports_color && match_colored) // The column feature requires finding the position of the first match. || self.config.column // Requires finding each match for performing replacement. || self.config.replacement.is_some() // Emitting a line for each match requires finding each match. || self.config.per_match // Emitting only the match requires finding each match. || self.config.only_matching // Computing certain statistics requires finding each match. || self.config.stats } } impl<W> Standard<W> { /// Returns true if and only if this printer has written at least one byte /// to the underlying writer during any of the previous searches. pub fn has_written(&self) -> bool { self.wtr.borrow().total_count() > 0 } /// Return a mutable reference to the underlying writer. pub fn get_mut(&mut self) -> &mut W { self.wtr.get_mut().get_mut() } /// Consume this printer and return back ownership of the underlying /// writer. pub fn into_inner(self) -> W { self.wtr.into_inner().into_inner() } } /// An implementation of `Sink` associated with a matcher and an optional file /// path for the standard printer. /// /// A `Sink` can be created via the [`Standard::sink`] or /// [`Standard::sink_with_path`] methods, depending on whether you want to /// include a file path in the printer's output. /// /// Building a `StandardSink` is cheap, and callers should create a new one /// for each thing that is searched. After a search has completed, callers may /// query this sink for information such as whether a match occurred or whether /// binary data was found (and if so, the offset at which it occurred). 
/// /// This type is generic over a few type parameters: /// /// * `'p` refers to the lifetime of the file path, if one is provided. When /// no file path is given, then this is `'static`. /// * `'s` refers to the lifetime of the [`Standard`] printer that this type /// borrows. /// * `M` refers to the type of matcher used by /// `grep_searcher::Searcher` that is reporting results to this sink. /// * `W` refers to the underlying writer that this printer is writing its /// output to. #[derive(Debug)] pub struct StandardSink<'p, 's, M: Matcher, W> { matcher: M, standard: &'s mut Standard<W>, replacer: Replacer<M>, interpolator: hyperlink::Interpolator, path: Option<PrinterPath<'p>>, start_time: Instant, match_count: u64, binary_byte_offset: Option<u64>, stats: Option<Stats>, needs_match_granularity: bool, } impl<'p, 's, M: Matcher, W: WriteColor> StandardSink<'p, 's, M, W> { /// Returns true if and only if this printer received a match in the /// previous search. /// /// This is unaffected by the result of searches before the previous /// search on this sink. pub fn has_match(&self) -> bool { self.match_count > 0 } /// Return the total number of matches reported to this sink. /// /// This corresponds to the number of times `Sink::matched` is called /// on the previous search. /// /// This is unaffected by the result of searches before the previous /// search on this sink. pub fn match_count(&self) -> u64 { self.match_count } /// If binary data was found in the previous search, this returns the /// offset at which the binary data was first detected. /// /// The offset returned is an absolute offset relative to the entire /// set of bytes searched. /// /// This is unaffected by the result of searches before the previous /// search. e.g., If the search prior to the previous search found binary /// data but the previous search found no binary data, then this will /// return `None`. 
pub fn binary_byte_offset(&self) -> Option<u64> { self.binary_byte_offset } /// Return a reference to the stats produced by the printer for all /// searches executed on this sink. /// /// This only returns stats if they were requested via the /// [`StandardBuilder`] configuration. pub fn stats(&self) -> Option<&Stats> { self.stats.as_ref() } /// Execute the matcher over the given bytes and record the match /// locations if the current configuration demands match granularity. fn record_matches( &mut self, searcher: &Searcher, bytes: &[u8], range: std::ops::Range<usize>, ) -> io::Result<()> { self.standard.matches.clear(); if !self.needs_match_granularity { return Ok(()); } // If printing requires knowing the location of each individual match, // then compute and stored those right now for use later. While this // adds an extra copy for storing the matches, we do amortize the // allocation for it and this greatly simplifies the printing logic to // the extent that it's easy to ensure that we never do more than // one search to find the matches (well, for replacements, we do one // additional search to perform the actual replacement). let matches = &mut self.standard.matches; find_iter_at_in_context( searcher, &self.matcher, bytes, range.clone(), |m| { let (s, e) = (m.start() - range.start, m.end() - range.start); matches.push(Match::new(s, e)); true }, )?; // Don't report empty matches appearing at the end of the bytes. if !matches.is_empty() && matches.last().unwrap().is_empty() && matches.last().unwrap().start() >= range.end { matches.pop().unwrap(); } Ok(()) } /// If the configuration specifies a replacement, then this executes the /// replacement, lazily allocating memory if necessary. /// /// To access the result of a replacement, use `replacer.replacement()`. 
fn replace( &mut self, searcher: &Searcher, bytes: &[u8], range: std::ops::Range<usize>, ) -> io::Result<()> { self.replacer.clear(); if self.standard.config.replacement.is_some() { let replacement = (*self.standard.config.replacement).as_ref().unwrap(); self.replacer.replace_all( searcher, &self.matcher, bytes, range, replacement, )?; } Ok(()) } } impl<'p, 's, M: Matcher, W: WriteColor> Sink for StandardSink<'p, 's, M, W> { type Error = io::Error; fn matched( &mut self, searcher: &Searcher, mat: &SinkMatch<'_>, ) -> Result<bool, io::Error> { self.match_count += 1; self.record_matches( searcher, mat.buffer(), mat.bytes_range_in_buffer(), )?; self.replace(searcher, mat.buffer(), mat.bytes_range_in_buffer())?; if let Some(ref mut stats) = self.stats { stats.add_matches(self.standard.matches.len() as u64); stats.add_matched_lines(mat.lines().count() as u64); } if searcher.binary_detection().convert_byte().is_some() { if self.binary_byte_offset.is_some() { return Ok(false); } } StandardImpl::from_match(searcher, self, mat).sink()?; Ok(true) } fn context( &mut self, searcher: &Searcher, ctx: &SinkContext<'_>, ) -> Result<bool, io::Error> { self.standard.matches.clear(); self.replacer.clear(); if searcher.invert_match() { self.record_matches(searcher, ctx.bytes(), 0..ctx.bytes().len())?; self.replace(searcher, ctx.bytes(), 0..ctx.bytes().len())?; } if searcher.binary_detection().convert_byte().is_some() { if self.binary_byte_offset.is_some() { return Ok(false); } } StandardImpl::from_context(searcher, self, ctx).sink()?; Ok(true) } fn context_break( &mut self, searcher: &Searcher, ) -> Result<bool, io::Error> { StandardImpl::new(searcher, self).write_context_separator()?; Ok(true) } fn binary_data( &mut self, searcher: &Searcher, binary_byte_offset: u64, ) -> Result<bool, io::Error> { if searcher.binary_detection().quit_byte().is_some() { if let Some(ref path) = self.path { log::debug!( "ignoring {path}: found binary data at \ offset {binary_byte_offset}", path = 
path.as_path().display(), ); } } self.binary_byte_offset = Some(binary_byte_offset); Ok(true) } fn begin(&mut self, _searcher: &Searcher) -> Result<bool, io::Error> { self.standard.wtr.borrow_mut().reset_count(); self.start_time = Instant::now(); self.match_count = 0; self.binary_byte_offset = None; Ok(true) } fn finish( &mut self, searcher: &Searcher, finish: &SinkFinish, ) -> Result<(), io::Error> { if let Some(offset) = self.binary_byte_offset { StandardImpl::new(searcher, self).write_binary_message(offset)?; } if let Some(stats) = self.stats.as_mut() { stats.add_elapsed(self.start_time.elapsed()); stats.add_searches(1); if self.match_count > 0 { stats.add_searches_with_match(1); } stats.add_bytes_searched(finish.byte_count()); stats.add_bytes_printed(self.standard.wtr.borrow().count()); } Ok(()) } } /// The actual implementation of the standard printer. This couples together /// the searcher, the sink implementation and information about the match. /// /// A StandardImpl is initialized every time a match or a contextual line is /// reported. #[derive(Debug)] struct StandardImpl<'a, M: Matcher, W> { searcher: &'a Searcher, sink: &'a StandardSink<'a, 'a, M, W>, sunk: Sunk<'a>, /// Set to true if and only if we are writing a match with color. in_color_match: Cell<bool>, }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
true
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/printer/src/json.rs
crates/printer/src/json.rs
use std::{ io::{self, Write}, path::Path, sync::Arc, time::Instant, }; use { grep_matcher::{Match, Matcher}, grep_searcher::{Searcher, Sink, SinkContext, SinkFinish, SinkMatch}, serde_json as json, }; use crate::{ counter::CounterWriter, jsont, stats::Stats, util::Replacer, util::find_iter_at_in_context, }; /// The configuration for the JSON printer. /// /// This is manipulated by the JSONBuilder and then referenced by the actual /// implementation. Once a printer is build, the configuration is frozen and /// cannot changed. #[derive(Debug, Clone)] struct Config { pretty: bool, always_begin_end: bool, replacement: Arc<Option<Vec<u8>>>, } impl Default for Config { fn default() -> Config { Config { pretty: false, always_begin_end: false, replacement: Arc::new(None), } } } /// A builder for a JSON lines printer. /// /// The builder permits configuring how the printer behaves. The JSON printer /// has fewer configuration options than the standard printer because it is /// a structured format, and the printer always attempts to find the most /// information possible. /// /// Some configuration options, such as whether line numbers are included or /// whether contextual lines are shown, are drawn directly from the /// `grep_searcher::Searcher`'s configuration. /// /// Once a `JSON` printer is built, its configuration cannot be changed. #[derive(Clone, Debug)] pub struct JSONBuilder { config: Config, } impl JSONBuilder { /// Return a new builder for configuring the JSON printer. pub fn new() -> JSONBuilder { JSONBuilder { config: Config::default() } } /// Create a JSON printer that writes results to the given writer. pub fn build<W: io::Write>(&self, wtr: W) -> JSON<W> { JSON { config: self.config.clone(), wtr: CounterWriter::new(wtr), matches: vec![], } } /// Print JSON in a pretty printed format. /// /// Enabling this will no longer produce a "JSON lines" format, in that /// each JSON object printed may span multiple lines. /// /// This is disabled by default. 
pub fn pretty(&mut self, yes: bool) -> &mut JSONBuilder { self.config.pretty = yes; self } /// When enabled, the `begin` and `end` messages are always emitted, even /// when no match is found. /// /// When disabled, the `begin` and `end` messages are only shown if there /// is at least one `match` or `context` message. /// /// This is disabled by default. pub fn always_begin_end(&mut self, yes: bool) -> &mut JSONBuilder { self.config.always_begin_end = yes; self } /// Set the bytes that will be used to replace each occurrence of a match /// found. /// /// The replacement bytes given may include references to capturing groups, /// which may either be in index form (e.g., `$2`) or can reference named /// capturing groups if present in the original pattern (e.g., `$foo`). /// /// For documentation on the full format, please see the `Capture` trait's /// `interpolate` method in the /// [grep-printer](https://docs.rs/grep-printer) crate. pub fn replacement( &mut self, replacement: Option<Vec<u8>>, ) -> &mut JSONBuilder { self.config.replacement = Arc::new(replacement); self } } /// The JSON printer, which emits results in a JSON lines format. /// /// This type is generic over `W`, which represents any implementation of /// the standard library `io::Write` trait. /// /// # Format /// /// This section describes the JSON format used by this printer. /// /// To skip the rigamarole, take a look at the /// [example](#example) /// at the end. /// /// ## Overview /// /// The format of this printer is the [JSON Lines](https://jsonlines.org/) /// format. Specifically, this printer emits a sequence of messages, where /// each message is encoded as a single JSON value on a single line. There are /// four different types of messages (and this number may expand over time): /// /// * **begin** - A message that indicates a file is being searched. /// * **end** - A message the indicates a file is done being searched. This /// message also include summary statistics about the search. 
/// * **match** - A message that indicates a match was found. This includes /// the text and offsets of the match. /// * **context** - A message that indicates a contextual line was found. /// This includes the text of the line, along with any match information if /// the search was inverted. /// /// Every message is encoded in the same envelope format, which includes a tag /// indicating the message type along with an object for the payload: /// /// ```json /// { /// "type": "{begin|end|match|context}", /// "data": { ... } /// } /// ``` /// /// The message itself is encoded in the envelope's `data` key. /// /// ## Text encoding /// /// Before describing each message format, we first must briefly discuss text /// encoding, since it factors into every type of message. In particular, JSON /// may only be encoded in UTF-8, UTF-16 or UTF-32. For the purposes of this /// printer, we need only worry about UTF-8. The problem here is that searching /// is not limited to UTF-8 exclusively, which in turn implies that matches /// may be reported that contain invalid UTF-8. Moreover, this printer may /// also print file paths, and the encoding of file paths is itself not /// guaranteed to be valid UTF-8. Therefore, this printer must deal with the /// presence of invalid UTF-8 somehow. The printer could silently ignore such /// things completely, or even lossily transcode invalid UTF-8 to valid UTF-8 /// by replacing all invalid sequences with the Unicode replacement character. /// However, this would prevent consumers of this format from accessing the /// original data in a non-lossy way. /// /// Therefore, this printer will emit valid UTF-8 encoded bytes as normal /// JSON strings and otherwise base64 encode data that isn't valid UTF-8. To /// communicate whether this process occurs or not, strings are keyed by the /// name `text` where as arbitrary bytes are keyed by `bytes`. 
/// /// For example, when a path is included in a message, it is formatted like so, /// if and only if the path is valid UTF-8: /// /// ```json /// { /// "path": { /// "text": "/home/ubuntu/lib.rs" /// } /// } /// ``` /// /// If instead our path was `/home/ubuntu/lib\xFF.rs`, where the `\xFF` byte /// makes it invalid UTF-8, the path would instead be encoded like so: /// /// ```json /// { /// "path": { /// "bytes": "L2hvbWUvdWJ1bnR1L2xpYv8ucnM=" /// } /// } /// ``` /// /// This same representation is used for reporting matches as well. /// /// The printer guarantees that the `text` field is used whenever the /// underlying bytes are valid UTF-8. /// /// ## Wire format /// /// This section documents the wire format emitted by this printer, starting /// with the four types of messages. /// /// Each message has its own format, and is contained inside an envelope that /// indicates the type of message. The envelope has these fields: /// /// * **type** - A string indicating the type of this message. It may be one /// of four possible strings: `begin`, `end`, `match` or `context`. This /// list may expand over time. /// * **data** - The actual message data. The format of this field depends on /// the value of `type`. The possible message formats are /// [`begin`](#message-begin), /// [`end`](#message-end), /// [`match`](#message-match), /// [`context`](#message-context). /// /// #### Message: **begin** /// /// This message indicates that a search has begun. It has these fields: /// /// * **path** - An /// [arbitrary data object](#object-arbitrary-data) /// representing the file path corresponding to the search, if one is /// present. If no file path is available, then this field is `null`. /// /// #### Message: **end** /// /// This message indicates that a search has finished. It has these fields: /// /// * **path** - An /// [arbitrary data object](#object-arbitrary-data) /// representing the file path corresponding to the search, if one is /// present. 
If no file path is available, then this field is `null`. /// * **binary_offset** - The absolute offset in the data searched /// corresponding to the place at which binary data was detected. If no /// binary data was detected (or if binary detection was disabled), then this /// field is `null`. /// * **stats** - A [`stats` object](#object-stats) that contains summary /// statistics for the previous search. /// /// #### Message: **match** /// /// This message indicates that a match has been found. A match generally /// corresponds to a single line of text, although it may correspond to /// multiple lines if the search can emit matches over multiple lines. It /// has these fields: /// /// * **path** - An /// [arbitrary data object](#object-arbitrary-data) /// representing the file path corresponding to the search, if one is /// present. If no file path is available, then this field is `null`. /// * **lines** - An /// [arbitrary data object](#object-arbitrary-data) /// representing one or more lines contained in this match. /// * **line_number** - If the searcher has been configured to report line /// numbers, then this corresponds to the line number of the first line /// in `lines`. If no line numbers are available, then this is `null`. /// * **absolute_offset** - The absolute byte offset corresponding to the start /// of `lines` in the data being searched. /// * **submatches** - An array of [`submatch` objects](#object-submatch) /// corresponding to matches in `lines`. The offsets included in each /// `submatch` correspond to byte offsets into `lines`. (If `lines` is base64 /// encoded, then the byte offsets correspond to the data after base64 /// decoding.) The `submatch` objects are guaranteed to be sorted by their /// starting offsets. Note that it is possible for this array to be empty, /// for example, when searching reports inverted matches. If the configuration /// specifies a replacement, the resulting replacement text is also present. 
/// /// #### Message: **context** /// /// This message indicates that a contextual line has been found. A contextual /// line is a line that doesn't contain a match, but is generally adjacent to /// a line that does contain a match. The precise way in which contextual lines /// are reported is determined by the searcher. It has these fields, which are /// exactly the same fields found in a [`match`](#message-match): /// /// * **path** - An /// [arbitrary data object](#object-arbitrary-data) /// representing the file path corresponding to the search, if one is /// present. If no file path is available, then this field is `null`. /// * **lines** - An /// [arbitrary data object](#object-arbitrary-data) /// representing one or more lines contained in this context. This includes /// line terminators, if they're present. /// * **line_number** - If the searcher has been configured to report line /// numbers, then this corresponds to the line number of the first line /// in `lines`. If no line numbers are available, then this is `null`. /// * **absolute_offset** - The absolute byte offset corresponding to the start /// of `lines` in the data being searched. /// * **submatches** - An array of [`submatch` objects](#object-submatch) /// corresponding to matches in `lines`. The offsets included in each /// `submatch` correspond to byte offsets into `lines`. (If `lines` is base64 /// encoded, then the byte offsets correspond to the data after base64 /// decoding.) The `submatch` objects are guaranteed to be sorted by /// their starting offsets. Note that it is possible for this array to be /// non-empty, for example, when searching reports inverted matches such that /// the original matcher could match things in the contextual lines. If the /// configuration specifies a replacemement, the resulting replacement text /// is also present. /// /// #### Object: **submatch** /// /// This object describes submatches found within `match` or `context` /// messages. 
The `start` and `end` fields indicate the half-open interval on /// which the match occurs (`start` is included, but `end` is not). It is /// guaranteed that `start <= end`. It has these fields: /// /// * **match** - An /// [arbitrary data object](#object-arbitrary-data) /// corresponding to the text in this submatch. /// * **start** - A byte offset indicating the start of this match. This offset /// is generally reported in terms of the parent object's data. For example, /// the `lines` field in the /// [`match`](#message-match) or [`context`](#message-context) /// messages. /// * **end** - A byte offset indicating the end of this match. This offset /// is generally reported in terms of the parent object's data. For example, /// the `lines` field in the /// [`match`](#message-match) or [`context`](#message-context) /// messages. /// * **replacement** (optional) - An /// [arbitrary data object](#object-arbitrary-data) corresponding to the /// replacement text for this submatch, if the configuration specifies /// a replacement. /// /// #### Object: **stats** /// /// This object is included in messages and contains summary statistics about /// a search. It has these fields: /// /// * **elapsed** - A [`duration` object](#object-duration) describing the /// length of time that elapsed while performing the search. /// * **searches** - The number of searches that have run. For this printer, /// this value is always `1`. (Implementations may emit additional message /// types that use this same `stats` object that represents summary /// statistics over multiple searches.) /// * **searches_with_match** - The number of searches that have run that have /// found at least one match. This is never more than `searches`. /// * **bytes_searched** - The total number of bytes that have been searched. /// * **bytes_printed** - The total number of bytes that have been printed. /// This includes everything emitted by this printer. 
/// * **matched_lines** - The total number of lines that participated in a /// match. When matches may contain multiple lines, then this includes every /// line that is part of every match. /// * **matches** - The total number of matches. There may be multiple matches /// per line. When matches may contain multiple lines, each match is counted /// only once, regardless of how many lines it spans. /// /// #### Object: **duration** /// /// This object includes a few fields for describing a duration. Two of its /// fields, `secs` and `nanos`, can be combined to give nanosecond precision /// on systems that support it. It has these fields: /// /// * **secs** - A whole number of seconds indicating the length of this /// duration. /// * **nanos** - A fractional part of this duration represent by nanoseconds. /// If nanosecond precision isn't supported, then this is typically rounded /// up to the nearest number of nanoseconds. /// * **human** - A human readable string describing the length of the /// duration. The format of the string is itself unspecified. /// /// #### Object: **arbitrary data** /// /// This object is used whenever arbitrary data needs to be represented as a /// JSON value. This object contains two fields, where generally only one of /// the fields is present: /// /// * **text** - A normal JSON string that is UTF-8 encoded. This field is /// populated if and only if the underlying data is valid UTF-8. /// * **bytes** - A normal JSON string that is a base64 encoding of the /// underlying bytes. /// /// More information on the motivation for this representation can be seen in /// the section [text encoding](#text-encoding) above. /// /// ## Example /// /// This section shows a small example that includes all message types. 
/// /// Here's the file we want to search, located at `/home/andrew/sherlock`: /// /// ```text /// For the Doctor Watsons of this world, as opposed to the Sherlock /// Holmeses, success in the province of detective work must always /// be, to a very large extent, the result of luck. Sherlock Holmes /// can extract a clew from a wisp of straw or a flake of cigar ash; /// but Doctor Watson has to have it taken out for him and dusted, /// and exhibited clearly, with a label attached. /// ``` /// /// Searching for `Watson` with a `before_context` of `1` with line numbers /// enabled shows something like this using the standard printer: /// /// ```text /// sherlock:1:For the Doctor Watsons of this world, as opposed to the Sherlock /// -- /// sherlock-4-can extract a clew from a wisp of straw or a flake of cigar ash; /// sherlock:5:but Doctor Watson has to have it taken out for him and dusted, /// ``` /// /// Here's what the same search looks like using the JSON wire format described /// above, where in we show semi-prettified JSON (instead of a strict JSON /// Lines format), for illustrative purposes: /// /// ```json /// { /// "type": "begin", /// "data": { /// "path": {"text": "/home/andrew/sherlock"}} /// } /// } /// { /// "type": "match", /// "data": { /// "path": {"text": "/home/andrew/sherlock"}, /// "lines": {"text": "For the Doctor Watsons of this world, as opposed to the Sherlock\n"}, /// "line_number": 1, /// "absolute_offset": 0, /// "submatches": [ /// {"match": {"text": "Watson"}, "start": 15, "end": 21} /// ] /// } /// } /// { /// "type": "context", /// "data": { /// "path": {"text": "/home/andrew/sherlock"}, /// "lines": {"text": "can extract a clew from a wisp of straw or a flake of cigar ash;\n"}, /// "line_number": 4, /// "absolute_offset": 193, /// "submatches": [] /// } /// } /// { /// "type": "match", /// "data": { /// "path": {"text": "/home/andrew/sherlock"}, /// "lines": {"text": "but Doctor Watson has to have it taken out for him and dusted,\n"}, 
/// "line_number": 5, /// "absolute_offset": 258, /// "submatches": [ /// {"match": {"text": "Watson"}, "start": 11, "end": 17} /// ] /// } /// } /// { /// "type": "end", /// "data": { /// "path": {"text": "/home/andrew/sherlock"}, /// "binary_offset": null, /// "stats": { /// "elapsed": {"secs": 0, "nanos": 36296, "human": "0.0000s"}, /// "searches": 1, /// "searches_with_match": 1, /// "bytes_searched": 367, /// "bytes_printed": 1151, /// "matched_lines": 2, /// "matches": 2 /// } /// } /// } /// ``` /// and here's what a match type item would looks like if a replacement text /// of 'Moriarity' was given as a parameter: /// ```json /// { /// "type": "match", /// "data": { /// "path": {"text": "/home/andrew/sherlock"}, /// "lines": {"text": "For the Doctor Watsons of this world, as opposed to the Sherlock\n"}, /// "line_number": 1, /// "absolute_offset": 0, /// "submatches": [ /// {"match": {"text": "Watson"}, "replacement": {"text": "Moriarity"}, "start": 15, "end": 21} /// ] /// } /// } /// ``` #[derive(Clone, Debug)] pub struct JSON<W> { config: Config, wtr: CounterWriter<W>, matches: Vec<Match>, } impl<W: io::Write> JSON<W> { /// Return a JSON lines printer with a default configuration that writes /// matches to the given writer. pub fn new(wtr: W) -> JSON<W> { JSONBuilder::new().build(wtr) } /// Return an implementation of `Sink` for the JSON printer. /// /// This does not associate the printer with a file path, which means this /// implementation will never print a file path along with the matches. pub fn sink<'s, M: Matcher>( &'s mut self, matcher: M, ) -> JSONSink<'static, 's, M, W> { JSONSink { matcher, replacer: Replacer::new(), json: self, path: None, start_time: Instant::now(), match_count: 0, binary_byte_offset: None, begin_printed: false, stats: Stats::new(), } } /// Return an implementation of `Sink` associated with a file path. 
/// /// When the printer is associated with a path, then it may, depending on /// its configuration, print the path along with the matches found. pub fn sink_with_path<'p, 's, M, P>( &'s mut self, matcher: M, path: &'p P, ) -> JSONSink<'p, 's, M, W> where M: Matcher, P: ?Sized + AsRef<Path>, { JSONSink { matcher, replacer: Replacer::new(), json: self, path: Some(path.as_ref()), start_time: Instant::now(), match_count: 0, binary_byte_offset: None, begin_printed: false, stats: Stats::new(), } } /// Write the given message followed by a new line. The new line is /// determined from the configuration of the given searcher. fn write_message( &mut self, message: &jsont::Message<'_>, ) -> io::Result<()> { if self.config.pretty { json::to_writer_pretty(&mut self.wtr, message)?; } else { json::to_writer(&mut self.wtr, message)?; } let _ = self.wtr.write(b"\n")?; // This will always be Ok(1) when successful. Ok(()) } } impl<W> JSON<W> { /// Returns true if and only if this printer has written at least one byte /// to the underlying writer during any of the previous searches. pub fn has_written(&self) -> bool { self.wtr.total_count() > 0 } /// Return a mutable reference to the underlying writer. pub fn get_mut(&mut self) -> &mut W { self.wtr.get_mut() } /// Consume this printer and return back ownership of the underlying /// writer. pub fn into_inner(self) -> W { self.wtr.into_inner() } } /// An implementation of `Sink` associated with a matcher and an optional file /// path for the JSON printer. /// /// This type is generic over a few type parameters: /// /// * `'p` refers to the lifetime of the file path, if one is provided. When /// no file path is given, then this is `'static`. /// * `'s` refers to the lifetime of the [`JSON`] printer that this type /// borrows. /// * `M` refers to the type of matcher used by /// `grep_searcher::Searcher` that is reporting results to this sink. /// * `W` refers to the underlying writer that this printer is writing its /// output to. 
#[derive(Debug)] pub struct JSONSink<'p, 's, M: Matcher, W> { matcher: M, replacer: Replacer<M>, json: &'s mut JSON<W>, path: Option<&'p Path>, start_time: Instant, match_count: u64, binary_byte_offset: Option<u64>, begin_printed: bool, stats: Stats, } impl<'p, 's, M: Matcher, W: io::Write> JSONSink<'p, 's, M, W> { /// Returns true if and only if this printer received a match in the /// previous search. /// /// This is unaffected by the result of searches before the previous /// search. pub fn has_match(&self) -> bool { self.match_count > 0 } /// Return the total number of matches reported to this sink. /// /// This corresponds to the number of times `Sink::matched` is called. pub fn match_count(&self) -> u64 { self.match_count } /// If binary data was found in the previous search, this returns the /// offset at which the binary data was first detected. /// /// The offset returned is an absolute offset relative to the entire /// set of bytes searched. /// /// This is unaffected by the result of searches before the previous /// search. e.g., If the search prior to the previous search found binary /// data but the previous search found no binary data, then this will /// return `None`. pub fn binary_byte_offset(&self) -> Option<u64> { self.binary_byte_offset } /// Return a reference to the stats produced by the printer for all /// searches executed on this sink. pub fn stats(&self) -> &Stats { &self.stats } /// Execute the matcher over the given bytes and record the match /// locations if the current configuration demands match granularity. fn record_matches( &mut self, searcher: &Searcher, bytes: &[u8], range: std::ops::Range<usize>, ) -> io::Result<()> { self.json.matches.clear(); // If printing requires knowing the location of each individual match, // then compute and stored those right now for use later. 
While this // adds an extra copy for storing the matches, we do amortize the // allocation for it and this greatly simplifies the printing logic to // the extent that it's easy to ensure that we never do more than // one search to find the matches. let matches = &mut self.json.matches; find_iter_at_in_context( searcher, &self.matcher, bytes, range.clone(), |m| { let (s, e) = (m.start() - range.start, m.end() - range.start); matches.push(Match::new(s, e)); true }, )?; // Don't report empty matches appearing at the end of the bytes. if !matches.is_empty() && matches.last().unwrap().is_empty() && matches.last().unwrap().start() >= bytes.len() { matches.pop().unwrap(); } Ok(()) } /// If the configuration specifies a replacement, then this executes the /// replacement, lazily allocating memory if necessary. /// /// To access the result of a replacement, use `replacer.replacement()`. fn replace( &mut self, searcher: &Searcher, bytes: &[u8], range: std::ops::Range<usize>, ) -> io::Result<()> { self.replacer.clear(); if self.json.config.replacement.is_some() { let replacement = (*self.json.config.replacement).as_ref().map(|r| &*r).unwrap(); self.replacer.replace_all( searcher, &self.matcher, bytes, range, replacement, )?; } Ok(()) } /// Write the "begin" message. 
fn write_begin_message(&mut self) -> io::Result<()> { if self.begin_printed { return Ok(()); } let msg = jsont::Message::Begin(jsont::Begin { path: self.path }); self.json.write_message(&msg)?; self.begin_printed = true; Ok(()) } } impl<'p, 's, M: Matcher, W: io::Write> Sink for JSONSink<'p, 's, M, W> { type Error = io::Error; fn matched( &mut self, searcher: &Searcher, mat: &SinkMatch<'_>, ) -> Result<bool, io::Error> { self.match_count += 1; self.write_begin_message()?; self.record_matches( searcher, mat.buffer(), mat.bytes_range_in_buffer(), )?; self.replace(searcher, mat.buffer(), mat.bytes_range_in_buffer())?; self.stats.add_matches(self.json.matches.len() as u64); self.stats.add_matched_lines(mat.lines().count() as u64); let submatches = SubMatches::new( mat.bytes(), &self.json.matches, self.replacer.replacement(), ); let msg = jsont::Message::Match(jsont::Match { path: self.path, lines: mat.bytes(), line_number: mat.line_number(), absolute_offset: mat.absolute_byte_offset(), submatches: submatches.as_slice(), }); self.json.write_message(&msg)?; Ok(true) } fn context( &mut self, searcher: &Searcher, ctx: &SinkContext<'_>, ) -> Result<bool, io::Error> { self.write_begin_message()?; self.json.matches.clear(); let submatches = if searcher.invert_match() { self.record_matches(searcher, ctx.bytes(), 0..ctx.bytes().len())?; self.replace(searcher, ctx.bytes(), 0..ctx.bytes().len())?; SubMatches::new( ctx.bytes(), &self.json.matches, self.replacer.replacement(), ) } else { SubMatches::empty() }; let msg = jsont::Message::Context(jsont::Context { path: self.path, lines: ctx.bytes(), line_number: ctx.line_number(), absolute_offset: ctx.absolute_byte_offset(), submatches: submatches.as_slice(), }); self.json.write_message(&msg)?; Ok(true) } fn binary_data( &mut self, searcher: &Searcher, binary_byte_offset: u64, ) -> Result<bool, io::Error> { if searcher.binary_detection().quit_byte().is_some() { if let Some(ref path) = self.path { log::debug!( "ignoring {path}: found 
binary data at \ offset {binary_byte_offset}", path = path.display(), ); } } Ok(true) } fn begin(&mut self, _searcher: &Searcher) -> Result<bool, io::Error> { self.json.wtr.reset_count(); self.start_time = Instant::now(); self.match_count = 0; self.binary_byte_offset = None; if !self.json.config.always_begin_end { return Ok(true); } self.write_begin_message()?; Ok(true) } fn finish( &mut self, _searcher: &Searcher, finish: &SinkFinish, ) -> Result<(), io::Error> { self.binary_byte_offset = finish.binary_byte_offset(); self.stats.add_elapsed(self.start_time.elapsed()); self.stats.add_searches(1); if self.match_count > 0 { self.stats.add_searches_with_match(1); } self.stats.add_bytes_searched(finish.byte_count()); self.stats.add_bytes_printed(self.json.wtr.count()); if !self.begin_printed { return Ok(()); } let msg = jsont::Message::End(jsont::End { path: self.path, binary_offset: finish.binary_byte_offset(), stats: self.stats.clone(), }); self.json.write_message(&msg)?; Ok(()) } } /// SubMatches represents a set of matches in a contiguous range of bytes. /// /// A simpler representation for this would just simply be `Vec<SubMatch>`, /// but the common case is exactly one match per range of bytes, which we /// specialize here using a fixed size array without any allocation. enum SubMatches<'a> { Empty, Small([jsont::SubMatch<'a>; 1]), Big(Vec<jsont::SubMatch<'a>>), } impl<'a> SubMatches<'a> { /// Create a new set of match ranges from a set of matches and the /// corresponding bytes that those matches apply to. 
fn new( bytes: &'a [u8], matches: &[Match], replacement: Option<(&'a [u8], &'a [Match])>, ) -> SubMatches<'a> { if matches.len() == 1 { let mat = matches[0]; SubMatches::Small([jsont::SubMatch { m: &bytes[mat], replacement: replacement .map(|(rbuf, rmatches)| &rbuf[rmatches[0]]), start: mat.start(), end: mat.end(), }]) } else { let mut match_ranges = vec![]; for (i, &mat) in matches.iter().enumerate() { match_ranges.push(jsont::SubMatch { m: &bytes[mat], replacement: replacement .map(|(rbuf, rmatches)| &rbuf[rmatches[i]]), start: mat.start(), end: mat.end(), }); } SubMatches::Big(match_ranges) } } /// Create an empty set of match ranges. fn empty() -> SubMatches<'static> { SubMatches::Empty } /// Return this set of match ranges as a slice. fn as_slice(&self) -> &[jsont::SubMatch<'_>] { match *self { SubMatches::Empty => &[], SubMatches::Small(ref x) => x, SubMatches::Big(ref x) => x, } } } #[cfg(test)] mod tests { use grep_matcher::LineTerminator; use grep_regex::{RegexMatcher, RegexMatcherBuilder};
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
true
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/printer/src/jsont.rs
crates/printer/src/jsont.rs
// This module defines the types we use for JSON serialization. We specifically // omit deserialization, partially because there isn't a clear use case for // them at this time, but also because deserialization will complicate things. // Namely, the types below are designed in a way that permits JSON // serialization with little or no allocation. Allocation is often quite // convenient for deserialization however, so these types would become a bit // more complex. use std::{borrow::Cow, path::Path}; pub(crate) enum Message<'a> { Begin(Begin<'a>), End(End<'a>), Match(Match<'a>), Context(Context<'a>), } impl<'a> serde::Serialize for Message<'a> { fn serialize<S: serde::Serializer>( &self, s: S, ) -> Result<S::Ok, S::Error> { use serde::ser::SerializeStruct; let mut state = s.serialize_struct("Message", 2)?; match *self { Message::Begin(ref msg) => { state.serialize_field("type", &"begin")?; state.serialize_field("data", msg)?; } Message::End(ref msg) => { state.serialize_field("type", &"end")?; state.serialize_field("data", msg)?; } Message::Match(ref msg) => { state.serialize_field("type", &"match")?; state.serialize_field("data", msg)?; } Message::Context(ref msg) => { state.serialize_field("type", &"context")?; state.serialize_field("data", msg)?; } } state.end() } } pub(crate) struct Begin<'a> { pub(crate) path: Option<&'a Path>, } impl<'a> serde::Serialize for Begin<'a> { fn serialize<S: serde::Serializer>( &self, s: S, ) -> Result<S::Ok, S::Error> { use serde::ser::SerializeStruct; let mut state = s.serialize_struct("Begin", 1)?; state.serialize_field("path", &self.path.map(Data::from_path))?; state.end() } } pub(crate) struct End<'a> { pub(crate) path: Option<&'a Path>, pub(crate) binary_offset: Option<u64>, pub(crate) stats: crate::stats::Stats, } impl<'a> serde::Serialize for End<'a> { fn serialize<S: serde::Serializer>( &self, s: S, ) -> Result<S::Ok, S::Error> { use serde::ser::SerializeStruct; let mut state = s.serialize_struct("End", 3)?; 
state.serialize_field("path", &self.path.map(Data::from_path))?; state.serialize_field("binary_offset", &self.binary_offset)?; state.serialize_field("stats", &self.stats)?; state.end() } } pub(crate) struct Match<'a> { pub(crate) path: Option<&'a Path>, pub(crate) lines: &'a [u8], pub(crate) line_number: Option<u64>, pub(crate) absolute_offset: u64, pub(crate) submatches: &'a [SubMatch<'a>], } impl<'a> serde::Serialize for Match<'a> { fn serialize<S: serde::Serializer>( &self, s: S, ) -> Result<S::Ok, S::Error> { use serde::ser::SerializeStruct; let mut state = s.serialize_struct("Match", 5)?; state.serialize_field("path", &self.path.map(Data::from_path))?; state.serialize_field("lines", &Data::from_bytes(self.lines))?; state.serialize_field("line_number", &self.line_number)?; state.serialize_field("absolute_offset", &self.absolute_offset)?; state.serialize_field("submatches", &self.submatches)?; state.end() } } pub(crate) struct Context<'a> { pub(crate) path: Option<&'a Path>, pub(crate) lines: &'a [u8], pub(crate) line_number: Option<u64>, pub(crate) absolute_offset: u64, pub(crate) submatches: &'a [SubMatch<'a>], } impl<'a> serde::Serialize for Context<'a> { fn serialize<S: serde::Serializer>( &self, s: S, ) -> Result<S::Ok, S::Error> { use serde::ser::SerializeStruct; let mut state = s.serialize_struct("Context", 5)?; state.serialize_field("path", &self.path.map(Data::from_path))?; state.serialize_field("lines", &Data::from_bytes(self.lines))?; state.serialize_field("line_number", &self.line_number)?; state.serialize_field("absolute_offset", &self.absolute_offset)?; state.serialize_field("submatches", &self.submatches)?; state.end() } } pub(crate) struct SubMatch<'a> { pub(crate) m: &'a [u8], pub(crate) replacement: Option<&'a [u8]>, pub(crate) start: usize, pub(crate) end: usize, } impl<'a> serde::Serialize for SubMatch<'a> { fn serialize<S: serde::Serializer>( &self, s: S, ) -> Result<S::Ok, S::Error> { use serde::ser::SerializeStruct; let mut state = 
s.serialize_struct("SubMatch", 3)?; state.serialize_field("match", &Data::from_bytes(self.m))?; if let Some(r) = self.replacement { state.serialize_field("replacement", &Data::from_bytes(r))?; } state.serialize_field("start", &self.start)?; state.serialize_field("end", &self.end)?; state.end() } } /// Data represents things that look like strings, but may actually not be /// valid UTF-8. To handle this, `Data` is serialized as an object with one /// of two keys: `text` (for valid UTF-8) or `bytes` (for invalid UTF-8). /// /// The happy path is valid UTF-8, which streams right through as-is, since /// it is natively supported by JSON. When invalid UTF-8 is found, then it is /// represented as arbitrary bytes and base64 encoded. #[derive(Clone, Debug, Hash, PartialEq, Eq)] enum Data<'a> { Text { text: Cow<'a, str> }, Bytes { bytes: &'a [u8] }, } impl<'a> Data<'a> { fn from_bytes(bytes: &[u8]) -> Data<'_> { match std::str::from_utf8(bytes) { Ok(text) => Data::Text { text: Cow::Borrowed(text) }, Err(_) => Data::Bytes { bytes }, } } #[cfg(unix)] fn from_path(path: &Path) -> Data<'_> { use std::os::unix::ffi::OsStrExt; match path.to_str() { Some(text) => Data::Text { text: Cow::Borrowed(text) }, None => Data::Bytes { bytes: path.as_os_str().as_bytes() }, } } #[cfg(not(unix))] fn from_path(path: &Path) -> Data<'_> { // Using lossy conversion means some paths won't round trip precisely, // but it's not clear what we should actually do. Serde rejects // non-UTF-8 paths, and OsStr's are serialized as a sequence of UTF-16 // code units on Windows. Neither seem appropriate for this use case, // so we do the easy thing for now. 
Data::Text { text: path.to_string_lossy() } } } impl<'a> serde::Serialize for Data<'a> { fn serialize<S: serde::Serializer>( &self, s: S, ) -> Result<S::Ok, S::Error> { use serde::ser::SerializeStruct; let mut state = s.serialize_struct("Data", 1)?; match *self { Data::Text { ref text } => state.serialize_field("text", text)?, Data::Bytes { bytes } => { // use base64::engine::{general_purpose::STANDARD, Engine}; // let encoded = STANDARD.encode(bytes); state.serialize_field("bytes", &base64_standard(bytes))?; } } state.end() } } /// Implements "standard" base64 encoding as described in RFC 3548[1]. /// /// We roll our own here instead of bringing in something heavier weight like /// the `base64` crate. In particular, we really don't care about perf much /// here, since this is only used for data or file paths that are not valid /// UTF-8. /// /// [1]: https://tools.ietf.org/html/rfc3548#section-3 fn base64_standard(bytes: &[u8]) -> String { const ALPHABET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; let mut out = String::new(); let mut it = bytes.chunks_exact(3); while let Some(chunk) = it.next() { let group24 = (usize::from(chunk[0]) << 16) | (usize::from(chunk[1]) << 8) | usize::from(chunk[2]); let index1 = (group24 >> 18) & 0b111_111; let index2 = (group24 >> 12) & 0b111_111; let index3 = (group24 >> 6) & 0b111_111; let index4 = (group24 >> 0) & 0b111_111; out.push(char::from(ALPHABET[index1])); out.push(char::from(ALPHABET[index2])); out.push(char::from(ALPHABET[index3])); out.push(char::from(ALPHABET[index4])); } match it.remainder() { &[] => {} &[byte0] => { let group8 = usize::from(byte0); let index1 = (group8 >> 2) & 0b111_111; let index2 = (group8 << 4) & 0b111_111; out.push(char::from(ALPHABET[index1])); out.push(char::from(ALPHABET[index2])); out.push('='); out.push('='); } &[byte0, byte1] => { let group16 = (usize::from(byte0) << 8) | usize::from(byte1); let index1 = (group16 >> 10) & 0b111_111; let index2 = (group16 >> 
4) & 0b111_111; let index3 = (group16 << 2) & 0b111_111; out.push(char::from(ALPHABET[index1])); out.push(char::from(ALPHABET[index2])); out.push(char::from(ALPHABET[index3])); out.push('='); } _ => unreachable!("remainder must have length < 3"), } out } #[cfg(test)] mod tests { use super::*; // Tests taken from RFC 4648[1]. // // [1]: https://datatracker.ietf.org/doc/html/rfc4648#section-10 #[test] fn base64_basic() { let b64 = |s: &str| base64_standard(s.as_bytes()); assert_eq!(b64(""), ""); assert_eq!(b64("f"), "Zg=="); assert_eq!(b64("fo"), "Zm8="); assert_eq!(b64("foo"), "Zm9v"); assert_eq!(b64("foob"), "Zm9vYg=="); assert_eq!(b64("fooba"), "Zm9vYmE="); assert_eq!(b64("foobar"), "Zm9vYmFy"); } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/printer/src/util.rs
crates/printer/src/util.rs
use std::{borrow::Cow, cell::OnceCell, fmt, io, path::Path, time}; use { bstr::ByteVec, grep_matcher::{Captures, LineTerminator, Match, Matcher}, grep_searcher::{ LineIter, Searcher, SinkContext, SinkContextKind, SinkError, SinkMatch, }, }; use crate::{MAX_LOOK_AHEAD, hyperlink::HyperlinkPath}; /// A type for handling replacements while amortizing allocation. pub(crate) struct Replacer<M: Matcher> { space: Option<Space<M>>, } struct Space<M: Matcher> { /// The place to store capture locations. caps: M::Captures, /// The place to write a replacement to. dst: Vec<u8>, /// The place to store match offsets in terms of `dst`. matches: Vec<Match>, } impl<M: Matcher> fmt::Debug for Replacer<M> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let (dst, matches) = self.replacement().unwrap_or((&[], &[])); f.debug_struct("Replacer") .field("dst", &dst) .field("matches", &matches) .finish() } } impl<M: Matcher> Replacer<M> { /// Create a new replacer for use with a particular matcher. /// /// This constructor does not allocate. Instead, space for dealing with /// replacements is allocated lazily only when needed. pub(crate) fn new() -> Replacer<M> { Replacer { space: None } } /// Executes a replacement on the given haystack string by replacing all /// matches with the given replacement. To access the result of the /// replacement, use the `replacement` method. /// /// This can fail if the underlying matcher reports an error. pub(crate) fn replace_all<'a>( &'a mut self, searcher: &Searcher, matcher: &M, mut haystack: &[u8], range: std::ops::Range<usize>, replacement: &[u8], ) -> io::Result<()> { // See the giant comment in 'find_iter_at_in_context' below for why we // do this dance. let is_multi_line = searcher.multi_line_with_matcher(&matcher); // Get the line_terminator that was removed (if any) so we can add it // back. 
let line_terminator = if is_multi_line { if haystack[range.end..].len() >= MAX_LOOK_AHEAD { haystack = &haystack[..range.end + MAX_LOOK_AHEAD]; } &[] } else { // When searching a single line, we should remove the line // terminator. Otherwise, it's possible for the regex (via // look-around) to observe the line terminator and not match // because of it. let mut m = Match::new(0, range.end); let line_terminator = trim_line_terminator(searcher, haystack, &mut m); haystack = &haystack[..m.end()]; line_terminator }; { let &mut Space { ref mut dst, ref mut caps, ref mut matches } = self.allocate(matcher)?; dst.clear(); matches.clear(); replace_with_captures_in_context( matcher, haystack, line_terminator, range.clone(), caps, dst, |caps, dst| { let start = dst.len(); caps.interpolate( |name| matcher.capture_index(name), haystack, replacement, dst, ); let end = dst.len(); matches.push(Match::new(start, end)); true }, ) .map_err(io::Error::error_message)?; } Ok(()) } /// Return the result of the prior replacement and the match offsets for /// all replacement occurrences within the returned replacement buffer. /// /// If no replacement has occurred then `None` is returned. pub(crate) fn replacement<'a>( &'a self, ) -> Option<(&'a [u8], &'a [Match])> { match self.space { None => None, Some(ref space) => { if space.matches.is_empty() { None } else { Some((&space.dst, &space.matches)) } } } } /// Clear space used for performing a replacement. /// /// Subsequent calls to `replacement` after calling `clear` (but before /// executing another replacement) will always return `None`. pub(crate) fn clear(&mut self) { if let Some(ref mut space) = self.space { space.dst.clear(); space.matches.clear(); } } /// Allocate space for replacements when used with the given matcher and /// return a mutable reference to that space. /// /// This can fail if allocating space for capture locations from the given /// matcher fails. 
fn allocate(&mut self, matcher: &M) -> io::Result<&mut Space<M>> { if self.space.is_none() { let caps = matcher.new_captures().map_err(io::Error::error_message)?; self.space = Some(Space { caps, dst: vec![], matches: vec![] }); } Ok(self.space.as_mut().unwrap()) } } /// A simple layer of abstraction over either a match or a contextual line /// reported by the searcher. /// /// In particular, this provides an API that unions the `SinkMatch` and /// `SinkContext` types while also exposing a list of all individual match /// locations. /// /// While this serves as a convenient mechanism to abstract over `SinkMatch` /// and `SinkContext`, this also provides a way to abstract over replacements. /// Namely, after a replacement, a `Sunk` value can be constructed using the /// results of the replacement instead of the bytes reported directly by the /// searcher. #[derive(Debug)] pub(crate) struct Sunk<'a> { bytes: &'a [u8], absolute_byte_offset: u64, line_number: Option<u64>, context_kind: Option<&'a SinkContextKind>, matches: &'a [Match], original_matches: &'a [Match], } impl<'a> Sunk<'a> { #[inline] pub(crate) fn empty() -> Sunk<'static> { Sunk { bytes: &[], absolute_byte_offset: 0, line_number: None, context_kind: None, matches: &[], original_matches: &[], } } #[inline] pub(crate) fn from_sink_match( sunk: &'a SinkMatch<'a>, original_matches: &'a [Match], replacement: Option<(&'a [u8], &'a [Match])>, ) -> Sunk<'a> { let (bytes, matches) = replacement.unwrap_or_else(|| (sunk.bytes(), original_matches)); Sunk { bytes, absolute_byte_offset: sunk.absolute_byte_offset(), line_number: sunk.line_number(), context_kind: None, matches, original_matches, } } #[inline] pub(crate) fn from_sink_context( sunk: &'a SinkContext<'a>, original_matches: &'a [Match], replacement: Option<(&'a [u8], &'a [Match])>, ) -> Sunk<'a> { let (bytes, matches) = replacement.unwrap_or_else(|| (sunk.bytes(), original_matches)); Sunk { bytes, absolute_byte_offset: sunk.absolute_byte_offset(), line_number: 
sunk.line_number(), context_kind: Some(sunk.kind()), matches, original_matches, } } #[inline] pub(crate) fn context_kind(&self) -> Option<&'a SinkContextKind> { self.context_kind } #[inline] pub(crate) fn bytes(&self) -> &'a [u8] { self.bytes } #[inline] pub(crate) fn matches(&self) -> &'a [Match] { self.matches } #[inline] pub(crate) fn original_matches(&self) -> &'a [Match] { self.original_matches } #[inline] pub(crate) fn lines(&self, line_term: u8) -> LineIter<'a> { LineIter::new(line_term, self.bytes()) } #[inline] pub(crate) fn absolute_byte_offset(&self) -> u64 { self.absolute_byte_offset } #[inline] pub(crate) fn line_number(&self) -> Option<u64> { self.line_number } } /// A simple encapsulation of a file path used by a printer. /// /// This represents any transforms that we might want to perform on the path, /// such as converting it to valid UTF-8 and/or replacing its separator with /// something else. This allows us to amortize work if we are printing the /// file path for every match. /// /// In the common case, no transformation is needed, which lets us avoid /// the allocation. Typically, only Windows requires a transform, since /// it's fraught to access the raw bytes of a path directly and first need /// to lossily convert to UTF-8. Windows is also typically where the path /// separator replacement is used, e.g., in cygwin environments to use `/` /// instead of `\`. /// /// Users of this type are expected to construct it from a normal `Path` /// found in the standard library. It can then be written to any `io::Write` /// implementation using the `as_bytes` method. This achieves platform /// portability with a small cost: on Windows, paths that are not valid UTF-16 /// will not roundtrip correctly. #[derive(Clone, Debug)] pub(crate) struct PrinterPath<'a> { // On Unix, we can re-materialize a `Path` from our `Cow<'a, [u8]>` with // zero cost, so there's no point in storing it. 
At time of writing, // OsStr::as_os_str_bytes (and its corresponding constructor) are not // stable yet. Those would let us achieve the same end portably. (As long // as we keep our UTF-8 requirement on Windows.) #[cfg(not(unix))] path: &'a Path, bytes: Cow<'a, [u8]>, hyperlink: OnceCell<Option<HyperlinkPath>>, } impl<'a> PrinterPath<'a> { /// Create a new path suitable for printing. pub(crate) fn new(path: &'a Path) -> PrinterPath<'a> { PrinterPath { #[cfg(not(unix))] path, // N.B. This is zero-cost on Unix and requires at least a UTF-8 // check on Windows. This doesn't allocate on Windows unless the // path is invalid UTF-8 (which is exceptionally rare). bytes: Vec::from_path_lossy(path), hyperlink: OnceCell::new(), } } /// Set the separator on this path. /// /// When set, `PrinterPath::as_bytes` will return the path provided but /// with its separator replaced with the one given. pub(crate) fn with_separator( mut self, sep: Option<u8>, ) -> PrinterPath<'a> { /// Replace the path separator in this path with the given separator /// and do it in place. On Windows, both `/` and `\` are treated as /// path separators that are both replaced by `new_sep`. In all other /// environments, only `/` is treated as a path separator. fn replace_separator(bytes: &[u8], sep: u8) -> Vec<u8> { let mut bytes = bytes.to_vec(); for b in bytes.iter_mut() { if *b == b'/' || (cfg!(windows) && *b == b'\\') { *b = sep; } } bytes } let Some(sep) = sep else { return self }; self.bytes = Cow::Owned(replace_separator(self.as_bytes(), sep)); self } /// Return the raw bytes for this path. pub(crate) fn as_bytes(&self) -> &[u8] { &self.bytes } /// Return this path as a hyperlink. /// /// Note that a hyperlink may not be able to be created from a path. /// Namely, computing the hyperlink may require touching the file system /// (e.g., for path canonicalization) and that can fail. This failure is /// silent but is logged. 
pub(crate) fn as_hyperlink(&self) -> Option<&HyperlinkPath> { self.hyperlink .get_or_init(|| HyperlinkPath::from_path(self.as_path())) .as_ref() } /// Return this path as an actual `Path` type. pub(crate) fn as_path(&self) -> &Path { #[cfg(unix)] fn imp<'p>(p: &'p PrinterPath<'_>) -> &'p Path { use std::{ffi::OsStr, os::unix::ffi::OsStrExt}; Path::new(OsStr::from_bytes(p.as_bytes())) } #[cfg(not(unix))] fn imp<'p>(p: &'p PrinterPath<'_>) -> &'p Path { p.path } imp(self) } } /// A type that provides "nicer" Display and Serialize impls for /// std::time::Duration. The serialization format should actually be compatible /// with the Deserialize impl for std::time::Duration, since this type only /// adds new fields. #[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] pub(crate) struct NiceDuration(pub time::Duration); impl fmt::Display for NiceDuration { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:0.6}s", self.fractional_seconds()) } } impl NiceDuration { /// Returns the number of seconds in this duration in fraction form. /// The number to the left of the decimal point is the number of seconds, /// and the number to the right is the number of milliseconds. fn fractional_seconds(&self) -> f64 { let fractional = (self.0.subsec_nanos() as f64) / 1_000_000_000.0; self.0.as_secs() as f64 + fractional } } #[cfg(feature = "serde")] impl serde::Serialize for NiceDuration { fn serialize<S: serde::Serializer>( &self, ser: S, ) -> Result<S::Ok, S::Error> { use serde::ser::SerializeStruct; let mut state = ser.serialize_struct("Duration", 3)?; state.serialize_field("secs", &self.0.as_secs())?; state.serialize_field("nanos", &self.0.subsec_nanos())?; state.serialize_field("human", &format!("{}", self))?; state.end() } } /// A simple formatter for converting `u64` values to ASCII byte strings. /// /// This avoids going through the formatting machinery which seems to /// substantially slow things down. 
/// /// The `itoa` crate does the same thing as this formatter, but is a bit /// faster. We roll our own which is a bit slower, but gets us enough of a win /// to be satisfied with and with pure safe code. #[derive(Debug)] pub(crate) struct DecimalFormatter { buf: [u8; Self::MAX_U64_LEN], start: usize, } impl DecimalFormatter { /// Discovered via `u64::MAX.to_string().len()`. const MAX_U64_LEN: usize = 20; /// Create a new decimal formatter for the given 64-bit unsigned integer. pub(crate) fn new(mut n: u64) -> DecimalFormatter { let mut buf = [0; Self::MAX_U64_LEN]; let mut i = buf.len(); loop { i -= 1; let digit = u8::try_from(n % 10).unwrap(); n /= 10; buf[i] = b'0' + digit; if n == 0 { break; } } DecimalFormatter { buf, start: i } } /// Return the decimal formatted as an ASCII byte string. pub(crate) fn as_bytes(&self) -> &[u8] { &self.buf[self.start..] } } /// Trim prefix ASCII spaces from the given slice and return the corresponding /// range. /// /// This stops trimming a prefix as soon as it sees non-whitespace or a line /// terminator. pub(crate) fn trim_ascii_prefix( line_term: LineTerminator, slice: &[u8], range: Match, ) -> Match { fn is_space(b: u8) -> bool { match b { b'\t' | b'\n' | b'\x0B' | b'\x0C' | b'\r' | b' ' => true, _ => false, } } let count = slice[range] .iter() .take_while(|&&b| -> bool { is_space(b) && !line_term.as_bytes().contains(&b) }) .count(); range.with_start(range.start() + count) } pub(crate) fn find_iter_at_in_context<M, F>( searcher: &Searcher, matcher: M, mut bytes: &[u8], range: std::ops::Range<usize>, mut matched: F, ) -> io::Result<()> where M: Matcher, F: FnMut(Match) -> bool, { // This strange dance is to account for the possibility of look-ahead in // the regex. 
The problem here is that mat.bytes() doesn't include the // lines beyond the match boundaries in mulit-line mode, which means that // when we try to rediscover the full set of matches here, the regex may no // longer match if it required some look-ahead beyond the matching lines. // // PCRE2 (and the grep-matcher interfaces) has no way of specifying an end // bound of the search. So we kludge it and let the regex engine search the // rest of the buffer... But to avoid things getting too crazy, we cap the // buffer. // // If it weren't for multi-line mode, then none of this would be needed. // Alternatively, if we refactored the grep interfaces to pass along the // full set of matches (if available) from the searcher, then that might // also help here. But that winds up paying an upfront unavoidable cost for // the case where matches don't need to be counted. So then you'd have to // introduce a way to pass along matches conditionally, only when needed. // Yikes. // // Maybe the bigger picture thing here is that the searcher should be // responsible for finding matches when necessary, and the printer // shouldn't be involved in this business in the first place. Sigh. Live // and learn. Abstraction boundaries are hard. let is_multi_line = searcher.multi_line_with_matcher(&matcher); if is_multi_line { if bytes[range.end..].len() >= MAX_LOOK_AHEAD { bytes = &bytes[..range.end + MAX_LOOK_AHEAD]; } } else { // When searching a single line, we should remove the line terminator. // Otherwise, it's possible for the regex (via look-around) to observe // the line terminator and not match because of it. let mut m = Match::new(0, range.end); // No need to rember the line terminator as we aren't doing a replace // here. 
trim_line_terminator(searcher, bytes, &mut m); bytes = &bytes[..m.end()]; } matcher .find_iter_at(bytes, range.start, |m| { if m.start() >= range.end { return false; } matched(m) }) .map_err(io::Error::error_message) } /// Given a buf and some bounds, if there is a line terminator at the end of /// the given bounds in buf, then the bounds are trimmed to remove the line /// terminator, returning the slice of the removed line terminator (if any). pub(crate) fn trim_line_terminator<'b>( searcher: &Searcher, buf: &'b [u8], line: &mut Match, ) -> &'b [u8] { let lineterm = searcher.line_terminator(); if lineterm.is_suffix(&buf[*line]) { let mut end = line.end() - 1; if lineterm.is_crlf() && end > 0 && buf.get(end - 1) == Some(&b'\r') { end -= 1; } let orig_end = line.end(); *line = line.with_end(end); &buf[end..orig_end] } else { &[] } } /// Like `Matcher::replace_with_captures_at`, but accepts an end bound. /// /// See also: `find_iter_at_in_context` for why we need this. fn replace_with_captures_in_context<M, F>( matcher: M, bytes: &[u8], line_terminator: &[u8], range: std::ops::Range<usize>, caps: &mut M::Captures, dst: &mut Vec<u8>, mut append: F, ) -> Result<(), M::Error> where M: Matcher, F: FnMut(&M::Captures, &mut Vec<u8>) -> bool, { let mut last_match = range.start; matcher.captures_iter_at(bytes, range.start, caps, |caps| { let m = caps.get(0).unwrap(); if m.start() >= range.end { return false; } dst.extend(&bytes[last_match..m.start()]); last_match = m.end(); append(caps, dst) })?; let end = if last_match > range.end { bytes.len() } else { std::cmp::min(bytes.len(), range.end) }; dst.extend(&bytes[last_match..end]); // Add back any line terminator. 
dst.extend(line_terminator); Ok(()) } #[cfg(test)] mod tests { use super::*; #[test] fn custom_decimal_format() { let fmt = |n: u64| { let bytes = DecimalFormatter::new(n).as_bytes().to_vec(); String::from_utf8(bytes).unwrap() }; let std = |n: u64| n.to_string(); let ints = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 100, 123, u64::MAX]; for n in ints { assert_eq!(std(n), fmt(n)); } } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/printer/src/summary.rs
crates/printer/src/summary.rs
use std::{ cell::RefCell, io::{self, Write}, path::Path, sync::Arc, time::Instant, }; use { grep_matcher::Matcher, grep_searcher::{Searcher, Sink, SinkError, SinkFinish, SinkMatch}, termcolor::{ColorSpec, NoColor, WriteColor}, }; use crate::{ color::ColorSpecs, counter::CounterWriter, hyperlink::{self, HyperlinkConfig}, stats::Stats, util::{PrinterPath, find_iter_at_in_context}, }; /// The configuration for the summary printer. /// /// This is manipulated by the SummaryBuilder and then referenced by the actual /// implementation. Once a printer is build, the configuration is frozen and /// cannot changed. #[derive(Debug, Clone)] struct Config { kind: SummaryKind, colors: ColorSpecs, hyperlink: HyperlinkConfig, stats: bool, path: bool, exclude_zero: bool, separator_field: Arc<Vec<u8>>, separator_path: Option<u8>, path_terminator: Option<u8>, } impl Default for Config { fn default() -> Config { Config { kind: SummaryKind::Count, colors: ColorSpecs::default(), hyperlink: HyperlinkConfig::default(), stats: false, path: true, exclude_zero: true, separator_field: Arc::new(b":".to_vec()), separator_path: None, path_terminator: None, } } } /// The type of summary output (if any) to print. #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum SummaryKind { /// Show only a count of the total number of matches (counting each line /// at most once) found. /// /// If the `path` setting is enabled, then the count is prefixed by the /// corresponding file path. Count, /// Show only a count of the total number of matches (counting possibly /// many matches on each line) found. /// /// If the `path` setting is enabled, then the count is prefixed by the /// corresponding file path. CountMatches, /// Show only the file path if and only if a match was found. /// /// This ignores the `path` setting and always shows the file path. If no /// file path is provided, then searching will immediately stop and return /// an error. 
PathWithMatch, /// Show only the file path if and only if a match was found. /// /// This ignores the `path` setting and always shows the file path. If no /// file path is provided, then searching will immediately stop and return /// an error. PathWithoutMatch, /// Don't show any output and the stop the search once a match is found. /// /// Note that if `stats` is enabled, then searching continues in order to /// compute statistics. QuietWithMatch, /// Don't show any output and the stop the search once a non-matching file /// is found. /// /// Note that if `stats` is enabled, then searching continues in order to /// compute statistics. QuietWithoutMatch, } impl SummaryKind { /// Returns true if and only if this output mode requires a file path. /// /// When an output mode requires a file path, then the summary printer /// will report an error at the start of every search that lacks a file /// path. fn requires_path(&self) -> bool { use self::SummaryKind::*; match *self { PathWithMatch | PathWithoutMatch => true, Count | CountMatches | QuietWithMatch | QuietWithoutMatch => false, } } /// Returns true if and only if this output mode requires computing /// statistics, regardless of whether they have been enabled or not. fn requires_stats(&self) -> bool { use self::SummaryKind::*; match *self { CountMatches => true, Count | PathWithMatch | PathWithoutMatch | QuietWithMatch | QuietWithoutMatch => false, } } /// Returns true if and only if a printer using this output mode can /// quit after seeing the first match. fn quit_early(&self) -> bool { use self::SummaryKind::*; match *self { PathWithMatch | QuietWithMatch => true, Count | CountMatches | PathWithoutMatch | QuietWithoutMatch => { false } } } } /// A builder for summary printer. /// /// The builder permits configuring how the printer behaves. 
The summary /// printer has fewer configuration options than the standard printer because /// it aims to produce aggregate output about a single search (typically just /// one line) instead of output for each match. /// /// Once a `Summary` printer is built, its configuration cannot be changed. #[derive(Clone, Debug)] pub struct SummaryBuilder { config: Config, } impl SummaryBuilder { /// Return a new builder for configuring the summary printer. pub fn new() -> SummaryBuilder { SummaryBuilder { config: Config::default() } } /// Build a printer using any implementation of `termcolor::WriteColor`. /// /// The implementation of `WriteColor` used here controls whether colors /// are used or not when colors have been configured using the /// `color_specs` method. /// /// For maximum portability, callers should generally use either /// `termcolor::StandardStream` or `termcolor::BufferedStandardStream` /// where appropriate, which will automatically enable colors on Windows /// when possible. /// /// However, callers may also provide an arbitrary writer using the /// `termcolor::Ansi` or `termcolor::NoColor` wrappers, which always enable /// colors via ANSI escapes or always disable colors, respectively. /// /// As a convenience, callers may use `build_no_color` to automatically /// select the `termcolor::NoColor` wrapper to avoid needing to import /// from `termcolor` explicitly. pub fn build<W: WriteColor>(&self, wtr: W) -> Summary<W> { Summary { config: self.config.clone(), wtr: RefCell::new(CounterWriter::new(wtr)), } } /// Build a printer from any implementation of `io::Write` and never emit /// any colors, regardless of the user color specification settings. /// /// This is a convenience routine for /// `SummaryBuilder::build(termcolor::NoColor::new(wtr))`. pub fn build_no_color<W: io::Write>(&self, wtr: W) -> Summary<NoColor<W>> { self.build(NoColor::new(wtr)) } /// Set the output mode for this printer. 
/// /// The output mode controls how aggregate results of a search are printed. /// /// By default, this printer uses the `Count` mode. pub fn kind(&mut self, kind: SummaryKind) -> &mut SummaryBuilder { self.config.kind = kind; self } /// Set the user color specifications to use for coloring in this printer. /// /// A [`UserColorSpec`](crate::UserColorSpec) can be constructed from /// a string in accordance with the color specification format. See /// the `UserColorSpec` type documentation for more details on the /// format. A [`ColorSpecs`] can then be generated from zero or more /// `UserColorSpec`s. /// /// Regardless of the color specifications provided here, whether color /// is actually used or not is determined by the implementation of /// `WriteColor` provided to `build`. For example, if `termcolor::NoColor` /// is provided to `build`, then no color will ever be printed regardless /// of the color specifications provided here. /// /// This completely overrides any previous color specifications. This does /// not add to any previously provided color specifications on this /// builder. /// /// The default color specifications provide no styling. pub fn color_specs(&mut self, specs: ColorSpecs) -> &mut SummaryBuilder { self.config.colors = specs; self } /// Set the configuration to use for hyperlinks output by this printer. /// /// Regardless of the hyperlink format provided here, whether hyperlinks /// are actually used or not is determined by the implementation of /// `WriteColor` provided to `build`. For example, if `termcolor::NoColor` /// is provided to `build`, then no hyperlinks will ever be printed /// regardless of the format provided here. /// /// This completely overrides any previous hyperlink format. /// /// The default configuration results in not emitting any hyperlinks. pub fn hyperlink( &mut self, config: HyperlinkConfig, ) -> &mut SummaryBuilder { self.config.hyperlink = config; self } /// Enable the gathering of various aggregate statistics. 
/// /// When this is enabled (it's disabled by default), statistics will be /// gathered for all uses of `Summary` printer returned by `build`, /// including but not limited to, the total number of matches, the total /// number of bytes searched and the total number of bytes printed. /// /// Aggregate statistics can be accessed via the sink's /// [`SummarySink::stats`] method. /// /// When this is enabled, this printer may need to do extra work in order /// to compute certain statistics, which could cause the search to take /// longer. For example, in `QuietWithMatch` mode, a search can quit after /// finding the first match, but if `stats` is enabled, then the search /// will continue after the first match in order to compute statistics. /// /// For a complete description of available statistics, see [`Stats`]. /// /// Note that some output modes, such as `CountMatches`, automatically /// enable this option even if it has been explicitly disabled. pub fn stats(&mut self, yes: bool) -> &mut SummaryBuilder { self.config.stats = yes; self } /// When enabled, if a path was given to the printer, then it is shown in /// the output (either as a heading or as a prefix to each matching line). /// When disabled, then no paths are ever included in the output even when /// a path is provided to the printer. /// /// This setting has no effect in `PathWithMatch` and `PathWithoutMatch` /// modes. /// /// This is enabled by default. pub fn path(&mut self, yes: bool) -> &mut SummaryBuilder { self.config.path = yes; self } /// Exclude count-related summary results with no matches. /// /// When enabled and the mode is either `Count` or `CountMatches`, then /// results are not printed if no matches were found. Otherwise, every /// search prints a result with a possibly `0` number of matches. /// /// This is enabled by default. 
pub fn exclude_zero(&mut self, yes: bool) -> &mut SummaryBuilder { self.config.exclude_zero = yes; self } /// Set the separator used between fields for the `Count` and /// `CountMatches` modes. /// /// By default, this is set to `:`. pub fn separator_field(&mut self, sep: Vec<u8>) -> &mut SummaryBuilder { self.config.separator_field = Arc::new(sep); self } /// Set the path separator used when printing file paths. /// /// Typically, printing is done by emitting the file path as is. However, /// this setting provides the ability to use a different path separator /// from what the current environment has configured. /// /// A typical use for this option is to permit cygwin users on Windows to /// set the path separator to `/` instead of using the system default of /// `\`. /// /// This is disabled by default. pub fn separator_path(&mut self, sep: Option<u8>) -> &mut SummaryBuilder { self.config.separator_path = sep; self } /// Set the path terminator used. /// /// The path terminator is a byte that is printed after every file path /// emitted by this printer. /// /// If no path terminator is set (the default), then paths are terminated /// by either new lines or the configured field separator. pub fn path_terminator( &mut self, terminator: Option<u8>, ) -> &mut SummaryBuilder { self.config.path_terminator = terminator; self } } /// The summary printer, which emits aggregate results from a search. /// /// Aggregate results generally correspond to file paths and/or the number of /// matches found. /// /// A default printer can be created with either of the `Summary::new` or /// `Summary::new_no_color` constructors. However, there are a number of /// options that configure this printer's output. Those options can be /// configured using [`SummaryBuilder`]. /// /// This type is generic over `W`, which represents any implementation of /// the `termcolor::WriteColor` trait. 
#[derive(Clone, Debug)] pub struct Summary<W> { config: Config, wtr: RefCell<CounterWriter<W>>, } impl<W: WriteColor> Summary<W> { /// Return a summary printer with a default configuration that writes /// matches to the given writer. /// /// The writer should be an implementation of `termcolor::WriteColor` /// and not just a bare implementation of `io::Write`. To use a normal /// `io::Write` implementation (simultaneously sacrificing colors), use /// the `new_no_color` constructor. /// /// The default configuration uses the `Count` summary mode. pub fn new(wtr: W) -> Summary<W> { SummaryBuilder::new().build(wtr) } } impl<W: io::Write> Summary<NoColor<W>> { /// Return a summary printer with a default configuration that writes /// matches to the given writer. /// /// The writer can be any implementation of `io::Write`. With this /// constructor, the printer will never emit colors. /// /// The default configuration uses the `Count` summary mode. pub fn new_no_color(wtr: W) -> Summary<NoColor<W>> { SummaryBuilder::new().build_no_color(wtr) } } impl<W: WriteColor> Summary<W> { /// Return an implementation of `Sink` for the summary printer. /// /// This does not associate the printer with a file path, which means this /// implementation will never print a file path. If the output mode of /// this summary printer does not make sense without a file path (such as /// `PathWithMatch` or `PathWithoutMatch`), then any searches executed /// using this sink will immediately quit with an error. pub fn sink<'s, M: Matcher>( &'s mut self, matcher: M, ) -> SummarySink<'static, 's, M, W> { let interpolator = hyperlink::Interpolator::new(&self.config.hyperlink); let stats = if self.config.stats || self.config.kind.requires_stats() { Some(Stats::new()) } else { None }; SummarySink { matcher, summary: self, interpolator, path: None, start_time: Instant::now(), match_count: 0, binary_byte_offset: None, stats, } } /// Return an implementation of `Sink` associated with a file path. 
/// /// When the printer is associated with a path, then it may, depending on /// its configuration, print the path. pub fn sink_with_path<'p, 's, M, P>( &'s mut self, matcher: M, path: &'p P, ) -> SummarySink<'p, 's, M, W> where M: Matcher, P: ?Sized + AsRef<Path>, { if !self.config.path && !self.config.kind.requires_path() { return self.sink(matcher); } let interpolator = hyperlink::Interpolator::new(&self.config.hyperlink); let stats = if self.config.stats || self.config.kind.requires_stats() { Some(Stats::new()) } else { None }; let ppath = PrinterPath::new(path.as_ref()) .with_separator(self.config.separator_path); SummarySink { matcher, summary: self, interpolator, path: Some(ppath), start_time: Instant::now(), match_count: 0, binary_byte_offset: None, stats, } } } impl<W> Summary<W> { /// Returns true if and only if this printer has written at least one byte /// to the underlying writer during any of the previous searches. pub fn has_written(&self) -> bool { self.wtr.borrow().total_count() > 0 } /// Return a mutable reference to the underlying writer. pub fn get_mut(&mut self) -> &mut W { self.wtr.get_mut().get_mut() } /// Consume this printer and return back ownership of the underlying /// writer. pub fn into_inner(self) -> W { self.wtr.into_inner().into_inner() } } /// An implementation of `Sink` associated with a matcher and an optional file /// path for the summary printer. /// /// This type is generic over a few type parameters: /// /// * `'p` refers to the lifetime of the file path, if one is provided. When /// no file path is given, then this is `'static`. /// * `'s` refers to the lifetime of the [`Summary`] printer that this type /// borrows. /// * `M` refers to the type of matcher used by /// `grep_searcher::Searcher` that is reporting results to this sink. /// * `W` refers to the underlying writer that this printer is writing its /// output to. 
#[derive(Debug)] pub struct SummarySink<'p, 's, M: Matcher, W> { matcher: M, summary: &'s mut Summary<W>, interpolator: hyperlink::Interpolator, path: Option<PrinterPath<'p>>, start_time: Instant, match_count: u64, binary_byte_offset: Option<u64>, stats: Option<Stats>, } impl<'p, 's, M: Matcher, W: WriteColor> SummarySink<'p, 's, M, W> { /// Returns true if and only if this printer received a match in the /// previous search. /// /// This is unaffected by the result of searches before the previous /// search. pub fn has_match(&self) -> bool { match self.summary.config.kind { SummaryKind::PathWithoutMatch | SummaryKind::QuietWithoutMatch => { self.match_count == 0 } _ => self.match_count > 0, } } /// If binary data was found in the previous search, this returns the /// offset at which the binary data was first detected. /// /// The offset returned is an absolute offset relative to the entire /// set of bytes searched. /// /// This is unaffected by the result of searches before the previous /// search. e.g., If the search prior to the previous search found binary /// data but the previous search found no binary data, then this will /// return `None`. pub fn binary_byte_offset(&self) -> Option<u64> { self.binary_byte_offset } /// Return a reference to the stats produced by the printer for all /// searches executed on this sink. /// /// This only returns stats if they were requested via the /// [`SummaryBuilder`] configuration. pub fn stats(&self) -> Option<&Stats> { self.stats.as_ref() } /// Returns true if and only if the searcher may report matches over /// multiple lines. /// /// Note that this doesn't just return whether the searcher is in multi /// line mode, but also checks if the matter can match over multiple lines. /// If it can't, then we don't need multi line handling, even if the /// searcher has multi line mode enabled. 
fn multi_line(&self, searcher: &Searcher) -> bool { searcher.multi_line_with_matcher(&self.matcher) } /// If this printer has a file path associated with it, then this will /// write that path to the underlying writer followed by a line terminator. /// (If a path terminator is set, then that is used instead of the line /// terminator.) fn write_path_line(&mut self, searcher: &Searcher) -> io::Result<()> { if self.path.is_some() { self.write_path()?; if let Some(term) = self.summary.config.path_terminator { self.write(&[term])?; } else { self.write_line_term(searcher)?; } } Ok(()) } /// If this printer has a file path associated with it, then this will /// write that path to the underlying writer followed by the field /// separator. (If a path terminator is set, then that is used instead of /// the field separator.) fn write_path_field(&mut self) -> io::Result<()> { if self.path.is_some() { self.write_path()?; if let Some(term) = self.summary.config.path_terminator { self.write(&[term])?; } else { self.write(&self.summary.config.separator_field)?; } } Ok(()) } /// If this printer has a file path associated with it, then this will /// write that path to the underlying writer in the appropriate style /// (color and hyperlink). fn write_path(&mut self) -> io::Result<()> { if self.path.is_some() { let status = self.start_hyperlink()?; self.write_spec( self.summary.config.colors.path(), self.path.as_ref().unwrap().as_bytes(), )?; self.end_hyperlink(status)?; } Ok(()) } /// Starts a hyperlink span when applicable. 
fn start_hyperlink( &mut self, ) -> io::Result<hyperlink::InterpolatorStatus> { let Some(hyperpath) = self.path.as_ref().and_then(|p| p.as_hyperlink()) else { return Ok(hyperlink::InterpolatorStatus::inactive()); }; let values = hyperlink::Values::new(hyperpath); self.interpolator.begin(&values, &mut *self.summary.wtr.borrow_mut()) } fn end_hyperlink( &self, status: hyperlink::InterpolatorStatus, ) -> io::Result<()> { self.interpolator.finish(status, &mut *self.summary.wtr.borrow_mut()) } /// Write the line terminator configured on the given searcher. fn write_line_term(&self, searcher: &Searcher) -> io::Result<()> { self.write(searcher.line_terminator().as_bytes()) } /// Write the given bytes using the give style. fn write_spec(&self, spec: &ColorSpec, buf: &[u8]) -> io::Result<()> { self.summary.wtr.borrow_mut().set_color(spec)?; self.write(buf)?; self.summary.wtr.borrow_mut().reset()?; Ok(()) } /// Write all of the given bytes. fn write(&self, buf: &[u8]) -> io::Result<()> { self.summary.wtr.borrow_mut().write_all(buf) } } impl<'p, 's, M: Matcher, W: WriteColor> Sink for SummarySink<'p, 's, M, W> { type Error = io::Error; fn matched( &mut self, searcher: &Searcher, mat: &SinkMatch<'_>, ) -> Result<bool, io::Error> { let is_multi_line = self.multi_line(searcher); let sink_match_count = if self.stats.is_none() && !is_multi_line { 1 } else { // This gives us as many bytes as the searcher can offer. This // isn't guaranteed to hold the necessary context to get match // detection correct (because of look-around), but it does in // practice. let buf = mat.buffer(); let range = mat.bytes_range_in_buffer(); let mut count = 0; find_iter_at_in_context( searcher, &self.matcher, buf, range, |_| { count += 1; true }, )?; // Because of `find_iter_at_in_context` being a giant // kludge internally, it's possible that it won't find // *any* matches even though we clearly know that there is // at least one. So make sure we record at least one here. 
count.max(1) }; if is_multi_line { self.match_count += sink_match_count; } else { self.match_count += 1; } if let Some(ref mut stats) = self.stats { stats.add_matches(sink_match_count); stats.add_matched_lines(mat.lines().count() as u64); } else if self.summary.config.kind.quit_early() { return Ok(false); } Ok(true) } fn binary_data( &mut self, searcher: &Searcher, binary_byte_offset: u64, ) -> Result<bool, io::Error> { if searcher.binary_detection().quit_byte().is_some() { if let Some(ref path) = self.path { log::debug!( "ignoring {path}: found binary data at \ offset {binary_byte_offset}", path = path.as_path().display(), ); } } Ok(true) } fn begin(&mut self, _searcher: &Searcher) -> Result<bool, io::Error> { if self.path.is_none() && self.summary.config.kind.requires_path() { return Err(io::Error::error_message(format!( "output kind {:?} requires a file path", self.summary.config.kind, ))); } self.summary.wtr.borrow_mut().reset_count(); self.start_time = Instant::now(); self.match_count = 0; self.binary_byte_offset = None; Ok(true) } fn finish( &mut self, searcher: &Searcher, finish: &SinkFinish, ) -> Result<(), io::Error> { self.binary_byte_offset = finish.binary_byte_offset(); if let Some(ref mut stats) = self.stats { stats.add_elapsed(self.start_time.elapsed()); stats.add_searches(1); if self.match_count > 0 { stats.add_searches_with_match(1); } stats.add_bytes_searched(finish.byte_count()); stats.add_bytes_printed(self.summary.wtr.borrow().count()); } // If our binary detection method says to quit after seeing binary // data, then we shouldn't print any results at all, even if we've // found a match before detecting binary data. The intent here is to // keep BinaryDetection::quit as a form of filter. Otherwise, we can // present a matching file with a smaller number of matches than // there might be, which can be quite misleading. 
// // If our binary detection method is to convert binary data, then we // don't quit and therefore search the entire contents of the file. // // There is an unfortunate inconsistency here. Namely, when using // QuietWithMatch or PathWithMatch, then the printer can quit after the // first match seen, which could be long before seeing binary data. // This means that using PathWithMatch can print a path where as using // Count might not print it at all because of binary data. // // It's not possible to fix this without also potentially significantly // impacting the performance of QuietWithMatch or PathWithMatch, so we // accept the bug. if self.binary_byte_offset.is_some() && searcher.binary_detection().quit_byte().is_some() { // Squash the match count. The statistics reported will still // contain the match count, but the "official" match count should // be zero. self.match_count = 0; return Ok(()); } let show_count = !self.summary.config.exclude_zero || self.match_count > 0; match self.summary.config.kind { SummaryKind::Count => { if show_count { self.write_path_field()?; self.write(self.match_count.to_string().as_bytes())?; self.write_line_term(searcher)?; } } SummaryKind::CountMatches => { if show_count { self.write_path_field()?; let stats = self .stats .as_ref() .expect("CountMatches should enable stats tracking"); self.write(stats.matches().to_string().as_bytes())?; self.write_line_term(searcher)?; } } SummaryKind::PathWithMatch => { if self.match_count > 0 { self.write_path_line(searcher)?; } } SummaryKind::PathWithoutMatch => { if self.match_count == 0 { self.write_path_line(searcher)?; } } SummaryKind::QuietWithMatch | SummaryKind::QuietWithoutMatch => {} } Ok(()) } } #[cfg(test)] mod tests { use grep_regex::RegexMatcher; use grep_searcher::SearcherBuilder; use termcolor::NoColor; use super::{Summary, SummaryBuilder, SummaryKind}; const SHERLOCK: &'static [u8] = b"\ For the Doctor Watsons of this world, as opposed to the Sherlock Holmeses, success in the 
province of detective work must always be, to a very large extent, the result of luck. Sherlock Holmes can extract a clew from a wisp of straw or a flake of cigar ash; but Doctor Watson has to have it taken out for him and dusted, and exhibited clearly, with a label attached. "; fn printer_contents(printer: &mut Summary<NoColor<Vec<u8>>>) -> String { String::from_utf8(printer.get_mut().get_ref().to_owned()).unwrap() } #[test] fn path_with_match_error() { let matcher = RegexMatcher::new(r"Watson").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::PathWithMatch) .build_no_color(vec![]); let res = SearcherBuilder::new().build().search_reader( &matcher, SHERLOCK, printer.sink(&matcher), ); assert!(res.is_err()); } #[test] fn path_without_match_error() { let matcher = RegexMatcher::new(r"Watson").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::PathWithoutMatch) .build_no_color(vec![]); let res = SearcherBuilder::new().build().search_reader( &matcher, SHERLOCK, printer.sink(&matcher), ); assert!(res.is_err()); } #[test] fn count_no_path() { let matcher = RegexMatcher::new(r"Watson").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::Count) .build_no_color(vec![]); SearcherBuilder::new() .build() .search_reader(&matcher, SHERLOCK, printer.sink(&matcher)) .unwrap(); let got = printer_contents(&mut printer); assert_eq_printed!("2\n", got); } #[test] fn count_no_path_even_with_path() { let matcher = RegexMatcher::new(r"Watson").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::Count) .path(false) .build_no_color(vec![]); SearcherBuilder::new() .build() .search_reader( &matcher, SHERLOCK, printer.sink_with_path(&matcher, "sherlock"), ) .unwrap(); let got = printer_contents(&mut printer); assert_eq_printed!("2\n", got); } #[test] fn count_path() { let matcher = RegexMatcher::new(r"Watson").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::Count) .build_no_color(vec![]); 
SearcherBuilder::new() .build() .search_reader( &matcher, SHERLOCK, printer.sink_with_path(&matcher, "sherlock"), ) .unwrap(); let got = printer_contents(&mut printer); assert_eq_printed!("sherlock:2\n", got); } #[test] fn count_path_with_zero() { let matcher = RegexMatcher::new(r"NO MATCH").unwrap(); let mut printer = SummaryBuilder::new() .kind(SummaryKind::Count) .exclude_zero(false) .build_no_color(vec![]); SearcherBuilder::new() .build() .search_reader( &matcher, SHERLOCK, printer.sink_with_path(&matcher, "sherlock"), ) .unwrap();
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
true
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/printer/src/macros.rs
crates/printer/src/macros.rs
/// Like assert_eq, but nicer output for long strings. #[cfg(test)] #[macro_export] macro_rules! assert_eq_printed { ($expected:expr, $got:expr) => { let expected = &*$expected; let got = &*$got; if expected != got { panic!(" printed outputs differ! expected: ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ {} ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ got: ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ {} ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ", expected, got); } } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/printer/src/counter.rs
crates/printer/src/counter.rs
use std::io::{self, Write}; use termcolor::{ColorSpec, HyperlinkSpec, WriteColor}; /// A writer that counts the number of bytes that have been successfully /// written. #[derive(Clone, Debug)] pub(crate) struct CounterWriter<W> { wtr: W, count: u64, total_count: u64, } impl<W: Write> CounterWriter<W> { pub(crate) fn new(wtr: W) -> CounterWriter<W> { CounterWriter { wtr, count: 0, total_count: 0 } } } impl<W> CounterWriter<W> { /// Returns the total number of bytes written since construction or the /// last time `reset` was called. #[inline] pub(crate) fn count(&self) -> u64 { self.count } /// Returns the total number of bytes written since construction. #[inline] pub(crate) fn total_count(&self) -> u64 { self.total_count + self.count } /// Resets the number of bytes written to `0`. #[inline] pub(crate) fn reset_count(&mut self) { self.total_count += self.count; self.count = 0; } #[inline] pub(crate) fn get_mut(&mut self) -> &mut W { &mut self.wtr } #[inline] pub(crate) fn into_inner(self) -> W { self.wtr } } impl<W: Write> Write for CounterWriter<W> { // A high match count ad hoc benchmark flagged this as a hot spot. #[inline(always)] fn write(&mut self, buf: &[u8]) -> Result<usize, io::Error> { let n = self.wtr.write(buf)?; self.count += n as u64; Ok(n) } #[inline] fn flush(&mut self) -> Result<(), io::Error> { self.wtr.flush() } } impl<W: WriteColor> WriteColor for CounterWriter<W> { #[inline] fn supports_color(&self) -> bool { self.wtr.supports_color() } #[inline] fn supports_hyperlinks(&self) -> bool { self.wtr.supports_hyperlinks() } #[inline] fn set_color(&mut self, spec: &ColorSpec) -> io::Result<()> { self.wtr.set_color(spec) } #[inline] fn set_hyperlink(&mut self, link: &HyperlinkSpec) -> io::Result<()> { self.wtr.set_hyperlink(link) } #[inline] fn reset(&mut self) -> io::Result<()> { self.wtr.reset() } #[inline] fn is_synchronous(&self) -> bool { self.wtr.is_synchronous() } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/printer/src/color.rs
crates/printer/src/color.rs
use termcolor::{Color, ColorSpec, ParseColorError};

/// Returns a default set of color specifications.
///
/// This may change over time, but the color choices are meant to be fairly
/// conservative that work across terminal themes.
///
/// Additional color specifications can be added to the list returned. More
/// recently added specifications override previously added specifications.
pub fn default_color_specs() -> Vec<UserColorSpec> {
    vec![
        // Magenta paths read well on most Unix terminal themes; cyan is the
        // conventional choice on Windows consoles.
        #[cfg(unix)]
        "path:fg:magenta".parse().unwrap(),
        #[cfg(windows)]
        "path:fg:cyan".parse().unwrap(),
        "line:fg:green".parse().unwrap(),
        "match:fg:red".parse().unwrap(),
        "match:style:bold".parse().unwrap(),
    ]
}

/// An error that can occur when parsing color specifications.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum ColorError {
    /// This occurs when an unrecognized output type is used.
    UnrecognizedOutType(String),
    /// This occurs when an unrecognized spec type is used.
    UnrecognizedSpecType(String),
    /// This occurs when an unrecognized color name is used.
    UnrecognizedColor(String, String),
    /// This occurs when an unrecognized style attribute is used.
    UnrecognizedStyle(String),
    /// This occurs when the format of a color specification is invalid.
    InvalidFormat(String),
}

impl std::error::Error for ColorError {}

impl ColorError {
    // Adapts termcolor's color-parse error into our own error type, keeping
    // both the offending input and termcolor's message.
    fn from_parse_error(err: ParseColorError) -> ColorError {
        ColorError::UnrecognizedColor(
            err.invalid().to_string(),
            err.to_string(),
        )
    }
}

impl std::fmt::Display for ColorError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match *self {
            ColorError::UnrecognizedOutType(ref name) => write!(
                f,
                "unrecognized output type '{}'. Choose from: \
                 path, line, column, match, highlight.",
                name,
            ),
            ColorError::UnrecognizedSpecType(ref name) => write!(
                f,
                "unrecognized spec type '{}'. Choose from: \
                 fg, bg, style, none.",
                name,
            ),
            // The message from termcolor already names the invalid color, so
            // the first field is intentionally unused here.
            ColorError::UnrecognizedColor(_, ref msg) => write!(f, "{}", msg),
            ColorError::UnrecognizedStyle(ref name) => write!(
                f,
                "unrecognized style attribute '{}'. Choose from: \
                 nobold, bold, nointense, intense, nounderline, \
                 underline, noitalic, italic.",
                name,
            ),
            ColorError::InvalidFormat(ref original) => write!(
                f,
                "invalid color spec format: '{}'. Valid format is \
                 '(path|line|column|match|highlight):(fg|bg|style):(value)'.",
                original,
            ),
        }
    }
}

/// A merged set of color specifications.
///
/// This set of color specifications represents the various color types that
/// are supported by the printers in this crate. A set of color specifications
/// can be created from a sequence of
/// [`UserColorSpec`]s.
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct ColorSpecs {
    path: ColorSpec,
    line: ColorSpec,
    column: ColorSpec,
    matched: ColorSpec,
    highlight: ColorSpec,
}

/// A single color specification provided by the user.
///
/// ## Format
///
/// The format of a `Spec` is a triple: `{type}:{attribute}:{value}`. Each
/// component is defined as follows:
///
/// * `{type}` can be one of `path`, `line`, `column`, `match` or `highlight`.
/// * `{attribute}` can be one of `fg`, `bg` or `style`. `{attribute}` may also
///   be the special value `none`, in which case, `{value}` can be omitted.
/// * `{value}` is either a color name (for `fg`/`bg`) or a style instruction.
///
/// `{type}` controls which part of the output should be styled.
///
/// When `{attribute}` is `none`, then this should cause any existing style
/// settings to be cleared for the specified `type`.
///
/// `{value}` should be a color when `{attribute}` is `fg` or `bg`, or it
/// should be a style instruction when `{attribute}` is `style`. When
/// `{attribute}` is `none`, `{value}` must be omitted.
///
/// Valid colors are `black`, `blue`, `green`, `red`, `cyan`, `magenta`,
/// `yellow`, `white`. Extended colors can also be specified, and are formatted
/// as `x` (for 256-bit colors) or `x,x,x` (for 24-bit true color), where
/// `x` is a number between 0 and 255 inclusive. `x` may be given as a normal
/// decimal number of a hexadecimal number, where the latter is prefixed by
/// `0x`.
///
/// Valid style instructions are `nobold`, `bold`, `intense`, `nointense`,
/// `underline`, `nounderline`, `italic`, `noitalic`.
///
/// ## Example
///
/// The standard way to build a `UserColorSpec` is to parse it from a string.
/// Once multiple `UserColorSpec`s have been constructed, they can be provided
/// to the standard printer where they will automatically be applied to the
/// output.
///
/// A `UserColorSpec` can also be converted to a `termcolor::ColorSpec`:
///
/// ```rust
/// # fn main() {
/// use termcolor::{Color, ColorSpec};
/// use grep_printer::UserColorSpec;
///
/// let user_spec1: UserColorSpec = "path:fg:blue".parse().unwrap();
/// let user_spec2: UserColorSpec = "match:bg:0xff,0x7f,0x00".parse().unwrap();
///
/// let spec1 = user_spec1.to_color_spec();
/// let spec2 = user_spec2.to_color_spec();
///
/// assert_eq!(spec1.fg(), Some(&Color::Blue));
/// assert_eq!(spec2.bg(), Some(&Color::Rgb(0xFF, 0x7F, 0x00)));
/// # }
/// ```
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct UserColorSpec {
    ty: OutType,
    value: SpecValue,
}

impl UserColorSpec {
    /// Convert this user provided color specification to a specification that
    /// can be used with `termcolor`. This drops the type of this specification
    /// (where the type indicates where the color is applied in the standard
    /// printer, e.g., to the file path or the line numbers, etc.).
    pub fn to_color_spec(&self) -> ColorSpec {
        let mut spec = ColorSpec::default();
        self.value.merge_into(&mut spec);
        spec
    }
}

/// The actual value given by the specification.
#[derive(Clone, Debug, Eq, PartialEq)]
enum SpecValue {
    None,
    Fg(Color),
    Bg(Color),
    Style(Style),
}

/// The set of configurable portions of ripgrep's output.
#[derive(Clone, Debug, Eq, PartialEq)]
enum OutType {
    Path,
    Line,
    Column,
    Match,
    Highlight,
}

/// The specification type.
#[derive(Clone, Debug, Eq, PartialEq)]
enum SpecType {
    Fg,
    Bg,
    Style,
    None,
}

/// The set of available styles for use in the terminal.
#[derive(Clone, Debug, Eq, PartialEq)]
enum Style {
    Bold,
    NoBold,
    Intense,
    NoIntense,
    Underline,
    NoUnderline,
    Italic,
    NoItalic,
}

impl ColorSpecs {
    /// Create color specifications from a list of user supplied
    /// specifications.
    pub fn new(specs: &[UserColorSpec]) -> ColorSpecs {
        let mut merged = ColorSpecs::default();
        // Specs are merged in order, so later specs override earlier ones
        // for the same output type.
        for spec in specs {
            match spec.ty {
                OutType::Path => spec.merge_into(&mut merged.path),
                OutType::Line => spec.merge_into(&mut merged.line),
                OutType::Column => spec.merge_into(&mut merged.column),
                OutType::Match => spec.merge_into(&mut merged.matched),
                OutType::Highlight => spec.merge_into(&mut merged.highlight),
            }
        }
        merged
    }

    /// Create a default set of specifications that have color.
    ///
    /// This is distinct from `ColorSpecs`'s `Default` implementation in that
    /// this provides a set of default color choices, where as the `Default`
    /// implementation provides no color choices.
    pub fn default_with_color() -> ColorSpecs {
        ColorSpecs::new(&default_color_specs())
    }

    /// Return the color specification for coloring file paths.
    pub fn path(&self) -> &ColorSpec {
        &self.path
    }

    /// Return the color specification for coloring line numbers.
    pub fn line(&self) -> &ColorSpec {
        &self.line
    }

    /// Return the color specification for coloring column numbers.
    pub fn column(&self) -> &ColorSpec {
        &self.column
    }

    /// Return the color specification for coloring matched text.
    pub fn matched(&self) -> &ColorSpec {
        &self.matched
    }

    /// Return the color specification for coloring entire line if there is a
    /// matched text.
    pub fn highlight(&self) -> &ColorSpec {
        &self.highlight
    }
}

impl UserColorSpec {
    /// Merge this spec into the given color specification.
    fn merge_into(&self, cspec: &mut ColorSpec) {
        self.value.merge_into(cspec);
    }
}

impl SpecValue {
    /// Merge this spec value into the given color specification.
    fn merge_into(&self, cspec: &mut ColorSpec) {
        match *self {
            // `none` wipes out everything previously accumulated for this
            // output type.
            SpecValue::None => cspec.clear(),
            SpecValue::Fg(ref color) => {
                cspec.set_fg(Some(color.clone()));
            }
            SpecValue::Bg(ref color) => {
                cspec.set_bg(Some(color.clone()));
            }
            SpecValue::Style(ref style) => match *style {
                Style::Bold => {
                    cspec.set_bold(true);
                }
                Style::NoBold => {
                    cspec.set_bold(false);
                }
                Style::Intense => {
                    cspec.set_intense(true);
                }
                Style::NoIntense => {
                    cspec.set_intense(false);
                }
                Style::Underline => {
                    cspec.set_underline(true);
                }
                Style::NoUnderline => {
                    cspec.set_underline(false);
                }
                Style::Italic => {
                    cspec.set_italic(true);
                }
                Style::NoItalic => {
                    cspec.set_italic(false);
                }
            },
        }
    }
}

impl std::str::FromStr for UserColorSpec {
    type Err = ColorError;

    fn from_str(s: &str) -> Result<UserColorSpec, ColorError> {
        // Split into at most `{type}:{attribute}:{value}`. Two pieces are
        // permitted only for the `none` attribute, which takes no value.
        let pieces: Vec<&str> = s.split(':').collect();
        if pieces.len() <= 1 || pieces.len() > 3 {
            return Err(ColorError::InvalidFormat(s.to_string()));
        }
        let otype: OutType = pieces[0].parse()?;
        match pieces[1].parse()? {
            SpecType::None => {
                Ok(UserColorSpec { ty: otype, value: SpecValue::None })
            }
            SpecType::Style => {
                if pieces.len() < 3 {
                    return Err(ColorError::InvalidFormat(s.to_string()));
                }
                let style: Style = pieces[2].parse()?;
                Ok(UserColorSpec { ty: otype, value: SpecValue::Style(style) })
            }
            SpecType::Fg => {
                if pieces.len() < 3 {
                    return Err(ColorError::InvalidFormat(s.to_string()));
                }
                let color: Color =
                    pieces[2].parse().map_err(ColorError::from_parse_error)?;
                Ok(UserColorSpec { ty: otype, value: SpecValue::Fg(color) })
            }
            SpecType::Bg => {
                if pieces.len() < 3 {
                    return Err(ColorError::InvalidFormat(s.to_string()));
                }
                let color: Color =
                    pieces[2].parse().map_err(ColorError::from_parse_error)?;
                Ok(UserColorSpec { ty: otype, value: SpecValue::Bg(color) })
            }
        }
    }
}

impl std::str::FromStr for OutType {
    type Err = ColorError;

    fn from_str(s: &str) -> Result<OutType, ColorError> {
        // Matching is case insensitive.
        match &*s.to_lowercase() {
            "path" => Ok(OutType::Path),
            "line" => Ok(OutType::Line),
            "column" => Ok(OutType::Column),
            "match" => Ok(OutType::Match),
            "highlight" => Ok(OutType::Highlight),
            _ => Err(ColorError::UnrecognizedOutType(s.to_string())),
        }
    }
}

impl std::str::FromStr for SpecType {
    type Err = ColorError;

    fn from_str(s: &str) -> Result<SpecType, ColorError> {
        // Matching is case insensitive.
        match &*s.to_lowercase() {
            "fg" => Ok(SpecType::Fg),
            "bg" => Ok(SpecType::Bg),
            "style" => Ok(SpecType::Style),
            "none" => Ok(SpecType::None),
            _ => Err(ColorError::UnrecognizedSpecType(s.to_string())),
        }
    }
}

impl std::str::FromStr for Style {
    type Err = ColorError;

    fn from_str(s: &str) -> Result<Style, ColorError> {
        // Matching is case insensitive.
        match &*s.to_lowercase() {
            "bold" => Ok(Style::Bold),
            "nobold" => Ok(Style::NoBold),
            "intense" => Ok(Style::Intense),
            "nointense" => Ok(Style::NoIntense),
            "underline" => Ok(Style::Underline),
            "nounderline" => Ok(Style::NoUnderline),
            "italic" => Ok(Style::Italic),
            "noitalic" => Ok(Style::NoItalic),
            _ => Err(ColorError::UnrecognizedStyle(s.to_string())),
        }
    }
}
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/printer/src/hyperlink/mod.rs
crates/printer/src/hyperlink/mod.rs
use std::{cell::RefCell, io, path::Path, sync::Arc}; use { bstr::ByteSlice, termcolor::{HyperlinkSpec, WriteColor}, }; use crate::util::DecimalFormatter; use self::aliases::HYPERLINK_PATTERN_ALIASES; mod aliases; /// Hyperlink configuration. /// /// This configuration specifies both the [hyperlink format](HyperlinkFormat) /// and an [environment](HyperlinkConfig) for interpolating a subset of /// variables. The specific subset includes variables that are intended to /// be invariant throughout the lifetime of a process, such as a machine's /// hostname. /// /// A hyperlink configuration can be provided to printer builders such as /// [`StandardBuilder::hyperlink`](crate::StandardBuilder::hyperlink). #[derive(Clone, Debug, Default, Eq, PartialEq)] pub struct HyperlinkConfig(Arc<HyperlinkConfigInner>); #[derive(Clone, Debug, Default, Eq, PartialEq)] struct HyperlinkConfigInner { env: HyperlinkEnvironment, format: HyperlinkFormat, } impl HyperlinkConfig { /// Create a new configuration from an environment and a format. pub fn new( env: HyperlinkEnvironment, format: HyperlinkFormat, ) -> HyperlinkConfig { HyperlinkConfig(Arc::new(HyperlinkConfigInner { env, format })) } /// Returns the hyperlink environment in this configuration. pub(crate) fn environment(&self) -> &HyperlinkEnvironment { &self.0.env } /// Returns the hyperlink format in this configuration. pub(crate) fn format(&self) -> &HyperlinkFormat { &self.0.format } } /// A hyperlink format with variables. /// /// This can be created by parsing a string using `HyperlinkFormat::from_str`. /// /// The default format is empty. An empty format is valid and effectively /// disables hyperlinks. 
/// /// # Example /// /// ``` /// use grep_printer::HyperlinkFormat; /// /// let fmt = "vscode".parse::<HyperlinkFormat>()?; /// assert_eq!(fmt.to_string(), "vscode://file{path}:{line}:{column}"); /// /// # Ok::<(), Box<dyn std::error::Error>>(()) /// ``` #[derive(Clone, Debug, Default, Eq, PartialEq)] pub struct HyperlinkFormat { parts: Vec<Part>, is_line_dependent: bool, } impl HyperlinkFormat { /// Creates an empty hyperlink format. pub fn empty() -> HyperlinkFormat { HyperlinkFormat::default() } /// Returns true if this format is empty. pub fn is_empty(&self) -> bool { self.parts.is_empty() } /// Creates a [`HyperlinkConfig`] from this format and the environment /// given. pub fn into_config(self, env: HyperlinkEnvironment) -> HyperlinkConfig { HyperlinkConfig::new(env, self) } /// Returns true if the format can produce line-dependent hyperlinks. pub(crate) fn is_line_dependent(&self) -> bool { self.is_line_dependent } } impl std::str::FromStr for HyperlinkFormat { type Err = HyperlinkFormatError; fn from_str(s: &str) -> Result<HyperlinkFormat, HyperlinkFormatError> { use self::HyperlinkFormatErrorKind::*; #[derive(Debug)] enum State { Verbatim, VerbatimCloseVariable, OpenVariable, InVariable, } let mut builder = FormatBuilder::new(); let input = match HyperlinkAlias::find(s) { Some(alias) => alias.format(), None => s, }; let mut name = String::new(); let mut state = State::Verbatim; let err = |kind| HyperlinkFormatError { kind }; for ch in input.chars() { state = match state { State::Verbatim => { if ch == '{' { State::OpenVariable } else if ch == '}' { State::VerbatimCloseVariable } else { builder.append_char(ch); State::Verbatim } } State::VerbatimCloseVariable => { if ch == '}' { builder.append_char('}'); State::Verbatim } else { return Err(err(InvalidCloseVariable)); } } State::OpenVariable => { if ch == '{' { builder.append_char('{'); State::Verbatim } else { name.clear(); if ch == '}' { builder.append_var(&name)?; State::Verbatim } else { name.push(ch); 
State::InVariable } } } State::InVariable => { if ch == '}' { builder.append_var(&name)?; State::Verbatim } else { name.push(ch); State::InVariable } } }; } match state { State::Verbatim => builder.build(), State::VerbatimCloseVariable => Err(err(InvalidCloseVariable)), State::OpenVariable | State::InVariable => { Err(err(UnclosedVariable)) } } } } impl std::fmt::Display for HyperlinkFormat { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { for part in self.parts.iter() { part.fmt(f)?; } Ok(()) } } /// An alias for a hyperlink format. /// /// Hyperlink aliases are built-in formats, therefore they hold static values. /// Some of their features are usable in const blocks. #[derive(Clone, Debug)] pub struct HyperlinkAlias { name: &'static str, description: &'static str, format: &'static str, display_priority: Option<i16>, } impl HyperlinkAlias { /// Returns the name of the alias. pub const fn name(&self) -> &str { self.name } /// Returns a very short description of this hyperlink alias. pub const fn description(&self) -> &str { self.description } /// Returns the display priority of this alias. /// /// If no priority is set, then `None` is returned. /// /// The display priority is meant to reflect some special status associated /// with an alias. For example, the `default` and `none` aliases have a /// display priority. This is meant to encourage listing them first in /// documentation. /// /// A lower display priority implies the alias should be shown before /// aliases with a higher (or absent) display priority. /// /// Callers cannot rely on any specific display priority value to remain /// stable across semver compatible releases of this crate. pub const fn display_priority(&self) -> Option<i16> { self.display_priority } /// Returns the format string of the alias. const fn format(&self) -> &'static str { self.format } /// Looks for the hyperlink alias defined by the given name. /// /// If one does not exist, `None` is returned. 
fn find(name: &str) -> Option<&HyperlinkAlias> { HYPERLINK_PATTERN_ALIASES .binary_search_by_key(&name, |alias| alias.name()) .map(|i| &HYPERLINK_PATTERN_ALIASES[i]) .ok() } } /// A static environment for hyperlink interpolation. /// /// This environment permits setting the values of variables used in hyperlink /// interpolation that are not expected to change for the lifetime of a program. /// That is, these values are invariant. /// /// Currently, this includes the hostname and a WSL distro prefix. #[derive(Clone, Debug, Default, Eq, PartialEq)] pub struct HyperlinkEnvironment { host: Option<String>, wsl_prefix: Option<String>, } impl HyperlinkEnvironment { /// Create a new empty hyperlink environment. pub fn new() -> HyperlinkEnvironment { HyperlinkEnvironment::default() } /// Set the `{host}` variable, which fills in any hostname components of /// a hyperlink. /// /// One can get the hostname in the current environment via the `hostname` /// function in the `grep-cli` crate. pub fn host(&mut self, host: Option<String>) -> &mut HyperlinkEnvironment { self.host = host; self } /// Set the `{wslprefix}` variable, which contains the WSL distro prefix. /// An example value is `wsl$/Ubuntu`. The distro name can typically be /// discovered from the `WSL_DISTRO_NAME` environment variable. pub fn wsl_prefix( &mut self, wsl_prefix: Option<String>, ) -> &mut HyperlinkEnvironment { self.wsl_prefix = wsl_prefix; self } } /// An error that can occur when parsing a hyperlink format. #[derive(Clone, Debug, Eq, PartialEq)] pub struct HyperlinkFormatError { kind: HyperlinkFormatErrorKind, } #[derive(Clone, Debug, Eq, PartialEq)] enum HyperlinkFormatErrorKind { /// This occurs when there are zero variables in the format. NoVariables, /// This occurs when the {path} variable is missing. NoPathVariable, /// This occurs when the {line} variable is missing, while the {column} /// variable is present. NoLineVariable, /// This occurs when an unknown variable is used. 
InvalidVariable(String), /// The format doesn't start with a valid scheme. InvalidScheme, /// This occurs when an unescaped `}` is found without a corresponding /// `{` preceding it. InvalidCloseVariable, /// This occurs when a `{` is found without a corresponding `}` following /// it. UnclosedVariable, } impl std::error::Error for HyperlinkFormatError {} impl std::fmt::Display for HyperlinkFormatError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { use self::HyperlinkFormatErrorKind::*; match self.kind { NoVariables => { let mut aliases = hyperlink_aliases(); aliases.sort_by_key(|alias| { alias.display_priority().unwrap_or(i16::MAX) }); let names: Vec<&str> = aliases.iter().map(|alias| alias.name()).collect(); write!( f, "at least a {{path}} variable is required in a \ hyperlink format, or otherwise use a valid alias: \ {aliases}", aliases = names.join(", "), ) } NoPathVariable => { write!( f, "the {{path}} variable is required in a hyperlink format", ) } NoLineVariable => { write!( f, "the hyperlink format contains a {{column}} variable, \ but no {{line}} variable is present", ) } InvalidVariable(ref name) => { write!( f, "invalid hyperlink format variable: '{name}', choose \ from: path, line, column, host, wslprefix", ) } InvalidScheme => { write!( f, "the hyperlink format must start with a valid URL scheme, \ i.e., [0-9A-Za-z+-.]+:", ) } InvalidCloseVariable => { write!( f, "unopened variable: found '}}' without a \ corresponding '{{' preceding it", ) } UnclosedVariable => { write!( f, "unclosed variable: found '{{' without a \ corresponding '}}' following it", ) } } } } /// A builder for `HyperlinkFormat`. /// /// Once a `HyperlinkFormat` is built, it is immutable. #[derive(Debug)] struct FormatBuilder { parts: Vec<Part>, } impl FormatBuilder { /// Creates a new hyperlink format builder. fn new() -> FormatBuilder { FormatBuilder { parts: vec![] } } /// Appends static text. 
fn append_slice(&mut self, text: &[u8]) -> &mut FormatBuilder { if let Some(Part::Text(contents)) = self.parts.last_mut() { contents.extend_from_slice(text); } else if !text.is_empty() { self.parts.push(Part::Text(text.to_vec())); } self } /// Appends a single character. fn append_char(&mut self, ch: char) -> &mut FormatBuilder { self.append_slice(ch.encode_utf8(&mut [0; 4]).as_bytes()) } /// Appends a variable with the given name. If the name isn't recognized, /// then this returns an error. fn append_var( &mut self, name: &str, ) -> Result<&mut FormatBuilder, HyperlinkFormatError> { let part = match name { "host" => Part::Host, "wslprefix" => Part::WSLPrefix, "path" => Part::Path, "line" => Part::Line, "column" => Part::Column, unknown => { let err = HyperlinkFormatError { kind: HyperlinkFormatErrorKind::InvalidVariable( unknown.to_string(), ), }; return Err(err); } }; self.parts.push(part); Ok(self) } /// Builds the format. fn build(&self) -> Result<HyperlinkFormat, HyperlinkFormatError> { self.validate()?; Ok(HyperlinkFormat { parts: self.parts.clone(), is_line_dependent: self.parts.contains(&Part::Line), }) } /// Validate that the format is well-formed. fn validate(&self) -> Result<(), HyperlinkFormatError> { use self::HyperlinkFormatErrorKind::*; let err = |kind| HyperlinkFormatError { kind }; // An empty format is fine. It just means hyperlink support is // disabled. if self.parts.is_empty() { return Ok(()); } // If all parts are just text, then there are no variables. It's // likely a reference to an invalid alias. if self.parts.iter().all(|p| matches!(*p, Part::Text(_))) { return Err(err(NoVariables)); } // Even if we have other variables, no path variable means the // hyperlink can't possibly work the way it is intended. if !self.parts.contains(&Part::Path) { return Err(err(NoPathVariable)); } // If the {column} variable is used, then we also need a {line} // variable or else {column} can't possibly work. 
if self.parts.contains(&Part::Column) && !self.parts.contains(&Part::Line) { return Err(err(NoLineVariable)); } self.validate_scheme() } /// Validate that the format starts with a valid scheme. Validation is done /// according to how a scheme is defined in RFC 1738 sections 2.1[1] and /// 5[2]. In short, a scheme is this: /// /// scheme = 1*[ lowalpha | digit | "+" | "-" | "." ] /// /// but is case insensitive. /// /// [1]: https://datatracker.ietf.org/doc/html/rfc1738#section-2.1 /// [2]: https://datatracker.ietf.org/doc/html/rfc1738#section-5 fn validate_scheme(&self) -> Result<(), HyperlinkFormatError> { let err_invalid_scheme = HyperlinkFormatError { kind: HyperlinkFormatErrorKind::InvalidScheme, }; let Some(Part::Text(part)) = self.parts.first() else { return Err(err_invalid_scheme); }; let Some(colon) = part.find_byte(b':') else { return Err(err_invalid_scheme); }; let scheme = &part[..colon]; if scheme.is_empty() { return Err(err_invalid_scheme); } let is_valid_scheme_char = |byte| match byte { b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z' | b'+' | b'-' | b'.' => { true } _ => false, }; if !scheme.iter().all(|&b| is_valid_scheme_char(b)) { return Err(err_invalid_scheme); } Ok(()) } } /// A hyperlink format part. /// /// A sequence of these corresponds to a complete format. (Not all sequences /// are valid.) #[derive(Clone, Debug, Eq, PartialEq)] enum Part { /// Static text. /// /// We use `Vec<u8>` here (and more generally treat a format string as a /// sequence of bytes) because file paths may be arbitrary bytes. A rare /// case, but one for which there is no good reason to choke on. Text(Vec<u8>), /// Variable for the hostname. Host, /// Variable for a WSL path prefix. WSLPrefix, /// Variable for the file path. Path, /// Variable for the line number. Line, /// Variable for the column number. Column, } impl Part { /// Interpolate this part using the given `env` and `values`, and write /// the result of interpolation to the buffer provided. 
fn interpolate_to( &self, env: &HyperlinkEnvironment, values: &Values, dest: &mut Vec<u8>, ) { match *self { Part::Text(ref text) => dest.extend_from_slice(text), Part::Host => dest.extend_from_slice( env.host.as_ref().map(|s| s.as_bytes()).unwrap_or(b""), ), Part::WSLPrefix => dest.extend_from_slice( env.wsl_prefix.as_ref().map(|s| s.as_bytes()).unwrap_or(b""), ), Part::Path => dest.extend_from_slice(&values.path.0), Part::Line => { let line = DecimalFormatter::new(values.line.unwrap_or(1)); dest.extend_from_slice(line.as_bytes()); } Part::Column => { let column = DecimalFormatter::new(values.column.unwrap_or(1)); dest.extend_from_slice(column.as_bytes()); } } } } impl std::fmt::Display for Part { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { Part::Text(text) => write!(f, "{}", String::from_utf8_lossy(text)), Part::Host => write!(f, "{{host}}"), Part::WSLPrefix => write!(f, "{{wslprefix}}"), Part::Path => write!(f, "{{path}}"), Part::Line => write!(f, "{{line}}"), Part::Column => write!(f, "{{column}}"), } } } /// The values to replace the format variables with. /// /// This only consists of values that depend on each path or match printed. /// Values that are invariant throughout the lifetime of the process are set /// via a [`HyperlinkEnvironment`]. #[derive(Clone, Debug)] pub(crate) struct Values<'a> { path: &'a HyperlinkPath, line: Option<u64>, column: Option<u64>, } impl<'a> Values<'a> { /// Creates a new set of values, starting with the path given. /// /// Callers may also set the line and column number using the mutator /// methods. pub(crate) fn new(path: &'a HyperlinkPath) -> Values<'a> { Values { path, line: None, column: None } } /// Sets the line number for these values. /// /// If a line number is not set and a hyperlink format contains a `{line}` /// variable, then it is interpolated with the value of `1` automatically. 
pub(crate) fn line(mut self, line: Option<u64>) -> Values<'a> { self.line = line; self } /// Sets the column number for these values. /// /// If a column number is not set and a hyperlink format contains a /// `{column}` variable, then it is interpolated with the value of `1` /// automatically. pub(crate) fn column(mut self, column: Option<u64>) -> Values<'a> { self.column = column; self } } /// An abstraction for interpolating a hyperlink format with values for every /// variable. /// /// Interpolation of variables occurs through two different sources. The /// first is via a `HyperlinkEnvironment` for values that are expected to /// be invariant. This comes from the `HyperlinkConfig` used to build this /// interpolator. The second source is via `Values`, which is provided to /// `Interpolator::begin`. The `Values` contains things like the file path, /// line number and column number. #[derive(Clone, Debug)] pub(crate) struct Interpolator { config: HyperlinkConfig, buf: RefCell<Vec<u8>>, } impl Interpolator { /// Create a new interpolator for the given hyperlink format configuration. pub(crate) fn new(config: &HyperlinkConfig) -> Interpolator { Interpolator { config: config.clone(), buf: RefCell::new(vec![]) } } /// Start interpolation with the given values by writing a hyperlink /// to `wtr`. Subsequent writes to `wtr`, until `Interpolator::end` is /// called, are the label for the hyperlink. /// /// This returns an interpolator status which indicates whether the /// hyperlink was written. It might not be written, for example, if the /// underlying writer doesn't support hyperlinks or if the hyperlink /// format is empty. The status should be provided to `Interpolator::end` /// as an instruction for whether to close the hyperlink or not. 
pub(crate) fn begin<W: WriteColor>( &self, values: &Values, mut wtr: W, ) -> io::Result<InterpolatorStatus> { if self.config.format().is_empty() || !wtr.supports_hyperlinks() || !wtr.supports_color() { return Ok(InterpolatorStatus::inactive()); } let mut buf = self.buf.borrow_mut(); buf.clear(); for part in self.config.format().parts.iter() { part.interpolate_to(self.config.environment(), values, &mut buf); } let spec = HyperlinkSpec::open(&buf); wtr.set_hyperlink(&spec)?; Ok(InterpolatorStatus { active: true }) } /// Writes the correct escape sequences to `wtr` to close any extant /// hyperlink, marking the end of a hyperlink's label. /// /// The status given should be returned from a corresponding /// `Interpolator::begin` call. Since `begin` may not write a hyperlink /// (e.g., if the underlying writer doesn't support hyperlinks), it follows /// that `finish` must not close a hyperlink that was never opened. The /// status indicates whether the hyperlink was opened or not. pub(crate) fn finish<W: WriteColor>( &self, status: InterpolatorStatus, mut wtr: W, ) -> io::Result<()> { if !status.active { return Ok(()); } wtr.set_hyperlink(&HyperlinkSpec::close()) } } /// A status indicating whether a hyperlink was written or not. /// /// This is created by `Interpolator::begin` and used by `Interpolator::finish` /// to determine whether a hyperlink was actually opened or not. If it wasn't /// opened, then finishing interpolation is a no-op. #[derive(Debug)] pub(crate) struct InterpolatorStatus { active: bool, } impl InterpolatorStatus { /// Create an inactive interpolator status. #[inline] pub(crate) fn inactive() -> InterpolatorStatus { InterpolatorStatus { active: false } } } /// Represents the `{path}` part of a hyperlink. /// /// This is the value to use as-is in the hyperlink, converted from an OS file /// path. #[derive(Clone, Debug)] pub(crate) struct HyperlinkPath(Vec<u8>); impl HyperlinkPath { /// Returns a hyperlink path from an OS path. 
#[cfg(unix)] pub(crate) fn from_path(original_path: &Path) -> Option<HyperlinkPath> { use std::os::unix::ffi::OsStrExt; // We canonicalize the path in order to get an absolute version of it // without any `.` or `..` or superfluous separators. Unfortunately, // this does also remove symlinks, and in theory, it would be nice to // retain them. Perhaps even simpler, we could just join the current // working directory with the path and be done with it. There was // some discussion about this on PR#2483, and there generally appears // to be some uncertainty about the extent to which hyperlinks with // things like `..` in them actually work. So for now, we do the safest // thing possible even though I think it can result in worse user // experience. (Because it means the path you click on and the actual // path that gets followed are different, even though they ostensibly // refer to the same file.) // // There's also the potential issue that path canonicalization is // expensive since it can touch the file system. That is probably // less of an issue since hyperlinks are only created when they're // supported, i.e., when writing to a tty. // // [1]: https://github.com/BurntSushi/ripgrep/pull/2483 let path = match original_path.canonicalize() { Ok(path) => path, Err(err) => { log::debug!( "hyperlink creation for {:?} failed, error occurred \ during path canonicalization: {}", original_path, err, ); return None; } }; let bytes = path.as_os_str().as_bytes(); // This should not be possible since one imagines that canonicalization // should always return an absolute path. But it doesn't actually // appear guaranteed by POSIX, so we check whether it's true or not and // refuse to create a hyperlink from a relative path if it isn't. 
if !bytes.starts_with(b"/") { log::debug!( "hyperlink creation for {:?} failed, canonicalization \ returned {:?}, which does not start with a slash", original_path, path, ); return None; } Some(HyperlinkPath::encode(bytes)) } /// Returns a hyperlink path from an OS path. #[cfg(windows)] pub(crate) fn from_path(original_path: &Path) -> Option<HyperlinkPath> { // On Windows, we use `std::path::absolute` instead of `Path::canonicalize` // as it can be much faster since it does not touch the file system. // It wraps the [`GetFullPathNameW`][1] API, except for verbatim paths // (those which start with `\\?\`, see [the documentation][2] for details). // // Here, we strip any verbatim path prefixes since we cannot use them // in hyperlinks anyway. This can only happen if the user explicitly // supplies a verbatim path as input, which already needs to be absolute: // // \\?\C:\dir\file.txt (local path) // \\?\UNC\server\dir\file.txt (network share) // // The `\\?\` prefix is constant for verbatim paths, and can be followed // by `UNC\` (universal naming convention), which denotes a network share. // // Given that the default URL format on Windows is file://{path} // we need to return the following from this function: // // /C:/dir/file.txt (local path) // //server/dir/file.txt (network share) // // Which produces the following links: // // file:///C:/dir/file.txt (local path) // file:////server/dir/file.txt (network share) // // This substitutes the {path} variable with the expected value for // the most common DOS paths, but on the other hand, network paths // start with a single slash, which may be unexpected. It seems to work // though? // // Note that the following URL syntax also seems to be valid? // // file://server/dir/file.txt // // But the initial implementation of this routine went for the format // above. // // Also note that the file://C:/dir/file.txt syntax is not correct, // even though it often works in practice. 
// // In the end, this choice was confirmed by VSCode, whose format is // // vscode://file{path}:{line}:{column} // // and which correctly understands the following URL format for network // drives: // // vscode://file//server/dir/file.txt:1:1 // // It doesn't parse any other number of slashes in "file//server" as a // network path. // // [1]: https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfullpathnamew // [2]: https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file const WIN32_NAMESPACE_PREFIX: &str = r"\\?\"; const UNC_PREFIX: &str = r"UNC\"; let path = match std::path::absolute(original_path) { Ok(path) => path, Err(err) => { log::debug!( "hyperlink creation for {:?} failed, error occurred \ during conversion to absolute path: {}", original_path, err, ); return None; } }; // We convert the path to a string for easier manipulation. If it // wasn't valid UTF-16 (and thus could not be non-lossily transcoded // to UTF-8), then we just give up. It's not clear we could make // a meaningful hyperlink from it anyway. And this should be an // exceptionally rare case. let mut string = match path.to_str() { Some(string) => string, None => { log::debug!( "hyperlink creation for {:?} failed, path is not \ valid UTF-8", original_path, ); return None; } }; // Strip verbatim path prefixes (see the comment above for details). if string.starts_with(WIN32_NAMESPACE_PREFIX) { string = &string[WIN32_NAMESPACE_PREFIX.len()..]; // Drop the UNC prefix if there is one, but keep the leading slash. if string.starts_with(UNC_PREFIX) { string = &string[(UNC_PREFIX.len() - 1)..]; } } else if string.starts_with(r"\\") || string.starts_with(r"//") { // Drop one of the two leading slashes of network paths, it will be added back. string = &string[1..]; } // Finally, add a leading slash. In the local file case, this turns // C:\foo\bar into /C:\foo\bar (and then percent encoding turns it into // /C:/foo/bar). 
In the network share case, this turns \share\foo\bar // into /\share/foo/bar (and then percent encoding turns it into // //share/foo/bar). let with_slash = format!("/{string}"); Some(HyperlinkPath::encode(with_slash.as_bytes())) } /// For other platforms (not windows, not unix), return None and log a debug message. #[cfg(not(any(windows, unix)))] pub(crate) fn from_path(original_path: &Path) -> Option<HyperlinkPath> { log::debug!("hyperlinks are not supported on this platform"); None } /// Percent-encodes a path. /// /// The alphanumeric ASCII characters and "-", ".", "_", "~" are unreserved /// as per section 2.3 of RFC 3986 (Uniform Resource Identifier (URI): /// Generic Syntax), and are not encoded. The other ASCII characters except /// "/" and ":" are percent-encoded, and "\" is replaced by "/" on Windows. /// /// Section 4 of RFC 8089 (The "file" URI Scheme) does not mandate precise /// encoding requirements for non-ASCII characters, and this implementation /// leaves them unencoded. On Windows, the UrlCreateFromPathW function does /// not encode non-ASCII characters. Doing so with UTF-8 encoded paths /// creates invalid file:// URLs on that platform. fn encode(input: &[u8]) -> HyperlinkPath { let mut result = Vec::with_capacity(input.len()); for &byte in input.iter() { match byte { b'0'..=b'9' | b'A'..=b'Z' | b'a'..=b'z' | b'/' | b':' | b'-' | b'.' | b'_'
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
true
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/printer/src/hyperlink/aliases.rs
crates/printer/src/hyperlink/aliases.rs
use crate::hyperlink::HyperlinkAlias; /// Aliases to well-known hyperlink schemes. /// /// These need to be sorted by name. pub(super) const HYPERLINK_PATTERN_ALIASES: &[HyperlinkAlias] = &[ alias( "cursor", "Cursor scheme (cursor://)", "cursor://file{path}:{line}:{column}", ), prioritized_alias( 0, "default", "RFC 8089 scheme (file://) (platform-aware)", { #[cfg(not(windows))] { "file://{host}{path}" } #[cfg(windows)] { "file://{path}" } }, ), alias( "file", "RFC 8089 scheme (file://) with host", "file://{host}{path}", ), // https://github.com/misaki-web/grepp alias("grep+", "grep+ scheme (grep+://)", "grep+://{path}:{line}"), alias( "kitty", "kitty-style RFC 8089 scheme (file://) with line number", "file://{host}{path}#{line}", ), // https://macvim.org/docs/gui_mac.txt.html#mvim%3A%2F%2F alias( "macvim", "MacVim scheme (mvim://)", "mvim://open?url=file://{path}&line={line}&column={column}", ), prioritized_alias(1, "none", "disable hyperlinks", ""), // https://macromates.com/blog/2007/the-textmate-url-scheme/ alias( "textmate", "TextMate scheme (txmt://)", "txmt://open?url=file://{path}&line={line}&column={column}", ), // https://code.visualstudio.com/docs/editor/command-line#_opening-vs-code-with-urls alias( "vscode", "VS Code scheme (vscode://)", "vscode://file{path}:{line}:{column}", ), alias( "vscode-insiders", "VS Code Insiders scheme (vscode-insiders://)", "vscode-insiders://file{path}:{line}:{column}", ), alias( "vscodium", "VSCodium scheme (vscodium://)", "vscodium://file{path}:{line}:{column}", ), ]; /// Creates a [`HyperlinkAlias`]. const fn alias( name: &'static str, description: &'static str, format: &'static str, ) -> HyperlinkAlias { HyperlinkAlias { name, description, format, display_priority: None } } /// Creates a [`HyperlinkAlias`] with a display priority. 
const fn prioritized_alias( priority: i16, name: &'static str, description: &'static str, format: &'static str, ) -> HyperlinkAlias { HyperlinkAlias { name, description, format, display_priority: Some(priority), } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/pcre2/src/lib.rs
crates/pcre2/src/lib.rs
/*! An implementation of `grep-matcher`'s `Matcher` trait for [PCRE2](https://www.pcre.org/). */ #![deny(missing_docs)] pub use pcre2::{is_jit_available, version}; pub use crate::{ error::{Error, ErrorKind}, matcher::{RegexCaptures, RegexMatcher, RegexMatcherBuilder}, }; mod error; mod matcher;
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/pcre2/src/error.rs
crates/pcre2/src/error.rs
/// An error that can occur in this crate. /// /// Generally, this error corresponds to problems building a regular /// expression, whether it's in parsing, compilation or a problem with /// guaranteeing a configured optimization. #[derive(Clone, Debug)] pub struct Error { kind: ErrorKind, } impl Error { pub(crate) fn regex<E: std::error::Error>(err: E) -> Error { Error { kind: ErrorKind::Regex(err.to_string()) } } /// Return the kind of this error. pub fn kind(&self) -> &ErrorKind { &self.kind } } /// The kind of an error that can occur. #[derive(Clone, Debug)] #[non_exhaustive] pub enum ErrorKind { /// An error that occurred as a result of parsing a regular expression. /// This can be a syntax error or an error that results from attempting to /// compile a regular expression that is too big. /// /// The string here is the underlying error converted to a string. Regex(String), } impl std::error::Error for Error { fn description(&self) -> &str { match self.kind { ErrorKind::Regex(_) => "regex error", } } } impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.kind { ErrorKind::Regex(ref s) => write!(f, "{}", s), } } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
BurntSushi/ripgrep
https://github.com/BurntSushi/ripgrep/blob/0a88cccd5188074de96f54a4b6b44a63971ac157/crates/pcre2/src/matcher.rs
crates/pcre2/src/matcher.rs
use std::collections::HashMap; use { grep_matcher::{Captures, Match, Matcher}, pcre2::bytes::{CaptureLocations, Regex, RegexBuilder}, }; use crate::error::Error; /// A builder for configuring the compilation of a PCRE2 regex. #[derive(Clone, Debug)] pub struct RegexMatcherBuilder { builder: RegexBuilder, case_smart: bool, word: bool, fixed_strings: bool, whole_line: bool, } impl RegexMatcherBuilder { /// Create a new matcher builder with a default configuration. pub fn new() -> RegexMatcherBuilder { RegexMatcherBuilder { builder: RegexBuilder::new(), case_smart: false, word: false, fixed_strings: false, whole_line: false, } } /// Compile the given pattern into a PCRE matcher using the current /// configuration. /// /// If there was a problem compiling the pattern, then an error is /// returned. pub fn build(&self, pattern: &str) -> Result<RegexMatcher, Error> { self.build_many(&[pattern]) } /// Compile all of the given patterns into a single regex that matches when /// at least one of the patterns matches. /// /// If there was a problem building the regex, then an error is returned. pub fn build_many<P: AsRef<str>>( &self, patterns: &[P], ) -> Result<RegexMatcher, Error> { let mut builder = self.builder.clone(); let mut pats = Vec::with_capacity(patterns.len()); for p in patterns.iter() { pats.push(if self.fixed_strings { format!("(?:{})", pcre2::escape(p.as_ref())) } else { format!("(?:{})", p.as_ref()) }); } let mut singlepat = if patterns.is_empty() { // A way to spell a pattern that can never match anything. r"[^\S\s]".to_string() } else { pats.join("|") }; if self.case_smart && !has_uppercase_literal(&singlepat) { builder.caseless(true); } if self.whole_line { singlepat = format!(r"(?m:^)(?:{})(?m:$)", singlepat); } else if self.word { // We make this option exclusive with whole_line because when // whole_line is enabled, all matches necessary fall on word // boundaries. So this extra goop is strictly redundant. 
singlepat = format!(r"(?<!\w)(?:{})(?!\w)", singlepat); } log::trace!("final regex: {:?}", singlepat); builder.build(&singlepat).map_err(Error::regex).map(|regex| { let mut names = HashMap::new(); for (i, name) in regex.capture_names().iter().enumerate() { if let Some(ref name) = *name { names.insert(name.to_string(), i); } } RegexMatcher { regex, names } }) } /// Enables case insensitive matching. /// /// If the `utf` option is also set, then Unicode case folding is used /// to determine case insensitivity. When the `utf` option is not set, /// then only standard ASCII case insensitivity is considered. /// /// This option corresponds to the `i` flag. pub fn caseless(&mut self, yes: bool) -> &mut RegexMatcherBuilder { self.builder.caseless(yes); self } /// Whether to enable "smart case" or not. /// /// When smart case is enabled, the builder will automatically enable /// case insensitive matching based on how the pattern is written. Namely, /// case insensitive mode is enabled when both of the following things /// are believed to be true: /// /// 1. The pattern contains at least one literal character. For example, /// `a\w` contains a literal (`a`) but `\w` does not. /// 2. Of the literals in the pattern, none of them are considered to be /// uppercase according to Unicode. For example, `foo\pL` has no /// uppercase literals but `Foo\pL` does. /// /// Note that the implementation of this is not perfect. Namely, `\p{Ll}` /// will prevent case insensitive matching even though it is part of a meta /// sequence. This bug will probably never be fixed. pub fn case_smart(&mut self, yes: bool) -> &mut RegexMatcherBuilder { self.case_smart = yes; self } /// Enables "dot all" matching. /// /// When enabled, the `.` metacharacter in the pattern matches any /// character, include `\n`. When disabled (the default), `.` will match /// any character except for `\n`. /// /// This option corresponds to the `s` flag. 
pub fn dotall(&mut self, yes: bool) -> &mut RegexMatcherBuilder { self.builder.dotall(yes); self } /// Enable "extended" mode in the pattern, where whitespace is ignored. /// /// This option corresponds to the `x` flag. pub fn extended(&mut self, yes: bool) -> &mut RegexMatcherBuilder { self.builder.extended(yes); self } /// Enable multiline matching mode. /// /// When enabled, the `^` and `$` anchors will match both at the beginning /// and end of a subject string, in addition to matching at the start of /// a line and the end of a line. When disabled, the `^` and `$` anchors /// will only match at the beginning and end of a subject string. /// /// This option corresponds to the `m` flag. pub fn multi_line(&mut self, yes: bool) -> &mut RegexMatcherBuilder { self.builder.multi_line(yes); self } /// Enable matching of CRLF as a line terminator. /// /// When enabled, anchors such as `^` and `$` will match any of the /// following as a line terminator: `\r`, `\n` or `\r\n`. /// /// This is disabled by default, in which case, only `\n` is recognized as /// a line terminator. pub fn crlf(&mut self, yes: bool) -> &mut RegexMatcherBuilder { self.builder.crlf(yes); self } /// Require that all matches occur on word boundaries. /// /// Enabling this option is subtly different than putting `\b` assertions /// on both sides of your pattern. In particular, a `\b` assertion requires /// that one side of it match a word character while the other match a /// non-word character. This option, in contrast, merely requires that /// one side match a non-word character. /// /// For example, `\b-2\b` will not match `foo -2 bar` since `-` is not a /// word character. However, `-2` with this `word` option enabled will /// match the `-2` in `foo -2 bar`. pub fn word(&mut self, yes: bool) -> &mut RegexMatcherBuilder { self.word = yes; self } /// Whether the patterns should be treated as literal strings or not. 
When /// this is active, all characters, including ones that would normally be /// special regex meta characters, are matched literally. pub fn fixed_strings(&mut self, yes: bool) -> &mut RegexMatcherBuilder { self.fixed_strings = yes; self } /// Whether each pattern should match the entire line or not. This is /// equivalent to surrounding the pattern with `(?m:^)` and `(?m:$)`. pub fn whole_line(&mut self, yes: bool) -> &mut RegexMatcherBuilder { self.whole_line = yes; self } /// Enable Unicode matching mode. /// /// When enabled, the following patterns become Unicode aware: `\b`, `\B`, /// `\d`, `\D`, `\s`, `\S`, `\w`, `\W`. /// /// When set, this implies UTF matching mode. It is not possible to enable /// Unicode matching mode without enabling UTF matching mode. /// /// This is disabled by default. pub fn ucp(&mut self, yes: bool) -> &mut RegexMatcherBuilder { self.builder.ucp(yes); self } /// Enable UTF matching mode. /// /// When enabled, characters are treated as sequences of code units that /// make up a single codepoint instead of as single bytes. For example, /// this will cause `.` to match any single UTF-8 encoded codepoint, where /// as when this is disabled, `.` will any single byte (except for `\n` in /// both cases, unless "dot all" mode is enabled). /// /// Note that when UTF matching mode is enabled, every search performed /// will do a UTF-8 validation check, which can impact performance. The /// UTF-8 check can be disabled via the `disable_utf_check` option, but it /// is undefined behavior to enable UTF matching mode and search invalid /// UTF-8. /// /// This is disabled by default. pub fn utf(&mut self, yes: bool) -> &mut RegexMatcherBuilder { self.builder.utf(yes); self } /// This is now deprecated and is a no-op. /// /// Previously, this option permitted disabling PCRE2's UTF-8 validity /// check, which could result in undefined behavior if the haystack was /// not valid UTF-8. 
But PCRE2 introduced a new option, `PCRE2_MATCH_INVALID_UTF`, /// in 10.34 which this crate always sets. When this option is enabled, /// PCRE2 claims to not have undefined behavior when the haystack is /// invalid UTF-8. /// /// Therefore, disabling the UTF-8 check is not something that is exposed /// by this crate. #[deprecated( since = "0.2.4", note = "now a no-op due to new PCRE2 features" )] pub fn disable_utf_check(&mut self) -> &mut RegexMatcherBuilder { self } /// Enable PCRE2's JIT and return an error if it's not available. /// /// This generally speeds up matching quite a bit. The downside is that it /// can increase the time it takes to compile a pattern. /// /// If the JIT isn't available or if JIT compilation returns an error, then /// regex compilation will fail with the corresponding error. /// /// This is disabled by default, and always overrides `jit_if_available`. pub fn jit(&mut self, yes: bool) -> &mut RegexMatcherBuilder { self.builder.jit(yes); self } /// Enable PCRE2's JIT if it's available. /// /// This generally speeds up matching quite a bit. The downside is that it /// can increase the time it takes to compile a pattern. /// /// If the JIT isn't available or if JIT compilation returns an error, /// then a debug message with the error will be emitted and the regex will /// otherwise silently fall back to non-JIT matching. /// /// This is disabled by default, and always overrides `jit`. pub fn jit_if_available(&mut self, yes: bool) -> &mut RegexMatcherBuilder { self.builder.jit_if_available(yes); self } /// Set the maximum size of PCRE2's JIT stack, in bytes. If the JIT is /// not enabled, then this has no effect. /// /// When `None` is given, no custom JIT stack will be created, and instead, /// the default JIT stack is used. When the default is used, its maximum /// size is 32 KB. /// /// When this is set, then a new JIT stack will be created with the given /// maximum size as its limit. 
/// /// Increasing the stack size can be useful for larger regular expressions. /// /// By default, this is set to `None`. pub fn max_jit_stack_size( &mut self, bytes: Option<usize>, ) -> &mut RegexMatcherBuilder { self.builder.max_jit_stack_size(bytes); self } } /// An implementation of the `Matcher` trait using PCRE2. #[derive(Clone, Debug)] pub struct RegexMatcher { regex: Regex, names: HashMap<String, usize>, } impl RegexMatcher { /// Create a new matcher from the given pattern using the default /// configuration. pub fn new(pattern: &str) -> Result<RegexMatcher, Error> { RegexMatcherBuilder::new().build(pattern) } } impl Matcher for RegexMatcher { type Captures = RegexCaptures; type Error = Error; fn find_at( &self, haystack: &[u8], at: usize, ) -> Result<Option<Match>, Error> { Ok(self .regex .find_at(haystack, at) .map_err(Error::regex)? .map(|m| Match::new(m.start(), m.end()))) } fn new_captures(&self) -> Result<RegexCaptures, Error> { Ok(RegexCaptures::new(self.regex.capture_locations())) } fn capture_count(&self) -> usize { self.regex.captures_len() } fn capture_index(&self, name: &str) -> Option<usize> { self.names.get(name).map(|i| *i) } fn try_find_iter<F, E>( &self, haystack: &[u8], mut matched: F, ) -> Result<Result<(), E>, Error> where F: FnMut(Match) -> Result<bool, E>, { for result in self.regex.find_iter(haystack) { let m = result.map_err(Error::regex)?; match matched(Match::new(m.start(), m.end())) { Ok(true) => continue, Ok(false) => return Ok(Ok(())), Err(err) => return Ok(Err(err)), } } Ok(Ok(())) } fn captures_at( &self, haystack: &[u8], at: usize, caps: &mut RegexCaptures, ) -> Result<bool, Error> { Ok(self .regex .captures_read_at(&mut caps.locs, haystack, at) .map_err(Error::regex)? .is_some()) } } /// Represents the match offsets of each capturing group in a match. /// /// The first, or `0`th capture group, always corresponds to the entire match /// and is guaranteed to be present when a match occurs. 
The next capture /// group, at index `1`, corresponds to the first capturing group in the regex, /// ordered by the position at which the left opening parenthesis occurs. /// /// Note that not all capturing groups are guaranteed to be present in a match. /// For example, in the regex, `(?P<foo>\w)|(?P<bar>\W)`, only one of `foo` /// or `bar` will ever be set in any given match. /// /// In order to access a capture group by name, you'll need to first find the /// index of the group using the corresponding matcher's `capture_index` /// method, and then use that index with `RegexCaptures::get`. #[derive(Clone, Debug)] pub struct RegexCaptures { /// Where the locations are stored. locs: CaptureLocations, } impl Captures for RegexCaptures { fn len(&self) -> usize { self.locs.len() } fn get(&self, i: usize) -> Option<Match> { self.locs.get(i).map(|(s, e)| Match::new(s, e)) } } impl RegexCaptures { pub(crate) fn new(locs: CaptureLocations) -> RegexCaptures { RegexCaptures { locs } } } /// Determine whether the pattern contains an uppercase character which should /// negate the effect of the smart-case option. /// /// Ideally we would be able to check the AST in order to correctly handle /// things like '\p{Ll}' and '\p{Lu}' (which should be treated as explicitly /// cased), but PCRE doesn't expose enough details for that kind of analysis. /// For now, our 'good enough' solution is to simply perform a semi-naïve /// scan of the input pattern and ignore all characters following a '\'. The /// This at least lets us support the most common cases, like 'foo\w' and /// 'foo\S', in an intuitive manner. 
fn has_uppercase_literal(pattern: &str) -> bool { let mut chars = pattern.chars(); while let Some(c) = chars.next() { if c == '\\' { chars.next(); } else if c.is_uppercase() { return true; } } false } #[cfg(test)] mod tests { use grep_matcher::LineMatchKind; use super::*; // Test that enabling word matches does the right thing and demonstrate // the difference between it and surrounding the regex in `\b`. #[test] fn word() { let matcher = RegexMatcherBuilder::new().word(true).build(r"-2").unwrap(); assert!(matcher.is_match(b"abc -2 foo").unwrap()); let matcher = RegexMatcherBuilder::new().word(false).build(r"\b-2\b").unwrap(); assert!(!matcher.is_match(b"abc -2 foo").unwrap()); } // Test that enabling CRLF permits `$` to match at the end of a line. #[test] fn line_terminator_crlf() { // Test normal use of `$` with a `\n` line terminator. let matcher = RegexMatcherBuilder::new() .multi_line(true) .build(r"abc$") .unwrap(); assert!(matcher.is_match(b"abc\n").unwrap()); // Test that `$` doesn't match at `\r\n` boundary normally. let matcher = RegexMatcherBuilder::new() .multi_line(true) .build(r"abc$") .unwrap(); assert!(!matcher.is_match(b"abc\r\n").unwrap()); // Now check the CRLF handling. let matcher = RegexMatcherBuilder::new() .multi_line(true) .crlf(true) .build(r"abc$") .unwrap(); assert!(matcher.is_match(b"abc\r\n").unwrap()); } // Test that smart case works. #[test] fn case_smart() { let matcher = RegexMatcherBuilder::new().case_smart(true).build(r"abc").unwrap(); assert!(matcher.is_match(b"ABC").unwrap()); let matcher = RegexMatcherBuilder::new().case_smart(true).build(r"aBc").unwrap(); assert!(!matcher.is_match(b"ABC").unwrap()); } // Test that finding candidate lines works as expected. 
#[test] fn candidate_lines() { fn is_confirmed(m: LineMatchKind) -> bool { match m { LineMatchKind::Confirmed(_) => true, _ => false, } } let matcher = RegexMatcherBuilder::new().build(r"\wfoo\s").unwrap(); let m = matcher.find_candidate_line(b"afoo ").unwrap().unwrap(); assert!(is_confirmed(m)); } }
rust
Unlicense
0a88cccd5188074de96f54a4b6b44a63971ac157
2026-01-04T15:31:58.730867Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/build.rs
build.rs
fn main() { // Fix building from source on Windows because it can't handle file links. #[cfg(windows)] let _ = std::fs::copy("dev/Cargo.toml", "dev-Cargo.toml"); }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/19_smart_pointers/cow1.rs
solutions/19_smart_pointers/cow1.rs
// This exercise explores the `Cow` (Clone-On-Write) smart pointer. It can // enclose and provide immutable access to borrowed data and clone the data // lazily when mutation or ownership is required. The type is designed to work // with general borrowed data via the `Borrow` trait. use std::borrow::Cow; fn abs_all(input: &mut Cow<[i32]>) { for ind in 0..input.len() { let value = input[ind]; if value < 0 { // Clones into a vector if not already owned. input.to_mut()[ind] = -value; } } } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn reference_mutation() { // Clone occurs because `input` needs to be mutated. let vec = vec![-1, 0, 1]; let mut input = Cow::from(&vec); abs_all(&mut input); assert!(matches!(input, Cow::Owned(_))); } #[test] fn reference_no_mutation() { // No clone occurs because `input` doesn't need to be mutated. let vec = vec![0, 1, 2]; let mut input = Cow::from(&vec); abs_all(&mut input); assert!(matches!(input, Cow::Borrowed(_))); // ^^^^^^^^^^^^^^^^ } #[test] fn owned_no_mutation() { // We can also pass `vec` without `&` so `Cow` owns it directly. In this // case, no mutation occurs (all numbers are already absolute) and thus // also no clone. But the result is still owned because it was never // borrowed or mutated. let vec = vec![0, 1, 2]; let mut input = Cow::from(vec); abs_all(&mut input); assert!(matches!(input, Cow::Owned(_))); // ^^^^^^^^^^^^^ } #[test] fn owned_mutation() { // Of course this is also the case if a mutation does occur (not all // numbers are absolute). In this case, the call to `to_mut()` in the // `abs_all` function returns a reference to the same data as before. let vec = vec![-1, 0, 1]; let mut input = Cow::from(vec); abs_all(&mut input); assert!(matches!(input, Cow::Owned(_))); // ^^^^^^^^^^^^^ } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/19_smart_pointers/box1.rs
solutions/19_smart_pointers/box1.rs
// At compile time, Rust needs to know how much space a type takes up. This // becomes problematic for recursive types, where a value can have as part of // itself another value of the same type. To get around the issue, we can use a // `Box` - a smart pointer used to store data on the heap, which also allows us // to wrap a recursive type. // // The recursive type we're implementing in this exercise is the "cons list", a // data structure frequently found in functional programming languages. Each // item in a cons list contains two elements: The value of the current item and // the next item. The last item is a value called `Nil`. #[derive(PartialEq, Debug)] enum List { Cons(i32, Box<List>), Nil, } fn create_empty_list() -> List { List::Nil } fn create_non_empty_list() -> List { List::Cons(42, Box::new(List::Nil)) } fn main() { println!("This is an empty cons list: {:?}", create_empty_list()); println!( "This is a non-empty cons list: {:?}", create_non_empty_list(), ); } #[cfg(test)] mod tests { use super::*; #[test] fn test_create_empty_list() { assert_eq!(create_empty_list(), List::Nil); } #[test] fn test_create_non_empty_list() { assert_ne!(create_empty_list(), create_non_empty_list()); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/19_smart_pointers/arc1.rs
solutions/19_smart_pointers/arc1.rs
// In this exercise, we are given a `Vec` of `u32` called `numbers` with values // ranging from 0 to 99. We would like to use this set of numbers within 8 // different threads simultaneously. Each thread is going to get the sum of // every eighth value with an offset. // // The first thread (offset 0), will sum 0, 8, 16, … // The second thread (offset 1), will sum 1, 9, 17, … // The third thread (offset 2), will sum 2, 10, 18, … // … // The eighth thread (offset 7), will sum 7, 15, 23, … // // Each thread should own a reference-counting pointer to the vector of // numbers. But `Rc` isn't thread-safe. Therefore, we need to use `Arc`. // // Don't get distracted by how threads are spawned and joined. We will practice // that later in the exercises about threads. // Don't change the lines below. #![forbid(unused_imports)] use std::{sync::Arc, thread}; fn main() { let numbers: Vec<_> = (0..100u32).collect(); let shared_numbers = Arc::new(numbers); // ^^^^^^^^^^^^^^^^^ let mut join_handles = Vec::new(); for offset in 0..8 { let child_numbers = Arc::clone(&shared_numbers); // ^^^^^^^^^^^^^^^^^^^^^^^^^^^ let handle = thread::spawn(move || { let sum: u32 = child_numbers.iter().filter(|&&n| n % 8 == offset).sum(); println!("Sum of offset {offset} is {sum}"); }); join_handles.push(handle); } for handle in join_handles.into_iter() { handle.join().unwrap(); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/19_smart_pointers/rc1.rs
solutions/19_smart_pointers/rc1.rs
// In this exercise, we want to express the concept of multiple owners via the // `Rc<T>` type. This is a model of our solar system - there is a `Sun` type and // multiple `Planet`s. The planets take ownership of the sun, indicating that // they revolve around the sun. use std::rc::Rc; #[derive(Debug)] struct Sun; #[derive(Debug)] enum Planet { Mercury(Rc<Sun>), Venus(Rc<Sun>), Earth(Rc<Sun>), Mars(Rc<Sun>), Jupiter(Rc<Sun>), Saturn(Rc<Sun>), Uranus(Rc<Sun>), Neptune(Rc<Sun>), } impl Planet { fn details(&self) { println!("Hi from {self:?}!"); } } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn rc1() { let sun = Rc::new(Sun); println!("reference count = {}", Rc::strong_count(&sun)); // 1 reference let mercury = Planet::Mercury(Rc::clone(&sun)); println!("reference count = {}", Rc::strong_count(&sun)); // 2 references mercury.details(); let venus = Planet::Venus(Rc::clone(&sun)); println!("reference count = {}", Rc::strong_count(&sun)); // 3 references venus.details(); let earth = Planet::Earth(Rc::clone(&sun)); println!("reference count = {}", Rc::strong_count(&sun)); // 4 references earth.details(); let mars = Planet::Mars(Rc::clone(&sun)); println!("reference count = {}", Rc::strong_count(&sun)); // 5 references mars.details(); let jupiter = Planet::Jupiter(Rc::clone(&sun)); println!("reference count = {}", Rc::strong_count(&sun)); // 6 references jupiter.details(); let saturn = Planet::Saturn(Rc::clone(&sun)); println!("reference count = {}", Rc::strong_count(&sun)); // 7 references saturn.details(); let uranus = Planet::Uranus(Rc::clone(&sun)); println!("reference count = {}", Rc::strong_count(&sun)); // 8 references uranus.details(); let neptune = Planet::Neptune(Rc::clone(&sun)); println!("reference count = {}", Rc::strong_count(&sun)); // 9 references neptune.details(); assert_eq!(Rc::strong_count(&sun), 9); drop(neptune); println!("reference count = {}", Rc::strong_count(&sun)); // 8 references 
drop(uranus); println!("reference count = {}", Rc::strong_count(&sun)); // 7 references drop(saturn); println!("reference count = {}", Rc::strong_count(&sun)); // 6 references drop(jupiter); println!("reference count = {}", Rc::strong_count(&sun)); // 5 references drop(mars); println!("reference count = {}", Rc::strong_count(&sun)); // 4 references drop(earth); println!("reference count = {}", Rc::strong_count(&sun)); // 3 references drop(venus); println!("reference count = {}", Rc::strong_count(&sun)); // 2 references drop(mercury); println!("reference count = {}", Rc::strong_count(&sun)); // 1 reference assert_eq!(Rc::strong_count(&sun), 1); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/22_clippy/clippy1.rs
solutions/22_clippy/clippy1.rs
// The Clippy tool is a collection of lints to analyze your code so you can // catch common mistakes and improve your Rust code. // // For these exercises, the code will fail to compile when there are Clippy // warnings. Check Clippy's suggestions from the output to solve the exercise. use std::f32::consts::PI; fn main() { // Use the more accurate `PI` constant. let pi = PI; let radius: f32 = 5.0; let area = pi * radius.powi(2); println!("The area of a circle with radius {radius:.2} is {area:.5}"); }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/22_clippy/clippy3.rs
solutions/22_clippy/clippy3.rs
use std::mem; #[allow(unused_variables, unused_assignments)] fn main() { let my_option: Option<&str> = None; // `unwrap` of an `Option` after checking if it is `None` will panic. // Use `if-let` instead. if let Some(value) = my_option { println!("{value}"); } // A comma was missing. #[rustfmt::skip] let my_arr = &[ -1, -2, -3, -4, -5, -6, ]; println!("My array! Here it is: {my_arr:?}"); let mut my_vec = vec![1, 2, 3, 4, 5]; // `resize` mutates a vector instead of returning a new one. // `resize(0, …)` clears a vector, so it is better to use `clear`. my_vec.clear(); println!("This Vec is empty, see? {my_vec:?}"); let mut value_a = 45; let mut value_b = 66; // Use `mem::swap` to correctly swap two values. mem::swap(&mut value_a, &mut value_b); println!("value a: {value_a}; value b: {value_b}"); }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/22_clippy/clippy2.rs
solutions/22_clippy/clippy2.rs
fn main() { let mut res = 42; let option = Some(12); // Use `if-let` instead of iteration. if let Some(x) = option { res += x; } println!("{res}"); }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/09_strings/strings1.rs
solutions/09_strings/strings1.rs
fn current_favorite_color() -> String { // Equivalent to `String::from("blue")` "blue".to_string() } fn main() { let answer = current_favorite_color(); println!("My current favorite color is {answer}"); }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/09_strings/strings2.rs
solutions/09_strings/strings2.rs
fn is_a_color_word(attempt: &str) -> bool { attempt == "green" || attempt == "blue" || attempt == "red" } fn main() { let word = String::from("green"); if is_a_color_word(&word) { // ^ added to have `&String` which is automatically // coerced to `&str` by the compiler. println!("That is a color word I know!"); } else { println!("That is not a color word I know."); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/09_strings/strings3.rs
solutions/09_strings/strings3.rs
fn trim_me(input: &str) -> &str { input.trim() } fn compose_me(input: &str) -> String { // The macro `format!` has the same syntax as `println!`, but it returns a // string instead of printing it to the terminal. // Equivalent to `input.to_string() + " world!"` format!("{input} world!") } fn replace_me(input: &str) -> String { input.replace("cars", "balloons") } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn trim_a_string() { assert_eq!(trim_me("Hello! "), "Hello!"); assert_eq!(trim_me(" What's up!"), "What's up!"); assert_eq!(trim_me(" Hola! "), "Hola!"); assert_eq!(trim_me("Hi!"), "Hi!"); } #[test] fn compose_a_string() { assert_eq!(compose_me("Hello"), "Hello world!"); assert_eq!(compose_me("Goodbye"), "Goodbye world!"); } #[test] fn replace_a_string() { assert_eq!( replace_me("I think cars are cool"), "I think balloons are cool", ); assert_eq!( replace_me("I love to look at cars"), "I love to look at balloons", ); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/09_strings/strings4.rs
solutions/09_strings/strings4.rs
fn string_slice(arg: &str) { println!("{arg}"); } fn string(arg: String) { println!("{arg}"); } fn main() { string_slice("blue"); string("red".to_string()); string(String::from("hi")); string("rust is fun!".to_owned()); // Here, both answers work. // `.into()` converts a type into an expected type. // If it is called where `String` is expected, it will convert `&str` to `String`. string("nice weather".into()); // But if it is called where `&str` is expected, then `&str` is kept as `&str` since no conversion is needed. // If you remove the `#[allow(…)]` line, then Clippy will tell you to remove `.into()` below since it is a useless conversion. #[allow(clippy::useless_conversion)] string_slice("nice weather".into()); string(format!("Interpolation {}", "Station")); // WARNING: This is byte indexing, not character indexing. // Character indexing can be done using `s.chars().nth(INDEX)`. string_slice(&String::from("abc")[0..1]); string_slice(" hello there ".trim()); string("Happy Monday!".replace("Mon", "Tues")); string("mY sHiFt KeY iS sTiCkY".to_lowercase()); }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/20_threads/threads3.rs
solutions/20_threads/threads3.rs
use std::{sync::mpsc, thread, time::Duration}; struct Queue { first_half: Vec<u32>, second_half: Vec<u32>, } impl Queue { fn new() -> Self { Self { first_half: vec![1, 2, 3, 4, 5], second_half: vec![6, 7, 8, 9, 10], } } } fn send_tx(q: Queue, tx: mpsc::Sender<u32>) { // Clone the sender `tx` first. let tx_clone = tx.clone(); thread::spawn(move || { for val in q.first_half { println!("Sending {val:?}"); // Then use the clone in the first thread. This means that // `tx_clone` is moved to the first thread and `tx` to the second. tx_clone.send(val).unwrap(); thread::sleep(Duration::from_millis(250)); } }); thread::spawn(move || { for val in q.second_half { println!("Sending {val:?}"); tx.send(val).unwrap(); thread::sleep(Duration::from_millis(250)); } }); } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn threads3() { let (tx, rx) = mpsc::channel(); let queue = Queue::new(); send_tx(queue, tx); let mut received = Vec::with_capacity(10); for value in rx { received.push(value); } received.sort(); assert_eq!(received, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/20_threads/threads2.rs
solutions/20_threads/threads2.rs
// Building on the last exercise, we want all of the threads to complete their // work. But this time, the spawned threads need to be in charge of updating a // shared value: `JobStatus.jobs_done` use std::{ sync::{Arc, Mutex}, thread, time::Duration, }; struct JobStatus { jobs_done: u32, } fn main() { // `Arc` isn't enough if you want a **mutable** shared state. // We need to wrap the value with a `Mutex`. let status = Arc::new(Mutex::new(JobStatus { jobs_done: 0 })); // ^^^^^^^^^^^ ^ let mut handles = Vec::new(); for _ in 0..10 { let status_shared = Arc::clone(&status); let handle = thread::spawn(move || { thread::sleep(Duration::from_millis(250)); // Lock before you update a shared value. status_shared.lock().unwrap().jobs_done += 1; // ^^^^^^^^^^^^^^^^ }); handles.push(handle); } // Waiting for all jobs to complete. for handle in handles { handle.join().unwrap(); } println!("Jobs done: {}", status.lock().unwrap().jobs_done); // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/20_threads/threads1.rs
solutions/20_threads/threads1.rs
// This program spawns multiple threads that each runs for at least 250ms, and // each thread returns how much time it took to complete. The program should // wait until all the spawned threads have finished and should collect their // return values into a vector. use std::{ thread, time::{Duration, Instant}, }; fn main() { let mut handles = Vec::new(); for i in 0..10 { let handle = thread::spawn(move || { let start = Instant::now(); thread::sleep(Duration::from_millis(250)); println!("Thread {i} done"); start.elapsed().as_millis() }); handles.push(handle); } let mut results = Vec::new(); for handle in handles { // Collect the results of all threads into the `results` vector. results.push(handle.join().unwrap()); } if results.len() != 10 { panic!("Oh no! Some thread isn't done yet!"); } println!(); for (i, result) in results.into_iter().enumerate() { println!("Thread {i} took {result}ms"); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/13_error_handling/errors2.rs
solutions/13_error_handling/errors2.rs
// Say we're writing a game where you can buy items with tokens. All items cost // 5 tokens, and whenever you purchase items there is a processing fee of 1 // token. A player of the game will type in how many items they want to buy, and // the `total_cost` function will calculate the total cost of the items. Since // the player typed in the quantity, we get it as a string. They might have // typed anything, not just numbers! // // Right now, this function isn't handling the error case at all. What we want // to do is: If we call the `total_cost` function on a string that is not a // number, that function will return a `ParseIntError`. In that case, we want to // immediately return that error from our function and not try to multiply and // add. // // There are at least two ways to implement this that are both correct. But one // is a lot shorter! use std::num::ParseIntError; #[allow(unused_variables, clippy::question_mark)] fn total_cost(item_quantity: &str) -> Result<i32, ParseIntError> { let processing_fee = 1; let cost_per_item = 5; // Added `?` to propagate the error. let qty = item_quantity.parse::<i32>()?; // ^ added // Equivalent to this verbose version: let qty = match item_quantity.parse::<i32>() { Ok(v) => v, Err(e) => return Err(e), }; Ok(qty * cost_per_item + processing_fee) } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; use std::num::IntErrorKind; #[test] fn item_quantity_is_a_valid_number() { assert_eq!(total_cost("34"), Ok(171)); } #[test] fn item_quantity_is_an_invalid_number() { assert_eq!( total_cost("beep boop").unwrap_err().kind(), &IntErrorKind::InvalidDigit, ); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/13_error_handling/errors6.rs
solutions/13_error_handling/errors6.rs
// Using catch-all error types like `Box<dyn Error>` isn't recommended for // library code where callers might want to make decisions based on the error // content instead of printing it out or propagating it further. Here, we define // a custom error type to make it possible for callers to decide what to do next // when our function returns an error. use std::num::ParseIntError; #[derive(PartialEq, Debug)] enum CreationError { Negative, Zero, } // A custom error type that we will be using in `PositiveNonzeroInteger::parse`. #[derive(PartialEq, Debug)] enum ParsePosNonzeroError { Creation(CreationError), ParseInt(ParseIntError), } impl ParsePosNonzeroError { fn from_creation(err: CreationError) -> Self { Self::Creation(err) } fn from_parse_int(err: ParseIntError) -> Self { Self::ParseInt(err) } } // As an alternative solution, implementing the `From` trait allows for the // automatic conversion from a `ParseIntError` into a `ParsePosNonzeroError` // using the `?` operator, without the need to call `map_err`. // // ``` // let x: i64 = s.parse()?; // ``` // // Traits like `From` will be dealt with in later exercises. impl From<ParseIntError> for ParsePosNonzeroError { fn from(err: ParseIntError) -> Self { ParsePosNonzeroError::ParseInt(err) } } #[derive(PartialEq, Debug)] struct PositiveNonzeroInteger(u64); impl PositiveNonzeroInteger { fn new(value: i64) -> Result<Self, CreationError> { match value { x if x < 0 => Err(CreationError::Negative), 0 => Err(CreationError::Zero), x => Ok(Self(x as u64)), } } fn parse(s: &str) -> Result<Self, ParsePosNonzeroError> { // Return an appropriate error instead of panicking when `parse()` // returns an error. let x: i64 = s.parse().map_err(ParsePosNonzeroError::from_parse_int)?; // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Self::new(x).map_err(ParsePosNonzeroError::from_creation) } } fn main() { // You can optionally experiment here. 
} #[cfg(test)] mod test { use super::*; #[test] fn test_parse_error() { assert!(matches!( PositiveNonzeroInteger::parse("not a number"), Err(ParsePosNonzeroError::ParseInt(_)), )); } #[test] fn test_negative() { assert_eq!( PositiveNonzeroInteger::parse("-555"), Err(ParsePosNonzeroError::Creation(CreationError::Negative)), ); } #[test] fn test_zero() { assert_eq!( PositiveNonzeroInteger::parse("0"), Err(ParsePosNonzeroError::Creation(CreationError::Zero)), ); } #[test] fn test_positive() { let x = PositiveNonzeroInteger::new(42).unwrap(); assert_eq!(x.0, 42); assert_eq!(PositiveNonzeroInteger::parse("42"), Ok(x)); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/13_error_handling/errors3.rs
solutions/13_error_handling/errors3.rs
// This is a program that is trying to use a completed version of the // `total_cost` function from the previous exercise. It's not working though! // Why not? What should we do to fix it? use std::num::ParseIntError; // Don't change this function. fn total_cost(item_quantity: &str) -> Result<i32, ParseIntError> { let processing_fee = 1; let cost_per_item = 5; let qty = item_quantity.parse::<i32>()?; Ok(qty * cost_per_item + processing_fee) } fn main() -> Result<(), ParseIntError> { // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ added let mut tokens = 100; let pretend_user_input = "8"; let cost = total_cost(pretend_user_input)?; if cost > tokens { println!("You can't afford that many!"); } else { tokens -= cost; println!("You now have {tokens} tokens."); } // Added this line to return the `Ok` variant of the expected `Result`. Ok(()) }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/13_error_handling/errors1.rs
solutions/13_error_handling/errors1.rs
fn generate_nametag_text(name: String) -> Result<String, String> { // ^^^^^^ ^^^^^^ if name.is_empty() { // `Err(String)` instead of `None`. Err("Empty names aren't allowed".to_string()) } else { // `Ok` instead of `Some`. Ok(format!("Hi! My name is {name}")) } } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn generates_nametag_text_for_a_nonempty_name() { assert_eq!( generate_nametag_text("Beyoncé".to_string()).as_deref(), Ok("Hi! My name is Beyoncé"), ); } #[test] fn explains_why_generating_nametag_text_fails() { assert_eq!( generate_nametag_text(String::new()) .as_ref() .map_err(|e| e.as_str()), Err("Empty names aren't allowed"), ); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/13_error_handling/errors4.rs
solutions/13_error_handling/errors4.rs
use std::cmp::Ordering; #[derive(PartialEq, Debug)] enum CreationError { Negative, Zero, } #[derive(PartialEq, Debug)] struct PositiveNonzeroInteger(u64); impl PositiveNonzeroInteger { fn new(value: i64) -> Result<Self, CreationError> { match value.cmp(&0) { Ordering::Less => Err(CreationError::Negative), Ordering::Equal => Err(CreationError::Zero), Ordering::Greater => Ok(Self(value as u64)), } } } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn test_creation() { assert_eq!( PositiveNonzeroInteger::new(10), Ok(PositiveNonzeroInteger(10)), ); assert_eq!( PositiveNonzeroInteger::new(-10), Err(CreationError::Negative), ); assert_eq!(PositiveNonzeroInteger::new(0), Err(CreationError::Zero)); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/13_error_handling/errors5.rs
solutions/13_error_handling/errors5.rs
// This exercise is an altered version of the `errors4` exercise. It uses some // concepts that we won't get to until later in the course, like `Box` and the // `From` trait. It's not important to understand them in detail right now, but // you can read ahead if you like. For now, think of the `Box<dyn ???>` type as // an "I want anything that does ???" type. // // In short, this particular use case for boxes is for when you want to own a // value and you care only that it is a type which implements a particular // trait. To do so, the `Box` is declared as of type `Box<dyn Trait>` where // `Trait` is the trait the compiler looks for on any value used in that // context. For this exercise, that context is the potential errors which // can be returned in a `Result`. use std::error::Error; use std::fmt; #[derive(PartialEq, Debug)] enum CreationError { Negative, Zero, } // This is required so that `CreationError` can implement `Error`. impl fmt::Display for CreationError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let description = match *self { CreationError::Negative => "number is negative", CreationError::Zero => "number is zero", }; f.write_str(description) } } impl Error for CreationError {} #[derive(PartialEq, Debug)] struct PositiveNonzeroInteger(u64); impl PositiveNonzeroInteger { fn new(value: i64) -> Result<PositiveNonzeroInteger, CreationError> { match value { x if x < 0 => Err(CreationError::Negative), 0 => Err(CreationError::Zero), x => Ok(PositiveNonzeroInteger(x as u64)), } } } fn main() -> Result<(), Box<dyn Error>> { let pretend_user_input = "42"; let x: i64 = pretend_user_input.parse()?; println!("output={:?}", PositiveNonzeroInteger::new(x)?); Ok(()) }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/23_conversions/from_str.rs
solutions/23_conversions/from_str.rs
// This is similar to the previous `from_into` exercise. But this time, we'll // implement `FromStr` and return errors instead of falling back to a default // value. Additionally, upon implementing `FromStr`, you can use the `parse` // method on strings to generate an object of the implementor type. You can read // more about it in the documentation: // https://doc.rust-lang.org/std/str/trait.FromStr.html use std::num::ParseIntError; use std::str::FromStr; #[derive(Debug, PartialEq)] struct Person { name: String, age: u8, } // We will use this error type for the `FromStr` implementation. #[derive(Debug, PartialEq)] enum ParsePersonError { // Incorrect number of fields BadLen, // Empty name field NoName, // Wrapped error from parse::<u8>() ParseInt(ParseIntError), } impl FromStr for Person { type Err = ParsePersonError; fn from_str(s: &str) -> Result<Self, Self::Err> { let mut split = s.split(','); let (Some(name), Some(age), None) = (split.next(), split.next(), split.next()) else { // ^^^^ there should be no third element return Err(ParsePersonError::BadLen); }; if name.is_empty() { return Err(ParsePersonError::NoName); } let age = age.parse().map_err(ParsePersonError::ParseInt)?; Ok(Self { name: name.into(), age, }) } } fn main() { let p = "Mark,20".parse::<Person>(); println!("{p:?}"); } #[cfg(test)] mod tests { use super::*; use ParsePersonError::*; #[test] fn empty_input() { assert_eq!("".parse::<Person>(), Err(BadLen)); } #[test] fn good_input() { let p = "John,32".parse::<Person>(); assert!(p.is_ok()); let p = p.unwrap(); assert_eq!(p.name, "John"); assert_eq!(p.age, 32); } #[test] fn missing_age() { assert!(matches!("John,".parse::<Person>(), Err(ParseInt(_)))); } #[test] fn invalid_age() { assert!(matches!("John,twenty".parse::<Person>(), Err(ParseInt(_)))); } #[test] fn missing_comma_and_age() { assert_eq!("John".parse::<Person>(), Err(BadLen)); } #[test] fn missing_name() { assert_eq!(",1".parse::<Person>(), Err(NoName)); } #[test] fn 
missing_name_and_age() { assert!(matches!(",".parse::<Person>(), Err(NoName | ParseInt(_)))); } #[test] fn missing_name_and_invalid_age() { assert!(matches!( ",one".parse::<Person>(), Err(NoName | ParseInt(_)), )); } #[test] fn trailing_comma() { assert_eq!("John,32,".parse::<Person>(), Err(BadLen)); } #[test] fn trailing_comma_and_some_string() { assert_eq!("John,32,man".parse::<Person>(), Err(BadLen)); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/23_conversions/try_from_into.rs
solutions/23_conversions/try_from_into.rs
// `TryFrom` is a simple and safe type conversion that may fail in a controlled // way under some circumstances. Basically, this is the same as `From`. The main // difference is that this should return a `Result` type instead of the target // type itself. You can read more about it in the documentation: // https://doc.rust-lang.org/std/convert/trait.TryFrom.html #![allow(clippy::useless_vec)] use std::convert::{TryFrom, TryInto}; #[derive(Debug, PartialEq)] struct Color { red: u8, green: u8, blue: u8, } // We will use this error type for the `TryFrom` conversions. #[derive(Debug, PartialEq)] enum IntoColorError { // Incorrect length of slice BadLen, // Integer conversion error IntConversion, } impl TryFrom<(i16, i16, i16)> for Color { type Error = IntoColorError; fn try_from(tuple: (i16, i16, i16)) -> Result<Self, Self::Error> { let (Ok(red), Ok(green), Ok(blue)) = ( u8::try_from(tuple.0), u8::try_from(tuple.1), u8::try_from(tuple.2), ) else { return Err(IntoColorError::IntConversion); }; Ok(Self { red, green, blue }) } } impl TryFrom<[i16; 3]> for Color { type Error = IntoColorError; fn try_from(arr: [i16; 3]) -> Result<Self, Self::Error> { // Reuse the implementation for a tuple. Self::try_from((arr[0], arr[1], arr[2])) } } impl TryFrom<&[i16]> for Color { type Error = IntoColorError; fn try_from(slice: &[i16]) -> Result<Self, Self::Error> { // Check the length. if slice.len() != 3 { return Err(IntoColorError::BadLen); } // Reuse the implementation for a tuple. Self::try_from((slice[0], slice[1], slice[2])) } } fn main() { // Using the `try_from` function. let c1 = Color::try_from((183, 65, 14)); println!("{c1:?}"); // Since `TryFrom` is implemented for `Color`, we can use `TryInto`. let c2: Result<Color, _> = [183, 65, 14].try_into(); println!("{c2:?}"); let v = vec![183, 65, 14]; // With slice we should use the `try_from` function let c3 = Color::try_from(&v[..]); println!("{c3:?}"); // or put the slice within round brackets and use `try_into`. 
let c4: Result<Color, _> = (&v[..]).try_into(); println!("{c4:?}"); } #[cfg(test)] mod tests { use super::*; use IntoColorError::*; #[test] fn test_tuple_out_of_range_positive() { assert_eq!(Color::try_from((256, 1000, 10000)), Err(IntConversion)); } #[test] fn test_tuple_out_of_range_negative() { assert_eq!(Color::try_from((-1, -10, -256)), Err(IntConversion)); } #[test] fn test_tuple_sum() { assert_eq!(Color::try_from((-1, 255, 255)), Err(IntConversion)); } #[test] fn test_tuple_correct() { let c: Result<Color, _> = (183, 65, 14).try_into(); assert!(c.is_ok()); assert_eq!( c.unwrap(), Color { red: 183, green: 65, blue: 14, } ); } #[test] fn test_array_out_of_range_positive() { let c: Result<Color, _> = [1000, 10000, 256].try_into(); assert_eq!(c, Err(IntConversion)); } #[test] fn test_array_out_of_range_negative() { let c: Result<Color, _> = [-10, -256, -1].try_into(); assert_eq!(c, Err(IntConversion)); } #[test] fn test_array_sum() { let c: Result<Color, _> = [-1, 255, 255].try_into(); assert_eq!(c, Err(IntConversion)); } #[test] fn test_array_correct() { let c: Result<Color, _> = [183, 65, 14].try_into(); assert!(c.is_ok()); assert_eq!( c.unwrap(), Color { red: 183, green: 65, blue: 14 } ); } #[test] fn test_slice_out_of_range_positive() { let arr = [10000, 256, 1000]; assert_eq!(Color::try_from(&arr[..]), Err(IntConversion)); } #[test] fn test_slice_out_of_range_negative() { let arr = [-256, -1, -10]; assert_eq!(Color::try_from(&arr[..]), Err(IntConversion)); } #[test] fn test_slice_sum() { let arr = [-1, 255, 255]; assert_eq!(Color::try_from(&arr[..]), Err(IntConversion)); } #[test] fn test_slice_correct() { let v = vec![183, 65, 14]; let c: Result<Color, _> = Color::try_from(&v[..]); assert!(c.is_ok()); assert_eq!( c.unwrap(), Color { red: 183, green: 65, blue: 14, } ); } #[test] fn test_slice_excess_length() { let v = vec![0, 0, 0, 0]; assert_eq!(Color::try_from(&v[..]), Err(BadLen)); } #[test] fn test_slice_insufficient_length() { let v = vec![0, 0]; 
assert_eq!(Color::try_from(&v[..]), Err(BadLen)); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/23_conversions/using_as.rs
solutions/23_conversions/using_as.rs
// Type casting in Rust is done via the usage of the `as` operator. // Note that the `as` operator is not only used when type casting. It also helps // with renaming imports. fn average(values: &[f64]) -> f64 { let total = values.iter().sum::<f64>(); total / values.len() as f64 // ^^^^^^ } fn main() { let values = [3.5, 0.3, 13.0, 11.7]; println!("{}", average(&values)); } #[cfg(test)] mod tests { use super::*; #[test] fn returns_proper_type_and_value() { assert_eq!(average(&[3.5, 0.3, 13.0, 11.7]), 7.125); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/23_conversions/from_into.rs
solutions/23_conversions/from_into.rs
// The `From` trait is used for value-to-value conversions. If `From` is // implemented, an implementation of `Into` is automatically provided. // You can read more about it in the documentation: // https://doc.rust-lang.org/std/convert/trait.From.html #[derive(Debug)] struct Person { name: String, age: u8, } // We implement the Default trait to use it as a fallback when the provided // string is not convertible into a `Person` object. impl Default for Person { fn default() -> Self { Self { name: String::from("John"), age: 30, } } } impl From<&str> for Person { fn from(s: &str) -> Self { let mut split = s.split(','); let (Some(name), Some(age), None) = (split.next(), split.next(), split.next()) else { // ^^^^ there should be no third element return Self::default(); }; if name.is_empty() { return Self::default(); } let Ok(age) = age.parse() else { return Self::default(); }; Self { name: name.into(), age, } } } fn main() { // Use the `from` function. let p1 = Person::from("Mark,20"); println!("{p1:?}"); // Since `From` is implemented for Person, we are able to use `Into`. 
let p2: Person = "Gerald,70".into(); println!("{p2:?}"); } #[cfg(test)] mod tests { use super::*; #[test] fn test_default() { let dp = Person::default(); assert_eq!(dp.name, "John"); assert_eq!(dp.age, 30); } #[test] fn test_bad_convert() { let p = Person::from(""); assert_eq!(p.name, "John"); assert_eq!(p.age, 30); } #[test] fn test_good_convert() { let p = Person::from("Mark,20"); assert_eq!(p.name, "Mark"); assert_eq!(p.age, 20); } #[test] fn test_bad_age() { let p = Person::from("Mark,twenty"); assert_eq!(p.name, "John"); assert_eq!(p.age, 30); } #[test] fn test_missing_comma_and_age() { let p: Person = Person::from("Mark"); assert_eq!(p.name, "John"); assert_eq!(p.age, 30); } #[test] fn test_missing_age() { let p: Person = Person::from("Mark,"); assert_eq!(p.name, "John"); assert_eq!(p.age, 30); } #[test] fn test_missing_name() { let p: Person = Person::from(",1"); assert_eq!(p.name, "John"); assert_eq!(p.age, 30); } #[test] fn test_missing_name_and_age() { let p: Person = Person::from(","); assert_eq!(p.name, "John"); assert_eq!(p.age, 30); } #[test] fn test_missing_name_and_invalid_age() { let p: Person = Person::from(",one"); assert_eq!(p.name, "John"); assert_eq!(p.age, 30); } #[test] fn test_trailing_comma() { let p: Person = Person::from("Mike,32,"); assert_eq!(p.name, "John"); assert_eq!(p.age, 30); } #[test] fn test_trailing_comma_and_some_string() { let p: Person = Person::from("Mike,32,dog"); assert_eq!(p.name, "John"); assert_eq!(p.age, 30); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/23_conversions/as_ref_mut.rs
solutions/23_conversions/as_ref_mut.rs
// AsRef and AsMut allow for cheap reference-to-reference conversions. Read more // about them at https://doc.rust-lang.org/std/convert/trait.AsRef.html and // https://doc.rust-lang.org/std/convert/trait.AsMut.html, respectively. // Obtain the number of bytes (not characters) in the given argument // (`.len()` returns the number of bytes in a string). fn byte_counter<T: AsRef<str>>(arg: T) -> usize { arg.as_ref().len() } // Obtain the number of characters (not bytes) in the given argument. fn char_counter<T: AsRef<str>>(arg: T) -> usize { arg.as_ref().chars().count() } // Squares a number using `as_mut()`. fn num_sq<T: AsMut<u32>>(arg: &mut T) { let arg = arg.as_mut(); *arg *= *arg; } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn different_counts() { let s = "Café au lait"; assert_ne!(char_counter(s), byte_counter(s)); } #[test] fn same_counts() { let s = "Cafe au lait"; assert_eq!(char_counter(s), byte_counter(s)); } #[test] fn different_counts_using_string() { let s = String::from("Café au lait"); assert_ne!(char_counter(s.clone()), byte_counter(s)); } #[test] fn same_counts_using_string() { let s = String::from("Cafe au lait"); assert_eq!(char_counter(s.clone()), byte_counter(s)); } #[test] fn mut_box() { let mut num: Box<u32> = Box::new(3); num_sq(&mut num); assert_eq!(*num, 9); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/07_structs/structs1.rs
solutions/07_structs/structs1.rs
struct ColorRegularStruct { red: u8, green: u8, blue: u8, } struct ColorTupleStruct(u8, u8, u8); #[derive(Debug)] struct UnitStruct; fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn regular_structs() { let green = ColorRegularStruct { red: 0, green: 255, blue: 0, }; assert_eq!(green.red, 0); assert_eq!(green.green, 255); assert_eq!(green.blue, 0); } #[test] fn tuple_structs() { let green = ColorTupleStruct(0, 255, 0); assert_eq!(green.0, 0); assert_eq!(green.1, 255); assert_eq!(green.2, 0); } #[test] fn unit_structs() { let unit_struct = UnitStruct; let message = format!("{unit_struct:?}s are fun!"); assert_eq!(message, "UnitStructs are fun!"); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/07_structs/structs2.rs
solutions/07_structs/structs2.rs
#[derive(Debug)] struct Order { name: String, year: u32, made_by_phone: bool, made_by_mobile: bool, made_by_email: bool, item_number: u32, count: u32, } fn create_order_template() -> Order { Order { name: String::from("Bob"), year: 2019, made_by_phone: false, made_by_mobile: false, made_by_email: true, item_number: 123, count: 0, } } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn your_order() { let order_template = create_order_template(); let your_order = Order { name: String::from("Hacker in Rust"), count: 1, // Struct update syntax ..order_template }; assert_eq!(your_order.name, "Hacker in Rust"); assert_eq!(your_order.year, order_template.year); assert_eq!(your_order.made_by_phone, order_template.made_by_phone); assert_eq!(your_order.made_by_mobile, order_template.made_by_mobile); assert_eq!(your_order.made_by_email, order_template.made_by_email); assert_eq!(your_order.item_number, order_template.item_number); assert_eq!(your_order.count, 1); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/07_structs/structs3.rs
solutions/07_structs/structs3.rs
#[derive(Debug)] struct Package { sender_country: String, recipient_country: String, weight_in_grams: u32, } impl Package { fn new(sender_country: String, recipient_country: String, weight_in_grams: u32) -> Self { if weight_in_grams < 10 { // This isn't how you should handle errors in Rust, but we will // learn about error handling later. panic!("Can't ship a package with weight below 10 grams"); } Self { sender_country, recipient_country, weight_in_grams, } } fn is_international(&self) -> bool { // ^^^^^^^ added self.sender_country != self.recipient_country } fn get_fees(&self, cents_per_gram: u32) -> u32 { // ^^^^^^ added self.weight_in_grams * cents_per_gram } } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] #[should_panic] fn fail_creating_weightless_package() { let sender_country = String::from("Spain"); let recipient_country = String::from("Austria"); Package::new(sender_country, recipient_country, 5); } #[test] fn create_international_package() { let sender_country = String::from("Spain"); let recipient_country = String::from("Russia"); let package = Package::new(sender_country, recipient_country, 1200); assert!(package.is_international()); } #[test] fn create_local_package() { let sender_country = String::from("Canada"); let recipient_country = sender_country.clone(); let package = Package::new(sender_country, recipient_country, 1200); assert!(!package.is_international()); } #[test] fn calculate_transport_fees() { let sender_country = String::from("Spain"); let recipient_country = String::from("Spain"); let cents_per_gram = 3; let package = Package::new(sender_country, recipient_country, 1500); assert_eq!(package.get_fees(cents_per_gram), 4500); assert_eq!(package.get_fees(cents_per_gram * 2), 9000); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/17_tests/tests1.rs
solutions/17_tests/tests1.rs
// Tests are important to ensure that your code does what you think it should // do. fn is_even(n: i64) -> bool { n % 2 == 0 } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { // When writing unit tests, it is common to import everything from the outer // module (`super`) using a wildcard. use super::*; #[test] fn you_can_assert() { assert!(is_even(0)); assert!(!is_even(-1)); // ^ You can assert `false` using the negation operator `!`. } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/17_tests/tests2.rs
solutions/17_tests/tests2.rs
// Calculates the power of 2 using a bit shift. // `1 << n` is equivalent to "2 to the power of n". fn power_of_2(n: u8) -> u64 { 1 << n } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn you_can_assert_eq() { assert_eq!(power_of_2(0), 1); assert_eq!(power_of_2(1), 2); assert_eq!(power_of_2(2), 4); assert_eq!(power_of_2(3), 8); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/17_tests/tests3.rs
solutions/17_tests/tests3.rs
struct Rectangle { width: i32, height: i32, } impl Rectangle { // Don't change this function. fn new(width: i32, height: i32) -> Self { if width <= 0 || height <= 0 { // Returning a `Result` would be better here. But we want to learn // how to test functions that can panic. panic!("Rectangle width and height must be positive"); } Rectangle { width, height } } } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn correct_width_and_height() { let rect = Rectangle::new(10, 20); assert_eq!(rect.width, 10); // Check width assert_eq!(rect.height, 20); // Check height } #[test] #[should_panic] // Added this attribute to check that the test panics. fn negative_width() { let _rect = Rectangle::new(-10, 10); } #[test] #[should_panic] // Added this attribute to check that the test panics. fn negative_height() { let _rect = Rectangle::new(10, -10); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/15_traits/traits3.rs
solutions/15_traits/traits3.rs
trait Licensed { fn licensing_info(&self) -> String { "Default license".to_string() } } struct SomeSoftware { version_number: i32, } struct OtherSoftware { version_number: String, } impl Licensed for SomeSoftware {} impl Licensed for OtherSoftware {} fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn is_licensing_info_the_same() { let licensing_info = "Default license"; let some_software = SomeSoftware { version_number: 1 }; let other_software = OtherSoftware { version_number: "v2.0.0".to_string(), }; assert_eq!(some_software.licensing_info(), licensing_info); assert_eq!(other_software.licensing_info(), licensing_info); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/15_traits/traits4.rs
solutions/15_traits/traits4.rs
trait Licensed { fn licensing_info(&self) -> String { "Default license".to_string() } } struct SomeSoftware; struct OtherSoftware; impl Licensed for SomeSoftware {} impl Licensed for OtherSoftware {} fn compare_license_types(software1: impl Licensed, software2: impl Licensed) -> bool { // ^^^^^^^^^^^^^ ^^^^^^^^^^^^^ software1.licensing_info() == software2.licensing_info() } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn compare_license_information() { assert!(compare_license_types(SomeSoftware, OtherSoftware)); } #[test] fn compare_license_information_backwards() { assert!(compare_license_types(OtherSoftware, SomeSoftware)); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/15_traits/traits1.rs
solutions/15_traits/traits1.rs
// The trait `AppendBar` has only one function which appends "Bar" to any object // implementing this trait. trait AppendBar { fn append_bar(self) -> Self; } impl AppendBar for String { fn append_bar(self) -> Self { self + "Bar" } } fn main() { let s = String::from("Foo"); let s = s.append_bar(); println!("s: {s}"); } #[cfg(test)] mod tests { use super::*; #[test] fn is_foo_bar() { assert_eq!(String::from("Foo").append_bar(), "FooBar"); } #[test] fn is_bar_bar() { assert_eq!(String::from("").append_bar().append_bar(), "BarBar"); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/15_traits/traits5.rs
solutions/15_traits/traits5.rs
trait SomeTrait { fn some_function(&self) -> bool { true } } trait OtherTrait { fn other_function(&self) -> bool { true } } struct SomeStruct; impl SomeTrait for SomeStruct {} impl OtherTrait for SomeStruct {} struct OtherStruct; impl SomeTrait for OtherStruct {} impl OtherTrait for OtherStruct {} fn some_func(item: impl SomeTrait + OtherTrait) -> bool { // ^^^^^^^^^^^^^^^^^^^^^^^^^^^ item.some_function() && item.other_function() } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn test_some_func() { assert!(some_func(SomeStruct)); assert!(some_func(OtherStruct)); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/15_traits/traits2.rs
solutions/15_traits/traits2.rs
trait AppendBar { fn append_bar(self) -> Self; } impl AppendBar for Vec<String> { fn append_bar(mut self) -> Self { // ^^^ this is important self.push(String::from("Bar")); self } } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn is_vec_pop_eq_bar() { let mut foo = vec![String::from("Foo")].append_bar(); assert_eq!(foo.pop().unwrap(), "Bar"); assert_eq!(foo.pop().unwrap(), "Foo"); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/11_hashmaps/hashmaps3.rs
solutions/11_hashmaps/hashmaps3.rs
// A list of scores (one per line) of a soccer match is given. Each line is of // the form "<team_1_name>,<team_2_name>,<team_1_goals>,<team_2_goals>" // Example: "England,France,4,2" (England scored 4 goals, France 2). // // You have to build a scores table containing the name of the team, the total // number of goals the team scored, and the total number of goals the team // conceded. use std::collections::HashMap; // A structure to store the goal details of a team. #[derive(Default)] struct TeamScores { goals_scored: u8, goals_conceded: u8, } fn build_scores_table(results: &str) -> HashMap<&str, TeamScores> { // The name of the team is the key and its associated struct is the value. let mut scores = HashMap::<&str, TeamScores>::new(); for line in results.lines() { let mut split_iterator = line.split(','); // NOTE: We use `unwrap` because we didn't deal with error handling yet. let team_1_name = split_iterator.next().unwrap(); let team_2_name = split_iterator.next().unwrap(); let team_1_score: u8 = split_iterator.next().unwrap().parse().unwrap(); let team_2_score: u8 = split_iterator.next().unwrap().parse().unwrap(); // Insert the default with zeros if a team doesn't exist yet. let team_1 = scores.entry(team_1_name).or_default(); // Update the values. team_1.goals_scored += team_1_score; team_1.goals_conceded += team_2_score; // Similarly for the second team. let team_2 = scores.entry(team_2_name).or_default(); team_2.goals_scored += team_2_score; team_2.goals_conceded += team_1_score; } scores } fn main() { // You can optionally experiment here. 
} #[cfg(test)] mod tests { use super::*; const RESULTS: &str = "England,France,4,2 France,Italy,3,1 Poland,Spain,2,0 Germany,England,2,1 England,Spain,1,0"; #[test] fn build_scores() { let scores = build_scores_table(RESULTS); assert!( ["England", "France", "Germany", "Italy", "Poland", "Spain"] .into_iter() .all(|team_name| scores.contains_key(team_name)) ); } #[test] fn validate_team_score_1() { let scores = build_scores_table(RESULTS); let team = scores.get("England").unwrap(); assert_eq!(team.goals_scored, 6); assert_eq!(team.goals_conceded, 4); } #[test] fn validate_team_score_2() { let scores = build_scores_table(RESULTS); let team = scores.get("Spain").unwrap(); assert_eq!(team.goals_scored, 0); assert_eq!(team.goals_conceded, 3); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/11_hashmaps/hashmaps2.rs
solutions/11_hashmaps/hashmaps2.rs
// We're collecting different fruits to bake a delicious fruit cake. For this, // we have a basket, which we'll represent in the form of a hash map. The key // represents the name of each fruit we collect and the value represents how // many of that particular fruit we have collected. Three types of fruits - // Apple (4), Mango (2) and Lychee (5) are already in the basket hash map. You // must add fruit to the basket so that there is at least one of each kind and // more than 11 in total - we have a lot of mouths to feed. You are not allowed // to insert any more of the fruits that are already in the basket (Apple, // Mango, and Lychee). use std::collections::HashMap; #[derive(Hash, PartialEq, Eq, Debug)] enum Fruit { Apple, Banana, Mango, Lychee, Pineapple, } fn fruit_basket(basket: &mut HashMap<Fruit, u32>) { let fruit_kinds = [ Fruit::Apple, Fruit::Banana, Fruit::Mango, Fruit::Lychee, Fruit::Pineapple, ]; for fruit in fruit_kinds { // If fruit doesn't exist, insert it with some value. basket.entry(fruit).or_insert(5); } } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; // Don't modify this function! 
fn get_fruit_basket() -> HashMap<Fruit, u32> { let content = [(Fruit::Apple, 4), (Fruit::Mango, 2), (Fruit::Lychee, 5)]; HashMap::from_iter(content) } #[test] fn test_given_fruits_are_not_modified() { let mut basket = get_fruit_basket(); fruit_basket(&mut basket); assert_eq!(*basket.get(&Fruit::Apple).unwrap(), 4); assert_eq!(*basket.get(&Fruit::Mango).unwrap(), 2); assert_eq!(*basket.get(&Fruit::Lychee).unwrap(), 5); } #[test] fn at_least_five_types_of_fruits() { let mut basket = get_fruit_basket(); fruit_basket(&mut basket); let count_fruit_kinds = basket.len(); assert!(count_fruit_kinds >= 5); } #[test] fn greater_than_eleven_fruits() { let mut basket = get_fruit_basket(); fruit_basket(&mut basket); let count = basket.values().sum::<u32>(); assert!(count > 11); } #[test] fn all_fruit_types_in_basket() { let fruit_kinds = [ Fruit::Apple, Fruit::Banana, Fruit::Mango, Fruit::Lychee, Fruit::Pineapple, ]; let mut basket = get_fruit_basket(); fruit_basket(&mut basket); for fruit_kind in fruit_kinds { let Some(amount) = basket.get(&fruit_kind) else { panic!("Fruit kind {fruit_kind:?} was not found in basket"); }; assert!(*amount > 0); } } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/11_hashmaps/hashmaps1.rs
solutions/11_hashmaps/hashmaps1.rs
// A basket of fruits in the form of a hash map needs to be defined. The key // represents the name of the fruit and the value represents how many of that // particular fruit is in the basket. You have to put at least 3 different // types of fruits (e.g. apple, banana, mango) in the basket and the total count // of all the fruits should be at least 5. use std::collections::HashMap; fn fruit_basket() -> HashMap<String, u32> { // Declare the hash map. let mut basket = HashMap::new(); // Two bananas are already given for you :) basket.insert(String::from("banana"), 2); // Put more fruits in your basket. basket.insert(String::from("apple"), 3); basket.insert(String::from("mango"), 1); basket } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn at_least_three_types_of_fruits() { let basket = fruit_basket(); assert!(basket.len() >= 3); } #[test] fn at_least_five_fruits() { let basket = fruit_basket(); assert!(basket.values().sum::<u32>() >= 5); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/08_enums/enums2.rs
solutions/08_enums/enums2.rs
#[derive(Debug)] struct Point { x: u64, y: u64, } #[derive(Debug)] enum Message { Resize { width: u64, height: u64 }, Move(Point), Echo(String), ChangeColor(u8, u8, u8), Quit, } impl Message { fn call(&self) { println!("{self:?}"); } } fn main() { let messages = [ Message::Resize { width: 10, height: 30, }, Message::Move(Point { x: 10, y: 15 }), Message::Echo(String::from("hello world")), Message::ChangeColor(200, 255, 255), Message::Quit, ]; for message in &messages { message.call(); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/08_enums/enums1.rs
solutions/08_enums/enums1.rs
#[derive(Debug)] enum Message { Resize, Move, Echo, ChangeColor, Quit, } fn main() { println!("{:?}", Message::Resize); println!("{:?}", Message::Move); println!("{:?}", Message::Echo); println!("{:?}", Message::ChangeColor); println!("{:?}", Message::Quit); }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/08_enums/enums3.rs
solutions/08_enums/enums3.rs
struct Point { x: u64, y: u64, } enum Message { Resize { width: u64, height: u64 }, Move(Point), Echo(String), ChangeColor(u8, u8, u8), Quit, } struct State { width: u64, height: u64, position: Point, message: String, color: (u8, u8, u8), quit: bool, } impl State { fn resize(&mut self, width: u64, height: u64) { self.width = width; self.height = height; } fn move_position(&mut self, point: Point) { self.position = point; } fn echo(&mut self, s: String) { self.message = s; } fn change_color(&mut self, red: u8, green: u8, blue: u8) { self.color = (red, green, blue); } fn quit(&mut self) { self.quit = true; } fn process(&mut self, message: Message) { match message { Message::Resize { width, height } => self.resize(width, height), Message::Move(point) => self.move_position(point), Message::Echo(string) => self.echo(string), Message::ChangeColor(red, green, blue) => self.change_color(red, green, blue), Message::Quit => self.quit(), } } } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn test_match_message_call() { let mut state = State { width: 0, height: 0, position: Point { x: 0, y: 0 }, message: String::from("hello world"), color: (0, 0, 0), quit: false, }; state.process(Message::Resize { width: 10, height: 30, }); state.process(Message::Move(Point { x: 10, y: 15 })); state.process(Message::Echo(String::from("Hello world!"))); state.process(Message::ChangeColor(255, 0, 255)); state.process(Message::Quit); assert_eq!(state.width, 10); assert_eq!(state.height, 30); assert_eq!(state.position.x, 10); assert_eq!(state.position.y, 15); assert_eq!(state.message, "Hello world!"); assert_eq!(state.color, (255, 0, 255)); assert!(state.quit); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/18_iterators/iterators1.rs
solutions/18_iterators/iterators1.rs
// When performing operations on elements within a collection, iterators are // essential. This module helps you get familiar with the structure of using an // iterator and how to go through elements within an iterable collection. fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { #[test] fn iterators() { let my_fav_fruits = ["banana", "custard apple", "avocado", "peach", "raspberry"]; // Create an iterator over the array. let mut fav_fruits_iterator = my_fav_fruits.iter(); assert_eq!(fav_fruits_iterator.next(), Some(&"banana")); assert_eq!(fav_fruits_iterator.next(), Some(&"custard apple")); assert_eq!(fav_fruits_iterator.next(), Some(&"avocado")); assert_eq!(fav_fruits_iterator.next(), Some(&"peach")); assert_eq!(fav_fruits_iterator.next(), Some(&"raspberry")); assert_eq!(fav_fruits_iterator.next(), None); // ^^^^ reached the end } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/18_iterators/iterators4.rs
solutions/18_iterators/iterators4.rs
// 3 possible solutions are presented. // With `for` loop and a mutable variable. fn factorial_for(num: u64) -> u64 { let mut result = 1; for x in 2..=num { result *= x; } result } // Equivalent to `factorial_for` but shorter and without a `for` loop and // mutable variables. fn factorial_fold(num: u64) -> u64 { // Case num==0: The iterator 2..=0 is empty // -> The initial value of `fold` is returned which is 1. // Case num==1: The iterator 2..=1 is also empty // -> The initial value 1 is returned. // Case num==2: The iterator 2..=2 contains one element // -> The initial value 1 is multiplied by 2 and the result // is returned. // Case num==3: The iterator 2..=3 contains 2 elements // -> 1 * 2 is calculated, then the result 2 is multiplied by // the second element 3 so the result 6 is returned. // And so on… #[allow(clippy::unnecessary_fold)] (2..=num).fold(1, |acc, x| acc * x) } // Equivalent to `factorial_fold` but with a built-in method that is suggested // by Clippy. fn factorial_product(num: u64) -> u64 { (2..=num).product() } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn factorial_of_0() { assert_eq!(factorial_for(0), 1); assert_eq!(factorial_fold(0), 1); assert_eq!(factorial_product(0), 1); } #[test] fn factorial_of_1() { assert_eq!(factorial_for(1), 1); assert_eq!(factorial_fold(1), 1); assert_eq!(factorial_product(1), 1); } #[test] fn factorial_of_2() { assert_eq!(factorial_for(2), 2); assert_eq!(factorial_fold(2), 2); assert_eq!(factorial_product(2), 2); } #[test] fn factorial_of_4() { assert_eq!(factorial_for(4), 24); assert_eq!(factorial_fold(4), 24); assert_eq!(factorial_product(4), 24); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/18_iterators/iterators3.rs
solutions/18_iterators/iterators3.rs
#[derive(Debug, PartialEq, Eq)] enum DivisionError { // Example: 42 / 0 DivideByZero, // Only case for `i64`: `i64::MIN / -1` because the result is `i64::MAX + 1` IntegerOverflow, // Example: 5 / 2 = 2.5 NotDivisible, } fn divide(a: i64, b: i64) -> Result<i64, DivisionError> { if b == 0 { return Err(DivisionError::DivideByZero); } if a == i64::MIN && b == -1 { return Err(DivisionError::IntegerOverflow); } if a % b != 0 { return Err(DivisionError::NotDivisible); } Ok(a / b) } fn result_with_list() -> Result<Vec<i64>, DivisionError> { // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ let numbers = [27, 297, 38502, 81]; let division_results = numbers.into_iter().map(|n| divide(n, 27)); // Collects to the expected return type. Returns the first error in the // division results (if one exists). division_results.collect() } fn list_of_results() -> Vec<Result<i64, DivisionError>> { // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ let numbers = [27, 297, 38502, 81]; let division_results = numbers.into_iter().map(|n| divide(n, 27)); // Collects to the expected return type. division_results.collect() } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn test_success() { assert_eq!(divide(81, 9), Ok(9)); assert_eq!(divide(81, -1), Ok(-81)); assert_eq!(divide(i64::MIN, i64::MIN), Ok(1)); } #[test] fn test_divide_by_0() { assert_eq!(divide(81, 0), Err(DivisionError::DivideByZero)); } #[test] fn test_integer_overflow() { assert_eq!(divide(i64::MIN, -1), Err(DivisionError::IntegerOverflow)); } #[test] fn test_not_divisible() { assert_eq!(divide(81, 6), Err(DivisionError::NotDivisible)); } #[test] fn test_divide_0_by_something() { assert_eq!(divide(0, 81), Ok(0)); } #[test] fn test_result_with_list() { assert_eq!(result_with_list().unwrap(), [1, 11, 1426, 3]); } #[test] fn test_list_of_results() { assert_eq!(list_of_results(), [Ok(1), Ok(11), Ok(1426), Ok(3)]); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/18_iterators/iterators2.rs
solutions/18_iterators/iterators2.rs
// In this exercise, you'll learn some of the unique advantages that iterators // can offer. // "hello" -> "Hello" fn capitalize_first(input: &str) -> String { let mut chars = input.chars(); match chars.next() { None => String::new(), Some(first) => first.to_uppercase().to_string() + chars.as_str(), } } // Apply the `capitalize_first` function to a slice of string slices. // Return a vector of strings. // ["hello", "world"] -> ["Hello", "World"] fn capitalize_words_vector(words: &[&str]) -> Vec<String> { words.iter().map(|word| capitalize_first(word)).collect() } // Apply the `capitalize_first` function again to a slice of string // slices. Return a single string. // ["hello", " ", "world"] -> "Hello World" fn capitalize_words_string(words: &[&str]) -> String { words.iter().map(|word| capitalize_first(word)).collect() } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn test_success() { assert_eq!(capitalize_first("hello"), "Hello"); } #[test] fn test_empty() { assert_eq!(capitalize_first(""), ""); } #[test] fn test_iterate_string_vec() { let words = vec!["hello", "world"]; assert_eq!(capitalize_words_vector(&words), ["Hello", "World"]); } #[test] fn test_iterate_into_string() { let words = vec!["hello", " ", "world"]; assert_eq!(capitalize_words_string(&words), "Hello World"); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/18_iterators/iterators5.rs
solutions/18_iterators/iterators5.rs
// Let's define a simple model to track Rustlings' exercise progress. Progress // will be modelled using a hash map. The name of the exercise is the key and // the progress is the value. Two counting functions were created to count the // number of exercises with a given progress. Recreate this counting // functionality using iterators. Try to not use imperative loops (for/while). use std::collections::HashMap; #[derive(Clone, Copy, PartialEq, Eq)] enum Progress { None, Some, Complete, } fn count_for(map: &HashMap<String, Progress>, value: Progress) -> usize { let mut count = 0; for val in map.values() { if *val == value { count += 1; } } count } fn count_iterator(map: &HashMap<String, Progress>, value: Progress) -> usize { // `map` is a hash map with `String` keys and `Progress` values. // map = { "variables1": Complete, "from_str": None, … } map.values().filter(|val| **val == value).count() } fn count_collection_for(collection: &[HashMap<String, Progress>], value: Progress) -> usize { let mut count = 0; for map in collection { count += count_for(map, value); } count } fn count_collection_iterator(collection: &[HashMap<String, Progress>], value: Progress) -> usize { // `collection` is a slice of hash maps. // collection = [{ "variables1": Complete, "from_str": None, … }, // { "variables2": Complete, … }, … ] collection .iter() .map(|map| count_iterator(map, value)) .sum() } // Equivalent to `count_collection_iterator` and `count_iterator`, iterating as // if the collection was a single container instead of a container of containers // (and more accurately, a single iterator instead of an iterator of iterators). fn count_collection_iterator_flat( collection: &[HashMap<String, Progress>], value: Progress, ) -> usize { // `collection` is a slice of hash maps. 
// collection = [{ "variables1": Complete, "from_str": None, … }, // { "variables2": Complete, … }, … ] collection .iter() .flat_map(HashMap::values) // or just `.flatten()` when wanting the default iterator (`HashMap::iter`) .filter(|val| **val == value) .count() } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; use Progress::*; fn get_map() -> HashMap<String, Progress> { let mut map = HashMap::new(); map.insert(String::from("variables1"), Complete); map.insert(String::from("functions1"), Complete); map.insert(String::from("hashmap1"), Complete); map.insert(String::from("arc1"), Some); map.insert(String::from("as_ref_mut"), None); map.insert(String::from("from_str"), None); map } fn get_vec_map() -> Vec<HashMap<String, Progress>> { let map = get_map(); let mut other = HashMap::new(); other.insert(String::from("variables2"), Complete); other.insert(String::from("functions2"), Complete); other.insert(String::from("if1"), Complete); other.insert(String::from("from_into"), None); other.insert(String::from("try_from_into"), None); vec![map, other] } #[test] fn count_complete() { let map = get_map(); assert_eq!(count_iterator(&map, Complete), 3); } #[test] fn count_some() { let map = get_map(); assert_eq!(count_iterator(&map, Some), 1); } #[test] fn count_none() { let map = get_map(); assert_eq!(count_iterator(&map, None), 2); } #[test] fn count_complete_equals_for() { let map = get_map(); let progress_states = [Complete, Some, None]; for progress_state in progress_states { assert_eq!( count_for(&map, progress_state), count_iterator(&map, progress_state), ); } } #[test] fn count_collection_complete() { let collection = get_vec_map(); assert_eq!(count_collection_iterator(&collection, Complete), 6); assert_eq!(count_collection_iterator_flat(&collection, Complete), 6); } #[test] fn count_collection_some() { let collection = get_vec_map(); assert_eq!(count_collection_iterator(&collection, Some), 1); 
assert_eq!(count_collection_iterator_flat(&collection, Some), 1); } #[test] fn count_collection_none() { let collection = get_vec_map(); assert_eq!(count_collection_iterator(&collection, None), 4); assert_eq!(count_collection_iterator_flat(&collection, None), 4); } #[test] fn count_collection_equals_for() { let collection = get_vec_map(); let progress_states = [Complete, Some, None]; for progress_state in progress_states { assert_eq!( count_collection_for(&collection, progress_state), count_collection_iterator(&collection, progress_state), ); assert_eq!( count_collection_for(&collection, progress_state), count_collection_iterator_flat(&collection, progress_state), ); } } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/03_if/if1.rs
solutions/03_if/if1.rs
fn bigger(a: i32, b: i32) -> i32 { if a > b { a } else { b } } fn main() { // You can optionally experiment here. } // Don't mind this for now :) #[cfg(test)] mod tests { use super::*; #[test] fn ten_is_bigger_than_eight() { assert_eq!(10, bigger(10, 8)); } #[test] fn fortytwo_is_bigger_than_thirtytwo() { assert_eq!(42, bigger(32, 42)); } #[test] fn equal_numbers() { assert_eq!(42, bigger(42, 42)); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/03_if/if3.rs
solutions/03_if/if3.rs
fn animal_habitat(animal: &str) -> &str { let identifier = if animal == "crab" { 1 } else if animal == "gopher" { 2 } else if animal == "snake" { 3 } else { // Any unused identifier. 4 }; // Instead of such an identifier, you would use an enum in Rust. // But we didn't get into enums yet. if identifier == 1 { "Beach" } else if identifier == 2 { "Burrow" } else if identifier == 3 { "Desert" } else { "Unknown" } } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn gopher_lives_in_burrow() { assert_eq!(animal_habitat("gopher"), "Burrow") } #[test] fn snake_lives_in_desert() { assert_eq!(animal_habitat("snake"), "Desert") } #[test] fn crab_lives_on_beach() { assert_eq!(animal_habitat("crab"), "Beach") } #[test] fn unknown_animal() { assert_eq!(animal_habitat("dinosaur"), "Unknown") } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false
rust-lang/rustlings
https://github.com/rust-lang/rustlings/blob/7850a73d95c02840f4ab3bf8d9571b08410e5467/solutions/03_if/if2.rs
solutions/03_if/if2.rs
fn picky_eater(food: &str) -> &str { if food == "strawberry" { "Yummy!" } else if food == "potato" { "I guess I can eat that." } else { "No thanks!" } } fn main() { // You can optionally experiment here. } #[cfg(test)] mod tests { use super::*; #[test] fn yummy_food() { assert_eq!(picky_eater("strawberry"), "Yummy!"); } #[test] fn neutral_food() { assert_eq!(picky_eater("potato"), "I guess I can eat that."); } #[test] fn default_disliked_food() { assert_eq!(picky_eater("broccoli"), "No thanks!"); assert_eq!(picky_eater("gummy bears"), "No thanks!"); assert_eq!(picky_eater("literally anything"), "No thanks!"); } }
rust
MIT
7850a73d95c02840f4ab3bf8d9571b08410e5467
2026-01-04T15:31:58.719144Z
false