repo string | pull_number int64 | instance_id string | issue_numbers sequence | base_commit string | patch string | test_patch string | problem_statement string | hints_text string | created_at string | version string | updated_at string | environment_setup_commit string |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
BurntSushi/memchr | 126 | BurntSushi__memchr-126 | [
"117",
"119"
] | be564d425bfcabd2c9925a8a360f4a62b866bb90 | diff --git /dev/null b/bench/src/memmem/byterank.rs
new file mode 100644
--- /dev/null
+++ b/bench/src/memmem/byterank.rs
@@ -0,0 +1,116 @@
+use criterion::Criterion;
+use memchr::memmem::HeuristicFrequencyRank;
+
+use crate::define;
+
+pub(crate) fn all(c: &mut Criterion) {
+ finder_construction(c);
+ byte_frequencies(c);
+}
+
+fn finder_construction(c: &mut Criterion) {
+ // This benchmark is purely for measuring the time taken to create a
+ // `Finder`. It is here to prevent regressions when adding new features
+ // to the `Finder`, such as the ability to construct with a custom
+ // `HeuristicFrequencyRank`.
+ const NEEDLES: [&str; 3] = ["a", "abcd", "abcdefgh12345678"];
+
+ for needle in NEEDLES {
+ define(
+ c,
+ &format!(
+ "memmem/krate/bytefreq/construct-finder/default(len={})",
+ needle.len()
+ ),
+ needle.as_bytes(),
+ Box::new(move |b| {
+ b.iter(|| {
+ memchr::memmem::FinderBuilder::new()
+ .build_forward(needle.as_bytes())
+ });
+ }),
+ );
+ define(
+ c,
+ &format!(
+ "memmem/krate/bytefreq/construct-finder/custom(len={})",
+ needle.len()
+ ),
+ needle.as_bytes(),
+ Box::new(move |b| {
+ b.iter(|| {
+ memchr::memmem::FinderBuilder::new()
+ .build_forward_with_ranker(Binary, needle.as_bytes())
+ });
+ }),
+ );
+ }
+}
+
+fn byte_frequencies(c: &mut Criterion) {
+ // This benchmark exists to demonstrate a common use case for
+ // customizing the byte frequency table used by a `Finder`
+ // and the relative performance gain from using an optimal table.
+ // This is essentially why `HeuristicFrequencyRank` was added.
+
+ // Bytes we want to scan for that are rare in strings but common in
+ // executables.
+ const NEEDLE: &[u8] = b"\x00\x00\xdd\xdd'";
+
+ // The input for the benchmark is the benchmark binary itself
+ let exe = std::env::args().next().unwrap();
+ let corpus = std::fs::read(exe).unwrap();
+
+ let bin = corpus.clone();
+ define(
+ c,
+ &format!("memmem/krate/bytefreq/default"),
+ &corpus,
+ Box::new(move |b| {
+ let finder =
+ memchr::memmem::FinderBuilder::new().build_forward(NEEDLE);
+ b.iter(|| {
+ assert_eq!(1, finder.find_iter(&bin).count());
+ });
+ }),
+ );
+
+ let bin = corpus.clone();
+ define(
+ c,
+ &format!("memmem/krate/bytefreq/custom"),
+ &corpus,
+ Box::new(move |b| {
+ let finder = memchr::memmem::FinderBuilder::new()
+ .build_forward_with_ranker(Binary, NEEDLE);
+ b.iter(|| {
+ assert_eq!(1, finder.find_iter(&bin).count());
+ });
+ }),
+ );
+}
+
+/// A byte-frequency table that is good for scanning binary executables.
+struct Binary;
+
+impl HeuristicFrequencyRank for Binary {
+ fn rank(&self, byte: u8) -> u8 {
+ const TABLE: [u8; 256] = [
+ 255, 128, 61, 43, 50, 41, 27, 28, 57, 15, 21, 13, 24, 17, 17, 89,
+ 58, 16, 11, 7, 14, 23, 7, 6, 24, 9, 6, 5, 9, 4, 7, 16, 68, 11, 9,
+ 6, 88, 7, 4, 4, 23, 9, 4, 8, 8, 5, 10, 4, 30, 11, 9, 24, 11, 5, 5,
+ 5, 19, 11, 6, 17, 9, 9, 6, 8, 48, 58, 11, 14, 53, 40, 9, 9, 254,
+ 35, 3, 6, 52, 23, 6, 6, 27, 4, 7, 11, 14, 13, 10, 11, 11, 5, 2,
+ 10, 16, 12, 6, 19, 19, 20, 5, 14, 16, 31, 19, 7, 14, 20, 4, 4, 19,
+ 8, 18, 20, 24, 1, 25, 19, 58, 29, 10, 5, 15, 20, 2, 2, 9, 4, 3, 5,
+ 51, 11, 4, 53, 23, 39, 6, 4, 13, 81, 4, 186, 5, 67, 3, 2, 15, 0,
+ 0, 1, 3, 2, 0, 0, 5, 0, 0, 0, 2, 0, 0, 0, 12, 2, 1, 1, 3, 1, 1, 1,
+ 6, 1, 2, 1, 3, 1, 1, 2, 9, 1, 1, 0, 2, 2, 4, 4, 11, 6, 7, 3, 6, 9,
+ 4, 5, 46, 18, 8, 18, 17, 3, 8, 20, 16, 10, 3, 7, 175, 4, 6, 7, 13,
+ 3, 7, 3, 3, 1, 3, 3, 10, 3, 1, 5, 2, 0, 1, 2, 16, 3, 5, 1, 6, 1,
+ 1, 2, 58, 20, 3, 14, 12, 2, 1, 3, 16, 3, 5, 8, 3, 1, 8, 6, 17, 6,
+ 5, 3, 8, 6, 13, 175,
+ ];
+ TABLE[byte as usize]
+ }
+}
diff --git a/bench/src/memmem/mod.rs b/bench/src/memmem/mod.rs
--- a/bench/src/memmem/mod.rs
+++ b/bench/src/memmem/mod.rs
@@ -96,6 +96,7 @@ use criterion::Criterion;
use crate::{define, memmem::inputs::INPUTS};
+mod byterank;
mod imp;
mod inputs;
mod sliceslice;
diff --git a/bench/src/memmem/mod.rs b/bench/src/memmem/mod.rs
--- a/bench/src/memmem/mod.rs
+++ b/bench/src/memmem/mod.rs
@@ -106,6 +107,7 @@ pub fn all(c: &mut Criterion) {
oneshot_iter(c);
prebuilt_iter(c);
sliceslice::all(c);
+ byterank::all(c);
}
fn oneshot(c: &mut Criterion) {
diff --git a/src/memmem/byte_frequencies.rs b/src/memmem/byterank/default.rs
--- a/src/memmem/byte_frequencies.rs
+++ b/src/memmem/byterank/default.rs
@@ -1,4 +1,4 @@
-pub const BYTE_FREQUENCIES: [u8; 256] = [
+pub const RANK: [u8; 256] = [
55, // '\x00'
52, // '\x01'
51, // '\x02'
diff --git /dev/null b/src/memmem/byterank/mod.rs
new file mode 100644
--- /dev/null
+++ b/src/memmem/byterank/mod.rs
@@ -0,0 +1,89 @@
+mod default;
+
+/// This trait allows the user to customize the heuristic used to determine the
+/// relative frequency of a given byte in the dataset being searched.
+///
+/// The use of this trait can have a dramatic impact on performance depending
+/// on the type of data being searched. The details of why are explained in the
+/// docs of [`prefilter::Prefilter`]. To summarize, the core algorithm uses a
+/// prefilter to quickly identify candidate matches that are later verified
+/// more slowly. This prefilter is implemented in terms of trying to find
+/// `rare` bytes at specific offsets that will occur less frequently in the
+/// dataset. While the concept of a `rare` byte is similar for most datasets,
+/// there are some specific datasets (like binary executables) that have
+/// dramatically different byte distributions. For these datasets customizing
+/// the byte frequency heuristic can have a massive impact on performance, and
+/// might even need to be done at runtime.
+///
+/// The default implementation of `HeuristicFrequencyRank` reads from the
+/// static frequency table defined in `src/memmem/byte_frequencies.rs`. This
+/// is optimal for most inputs, so if you are unsure of the impact of using a
+/// custom `HeuristicFrequencyRank` you should probably just use the default.
+///
+/// # Example
+///
+/// ```
+/// use memchr::memmem::{FinderBuilder, HeuristicFrequencyRank};
+///
+/// /// A byte-frequency table that is good for scanning binary executables.
+/// struct Binary;
+///
+/// impl HeuristicFrequencyRank for Binary {
+/// fn rank(&self, byte: u8) -> u8 {
+/// const TABLE: [u8; 256] = [
+/// 255, 128, 61, 43, 50, 41, 27, 28, 57, 15, 21, 13, 24, 17, 17,
+/// 89, 58, 16, 11, 7, 14, 23, 7, 6, 24, 9, 6, 5, 9, 4, 7, 16,
+/// 68, 11, 9, 6, 88, 7, 4, 4, 23, 9, 4, 8, 8, 5, 10, 4, 30, 11,
+/// 9, 24, 11, 5, 5, 5, 19, 11, 6, 17, 9, 9, 6, 8,
+/// 48, 58, 11, 14, 53, 40, 9, 9, 254, 35, 3, 6, 52, 23, 6, 6, 27,
+/// 4, 7, 11, 14, 13, 10, 11, 11, 5, 2, 10, 16, 12, 6, 19,
+/// 19, 20, 5, 14, 16, 31, 19, 7, 14, 20, 4, 4, 19, 8, 18, 20, 24,
+/// 1, 25, 19, 58, 29, 10, 5, 15, 20, 2, 2, 9, 4, 3, 5,
+/// 51, 11, 4, 53, 23, 39, 6, 4, 13, 81, 4, 186, 5, 67, 3, 2, 15,
+/// 0, 0, 1, 3, 2, 0, 0, 5, 0, 0, 0, 2, 0, 0, 0,
+/// 12, 2, 1, 1, 3, 1, 1, 1, 6, 1, 2, 1, 3, 1, 1, 2, 9, 1, 1, 0,
+/// 2, 2, 4, 4, 11, 6, 7, 3, 6, 9, 4, 5,
+/// 46, 18, 8, 18, 17, 3, 8, 20, 16, 10, 3, 7, 175, 4, 6, 7, 13,
+/// 3, 7, 3, 3, 1, 3, 3, 10, 3, 1, 5, 2, 0, 1, 2,
+/// 16, 3, 5, 1, 6, 1, 1, 2, 58, 20, 3, 14, 12, 2, 1, 3, 16, 3, 5,
+/// 8, 3, 1, 8, 6, 17, 6, 5, 3, 8, 6, 13, 175,
+/// ];
+/// TABLE[byte as usize]
+/// }
+/// }
+/// // Create a new finder with the custom heuristic.
+/// let finder = FinderBuilder::new()
+/// .build_forward_with_ranker(Binary, b"\x00\x00\xdd\xdd");
+/// // Find needle with custom heuristic.
+/// assert!(finder.find(b"\x00\x00\x00\xdd\xdd").is_some());
+/// ```
+pub trait HeuristicFrequencyRank {
+ /// Return the heuristic frequency rank of the given byte. A lower rank
+ /// means the byte is believed to occur less frequently in the haystack.
+ ///
+ /// Some uses of this heuristic may treat arbitrary absolute rank values as
+ /// significant. For example, an implementation detail in this crate may
+ /// determine that heuristic prefilters are inappropriate if every byte in
+ /// the needle has a "high" rank.
+ fn rank(&self, byte: u8) -> u8;
+}
+
+/// The default byte frequency heuristic that is good for most haystacks.
+pub(crate) struct DefaultFrequencyRank;
+
+impl HeuristicFrequencyRank for DefaultFrequencyRank {
+ fn rank(&self, byte: u8) -> u8 {
+ self::default::RANK[usize::from(byte)]
+ }
+}
+
+/// This permits passing any implementation of `HeuristicFrequencyRank` as a
+/// borrowed version of itself.
+impl<'a, R> HeuristicFrequencyRank for &'a R
+where
+ R: HeuristicFrequencyRank,
+{
+ fn rank(&self, byte: u8) -> u8 {
+ (**self).rank(byte)
+ }
+}
diff --git a/src/memmem/mod.rs b/src/memmem/mod.rs
--- a/src/memmem/mod.rs
+++ b/src/memmem/mod.rs
@@ -66,11 +66,12 @@ assert_eq!(None, finder.find(b"quux baz bar"));
```
*/
-pub use self::prefilter::Prefilter;
+pub use self::{byterank::HeuristicFrequencyRank, prefilter::Prefilter};
use crate::{
cow::CowBytes,
memmem::{
+ byterank::DefaultFrequencyRank,
prefilter::{Pre, PrefilterFn, PrefilterState},
rabinkarp::NeedleHash,
rarebytes::RareNeedleBytes,
diff --git a/src/memmem/mod.rs b/src/memmem/mod.rs
--- a/src/memmem/mod.rs
+++ b/src/memmem/mod.rs
@@ -712,7 +713,24 @@ impl FinderBuilder {
&self,
needle: &'n B,
) -> Finder<'n> {
- Finder { searcher: Searcher::new(self.config, needle.as_ref()) }
+ self.build_forward_with_ranker(DefaultFrequencyRank, needle)
+ }
+
+ /// Build a forward finder using the given needle and a custom heuristic for
+ /// determining the frequency of a given byte in the dataset.
+ /// See [`HeuristicFrequencyRank`] for more details.
+ pub fn build_forward_with_ranker<
+ 'n,
+ R: HeuristicFrequencyRank,
+ B: ?Sized + AsRef<[u8]>,
+ >(
+ &self,
+ ranker: R,
+ needle: &'n B,
+ ) -> Finder<'n> {
+ Finder {
+ searcher: Searcher::new(self.config, ranker, needle.as_ref()),
+ }
}
/// Build a reverse finder using the given needle from the current
diff --git a/src/memmem/mod.rs b/src/memmem/mod.rs
--- a/src/memmem/mod.rs
+++ b/src/memmem/mod.rs
@@ -817,14 +835,19 @@ enum SearcherKind {
}
impl<'n> Searcher<'n> {
- fn new(config: SearcherConfig, needle: &'n [u8]) -> Searcher<'n> {
+ fn new<R: HeuristicFrequencyRank>(
+ config: SearcherConfig,
+ ranker: R,
+ needle: &'n [u8],
+ ) -> Searcher<'n> {
use self::SearcherKind::*;
- let ninfo = NeedleInfo::new(needle);
+ let ninfo = NeedleInfo::new(&ranker, needle);
let mk = |kind: SearcherKind| {
let prefn = prefilter::forward(
&config.prefilter,
&ninfo.rarebytes,
+ ranker,
needle,
);
Searcher { needle: CowBytes::new(needle), ninfo, prefn, kind }
diff --git a/src/memmem/mod.rs b/src/memmem/mod.rs
--- a/src/memmem/mod.rs
+++ b/src/memmem/mod.rs
@@ -1010,9 +1033,12 @@ impl<'n> Searcher<'n> {
}
impl NeedleInfo {
- pub(crate) fn new(needle: &[u8]) -> NeedleInfo {
+ pub(crate) fn new<R: HeuristicFrequencyRank>(
+ ranker: &R,
+ needle: &[u8],
+ ) -> NeedleInfo {
NeedleInfo {
- rarebytes: RareNeedleBytes::forward(needle),
+ rarebytes: RareNeedleBytes::forward(ranker, needle),
nhash: NeedleHash::forward(needle),
}
}
diff --git a/src/memmem/prefilter/mod.rs b/src/memmem/prefilter/mod.rs
--- a/src/memmem/prefilter/mod.rs
+++ b/src/memmem/prefilter/mod.rs
@@ -1,4 +1,6 @@
-use crate::memmem::{rarebytes::RareNeedleBytes, NeedleInfo};
+use crate::memmem::{
+ rarebytes::RareNeedleBytes, HeuristicFrequencyRank, NeedleInfo,
+};
mod fallback;
#[cfg(memchr_runtime_simd)]
diff --git a/src/memmem/prefilter/mod.rs b/src/memmem/prefilter/mod.rs
--- a/src/memmem/prefilter/mod.rs
+++ b/src/memmem/prefilter/mod.rs
@@ -287,9 +289,10 @@ impl PrefilterState {
/// is the default). In general, we try to use an AVX prefilter, followed by
/// SSE and then followed by a generic one based on memchr.
#[inline(always)]
-pub(crate) fn forward(
+pub(crate) fn forward<R: HeuristicFrequencyRank>(
config: &Prefilter,
rare: &RareNeedleBytes,
+ ranker: R,
needle: &[u8],
) -> Option<PrefilterFn> {
if config.is_none() || needle.len() <= 1 {
diff --git a/src/memmem/prefilter/mod.rs b/src/memmem/prefilter/mod.rs
--- a/src/memmem/prefilter/mod.rs
+++ b/src/memmem/prefilter/mod.rs
@@ -327,7 +330,8 @@ pub(crate) fn forward(
// Check that our rarest byte has a reasonably low rank. The main issue
// here is that the fallback prefilter can perform pretty poorly if it's
// given common bytes. So we try to avoid the worst cases here.
- let (rare1_rank, _) = rare.as_ranks(needle);
+ let (rare1, _) = rare.as_rare_bytes(needle);
+ let rare1_rank = usize::from(ranker.rank(rare1));
if rare1_rank <= MAX_FALLBACK_RANK {
// SAFETY: fallback::find is safe to call in all environments.
return unsafe { Some(PrefilterFn::new(fallback::find)) };
diff --git a/src/memmem/rarebytes.rs b/src/memmem/rarebytes.rs
--- a/src/memmem/rarebytes.rs
+++ b/src/memmem/rarebytes.rs
@@ -1,3 +1,5 @@
+use super::HeuristicFrequencyRank;
+
/// A heuristic frequency based detection of rare bytes for substring search.
///
/// This detector attempts to pick out two bytes in a needle that are predicted
diff --git a/src/memmem/rarebytes.rs b/src/memmem/rarebytes.rs
--- a/src/memmem/rarebytes.rs
+++ b/src/memmem/rarebytes.rs
@@ -44,7 +46,10 @@ impl RareNeedleBytes {
/// Detect the leftmost offsets of the two rarest bytes in the given
/// needle.
- pub(crate) fn forward(needle: &[u8]) -> RareNeedleBytes {
+ pub(crate) fn forward<R: HeuristicFrequencyRank>(
+ ranker: &R,
+ needle: &[u8],
+ ) -> RareNeedleBytes {
if needle.len() <= 1 || needle.len() > core::u8::MAX as usize {
// For needles bigger than u8::MAX, our offsets aren't big enough.
// (We make our offsets small to reduce stack copying.)
diff --git a/src/memmem/rarebytes.rs b/src/memmem/rarebytes.rs
--- a/src/memmem/rarebytes.rs
+++ b/src/memmem/rarebytes.rs
@@ -62,17 +67,17 @@ impl RareNeedleBytes {
// Find the rarest two bytes. We make them distinct by construction.
let (mut rare1, mut rare1i) = (needle[0], 0);
let (mut rare2, mut rare2i) = (needle[1], 1);
- if rank(rare2) < rank(rare1) {
+ if ranker.rank(rare2) < ranker.rank(rare1) {
core::mem::swap(&mut rare1, &mut rare2);
core::mem::swap(&mut rare1i, &mut rare2i);
}
for (i, &b) in needle.iter().enumerate().skip(2) {
- if rank(b) < rank(rare1) {
+ if ranker.rank(b) < ranker.rank(rare1) {
rare2 = rare1;
rare2i = rare1i;
rare1 = b;
rare1i = i as u8;
- } else if b != rare1 && rank(b) < rank(rare2) {
+ } else if b != rare1 && ranker.rank(b) < ranker.rank(rare2) {
rare2 = b;
rare2i = i as u8;
}
diff --git a/src/memmem/rarebytes.rs b/src/memmem/rarebytes.rs
--- a/src/memmem/rarebytes.rs
+++ b/src/memmem/rarebytes.rs
@@ -119,18 +124,4 @@ impl RareNeedleBytes {
pub(crate) fn as_rare_usize(&self) -> (usize, usize) {
(self.rare1i as usize, self.rare2i as usize)
}
-
- /// Return the byte frequency rank of each byte. The higher the rank, the
- /// more frequency the byte is predicted to be. The needle given must be
- /// the same one given to the RareNeedleBytes constructor.
- pub(crate) fn as_ranks(&self, needle: &[u8]) -> (usize, usize) {
- let (b1, b2) = self.as_rare_bytes(needle);
- (rank(b1), rank(b2))
- }
-}
-
-/// Return the heuristical frequency rank of the given byte. A lower rank
-/// means the byte is believed to occur less frequently.
-fn rank(b: u8) -> usize {
- crate::memmem::byte_frequencies::BYTE_FREQUENCIES[b as usize] as usize
}
| diff --git a/src/memmem/mod.rs b/src/memmem/mod.rs
--- a/src/memmem/mod.rs
+++ b/src/memmem/mod.rs
@@ -145,7 +146,7 @@ macro_rules! define_memmem_simple_tests {
};
}
-mod byte_frequencies;
+mod byterank;
#[cfg(memchr_runtime_simd)]
mod genericsimd;
mod prefilter;
diff --git a/src/memmem/prefilter/fallback.rs b/src/memmem/prefilter/fallback.rs
--- a/src/memmem/prefilter/fallback.rs
+++ b/src/memmem/prefilter/fallback.rs
@@ -93,7 +93,10 @@ mod tests {
use super::*;
fn freqy_find(haystack: &[u8], needle: &[u8]) -> Option<usize> {
- let ninfo = NeedleInfo::new(needle);
+ let ninfo = NeedleInfo::new(
+ &crate::memmem::byterank::DefaultFrequencyRank,
+ needle,
+ );
let mut prestate = PrefilterState::new();
find(&mut prestate, &ninfo, haystack, needle)
}
| Runtime configuration of byte frequency table used to classify rare bytes
`memchr` implements a generic SIMD accelerated search that is ideal for implementing something like `CheatEngine` where you scan the memory of an executable process to aid reverse engineering. This process involves repeatedly scanning for possibly millions of small values (u16, i32, ...) in the memory of that process. The user might have information ahead of time about the frequency distribution of bytes in the memory being scanned, which may vary wildly between executables. The user might also be able to control the program to ensure certain rare bytes appear in the program's memory at certain times.
There is an issue that prevents `memchr` from performing optimally when scanning binary executables - the byte frequency table. The core algorithm is based on detecting rare bytes with specific positions in haystack (the prefilter) and then testing these matches to check if the needle has been found. As mentioned in the incredibly detailed comments, the performance of this algorithm is highly dependent on the byte frequency table used to determine what is a rare byte. While the table that is included in `memchr` is optimal for the majority of cases, there are some specific data types that have very different byte frequency distributions, which causes `memchr` to perform worse on those inputs than it otherwise might with a different byte frequency table.
To illustrate this point, consider the following byte frequencies (where `ideal` is the ideal frequency for an x86 binary):
| byte | `memchr` | ideal |
|--------|----------|-------|
| `\x00` | 55 | 255 |
| `\xdd` | 255 | 0 |
| `\x8b` | 80 | 186 |
| `H` | 150 | 254 |
Now, consider scanning for the needle `H\x00\xdd\x8b` in an x86 binary. `memchr` would identify `\x00` and `\x8b` as the rarest bytes, when they are in fact common bytes. Even if `memchr` considered `\x00` to be a frequent byte via configuration, it would still choose `H` and `\x8b` as the rarest bytes, which are both much more common than `\xdd`, the only actually rare byte. This would result in a lot of unnecessary false positives, decreasing the throughput. This is a simple case, but it is easy to extend this idea to many other pathological input sequences that defeat the default frequency table, and might also reasonably appear in an executable or be scanned for by a user.
Now consider a haystack that contains `HHH\x00\xdd\x8b`. The user might know in advance that searching for `HHH\x00` and searching for `H\x00\xdd\x8b` will both return a single unique match, the sub-slice that was mentioned earlier (the exact indices are not identical but that is not the point). The user might also know that `\xdd` is a very rare byte in their dataset. The user should be able to choose scanning for `\xdd` instead of a more common byte to speed up their searches. I cannot imagine how to support something like this without providing the user a mechanism for customizing the byte frequency table.
The proposed solution is to allow the user to specify the byte frequency table at runtime by modifying the `memchr::memmem::rarebytes::rank` function. Currently, this function reads from the global byte frequency table.
My first idea was to create an enum that can be provided to a `FinderBuilder` and then forwarded to `RareNeedleBytes` to choose the table:
```rust
enum ByteFrequencies<'a> {
Default,
Custom(&'a [u8; 256]),
}
```
This enum can be stored in the `NeedleInfo` struct and used at runtime to determine which byte frequency table to use. However, this introduces the lifetime 'a, which may or may not be the same as the needle ('n) and haystack ('h) lifetimes that are stored in related structs. Considering lifetime 'a to be separate and different requires the public API of `Finder` to be changed to add this lifetime.
- ByteFrequencies -> NeedleInfo -> Searcher -> Finder
I believe that the extra lifetime might make life more difficult for the compiler, which is why I observed a small but noticeable (around 10%) impact on the performance of constructing a `Finder` with the default frequency table on my local machine.
Also, by introducing a new member on the struct `NeedleInfo`, the size/alignment properties of `Finder`, `Searcher` and `NeedleInfo` changed, which also might be the reason for the performance impact I observed. (if this sounds crazy to anyone I suggest you take a look at the wonderful performance talk by the legend Emery Berger titled 'Performance Matters' for more details https://www.youtube.com/watch?v=r-TLSBdHe1A).
An idea to remove the generic lifetime from `ByteFrequencies`:
```rust
enum ByteFrequencies {
Default,
Custom(&'static [u8; 256]),
}
```
However, I believe this static API is logically inconsistent with the `FinderBuilder` API. You can construct millions of unique `Finder`s at runtime and then discard them later, but the same cannot be said for static arrays.
Also, the user might want to perform analysis of their specific corpus at runtime to generate a specialized byte frequency table (like 'pre-training'). This is a very interesting use case in the context of the analysis of binary executables, as there is a lot of information that can only be obtained at runtime and can be useful in optimizing many kinds of searches. Forcing the user to use a static byte frequency table would necessarily prevent this use case.
Another idea to remove the generic lifetime and also allow runtime generation of the byte frequency table:
```rust
enum ByteFrequencies {
Default,
Custom(Box<[u8; 256]>),
}
```
However, an issue with this approach is that the `ByteFrequencies` enum has a size of 16 bytes which is mostly wasted. Another issue is that it seems that conceptually we should be passing around some kind of reference to a byte table that can be reused, instead of copying the table for each construction, but that ultimately depends on benchmarks. Also, now the standard library and memory allocation are required for an operation that is unrelated to both of those things (`Rc`, `Arc` and others have similar issues).
I also tried storing the byte table inline, but this had disastrous results on performance. This is probably because this extra storage pushed important members on related structs into new cache lines, which affected subsequent operations on these members.
```rust
enum ByteFrequencies {
Default,
Custom([u8; 256]),
}
```
One thing I have not tried yet but might be interesting is trying to re-organize the members and memory layout of any struct that stores a `ByteFrequencies` object. This might allow using an inline byte frequency table for example, but would likely result in breaking changes to the layout of public structs in `memchr`. Even just introducing the `ByteFrequencies` object already changes the memory layout of certain structs, which I am not sure about whether it is something undesirable or not.
All of this culminated in the pull request I submitted, but I realize now it is better to just lay it all out here and figure out the best path forward together. I appreciate any feedback you may have on these suggestions.
P.S. I think `memchr` is an incredible library and the code quality and detail of documentation definitely helped me greatly in understanding the internals and even being able to suggest this in the first place, so kudos.
Runtime customization of byte-frequency table - Default type param version
| Thank you this is a lovely issue! I definitely agree this is a problem we should solve and I think it's solvable.
One thing that sticks out to me here is that the representation of "byte frequency table" as a `[u8; 256]` is perhaps itself just an implementation detail that doesn't necessarily need to be exposed. Namely, even internally, the table isn't really used directly. Instead, what's used is a ranking function:
https://github.com/BurntSushi/memchr/blob/8037d11b4357b0f07be2bb66dc2659d9cf28ad32/src/memmem/rarebytes.rs#L132-L136
Indeed, I think that all we actually need from the caller is something that takes any byte value as input and returns a rank (in the range `0..=255`) as output. Ideally, the contract of the function would be as simple as this: a smaller rank represents bytes that are heuristically assumed to be less frequent. (The reality is that this may not quite be the full story. While the `memchr` crate doesn't currently do this, it is plausible that there could be checks like "if the rank of the rarest byte is above a certain value, then don't use a prefilter whose perf is tied to the frequency distribution of bytes at all." This means that it may not just be the relative rank that matters but also the absolute rank. Which is... kind of unfortunate because it makes the contract quite a bit more complicated.)
What this means is that we have a fair bit of flexibility in what we could accept. There are possibly more options, but I'll just list the ones I can think of off the top of my head quickly:
1. Accept a `fn(u8) -> u8`. This is kind of analogous to accepting a `&'static [u8; 256]` in that it doesn't permit much flexibility. But because it's a function pointer, the caller can technically insert some shenanigans to do various things, including using a frequency table that has been computed at runtime. (Such a thing would look quite similar to your PR implementation I imagine.)
2. Accept a `Arc<dyn (FnMut(u8) -> u8) + Send + Sync + UnwindSafe + RefUnwindSafe + 'static>`. This requires `alloc` unfortunately, but otherwise works like (1).
3. This is almost isomorphic to (2), but we could define a new trait `HeuristicFrequencyRank` that can be implemented by any type (including zero sized types). But it has the same problem as (2). In order to avoid the type parameter from infecting the type signature (which I would want to do even if we were designing this API from scratch), we'd have to erase it. And the only way to erase it without introducing a lifetime parameter is with an allocation. So this approach would also require `alloc`.
It is quite tempting to go with (1) because it's a universal API. If we didn't care about the no-std no-alloc use case, then I think (2) would be my preferred route.
One possible riff on (3) is to add a default type parameter to `Finder`. It would default to a zero sized type representing the default byte frequency table, but could be overridden to be anything else. This wouldn't require `alloc` because we would be accepting the infection of the type parameter instead, but kind of hiding it via the default type parameter trick. I _believe_ it is the case that adding a default type parameter to an existing type is a non-breaking change.
There are also more complex ideas, such as _combining_ the options above. For example, we might expose (1) in all configurations but (2) in just configurations that have `alloc`. But I'm not a huge fan of that because of the complexity, and I do wonder whether it might have a perf impact because of the additional space required to store both things.
I think I like (3) with the default type parameter trick the best? Because it should literally compile down to absolutely zero overhead for any table that is truly static, since it would just be implemented via a zero-sized type. As long as the default type parameter is a non-breaking change (and again I believe it is), it is a lamentable API addition but one that I think I can live with because folks can legitimately completely ignore it and not be worse off. The addition of an entire trait also kind of sucks too, and I honestly hate adding traits to crates unless they're really well motivated. But because of the no-std no-alloc constraint here, our choices are quite limited.
(Note: There is currently only a `std` mode and a no-std no-alloc mode. There _ought_ to be a no-std-but-with-alloc mode too, however it hasn't been added yet because most things in this crate don't benefit from it. The only exception to this currently is the [`Finder::into_owned`](https://docs.rs/memchr/latest/memchr/memmem/struct.Finder.html#method.into_owned) API, which is currently only available when `std` is enabled, but should in theory only require `alloc`. Otherwise, generally speaking, the only impact `std` has is that it enables runtime CPU feature detection to unlock SIMD optimizations that utilize AVX2.)
Thanks for the quick and very detailed response!
I agree that `HeuristicFrequencyRank` (3. with default type parameter) is the most flexible option, and the most likely to not regress performance given how in the default case the assembly should likely be very similar or identical to the original implementation, as the zero-sized type should be optimized away (like you mentioned).
> the representation of "byte frequency table" as a [u8; 256] is perhaps itself just an implementation detail that doesn't necessarily need to be exposed
I agree 100%. For some reason I completely ignored 'generic' implementations (like traits/functions) probably due to how small the input space is (0..=255) and the many restrictions that come along with no-std no-alloc considerations.
> For example, we might expose (1) in all configurations but (2) in just configurations that have alloc. But I'm not a huge fan of that because of the complexity
I am also against that kind of complexity, especially when it results in a lot more cases that need to be tested which I believe is the case here. Also the added complexity is pretty much an implementation detail due to no-std no-alloc rather than being necessary to represent the fundamental idea (mapping u8 -> u8).
> I believe it is the case that adding a default type parameter to an existing type is a non-breaking change.
I would imagine ideally it wouldn't but in practice might affect the output and I honestly do not know enough about how the rust compiler works yet to make that kind of decision. However since it seems worth investigating I will look into it.
> it is a lamentable API addition
> I honestly hate adding traits to crates unless they're really well motivated
I also agree with these, and the idea of changing the public API for people that are not interested in this feature makes me uncomfortable.
My first goal will be to implement `HeuristicFrequencyRank` and the default type parameter with as little impact as possible on the assembly generated for code that uses the default frequency table. I'm going to benchmark this with a specific benchmark that is only for measuring the time to create a `Finder` and use the selected frequency table once (or as few times as possible).
If I can implement this without affecting the default code path, I will consider it a success. I am still going to investigate methods that do not affect the public API at all, however If I am unsuccessful with that then I will upload the `HeuristicFrequencyRank` version as a PR.
It goes against my principles to change the public API (even if it still compiles) AND regress performance (even a small amount) for users that do not care about this feature. It seems to me that this would (rightly) encourage these users to use an older version of the library. Also in rust we like zero cost abstractions. I think I would only feel comfortable introducing something like this if EITHER the public API changes (but not how it is used) OR the default code path is minimally (1%-2%) slower, but not both.
Some more thoughts I had not directly related to what is above, but related to what you said:
> Accept a `fn(u8) -> u8`. This is kind of analogous to accepting a &'static [u8; 256] in that it doesn't permit much flexibility
This is interesting, but I think it might be more problematic than at first glance. If a user is going to do anything based on runtime info to generate the frequency table, then they have to include all of the data they need in some static struct that they use in the `fn(u8) -> u8` that is passed along to `memchr` (similar to the original PR). This involves either slow or unsafe code that ideally the user should not write.
I also think it kind of defeats the purpose of a generic solution if the only way to comfortably use that generic solution is to do the exact same thing you could already do with the non generic solution. In this case, the `fn(u8) -> u8` would pretty much just end up being a wrapper for a static array, unless the user does some ugly stuff themselves. In this case, why not just use a static array directly?
If `memchr` were to implement static storage and the unsafe code for just the frequency table, then the user can avoid having any statics/unsafe in their code when generating frequency tables at runtime. However, all of what you mentioned in the original PR made me realize that implementing this inside of `memchr` is not a good idea, so I don't think I'll be investigating this. I would like to mention however some thoughts I had on this for the sake of completeness (ignore the next paragraph if that does not interest you):
If `memchr` has a function to change the global byte frequency table that is thread safe, then the user can avoid all static/unsafe code. Furthermore, since the table is only queried during the construction of a `Finder`, that is the only time that the state of the global is relevant at all. If the purpose of a `Finder` is to be constructed ahead of time, and then re-used for multiple searches, then the user is probably creating finders in a synchronous loop ahead of time, and then using those finders immutably with multiple threads. If the user could call something like `set_byte_frequencies` before constructing each `Finder`, which they are likely doing in a small simple loop, then they could arbitrarily modify the frequency table at runtime safely without touching the original code at all. In the original PR I only considered setting a new static byte frequency distribution, not generating arbitrary distributions at runtime, but I think the latter could also be achieved by storing the entire `[u8; 256]` in a static variable alongside the pointer that stores the frequency table and copying the runtime frequency table if necessary. Regardless, I am not going down this path anymore but felt it might be helpful/interesting to someone reading this to write some of this down.
Your approach SGTM.
> It goes against my principles to change the public API (even if it still compiles) AND regress performance (even a small amount) for users that do not care about this feature. It seems to me that this would (rightly) encourage these users to use an older version of the library. Also in rust we like zero cost abstractions. I think I would only feel comfortable introducing something like this if EITHER the public API changes (but not how it is used) OR the default code path is minimally (1%-2%) slower, but not both.
Just to give you my feelings on the matter:
* Search speed is the most important thing in the context of this crate. With that said, measuring search speed changes at the level of 1-2% is pretty difficult in my experience because of noisy benchmarks. So if you wound up with a 1-2% regression in search speed, I'd probably throw that in the "cost of doing business" bucket and move on. I doubt anyone is going to pin or fork this crate over 1%.
* If you're referring to searcher _construction_, then 1-2% feels OK to me. You probably have even more wiggle room than that. Searcher construction is already fairly slow, and it isn't hard to [see it in real benchmark](https://blog.burntsushi.net/bstr/#motivation-based-on-performance). (Sorry, that section in my blog post is a bit long, but this comes up around the midway point of that section.) However, unlike many other substring search implementations (such as the one in Rust's standard library or even the venerable `memmem` from libc), this crate provides a way to mitigate the cost of searcher construction. You can build a searcher once and execute searches with it many times. Now obviously, we still want searcher construction to be fast, so you don't have carte blanche here, but a small regression isn't the end of the world.
Basically, in my experience, setting the standard to "doesn't change codegen at all" actually winds up being quite difficult to live up to in practice. Compilers do all sorts of crazy things, and there are cliffs and thresholds everywhere such that even a slight perturbation somewhere has a domino effect that impacts codegen substantially. But that doesn't necessarily mean actual wall clock times are impacted substantially.
So in summary, you probably have a little more wiggle room than you think. And I would be surprised if this trait approach with a default type parameter resulted in a perf change big enough to negate the strategy entirely.
> In this case, why not just use a static array directly?
To be clear, the reason is precisely because with a function pointer, the caller _can_ do shenanigans that make it possible to swap out the frequency table for something else at runtime. Asking for a `&'static [u8; 256]` means that's not possible to do without either unsound APIs or leaking memory. Basically, the function pointer introduces indirection in a way that is compatible with the no-std no-alloc constraint.
The way something like this would be justified is the idea that needing to use a different ranking function than the default is itself quite niche. And thus, forcing the caller to jump through hoops---but hoops that are possible and sound---to achieve their use case is "okay" with me _generally speaking_. That includes needing to write `unsafe`, especially when this is probably on the "easier" spectrum of `unsafe` IMO. But in this particular case, if we can make the default type parameter approach work then I think that's probably better enough to go that route than the more cumbersome `fn(u8) -> u8` approach.
Thanks for the insight!
I agree with you about the regressions and was probably exaggerating a bit, but I would much rather have you tell me that than come in here with non-conservative ideas about regressing performance for all users :)
> Basically, in my experience, setting the standard to "doesn't change codegen at all" actually winds up being quite difficult
Yeah you are totally right, and I honestly don't usually stress out too much given that the benchmarks are doing what I want them to. However, due to how popular `memchr` is and how the whole point of the library is speed I wanted to try everything before making modifications that regress performance.
I do think I've found a good solution. Here's a brief explanation of what I've done (in case you have some thoughts or are curious):
I realized `HeuristicFrequencyRank` does not need to be stored anywhere, it only exists for the duration of a call to `Searcher::new`. This allowed me to avoid touching the public API at all. Now there are 2 versions of `Searcher::new`, where one of them calls the other with a hardcoded value for `HeuristicFrequencyRank`.
There are only 2 additions to the public API, and no changes:
- `HeuristicFrequencyRank` trait
- `FinderBuilder::build_heuristic` generic method that accepts `HeuristicFrequencyRank`
There are a few tweaks to the internal code but they are also minimal.
Finally, the default code path has (at least on my machine) identical assembly and no performance regression (there is a benchmark to prove this too). Also, the cost of using `HeuristicFrequencyRank` when constructing a finder is a result of the performance difference of direct and indirect calls in assembly and pretty much nothing else.
P.S.
> I believe it is the case that adding a default type parameter to an existing type is a non-breaking change.
You were right about this, I was able to generate identical assembly to the original implementation with the default type parameter implementation.
Ah right, of course! The table is indeed only used during construction and I _believe_ I generally expect that to remain true.
The second `build_heuristic` method is a little kludgy, but is maybe better than adding a type parameter to `FinderBuilder`. (Which I think would be required if there was a mutator method to "set" the table.)
The names might need some work too.
> The second build_heuristic method is a little kludgy
Yeah you're right, I was having an issue with incorrect codegen (indirect calls) when storing the trait in the `Searcher`, so I kind of ignored the idea of storing the trait and moved on. I will change the implementation to use a mutator method and it shouldn't affect performance at all I think since the `FinderBuilder` can still call both the generic and regular versions of `Searcher::new`, which was the main problem.
So that only leaves the `HeuristicFrequencyRank`. My original inclination was to call it something like `ByteFrequencies`, but honestly a properly descriptive name seems quite long however you do it.
I would imagine the name of the mutator method should be the name of the trait in snake case or something similar, so I guess if that name is good then the method will also make sense.
Edit: Just realized I misread your comment, apologies.
The reason I thought the second `build_heuristic` method is better is to avoid changing the API, but I also agree that a mutator method would be more consistent with the `FinderBuilder::prefilter` method.
Either way I will try to think of some better names, especially for `build_heuristic`.
Sorry for the radio silence, I've had a lot of things to do this week and since I implemented the `HeuristicFrequencyRank` version in a local fork, I have just been using that.
I have documented and commented the code and am ready to upload a pull request, but there are a few more things I wanted to mention before and also list some names I have thought of to see if we can choose the best one. However if you feel we should continue the discussion in a PR I will upload what I have now and we can continue there.
Firstly, when I implemented the mutator method with the default type parameter for setting a custom `HeuristicFrequencyRank` I ran into what appears (to me at least) to be a fundamental problem.
The following line is in `Finder::new`, but is part of the public API and could be written by a user:
```rust
FinderBuilder::new().build_forward(needle)
```
When `FinderBuilder` is generic, then the line above causes an error, regardless of the default type parameter (requires `::<T>`). The following fix illustrates the problem.
```rust
type T = FinderBuilder;
T::new().build_forward(needle)
```
I believe the reason is that `FinderBuilder` is interpreted ONLY as a generic type when part of a regular (value) expression, whereas in a type expression `FinderBuilder` is parsed based on what comes after. As far as I can tell this is a breaking change to the public API, and I cannot think of any way to resolve this.
Also there were a few other warts that made me feel worse about the default type parameter implementation (could just be me though). Off the top of my head:
- Having to implement a custom `Default` for `FinderBuilder<H>` to avoid `HeuristicFrequencyRank: Default`
- Having to change the signature of all `FinderBuilder` methods to move-only (i.e. `fn x(self) { ... }`) to avoid `HeuristicFrequencyRank: Copy`
- The mutator method returns a different type to the current type (`FinderBuilder<DefaultH>` -> `FinderBuilder<CustomH>`)
All in all I prefer the `build_heuristic` route due to the minimalism, but if you have some more suggestions I am happy to hear them.
That being said, after staring at my code for a few days I have to say I am definitely not a fan of the names at the moment (both `HeuristicFrequencyRank` and `build_heuristic`).
One idea I had is to just call it `Heuristic`. This would simplify and standardize all of the names, but is not very descriptive. However, I feel the argument can be made that `memchr` just uses the one heuristic, so if this heuristic is namespaced (`memchr::memmem::Heuristic`) then in theory it shouldn't be ambiguous.
Furthermore, since it is such a niche feature if someone intends to use it properly then they will likely need a much deeper understanding of what `Heuristic` does than can be gained from a more descriptive name. You mentioned that I'm the first person to ask for this in the many years this library has existed, and my intention (and I imagine the intention of others like me) is to put code that uses the `Heuristic` API in some library code that can be used without worrying about those specifics.
However I am not sure if I am 100% convinced by this argument, but I felt it was definitely worth mentioning. Some more ideas I've had for the names:
`HeuristicFrequencyRank`
- `HeuristicFrequency` (there is a `rank` method)
- `ByteFrequencies`/`ByteFrequency`
- `RareBytes` (public counterpart to `RareNeedleBytes`?)
- `RarityHeuristic`/`HeuristicRarity` (shorter, and `memchr` only deals with bytes anyway)
- `ByteHeuristic` (more descriptive than `Heuristic`, similar reasoning)
`build_heuristic` is a bit harder I think.
- `build_heuristic` is ambiguous (and does not necessarily imply `forward`)
- `build_forward_heuristic` is still kind of ambiguous
- `build_forward_with_heuristic` is very long
- just `build` is ambiguous and would be confusing since this is not a 'common' use case
- just `heuristic` or `with_heuristic` are inconsistent with `build_forward`/`build_reverse`
I also think the issues above would still apply if `heuristic` were replaced with another word.
Can you throw up what you have? I'll take a look. If the default type parameter path is indeed a dead end, then yeah, I think something like `build_heuristic` will do fine.
As for the naming... I'm not quite sure. I think the issue is using "heuristic" absent any other context. There are _lots_ of heuristics that go into `memmem` here, so it isn't a particularly discriminating name. The "rank" concept is really the key ingredient for this option, so the word "rank" or a synonym should be involved somewhere here.
A slightly longer name is also acceptable to me personally since this is going to be a very infrequently used feature.
So I'm thinking things like... `build_with_ranker` or `build_with_byte_ranker` or something along those lines.
Yes I will upload what I have.
> There are lots of heuristics that go into memmem here
Yes of course, I meant only one that the public can touch directly, but I agree completely which is part of the reason I wasn't convinced either.
Just to say, `build_with_ranker` sounds good to me, especially if combined with a trait that has `byte` in the name, but I like both better than `build_heuristic`.
| 2023-07-11T02:49:54.000Z | 2.5 | 2023-07-11T03:00:40Z | 9c4b93c931e34a5104f50e20be1bdd15bc593b0e |
BurntSushi/memchr | 82 | BurntSushi__memchr-82 | [
"72"
] | 427fdc384007d0a5b00b190e8313b3c8d3694a67 | "diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml\n--- a/.github/workflows/ci.yml\n+(...TRUNCATED) | "diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml\n--- a/.github/workflows/ci.yml\n+(...TRUNCATED) | "Use rustc_layout_scalar_valid_range_end(usize::MAX - 1) for the index\nFrom https://github.com/rust(...TRUNCATED) | "I guess I'm not opposed to documenting this, although it kind of seems self evident to me? `memchr`(...TRUNCATED) | 2021-04-30T00:21:45.000Z | 2.3 | 2022-02-27T13:28:37Z | 09f71f492d0f76d63cd286c3869c70676297e204 |
rust-lang/libc | 4,091 | rust-lang__libc-4091 | [
"3689"
] | 78d6dcb4b8d60c9ae0f2c59444613ecf5a2bf919 | "diff --git a/src/fuchsia/mod.rs b/src/fuchsia/mod.rs\n--- a/src/fuchsia/mod.rs\n+++ b/src/fuchsia/m(...TRUNCATED) | "diff --git a/libc-test/build.rs b/libc-test/build.rs\n--- a/libc-test/build.rs\n+++ b/libc-test/bui(...TRUNCATED) | "Missing function: aligned_alloc\nThis function is part of the [C standard](https://en.cppreference.(...TRUNCATED) | 2024-11-18T09:42:55.000Z | 0.2 | 2024-11-18T11:25:32Z | 5e62ce9fadb401539a08b329e4cbd98cc6393f60 | |
rust-lang/libc | 4,086 | rust-lang__libc-4086 | [
"3190",
"3641"
] | 7c64d5d10c86bb1b19edf2c02bd3965bb4c80343 | "diff --git a/build.rs b/build.rs\n--- a/build.rs\n+++ b/build.rs\n@@ -132,7 +132,7 @@ fn rustc_vers(...TRUNCATED) | "diff --git a/ci/style.sh b/ci/style.sh\n--- a/ci/style.sh\n+++ b/ci/style.sh\n@@ -29,4 +29,12 @@ fo(...TRUNCATED) | "utmp function family missing on musl\nHello,\r\nfunctions like getutxent, setutxent, endutxent, str(...TRUNCATED) | "Why is this trying to change O_LARGEFILE?\n@joshtriplett did you mean to ask this in the pull-requ(...TRUNCATED) | 2024-11-17T07:53:53.000Z | 0.2 | 2024-11-18T04:53:05Z | 5e62ce9fadb401539a08b329e4cbd98cc6393f60 |
rust-lang/libc | 4,033 | rust-lang__libc-4033 | [
"4031"
] | 3a0b0444d7206aa3feb30297c005597a960d0b33 | "diff --git a/src/unix/solarish/illumos.rs b/src/unix/solarish/illumos.rs\n--- a/src/unix/solarish/i(...TRUNCATED) | "diff --git a/libc-test/build.rs b/libc-test/build.rs\n--- a/libc-test/build.rs\n+++ b/libc-test/bui(...TRUNCATED) | "API request: AIO methods for illumos & solaris\nI would like:\n* aiocb\n* aio_read\n* aio_write\n* (...TRUNCATED) | 2024-11-13T13:00:37.000Z | 0.2 | 2024-11-16T07:29:36Z | 5e62ce9fadb401539a08b329e4cbd98cc6393f60 | |
rust-lang/libc | 3,966 | rust-lang__libc-3966 | [
"3688"
] | 42d1000bc82fc608749f9df19f3d699d2b548ed6 | "diff --git a/README.md b/README.md\n--- a/README.md\n+++ b/README.md\n@@ -11,10 +11,14 @@ This crat(...TRUNCATED) | "diff --git a/libc-test/build.rs b/libc-test/build.rs\n--- a/libc-test/build.rs\n+++ b/libc-test/bui(...TRUNCATED) | "Add struct `fanotify_event_info_fid`\nHost triplet: `x86_64-unknown-linux-gnu`\r\n\r\nAPI struct is(...TRUNCATED) | 2024-10-15T01:50:55.000Z | 0.2 | 2024-10-15T03:47:25Z | 5e62ce9fadb401539a08b329e4cbd98cc6393f60 | |
rust-lang/libc | 3,952 | rust-lang__libc-3952 | [
"3947"
] | b9e8477fa7c71408bdbc1eed9821af0783f97bdb | "diff --git a/src/unix/bsd/freebsdlike/freebsd/mod.rs b/src/unix/bsd/freebsdlike/freebsd/mod.rs\n---(...TRUNCATED) | "diff --git a/libc-test/build.rs b/libc-test/build.rs\n--- a/libc-test/build.rs\n+++ b/libc-test/bui(...TRUNCATED) | "CI fails on FreeBSD 15 for TCP_MAXPEAKRATE\nCI is currently failing on FreeBSD 15 because TCP_MAXPE(...TRUNCATED) | 2024-09-29T19:56:41.000Z | 0.2 | 2024-10-03T15:32:28Z | 5e62ce9fadb401539a08b329e4cbd98cc6393f60 | |
rust-lang/libc | 3,950 | rust-lang__libc-3950 | [
"3947"
] | 09d7aa0d84f9b87d1a8bcb96dc5727282084397d | "diff --git a/src/unix/bsd/freebsdlike/freebsd/mod.rs b/src/unix/bsd/freebsdlike/freebsd/mod.rs\n---(...TRUNCATED) | "diff --git a/libc-test/build.rs b/libc-test/build.rs\n--- a/libc-test/build.rs\n+++ b/libc-test/bui(...TRUNCATED) | "CI fails on FreeBSD 15 for TCP_MAXPEAKRATE\nCI is currently failing on FreeBSD 15 because TCP_MAXPE(...TRUNCATED) | 2024-09-29T18:50:20.000Z | 0.2 | 2025-03-10T07:02:15Z | 5e62ce9fadb401539a08b329e4cbd98cc6393f60 | |
rust-lang/libc | 3,885 | rust-lang__libc-3885 | [
"3760"
] | 5431bdb756c560b21bce18ea7dff72a2aad9975c | "diff --git a/src/unix/linux_like/linux/musl/mod.rs b/src/unix/linux_like/linux/musl/mod.rs\n--- a/s(...TRUNCATED) | "diff --git a/libc-test/build.rs b/libc-test/build.rs\n--- a/libc-test/build.rs\n+++ b/libc-test/bui(...TRUNCATED) | "preadv2 is not supported on musl libc\nThe preadv2 function is present on -gnu target, however when(...TRUNCATED) | 2024-08-29T10:34:24.000Z | 0.2 | 2025-03-10T07:04:42Z | 5e62ce9fadb401539a08b329e4cbd98cc6393f60 | |
rust-lang/libc | 3,882 | rust-lang__libc-3882 | [
"3704"
] | 5431bdb756c560b21bce18ea7dff72a2aad9975c | "diff --git a/src/unix/bsd/apple/mod.rs b/src/unix/bsd/apple/mod.rs\n--- a/src/unix/bsd/apple/mod.rs(...TRUNCATED) | "diff --git a/libc-test/build.rs b/libc-test/build.rs\n--- a/libc-test/build.rs\n+++ b/libc-test/bui(...TRUNCATED) | "Add IPV6_DONTFRAG socket option on OpenBSD\n* target triple: x86_64-unknown-openbsd\r\n* Header fil(...TRUNCATED) | 2024-08-29T09:31:41.000Z | 0.2 | 2025-03-10T07:04:41Z | 5e62ce9fadb401539a08b329e4cbd98cc6393f60 |
End of preview. Expand
in Data Studio
README.md exists but content is empty.
- Downloads last month
- 5