repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/dsa/falcon512_rpo/math/samplerz.rs | miden-crypto/src/dsa/falcon512_rpo/math/samplerz.rs | use rand::Rng;
/// Samples an integer from {0, ..., 18} according to the distribution χ, which is close to
/// the half-Gaussian distribution on the natural numbers with mean 0 and standard deviation
/// equal to sigma_max.
fn base_sampler(bytes: [u8; 9]) -> i16 {
    // Reverse cumulative distribution table for χ with 72 bits of precision:
    // RCDT[k] is (approximately) 2^72 * Pr[X > k].
    const RCDT: [u128; 18] = [
        3024686241123004913666,
        1564742784480091954050,
        636254429462080897535,
        199560484645026482916,
        47667343854657281903,
        8595902006365044063,
        1163297957344668388,
        117656387352093658,
        8867391802663976,
        496969357462633,
        20680885154299,
        638331848991,
        14602316184,
        247426747,
        3104126,
        28824,
        198,
        1,
    ];
    // Interpret the 9 random bytes as a little-endian 72-bit integer. This is
    // equivalent to the previous pad-to-16 / reverse / from_be_bytes sequence,
    // but avoids the intermediate heap allocation and fallible conversion.
    let mut buf = [0u8; 16];
    buf[..9].copy_from_slice(&bytes);
    let u = u128::from_le_bytes(buf);
    // The sample is the number of table entries strictly greater than u.
    RCDT.into_iter().filter(|r| u < *r).count() as i16
}
/// Computes an integer approximation of 2^63 * ccs * exp(-x).
fn approx_exp(x: f64, ccs: f64) -> u64 {
    // Coefficients of a fixed-point polynomial approximation of exp(-x),
    // scaled by 2^63. They are taken from FACCT:
    // https://eprint.iacr.org/2018/1234
    // https://github.com/raykzhao/gaussian
    const C: [u64; 13] = [
        0x00000004741183a3u64,
        0x00000036548cfc06u64,
        0x0000024fdcbf140au64,
        0x0000171d939de045u64,
        0x0000d00cf58f6f84u64,
        0x000680681cf796e3u64,
        0x002d82d8305b0feau64,
        0x011111110e066fd0u64,
        0x0555555555070f00u64,
        0x155555555581ff00u64,
        0x400000000002b400u64,
        0x7fffffffffff4800u64,
        0x8000000000000000u64,
    ];
    let scale = 1u64 << 63;
    // Fixed-point representation of x with 63 fractional bits.
    let z = f64::floor(x * (scale as f64)) as u64;
    // Horner-style evaluation in 63-bit fixed point:
    // for each coefficient c, y <- c - ((z * y) >> 63).
    let y = C[1..]
        .iter()
        .fold(C[0], |y, &c| c - ((((z as u128) * (y as u128)) >> 63) as u64));
    // Scale the result by ccs, also represented in 63-bit fixed point.
    let zc = f64::floor(ccs * (scale as f64)) as u64;
    (((zc as u128) * (y as u128)) >> 63) as u64
}
/// A random bool that is true with probability ≈ ccs · exp(-x).
fn ber_exp<R: Rng>(x: f64, ccs: f64, rng: &mut R) -> bool {
    const LN2: f64 = core::f64::consts::LN_2;
    const ILN2: f64 = 1.0 / LN2;
    // Decompose x = s*ln(2) + r with 0 <= r < ln(2), so exp(-x) = 2^-s * exp(-r).
    let s = f64::floor(x * ILN2);
    let r = x - s * LN2;
    // Cap the shift at 63 so the right shift below stays within the u64 range.
    let s = (s as u64).min(63);
    // z ≈ 2^64 * ccs * exp(-x): double approx_exp's 63-bit result and subtract 1
    // so it fits in 64 bits.
    // NOTE(review): if approx_exp returns exactly 2^63 (possible when r = 0 and
    // ccs = 1), the left shift drops the top bit and the subtraction underflows
    // in debug builds — confirm callers exclude this case.
    let z = ((approx_exp(r, ccs) << 1) - 1) >> s;
    // Compare z against a fresh random 64-bit value one byte at a time, most
    // significant byte first, stopping at the first differing byte. This draws
    // only as many random bytes as needed for the comparison.
    let mut w = 0_i32;
    for i in (0..=56).rev().step_by(8) {
        let mut dest = [0_u8; 1];
        rng.fill_bytes(&mut dest);
        let p = u8::from_be_bytes(dest);
        w = (p as i32) - (z >> i & 0xff) as i32;
        if w != 0 {
            break;
        }
    }
    // True iff the random value ended up strictly smaller than z.
    w < 0
}
/// Samples an integer from the Gaussian distribution with given mean (mu) and standard deviation
/// (sigma).
pub(crate) fn sampler_z<R: Rng>(mu: f64, sigma: f64, sigma_min: f64, rng: &mut R) -> i16 {
    const SIGMA_MAX: f64 = 1.8205;
    const INV_2SIGMA_MAX_SQ: f64 = 1f64 / (2f64 * SIGMA_MAX * SIGMA_MAX);
    let isigma = 1f64 / sigma;
    // dss = 1 / (2 * sigma^2)
    let dss = 0.5f64 * isigma * isigma;
    // Split mu into its integer part s and fractional part r in [0, 1).
    let s = f64::floor(mu);
    let r = mu - s;
    // Rejection-sampling correction factor: ccs = sigma_min / sigma.
    let ccs = sigma_min * isigma;
    // Rejection-sampling loop: draw candidates until one is accepted.
    loop {
        // Draw a candidate z0 from the base (half-Gaussian-like) distribution.
        let mut dest = [0_u8; 9];
        rng.fill_bytes(&mut dest);
        let z0 = base_sampler(dest);
        // One extra byte supplies the random sign bit.
        let mut dest = [0_u8; 1];
        rng.fill_bytes(&mut dest);
        let random_byte: u8 = dest[0];
        // x = ((z-r)^2)/(2*sigma^2) - ((z-b)^2)/(2*sigma0^2)
        // b in {0, 1}; z = z0 + 1 when b = 1, and z = -z0 when b = 0.
        let b = (random_byte & 1) as i16;
        let z = b + (2 * b - 1) * z0;
        let zf_min_r = (z as f64) - r;
        let x = zf_min_r * zf_min_r * dss - (z0 * z0) as f64 * INV_2SIGMA_MAX_SQ;
        // Accept z with probability ccs * exp(-x); otherwise retry.
        if ber_exp(x, ccs, rng) {
            // Shift the accepted sample back by the integer part of mu.
            return z + (s as i16);
        }
    }
}
#[cfg(all(test, feature = "std"))]
mod test {
    use super::approx_exp;

    /// Checks approx_exp against sage-generated known answers, allowing an
    /// absolute error of at most 2^14 (out of 2^63 bits of fixed-point scale).
    #[test]
    fn test_approx_exp() {
        let precision = 1u64 << 14;
        // known answers were generated with the following sage script:
        //```sage
        // num_samples = 10
        // precision = 200
        // R = Reals(precision)
        //
        // print(f"let kats : [(f64, f64, u64);{num_samples}] = [")
        // for i in range(num_samples):
        //     x = RDF.random_element(0.0, 0.693147180559945)
        //     ccs = RDF.random_element(0.0, 1.0)
        //     res = round(2^63 * R(ccs) * exp(R(-x)))
        //     print(f"({x}, {ccs}, {res}),")
        // print("];")
        // ```
        let kats: [(f64, f64, u64); 10] = [
            (0.2314993926072656, 0.8148006314615972, 5962140072160879737),
            (0.2648875572812225, 0.12769669655309035, 903712282351034505),
            (0.11251957513682391, 0.9264611470305881, 7635725498677341553),
            (0.04353439307256617, 0.5306497137523327, 4685877322232397936),
            (0.41834495299784347, 0.879438856118578, 5338392138535350986),
            (0.32579398973228557, 0.16513412873289002, 1099603299296456803),
            (0.5939508073919817, 0.029776019144967303, 151637565622779016),
            (0.2932367999399056, 0.37123847662857923, 2553827649386670452),
            (0.5005699297417507, 0.31447208863888976, 1758235618083658825),
            (0.4876437338498085, 0.6159515298936868, 3488632981903743976),
        ];
        for (x, ccs, answer) in kats {
            // Compare the squared difference against the squared precision so
            // the check stays in unsigned arithmetic.
            let difference = (answer as i128) - (approx_exp(x, ccs) as i128);
            assert!(
                (difference * difference) as u64 <= precision * precision,
                "answer: {answer} versus approximation: {}\ndifference: {} whereas precision: {}",
                approx_exp(x, ccs),
                difference,
                precision
            );
        }
    }
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/dsa/falcon512_rpo/math/ffsampling.rs | miden-crypto/src/dsa/falcon512_rpo/math/ffsampling.rs | use alloc::boxed::Box;
use num::Zero;
use num_complex::{Complex, Complex64};
use rand::Rng;
use super::{fft::FastFft, polynomial::Polynomial, samplerz::sampler_z};
use crate::utils::zeroize::{Zeroize, ZeroizeOnDrop};
/// Minimal standard deviation used by the Gaussian sampler at the leaves.
const SIGMIN: f64 = 1.2778336969128337;

/// Computes the Gram matrix. The argument must be a 2x2 matrix
/// whose elements are equal-length vectors of complex numbers,
/// representing polynomials in FFT domain.
pub fn gram(b: [Polynomial<Complex64>; 4]) -> [Polynomial<Complex64>; 4] {
    // Entry (row, col) of B·B* is the sum over k of B[row][k] * conj(B[col][k]),
    // accumulated with Hadamard (pointwise) products since all polynomials are
    // in FFT domain.
    let entry = |row: usize, col: usize| {
        (0..2).fold(Polynomial::zero(), |acc: Polynomial<Complex64>, k| {
            acc + b[2 * row + k].hadamard_mul(&b[2 * col + k].map(|c| c.conj()))
        })
    };
    [entry(0, 0), entry(0, 1), entry(1, 0), entry(1, 1)]
}
/// Computes the LDL decomposition of a 2×2 Hermitian matrix G: finds a
/// lower-triangular matrix L with unit diagonal and a diagonal matrix D such
/// that L·D·L* = G.
///
/// # Input
/// A 2×2 Hermitian (self-adjoint) matrix in row-major order `[g00, g01, g10, g11]`,
/// with `g01 = conj(g10)` and every entry a polynomial in FFT domain.
///
/// # Output
/// Only the non-trivial elements, as the triple `(l10, d00, d11)`:
/// ```text
/// L = [1    0]    D = [d00    0]
///     [l10  1]        [0    d11]
/// ```
///
/// These follow directly from expanding L·D·L* = G:
/// * position (0,0): d00 = g00
/// * position (1,0): l10 = g10 / g00
/// * position (1,1): d11 = g11 - |l10|² · g00
pub fn ldl(
    g: [Polynomial<Complex64>; 4],
) -> (Polynomial<Complex64>, Polynomial<Complex64>, Polynomial<Complex64>) {
    // d00 = g00.
    let d00 = g[0].clone();
    // l10 = g10 / g00 (pointwise division in FFT domain).
    let l10 = g[2].hadamard_div(&g[0]);
    // |l10|² = l10 · conj(l10), computed pointwise.
    let l10_abs_sq = l10.map(|c| c * c.conj());
    // d11 = g11 - |l10|² · g00.
    let d11 = g[3].clone() - g[0].hadamard_mul(&l10_abs_sq);
    (l10, d00, d11)
}
/// Binary tree produced by the fast-Fourier LDL decomposition (`ffldl`).
#[derive(Debug, Clone)]
pub enum LdlTree {
    /// Internal node: the l10 polynomial of the LDL decomposition at this
    /// level, plus the subtrees built from d00 (left) and d11 (right).
    Branch(Polynomial<Complex64>, Box<LdlTree>, Box<LdlTree>),
    /// Leaf node holding two complex values (after `normalize_tree`, index 0
    /// carries the sampler's standard deviation and index 1 is zeroed).
    Leaf([Complex64; 2]),
}
impl Zeroize for LdlTree {
    /// Overwrites all node data (polynomial coefficients and leaf values) with
    /// zeros, using volatile writes so the compiler cannot elide the stores.
    fn zeroize(&mut self) {
        match self {
            LdlTree::Branch(poly, left, right) => {
                // Zeroize polynomial coefficients using write_volatile to prevent compiler
                // optimizations (dead store elimination)
                for coeff in poly.coefficients.iter_mut() {
                    // SAFETY: `coeff` comes from iter_mut, so it is a valid,
                    // aligned, exclusive reference; a volatile write is sound.
                    unsafe {
                        core::ptr::write_volatile(coeff, Complex64::new(0.0, 0.0));
                    }
                }
                // Recursively zeroize child nodes
                left.zeroize();
                right.zeroize();
                // Compiler fence AFTER all zeroing operations to prevent reordering.
                // This ensures all writes (both at this level and in recursive calls) are
                // completed before any subsequent code can observe them.
                core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
            },
            LdlTree::Leaf(arr) => {
                // Zeroize leaf array using write_volatile
                for val in arr.iter_mut() {
                    // SAFETY: `val` comes from iter_mut, so it is a valid,
                    // aligned, exclusive reference; a volatile write is sound.
                    unsafe {
                        core::ptr::write_volatile(val, Complex64::new(0.0, 0.0));
                    }
                }
                // Compiler fence after all writes to prevent reordering with subsequent code
                core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
            },
        }
    }
}
// Manual Drop implementation to ensure zeroization on drop.
// Cannot use #[derive(ZeroizeOnDrop)] because Complex64 doesn't implement Zeroize,
// so we manually implement Drop to call our Zeroize impl.
impl Drop for LdlTree {
    fn drop(&mut self) {
        // Wipe secret-dependent tree data before the memory is released.
        self.zeroize();
    }
}

// Marker impl: with the Drop impl above, LdlTree satisfies the
// zeroize-on-drop contract.
impl ZeroizeOnDrop for LdlTree {}
/// Computes the LDL Tree of G. Corresponds to Algorithm 9 of the specification [1, p.37].
/// The argument is a 2x2 matrix of polynomials, given in FFT form.
/// [1]: https://falcon-sign.info/falcon.pdf
pub fn ffldl(gram_matrix: [Polynomial<Complex64>; 4]) -> LdlTree {
    let n = gram_matrix[0].coefficients.len();
    // Decompose this level: G = L·D·L* with D = diag(d00, d11).
    let (l10, d00, d11) = ldl(gram_matrix);
    if n > 2 {
        // Split each diagonal entry into two half-size polynomials and recurse.
        // [d_left, d_right; conj(d_right), d_left] again forms a self-adjoint
        // 2x2 matrix, so ffldl applies to each half.
        let (d00_left, d00_right) = d00.split_fft();
        let (d11_left, d11_right) = d11.split_fft();
        let g0 = [d00_left.clone(), d00_right.clone(), d00_right.map(|c| c.conj()), d00_left];
        let g1 = [d11_left.clone(), d11_right.clone(), d11_right.map(|c| c.conj()), d11_left];
        LdlTree::Branch(l10, Box::new(ffldl(g0)), Box::new(ffldl(g1)))
    } else {
        // Base case: length-2 polynomials become leaves holding their two
        // coefficients directly.
        LdlTree::Branch(
            l10,
            Box::new(LdlTree::Leaf(d00.coefficients.try_into().unwrap())),
            Box::new(LdlTree::Leaf(d11.coefficients.try_into().unwrap())),
        )
    }
}
/// Normalizes the leaves of an LDL tree using a given normalization value `sigma`.
///
/// Each leaf's first entry is replaced by `sigma / sqrt(value)` (the standard
/// deviation the sampler will use) and its second entry is zeroed.
pub fn normalize_tree(tree: &mut LdlTree, sigma: f64) {
    match tree {
        LdlTree::Branch(_l10, lhs, rhs) => {
            // Internal nodes carry no leaf data; just descend.
            normalize_tree(lhs, sigma);
            normalize_tree(rhs, sigma);
        },
        LdlTree::Leaf(leaf) => {
            let variance = leaf[0].re;
            leaf[0] = Complex64::new(sigma / variance.sqrt(), 0.0);
            leaf[1] = Complex64::zero();
        },
    }
}
/// Samples short polynomials using a Falcon tree. Algorithm 11 from the spec [1, p.40].
///
/// [1]: https://falcon-sign.info/falcon.pdf
pub fn ffsampling<R: Rng>(
    t: &(Polynomial<Complex64>, Polynomial<Complex64>),
    tree: &LdlTree,
    mut rng: &mut R,
) -> (Polynomial<Complex64>, Polynomial<Complex64>) {
    match tree {
        LdlTree::Branch(ell, left, right) => {
            // Recurse on the right subtree (for t1) first; this order also
            // fixes the order in which randomness is consumed from `rng`.
            let bold_t1 = t.1.split_fft();
            let bold_z1 = ffsampling(&bold_t1, right, rng);
            let z1 = Polynomial::<Complex64>::merge_fft(&bold_z1.0, &bold_z1.1);
            // t0' = t0 + (t1 - z1) * l
            let t0_prime = t.0.clone() + (t.1.clone() - z1.clone()).hadamard_mul(ell);
            // Then recurse on the left subtree with the adjusted target t0'.
            let bold_t0 = t0_prime.split_fft();
            let bold_z0 = ffsampling(&bold_t0, left, rng);
            let z0 = Polynomial::<Complex64>::merge_fft(&bold_z0.0, &bold_z0.1);
            (z0, z1)
        },
        LdlTree::Leaf(value) => {
            // At a leaf, the standard deviation lives in value[0] (value[1]
            // was zeroed by `normalize_tree`); both coordinates are sampled
            // around the constant coefficients of t0 and t1 with it.
            let z0 = sampler_z(t.0.coefficients[0].re, value[0].re, SIGMIN, &mut rng);
            let z1 = sampler_z(t.1.coefficients[0].re, value[0].re, SIGMIN, &mut rng);
            (
                Polynomial::new(vec![Complex64::new(z0 as f64, 0.0)]),
                Polynomial::new(vec![Complex64::new(z1 as f64, 0.0)]),
            )
        },
    }
}
// TESTS
// ================================================================================================

#[cfg(all(test, feature = "std"))]
mod tests {
    use num_complex::Complex64;
    use rand::{Rng, SeedableRng};
    use rand_chacha::ChaCha20Rng;

    use super::*;

    /// Helper to reconstruct G from L and D matrices by computing L·D·L*
    ///
    /// All polynomials are in FFT domain, so we use Hadamard (element-wise) operations.
    ///
    /// Given L = [1   0]  and  D = [d00   0]
    ///           [l10 1]           [0   d11]
    ///
    /// We compute G = L·D·L* = [1   0] [d00   0] [1  conj(l10)]
    ///                         [l10 1] [0   d11] [0          1]
    fn reconstruct_g(
        l10: &Polynomial<Complex64>,
        d00: &Polynomial<Complex64>,
        d11: &Polynomial<Complex64>,
    ) -> [Polynomial<Complex64>; 4] {
        // Compute conj(l10) for use in L*
        let l10_conj = l10.map(|c| c.conj());
        // Compute G = L·D·L* using Hadamard operations (FFT domain)
        // G[0,0] = 1*d00*1 + 0*d11*0 = d00
        let g00 = d00.clone();
        // G[0,1] = 1*d00*conj(l10) + 0*d11*1 = d00 * conj(l10)
        let g01 = d00.hadamard_mul(&l10_conj);
        // G[1,0] = l10*d00*1 + 1*d11*0 = l10 * d00
        let g10 = l10.hadamard_mul(d00);
        // G[1,1] = l10*d00*conj(l10) + 1*d11*1 = l10 * d00 * conj(l10) + d11
        let g11 = l10.hadamard_mul(d00).hadamard_mul(&l10_conj) + d11.clone();
        [g00, g01, g10, g11]
    }

    /// Helper to create a random Hermitian matrix G.
    ///
    /// The polynomials are in FFT domain (each coefficient represents an evaluation point).
    /// Returns a 2×2 matrix as [g00, g01, g10, g11] where:
    /// - g00 and g11 are self-adjoint (real-valued in FFT domain)
    /// - g10 = conj(g01) (Hermitian property)
    fn random_hermitian_matrix(n: usize, rng: &mut impl Rng) -> [Polynomial<Complex64>; 4] {
        let mut g00 = vec![Complex64::new(0.0, 0.0); n];
        let mut g01 = vec![Complex64::new(0.0, 0.0); n];
        let mut g11 = vec![Complex64::new(0.0, 0.0); n];
        for i in 0..n {
            // Diagonal elements must be real (self-adjoint property)
            g00[i] = Complex64::new(rng.random_range(-10.0..10.0), 0.0);
            g11[i] = Complex64::new(rng.random_range(-10.0..10.0), 0.0);
            // Off-diagonal can be any complex number
            g01[i] = Complex64::new(rng.random_range(-10.0..10.0), rng.random_range(-10.0..10.0));
        }
        // Ensure Hermitian property: g10 = conj(g01)
        let g10 = g01.iter().map(|c| c.conj()).collect();
        [
            Polynomial::new(g00),
            Polynomial::new(g01),
            Polynomial::new(g10),
            Polynomial::new(g11),
        ]
    }

    /// Helper to check if two polynomials are approximately equal
    /// (coefficient-wise, within `eps` on both real and imaginary parts).
    fn polynomials_approx_eq(
        a: &Polynomial<Complex64>,
        b: &Polynomial<Complex64>,
        eps: f64,
    ) -> bool {
        if a.coefficients.len() != b.coefficients.len() {
            return false;
        }
        a.coefficients
            .iter()
            .zip(b.coefficients.iter())
            .all(|(x, y)| (x.re - y.re).abs() < eps && (x.im - y.im).abs() < eps)
    }

    /// Test that LDL decomposition satisfies L·D·L* = G for random polynomials in FFT domain.
    ///
    /// This test verifies the mathematical correctness by:
    /// 1. Creating random Hermitian matrices G (in FFT domain)
    /// 2. Computing their LDL decomposition
    /// 3. Reconstructing G from L and D using Hadamard operations
    /// 4. Verifying the reconstruction matches the original
    #[test]
    fn test_ldl_decomposition_random() {
        // Fixed seed for deterministic test runs.
        let mut rng = ChaCha20Rng::from_seed([42u8; 32]);
        // Test with various polynomial sizes
        for degree in [1, 2, 16, 512] {
            let g = random_hermitian_matrix(degree, &mut rng);
            // Compute LDL decomposition
            let (l10, d00, d11) = ldl(g.clone());
            // Reconstruct G from L·D·L*
            let g_reconstructed = reconstruct_g(&l10, &d00, &d11);
            // Verify reconstruction matches original (L·D·L* = G)
            assert!(
                polynomials_approx_eq(&g_reconstructed[0], &g[0], 1e-10),
                "degree {}: G[0,0] mismatch",
                degree
            );
            assert!(
                polynomials_approx_eq(&g_reconstructed[1], &g[1], 1e-10),
                "degree {}: G[0,1] mismatch",
                degree
            );
            assert!(
                polynomials_approx_eq(&g_reconstructed[2], &g[2], 1e-10),
                "degree {}: G[1,0] mismatch",
                degree
            );
            assert!(
                polynomials_approx_eq(&g_reconstructed[3], &g[3], 1e-10),
                "degree {}: G[1,1] mismatch",
                degree
            );
        }
    }
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/dsa/falcon512_rpo/math/fft.rs | miden-crypto/src/dsa/falcon512_rpo/math/fft.rs | use alloc::vec::Vec;
use core::{
f64::consts::PI,
ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign},
};
use num::{One, Zero};
use num_complex::Complex64;
use super::{Inverse, field::FalconFelt, polynomial::Polynomial};
/// Implements Cyclotomic FFT without bitreversing the outputs, and using precomputed powers of the
/// 2n-th primitive root of unity.
pub trait FastFft: Sized + Clone {
    /// The scalar type of the polynomial's coefficients.
    type Field: Add + Mul + AddAssign + MulAssign + Neg + Sub + SubAssign;

    /// Transforms `self` from coefficient form to FFT (evaluation) form, in place.
    fn fft_inplace(&mut self);

    /// Returns the FFT of `self`, leaving `self` unchanged.
    fn fft(&self) -> Self {
        let mut a = self.clone();
        a.fft_inplace();
        a
    }

    /// Merges two half-length polynomials (in FFT form) into a single
    /// full-length polynomial (in FFT form). Inverse of `split_fft`.
    fn merge_fft(a: &Self, b: &Self) -> Self;

    /// Splits a polynomial (in FFT form) into two half-length polynomials
    /// (in FFT form). Inverse of `merge_fft`.
    fn split_fft(&self) -> (Self, Self);

    /// Transforms `self` from FFT form back to coefficient form, in place.
    fn ifft_inplace(&mut self);

    /// Returns the inverse FFT of `self`, leaving `self` unchanged.
    fn ifft(&self) -> Self {
        let mut a = self.clone();
        a.ifft_inplace();
        a
    }
}
pub trait CyclotomicFourier
where
    Self: Sized
        + Copy
        + One
        + Zero
        + Add<Output = Self>
        + Sub<Output = Self>
        + Mul<Output = Self>
        + MulAssign
        + Inverse,
{
    /// Gets the inverse of 2^n.
    #[allow(dead_code)]
    fn power_of_two_inverse(n: usize) -> Self {
        // Build 2^n by doubling n times. (The previous implementation squared
        // the accumulator each iteration, which computes 2^(2^n) — and even
        // returned the inverse of 2 for n = 0 — rather than the inverse of
        // 2^n that the doc promises.)
        let two = Self::one() + Self::one();
        let mut a = Self::one();
        for _ in 0..n {
            a *= two;
        }
        Self::inverse_or_zero(a)
    }

    /// Gets a primitive nth (with n a power of 2) root of unity.
    #[allow(dead_code)]
    fn primitive_root_of_unity(n: usize) -> Self;

    /// Computes the integer whose n-bit binary expansion is the reverse of that of the argument.
    fn bitreverse_index(arg: usize, n: usize) -> usize {
        // n must be a nonzero power of two so the bit width is well defined.
        assert!(n > 0);
        assert_eq!(n & (n - 1), 0);
        let mut rev = 0;
        let mut m = n >> 1;
        let mut k = 1;
        // Walk m from the top bit down and k from the bottom bit up, copying
        // each set bit of `arg` into the mirrored position of `rev`.
        while m > 0 {
            rev |= (((arg & m) != 0) as usize) * k;
            k <<= 1;
            m >>= 1;
        }
        rev
    }

    /// Computes the first n powers of the 2n-th primitive root of unity, and puts them in
    /// bit-reversed order.
    #[allow(dead_code)]
    fn bitreversed_powers(n: usize) -> Vec<Self> {
        let psi = Self::primitive_root_of_unity(2 * n);
        let mut array = vec![Self::zero(); n];
        let mut alpha = Self::one();
        for a in array.iter_mut() {
            *a = alpha;
            alpha *= psi;
        }
        Self::bitreverse_array(&mut array);
        array
    }

    /// Computes the first n powers of the 2n-th primitive root of unity, inverts them, and puts
    /// them in bit-reversed order.
    #[allow(dead_code)]
    fn bitreversed_powers_inverse(n: usize) -> Vec<Self> {
        let psi = Self::primitive_root_of_unity(2 * n).inverse_or_zero();
        let mut array = vec![Self::zero(); n];
        let mut alpha = Self::one();
        for a in array.iter_mut() {
            *a = alpha;
            alpha *= psi;
        }
        Self::bitreverse_array(&mut array);
        array
    }

    /// Reorders the given elements in the array by reversing the binary expansions of their
    /// indices.
    fn bitreverse_array<T>(array: &mut [T]) {
        let n = array.len();
        for i in 0..n {
            let j = Self::bitreverse_index(i, n);
            // Swap only once per pair (when i < j) so the permutation is applied exactly once.
            if i < j {
                array.swap(i, j);
            }
        }
    }

    /// Computes the evaluations of the polynomial on the roots of the polynomial X^n + 1 using a
    /// fast Fourier transform. Algorithm 1 from https://eprint.iacr.org/2016/504.pdf.
    ///
    /// Arguments:
    ///
    /// - a : &mut [Self] (a reference to) a mutable array of field elements which is to be
    ///   transformed under the FFT. The transformation happens in-place.
    ///
    /// - psi_rev: &[Self] (a reference to) an array of powers of psi, from 0 to n-1, but ordered
    ///   by bit-reversed index. Here psi is a primitive root of order 2n. You can use
    ///   `Self::bitreversed_powers(psi, n)` for this purpose, but this trait implementation is not
    ///   const. For the performance benefit you want a precompiled array, which you can get by
    ///   implementing the same method and marking it "const".
    fn fft(a: &mut [Self], psi_rev: &[Self]) {
        let n = a.len();
        let mut t = n;
        let mut m = 1;
        // Cooley-Tukey butterflies: at each stage, m doubles and the butterfly
        // span t halves.
        while m < n {
            t >>= 1;
            for i in 0..m {
                let j1 = 2 * i * t;
                let j2 = j1 + t - 1;
                let s = psi_rev[m + i];
                for j in j1..=j2 {
                    let u = a[j];
                    let v = a[j + t] * s;
                    a[j] = u + v;
                    a[j + t] = u - v;
                }
            }
            m <<= 1;
        }
    }

    /// Computes the coefficients of the polynomial with the given evaluations on the roots of
    /// X^n + 1 using an inverse fast Fourier transform.
    /// Algorithm 2 from https://eprint.iacr.org/2016/504.pdf.
    ///
    /// Arguments:
    ///
    /// - a : &mut [Self] (a reference to) a mutable array of field elements which is to be
    ///   transformed under the IFFT. The transformation happens in-place.
    ///
    /// - psi_inv_rev: &[Self] (a reference to) an array of powers of psi^-1, from 0 to n-1, but
    ///   ordered by bit-reversed index. Here psi is a primitive root of order 2n. You can use
    ///   `Self::bitreversed_powers(Self::inverse_or_zero(psi), n)` for this purpose, but this
    ///   trait implementation is not const. For the performance benefit you want a precompiled
    ///   array, which you can get by implementing the same methods and marking them "const".
    ///
    /// - ninv: the multiplicative inverse of n, applied as the final scaling.
    fn ifft(a: &mut [Self], psi_inv_rev: &[Self], ninv: Self) {
        let n = a.len();
        let mut t = 1;
        let mut m = n;
        // Gentleman-Sande butterflies: the inverse of the loop structure in `fft`.
        while m > 1 {
            let h = m / 2;
            let mut j1 = 0;
            for i in 0..h {
                let j2 = j1 + t - 1;
                let s = psi_inv_rev[h + i];
                for j in j1..=j2 {
                    let u = a[j];
                    let v = a[j + t];
                    a[j] = u + v;
                    a[j + t] = (u - v) * s;
                }
                j1 += 2 * t;
            }
            t <<= 1;
            m >>= 1;
        }
        // Final normalization by n^-1.
        for ai in a.iter_mut() {
            *ai *= ninv;
        }
    }

    /// Splits a length-n polynomial in FFT form into two length-n/2 polynomials in FFT form.
    /// Inverse of `merge_fft`.
    fn split_fft(f: &[Self], psi_inv_rev: &[Self]) -> (Vec<Self>, Vec<Self>) {
        let n_over_2 = f.len() / 2;
        let mut f0 = vec![Self::zero(); n_over_2];
        let mut f1 = vec![Self::zero(); n_over_2];
        let two_inv = (Self::one() + Self::one()).inverse_or_zero();
        for i in 0..n_over_2 {
            let two_i = i * 2;
            let two_zeta_inv = two_inv * psi_inv_rev[n_over_2 + i];
            f0[i] = two_inv * (f[two_i] + f[two_i + 1]);
            f1[i] = two_zeta_inv * (f[two_i] - f[two_i + 1]);
        }
        (f0, f1)
    }

    /// Merges two length-n/2 polynomials in FFT form into one length-n polynomial in FFT form.
    /// Inverse of `split_fft`.
    fn merge_fft(f0: &[Self], f1: &[Self], psi_rev: &[Self]) -> Vec<Self> {
        let n_over_2 = f0.len();
        let n = 2 * n_over_2;
        let mut f = vec![Self::zero(); n];
        for i in 0..n_over_2 {
            let two_i = i * 2;
            f[two_i] = f0[i] + psi_rev[n_over_2 + i] * f1[i];
            f[two_i + 1] = f0[i] - psi_rev[n_over_2 + i] * f1[i];
        }
        f
    }
}
impl CyclotomicFourier for Complex64 {
    /// Returns e^(2πi/n), a primitive n-th root of unity on the unit circle.
    fn primitive_root_of_unity(n: usize) -> Self {
        let theta = 2. * PI / (n as f64);
        Complex64::new(theta.cos(), theta.sin())
    }

    /// Custom implementation of CyclotomicFourier::bitreversed_powers for
    /// better precision: each power is computed directly from its angle
    /// instead of by repeated multiplication.
    fn bitreversed_powers(n: usize) -> Vec<Self> {
        let mut powers: Vec<Self> = (0..n)
            .map(|k| {
                let angle = (k as f64) * PI / (n as f64);
                Self::new(f64::cos(angle), f64::sin(angle))
            })
            .collect();
        Self::bitreverse_array(&mut powers);
        powers
    }

    /// Custom implementation of CyclotomicFourier::bitreversed_powers_inverse
    /// for better precision: the inverse powers are the conjugates, obtained
    /// here by negating the sine term.
    fn bitreversed_powers_inverse(n: usize) -> Vec<Self> {
        let mut powers: Vec<Self> = (0..n)
            .map(|k| {
                let angle = (k as f64) * PI / (n as f64);
                Self::new(f64::cos(angle), -f64::sin(angle))
            })
            .collect();
        Self::bitreverse_array(&mut powers);
        powers
    }
}
impl FastFft for Polynomial<Complex64> {
    type Field = Complex64;

    fn fft_inplace(&mut self) {
        let n = self.coefficients.len();
        // NOTE(review): the assertion only checks 1 <= n <= 512; it does not
        // verify that n is a power of two despite what the message says —
        // confirm that callers guarantee power-of-two lengths.
        debug_assert!(
            (1..=512).contains(&n),
            "unsupported: n = {n} not a power of 2 or larger than 512"
        );
        Complex64::fft(&mut self.coefficients, &COMPLEX_BITREVERSED_POWERS);
    }

    fn ifft_inplace(&mut self) {
        let n = self.coefficients.len();
        debug_assert!(
            (1..=512).contains(&n),
            "unsupported: n = {n} not a power of 2 or larger than 512"
        );
        // The inverse twiddle factors are the conjugates of the forward ones.
        // They are recomputed (and heap-allocated) on every call.
        let psi_inv_rev: Vec<Complex64> =
            COMPLEX_BITREVERSED_POWERS.iter().map(|c| Complex64::new(c.re, -c.im)).collect();
        // Final scaling factor n^-1 for the inverse transform.
        let ninv = Complex64::new(1.0 / (n as f64), 0.0);
        Complex64::ifft(&mut self.coefficients, &psi_inv_rev, ninv);
    }

    fn merge_fft(a: &Self, b: &Self) -> Self {
        // n is the half-length here; the merged polynomial has length 2n.
        let n = a.coefficients.len();
        debug_assert!(
            (1..=512).contains(&n),
            "unsupported: n = {n} not a power of 2 or larger than 512"
        );
        Self {
            coefficients: Self::Field::merge_fft(
                &a.coefficients,
                &b.coefficients,
                &COMPLEX_BITREVERSED_POWERS,
            ),
        }
    }

    fn split_fft(&self) -> (Self, Self) {
        let n = self.coefficients.len();
        debug_assert!(
            (1..=512).contains(&n),
            "unsupported: n = {n} not a power of 2 or larger than 512"
        );
        // Conjugate twiddle factors, recomputed per call (see ifft_inplace).
        let psi_inv_rev: Vec<Complex64> =
            COMPLEX_BITREVERSED_POWERS.iter().map(|c| Complex64::new(c.re, -c.im)).collect();
        let (a, b) = Self::Field::split_fft(&self.coefficients, &psi_inv_rev);
        (Self { coefficients: a }, Self { coefficients: b })
    }
}
impl FastFft for Polynomial<FalconFelt> {
    type Field = FalconFelt;

    fn fft_inplace(&mut self) {
        let n = self.coefficients.len();
        // NOTE(review): the assertion only checks 1 <= n <= 512; it does not
        // verify that n is a power of two despite what the message says —
        // confirm that callers guarantee power-of-two lengths.
        debug_assert!(
            (1..=512).contains(&n),
            "unsupported: n = {n} not a power of 2 or larger than 512"
        );
        FalconFelt::fft(&mut self.coefficients, &FELT_BITREVERSED_POWERS);
    }

    fn ifft_inplace(&mut self) {
        let n = self.coefficients.len();
        debug_assert!(
            (1..=512).contains(&n),
            "unsupported: n = {n} not a power of 2 or larger than 512"
        );
        // Select the precomputed field inverse of n for the final IFFT scaling;
        // one constant exists per supported power-of-two length.
        let ninv = match n {
            1 => FELT_NINV_1,
            2 => FELT_NINV_2,
            4 => FELT_NINV_4,
            8 => FELT_NINV_8,
            16 => FELT_NINV_16,
            32 => FELT_NINV_32,
            64 => FELT_NINV_64,
            128 => FELT_NINV_128,
            256 => FELT_NINV_256,
            512 => FELT_NINV_512,
            _ => unreachable!("vector length is not power of 2 or larger than 512"),
        };
        FalconFelt::ifft(&mut self.coefficients, &FELT_BITREVERSED_POWERS_INVERSE, ninv);
    }

    fn merge_fft(a: &Self, b: &Self) -> Self {
        // n is the half-length here; the merged polynomial has length 2n.
        let n = a.coefficients.len();
        debug_assert!(
            (1..=512).contains(&n),
            "unsupported: n = {n} not a power of 2 or larger than 512"
        );
        Self {
            coefficients: Self::Field::merge_fft(
                &a.coefficients,
                &b.coefficients,
                &FELT_BITREVERSED_POWERS,
            ),
        }
    }

    fn split_fft(&self) -> (Self, Self) {
        let n = self.coefficients.len();
        debug_assert!(
            (1..=512).contains(&n),
            "unsupported: n = {n} not a power of 2 or larger than 512"
        );
        let (a, b) = Self::Field::split_fft(&self.coefficients, &FELT_BITREVERSED_POWERS_INVERSE);
        (Self { coefficients: a }, Self { coefficients: b })
    }
}
#[allow(clippy::approx_constant)]
const COMPLEX_BITREVERSED_POWERS: [Complex64; 512] = [
Complex64::new(1.0, 0.0),
Complex64::new(0.00000000000000006123233995736766, 1.0),
Complex64::new(0.7071067811865476, 0.7071067811865475),
Complex64::new(-0.7071067811865475, 0.7071067811865476),
Complex64::new(0.9238795325112867, 0.3826834323650898),
Complex64::new(-0.3826834323650897, 0.9238795325112867),
Complex64::new(0.38268343236508984, 0.9238795325112867),
Complex64::new(-0.9238795325112867, 0.3826834323650899),
Complex64::new(0.9807852804032304, 0.19509032201612825),
Complex64::new(-0.1950903220161282, 0.9807852804032304),
Complex64::new(0.5555702330196023, 0.8314696123025452),
Complex64::new(-0.8314696123025453, 0.5555702330196022),
Complex64::new(0.8314696123025452, 0.5555702330196022),
Complex64::new(-0.555570233019602, 0.8314696123025455),
Complex64::new(0.19509032201612833, 0.9807852804032304),
Complex64::new(-0.9807852804032304, 0.1950903220161286),
Complex64::new(0.9951847266721969, 0.0980171403295606),
Complex64::new(-0.09801714032956065, 0.9951847266721969),
Complex64::new(0.6343932841636455, 0.773010453362737),
Complex64::new(-0.773010453362737, 0.6343932841636455),
Complex64::new(0.881921264348355, 0.47139673682599764),
Complex64::new(-0.4713967368259977, 0.881921264348355),
Complex64::new(0.29028467725446233, 0.9569403357322089),
Complex64::new(-0.9569403357322088, 0.2902846772544624),
Complex64::new(0.9569403357322088, 0.29028467725446233),
Complex64::new(-0.29028467725446216, 0.9569403357322089),
Complex64::new(0.4713967368259978, 0.8819212643483549),
Complex64::new(-0.8819212643483549, 0.47139673682599786),
Complex64::new(0.773010453362737, 0.6343932841636455),
Complex64::new(-0.6343932841636454, 0.7730104533627371),
Complex64::new(0.09801714032956077, 0.9951847266721968),
Complex64::new(-0.9951847266721968, 0.09801714032956083),
Complex64::new(0.9987954562051724, 0.049067674327418015),
Complex64::new(-0.04906767432741801, 0.9987954562051724),
Complex64::new(0.6715589548470183, 0.7409511253549591),
Complex64::new(-0.7409511253549589, 0.6715589548470186),
Complex64::new(0.9039892931234433, 0.4275550934302821),
Complex64::new(-0.42755509343028186, 0.9039892931234434),
Complex64::new(0.33688985339222005, 0.9415440651830208),
Complex64::new(-0.9415440651830207, 0.33688985339222033),
Complex64::new(0.970031253194544, 0.24298017990326387),
Complex64::new(-0.24298017990326387, 0.970031253194544),
Complex64::new(0.5141027441932217, 0.8577286100002721),
Complex64::new(-0.857728610000272, 0.5141027441932218),
Complex64::new(0.8032075314806449, 0.5956993044924334),
Complex64::new(-0.5956993044924334, 0.8032075314806449),
Complex64::new(0.14673047445536175, 0.989176509964781),
Complex64::new(-0.989176509964781, 0.1467304744553618),
Complex64::new(0.989176509964781, 0.14673047445536175),
Complex64::new(-0.14673047445536164, 0.989176509964781),
Complex64::new(0.5956993044924335, 0.8032075314806448),
Complex64::new(-0.8032075314806448, 0.5956993044924335),
Complex64::new(0.8577286100002721, 0.5141027441932217),
Complex64::new(-0.5141027441932217, 0.8577286100002721),
Complex64::new(0.24298017990326398, 0.970031253194544),
Complex64::new(-0.970031253194544, 0.24298017990326407),
Complex64::new(0.9415440651830208, 0.33688985339222005),
Complex64::new(-0.33688985339221994, 0.9415440651830208),
Complex64::new(0.4275550934302822, 0.9039892931234433),
Complex64::new(-0.9039892931234433, 0.42755509343028203),
Complex64::new(0.7409511253549591, 0.6715589548470183),
Complex64::new(-0.6715589548470184, 0.740951125354959),
Complex64::new(0.049067674327418126, 0.9987954562051724),
Complex64::new(-0.9987954562051724, 0.049067674327417966),
Complex64::new(0.9996988186962042, 0.024541228522912288),
Complex64::new(-0.024541228522912142, 0.9996988186962042),
Complex64::new(0.6895405447370669, 0.7242470829514669),
Complex64::new(-0.7242470829514668, 0.689540544737067),
Complex64::new(0.9142097557035307, 0.40524131400498986),
Complex64::new(-0.40524131400498975, 0.9142097557035307),
Complex64::new(0.3598950365349883, 0.9329927988347388),
Complex64::new(-0.9329927988347388, 0.35989503653498833),
Complex64::new(0.9757021300385286, 0.2191012401568698),
Complex64::new(-0.21910124015686966, 0.9757021300385286),
Complex64::new(0.5349976198870973, 0.844853565249707),
Complex64::new(-0.8448535652497071, 0.5349976198870972),
Complex64::new(0.8175848131515837, 0.5758081914178453),
Complex64::new(-0.5758081914178453, 0.8175848131515837),
Complex64::new(0.17096188876030136, 0.9852776423889412),
Complex64::new(-0.9852776423889412, 0.17096188876030122),
Complex64::new(0.99247953459871, 0.1224106751992162),
Complex64::new(-0.12241067519921615, 0.99247953459871),
Complex64::new(0.6152315905806268, 0.7883464276266062),
Complex64::new(-0.7883464276266062, 0.6152315905806269),
Complex64::new(0.8700869911087115, 0.49289819222978404),
Complex64::new(-0.492898192229784, 0.8700869911087115),
Complex64::new(0.2667127574748984, 0.9637760657954398),
Complex64::new(-0.9637760657954398, 0.2667127574748985),
Complex64::new(0.9495281805930367, 0.3136817403988915),
Complex64::new(-0.3136817403988914, 0.9495281805930367),
Complex64::new(0.4496113296546066, 0.8932243011955153),
Complex64::new(-0.8932243011955152, 0.4496113296546069),
Complex64::new(0.7572088465064846, 0.6531728429537768),
Complex64::new(-0.6531728429537765, 0.7572088465064847),
Complex64::new(0.07356456359966745, 0.9972904566786902),
Complex64::new(-0.9972904566786902, 0.07356456359966773),
Complex64::new(0.9972904566786902, 0.07356456359966743),
Complex64::new(-0.07356456359966733, 0.9972904566786902),
Complex64::new(0.6531728429537768, 0.7572088465064845),
Complex64::new(-0.7572088465064846, 0.6531728429537766),
Complex64::new(0.8932243011955153, 0.44961132965460654),
Complex64::new(-0.4496113296546067, 0.8932243011955152),
Complex64::new(0.3136817403988916, 0.9495281805930367),
Complex64::new(-0.9495281805930367, 0.3136817403988914),
Complex64::new(0.9637760657954398, 0.26671275747489837),
Complex64::new(-0.2667127574748983, 0.9637760657954398),
Complex64::new(0.4928981922297841, 0.8700869911087113),
Complex64::new(-0.8700869911087113, 0.49289819222978415),
Complex64::new(0.7883464276266063, 0.6152315905806268),
Complex64::new(-0.6152315905806267, 0.7883464276266063),
Complex64::new(0.12241067519921628, 0.99247953459871),
Complex64::new(-0.99247953459871, 0.12241067519921635),
Complex64::new(0.9852776423889412, 0.17096188876030122),
Complex64::new(-0.17096188876030124, 0.9852776423889412),
Complex64::new(0.5758081914178453, 0.8175848131515837),
Complex64::new(-0.8175848131515836, 0.5758081914178454),
Complex64::new(0.8448535652497071, 0.5349976198870972),
Complex64::new(-0.534997619887097, 0.8448535652497072),
Complex64::new(0.21910124015686977, 0.9757021300385286),
Complex64::new(-0.9757021300385285, 0.21910124015687005),
Complex64::new(0.932992798834739, 0.3598950365349881),
Complex64::new(-0.35989503653498817, 0.9329927988347388),
Complex64::new(0.40524131400498986, 0.9142097557035307),
Complex64::new(-0.9142097557035307, 0.4052413140049899),
Complex64::new(0.724247082951467, 0.6895405447370668),
Complex64::new(-0.6895405447370669, 0.7242470829514669),
Complex64::new(0.024541228522912264, 0.9996988186962042),
Complex64::new(-0.9996988186962042, 0.024541228522912326),
Complex64::new(0.9999247018391445, 0.012271538285719925),
Complex64::new(-0.012271538285719823, 0.9999247018391445),
Complex64::new(0.6983762494089729, 0.7157308252838186),
Complex64::new(-0.7157308252838186, 0.6983762494089729),
Complex64::new(0.9191138516900578, 0.3939920400610481),
Complex64::new(-0.393992040061048, 0.9191138516900578),
Complex64::new(0.3713171939518376, 0.9285060804732155),
Complex64::new(-0.9285060804732155, 0.3713171939518377),
Complex64::new(0.9783173707196277, 0.20711137619221856),
Complex64::new(-0.20711137619221845, 0.9783173707196277),
Complex64::new(0.5453249884220465, 0.838224705554838),
Complex64::new(-0.8382247055548381, 0.5453249884220464),
Complex64::new(0.8245893027850253, 0.5657318107836131),
Complex64::new(-0.5657318107836132, 0.8245893027850252),
Complex64::new(0.18303988795514106, 0.9831054874312163),
Complex64::new(-0.9831054874312163, 0.1830398879551409),
Complex64::new(0.9939069700023561, 0.11022220729388306),
Complex64::new(-0.11022220729388306, 0.9939069700023561),
Complex64::new(0.6248594881423865, 0.7807372285720944),
Complex64::new(-0.7807372285720945, 0.6248594881423863),
Complex64::new(0.8760700941954066, 0.4821837720791227),
Complex64::new(-0.4821837720791227, 0.8760700941954066),
Complex64::new(0.27851968938505306, 0.9604305194155658),
Complex64::new(-0.9604305194155658, 0.27851968938505317),
Complex64::new(0.9533060403541939, 0.3020059493192281),
Complex64::new(-0.3020059493192281, 0.9533060403541939),
Complex64::new(0.46053871095824, 0.8876396204028539),
Complex64::new(-0.8876396204028538, 0.4605387109582402),
Complex64::new(0.765167265622459, 0.6438315428897914),
Complex64::new(-0.6438315428897913, 0.7651672656224591),
Complex64::new(0.08579731234443988, 0.996312612182778),
Complex64::new(-0.996312612182778, 0.08579731234444016),
Complex64::new(0.9981181129001492, 0.06132073630220858),
Complex64::new(-0.06132073630220853, 0.9981181129001492),
Complex64::new(0.6624157775901718, 0.7491363945234593),
Complex64::new(-0.7491363945234591, 0.662415777590172),
Complex64::new(0.8986744656939538, 0.43861623853852766),
Complex64::new(-0.4386162385385274, 0.8986744656939539),
Complex64::new(0.325310292162263, 0.9456073253805213),
Complex64::new(-0.9456073253805212, 0.32531029216226326),
Complex64::new(0.9669764710448521, 0.25486565960451457),
Complex64::new(-0.2548656596045145, 0.9669764710448521),
Complex64::new(0.5035383837257176, 0.8639728561215867),
Complex64::new(-0.8639728561215867, 0.5035383837257177),
Complex64::new(0.7958369046088836, 0.6055110414043255),
Complex64::new(-0.6055110414043254, 0.7958369046088836),
Complex64::new(0.13458070850712622, 0.99090263542778),
Complex64::new(-0.99090263542778, 0.13458070850712628),
Complex64::new(0.9873014181578584, 0.15885814333386145),
Complex64::new(-0.15885814333386128, 0.9873014181578584),
Complex64::new(0.5857978574564389, 0.8104571982525948),
Complex64::new(-0.8104571982525947, 0.585797857456439),
Complex64::new(0.8513551931052652, 0.524589682678469),
Complex64::new(-0.5245896826784687, 0.8513551931052652),
Complex64::new(0.23105810828067128, 0.9729399522055601),
Complex64::new(-0.9729399522055601, 0.23105810828067133),
Complex64::new(0.937339011912575, 0.34841868024943456),
Complex64::new(-0.3484186802494344, 0.937339011912575),
Complex64::new(0.4164295600976373, 0.9091679830905223),
Complex64::new(-0.9091679830905224, 0.41642956009763715),
Complex64::new(0.7326542716724128, 0.680600997795453),
Complex64::new(-0.680600997795453, 0.7326542716724128),
Complex64::new(0.03680722294135899, 0.9993223845883495),
Complex64::new(-0.9993223845883495, 0.03680722294135883),
Complex64::new(0.9993223845883495, 0.03680722294135883),
Complex64::new(-0.036807222941358866, 0.9993223845883495),
Complex64::new(0.6806009977954531, 0.7326542716724128),
Complex64::new(-0.7326542716724127, 0.6806009977954532),
Complex64::new(0.9091679830905224, 0.41642956009763715),
Complex64::new(-0.416429560097637, 0.9091679830905225),
Complex64::new(0.3484186802494345, 0.937339011912575),
Complex64::new(-0.9373390119125748, 0.3484186802494348),
Complex64::new(0.9729399522055602, 0.2310581082806711),
Complex64::new(-0.23105810828067114, 0.9729399522055602),
Complex64::new(0.5245896826784688, 0.8513551931052652),
Complex64::new(-0.8513551931052652, 0.524589682678469),
Complex64::new(0.8104571982525948, 0.5857978574564389),
Complex64::new(-0.5857978574564389, 0.8104571982525948),
Complex64::new(0.1588581433338614, 0.9873014181578584),
Complex64::new(-0.9873014181578584, 0.15885814333386147),
Complex64::new(0.99090263542778, 0.13458070850712617),
Complex64::new(-0.1345807085071261, 0.99090263542778),
Complex64::new(0.6055110414043255, 0.7958369046088835),
Complex64::new(-0.7958369046088835, 0.6055110414043257),
Complex64::new(0.8639728561215868, 0.5035383837257176),
Complex64::new(-0.5035383837257175, 0.8639728561215868),
Complex64::new(0.2548656596045146, 0.9669764710448521),
Complex64::new(-0.9669764710448521, 0.2548656596045147),
Complex64::new(0.9456073253805213, 0.3253102921622629),
Complex64::new(-0.32531029216226287, 0.9456073253805214),
Complex64::new(0.4386162385385277, 0.8986744656939538),
Complex64::new(-0.8986744656939539, 0.43861623853852755),
Complex64::new(0.7491363945234594, 0.6624157775901718),
Complex64::new(-0.6624157775901719, 0.7491363945234593),
Complex64::new(0.06132073630220865, 0.9981181129001492),
Complex64::new(-0.9981181129001492, 0.06132073630220849),
Complex64::new(0.996312612182778, 0.0857973123444399),
Complex64::new(-0.08579731234443976, 0.996312612182778),
Complex64::new(0.6438315428897915, 0.765167265622459),
Complex64::new(-0.765167265622459, 0.6438315428897914),
Complex64::new(0.8876396204028539, 0.46053871095824),
Complex64::new(-0.46053871095824006, 0.8876396204028539),
Complex64::new(0.3020059493192282, 0.9533060403541938),
Complex64::new(-0.9533060403541939, 0.30200594931922803),
Complex64::new(0.9604305194155658, 0.27851968938505306),
Complex64::new(-0.27851968938505295, 0.9604305194155659),
Complex64::new(0.48218377207912283, 0.8760700941954066),
Complex64::new(-0.8760700941954065, 0.4821837720791229),
Complex64::new(0.7807372285720945, 0.6248594881423863),
Complex64::new(-0.6248594881423862, 0.7807372285720946),
Complex64::new(0.11022220729388318, 0.9939069700023561),
Complex64::new(-0.9939069700023561, 0.11022220729388324),
Complex64::new(0.9831054874312163, 0.18303988795514095),
Complex64::new(-0.18303988795514092, 0.9831054874312163),
Complex64::new(0.5657318107836132, 0.8245893027850253),
Complex64::new(-0.8245893027850251, 0.5657318107836135),
Complex64::new(0.8382247055548381, 0.5453249884220465),
Complex64::new(-0.5453249884220462, 0.8382247055548382),
Complex64::new(0.20711137619221856, 0.9783173707196277),
Complex64::new(-0.9783173707196275, 0.20711137619221884),
Complex64::new(0.9285060804732156, 0.37131719395183754),
Complex64::new(-0.3713171939518375, 0.9285060804732156),
Complex64::new(0.3939920400610481, 0.9191138516900578),
Complex64::new(-0.9191138516900578, 0.39399204006104815),
Complex64::new(0.7157308252838186, 0.6983762494089729),
Complex64::new(-0.6983762494089728, 0.7157308252838187),
Complex64::new(0.012271538285719944, 0.9999247018391445),
Complex64::new(-0.9999247018391445, 0.012271538285720007),
Complex64::new(0.9999811752826011, 0.006135884649154475),
Complex64::new(-0.006135884649154393, 0.9999811752826011),
Complex64::new(0.7027547444572253, 0.7114321957452164),
Complex64::new(-0.7114321957452165, 0.7027547444572252),
Complex64::new(0.921514039342042, 0.38834504669882625),
Complex64::new(-0.3883450466988262, 0.921514039342042),
Complex64::new(0.3770074102164183, 0.9262102421383113),
Complex64::new(-0.9262102421383114, 0.37700741021641815),
Complex64::new(0.9795697656854405, 0.2011046348420919),
Complex64::new(-0.20110463484209182, 0.9795697656854405),
Complex64::new(0.5504579729366048, 0.83486287498638),
Complex64::new(-0.83486287498638, 0.5504579729366049),
Complex64::new(0.8280450452577558, 0.560661576197336),
Complex64::new(-0.5606615761973359, 0.8280450452577558),
Complex64::new(0.18906866414980628, 0.9819638691095552),
Complex64::new(-0.9819638691095552, 0.18906866414980636),
Complex64::new(0.9945645707342554, 0.10412163387205459),
Complex64::new(-0.1041216338720546, 0.9945645707342554),
Complex64::new(0.6296382389149271, 0.7768884656732324),
Complex64::new(-0.7768884656732323, 0.6296382389149272),
Complex64::new(0.8790122264286335, 0.4767992300633221),
Complex64::new(-0.4767992300633219, 0.8790122264286335),
Complex64::new(0.2844075372112718, 0.9587034748958716),
Complex64::new(-0.9587034748958715, 0.2844075372112721),
Complex64::new(0.9551411683057708, 0.2961508882436238),
Complex64::new(-0.29615088824362384, 0.9551411683057707),
Complex64::new(0.4659764957679661, 0.8847970984309378),
Complex64::new(-0.8847970984309378, 0.4659764957679662),
Complex64::new(0.7691033376455797, 0.6391244448637757),
Complex64::new(-0.6391244448637757, 0.7691033376455796),
Complex64::new(0.0919089564971327, 0.9957674144676598),
Complex64::new(-0.9957674144676598, 0.09190895649713275),
Complex64::new(0.9984755805732948, 0.055195244349689934),
Complex64::new(-0.05519524434968991, 0.9984755805732948),
Complex64::new(0.6669999223036375, 0.745057785441466),
Complex64::new(-0.745057785441466, 0.6669999223036376),
Complex64::new(0.901348847046022, 0.43309381885315196),
Complex64::new(-0.4330938188531519, 0.901348847046022),
Complex64::new(0.33110630575987643, 0.9435934581619604),
Complex64::new(-0.9435934581619604, 0.3311063057598765),
Complex64::new(0.9685220942744174, 0.24892760574572015),
Complex64::new(-0.24892760574572012, 0.9685220942744174),
Complex64::new(0.508830142543107, 0.8608669386377673),
Complex64::new(-0.8608669386377671, 0.5088301425431073),
Complex64::new(0.799537269107905, 0.600616479383869),
Complex64::new(-0.6006164793838688, 0.7995372691079052),
Complex64::new(0.14065823933284924, 0.9900582102622971),
Complex64::new(-0.990058210262297, 0.14065823933284954),
Complex64::new(0.9882575677307495, 0.15279718525844344),
Complex64::new(-0.1527971852584433, 0.9882575677307495),
Complex64::new(0.5907597018588743, 0.8068475535437992),
Complex64::new(-0.8068475535437993, 0.5907597018588742),
Complex64::new(0.8545579883654005, 0.5193559901655896),
Complex64::new(-0.5193559901655896, 0.8545579883654005),
Complex64::new(0.23702360599436734, 0.9715038909862518),
Complex64::new(-0.9715038909862518, 0.23702360599436717),
Complex64::new(0.9394592236021899, 0.3426607173119944),
Complex64::new(-0.34266071731199427, 0.9394592236021899),
Complex64::new(0.4220002707997998, 0.9065957045149153),
Complex64::new(-0.9065957045149153, 0.42200027079979985),
Complex64::new(0.7368165688773699, 0.6760927035753159),
Complex64::new(-0.6760927035753158, 0.73681656887737),
Complex64::new(0.04293825693494096, 0.9990777277526454),
Complex64::new(-0.9990777277526454, 0.04293825693494102),
Complex64::new(0.9995294175010931, 0.030674803176636626),
Complex64::new(-0.03067480317663646, 0.9995294175010931),
Complex64::new(0.6850836677727004, 0.7284643904482252),
Complex64::new(-0.7284643904482252, 0.6850836677727004),
Complex64::new(0.9117060320054299, 0.4108431710579039),
Complex64::new(-0.4108431710579038, 0.9117060320054299),
Complex64::new(0.3541635254204905, 0.9351835099389475),
Complex64::new(-0.9351835099389476, 0.3541635254204904),
Complex64::new(0.9743393827855759, 0.22508391135979283),
Complex64::new(-0.22508391135979267, 0.9743393827855759),
Complex64::new(0.5298036246862948, 0.8481203448032971),
Complex64::new(-0.8481203448032971, 0.5298036246862948),
Complex64::new(0.8140363297059484, 0.5808139580957645),
Complex64::new(-0.5808139580957644, 0.8140363297059485),
Complex64::new(0.1649131204899701, 0.9863080972445987),
Complex64::new(-0.9863080972445986, 0.16491312048997014),
Complex64::new(0.9917097536690995, 0.12849811079379317),
Complex64::new(-0.1284981107937931, 0.9917097536690995),
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | true |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/dsa/falcon512_rpo/tests/prng.rs | miden-crypto/src/dsa/falcon512_rpo/tests/prng.rs | use alloc::vec::Vec;
use rand::{Rng, RngCore};
use rand_core::impls;
use sha3::{
Shake256, Shake256ReaderCore,
digest::{ExtendableOutput, Update, XofReader, core_api::XofReaderCoreWrapper},
};
use super::data::SYNC_DATA;
use crate::dsa::falcon512_rpo::SIG_NONCE_LEN;
/// Length of the seed for the ChaCha20-based PRNG.
pub(crate) const CHACHA_SEED_LEN: usize = 56;
// SHAKE256
// ================================================================================================
/// A PRNG based on SHAKE256 used for testing.
pub struct Shake256Testing(XofReaderCoreWrapper<Shake256ReaderCore>);
impl Shake256Testing {
pub fn new(data: Vec<u8>) -> Self {
let mut hasher = Shake256::default();
hasher.update(&data);
let result = hasher.finalize_xof();
Self(result)
}
fn fill_bytes(&mut self, des: &mut [u8]) {
self.0.read(des)
}
/// A function to help with "syncing" the SHAKE256 PRNG so that it can be used with the test
/// vectors for Falcon512.
pub(crate) fn sync_rng(&mut self) {
for (bytes, num_seed_sampled) in SYNC_DATA.iter() {
let mut dummy = vec![0_u8; bytes * 8];
self.fill_bytes(&mut dummy);
let mut nonce_bytes = [0u8; SIG_NONCE_LEN];
self.fill_bytes(&mut nonce_bytes);
for _ in 0..*num_seed_sampled {
let mut chacha_seed = [0_u8; CHACHA_SEED_LEN];
self.fill_bytes(&mut chacha_seed);
}
}
}
}
impl RngCore for Shake256Testing {
fn next_u32(&mut self) -> u32 {
impls::next_u32_via_fill(self)
}
fn next_u64(&mut self) -> u64 {
impls::next_u64_via_u32(self)
}
fn fill_bytes(&mut self, dest: &mut [u8]) {
self.fill_bytes(dest)
}
}
// ChaCha20
// ================================================================================================
/// A PRNG based on ChaCha20 used for testing.
#[derive(Clone, PartialEq, Eq)]
pub struct ChaCha {
state: Vec<u32>,
s: Vec<u32>,
ctr: u64,
buffer: Vec<u8>,
}
impl ChaCha {
pub fn new<R: Rng>(rng: &mut R) -> Self {
let mut chacha_seed = [0_u8; CHACHA_SEED_LEN];
rng.fill_bytes(&mut chacha_seed);
ChaCha::with_seed(chacha_seed.to_vec())
}
pub fn with_seed(src: Vec<u8>) -> Self {
let mut s = vec![0_u32; 14];
for i in 0..14 {
let bytes = &src[(4 * i)..(4 * (i + 1))];
let value = u32::from_le_bytes(bytes.try_into().unwrap());
s[i] = value;
}
Self {
state: vec![0_u32; 16],
ctr: s[12] as u64 + ((s[13] as u64) << 32),
s,
buffer: vec![0_u8; 0],
}
}
#[inline(always)]
fn qround(&mut self, a: usize, b: usize, c: usize, d: usize) {
self.state[a] = self.state[a].wrapping_add(self.state[b]);
self.state[d] = Self::roll(self.state[d] ^ self.state[a], 16);
self.state[c] = self.state[c].wrapping_add(self.state[d]);
self.state[b] = Self::roll(self.state[b] ^ self.state[c], 12);
self.state[a] = self.state[a].wrapping_add(self.state[b]);
self.state[d] = Self::roll(self.state[d] ^ self.state[a], 8);
self.state[c] = self.state[c].wrapping_add(self.state[d]);
self.state[b] = Self::roll(self.state[b] ^ self.state[c], 7);
}
fn update(&mut self) -> Vec<u32> {
const CW: [u32; 4] = [0x61707865, 0x3320646e, 0x79622d32, 0x6b206574];
self.state = vec![0_u32; 16];
self.state[0] = CW[0];
self.state[1] = CW[1];
self.state[2] = CW[2];
self.state[3] = CW[3];
for i in 0..10 {
self.state[i + 4] = self.s[i]
}
self.state[14] = self.s[10] ^ ((self.ctr & 0xffffffff) as u32);
self.state[15] = self.s[11] ^ ((self.ctr >> 32) as u32);
let state = self.state.clone();
for _ in 0..10 {
self.qround(0, 4, 8, 12);
self.qround(1, 5, 9, 13);
self.qround(2, 6, 10, 14);
self.qround(3, 7, 11, 15);
self.qround(0, 5, 10, 15);
self.qround(1, 6, 11, 12);
self.qround(2, 7, 8, 13);
self.qround(3, 4, 9, 14);
}
for (i, s) in self.state.iter_mut().enumerate().take(16) {
*s = (*s).wrapping_add(state[i]);
}
self.ctr += 1;
self.state.clone()
}
fn block_update(&mut self) -> Vec<u32> {
let mut block = vec![0_u32; 16 * 8];
for i in 0..8 {
let updated = self.update();
block
.iter_mut()
.skip(i)
.step_by(8)
.zip(updated.iter())
.for_each(|(b, &u)| *b = u);
}
block
}
fn random_bytes(&mut self, k: usize) -> Vec<u8> {
if k > self.buffer.len() {
let block = self.block_update();
self.buffer = block.iter().flat_map(|&e| e.to_le_bytes().to_vec()).collect();
}
let out = (self.buffer[..k]).to_vec();
self.buffer = self.buffer[k..].to_vec();
out
}
fn roll(x: u32, n: usize) -> u32 {
(x << n) ^ (x >> (32 - n))
}
}
impl RngCore for ChaCha {
fn next_u32(&mut self) -> u32 {
impls::next_u32_via_fill(self)
}
fn next_u64(&mut self) -> u64 {
impls::next_u64_via_u32(self)
}
fn fill_bytes(&mut self, dest: &mut [u8]) {
let len = dest.len();
let buffer = self.random_bytes(len);
dest.iter_mut().enumerate().for_each(|(i, d)| *d = buffer[i])
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/dsa/falcon512_rpo/tests/mod.rs | miden-crypto/src/dsa/falcon512_rpo/tests/mod.rs | use alloc::vec::Vec;
use data::{
EXPECTED_SIG, EXPECTED_SIG_POLYS, NUM_TEST_VECTORS, SK_POLYS, SYNC_DATA_FOR_TEST_VECTOR,
};
use prng::Shake256Testing;
use rand::{RngCore, SeedableRng};
use rand_chacha::ChaCha20Rng;
use super::{Serializable, math::Polynomial};
use crate::dsa::falcon512_rpo::{
PREVERSIONED_NONCE, PREVERSIONED_NONCE_LEN, SIG_NONCE_LEN, SIG_POLY_BYTE_LEN, SecretKey,
tests::data::DETERMINISTIC_SIGNATURE,
};
mod data;
mod prng;
pub(crate) use prng::ChaCha;
/// Tests the Falcon512 implementation using the test vectors in
/// https://github.com/tprest/falcon.py/blob/88d01ede1d7fa74a8392116bc5149dee57af93f2/scripts/sign_KAT.py#L1131
#[test]
fn test_signature_gen_reference_impl() {
// message and initial seed used for generating the test vectors in the reference implementation
let message = b"data1";
let seed = b"external";
// the reference implementation uses SHAKE256 for generating:
// 1. The nonce for the hash-to-point algorithm.
// 2. The seed used for initializing the ChaCha20 PRNG which is used in signature generation.
let mut rng_shake = Shake256Testing::new(seed.to_vec());
// the test vectors in the reference implementation include test vectors for signatures with
// parameter N = 2^i for i = 1..10, where N is the exponent of the monic irreducible polynomial
// phi. We are only interested in the test vectors for N = 2^9 = 512 and thus need to "sync"
// the SHAKE256 PRNG before we can use it in testing the test vectors that are relevant for
// N = 512.
// The following makes the necessary calls to the PRNG in order to prepare it for use with
// the test vectors for N = 512.
rng_shake.sync_rng();
for i in 0..NUM_TEST_VECTORS {
// construct the four polynomials defining the secret key for this test vector
let [f, g, big_f, big_g] = SK_POLYS[i];
let f = Polynomial::new(f.to_vec());
let g = Polynomial::new(g.to_vec());
let big_f = Polynomial::new(big_f.to_vec());
let big_g = Polynomial::new(big_g.to_vec());
// we generate the secret key using the above four polynomials
let sk = SecretKey::from_short_lattice_basis([g, f, big_g, big_f]);
// we compare the signature as a polynomial
// 1. first we synchronize the `SHAKE256` context with the one in the reference C
// implementation as done in https://github.com/tprest/falcon.py/blob/88d01ede1d7fa74a8392116bc5149dee57af93f2/test.py#L256
let skip_bytes = SYNC_DATA_FOR_TEST_VECTOR[i].0 * 8;
let mut dummy = vec![0_u8; skip_bytes];
rng_shake.fill_bytes(&mut dummy);
// 2. generate the signature
let signature = sk.sign_with_rng_testing(message, &mut rng_shake);
// 3. compare against the expected signature
let sig_coef: Vec<i16> =
signature.sig_poly().coefficients.iter().map(|c| c.balanced_value()).collect();
assert_eq!(sig_coef, EXPECTED_SIG_POLYS[i]);
// 4. compare the encoded signatures including the nonce
let sig_bytes = &signature.to_bytes();
let expected_sig_bytes = EXPECTED_SIG[i];
let hex_expected_sig_bytes = hex::decode(expected_sig_bytes).unwrap();
// to compare against the test vectors we:
// 1. remove the headers when comparing as RPO_FALCON512 uses a different header format,
// 2. compare the nonce part separately as the deterministic version we use omits the
// inclusion of the preversioned portion of the nonce by in its serialized format,
// 3. we remove the public key from the RPO_FALCON512 signature as this is not part of the
// signature in the reference implementation,
// 4. remove the nonce version byte, in addition to the header, from `sig_bytes`.
let nonce = signature.nonce();
assert_eq!(hex_expected_sig_bytes[1..1 + SIG_NONCE_LEN], nonce.as_bytes());
assert_eq!(
&hex_expected_sig_bytes[1 + SIG_NONCE_LEN..],
&sig_bytes[2..2 + SIG_POLY_BYTE_LEN]
);
}
}
#[test]
fn test_secret_key_debug_redaction() {
let seed = [1_u8; 32];
let mut rng = ChaCha20Rng::from_seed(seed);
let sk = SecretKey::with_rng(&mut rng);
// Verify Debug impl produces expected redacted output
let debug_output = format!("{sk:?}");
assert_eq!(debug_output, "<elided secret for SecretKey>");
// Verify Display impl also elides
let display_output = format!("{sk}");
assert_eq!(display_output, "<elided secret for SecretKey>");
}
#[test]
fn test_signature_determinism() {
let seed = [0_u8; 32];
let mut rng = ChaCha20Rng::from_seed(seed);
let sk = SecretKey::with_rng(&mut rng);
let message = b"data";
let signature = sk.sign(message.into());
let serialized_signature = signature.to_bytes();
assert_eq!(serialized_signature, DETERMINISTIC_SIGNATURE);
}
#[test]
fn check_preversioned_fixed_nonce() {
assert_eq!(build_preversioned_fixed_nonce(), PREVERSIONED_NONCE)
}
/// Builds the preversioned portion of the fixed nonce following [1].
///
/// Note that [1] uses the term salt instead of nonce.
///
/// [1]: https://github.com/algorand/falcon/blob/main/falcon-det.pdf
fn build_preversioned_fixed_nonce() -> [u8; PREVERSIONED_NONCE_LEN] {
use crate::dsa::falcon512_rpo::LOG_N;
let mut result = [0_u8; 39];
result[0] = LOG_N;
let domain_separator = "RPO-FALCON-DET".as_bytes();
result
.iter_mut()
.skip(1)
.zip(domain_separator.iter())
.for_each(|(dst, src)| *dst = *src);
result
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/dsa/falcon512_rpo/tests/data.rs | miden-crypto/src/dsa/falcon512_rpo/tests/data.rs | use crate::dsa::falcon512_rpo::SIG_SERIALIZED_LEN;
pub(crate) const NUM_TEST_VECTORS: usize = 12;
/// Helper data to indicate the number of bytes the should be draw from the SHAKE256-based PRNG
/// in order to sync its state to the state of the PRNG in the reference implementation during
/// testing of the Falcon512 test vectors.
pub(crate) const SYNC_DATA_FOR_TEST_VECTOR: [(usize, usize); NUM_TEST_VECTORS] = [
(135376, 1),
(127224, 1),
(61580, 1),
(41060, 1),
(28736, 1),
(123204, 1),
(49268, 1),
(151816, 1),
(106628, 1),
(49212, 1),
(65672, 1),
(53332, 1),
];
/// Helper data to indicate the number of bytes the should be draw from the SHAKE256-based PRNG
/// in order to sync its state to the state of the PRNG in the reference implementation before
/// testing of the Falcon512 test vectors.
pub(crate) const SYNC_DATA: [(usize, usize); 96] = [
(75776, 2),
(8192, 3),
(29696, 1),
(20480, 1),
(12288, 1),
(4096, 1),
(48128, 1),
(24576, 1),
(6144, 1),
(37888, 1),
(39936, 1),
(56320, 1),
(18432, 1),
(29696, 1),
(10240, 2),
(5120, 1),
(27136, 1),
(39424, 1),
(11264, 4),
(15872, 1),
(15360, 3),
(10240, 2),
(11776, 1),
(7168, 1),
(18176, 3),
(30208, 2),
(163072, 1),
(17920, 1),
(110848, 2),
(18944, 2),
(119296, 1),
(19200, 2),
(14080, 1),
(69376, 1),
(75776, 2),
(109056, 1),
(9344, 1),
(37632, 1),
(17152, 2),
(118528, 1),
(4608, 1),
(4352, 2),
(20736, 2),
(27392, 1),
(4096, 1),
(8576, 1),
(26880, 1),
(26880, 1),
(75456, 1),
(4928, 1),
(16704, 3),
(12352, 1),
(16832, 1),
(67456, 1),
(46720, 1),
(24960, 1),
(45760, 1),
(20800, 1),
(8320, 1),
(13184, 2),
(53696, 1),
(8320, 1),
(66112, 1),
(12640, 1),
(4192, 1),
(8224, 1),
(4192, 1),
(4128, 1),
(37504, 1),
(4160, 1),
(12384, 1),
(16704, 1),
(86672, 1),
(37088, 1),
(24800, 1),
(32944, 1),
(12400, 1),
(12368, 1),
(12352, 1),
(29072, 1),
(45440, 1),
(8256, 1),
(4160, 1),
(53664, 1),
(37040, 1),
(20536, 1),
(4112, 1),
(16456, 1),
(20536, 1),
(20544, 1),
(69912, 1),
(28824, 1),
(36992, 1),
(8216, 1),
(74048, 1),
(12352, 1),
];
/// The encoded test vector signatures.
pub(crate) const EXPECTED_SIG: [&str; NUM_TEST_VECTORS] = [
"39938882ef322e8a46b93c6d67c47f31f92a30e31e181b6c6e57760d2a08f254a7e798b27956333e7f928bc01928dac4dcfbf93ba6decb4c21f5c30cd8796cece9626113f8dae0e21e9cef91a383bfa6f139462d5a964b567efb1269bb2989f24acd165110d6a50caa263eb8e8f60fd5fed2f231498167beccf199ca66d347378794b719665f6c74194d89308b2119acf270cf5ff9f1f70b45dd93a67b5dcc41b28c0a6a9db0a2275f470a3fc1620843d1ad45b44687a3ca692935d675df9bbefe5cec12f9af8cc53e04175d56b92a29b40d869836698a0e8452c992856cb74a906f0ed4ddb8aa1e0a2f645db18903d046768e5fb7ef13d1b3a8064129987e4b53428b7549d735e672908eb08861a63c3ee698b5c8a0dab8aed4acea186a89bd33ac92576ab758ad30fddd11698f54717afc36b093a053166b022f99e50dbb718bba87eea9ad491e1e778e1afd4ed92dd05621b48847ebbf0d517e3a2a943f33803434854b225d8cbb8f1e4bd252c9826a629b3ca9da85216d07771ca6538cf6f198f1e55cfd52c9232ffb941445533d30dfae6fd4e5cf63e2250e0593ed92a3df09898ea7dbb6d941179a42275dd93afab392d959075eb104b97375c7259e23703d036f25c92a8f9e6904e64675504bad272068a7bf34d18f665818899fddbf3a5c9bc694cd99a9913bd74f7c36e5422af91eb64371f16c5b28c3454dc360df8442ed2a4edc6956abd7adc4091302ddc3b98cbc47899eaacf250a476b4faa72e434f6767135b24d747e65eb9100e86f3084beb952c3be19369908cade8923d7e4ca32edabb2a593d4b7a5fd82b5d34ca9204010ec9161847d604c7b288a59f8848b548970d814b8d0a4f96e1b433848343a2c6c6cf7af3f84aec9d269d9be632c5c57672d948839ea863208cdc0000000000000000000000000",
"3990524b2f19ea93992641f81adfa0779d4aa5db24a4ddc243e94b85f8f63da5e443658926af37d1b506a9a992d428b32748aa3a80e911d4564a4332878b5d32779af56668bfdcd5faa414bfa77b95031e47373778eb2ceacef0e9aed21a97f0700f5d918d8a606384adf0c0eeb6797c00d4ba5f26273538569135cd586230b767d952b6c192521d1aaee594ea3d6b1fe363b21064abe172867919c825ad528e39877b11569662b6c5c73b4932dc159b71069398bfaea1b487fcd9583e192fea62da26dfaa287d8fbf9d5eae42b7895a63524c79265352b13b880989ea9bfe02f0f9f975532d931fccfe5d60da05c39a62c836792f8a3e12e6e7759b7753d5198c95b2f644690a5f58763d3b5b14a97a72cda6d97a6b4daecf97c7111c5275b44437cb8ecdc25a0a0ff1bef8b250a5a2e699b368c7bbe6eab4be97a2957957d6e9bb0faa9326f9568cca5a636f2653c6ee635c986d9d84753cff5ac17834c1385a066adf43c5633719cf34c15588d997d7a13782d19704363ea1d22858d41be56eb5c0842d0422fcf4261075f1af2178e849715249dce1af4c8583427258eadac0b03f721c62b7fcf33939a5fccadd264e830f0b58a7992b1c99f69c406ef03912e7b54342a11a8930d3259332c555e891119bca3c8d3905e2229ef334c854e405ba53431af462a74e48d9aa06cfed3b30897f3bf6e9d0496a549cc6636d9cd7287178947682bf8b982ab2b44a976071d3230233332189ca405256e776b25c881a85932c6a2a1af6f29e2abeda2d4a5f53be39f37e5d99db15af4a288dde537117d4153ebd5297946b99cebacabc4ee265cb698da59cc5f5492389dc964fc92cd2d665c64eb7f08f2a8ad660c8bb90c3dc0866111bde7c91d60f6a81d2ebd16af48ed7cd6079741abc414c922470680000000000000000000000",
"3919c9011564865ced4e03fa8e7c9b2b0f103e9fe24dbe9f28a4b5db5821dce0d70495ac617fcfd6038cba4de75bf78737b3b317a6478268f08d7c95f7d9202e4a04de2434ee86d988d4397557a2c78122b475bfe9a7b2e75388ba0344cebaea22c335267ba2f48e61d1c58b785473525271baa911bd35e8def50900ed4caa7854d709afc8f36cd02562199dc76fe6b644111d833cda7819cc7a5b58947e8c8cccf9af2cb07963c9f699516018b481cf61c80a248fa1bd78c18048efc86f366293a4847902224bcf1d3cb8bc54e4d0c7149c45df69377a6c6c4474ee1403f336b0e7673d4be6c1ea54361452009e266b6b0d1181221cf814f7779ef8dab9183fe94ea54730eace00a3fcab9ab0a8f6fe9374bb292868236a2640c4680a7410a540b5f10849a4dbcb6c7093bb49b6a9878b88d4601bdd6559b1649e8e019e54eeb508ee2e9c8636105670c5a76ef336fa9c76eea7b29871660fe6bf2ae8dd5878c72ea7b8405f08b3785d2ebb88f92bbaffac17f41f38c9d554bb3134afa8d834415cce30e40e4369e85dfe464b6f3aec13552e1b3376f026a108d9f4120b5a39b4fa9634390cb9c5ba484e6e36ad62fbd27468beb61d62b36785cfd1853e339ca365342ed315838a78c8495d80df73481e077cd5153671c4deccbb46051c70d1751afca525edd91ec53636b8d220db78a4308fecb5515cbebba4bfcdeaf3acc7fbeffd9a62d5f8fdd9335dd297013b8295b5b3dad5f0bbaf83aecaf3b1449917a5699aef39f5c25899782f4c8dadc8db0bf68067f1ee6a6d8835c9445dbccec54dcc43757d4396083c3a185e5cc2f7583e3148ac56ba7991152d075aedfb8c76e48ee1f5431d048cf1599659f3f366e669c1674433f1e75e666fd41b5ecb39b577dbea1ff2ebc2aaa3d398bffab1000000000000000000000000",
"39e40755a7c0fa1c134750a89c9316b75d5fa430de8cd0b9a15cfaaad412cfdf409ce8aef76ee383ee56d7a62adc07d4f2dab1c46ea1ef9a467ab1d8cc8d603f72f6564e741059b2ae93aaac86abf7e77e605c5e076d3cde6e1775ee4c8e310ee25b371a9d78d641c27eb022a98a5e52fbd84f3f9233825458257f369938707b556a0d85e553f497457e3b19b94d31f50e637d31415b4a3c522e87e129cb3187fda068e678f5fc31e7af18fe3ecd15f3dd844bb83a0f57c8cb32ad74f9a29de5e6733bd681f742ff66b8405bf8ae8c49644b63cf7066d93d3d4f755dd03badceebaa315b67811de3f36d047baf4a4d520ab76d1952eeb93a3ab31c54abfc763102c57f51a6733ccab1686aa6cfded9c450e930072d0ca9b8aeb69648f2de9302fff972b6382631db24ab9d39d2786ad56ce2b308aa2e563621427de8f0713cd653fb176d5937f2e47850ca6d016d6192390e755fd912878724db7b33129f6529fb78d1279923dd21d11a3e7b78b74c680cfbcfaa79d4388d46d46852de566b14e5c01b8cdc1d1afa7ae02200df2f2e5b05b65066fb717c8bbb9e84a6891895e12c98c52c43b639a527549fb050b40cf24e7f4aa2df91fcba2decaa6081d518161483bebe624b2a457ede5411d4b3b258954b86c2ca9b7475419c633743f25cf50b62cfb5abffd5cdc50ed6d1a4ecaae35cf961a67a7f1c7b7326252f0084cfd39c0298a844c9d92f79209d64fd53bbf919152fbac9d08af449d145e245364efdb32aa632cfabc0fb7b899d3e14f27bf2f0b76575bb99ecd84133ddeaa8ed19776d21be589bf50985f733e6edbcabd3e7ca7c93cfe19fb22af569e66791c321ab23ba421d82f85b5151d7bee532596c8bcf70a32edbc8044a86a6703dbb66925ce8cc5a9dfd29a44b75696108d1b5300000000000000000000000",
"396eb67cbb302365e81f5bcecabc6a80f1c2419de4369eec77b35421e777ec63595c39ec77d4b1db6aa06488839e8a5daebd750537e2b136582cdba9b1479a6402ccd0c6931face2e1bf79d8ac5bbf34751793c65124542fd1b0f8aee699c93accf187679ce809f1b63a18d049bb6d626f4e7f7495b85487a3a9b6e7c9ae67c33b8bdb681f984eb1eb44e3a3ebddf85cb00624d4c6de2efdcd69e498db8e3956e818e7d29c8612f85feaddc65ca635d91b82fe7132effde5aa6f108bad1e3dd5656b6ee2d0fd482ad54c355f00fbca12e9fbd6dace18ff5c2100370e8a15c0925298137ad2c335b2e20470d86ce3c1bb48be93fa6b47ba95ff2f8df54113566ffd261f4a5c117fa46f66ffaa1ece7b5ddda7579e68d40e67256e7bcfacc050731ceeb6d6a5f764e4ed9cd963409c8f21ee98f99259474a557abfef9635ef2ac91b9b7e7660aca456d83319bb414a81d62114367b150e11728d8a3f3b9fea6fd0733a9b0335f1cae90cd230580712689ab03f9c56e50cac6952c4a3922a6a6368794c26bd9bcda250568d45df72158dfc7651798eb3d1ccbd909c95f1362607a30294b639136ad75373e479134f3092d5cf4573a913a45e4dee70a2f06b2f077150e3b4cd0e8d19517406b44d9a5a8c9ee123497fb514ed7c71330c628685f735c7a2d3a4292dba5a8cebe0160c55fdf3e9e8afe801719aa62db8e29b73ab1d208eeacccaa91b5e7efe9bd09cba32c2048039e8b9e1c2d66e54a84f5de9af6bf5cf04074d6b8169fa0ce29ae2547936f32acdcb0dd3a6f7c07a9788dd75c4d3bdfb3fa387b0efcfcd5caaed5c809d5bed15e3ea6eff4c769883a7cc62f9b8565a85f4d95cc8aa0484bcd11b7fde224567134b475a0a3c5f1380bee76cd9838edb4469285af0c2752bf52b5cd7b2a6d7e000000000000000000000",
"39a3040e1e17129693200dd7ddf41b55e3c3bc64524b86b7c30f77d1897c53aa06a2663b71f7874b7d5393408dbe6163dbcf9aac4c13a748b1e6ef9a7f52a567b8c6281d448d00a266d895cf9a462e520699b22a4bf76e645f588f1ab07e70fa988c5d41c6a3700c9205b630f8d431e10c03bd2c9ca5ae1757d12cbb0a63b6ca2edf5340743154c8b8b44f1414cddece5820fd75bf03da6c2c2e2257a14f78bbd4f999671b999c05e29826a939c366e199d756150a8bd6db2745f68d1a9d0abca93eff14c143b94fa50fd7af41968eee278ce415057097bf66ff3b7ce6e9d3249e9e222d7763faf1b2ca3bacbf658e37011550b6390cee1e58876867198fa2ebb740fbfd19b4622f69f7b5770f57ab924b6d1d6c2192e759f68967618336be5dfd7d6a5ab84dd3ae95a0b23a31da65540c2241039860b2e631a44b6c3296171fc982bd8f9acc9cfd166c4e6e3368f27a70456692b0e43f26319cfb6ac6cee088d9ec4607de3c6ed5413e797c7519c445cef417953288d3dd98044b77eb5d32f18bad74c10ccfe52d4a54bafc674f33c64d2b12343b6b277729f60329013d9e0b55eee097468ccd2f423050270fb19af8c298069e606c56ec18f959ecdc9733510bb6fd500b06a1f9c8d7d922f47faeacc1b83e15daec9224c0e7ed0ddc272c9c6f92398b704898e1d4c72718d7ef46c6687a5a84be9c3216772d93a2a9f5c77b21e75574e6767f5bc8d9ceed47c99aa47534f2aeed56bec7f6b0f6cc0a3e20ec8465030d9c67c321cf3b5c9d02ea5523af629170ebe8ec70744b5303fc66e67c6b9ae6b1676fb43ebe7ad1543acedc1ea6d41588d571891548ee24c6beaf4a26a359521f352176753588afb116f22c86ae606579081bb3c5ab9243ae5c53afc699063be8713a621bece46aa72320000000000000000000000000",
"39fa7971d10c07ff2f265fd449591aac4d48cf198a8b58145d2fd0a5864b14f2daf90b272f282484eff796963638fffdb3c72d5bbd9e89aac5273a5707b0a29428fec3b586d7c0602e27cde8d0297386e0f2e9e558368d0a3d1c8dfacac8dd9fdd1d6d56faf02089c20b887d60d146f5cca492655b5ce1e5274c82a98665b66d2d476cc126d33adb2a8c9929249759cf4fd144e30ae54058a4f1803f9c9455ddd8e2a007df512a31d96f8d33243dede21e6eb8f6bdadd9a3653379bf3dca55a1aa33c85bff83cb3194effa3e67a6254a4537d5ae49b2eb3d879e0f9c1702c7bcd3a7e72bf8c2347eb4410ddcd8fabb7372579bab0e219a203a995ef3ecac0da461d0b3481d486765cce440b58125a2fccc30d064dc4fc3b2fe2e31ac5a0c74eac34300cd14d9cab1ba3206f8d56f77e5113aa596ca160cae39cb034eea7a499e6236e7aac94b22cb4694797a5ae0088997abea76ce4c4b401af3b07409d16e9952a33aae7f2f3520433b75cba34b186938103eb719b1d4bbf1d22cac63a1e4e53a4389460b80420a0e1b3891ab113e62ae92e0b3c75d644713f673dd36cf6b27ec3c5f9f3b6720d42babe12470a451b489a563d8c5e622d47eae11643cde922728ae68a943076be2ce5b7a2c731fd5bacd95fa47e5aca0c86e9b64389aa9a64b65aaa7a28ecec4a1770601d099cd100829e8d438cc72af867f3831a7bae1b2c3bf5f7e8ba2bc6d1494590267af6e76b991c1e89d965a0426ee965799ac54dfe50fe78529f98b4d0522545351ae4cd62b9a92f63d90b9e392eaced119720c9d72d17914fd5a4cb0f4e14af1762a39be3aae98a698ec56d1d28937ee5d722ac42c51d73d8be8a7082dff6f314057fd0e5b5a6391c682c291b20894b1c2843636377d853581a339f55c651ecc8a41f9ff8e9e104000000000000000",
"39aa4cac72b666b9847d138ef6f0698fe9e58317bc6e2d1913240da31cbc639e182165f25bfbb16549c495abee5ef1437eacfa6b14c8d6afdfaee611c9c0bf2b56ab40eea44163ba3072ca498a6265a6a4cda6f17670ed51ec3dbefc97174aca4ea765c208ad7865e7ab41df4037d810f637dc10a831b4923ac29299b903bca99e06e65d1445f478494f9757e014043212c2e4f26ca225046448ca9bc434d27615989b1dccc3c5aa8b31321495a0a70e3495b0b7d86d88c6b1e2bf28e81a14c6e519aaede3e46ad26a0f0a21a6a21b195f07e52bc01985b3a450edb66c1e2864b98fcab2f0f34cbeeaf191dc160e2ee9073cba960e6c8d67238e66d2f29146beaa5a37e936b13fe376cd5aa2f372179f586bf0180c02a3d2c2f39ae8839a8fddbfbc48c35192ffdb6c6cda34b01509ef50c5c99a651ecb67ffaac6b13daaa641686b2cd142856372346dbf508bfb100476324494b52e9130404d54315f573fecc9489645a26248668978b9a35156fed9c88c304f951598bd4311924cf8311b4e5fd35de5d6d9de9c0e0185d4149df5f79f12e1c1bfb009b44dcaae704b5ec1aca1ac363c5216ae3dc4c5682c9a56ca1abe222d9da718d2e87b6d3217b7e0105c1d11c5d4f26b7b6d53086514025a52bcc7278dd5cead321ec5764d4d7cc8177134ab125541b290268d7c18d13cbbca1271e18ee73a04b307c269a15522daba23d004648ba8d84de4f72f8a5015a80be91e863188549cdffc92df043592b44ba7a926b67add310fcf0326a04f9e9c6ab7bd5f59295e7d52395015216fe4dcf0719cf867eb25df8b30c0caac261d1a2bbc7e1f879ed71edc8a77832916b5fb7ae4b1e867d9aba2c02b6c43f1f7b8c07810dcb1c9219977e3bd9fde324a82bb91bdf2f2aac6c139c9bc2539005f6e46ab4480000000000000000000",
"3942143340bb245de675d10769dc963a1a3c190360b60f6dbe3d6c5915bc72aaeb0d219ff0d8a7700464854b953e024e368bd64aba8ae05dee5ad1b9b1765334cfbf1a2407c2b0448446170ed9adafba2c54de08618bca21ee3a17c710358d974408b2fcc0b12e1bd1d5cb4832e476bc843f4b9b8710ba23309fb187895834b9e4558a8e9f5bda0921e06268ec0194fb294dca82820bb791d28461572e488f4f77642e904c3af007e5d9b08a1464c8a1b4de9c5d89e23ed7d4716e72311395b7fa705101e94aa2d436d127494a6189b042f1d29b3a16c8f464b74303884c0cae8537bbde91356d11446b13139cd4572f8771325614f59612443c90f24b85f1381f944bbbbd83ae7f4826f2791e19a7131b74327d3b45c5fc60e529943a0a60100925c6788e7aeb4b5d33609934cbc6d189c357d0d2831c99dd72ae0a0b9443b5d2c6dfe787c9f0eb32ca14b4c594ec325d32e0719f6eb997c9e3d8430b378465d17575d2ff1068ea86c4bbed5a4c7b278805c8a7eef2758d6f53fbbed6636517241f159bef4d9648b22de57d7e49a55be8f94db14c892a211f083b12687b6f846588569305d2c7bfeba7b077935b98527e98f9743385662078a41b0e6fda318f6f19fd0c227b19e0c1939f24ba14fad5d8adc72b94ccb7aad27086e721fb0d4a9d4cd8e762c577b88e9cc7d29674a1ce46573db59ea6dc82091a5eac7f9ea77ec4e7186d120497342e0e08dbb1cf4d857268d24c9f0253558a28cc975796c3d68f2f788962101318d3214da9b8409896450244b4712a03d10b4d656d0701c376551e42c57760e0e4e330dcf8f2f7bdc91b9e4b539d8beee86d510e45f0d7f5328e258214609524c128b0c514cddd0b0947acc75d0539af7c940dd33068e9519c4e59967fea716829c17377fd6afcf64680000000000000000000",
"396ce0802634c44851f6aaa20b4494967e4b7dfd7d9a54311232fa9b41058394bbb1b940f3feac871022da639d2c01caac10c93311116e757785c32997c2d9f5711269a8db62925ce1f4adbd89fa965474b2654307d26e54f849ac8b6e0ad609c9a12e091f3e035eeef834bdb31bfd5aea71a917075ac81974ea68714ec317238ff9d2520b77ba2854bbc6c9a7608cfeaeb673a1451d949950874358c4d909e4350c60082c0886a80c13725c9d2f3d55224f5644a9a736265f04faa5872df088f4f96e26d5a46d1648b30709dcc9849b04f673f149e981fc5cf5058170f974d3e7b21f8ac2269f6db13544f4925c12e7cdd1ccb70890a0e28a4d9f6bb2333cb600cf5966d7f429a22c62005472786c5528e0430cd6388fbb89ca0b2f1945128ddf88587331d6bb41d0634a52011338b90779277eee08133a82230e1b24aae65b793d18d73b5e51e7f12cb6396ad1d9927f1aaf440d526ecde9b996a2dae31257a516a84c84dd9c05862fd9625c96e4d92a733724cca430694d5623475bd643a2c73616d639f78942e0ea64654f5f67ebaf3f0b072279b6c14f6c3fb2bf9ca91f48fbb012352635475f1036659cae51f1de8e052d0b70a21986a5f6fa51a43344e48ec6f59c84b189ac9cfcbbaee16fa0073b9c8c2a6d5998dc2486c673bed41388cd520da0cb9e6ae271e188535337add11d1f4d2e1264d45c19a56ddc9e77d20f6466d0f41a81b0b6b6ee4940d3751479abca5b7f249608d0f17e1a1e1f157df944a590833eb1e2e3271b65c390cd2d17cffaab31d424fc0331b494324deeadac6fd2045cc62ecc5c87bd79db16f306677a4ec359e2cecabaef4efb9461de940774c5125f08f0b6a59900bcfc4ef407c6aa6cc8b8dc2e55aeae5b2f25fe9acf1533334c84ca7b558ff45efcedc2bebb4a4e6200000000000000",
"397debc135313dbd031dc06b5b3c7219ef6bd2ae1eddda47026b2efc71a4260a157f565602e39820702870f7f714c8d8d8f76c3b549aaf513bb4dba7ab620404704d1a17c8cdcefb14c341c7997e90f227add8476664813f8c2b78193345ea4c48a549097c9d4b757efa76a2c84c4747c7f8d49223298c68f77871b99decff52088f0e235ffca3081c5a38d2a3c69bbe8acab4044661ff5e5cec8d2af6e9771c29bee2c919242f1a36ae5717fc22c035bc6767e49bcc5126a735028ccba55e9ca35675b4fb9dbab56d3832f94b0d7d83c57128b35682188b23cae3e229dc5de9d782117222ae68e97ecd122bbdcca7a184d84a236946f6148cec234e1c5bf1454b0d82c7e0ce251297beeb02bec1b1489282b61ed60988d0a9eb5da3757d967cd355388463b457a6a3f9867efd1cee9a01658ea4156e973f0d53d0e6a7944dec008050d5f455d545900286e5c8153bd396b8bc93ebdcacc9ab57664f14b1593676162f4367dc7c2378681a4e861d06ea30ac382b8c49b691b3dba9a5cb2a442bbb6d56bb71b9b524ad113034ca9339ad921fc21a91672974272c8de41ff8593b4bdb77866ccadcd026d34517b1166c57d52f41d9450b14cd60e754060edd2a9c7ff405751352343f670f82a2ffecf948eb47b9a7e5c20b1269bf7b365cc3a28832a6c956cd6ed2379994e6345e7a677f3882922b062fbd9ecc93427db6d1a4ca1f58c56aa2bdc9e54379da71a050c307bda25970db43f7b0419be65cc88dacb35b5d709003296d21259f848d3170c86c1fa4a79b2d8a59e9ed4c2af4f43707b288ffe2d30325a345a32498e07d59356b02a5204a7a1ac31ced81854633109e1f0f088a9947e1177392863a8673be2c8ed234d0ac3012459cc99ab454a42cf988d0d7546e760e7b475e6bd59cf3d2400000000000000000000000",
"396ef355a73884a445da06224862bff01b287645d9d7f5a187302b60dd511a377cbac676d9711835365bce36f14442da80ee3ab713d30aad4b90378fda5d0b1567cb6f9b57df363edde062dada21c547aef9ea9dd9a308fc4638c33129f5ae8fc46291a1f659bf480ac09c2238ff8f469e8aa7938a64da8faa85fca2d048bd659e2a5ad63d53cd380749d0bfed523b293868e78e4cb96418c46cb3eda764868dc688fe1ce773a48e48f3e9072a0cc4d4366b7e211f9b4b382e460e9b475bd999ad718a927ada526da52e867300dd91bd01cd6d8e64c1414358249f0df8fffaf2906ad669c4d86b50e89a1ab44ee44b22353842e3286f412ce26756d6d30084f1f19cbeed013ebcf958fedf31f59643d3b5b322da109828eee6c964b896ce5446ab697c89be9bec9097b44c891ee528e4689d10c26e649874518066d4f4148313e6caf12f4454bcd791864305b3d7e0fec8b1295ba1309893e7c706c2c2ad9a67063e94fda738e8073114e37e7219fde753792973940674c1356c5625ee45f2b1bd7f97b7a41519640914b49a5cf3dae96e989e9d29289abe93ceb4a5c785992a6d84dc259b19a5e78523e0a7f4170e4365485a75f53896a30ec24150784ce1912659071e1d0b9a224d817b9eb35a034daa67ff1115c94774ee382dbfee614d42de95f65c4c67d3bdd1936559c24e89b1d27be6c33b20204f06ae70cd3d636c929c393244f9c2fbb3a69e10ebb80e7f6e359b4959291babb558a5e872d19e217485c4af42b2398477c3fcc26cd75a5c3cf0a28c231950dbd0795edb1644cb2104548439b44420f944130fb28b9588197c092dfc2870fa9d5e34e7e0e04adaf3c3ad4edb4330751e76e11f211b05e23f197b1527dccf74e3c44a248298431759d564bc4f4fa5552e3e442d6cbbb45fa8278a000000000000000000",
];
/// The polynomial test vector signatures.
pub(crate) const EXPECTED_SIG_POLYS: [[i16; 512]; NUM_TEST_VECTORS] = [
[
-18, 23, 128, -18, 155, 226, -57, -111, -73, -186, -55, 101, 332, 15, 348, -134, -48, -101,
231, -285, 226, -132, 63, 155, 240, -324, -105, -29, -228, -35, 135, -381, -188, 185, 24,
106, -169, -18, -299, -123, -88, 38, 183, -20, -9, -100, 299, -34, -148, 16, -173, 168,
-149, 403, -117, -14, 30, -3, -106, -126, -37, -72, -138, 304, 103, 125, 51, -12, -28, 76,
-52, -35, 55, 271, 203, -198, 230, 125, 99, 65, 41, 226, 48, 150, 8, -26, -158, 184, -30,
127, -79, 159, -66, -34, -93, 167, 51, -53, -57, 144, -50, 408, 205, 78, -48, 196, 58,
-244, -66, 31, -193, -264, 136, -104, -173, 22, -290, -33, -81, -202, -36, 73, 221, -157,
-95, 183, -119, -101, -29, 4, 124, -47, 25, 20, -368, 5, -46, 86, 242, 209, 54, 131, -6,
176, -179, 177, 135, 8, 331, 178, 66, 108, 110, 170, 6, -97, -309, -187, -197, 67, -2, 23,
228, 118, -140, 160, -360, 25, -180, -75, -109, -119, 19, -163, -157, 0, -144, 41, 48,
-377, 234, -161, 22, -341, 58, -205, -230, -202, 8, -470, 16, -6, 49, -67, -92, -294, 107,
34, 6, -43, 21, -309, 89, -168, -6, 337, -317, -157, -146, 43, 106, 110, 98, 105, 15, -59,
196, 105, 30, 81, -11, -47, -6, -300, 167, 2, 177, -26, -385, 124, -158, 195, -187, -326,
247, 67, -110, 211, 234, 35, -7, 59, -398, -47, -41, -182, 45, -32, 88, 13, 72, 8, -122,
-95, 141, 69, -113, -34, 82, 15, -25, -256, -33, 200, 82, -273, 374, -151, -71, 158, 175,
293, 100, -130, -41, 20, -51, -277, -90, 138, 139, -32, -93, -142, 76, 334, -30, -188, -24,
-99, -149, -79, -170, 100, 291, 127, -185, 389, 149, 51, -422, -191, 243, -122, 185, -207,
-15, 273, 67, 2, -19, -347, 298, -111, 9, 403, -84, -118, -219, -50, 132, 121, 200, 19, 93,
-178, -87, -299, -201, -50, 484, -87, 88, 4, 114, -77, -302, -201, -286, 27, 3, -160, -55,
37, -146, 212, -115, -154, 4, -76, 25, -42, 4, 117, 73, -272, -34, 61, -243, 180, -143,
-153, -1, 273, -31, -187, -121, -37, -147, -227, 41, 182, -26, 306, 59, -46, 61, -97, 238,
80, 149, -370, -235, -16, -56, -241, -177, -50, 152, -34, 77, -6, -131, -248, 16, 118, 170,
187, -70, 42, -42, -107, -45, -392, 162, -129, -59, 14, -204, -23, 17, -68, -30, 85, 60,
168, 72, -90, -39, -170, -75, 33, 207, -29, 56, 53, 100, 53, -35, -230, 122, -72, 0, -80,
-60, -260, 47, 92, 82, -135, -112, -147, -166, 8, -21, 378, 164, -107, -100, -148, -151,
-181, -217, 331, 189, 45, -82, -125, 133, -46, 52, -277, 160, 128, 14, -274, 97, 8, -117,
130, -143, -148, 20, 103, -324, 34, -170, 18, -195, -129, 302, -161, 39, -22, -195, -33,
184, 32, -33, -34, -13, 283, -239, 121, -248, 43, 100, -210, -167, -27, -204, -278, 241,
246, -203, -20, 144, -207, 80, -12, 132, -27,
],
[
6, 211, 76, 173, 266, 230, 186, 149, 157, 1, -164, 157, 21, 293, 12, -276, -98, -46, 178,
-222, -175, 89, 180, 127, 243, 95, 84, 261, 255, 59, -185, 64, -271, 28, -57, 119, 157,
-150, -85, 59, -135, 53, -52, 141, 47, -65, -128, -107, 228, -13, 20, -1, 284, 171, -112,
-1, -58, -51, -23, -384, -169, -82, -242, -9, -26, 184, 218, 147, 243, 216, -8, -5, 246,
-246, 82, 109, 134, 293, 14, 26, 93, -150, 78, 71, 90, -15, -227, -14, -16, 134, 42, -112,
114, 12, -228, -28, 4, 235, 82, 156, -332, -94, -8, 86, 44, -10, -310, 113, -29, 329, -22,
-193, 102, -56, 6, 295, -11, -117, -168, -52, 15, -243, -21, 7, -134, 47, -84, -139, -162,
-55, -469, 67, -88, -119, -231, 94, 92, 10, -188, 171, 49, 210, 49, -329, -20, -169, 226,
-220, 257, -9, -341, -63, -192, 248, -115, -101, -42, 50, -178, -15, -76, -124, 117, 6,
-160, 112, -333, -267, 6, -158, 47, 148, -112, 174, -57, -58, -155, -221, 189, 198, -12,
171, -23, 228, 154, 138, 253, -135, -143, 187, 236, 74, 175, 185, -155, 54, -23, 205, -38,
-46, -287, 252, -196, 156, 73, -45, 196, 13, -101, -14, -27, 137, -32, 65, -252, -62, -241,
-18, 138, 104, 243, 179, -155, 24, -110, -243, -213, -37, -233, -104, 74, 249, 223, 110,
183, -7, -42, 166, 55, -149, -291, -20, 105, 155, -100, -20, -227, -92, -141, -73, 13, 231,
-132, -84, -103, -117, 344, 376, -550, 184, 488, -26, 111, 67, -10, -12, -56, -28, -102,
176, 85, 17, 230, 253, -232, 55, 5, 198, 240, 13, 159, 67, 72, 66, -141, 6, -114, 110, 107,
386, 139, 260, 23, -207, 9, 264, -87, -141, -356, 120, -80, 37, -10, 36, 59, 184, -175,
434, 96, -289, -201, -14, 219, 224, 224, -123, 33, -140, 91, -124, -230, -73, -154, 383,
-21, 116, 178, -208, -7, 139, 98, 60, -18, 99, 166, -118, 56, 1, 119, 3, 162, 115, -181,
525, 80, 26, 146, -6, 178, 100, -153, -10, 87, 68, 401, -27, -148, -228, -295, 5, -196, 20,
-367, -154, -16, 83, 288, 110, 41, 451, -175, 24, 83, 206, 163, -26, 64, -51, -118, 315,
-132, 47, -78, -251, -83, 321, 173, 170, 57, 25, 155, -51, 53, -276, -69, -68, 199, -32,
95, -11, 176, 85, 171, 18, 75, 224, -199, 306, -385, -25, 434, -9, -20, 129, 165, -57,
-187, 100, 370, 131, 66, -275, 227, 197, 141, -109, -202, -98, 87, -54, 22, 202, 253, 59,
-327, -243, -249, -89, 187, -10, -175, 40, 196, -59, -20, -56, 23, -296, 83, -87, 84, 75,
-148, -174, -28, -215, 229, 120, 59, 275, 114, -52, -13, 331, -204, 253, 329, 156, 59, 37,
39, -329, 102, 173, -153, -198, 58, -63, 8, -229, 197, -44, -259, 151, -200, -7, 368, 12,
-132, 27, -60, -242, 157, -3, -181, 3, 75, 94, 22, 94, 35, 107, -205, -1, -75, 65, 87, 144,
76, 164, 35, 6,
],
[
-12, 116, 55, 58, -191, -97, -27, -51, 358, 250, -17, -321, -35, -132, -47, 165, -119,
-178, 129, -201, 130, -188, 33, 78, -80, -182, -8, -168, -75, 213, -104, 99, -257, 21, 199,
111, -116, -39, 101, -157, 56, 151, 1, 68, -157, -215, 196, 97, 309, 51, -314, 250, 28,
-135, 156, 98, -316, 81, -26, 293, 56, -58, 338, 27, -38, 378, -61, -340, 416, -90, 50, 83,
-133, 53, -4, -47, -17, -77, 102, 130, 88, 12, -29, -14, -63, 53, 228, 132, 29, 134, -102,
-39, 259, -204, -233, -53, 18, 31, 198, 25, 51, -205, -101, 428, -101, 158, 62, -166, 209,
-128, -139, 160, -207, -263, 129, 146, 159, 13, -87, 280, -128, 35, 254, 13, -77, 177, 167,
292, 158, 258, 146, 121, -199, 60, 241, -98, 206, 308, -270, 73, -8, 119, -52, 183, -105,
227, -8, 285, -348, 320, -121, 54, 97, -29, 57, -84, 124, -176, -234, 80, -48, 325, 128,
188, 179, 237, -6, 17, 130, 16, -79, 2, 61, -59, -30, -113, 106, -72, -3, -381, 78, 74, 28,
-135, 89, -256, 71, -114, 92, -555, 81, -91, -116, 183, 46, -20, 168, -32, 155, 68, -272,
-8, -288, 78, 260, 74, 2, -47, 16, 265, -36, -55, 45, 99, 265, -93, 73, 237, 332, -98,
-196, -40, -128, -61, -44, 214, -177, -146, -104, -320, -158, 83, 117, 80, 29, -11, 206,
140, -48, 133, -284, -139, 187, -222, -155, -373, -199, -59, 83, -50, 48, -69, 176, -124,
-47, -149, -81, 245, -7, 24, -75, 83, -184, 129, -112, 150, -316, 116, 117, -56, 287, 43,
117, -126, 96, 255, 7, -156, -19, 213, 75, 358, 52, 223, 70, -131, 144, 92, -156, -263, 3,
33, 105, -80, 119, -370, -18, -55, 58, -344, 181, 75, 13, 179, -91, -257, -168, 8, -51,
-208, 32, 235, 28, -52, -373, 227, 142, 12, 115, 22, -210, 9, -27, 155, 90, -11, -94, 167,
282, 253, -48, -342, 89, 103, 11, 191, 152, 79, 25, -28, 70, -20, -161, -218, -10, -3, 20,
-355, 265, 93, 1, 125, -154, 3, -1, -190, -298, 211, -156, -68, -61, 50, -346, -129, 156,
-195, 151, 70, 254, 202, 175, -315, 30, -138, -49, 107, 154, 144, -54, -98, 33, 8, -125,
173, 209, 114, -117, -186, 47, -102, -106, -231, 358, -126, -119, -125, 52, -139, 95, 31,
246, 179, 247, 169, -192, 59, 261, 219, 108, -237, -43, -66, -221, -240, -215, -21, -78,
-266, 166, 23, 74, -166, -46, -359, -117, -4, 226, -23, 5, -339, 155, 238, 155, -5, -118,
0, -31, -143, -205, 54, -264, -174, 40, 151, -60, -29, 277, -57, 16, -58, 253, 142, 96, 7,
14, 268, 249, -332, 123, 344, -113, 20, 21, 21, 349, -230, 145, 203, 135, 107, 111, -56,
-14, -313, 29, -7, -426, -14, 4, 25, -197, -153, -150, -115, -102, -27, 179, 312, 231, 16,
-31, 158, -87, 307, -191, 65, 107, -50, -28, -181, -223, -62, 67, -124, 117, -66, 213, 30,
57, 23, -126, 88,
],
[
86, -175, 177, 91, 1, -234, -101, 106, -270, 27, 80, -111, 52, 25, -85, 29, 25, 163, 352,
-123, 175, -21, 295, -208, 5, 182, 215, 167, 213, -16, -42, -123, -231, -121, 2, -69, -64,
-219, 60, -60, -184, 247, 123, 166, 156, -136, -220, 45, 439, -41, -431, -44, 519, 191,
224, 149, 177, 303, 75, -94, -4, -103, -100, 25, -130, 209, -130, 95, -155, 178, -67, 7,
106, 90, 6, -5, -74, 79, -36, 244, 95, 29, 25, 114, 52, -15, 80, -204, -62, 177, 133, -52,
71, 20, 151, 15, -4, 206, 358, -7, -251, 131, 28, -286, -107, -112, -271, -107, -140, -252,
-246, -34, 124, -110, -132, 46, -65, -32, -106, -370, -150, -149, -46, 190, -34, 59, -23,
51, 51, 122, -160, -247, 11, -379, -430, 129, -63, 21, -419, 37, 162, 236, -103, 240, -155,
-19, -39, 83, -58, 93, -160, -221, -57, -58, -469, -10, -182, -224, 29, -71, -77, 360, 30,
-87, 202, 181, 32, 86, -219, 153, 75, 117, -19, 199, 89, 156, 82, 95, -199, -140, 2, -10,
-253, 154, -28, -158, -149, -139, 141, 211, -31, 251, -156, 276, -210, -256, -203, 12, 211,
-197, -86, -37, 164, -101, 250, 304, 127, -249, -74, -49, -130, -140, -347, 165, 115, 206,
-210, -97, 86, 86, -156, 89, 8, 212, 114, 227, -136, 194, -119, 71, 391, 60, -44, 79, -88,
118, -299, 55, -101, -17, -66, 12, 77, 192, 237, -134, 35, 33, -157, 95, -306, 168, -97,
-146, -54, -108, -24, 41, -108, 202, -251, -227, 146, -230, 35, -186, 14, 17, 71, -30,
-188, 110, 49, 192, -159, -103, -170, -231, 67, 17, 81, 362, -161, 45, -74, -26, -138, -75,
128, -56, -27, 135, 26, -116, -107, 385, 128, -190, 249, -203, -2, -182, 65, 55, -439, 124,
151, -92, -232, 41, 68, 24, 43, -4, 100, -140, 459, 14, -49, -154, 73, -170, 191, -2, 139,
259, -100, 57, -250, 212, 111, -17, -249, -209, -61, 42, 176, 3, 212, -129, -261, 135,
-117, -358, 37, 170, 21, -118, -229, 132, -84, 231, -18, -137, 82, -195, -5, 170, -183,
157, 65, 56, -12, -442, -377, 115, -168, 236, 103, -53, 87, -255, 92, -56, 67, 235, -163,
167, -149, 113, 92, -114, -6, 51, -39, -99, 30, -57, 294, 169, -480, 9, 191, 57, -128, 204,
80, 275, 315, 175, -100, 4, -214, 191, 83, 119, -228, -145, 75, -221, -19, 322, 94, 146,
-209, 23, 274, 77, 167, -251, -153, 84, -12, 103, -43, -1, -109, -220, 51, 79, 138, -100,
-111, -23, 139, -217, 117, 375, -30, -539, 4, -30, -94, 469, -346, -151, -219, 33, 124,
226, -191, 194, -5, -238, -287, -187, -60, 87, 79, 190, 79, 36, -103, -97, 191, -145, 94,
218, -230, -158, 284, -16, -299, 29, -292, 14, -2, -112, 237, 405, -87, 125, -20, -18, -22,
-145, -103, 112, 198, 118, -60, 0, 18, 195, 76, -64, -109, -182, -36, 174, -81, 177, -41,
-63, 202, -164, 45, -171, 300, 8, -163, -41,
],
[
-160, -274, 144, -207, 20, 118, 117, -215, 193, 55, -197, -9, 101, 5, 54, -84, -177, 158,
-166, 0, 230, -33, 154, 49, -117, 56, 112, -191, -231, -10, -139, -95, 180, -212, 377,
-355, 68, 34, 66, -378, -48, -241, 375, -166, -329, -214, -286, -135, -158, -78, 257, -113,
236, -80, -653, 38, -219, -172, 55, 206, -125, -36, 91, 10, 33, -81, -41, 109, -287, 53,
-25, -97, 59, 23, 109, 64, -121, 9, -172, -235, 19, 413, -117, -93, -112, 114, -256, -265,
-41, 155, -98, -95, 243, 105, -329, -13, 113, -142, 86, -336, -142, -116, 206, 268, 47, 11,
-122, 110, -198, 114, 49, 93, 163, -193, -124, -68, -151, -127, 249, -170, -188, 8, 117,
71, 30, -213, -21, 219, -220, 232, -122, 32, 86, 84, -6, 87, -128, -119, 168, 46, 191,
-235, -53, 184, -15, -107, 136, 256, -184, -209, 10, -64, 36, 202, -257, -189, -37, 12,
-45, 302, 258, -195, -6, -156, -96, -187, 34, -116, 63, 205, -35, -58, 43, -124, 252, -62,
208, 147, 89, 127, -210, -7, -293, 240, 23, -116, 27, -179, -255, 80, -108, -207, -46, -93,
78, 222, -102, 26, 3, 51, 165, -57, -222, -117, 432, 65, -24, -78, -86, -53, 82, -247, -19,
167, -51, 182, 227, 130, -72, -356, -110, 49, -230, 37, 40, -82, 42, 122, 127, -190, 227,
123, -149, -274, -57, 111, -29, 176, 217, 34, 109, 134, -12, -187, 261, 336, -342, 8, 195,
-30, -10, 398, 279, 70, -266, -121, -57, -253, 55, -208, -76, -84, -304, -26, -113, -21,
-292, -154, 280, 480, -68, 180, 181, -1, -121, -10, -185, 12, 88, -165, 172, 40, -457, 211,
204, -308, -357, -4, -175, -27, -155, 18, 133, -163, 69, -62, -200, 88, -63, 29, 40, 121,
157, -30, 28, -23, 356, 313, 479, 310, 304, -104, -129, 169, -49, -273, -181, -46, 77,
-287, 158, 147, 60, -4, 173, 115, -34, 115, 338, 186, 23, 38, -238, -66, 23, 6, 101, -65,
-56, 80, -199, -166, -33, -163, 153, 69, -288, -429, 182, -37, 81, 39, 112, 163, 37, -125,
209, 187, 252, -68, -152, -140, 195, 11, -92, -302, -104, 105, -292, 201, -183, 173, 25,
-47, 0, 96, -10, 255, -115, -83, -34, 255, 256, 113, 181, 177, -439, -325, -311, -85, 285,
132, -221, 230, -149, 72, -53, -79, -63, 77, -80, 185, -81, 300, 130, 128, -207, 279, -97,
-5, 89, 114, 74, 9, -215, -105, 94, -47, -174, -96, 1, -38, 107, 2, -39, -208, -156, 205,
-68, 81, -73, 367, -149, -27, 300, -186, -211, -111, 1, -84, 120, 27, 221, -68, -167, -111,
-51, -244, -67, -48, -95, 319, -43, 42, 118, 92, 257, -85, 125, 197, -99, -84, -59, -250,
-14, -294, 135, 190, -140, 124, -184, 217, -168, 381, -50, 371, 149, 130, 137, -102, 17,
111, -119, 273, 21, 56, 52, 104, -86, 389, -98, -369, -192, 125, -29, 102, -280, -199, -54,
17, 201, 139, 248, -4, -84, 95, 82, 107, 53, -217, 77, 95,
],
[
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | true |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/dsa/falcon512_rpo/keys/public_key.rs | miden-crypto/src/dsa/falcon512_rpo/keys/public_key.rs | //! Public key types for the RPO Falcon 512 digital signature scheme used in Miden VM.
use alloc::{string::ToString, vec::Vec};
use core::ops::Deref;
use num::Zero;
use super::{
super::{LOG_N, N, PK_LEN},
ByteReader, ByteWriter, Deserializable, DeserializationError, FalconFelt, Felt, Polynomial,
Serializable, Signature,
};
use crate::{SequentialCommit, Word, dsa::falcon512_rpo::FALCON_ENCODING_BITS};
// PUBLIC KEY
// ================================================================================================
/// Public key represented as a polynomial with coefficients over the Falcon prime field.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct PublicKey(Polynomial<FalconFelt>);
impl PublicKey {
/// Verifies the provided signature against provided message and this public key.
pub fn verify(&self, message: Word, signature: &Signature) -> bool {
signature.verify(message, self)
}
/// Recovers from the signature the public key associated to the secret key used to sign
/// a message.
pub fn recover_from(_message: Word, signature: &Signature) -> Self {
signature.public_key().clone()
}
/// Returns a commitment to the public key using the RPO256 hash function.
pub fn to_commitment(&self) -> Word {
<Self as SequentialCommit>::to_commitment(self)
}
}
impl SequentialCommit for PublicKey {
type Commitment = Word;
fn to_elements(&self) -> Vec<Felt> {
Into::<Polynomial<Felt>>::into(self.0.clone()).coefficients
}
}
impl Deref for PublicKey {
type Target = Polynomial<FalconFelt>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<Polynomial<FalconFelt>> for PublicKey {
fn from(pk_poly: Polynomial<FalconFelt>) -> Self {
Self(pk_poly)
}
}
impl Serializable for &PublicKey {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
let mut buf = [0_u8; PK_LEN];
buf[0] = LOG_N;
let mut acc = 0_u32;
let mut acc_len: u32 = 0;
let mut input_pos = 1;
for c in self.0.coefficients.iter() {
let c = c.value();
acc = (acc << FALCON_ENCODING_BITS) | c as u32;
acc_len += FALCON_ENCODING_BITS;
while acc_len >= 8 {
acc_len -= 8;
buf[input_pos] = (acc >> acc_len) as u8;
input_pos += 1;
}
}
if acc_len > 0 {
buf[input_pos] = (acc >> (8 - acc_len)) as u8;
}
target.write(buf);
}
}
impl Deserializable for PublicKey {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
let buf = source.read_array::<PK_LEN>()?;
if buf[0] != LOG_N {
return Err(DeserializationError::InvalidValue(format!(
"Failed to decode public key: expected the first byte to be {LOG_N} but was {}",
buf[0]
)));
}
let mut acc = 0_u32;
let mut acc_len = 0;
let mut output = [FalconFelt::zero(); N];
let mut output_idx = 0;
for &byte in buf.iter().skip(1) {
acc = (acc << 8) | (byte as u32);
acc_len += 8;
if acc_len >= FALCON_ENCODING_BITS {
acc_len -= FALCON_ENCODING_BITS;
let w = (acc >> acc_len) & 0x3fff;
let element = w.try_into().map_err(|err| {
DeserializationError::InvalidValue(format!(
"Failed to decode public key: {err}"
))
})?;
output[output_idx] = element;
output_idx += 1;
}
}
if (acc & ((1u32 << acc_len) - 1)) == 0 {
Ok(Polynomial::new(output.to_vec()).into())
} else {
Err(DeserializationError::InvalidValue(
"Failed to decode public key: input not fully consumed".to_string(),
))
}
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/dsa/falcon512_rpo/keys/secret_key.rs | miden-crypto/src/dsa/falcon512_rpo/keys/secret_key.rs | use alloc::{string::ToString, vec::Vec};
use miden_crypto_derive::{SilentDebug, SilentDisplay};
use num::Complex;
use num_complex::Complex64;
use rand::Rng;
use super::{
super::{
ByteReader, ByteWriter, Deserializable, DeserializationError, MODULUS, N, Nonce,
SIG_L2_BOUND, SIGMA, Serializable, ShortLatticeBasis, Signature,
math::{FalconFelt, FastFft, LdlTree, Polynomial, ffldl, ffsampling, gram, normalize_tree},
signature::SignaturePoly,
},
PublicKey,
};
use crate::{
Word,
dsa::falcon512_rpo::{LOG_N, SK_LEN, hash_to_point::hash_to_point_rpo256, math::ntru_gen},
hash::blake::Blake3_256,
utils::zeroize::{Zeroize, ZeroizeOnDrop},
};
// CONSTANTS
// ================================================================================================
pub(crate) const WIDTH_BIG_POLY_COEFFICIENT: usize = 8;
pub(crate) const WIDTH_SMALL_POLY_COEFFICIENT: usize = 6;
// SECRET KEY
// ================================================================================================
/// Represents the secret key for Falcon DSA.
///
/// The secret key is a quadruple [[g, -f], [G, -F]] of polynomials with integer coefficients. Each
/// polynomial is of degree at most N = 512 and computations with these polynomials is done modulo
/// the monic irreducible polynomial ϕ = x^N + 1. The secret key is a basis for a lattice and has
/// the property of being short with respect to a certain norm and an upper bound appropriate for
/// a given security parameter. The public key on the other hand is another basis for the same
/// lattice and can be described by a single polynomial h with integer coefficients modulo ϕ.
/// The two keys are related by the following relation:
///
/// 1. h = g /f [mod ϕ][mod p]
/// 2. f.G - g.F = p [mod ϕ]
///
/// where p = 12289 is the Falcon prime. Equation 2 is called the NTRU equation.
/// The secret key is generated by first sampling a random pair (f, g) of polynomials using
/// an appropriate distribution that yields short but not too short polynomials with integer
/// coefficients modulo ϕ. The NTRU equation is then used to find a matching pair (F, G).
/// The public key is then derived from the secret key using equation 1.
///
/// To allow for fast signature generation, the secret key is pre-processed into a more suitable
/// form, called the LDL tree, and this allows for fast sampling of short vectors in the lattice
/// using Fast Fourier sampling during signature generation (ffSampling algorithm 11 in [1]).
///
/// [1]: https://falcon-sign.info/falcon.pdf
#[derive(Clone, SilentDebug, SilentDisplay)]
pub struct SecretKey {
secret_key: ShortLatticeBasis,
tree: LdlTree,
}
impl Zeroize for SecretKey {
fn zeroize(&mut self) {
self.secret_key.zeroize();
self.tree.zeroize();
}
}
// Manual Drop implementation to ensure zeroization on drop.
// Cannot use #[derive(ZeroizeOnDrop)] because it's not available when sourcing zeroize from k256.
impl Drop for SecretKey {
fn drop(&mut self) {
self.zeroize();
}
}
impl ZeroizeOnDrop for SecretKey {}
#[allow(clippy::new_without_default)]
impl SecretKey {
// CONSTRUCTORS
// --------------------------------------------------------------------------------------------
/// Generates a secret key from OS-provided randomness.
#[cfg(feature = "std")]
pub fn new() -> Self {
let mut rng = rand::rng();
Self::with_rng(&mut rng)
}
/// Generates a secret_key using the provided random number generator `Rng`.
pub fn with_rng<R: Rng>(rng: &mut R) -> Self {
let basis = ntru_gen(N, rng);
Self::from_short_lattice_basis(basis)
}
/// Given a short basis [[g, -f], [G, -F]], computes the normalized LDL tree i.e., Falcon tree.
pub(crate) fn from_short_lattice_basis(basis: ShortLatticeBasis) -> SecretKey {
// FFT each polynomial of the short basis.
let basis_fft = to_complex_fft(&basis);
// compute the Gram matrix.
let gram_fft = gram(basis_fft);
// construct the LDL tree of the Gram matrix.
let mut tree = ffldl(gram_fft);
// normalize the leaves of the LDL tree.
normalize_tree(&mut tree, SIGMA);
Self { secret_key: basis, tree }
}
// PUBLIC ACCESSORS
// --------------------------------------------------------------------------------------------
/// Returns the polynomials of the short lattice basis of this secret key.
pub fn short_lattice_basis(&self) -> &ShortLatticeBasis {
&self.secret_key
}
/// Returns the public key corresponding to this secret key.
pub fn public_key(&self) -> PublicKey {
self.compute_pub_key_poly()
}
/// Returns the LDL tree associated to this secret key.
pub fn tree(&self) -> &LdlTree {
&self.tree
}
// SIGNATURE GENERATION
// --------------------------------------------------------------------------------------------
/// Signs a message with this secret key.
pub fn sign(&self, message: crate::Word) -> Signature {
use rand::SeedableRng;
use rand_chacha::ChaCha20Rng;
let mut seed = self.generate_seed(&message);
let mut rng = ChaCha20Rng::from_seed(seed);
let signature = self.sign_with_rng(message, &mut rng);
// Zeroize the seed to prevent leakage
seed.zeroize();
signature
}
/// Signs a message with the secret key relying on the provided randomness generator.
pub fn sign_with_rng<R: Rng>(&self, message: Word, rng: &mut R) -> Signature {
let nonce = Nonce::deterministic();
let h = self.compute_pub_key_poly();
let c = hash_to_point_rpo256(message, &nonce);
let s2 = self.sign_helper(c, rng);
Signature::new(nonce, h, s2)
}
/// Signs a message with the secret key relying on the provided randomness generator.
///
/// This is similar to [SecretKey::sign_with_rng()] and is used only for testing with
/// the main difference being that this method:
///
/// 1. uses `SHAKE256` for the hash-to-point algorithm, and
/// 2. uses `ChaCha20` in `Self::sign_helper`.
///
/// Hence, in contrast to `Self::sign_with_rng`, the current method uses different random
/// number generators for generating the nonce and in `Self::sign_helper`.
///
/// These changes make the signature algorithm compliant with the reference implementation.
#[cfg(all(test, feature = "std"))]
pub fn sign_with_rng_testing<R: Rng>(&self, message: &[u8], rng: &mut R) -> Signature {
use crate::dsa::falcon512_rpo::{hash_to_point::hash_to_point_shake256, tests::ChaCha};
let nonce = Nonce::random(rng);
let h = self.compute_pub_key_poly();
let c = hash_to_point_shake256(message, &nonce);
let mut chacha_prng = ChaCha::new(rng);
let s2 = self.sign_helper(c, &mut chacha_prng);
Signature::new(nonce, h, s2)
}
// HELPER METHODS
// --------------------------------------------------------------------------------------------
/// Derives the public key corresponding to this secret key using h = g /f [mod ϕ][mod p].
fn compute_pub_key_poly(&self) -> PublicKey {
let g: Polynomial<FalconFelt> = self.secret_key[0].clone().into();
let g_fft = g.fft();
let minus_f: Polynomial<FalconFelt> = self.secret_key[1].clone().into();
let f = -minus_f;
let f_fft = f.fft();
let h_fft = g_fft.hadamard_div(&f_fft);
h_fft.ifft().into()
}
/// Signs a message polynomial with the secret key.
///
/// Takes a randomness generator implementing `Rng` and message polynomial representing `c`
/// the hash-to-point of the message to be signed. It outputs a signature polynomial `s2`.
fn sign_helper<R: Rng>(&self, c: Polynomial<FalconFelt>, rng: &mut R) -> SignaturePoly {
let one_over_q = 1.0 / (MODULUS as f64);
let c_over_q_fft = c.map(|cc| Complex::new(one_over_q * cc.value() as f64, 0.0)).fft();
// B = [[FFT(g), -FFT(f)], [FFT(G), -FFT(F)]]
let [g_fft, minus_f_fft, big_g_fft, minus_big_f_fft] = to_complex_fft(&self.secret_key);
let t0 = c_over_q_fft.hadamard_mul(&minus_big_f_fft);
let t1 = -c_over_q_fft.hadamard_mul(&minus_f_fft);
loop {
let bold_s = loop {
let z = ffsampling(&(t0.clone(), t1.clone()), &self.tree, rng);
let t0_min_z0 = t0.clone() - z.0;
let t1_min_z1 = t1.clone() - z.1;
// s = (t-z) * B
let s0 = t0_min_z0.hadamard_mul(&g_fft) + t1_min_z1.hadamard_mul(&big_g_fft);
let s1 =
t0_min_z0.hadamard_mul(&minus_f_fft) + t1_min_z1.hadamard_mul(&minus_big_f_fft);
// compute the norm of (s0||s1) and note that they are in FFT representation
let length_squared: f64 =
(s0.coefficients.iter().map(|a| (a * a.conj()).re).sum::<f64>()
+ s1.coefficients.iter().map(|a| (a * a.conj()).re).sum::<f64>())
/ (N as f64);
if length_squared > (SIG_L2_BOUND as f64) {
continue;
}
break [-s0, s1];
};
let s2 = bold_s[1].ifft();
let s2_coef: [i16; N] = s2
.coefficients
.iter()
.map(|a| a.re.round() as i16)
.collect::<Vec<i16>>()
.try_into()
.expect("The number of coefficients should be equal to N");
if let Ok(s2) = SignaturePoly::try_from(&s2_coef) {
return s2;
}
}
}
/// Deterministically generates a seed for seeding the PRNG used in the trapdoor sampling
/// algorithm used during signature generation.
///
/// This uses the argument described in [RFC 6979](https://datatracker.ietf.org/doc/html/rfc6979#section-3.5)
/// § 3.5 where the concatenation of the private key and the hashed message, i.e., sk || H(m),
/// is used in order to construct the initial seed of a PRNG. See also [1].
///
///
/// Note that we hash in also a `log_2(N)` where `N = 512` in order to domain separate between
/// different versions of the Falcon DSA, see [1] Section 3.4.1.
///
/// [1]: https://github.com/algorand/falcon/blob/main/falcon-det.pdf
fn generate_seed(&self, message: &Word) -> [u8; 32] {
let mut buffer = Vec::with_capacity(1 + SK_LEN + Word::SERIALIZED_SIZE);
buffer.push(LOG_N);
buffer.extend_from_slice(&self.to_bytes());
buffer.extend_from_slice(&message.to_bytes());
let digest = Blake3_256::hash(&buffer);
// Zeroize the buffer as it contains secret key material
buffer.zeroize();
digest.into()
}
}
impl PartialEq for SecretKey {
fn eq(&self, other: &Self) -> bool {
use subtle::ConstantTimeEq;
self.to_bytes().ct_eq(&other.to_bytes()).into()
}
}
impl Eq for SecretKey {}
// SERIALIZATION / DESERIALIZATION
// ================================================================================================
impl Serializable for SecretKey {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
let basis = &self.secret_key;
// header
let n = basis[0].coefficients.len();
let l = n.checked_ilog2().unwrap() as u8;
let header: u8 = (5 << 4) | l;
let neg_f = &basis[1];
let g = &basis[0];
let neg_big_f = &basis[3];
let mut buffer = Vec::with_capacity(1281);
buffer.push(header);
let mut f_i8: Vec<i8> = neg_f
.coefficients
.iter()
.map(|&a| FalconFelt::new(-a).balanced_value() as i8)
.collect();
let f_i8_encoded = encode_i8(&f_i8, WIDTH_SMALL_POLY_COEFFICIENT).unwrap();
buffer.extend_from_slice(&f_i8_encoded);
f_i8.zeroize();
let mut g_i8: Vec<i8> = g
.coefficients
.iter()
.map(|&a| FalconFelt::new(a).balanced_value() as i8)
.collect();
let g_i8_encoded = encode_i8(&g_i8, WIDTH_SMALL_POLY_COEFFICIENT).unwrap();
buffer.extend_from_slice(&g_i8_encoded);
g_i8.zeroize();
let mut big_f_i8: Vec<i8> = neg_big_f
.coefficients
.iter()
.map(|&a| FalconFelt::new(-a).balanced_value() as i8)
.collect();
let big_f_i8_encoded = encode_i8(&big_f_i8, WIDTH_BIG_POLY_COEFFICIENT).unwrap();
buffer.extend_from_slice(&big_f_i8_encoded);
big_f_i8.zeroize();
target.write_bytes(&buffer);
// Note: buffer is not zeroized here as it's being passed to write_bytes which consumes it
// The caller should ensure proper handling of the written bytes
}
}
impl Deserializable for SecretKey {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
let byte_vector: [u8; SK_LEN] = source.read_array()?;
// read fields
let header = byte_vector[0];
// check fixed bits in header
if (header >> 4) != 5 {
return Err(DeserializationError::InvalidValue("Invalid header format".to_string()));
}
// check log n
let logn = (header & 15) as usize;
let n = 1 << logn;
// match against const variant generic parameter
if n != N {
return Err(DeserializationError::InvalidValue(
"Unsupported Falcon DSA variant".to_string(),
));
}
let chunk_size_f = ((n * WIDTH_SMALL_POLY_COEFFICIENT) + 7) >> 3;
let chunk_size_g = ((n * WIDTH_SMALL_POLY_COEFFICIENT) + 7) >> 3;
let chunk_size_big_f = ((n * WIDTH_BIG_POLY_COEFFICIENT) + 7) >> 3;
let f = decode_i8(&byte_vector[1..chunk_size_f + 1], WIDTH_SMALL_POLY_COEFFICIENT).ok_or(
DeserializationError::InvalidValue("Failed to decode f coefficients".to_string()),
)?;
let g = decode_i8(
&byte_vector[chunk_size_f + 1..(chunk_size_f + chunk_size_g + 1)],
WIDTH_SMALL_POLY_COEFFICIENT,
)
.unwrap();
let big_f = decode_i8(
&byte_vector[(chunk_size_f + chunk_size_g + 1)
..(chunk_size_f + chunk_size_g + chunk_size_big_f + 1)],
WIDTH_BIG_POLY_COEFFICIENT,
)
.unwrap();
let f = Polynomial::new(f.iter().map(|&c| FalconFelt::new(c.into())).collect());
let g = Polynomial::new(g.iter().map(|&c| FalconFelt::new(c.into())).collect());
let big_f = Polynomial::new(big_f.iter().map(|&c| FalconFelt::new(c.into())).collect());
// big_g * f - g * big_f = p (mod X^n + 1)
let big_g = g.fft().hadamard_div(&f.fft()).hadamard_mul(&big_f.fft()).ifft();
let basis = [
g.map(|f| f.balanced_value()),
-f.map(|f| f.balanced_value()),
big_g.map(|f| f.balanced_value()),
-big_f.map(|f| f.balanced_value()),
];
Ok(Self::from_short_lattice_basis(basis))
}
}
// HELPER FUNCTIONS
// ================================================================================================
/// Computes the complex FFT of the secret key polynomials.
fn to_complex_fft(basis: &[Polynomial<i16>; 4]) -> [Polynomial<Complex<f64>>; 4] {
let [g, f, big_g, big_f] = basis.clone();
let g_fft = g.map(|cc| Complex64::new(*cc as f64, 0.0)).fft();
let minus_f_fft = f.map(|cc| -Complex64::new(*cc as f64, 0.0)).fft();
let big_g_fft = big_g.map(|cc| Complex64::new(*cc as f64, 0.0)).fft();
let minus_big_f_fft = big_f.map(|cc| -Complex64::new(*cc as f64, 0.0)).fft();
[g_fft, minus_f_fft, big_g_fft, minus_big_f_fft]
}
/// Encodes a sequence of signed integers such that each integer x satisfies |x| < 2^(bits-1)
/// for a given parameter bits. bits can take either the value 6 or 8.
pub fn encode_i8(x: &[i8], bits: usize) -> Option<Vec<u8>> {
let maxv = (1 << (bits - 1)) - 1_usize;
let maxv = maxv as i8;
let minv = -maxv;
for &c in x {
if c > maxv || c < minv {
return None;
}
}
let out_len = ((N * bits) + 7) >> 3;
let mut buf = vec![0_u8; out_len];
let mut acc = 0_u32;
let mut acc_len = 0;
let mask = ((1_u16 << bits) - 1) as u8;
let mut input_pos = 0;
for &c in x {
acc = (acc << bits) | (c as u8 & mask) as u32;
acc_len += bits;
while acc_len >= 8 {
acc_len -= 8;
buf[input_pos] = (acc >> acc_len) as u8;
input_pos += 1;
}
}
if acc_len > 0 {
buf[input_pos] = (acc >> (8 - acc_len)) as u8;
}
Some(buf)
}
/// Decodes a sequence of bytes into a sequence of signed integers such that each integer x
/// satisfies |x| < 2^(bits-1) for a given parameter bits. bits can take either the value 6 or 8.
pub fn decode_i8(buf: &[u8], bits: usize) -> Option<Vec<i8>> {
let mut x = [0_i8; N];
let mut i = 0;
let mut j = 0;
let mut acc = 0_u32;
let mut acc_len = 0;
let mask = (1_u32 << bits) - 1;
let a = (1 << bits) as u8;
let b = ((1 << (bits - 1)) - 1) as u8;
while i < N {
acc = (acc << 8) | (buf[j] as u32);
j += 1;
acc_len += 8;
while acc_len >= bits && i < N {
acc_len -= bits;
let w = (acc >> acc_len) & mask;
let w = w as u8;
let z = if w > b { w as i8 - a as i8 } else { w as i8 };
x[i] = z;
i += 1;
}
}
if (acc & ((1u32 << acc_len) - 1)) == 0 {
Some(x.to_vec())
} else {
None
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/dsa/falcon512_rpo/keys/mod.rs | miden-crypto/src/dsa/falcon512_rpo/keys/mod.rs | use super::{
ByteReader, ByteWriter, Deserializable, DeserializationError, Felt, Serializable, Signature,
math::{FalconFelt, Polynomial},
};
mod public_key;
pub use public_key::PublicKey;
mod secret_key;
pub use secret_key::SecretKey;
pub(crate) use secret_key::{WIDTH_BIG_POLY_COEFFICIENT, WIDTH_SMALL_POLY_COEFFICIENT};
// TESTS
// ================================================================================================
#[cfg(all(test, feature = "std"))]
mod tests {
use rand::SeedableRng;
use rand_chacha::ChaCha20Rng;
use crate::{
ONE, PrimeCharacteristicRing, Word,
dsa::falcon512_rpo::SecretKey,
utils::{Deserializable, Serializable},
};
#[test]
fn test_falcon_verification() {
let seed = [0_u8; 32];
let mut rng = ChaCha20Rng::from_seed(seed);
// generate random keys
let sk = SecretKey::with_rng(&mut rng);
let pk = sk.public_key();
// test secret key serialization/deserialization
let mut buffer = vec![];
sk.write_into(&mut buffer);
let sk_deserialized = SecretKey::read_from_bytes(&buffer).unwrap();
assert_eq!(sk.short_lattice_basis(), sk_deserialized.short_lattice_basis());
// sign a random message
let message = Word::new([ONE; 4]);
let signature = sk.sign_with_rng(message, &mut rng);
// make sure the signature verifies correctly
assert!(pk.verify(message, &signature));
// a signature should not verify against a wrong message
let message2 = Word::new([ONE.double(); 4]);
assert!(!pk.verify(message2, &signature));
// a signature should not verify against a wrong public key
let sk2 = SecretKey::with_rng(&mut rng);
assert!(!sk2.public_key().verify(message, &signature))
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/dsa/ecdsa_k256_keccak/tests.rs | miden-crypto/src/dsa/ecdsa_k256_keccak/tests.rs | use rand::rng;
use super::*;
use crate::Felt;
#[test]
fn test_key_generation() {
let mut rng = rng();
let secret_key = SecretKey::with_rng(&mut rng);
let public_key = secret_key.public_key();
// Test that we can convert to/from bytes
let sk_bytes = secret_key.to_bytes();
let recovered_sk = SecretKey::read_from_bytes(&sk_bytes).unwrap();
assert_eq!(secret_key.to_bytes(), recovered_sk.to_bytes());
let pk_bytes = public_key.to_bytes();
let recovered_pk = PublicKey::read_from_bytes(&pk_bytes).unwrap();
assert_eq!(public_key, recovered_pk);
}
#[test]
fn test_public_key_recovery() {
let mut rng = rng();
let secret_key = SecretKey::with_rng(&mut rng);
let public_key = secret_key.public_key();
// Generate a signature using the secret key
let message = [Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)].into();
let signature = secret_key.sign(message);
// Recover the public key
let recovered_pk = PublicKey::recover_from(message, &signature).unwrap();
assert_eq!(public_key, recovered_pk);
// Using the wrong message, we shouldn't be able to recover the public key
let message = [Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(5)].into();
let recovered_pk = PublicKey::recover_from(message, &signature).unwrap();
assert!(public_key != recovered_pk);
}
#[test]
fn test_sign_and_verify() {
let mut rng = rng();
let secret_key = SecretKey::with_rng(&mut rng);
let public_key = secret_key.public_key();
let message = [Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)].into();
let signature = secret_key.sign(message);
// Verify using public key method
assert!(public_key.verify(message, &signature));
// Verify using signature method
assert!(signature.verify(message, &public_key));
// Test with wrong message
let wrong_message = [Felt::new(5), Felt::new(6), Felt::new(7), Felt::new(8)].into();
assert!(!public_key.verify(wrong_message, &signature));
}
#[test]
fn test_signature_serialization_default() {
let mut rng = rng();
let secret_key = SecretKey::with_rng(&mut rng);
let message = [Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)].into();
let signature = secret_key.sign(message);
let sig_bytes = signature.to_bytes();
let recovered_sig = Signature::read_from_bytes(&sig_bytes).unwrap();
assert_eq!(signature, recovered_sig);
}
#[test]
fn test_signature_serialization() {
let mut rng = rng();
let secret_key = SecretKey::with_rng(&mut rng);
let message = [Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)].into();
let signature = secret_key.sign(message);
let recovery_id = signature.v();
let sig_bytes = signature.to_sec1_bytes();
let recovered_sig = Signature::from_sec1_bytes_and_recovery_id(sig_bytes, recovery_id).unwrap();
assert_eq!(signature, recovered_sig);
let recovery_id = (recovery_id + 1) % 4;
let recovered_sig = Signature::from_sec1_bytes_and_recovery_id(sig_bytes, recovery_id).unwrap();
assert_ne!(signature, recovered_sig);
let recovered_sig = Signature::from_sec1_bytes_and_recovery_id(sig_bytes, recovery_id).unwrap();
assert_ne!(signature, recovered_sig);
}
#[test]
fn test_secret_key_debug_redaction() {
let mut rng = rng();
let secret_key = SecretKey::with_rng(&mut rng);
// Verify Debug impl produces expected redacted output
let debug_output = format!("{secret_key:?}");
assert_eq!(debug_output, "<elided secret for SecretKey>");
// Verify Display impl also elides
let display_output = format!("{secret_key}");
assert_eq!(display_output, "<elided secret for SecretKey>");
}
#[cfg(feature = "std")]
#[test]
fn test_signature_serde() {
use crate::utils::SliceReader;
let sig0 = SecretKey::new().sign(Word::from([5, 0, 0, 0u32]));
let sig_bytes = sig0.to_bytes();
let mut slice_reader = SliceReader::new(&sig_bytes);
let sig0_deserialized = Signature::read_from(&mut slice_reader).unwrap();
assert!(!slice_reader.has_more_bytes());
assert_eq!(sig0, sig0_deserialized);
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/dsa/ecdsa_k256_keccak/mod.rs | miden-crypto/src/dsa/ecdsa_k256_keccak/mod.rs | //! ECDSA (Elliptic Curve Digital Signature Algorithm) signature implementation over secp256k1
//! curve using Keccak to hash the messages when signing.
use alloc::{string::ToString, vec::Vec};
use k256::{
ecdh::diffie_hellman,
ecdsa::{RecoveryId, SigningKey, VerifyingKey, signature::hazmat::PrehashVerifier},
};
use miden_crypto_derive::{SilentDebug, SilentDisplay};
use rand::{CryptoRng, RngCore};
use thiserror::Error;
use crate::{
Felt, SequentialCommit, Word,
ecdh::k256::{EphemeralPublicKey, SharedSecret},
utils::{
ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable,
bytes_to_packed_u32_elements,
zeroize::{Zeroize, ZeroizeOnDrop},
},
};
#[cfg(all(test, feature = "std"))]
mod tests;
// CONSTANTS
// ================================================================================================
/// Length of secret key in bytes
const SECRET_KEY_BYTES: usize = 32;
/// Length of public key in bytes when using compressed format encoding
pub(crate) const PUBLIC_KEY_BYTES: usize = 33;
/// Length of signature in bytes using our custom serialization
const SIGNATURE_BYTES: usize = 65;
/// Length of signature in bytes using standard serialization i.e., `SEC1`
const SIGNATURE_STANDARD_BYTES: usize = 64;
/// Length of scalars for the `secp256k1` curve
const SCALARS_SIZE_BYTES: usize = 32;
// SECRET KEY
// ================================================================================================
/// Secret key for ECDSA signature verification over secp256k1 curve.
#[derive(Clone, SilentDebug, SilentDisplay)]
pub struct SecretKey {
inner: SigningKey,
}
impl SecretKey {
/// Generates a new random secret key using the OS random number generator.
#[cfg(feature = "std")]
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
let mut rng = rand::rng();
Self::with_rng(&mut rng)
}
/// Generates a new secret key using the provided random number generator.
pub fn with_rng<R: CryptoRng + RngCore>(rng: &mut R) -> Self {
// we use a seedable CSPRNG and seed it with `rng`
// this is a work around the fact that the version of the `rand` dependency in our crate
// is different than the one used in the `k256` one. This solution will no longer be needed
// once `k256` gets a new release with a version of the `rand` dependency matching ours
use k256::elliptic_curve::rand_core::SeedableRng;
let mut seed = [0_u8; 32];
rand::RngCore::fill_bytes(rng, &mut seed);
let mut rng = rand_hc::Hc128Rng::from_seed(seed);
let signing_key = SigningKey::random(&mut rng);
// Zeroize the seed to prevent leaking secret material
seed.zeroize();
Self { inner: signing_key }
}
/// Gets the corresponding public key for this secret key.
pub fn public_key(&self) -> PublicKey {
let verifying_key = self.inner.verifying_key();
PublicKey { inner: *verifying_key }
}
/// Signs a message (represented as a Word) with this secret key.
pub fn sign(&self, message: Word) -> Signature {
let message_digest = hash_message(message);
self.sign_prehash(message_digest)
}
/// Signs a pre-hashed message with this secret key.
pub fn sign_prehash(&self, message_digest: [u8; 32]) -> Signature {
let (signature_inner, recovery_id) = self
.inner
.sign_prehash_recoverable(&message_digest)
.expect("failed to generate signature");
let (r, s) = signature_inner.split_scalars();
Signature {
r: r.to_bytes().into(),
s: s.to_bytes().into(),
v: recovery_id.into(),
}
}
/// Computes a Diffie-Hellman shared secret from this secret key and the ephemeral public key
/// generated by the other party.
pub fn get_shared_secret(&self, pk_e: EphemeralPublicKey) -> SharedSecret {
let shared_secret_inner = diffie_hellman(self.inner.as_nonzero_scalar(), pk_e.as_affine());
SharedSecret::new(shared_secret_inner)
}
}
// SAFETY: The inner `k256::ecdsa::SigningKey` already implements `ZeroizeOnDrop`,
// which ensures that the secret key material is securely zeroized when dropped.
impl ZeroizeOnDrop for SecretKey {}
impl PartialEq for SecretKey {
fn eq(&self, other: &Self) -> bool {
use subtle::ConstantTimeEq;
self.to_bytes().ct_eq(&other.to_bytes()).into()
}
}
impl Eq for SecretKey {}
// PUBLIC KEY
// ================================================================================================
/// Public key for ECDSA signature verification over secp256k1 curve.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct PublicKey {
pub(crate) inner: VerifyingKey,
}
impl PublicKey {
/// Returns a commitment to the public key using the RPO256 hash function.
///
/// The commitment is computed by first converting the public key to field elements (4 bytes
/// per element), and then computing a sequential hash of the elements.
pub fn to_commitment(&self) -> Word {
<Self as SequentialCommit>::to_commitment(self)
}
/// Verifies a signature against this public key and message.
pub fn verify(&self, message: Word, signature: &Signature) -> bool {
let message_digest = hash_message(message);
self.verify_prehash(message_digest, signature)
}
/// Verifies a signature against this public key and pre-hashed message.
pub fn verify_prehash(&self, message_digest: [u8; 32], signature: &Signature) -> bool {
let signature_inner = k256::ecdsa::Signature::from_scalars(*signature.r(), *signature.s());
match signature_inner {
Ok(signature) => self.inner.verify_prehash(&message_digest, &signature).is_ok(),
Err(_) => false,
}
}
/// Recovers from the signature the public key associated to the secret key used to sign the
/// message.
pub fn recover_from(message: Word, signature: &Signature) -> Result<Self, PublicKeyError> {
let message_digest = hash_message(message);
let signature_data = k256::ecdsa::Signature::from_scalars(*signature.r(), *signature.s())
.map_err(|_| PublicKeyError::RecoveryFailed)?;
let verifying_key = k256::ecdsa::VerifyingKey::recover_from_prehash(
&message_digest,
&signature_data,
RecoveryId::from_byte(signature.v()).ok_or(PublicKeyError::RecoveryFailed)?,
)
.map_err(|_| PublicKeyError::RecoveryFailed)?;
Ok(Self { inner: verifying_key })
}
}
impl SequentialCommit for PublicKey {
type Commitment = Word;
fn to_elements(&self) -> Vec<Felt> {
bytes_to_packed_u32_elements(&self.to_bytes())
}
}
#[derive(Debug, Error)]
pub enum PublicKeyError {
#[error("Could not recover the public key from the message and signature")]
RecoveryFailed,
}
// SIGNATURE
// ================================================================================================
/// ECDSA signature over secp256k1 curve using Keccak to hash the messages when signing.
///
/// ## Serialization Formats
///
/// This implementation supports 2 serialization formats:
///
/// ### Custom Format (66 bytes):
/// - Bytes 0-31: r component (32 bytes, big-endian)
/// - Bytes 32-63: s component (32 bytes, big-endian)
/// - Byte 64: recovery ID (v) - values 0-3
///
/// ### SEC1 Format (64 bytes):
/// - Bytes 0-31: r component (32 bytes, big-endian)
/// - Bytes 32-63: s component (32 bytes, big-endian)
/// - Note: Recovery ID
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Signature {
r: [u8; SCALARS_SIZE_BYTES],
s: [u8; SCALARS_SIZE_BYTES],
v: u8,
}
impl Signature {
/// Returns the `r` scalar of this signature.
pub fn r(&self) -> &[u8; SCALARS_SIZE_BYTES] {
&self.r
}
/// Returns the `s` scalar of this signature.
pub fn s(&self) -> &[u8; SCALARS_SIZE_BYTES] {
&self.s
}
/// Returns the `v` component of this signature, which is a `u8` representing the recovery id.
pub fn v(&self) -> u8 {
self.v
}
/// Verifies this signature against a message and public key.
pub fn verify(&self, message: Word, pub_key: &PublicKey) -> bool {
pub_key.verify(message, self)
}
/// Converts signature to SEC1 format (standard 64-byte r||s format).
///
/// This format is the standard one used by most ECDSA libraries but loses the recovery ID.
pub fn to_sec1_bytes(&self) -> [u8; SIGNATURE_STANDARD_BYTES] {
let mut bytes = [0u8; 2 * SCALARS_SIZE_BYTES];
bytes[0..SCALARS_SIZE_BYTES].copy_from_slice(self.r());
bytes[SCALARS_SIZE_BYTES..2 * SCALARS_SIZE_BYTES].copy_from_slice(self.s());
bytes
}
/// Creates a signature from SEC1 format bytes with a given recovery id.
///
/// # Arguments
/// * `bytes` - 64-byte array containing r and s components
/// * `recovery_id` - recovery ID (0-3)
pub fn from_sec1_bytes_and_recovery_id(
bytes: [u8; SIGNATURE_STANDARD_BYTES],
v: u8,
) -> Result<Self, DeserializationError> {
let mut r = [0u8; SCALARS_SIZE_BYTES];
let mut s = [0u8; SCALARS_SIZE_BYTES];
r.copy_from_slice(&bytes[0..SCALARS_SIZE_BYTES]);
s.copy_from_slice(&bytes[SCALARS_SIZE_BYTES..2 * SCALARS_SIZE_BYTES]);
if v > 3 {
return Err(DeserializationError::InvalidValue(r#"Invalid recovery ID"#.to_string()));
}
Ok(Signature { r, s, v })
}
}
// SERIALIZATION / DESERIALIZATION
// ================================================================================================
impl Serializable for SecretKey {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
let mut buffer = Vec::with_capacity(SECRET_KEY_BYTES);
let sk_bytes: [u8; SECRET_KEY_BYTES] = self.inner.to_bytes().into();
buffer.extend_from_slice(&sk_bytes);
target.write_bytes(&buffer);
}
}
impl Deserializable for SecretKey {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
let mut bytes: [u8; SECRET_KEY_BYTES] = source.read_array()?;
let signing_key = SigningKey::from_slice(&bytes)
.map_err(|_| DeserializationError::InvalidValue("Invalid secret key".to_string()))?;
bytes.zeroize();
Ok(Self { inner: signing_key })
}
}
impl Serializable for PublicKey {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
// Compressed format
let encoded = self.inner.to_encoded_point(true);
target.write_bytes(encoded.as_bytes());
}
}
impl Deserializable for PublicKey {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
let bytes: [u8; PUBLIC_KEY_BYTES] = source.read_array()?;
let verifying_key = VerifyingKey::from_sec1_bytes(&bytes)
.map_err(|_| DeserializationError::InvalidValue("Invalid public key".to_string()))?;
Ok(Self { inner: verifying_key })
}
}
impl Serializable for Signature {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
let mut bytes = [0u8; SIGNATURE_BYTES];
bytes[0..SCALARS_SIZE_BYTES].copy_from_slice(self.r());
bytes[SCALARS_SIZE_BYTES..2 * SCALARS_SIZE_BYTES].copy_from_slice(self.s());
bytes[2 * SCALARS_SIZE_BYTES] = self.v();
target.write_bytes(&bytes);
}
}
impl Deserializable for Signature {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
let r: [u8; SCALARS_SIZE_BYTES] = source.read_array()?;
let s: [u8; SCALARS_SIZE_BYTES] = source.read_array()?;
let v: u8 = source.read_u8()?;
Ok(Signature { r, s, v })
}
}
// HELPER
// ================================================================================================
/// Hashes a word message using Keccak.
fn hash_message(message: Word) -> [u8; 32] {
use sha3::{Digest, Keccak256};
let mut hasher = Keccak256::new();
let message_bytes: [u8; 32] = message.into();
hasher.update(message_bytes);
hasher.finalize().into()
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/mod.rs | miden-crypto/src/hash/mod.rs | //! Cryptographic hash functions used by the Miden protocol.
use crate::{Felt, Word, ZERO};
/// Blake3 hash function.
pub mod blake;
/// Keccak hash function.
pub mod keccak;
/// SHA-2 hash functions (SHA-256 and SHA-512).
pub mod sha2;
/// Poseidon2 hash function.
pub mod poseidon2 {
pub use p3_miden_goldilocks::Poseidon2Goldilocks;
pub use super::algebraic_sponge::poseidon2::{
Poseidon2, Poseidon2Challenger, Poseidon2Compression, Poseidon2Hasher,
Poseidon2Permutation256,
};
}
/// Rescue Prime Optimized (RPO) hash function.
pub mod rpo {
pub use super::algebraic_sponge::rescue::rpo::{
Rpo256, RpoChallenger, RpoCompression, RpoHasher, RpoPermutation256,
};
}
/// Rescue Prime Extended (RPX) hash function.
pub mod rpx {
pub use super::algebraic_sponge::rescue::rpx::{
Rpx256, RpxChallenger, RpxCompression, RpxHasher, RpxPermutation256,
};
}
mod algebraic_sponge;
// TRAITS
// ================================================================================================
/// Extension trait for hashers to provide iterator-based hashing.
pub trait HasherExt {
/// The digest type produced by this hasher.
type Digest;
/// Hashes an iterator of byte slices.
///
/// This method allows for more efficient hashing by avoiding the need to
/// allocate a contiguous buffer when the input data is already available
/// as discrete slices.
fn hash_iter<'a>(slices: impl Iterator<Item = &'a [u8]>) -> Self::Digest;
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/sha2/tests.rs | miden-crypto/src/hash/sha2/tests.rs | #![cfg(feature = "std")]
use alloc::vec::Vec;
use proptest::prelude::*;
use super::*;
use crate::rand::test_utils::rand_vector;
// SHA-256 TESTS
// ================================================================================================
#[test]
fn sha256_hash_elements() {
// test multiple of 8
let elements = rand_vector::<Felt>(16);
let expected = compute_expected_sha256_element_hash(&elements);
let actual: [u8; DIGEST256_BYTES] = hash_elements_256(&elements);
assert_eq!(&expected, &actual);
// test not multiple of 8
let elements = rand_vector::<Felt>(17);
let expected = compute_expected_sha256_element_hash(&elements);
let actual: [u8; DIGEST256_BYTES] = hash_elements_256(&elements);
assert_eq!(&expected, &actual);
}
proptest! {
#[test]
fn sha256_wont_panic_with_arbitrary_input(ref vec in any::<Vec<u8>>()) {
Sha256::hash(vec);
}
#[test]
fn sha256_hash_iter_matches_hash(ref slices in any::<Vec<Vec<u8>>>()) {
// Concatenate all slices to create the expected result
let mut concatenated = Vec::new();
for slice in slices.iter() {
concatenated.extend_from_slice(slice);
}
let expected = Sha256::hash(&concatenated);
// Test with iterator
let actual = Sha256::hash_iter(slices.iter().map(|v| v.as_slice()));
assert_eq!(expected, actual);
// Test with empty slices list
let empty_actual = Sha256::hash_iter(core::iter::empty());
let empty_expected = Sha256::hash(b"");
assert_eq!(empty_expected, empty_actual);
// Test with single slice
if let Some(single_slice) = slices.first() {
let single_actual = Sha256::hash_iter(core::iter::once(single_slice.as_slice()));
let single_expected = Sha256::hash(single_slice);
assert_eq!(single_expected, single_actual);
}
}
}
#[test]
fn test_sha256_nist_test_vectors() {
for (i, vector) in SHA256_TEST_VECTORS.iter().enumerate() {
let result = Sha256::hash(vector.input);
let expected = hex::decode(vector.expected).unwrap();
assert_eq!(
result.to_vec(),
expected,
"SHA-256 test vector {} failed: {}",
i,
vector.description
);
}
}
// SHA-512 TESTS
// ================================================================================================
#[test]
fn sha512_hash_elements() {
// test multiple of 16
let elements = rand_vector::<Felt>(32);
let expected = compute_expected_sha512_element_hash(&elements);
let actual: [u8; DIGEST512_BYTES] = hash_elements_512(&elements);
assert_eq!(&expected, &actual);
// test not multiple of 16
let elements = rand_vector::<Felt>(17);
let expected = compute_expected_sha512_element_hash(&elements);
let actual: [u8; DIGEST512_BYTES] = hash_elements_512(&elements);
assert_eq!(&expected, &actual);
}
proptest! {
#[test]
fn sha512_wont_panic_with_arbitrary_input(ref vec in any::<Vec<u8>>()) {
Sha512::hash(vec);
}
#[test]
fn sha512_hash_iter_matches_hash(ref slices in any::<Vec<Vec<u8>>>()) {
// Concatenate all slices to create the expected result
let mut concatenated = Vec::new();
for slice in slices.iter() {
concatenated.extend_from_slice(slice);
}
let expected = Sha512::hash(&concatenated);
// Test with iterator
let actual = Sha512::hash_iter(slices.iter().map(|v| v.as_slice()));
assert_eq!(expected, actual);
// Test with empty slices list
let empty_actual = Sha512::hash_iter(core::iter::empty());
let empty_expected = Sha512::hash(b"");
assert_eq!(empty_expected, empty_actual);
// Test with single slice
if let Some(single_slice) = slices.first() {
let single_actual = Sha512::hash_iter(core::iter::once(single_slice.as_slice()));
let single_expected = Sha512::hash(single_slice);
assert_eq!(single_expected, single_actual);
}
}
}
#[test]
fn test_sha512_nist_test_vectors() {
for (i, vector) in SHA512_TEST_VECTORS.iter().enumerate() {
let result = Sha512::hash(vector.input);
let expected = hex::decode(vector.expected).unwrap();
assert_eq!(
result.to_vec(),
expected,
"SHA-512 test vector {} failed: {}",
i,
vector.description
);
}
}
// HELPER FUNCTIONS
// ================================================================================================
fn compute_expected_sha256_element_hash(elements: &[Felt]) -> [u8; DIGEST256_BYTES] {
let mut bytes = Vec::new();
for element in elements.iter() {
bytes.extend_from_slice(&element.as_canonical_u64().to_le_bytes());
}
let mut hasher = sha2::Sha256::new();
hasher.update(&bytes);
hasher.finalize().into()
}
fn compute_expected_sha512_element_hash(elements: &[Felt]) -> [u8; DIGEST512_BYTES] {
let mut bytes = Vec::new();
for element in elements.iter() {
bytes.extend_from_slice(&element.as_canonical_u64().to_le_bytes());
}
let mut hasher = sha2::Sha512::new();
hasher.update(&bytes);
hasher.finalize().into()
}
struct TestVector {
input: &'static [u8],
expected: &'static str,
description: &'static str,
}
// TEST VECTORS
// ================================================================================================
// NIST test vectors for SHA-256
// https://csrc.nist.gov/CSRC/media/Projects/Cryptographic-Standards-and-Guidelines/documents/examples/SHA256.pdf
const SHA256_TEST_VECTORS: &[TestVector] = &[
TestVector {
input: b"",
expected: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
description: "Empty input",
},
TestVector {
input: b"abc",
expected: "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad",
description: "String 'abc'",
},
TestVector {
input: b"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
expected: "248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1",
description: "448 bits message",
},
];
// NIST test vectors for SHA-512
// https://csrc.nist.gov/CSRC/media/Projects/Cryptographic-Standards-and-Guidelines/documents/examples/SHA512.pdf
const SHA512_TEST_VECTORS: &[TestVector] = &[
TestVector {
input: b"",
expected: "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e",
description: "Empty input",
},
TestVector {
input: b"abc",
expected: "ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f",
description: "String 'abc'",
},
TestVector {
input: b"abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu",
expected: "8e959b75dae313da8cf4f72814fc143f8f7779c6eb9f7fa17299aeadb6889018501d289e4900f7e4331b99dec4b5433ac7d329eeb6dd26545e96e55b874be909",
description: "896 bits message",
},
];
// MEMORY LAYOUT TESTS
// ================================================================================================
#[test]
fn test_memory_layout_assumptions() {
// Verify struct size equals inner array size (required for safe pointer casting)
assert_eq!(core::mem::size_of::<Sha256Digest>(), core::mem::size_of::<[u8; 32]>());
// Verify alignment
assert_eq!(core::mem::align_of::<Sha256Digest>(), core::mem::align_of::<[u8; 32]>());
// Same for Sha512Digest
assert_eq!(core::mem::size_of::<Sha512Digest>(), core::mem::size_of::<[u8; 64]>());
assert_eq!(core::mem::align_of::<Sha512Digest>(), core::mem::align_of::<[u8; 64]>());
}
#[test]
fn test_sha256_digests_as_bytes_correctness() {
let digests = vec![Sha256Digest([1u8; 32]), Sha256Digest([2u8; 32]), Sha256Digest([3u8; 32])];
let bytes = Sha256Digest::digests_as_bytes(&digests);
// Verify length
assert_eq!(bytes.len(), 96);
// Verify contiguous layout
assert_eq!(&bytes[0..32], &[1u8; 32]);
assert_eq!(&bytes[32..64], &[2u8; 32]);
assert_eq!(&bytes[64..96], &[3u8; 32]);
}
#[test]
fn test_sha512_digests_as_bytes_correctness() {
let digests = vec![Sha512Digest([1u8; 64]), Sha512Digest([2u8; 64]), Sha512Digest([3u8; 64])];
let bytes = Sha512Digest::digests_as_bytes(&digests);
// Verify length
assert_eq!(bytes.len(), 192);
// Verify contiguous layout
assert_eq!(&bytes[0..64], &[1u8; 64]);
assert_eq!(&bytes[64..128], &[2u8; 64]);
assert_eq!(&bytes[128..192], &[3u8; 64]);
}
// MERGE_MANY CORRECTNESS TESTS
// ================================================================================================
proptest! {
#[test]
fn sha256_merge_many_matches_concatenated_hash(
digests in prop::collection::vec(any::<[u8; 32]>(), 1..10)
) {
let sha_digests: Vec<Sha256Digest> =
digests.iter().map(|&d| Sha256Digest(d)).collect();
// Method 1: Using merge_many (uses unsafe digests_as_bytes)
let result1 = Sha256::merge_many(&sha_digests);
// Method 2: Safe concatenation for comparison
let mut concat = Vec::new();
for d in &sha_digests {
concat.extend_from_slice(&d.0);
}
let result2 = Sha256::hash(&concat);
// Should produce identical results
assert_eq!(result1, result2);
}
#[test]
fn sha512_merge_many_matches_concatenated_hash(
digests in prop::collection::vec(any::<[u8; 64]>(), 1..10)
) {
let sha_digests: Vec<Sha512Digest> =
digests.iter().map(|&d| Sha512Digest(d)).collect();
let result1 = Sha512::merge_many(&sha_digests);
let mut concat = Vec::new();
for d in &sha_digests {
concat.extend_from_slice(&d.0);
}
let result2 = Sha512::hash(&concat);
assert_eq!(result1, result2);
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/sha2/mod.rs | miden-crypto/src/hash/sha2/mod.rs | //! SHA2 hash function wrappers (SHA-256 and SHA-512).
//!
//! # Note on SHA-512 and the Digest trait
//!
//! `Sha512Digest` does not implement the `Digest` trait because Winterfell's `Digest` trait
//! requires a fixed 32-byte output via `as_bytes() -> [u8; 32]`, which is incompatible with
//! SHA-512's native 64-byte output. Truncating to 32 bytes would create confusion with
//! SHA-512/256 (which uses different initialization vectors per FIPS 180-4).
//!
//! See <https://github.com/facebook/winterfell/issues/406> for a proposal to make the
//! `Digest` trait generic over output size.
use alloc::string::String;
use core::{
mem::size_of,
ops::Deref,
slice::{self, from_raw_parts},
};
use p3_field::{BasedVectorSpace, PrimeField64};
use sha2::Digest as Sha2Digest;
use super::{Felt, HasherExt};
use crate::utils::{
ByteReader, ByteWriter, Deserializable, DeserializationError, HexParseError, Serializable,
bytes_to_hex_string, hex_to_bytes,
};
#[cfg(test)]
mod tests;
// CONSTANTS
// ================================================================================================
const DIGEST256_BYTES: usize = 32;
const DIGEST512_BYTES: usize = 64;
// SHA256 DIGEST
// ================================================================================================
/// SHA-256 digest (32 bytes).
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
#[cfg_attr(feature = "serde", serde(into = "String", try_from = "&str"))]
#[repr(transparent)]
pub struct Sha256Digest([u8; DIGEST256_BYTES]);
impl Sha256Digest {
pub fn as_bytes(&self) -> &[u8; DIGEST256_BYTES] {
&self.0
}
pub fn digests_as_bytes(digests: &[Sha256Digest]) -> &[u8] {
let p = digests.as_ptr();
let len = digests.len() * DIGEST256_BYTES;
unsafe { slice::from_raw_parts(p as *const u8, len) }
}
}
impl Default for Sha256Digest {
fn default() -> Self {
Self([0; DIGEST256_BYTES])
}
}
impl Deref for Sha256Digest {
type Target = [u8];
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<Sha256Digest> for [u8; DIGEST256_BYTES] {
fn from(value: Sha256Digest) -> Self {
value.0
}
}
impl From<[u8; DIGEST256_BYTES]> for Sha256Digest {
fn from(value: [u8; DIGEST256_BYTES]) -> Self {
Self(value)
}
}
impl From<Sha256Digest> for String {
fn from(value: Sha256Digest) -> Self {
bytes_to_hex_string(*value.as_bytes())
}
}
impl TryFrom<&str> for Sha256Digest {
type Error = HexParseError;
fn try_from(value: &str) -> Result<Self, Self::Error> {
hex_to_bytes(value).map(|v| v.into())
}
}
impl Serializable for Sha256Digest {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
target.write_bytes(&self.0);
}
}
impl Deserializable for Sha256Digest {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
source.read_array().map(Self)
}
}
// SHA256 HASHER
// ================================================================================================
/// SHA-256 hash function.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct Sha256;
impl HasherExt for Sha256 {
type Digest = Sha256Digest;
fn hash_iter<'a>(slices: impl Iterator<Item = &'a [u8]>) -> Self::Digest {
let mut hasher = sha2::Sha256::new();
for slice in slices {
hasher.update(slice);
}
Sha256Digest(hasher.finalize().into())
}
}
impl Sha256 {
/// SHA-256 collision resistance is 128-bits for 32-bytes output.
pub const COLLISION_RESISTANCE: u32 = 128;
pub fn hash(bytes: &[u8]) -> Sha256Digest {
let mut hasher = sha2::Sha256::new();
hasher.update(bytes);
Sha256Digest(hasher.finalize().into())
}
pub fn merge(values: &[Sha256Digest; 2]) -> Sha256Digest {
Self::hash(prepare_merge(values))
}
pub fn merge_many(values: &[Sha256Digest]) -> Sha256Digest {
let data = Sha256Digest::digests_as_bytes(values);
let mut hasher = sha2::Sha256::new();
hasher.update(data);
Sha256Digest(hasher.finalize().into())
}
pub fn merge_with_int(seed: Sha256Digest, value: u64) -> Sha256Digest {
let mut hasher = sha2::Sha256::new();
hasher.update(seed.0);
hasher.update(value.to_le_bytes());
Sha256Digest(hasher.finalize().into())
}
/// Returns a hash of the provided field elements.
#[inline(always)]
pub fn hash_elements<E: BasedVectorSpace<Felt>>(elements: &[E]) -> Sha256Digest {
Sha256Digest(hash_elements_256(elements))
}
/// Hashes an iterator of byte slices.
#[inline(always)]
pub fn hash_iter<'a>(slices: impl Iterator<Item = &'a [u8]>) -> Sha256Digest {
<Self as HasherExt>::hash_iter(slices)
}
}
// SHA512 DIGEST
// ================================================================================================
/// SHA-512 digest (64 bytes).
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
#[cfg_attr(feature = "serde", serde(into = "String", try_from = "&str"))]
#[repr(transparent)]
pub struct Sha512Digest([u8; DIGEST512_BYTES]);
impl Sha512Digest {
pub fn digests_as_bytes(digests: &[Sha512Digest]) -> &[u8] {
let p = digests.as_ptr();
let len = digests.len() * DIGEST512_BYTES;
unsafe { slice::from_raw_parts(p as *const u8, len) }
}
}
impl Default for Sha512Digest {
fn default() -> Self {
Self([0; DIGEST512_BYTES])
}
}
impl Deref for Sha512Digest {
type Target = [u8];
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<Sha512Digest> for [u8; DIGEST512_BYTES] {
fn from(value: Sha512Digest) -> Self {
value.0
}
}
impl From<[u8; DIGEST512_BYTES]> for Sha512Digest {
fn from(value: [u8; DIGEST512_BYTES]) -> Self {
Self(value)
}
}
impl From<Sha512Digest> for String {
fn from(value: Sha512Digest) -> Self {
bytes_to_hex_string(value.0)
}
}
impl TryFrom<&str> for Sha512Digest {
type Error = HexParseError;
fn try_from(value: &str) -> Result<Self, Self::Error> {
hex_to_bytes(value).map(|v| v.into())
}
}
impl Serializable for Sha512Digest {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
target.write_bytes(&self.0);
}
}
impl Deserializable for Sha512Digest {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
source.read_array().map(Self)
}
}
// NOTE: Sha512 intentionally does not implement the Hasher, HasherExt, ElementHasher,
// or Digest traits. See the module-level documentation for details.
// SHA512 HASHER
// ================================================================================================
/// SHA-512 hash function.
///
/// Unlike [Sha256], this struct does not implement the `Hasher`, `HasherExt`, or `ElementHasher`
/// traits because those traits require `Digest`, which mandates a 32-byte output. SHA-512
/// produces a 64-byte digest, and truncating it would create confusion with SHA-512/256.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct Sha512;
impl Sha512 {
/// Returns a hash of the provided sequence of bytes.
#[inline(always)]
pub fn hash(bytes: &[u8]) -> Sha512Digest {
let mut hasher = sha2::Sha512::new();
hasher.update(bytes);
Sha512Digest(hasher.finalize().into())
}
/// Returns a hash of two digests. This method is intended for use in construction of
/// Merkle trees and verification of Merkle paths.
#[inline(always)]
pub fn merge(values: &[Sha512Digest; 2]) -> Sha512Digest {
Self::hash(prepare_merge(values))
}
/// Returns a hash of the provided digests.
#[inline(always)]
pub fn merge_many(values: &[Sha512Digest]) -> Sha512Digest {
let data = Sha512Digest::digests_as_bytes(values);
let mut hasher = sha2::Sha512::new();
hasher.update(data);
Sha512Digest(hasher.finalize().into())
}
/// Returns a hash of the provided field elements.
#[inline(always)]
pub fn hash_elements<E>(elements: &[E]) -> Sha512Digest
where
E: BasedVectorSpace<Felt>,
{
Sha512Digest(hash_elements_512(elements))
}
/// Hashes an iterator of byte slices.
#[inline(always)]
pub fn hash_iter<'a>(slices: impl Iterator<Item = &'a [u8]>) -> Sha512Digest {
let mut hasher = sha2::Sha512::new();
for slice in slices {
hasher.update(slice);
}
Sha512Digest(hasher.finalize().into())
}
}
// HELPER FUNCTIONS
// ================================================================================================
/// Hash the elements into bytes for SHA-256.
fn hash_elements_256<E>(elements: &[E]) -> [u8; DIGEST256_BYTES]
where
E: BasedVectorSpace<Felt>,
{
let digest = {
const FELT_BYTES: usize = size_of::<u64>();
const { assert!(FELT_BYTES == 8, "buffer arithmetic assumes 8-byte field elements") };
let mut hasher = sha2::Sha256::new();
// SHA-256 block size: 64 bytes
let mut buf = [0_u8; 64];
let mut buf_offset = 0;
for elem in elements.iter() {
for &felt in E::as_basis_coefficients_slice(elem) {
buf[buf_offset..buf_offset + FELT_BYTES]
.copy_from_slice(&felt.as_canonical_u64().to_le_bytes());
buf_offset += FELT_BYTES;
if buf_offset == 64 {
hasher.update(buf);
buf_offset = 0;
}
}
}
if buf_offset > 0 {
hasher.update(&buf[..buf_offset]);
}
hasher.finalize()
};
digest.into()
}
/// Hash the elements into bytes for SHA-512.
fn hash_elements_512<E>(elements: &[E]) -> [u8; DIGEST512_BYTES]
where
E: BasedVectorSpace<Felt>,
{
let digest = {
const FELT_BYTES: usize = size_of::<u64>();
const { assert!(FELT_BYTES == 8, "buffer arithmetic assumes 8-byte field elements") };
let mut hasher = sha2::Sha512::new();
// SHA-512 block size: 128 bytes
let mut buf = [0_u8; 128];
let mut buf_offset = 0;
for elem in elements.iter() {
for &felt in E::as_basis_coefficients_slice(elem) {
buf[buf_offset..buf_offset + FELT_BYTES]
.copy_from_slice(&felt.as_canonical_u64().to_le_bytes());
buf_offset += FELT_BYTES;
if buf_offset == 128 {
hasher.update(buf);
buf_offset = 0;
}
}
}
if buf_offset > 0 {
hasher.update(&buf[..buf_offset]);
}
hasher.finalize()
};
digest.into()
}
/// Cast the slice into contiguous bytes.
fn prepare_merge<const N: usize, D>(args: &[D; N]) -> &[u8]
where
D: Deref<Target = [u8]>,
{
// compile-time assertion
assert!(N > 0, "N shouldn't represent an empty slice!");
let values = args.as_ptr() as *const u8;
let len = size_of::<D>() * N;
// safety: the values are tested to be contiguous
let bytes = unsafe { from_raw_parts(values, len) };
debug_assert_eq!(args[0].deref(), &bytes[..len / N]);
bytes
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/blake/tests.rs | miden-crypto/src/hash/blake/tests.rs | #![cfg(feature = "std")]
use alloc::vec::Vec;
use p3_field::PrimeField64;
use p3_miden_goldilocks::Goldilocks as Felt;
use proptest::prelude::*;
use super::*;
use crate::rand::test_utils::rand_vector;
#[test]
fn blake3_hash_elements() {
// test multiple of 8
let elements = rand_vector::<Felt>(16);
let expected = compute_expected_element_hash(&elements);
let actual: [u8; 32] = hash_elements(&elements);
assert_eq!(&expected, &actual);
// test not multiple of 8
let elements = rand_vector::<Felt>(17);
let expected = compute_expected_element_hash(&elements);
let actual: [u8; 32] = hash_elements(&elements);
assert_eq!(&expected, &actual);
}
proptest! {
#[test]
fn blake192_wont_panic_with_arbitrary_input(ref vec in any::<Vec<u8>>()) {
Blake3_192::hash(vec);
}
#[test]
fn blake256_wont_panic_with_arbitrary_input(ref vec in any::<Vec<u8>>()) {
Blake3_256::hash(vec);
}
#[test]
fn blake256_hash_iter_matches_hash(ref slices in any::<Vec<Vec<u8>>>()) {
// Test that hash_iter produces the same result as concatenating all slices
// Concatenate all slices to create the expected result using the original hash method
let mut concatenated = Vec::new();
for slice in slices.iter() {
concatenated.extend_from_slice(slice);
}
let expected = Blake3_256::hash(&concatenated);
// Test with the original iterator of slices (converting Vec<u8> to &[u8])
let actual = Blake3_256::hash_iter(slices.iter().map(|v| v.as_slice()));
assert_eq!(expected, actual);
// Test with empty slices list (should produce hash of empty string)
let empty_actual = Blake3_256::hash_iter(core::iter::empty());
let empty_expected = Blake3_256::hash(b"");
assert_eq!(empty_expected, empty_actual);
// Test with single slice (should be identical to hash)
if let Some(single_slice) = slices.first() {
let single_actual = Blake3_256::hash_iter(core::iter::once(single_slice.as_slice()));
let single_expected = Blake3_256::hash(single_slice);
assert_eq!(single_expected, single_actual);
}
}
#[test]
fn blake192_hash_iter_matches_hash(ref slices in any::<Vec<Vec<u8>>>()) {
// Test that hash_iter produces the same result as concatenating all slices
// Concatenate all slices to create the expected result using the original hash method
let mut concatenated = Vec::new();
for slice in slices.iter() {
concatenated.extend_from_slice(slice);
}
let expected = Blake3_192::hash(&concatenated);
// Test with the original iterator of slices (converting Vec<u8> to &[u8])
let actual = Blake3_192::hash_iter(slices.iter().map(|v| v.as_slice()));
assert_eq!(expected, actual);
// Test with empty slices list (should produce hash of empty string)
let empty_actual = Blake3_192::hash_iter(core::iter::empty());
let empty_expected = Blake3_192::hash(b"");
assert_eq!(empty_expected, empty_actual);
// Test with single slice (should be identical to hash)
if let Some(single_slice) = slices.first() {
let single_actual = Blake3_192::hash_iter(core::iter::once(single_slice.as_slice()));
let single_expected = Blake3_192::hash(single_slice);
assert_eq!(single_expected, single_actual);
}
}
}
// HELPER FUNCTIONS
// ================================================================================================
fn compute_expected_element_hash(elements: &[Felt]) -> blake3::Hash {
let mut bytes = Vec::new();
for element in elements.iter() {
bytes.extend_from_slice(&((*element).as_canonical_u64()).to_le_bytes());
}
blake3::hash(&bytes)
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/blake/mod.rs | miden-crypto/src/hash/blake/mod.rs | use alloc::string::String;
use core::{
mem::size_of,
ops::Deref,
slice::{self, from_raw_parts},
};
use p3_field::{BasedVectorSpace, PrimeField64};
use p3_miden_goldilocks::Goldilocks as Felt;
use super::HasherExt;
use crate::utils::{
ByteReader, ByteWriter, Deserializable, DeserializationError, HexParseError, Serializable,
bytes_to_hex_string, hex_to_bytes,
};
#[cfg(test)]
mod tests;
// CONSTANTS
// ================================================================================================
const DIGEST32_BYTES: usize = 32;
const DIGEST24_BYTES: usize = 24;
// BLAKE3 N-BIT OUTPUT
// ================================================================================================
/// N-bytes output of a blake3 function.
///
/// Note: `N` can't be greater than `32` because [`Blake3Digest::as_bytes`] currently supports only
/// 32 bytes.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
#[cfg_attr(feature = "serde", serde(into = "String", try_from = "&str"))]
#[repr(transparent)]
pub struct Blake3Digest<const N: usize>([u8; N]);
impl<const N: usize> Blake3Digest<N> {
pub fn as_bytes(&self) -> [u8; 32] {
// compile-time assertion
assert!(N <= 32, "digest currently supports only 32 bytes!");
expand_bytes(&self.0)
}
pub fn digests_as_bytes(digests: &[Blake3Digest<N>]) -> &[u8] {
let p = digests.as_ptr();
let len = digests.len() * N;
unsafe { slice::from_raw_parts(p as *const u8, len) }
}
}
impl<const N: usize> Default for Blake3Digest<N> {
fn default() -> Self {
Self([0; N])
}
}
impl<const N: usize> Deref for Blake3Digest<N> {
type Target = [u8];
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<const N: usize> From<Blake3Digest<N>> for [u8; N] {
fn from(value: Blake3Digest<N>) -> Self {
value.0
}
}
impl<const N: usize> From<[u8; N]> for Blake3Digest<N> {
fn from(value: [u8; N]) -> Self {
Self(value)
}
}
impl<const N: usize> From<Blake3Digest<N>> for String {
fn from(value: Blake3Digest<N>) -> Self {
bytes_to_hex_string(value.as_bytes())
}
}
impl<const N: usize> TryFrom<&str> for Blake3Digest<N> {
type Error = HexParseError;
fn try_from(value: &str) -> Result<Self, Self::Error> {
hex_to_bytes(value).map(|v| v.into())
}
}
impl<const N: usize> Serializable for Blake3Digest<N> {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
target.write_bytes(&self.0);
}
}
impl<const N: usize> Deserializable for Blake3Digest<N> {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
source.read_array().map(Self)
}
}
// BLAKE3 256-BIT OUTPUT
// ================================================================================================
/// 256-bit output blake3 hasher.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct Blake3_256;
impl HasherExt for Blake3_256 {
type Digest = Blake3Digest<32>;
fn hash_iter<'a>(slices: impl Iterator<Item = &'a [u8]>) -> Self::Digest {
let mut hasher = blake3::Hasher::new();
for slice in slices {
hasher.update(slice);
}
Blake3Digest(hasher.finalize().into())
}
}
impl Blake3_256 {
/// Blake3 collision resistance is 128-bits for 32-bytes output.
pub const COLLISION_RESISTANCE: u32 = 128;
pub fn hash(bytes: &[u8]) -> Blake3Digest<32> {
Blake3Digest(blake3::hash(bytes).into())
}
// Note: merge/merge_many/merge_with_int methods were previously trait delegations
// (<Self as Hasher>::merge). They're now direct implementations as part of removing
// the Winterfell Hasher trait dependency. These are public API used in benchmarks.
pub fn merge(values: &[Blake3Digest<32>; 2]) -> Blake3Digest<32> {
Self::hash(prepare_merge(values))
}
pub fn merge_many(values: &[Blake3Digest<32>]) -> Blake3Digest<32> {
Blake3Digest(blake3::hash(Blake3Digest::digests_as_bytes(values)).into())
}
pub fn merge_with_int(seed: Blake3Digest<32>, value: u64) -> Blake3Digest<32> {
let mut hasher = blake3::Hasher::new();
hasher.update(&seed.0);
hasher.update(&value.to_le_bytes());
Blake3Digest(hasher.finalize().into())
}
/// Returns a hash of the provided field elements.
#[inline(always)]
pub fn hash_elements<E: BasedVectorSpace<Felt>>(elements: &[E]) -> Blake3Digest<32> {
Blake3Digest(hash_elements(elements))
}
/// Hashes an iterator of byte slices.
#[inline(always)]
pub fn hash_iter<'a>(slices: impl Iterator<Item = &'a [u8]>) -> Blake3Digest<DIGEST32_BYTES> {
<Self as HasherExt>::hash_iter(slices)
}
}
// BLAKE3 192-BIT OUTPUT
// ================================================================================================
/// 192-bit output blake3 hasher.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct Blake3_192;
impl HasherExt for Blake3_192 {
type Digest = Blake3Digest<24>;
fn hash_iter<'a>(slices: impl Iterator<Item = &'a [u8]>) -> Self::Digest {
let mut hasher = blake3::Hasher::new();
for slice in slices {
hasher.update(slice);
}
Blake3Digest(shrink_array(hasher.finalize().into()))
}
}
impl Blake3_192 {
    /// Blake3 collision resistance is 96-bits for 24-bytes output.
    pub const COLLISION_RESISTANCE: u32 = 96;

    /// Returns a 24-byte blake3 hash of the provided bytes.
    pub fn hash(bytes: &[u8]) -> Blake3Digest<24> {
        Blake3Digest(shrink_array(blake3::hash(bytes).into()))
    }

    // Note: Same as Blake3_256 - these methods replaced trait delegations to remove Winterfell.

    /// Returns a hash of an arbitrary number of 24-byte digests.
    pub fn merge_many(values: &[Blake3Digest<24>]) -> Blake3Digest<24> {
        let bytes = Blake3Digest::digests_as_bytes(values);
        Blake3Digest(shrink_array(blake3::hash(bytes).into()))
    }

    /// Returns a hash of two 24-byte digests; intended for Merkle-tree node merging.
    pub fn merge(values: &[Blake3Digest<24>; 2]) -> Blake3Digest<24> {
        Self::hash(prepare_merge(values))
    }

    /// Returns hash(`seed` || `value`), with `value` encoded as little-endian bytes.
    pub fn merge_with_int(seed: Blake3Digest<24>, value: u64) -> Blake3Digest<24> {
        let mut hasher = blake3::Hasher::new();
        hasher.update(&seed.0);
        hasher.update(&value.to_le_bytes());
        Blake3Digest(shrink_array(hasher.finalize().into()))
    }

    /// Returns a hash of the provided field elements.
    ///
    /// Fix: this previously returned `Blake3Digest<32>`, which was inconsistent
    /// with this 192-bit hasher (its `HasherExt::Digest` is `Blake3Digest<24>`)
    /// and was evidently copy-pasted from `Blake3_256`.
    #[inline(always)]
    pub fn hash_elements<E: BasedVectorSpace<Felt>>(elements: &[E]) -> Blake3Digest<24> {
        Blake3Digest(hash_elements(elements))
    }

    /// Hashes an iterator of byte slices.
    #[inline(always)]
    pub fn hash_iter<'a>(slices: impl Iterator<Item = &'a [u8]>) -> Blake3Digest<DIGEST24_BYTES> {
        <Self as HasherExt>::hash_iter(slices)
    }
}
// HELPER FUNCTIONS
// ================================================================================================
/// Serializes the field elements into little-endian bytes, hashes them with
/// blake3, and shrinks the 32-byte output to `N` bytes.
fn hash_elements<const N: usize, E>(elements: &[E]) -> [u8; N]
where
    E: BasedVectorSpace<Felt>,
{
    const FELT_BYTES: usize = size_of::<u64>();
    const { assert!(FELT_BYTES == 8, "buffer arithmetic assumes 8-byte field elements") };

    let mut hasher = blake3::Hasher::new();
    // Accumulate encoded elements into one 64-byte buffer (blake3's block size)
    // so the hasher is fed whole blocks whenever possible.
    let mut block = [0_u8; 64];
    let mut filled = 0;
    for element in elements.iter() {
        for &coefficient in E::as_basis_coefficients_slice(element) {
            let encoded = coefficient.as_canonical_u64().to_le_bytes();
            block[filled..filled + FELT_BYTES].copy_from_slice(&encoded);
            filled += FELT_BYTES;
            if filled == block.len() {
                hasher.update(&block);
                filled = 0;
            }
        }
    }
    // Flush any partially filled block.
    if filled > 0 {
        hasher.update(&block[..filled]);
    }
    shrink_array(hasher.finalize().into())
}
/// Truncates an array to its first `N` bytes.
///
/// The `const` block guarantees at monomorphization time that `N` never
/// exceeds `M`; the copy is typically elided by the optimizer.
fn shrink_array<const M: usize, const N: usize>(source: [u8; M]) -> [u8; N] {
    const {
        assert!(M >= N, "size of destination should be smaller or equal than source");
    }
    let mut shrunk = [0_u8; N];
    shrunk.copy_from_slice(&source[..N]);
    shrunk
}
/// Owned bytes expansion: copies `bytes` into the front of a zero-padded
/// array of length `N`.
fn expand_bytes<const M: usize, const N: usize>(bytes: &[u8; M]) -> [u8; N] {
    // compile-time assertion — previously a runtime `assert!` despite the
    // comment; the `const` block makes the check happen at monomorphization
    // time, matching `shrink_array`.
    const {
        assert!(M <= N, "M should fit in N so M can be expanded!");
    }
    let mut expanded = [0u8; N];
    expanded[..M].copy_from_slice(bytes);
    expanded
}
// Cast the slice of values into their contiguous byte representation.
fn prepare_merge<const N: usize, D>(args: &[D; N]) -> &[u8]
where
    D: Deref<Target = [u8]>,
{
    // compile-time assertion — previously a runtime `assert!` despite the
    // comment; the `const` block enforces it at monomorphization time.
    const {
        assert!(N > 0, "N shouldn't represent an empty slice!");
    }
    let values = args.as_ptr() as *const u8;
    let len = size_of::<D>() * N;
    // SAFETY: `args` references `N` contiguous values of type `D`, so reading
    // `size_of::<D>() * N` bytes starting at `args.as_ptr()` stays within one
    // live allocation for the duration of the returned borrow. This is only
    // meaningful when `D` stores its bytes inline (as the digest types used
    // here do); the debug assertion below cross-checks that the first value's
    // dereferenced bytes coincide with the raw byte view.
    let bytes = unsafe { from_raw_parts(values, len) };
    debug_assert_eq!(args[0].deref(), &bytes[..len / N]);
    bytes
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/algebraic_sponge/mod.rs | miden-crypto/src/hash/algebraic_sponge/mod.rs | //! Algebraic sponge-based hash functions.
//!
//! These are hash functions based on the sponge construction, which itself is defined from
//! a cryptographic permutation function and a padding rule.
//!
//! Throughout the module, the padding rule used is the one in <https://eprint.iacr.org/2023/1045>.
//! The core of the definition of an algebraic sponge-based hash function is then the definition
//! of its cryptographic permutation function. This can be done by implementing the trait
//! `[AlgebraicSponge]` which boils down to implementing the `apply_permutation` method.
//!
//! There are currently three algebraic sponge-based hash functions implemented in the module, RPO
//! and RPX hash functions, both of which belong to the Rescue family of hash functions, and
//! Poseidon2 hash function.
use core::ops::Range;
use p3_field::PrimeCharacteristicRing;
use super::{Felt, Word, ZERO};
use crate::field::{BasedVectorSpace, PrimeField64};
pub(crate) mod poseidon2;
pub(crate) mod rescue;
// CONSTANTS
// ================================================================================================
/// Sponge state is set to 12 field elements or 96 bytes; 8 elements are reserved for rate and
/// the remaining 4 elements are reserved for capacity.
pub(crate) const STATE_WIDTH: usize = 12;
/// The rate portion of the state is located in elements 4 through 11.
pub(crate) const RATE_RANGE: Range<usize> = 4..12;
/// Number of rate elements (8).
pub(crate) const RATE_WIDTH: usize = RATE_RANGE.end - RATE_RANGE.start;
/// First half of the rate (elements 4..8); `merge_with_int` places the seed here.
pub(crate) const INPUT1_RANGE: Range<usize> = 4..8;
/// Second half of the rate (elements 8..12); `merge_with_int` places the integer value here.
pub(crate) const INPUT2_RANGE: Range<usize> = 8..12;
/// The capacity portion of the state is located in elements 0, 1, 2, and 3.
pub(crate) const CAPACITY_RANGE: Range<usize> = 0..4;
/// The output of the hash function is a digest which consists of 4 field elements or 32 bytes.
///
/// The digest is returned from state elements 4, 5, 6, and 7 (the first four elements of the
/// rate portion).
pub(crate) const DIGEST_RANGE: Range<usize> = 4..8;
/// The number of byte chunks defining a field element when hashing a sequence of bytes
const BINARY_CHUNK_SIZE: usize = 7;
/// S-Box and Inverse S-Box powers;
///
/// The constants are defined for tests only because the exponentiations in the code are unrolled
/// for efficiency reasons.
#[cfg(all(test, feature = "std"))]
pub(crate) const ALPHA: u64 = 7;
/// Inverse exponent of `ALPHA`: the tests check that x^(ALPHA * INV_ALPHA) == x.
#[cfg(all(test, feature = "std"))]
pub(crate) const INV_ALPHA: u64 = 10540996611094048183;
// ALGEBRAIC SPONGE
// ================================================================================================
/// Core of an algebraic sponge hash function over a 12-element state.
///
/// Implementors supply only the cryptographic permutation; absorbing, padding
/// (per the rule referenced in the module docs), and squeezing are provided by
/// the default methods below.
pub(crate) trait AlgebraicSponge {
    /// Applies the underlying cryptographic permutation to the full state in place.
    fn apply_permutation(state: &mut [Felt; STATE_WIDTH]);
    /// Returns a hash of the provided field elements.
    fn hash_elements<E>(elements: &[E]) -> Word
    where
        E: BasedVectorSpace<Felt>,
    {
        // Count total number of base field elements without collecting
        let total_len = elements
            .iter()
            .map(|elem| E::as_basis_coefficients_slice(elem).len())
            .sum::<usize>();
        // initialize state to all zeros, except for the first element of the capacity part, which
        // is set to `total_len % RATE_WIDTH`.
        let mut state = [ZERO; STATE_WIDTH];
        state[CAPACITY_RANGE.start] = Felt::from_u8((total_len % RATE_WIDTH) as u8);
        // absorb elements into the state one by one until the rate portion of the state is filled
        // up; then apply the permutation and start absorbing again; repeat until all
        // elements have been absorbed
        let mut i = 0;
        for elem in elements.iter() {
            for &felt in E::as_basis_coefficients_slice(elem) {
                state[RATE_RANGE.start + i] = felt;
                i += 1;
                if i.is_multiple_of(RATE_WIDTH) {
                    Self::apply_permutation(&mut state);
                    i = 0;
                }
            }
        }
        // if we absorbed some elements but didn't apply a permutation to them (would happen when
        // the number of elements is not a multiple of RATE_WIDTH), apply the permutation after
        // padding by as many 0 as necessary to make the input length a multiple of the RATE_WIDTH.
        if i > 0 {
            while i != RATE_WIDTH {
                state[RATE_RANGE.start + i] = ZERO;
                i += 1;
            }
            Self::apply_permutation(&mut state);
        }
        // return the first 4 elements of the state as hash result
        Word::new(state[DIGEST_RANGE].try_into().unwrap())
    }
    /// Returns a hash of the provided sequence of bytes.
    fn hash(bytes: &[u8]) -> Word {
        // initialize the state with zeroes
        let mut state = [ZERO; STATE_WIDTH];
        // determine the number of field elements needed to encode `bytes` when each field element
        // represents at most 7 bytes.
        let num_field_elem = bytes.len().div_ceil(BINARY_CHUNK_SIZE);
        // set the first capacity element to `RATE_WIDTH + (num_field_elem % RATE_WIDTH)`. We do
        // this to achieve:
        // 1. Domain separating hashing of `[u8]` from hashing of `[Felt]`.
        // 2. Avoiding collisions at the `[Felt]` representation of the encoded bytes.
        state[CAPACITY_RANGE.start] =
            Felt::from_u8((RATE_WIDTH + (num_field_elem % RATE_WIDTH)) as u8);
        // initialize a buffer to receive the little-endian elements.
        let mut buf = [0_u8; 8];
        // iterate the chunks of bytes, creating a field element from each chunk and copying it
        // into the state.
        //
        // every time the rate range is filled, a permutation is performed. if the final value of
        // `rate_pos` is not zero, then the chunks count wasn't enough to fill the state range,
        // and an additional permutation must be performed.
        let mut current_chunk_idx = 0_usize;
        // handle the case of an empty `bytes`
        let last_chunk_idx = if num_field_elem == 0 {
            current_chunk_idx
        } else {
            num_field_elem - 1
        };
        let rate_pos = bytes.chunks(BINARY_CHUNK_SIZE).fold(0, |rate_pos, chunk| {
            // copy the chunk into the buffer
            if current_chunk_idx != last_chunk_idx {
                buf[..BINARY_CHUNK_SIZE].copy_from_slice(chunk);
            } else {
                // on the last iteration, we pad `buf` with a 1 followed by as many 0's as are
                // needed to fill it
                buf.fill(0);
                buf[..chunk.len()].copy_from_slice(chunk);
                buf[chunk.len()] = 1;
            }
            current_chunk_idx += 1;
            // set the current rate element to the input. since we take at most 7 bytes, we are
            // guaranteed that the inputs data will fit into a single field element.
            state[RATE_RANGE.start + rate_pos] = Felt::new(u64::from_le_bytes(buf));
            // proceed filling the range. if it's full, then we apply a permutation and reset the
            // counter to the beginning of the range.
            if rate_pos == RATE_WIDTH - 1 {
                Self::apply_permutation(&mut state);
                0
            } else {
                rate_pos + 1
            }
        });
        // if we absorbed some elements but didn't apply a permutation to them (would happen when
        // the number of elements is not a multiple of RATE_WIDTH), apply the permutation. we
        // don't need to apply any extra padding because the first capacity element contains a
        // flag indicating the number of field elements constituting the last block when the latter
        // is not divisible by `RATE_WIDTH`.
        if rate_pos != 0 {
            state[RATE_RANGE.start + rate_pos..RATE_RANGE.end].fill(ZERO);
            Self::apply_permutation(&mut state);
        }
        // return the first 4 elements of the rate as hash result.
        Word::new(state[DIGEST_RANGE].try_into().unwrap())
    }
    /// Returns a hash of two digests. This method is intended for use in construction of
    /// Merkle trees and verification of Merkle paths.
    fn merge(values: &[Word; 2]) -> Word {
        // initialize the state by copying the digest elements into the rate portion of the state
        // (8 total elements), and set the capacity elements to 0.
        let mut state = [ZERO; STATE_WIDTH];
        let it = Word::words_as_elements_iter(values.iter());
        for (i, v) in it.enumerate() {
            state[RATE_RANGE.start + i] = *v;
        }
        // apply the permutation and return the digest portion of the state
        Self::apply_permutation(&mut state);
        Word::new(state[DIGEST_RANGE].try_into().unwrap())
    }
    /// Returns a hash of many digests.
    fn merge_many(values: &[Word]) -> Word {
        let elements = Word::words_as_elements(values);
        Self::hash_elements(elements)
    }
    /// Returns hash(`seed` || `value`). This method is intended for use in PRNG and PoW contexts.
    fn merge_with_int(seed: Word, value: u64) -> Word {
        // initialize the state as follows:
        // - seed is copied into the first 4 elements of the rate portion of the state.
        // - if the value fits into a single field element, copy it into the fifth rate element and
        //   set the first capacity element to 5.
        // - if the value doesn't fit into a single field element, split it into two field elements,
        //   copy them into rate elements 5 and 6 and set the first capacity element to 6.
        let mut state = [ZERO; STATE_WIDTH];
        state[INPUT1_RANGE].copy_from_slice(seed.as_elements());
        state[INPUT2_RANGE.start] = Felt::new(value);
        if value < Felt::ORDER_U64 {
            state[CAPACITY_RANGE.start] = Felt::from_u8(5_u8);
        } else {
            state[INPUT2_RANGE.start + 1] = Felt::new(value / Felt::ORDER_U64);
            state[CAPACITY_RANGE.start] = Felt::from_u8(6_u8);
        }
        // apply the permutation and return the digest portion of the rate
        Self::apply_permutation(&mut state);
        Word::new(state[DIGEST_RANGE].try_into().unwrap())
    }
    // DOMAIN IDENTIFIER HASHING
    // --------------------------------------------------------------------------------------------
    /// Returns a hash of two digests and a domain identifier.
    fn merge_in_domain(values: &[Word; 2], domain: Felt) -> Word {
        // initialize the state by copying the digest elements into the rate portion of the state
        // (8 total elements), and set the capacity elements to 0.
        let mut state = [ZERO; STATE_WIDTH];
        let it = Word::words_as_elements_iter(values.iter());
        for (i, v) in it.enumerate() {
            state[RATE_RANGE.start + i] = *v;
        }
        // set the second capacity element to the domain value. The first capacity element is used
        // for padding purposes.
        state[CAPACITY_RANGE.start + 1] = domain;
        // apply the permutation and return the first four elements of the state
        Self::apply_permutation(&mut state);
        Word::new(state[DIGEST_RANGE].try_into().unwrap())
    }
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/algebraic_sponge/rescue/tests.rs | miden-crypto/src/hash/algebraic_sponge/rescue/tests.rs | #![cfg(feature = "std")]
use p3_field::PrimeCharacteristicRing;
use super::{ALPHA, Felt, INV_ALPHA};
use crate::rand::test_utils::rand_value;
#[test]
fn test_alphas() {
    // For a random field element x, x^(ALPHA * INV_ALPHA) must equal x, i.e.
    // INV_ALPHA really is the inverse exponent of the S-Box power ALPHA.
    let sample: Felt = Felt::new(rand_value());
    let raised = sample.exp_u64(ALPHA);
    assert_eq!(sample, raised.exp_u64(INV_ALPHA));
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/algebraic_sponge/rescue/mod.rs | miden-crypto/src/hash/algebraic_sponge/rescue/mod.rs | use p3_field::{Field, PrimeCharacteristicRing};
#[cfg(all(test, feature = "std"))]
pub(crate) use super::{ALPHA, INV_ALPHA};
use super::{
AlgebraicSponge, CAPACITY_RANGE, DIGEST_RANGE, Felt, RATE_RANGE, Range, STATE_WIDTH, Word, ZERO,
};
mod arch;
pub use arch::optimized::{
add_constants_and_apply_ext_round, add_constants_and_apply_inv_sbox,
add_constants_and_apply_sbox,
};
mod mds;
use mds::{MDS, apply_mds};
pub(crate) mod rpo;
pub(crate) mod rpx;
#[cfg(test)]
mod tests;
// CONSTANTS
// ================================================================================================
/// The number of rounds is set to 7. For the RPO hash function all rounds are uniform. For the
/// RPX hash function, there are 3 different types of rounds.
///
/// Also sizes the `ARK1`/`ARK2` round-constant tables below.
const NUM_ROUNDS: usize = 7;
// SBOX FUNCTION
// ================================================================================================
#[inline(always)]
fn apply_sbox(state: &mut [Felt; STATE_WIDTH]) {
    // Raise every state element to the 7th power (x^ALPHA); the loop over a
    // fixed-size array replaces the previous manual 12-line unrolling.
    for element in state.iter_mut() {
        *element = element.exp_const_u64::<7>();
    }
}
// INVERSE SBOX FUNCTION
// ================================================================================================
#[inline(always)]
fn apply_inv_sbox(state: &mut [Felt; STATE_WIDTH]) {
    // Raises every state element to INV_ALPHA = 10540996611094048183 via a fixed
    // addition chain, evaluated across all 12 lanes at once.
    // compute base^10540996611094048183 using 72 multiplications per array element
    // 10540996611094048183 = b1001001001001001001001001001000110110110110110110110110110110111
    // compute base^10
    let mut t1 = *state;
    t1.iter_mut().for_each(|t| *t = t.square());
    // compute base^100
    let mut t2 = t1;
    t2.iter_mut().for_each(|t| *t = t.square());
    // compute base^100100
    let t3 = exp_acc::<Felt, STATE_WIDTH, 3>(t2, t2);
    // compute base^100100100100
    let t4 = exp_acc::<Felt, STATE_WIDTH, 6>(t3, t3);
    // compute base^100100100100100100100100
    let t5 = exp_acc::<Felt, STATE_WIDTH, 12>(t4, t4);
    // compute base^100100100100100100100100100100
    let t6 = exp_acc::<Felt, STATE_WIDTH, 6>(t5, t3);
    // compute base^1001001001001001001001001001000100100100100100100100100100100
    let t7 = exp_acc::<Felt, STATE_WIDTH, 31>(t6, t6);
    // compute base^1001001001001001001001001001000110110110110110110110110110110111
    for (i, s) in state.iter_mut().enumerate() {
        let a = (t7[i].square() * t6[i]).square().square();
        let b = t1[i] * t2[i] * *s;
        *s = a * b;
    }
    // Helper: squares `base` M times and then multiplies element-wise by
    // `tail`, i.e. returns base^(2^M) * tail for each lane.
    #[inline(always)]
    fn exp_acc<B: Field, const N: usize, const M: usize>(base: [B; N], tail: [B; N]) -> [B; N] {
        let mut result = base;
        for _ in 0..M {
            result.iter_mut().for_each(|r| *r = r.square());
        }
        result.iter_mut().zip(tail).for_each(|(r, t)| *r *= t);
        result
    }
}
#[inline(always)]
fn add_constants(state: &mut [Felt; STATE_WIDTH], ark: &[Felt; STATE_WIDTH]) {
    // Element-wise addition of the round-constant vector into the state.
    for (element, constant) in state.iter_mut().zip(ark.iter()) {
        *element += *constant;
    }
}
// ROUND CONSTANTS
// ================================================================================================
/// Rescue round constants;
/// computed as in [specifications](https://github.com/ASDiscreteMathematics/rpo)
///
/// The constants are broken up into two arrays ARK1 and ARK2; ARK1 contains the constants for the
/// first half of RPO round, and ARK2 contains constants for the second half of RPO round.
/// Round constants added in the first half of each of the `NUM_ROUNDS` rounds
/// (one `STATE_WIDTH`-element vector per round).
const ARK1: [[Felt; STATE_WIDTH]; NUM_ROUNDS] = [
    [
        Felt::new(5789762306288267392),
        Felt::new(6522564764413701783),
        Felt::new(17809893479458208203),
        Felt::new(107145243989736508),
        Felt::new(6388978042437517382),
        Felt::new(15844067734406016715),
        Felt::new(9975000513555218239),
        Felt::new(3344984123768313364),
        Felt::new(9959189626657347191),
        Felt::new(12960773468763563665),
        Felt::new(9602914297752488475),
        Felt::new(16657542370200465908),
    ],
    [
        Felt::new(12987190162843096997),
        Felt::new(653957632802705281),
        Felt::new(4441654670647621225),
        Felt::new(4038207883745915761),
        Felt::new(5613464648874830118),
        Felt::new(13222989726778338773),
        Felt::new(3037761201230264149),
        Felt::new(16683759727265180203),
        Felt::new(8337364536491240715),
        Felt::new(3227397518293416448),
        Felt::new(8110510111539674682),
        Felt::new(2872078294163232137),
    ],
    [
        Felt::new(18072785500942327487),
        Felt::new(6200974112677013481),
        Felt::new(17682092219085884187),
        Felt::new(10599526828986756440),
        Felt::new(975003873302957338),
        Felt::new(8264241093196931281),
        Felt::new(10065763900435475170),
        Felt::new(2181131744534710197),
        Felt::new(6317303992309418647),
        Felt::new(1401440938888741532),
        Felt::new(8884468225181997494),
        Felt::new(13066900325715521532),
    ],
    [
        Felt::new(5674685213610121970),
        Felt::new(5759084860419474071),
        Felt::new(13943282657648897737),
        Felt::new(1352748651966375394),
        Felt::new(17110913224029905221),
        Felt::new(1003883795902368422),
        Felt::new(4141870621881018291),
        Felt::new(8121410972417424656),
        Felt::new(14300518605864919529),
        Felt::new(13712227150607670181),
        Felt::new(17021852944633065291),
        Felt::new(6252096473787587650),
    ],
    [
        Felt::new(4887609836208846458),
        Felt::new(3027115137917284492),
        Felt::new(9595098600469470675),
        Felt::new(10528569829048484079),
        Felt::new(7864689113198939815),
        Felt::new(17533723827845969040),
        Felt::new(5781638039037710951),
        Felt::new(17024078752430719006),
        Felt::new(109659393484013511),
        Felt::new(7158933660534805869),
        Felt::new(2955076958026921730),
        Felt::new(7433723648458773977),
    ],
    [
        Felt::new(16308865189192447297),
        Felt::new(11977192855656444890),
        Felt::new(12532242556065780287),
        Felt::new(14594890931430968898),
        Felt::new(7291784239689209784),
        Felt::new(5514718540551361949),
        Felt::new(10025733853830934803),
        Felt::new(7293794580341021693),
        Felt::new(6728552937464861756),
        Felt::new(6332385040983343262),
        Felt::new(13277683694236792804),
        Felt::new(2600778905124452676),
    ],
    [
        Felt::new(7123075680859040534),
        Felt::new(1034205548717903090),
        Felt::new(7717824418247931797),
        Felt::new(3019070937878604058),
        Felt::new(11403792746066867460),
        Felt::new(10280580802233112374),
        Felt::new(337153209462421218),
        Felt::new(13333398568519923717),
        Felt::new(3596153696935337464),
        Felt::new(8104208463525993784),
        Felt::new(14345062289456085693),
        Felt::new(17036731477169661256),
    ],
];
/// Round constants added in the second half of each of the `NUM_ROUNDS` rounds
/// (one `STATE_WIDTH`-element vector per round).
const ARK2: [[Felt; STATE_WIDTH]; NUM_ROUNDS] = [
    [
        Felt::new(6077062762357204287),
        Felt::new(15277620170502011191),
        Felt::new(5358738125714196705),
        Felt::new(14233283787297595718),
        Felt::new(13792579614346651365),
        Felt::new(11614812331536767105),
        Felt::new(14871063686742261166),
        Felt::new(10148237148793043499),
        Felt::new(4457428952329675767),
        Felt::new(15590786458219172475),
        Felt::new(10063319113072092615),
        Felt::new(14200078843431360086),
    ],
    [
        Felt::new(6202948458916099932),
        Felt::new(17690140365333231091),
        Felt::new(3595001575307484651),
        Felt::new(373995945117666487),
        Felt::new(1235734395091296013),
        Felt::new(14172757457833931602),
        Felt::new(707573103686350224),
        Felt::new(15453217512188187135),
        Felt::new(219777875004506018),
        Felt::new(17876696346199469008),
        Felt::new(17731621626449383378),
        Felt::new(2897136237748376248),
    ],
    [
        Felt::new(8023374565629191455),
        Felt::new(15013690343205953430),
        Felt::new(4485500052507912973),
        Felt::new(12489737547229155153),
        Felt::new(9500452585969030576),
        Felt::new(2054001340201038870),
        Felt::new(12420704059284934186),
        Felt::new(355990932618543755),
        Felt::new(9071225051243523860),
        Felt::new(12766199826003448536),
        Felt::new(9045979173463556963),
        Felt::new(12934431667190679898),
    ],
    [
        Felt::new(18389244934624494276),
        Felt::new(16731736864863925227),
        Felt::new(4440209734760478192),
        Felt::new(17208448209698888938),
        Felt::new(8739495587021565984),
        Felt::new(17000774922218161967),
        Felt::new(13533282547195532087),
        Felt::new(525402848358706231),
        Felt::new(16987541523062161972),
        Felt::new(5466806524462797102),
        Felt::new(14512769585918244983),
        Felt::new(10973956031244051118),
    ],
    [
        Felt::new(6982293561042362913),
        Felt::new(14065426295947720331),
        Felt::new(16451845770444974180),
        Felt::new(7139138592091306727),
        Felt::new(9012006439959783127),
        Felt::new(14619614108529063361),
        Felt::new(1394813199588124371),
        Felt::new(4635111139507788575),
        Felt::new(16217473952264203365),
        Felt::new(10782018226466330683),
        Felt::new(6844229992533662050),
        Felt::new(7446486531695178711),
    ],
    [
        Felt::new(3736792340494631448),
        Felt::new(577852220195055341),
        Felt::new(6689998335515779805),
        Felt::new(13886063479078013492),
        Felt::new(14358505101923202168),
        Felt::new(7744142531772274164),
        Felt::new(16135070735728404443),
        Felt::new(12290902521256031137),
        Felt::new(12059913662657709804),
        Felt::new(16456018495793751911),
        Felt::new(4571485474751953524),
        Felt::new(17200392109565783176),
    ],
    [
        Felt::new(17130398059294018733),
        Felt::new(519782857322261988),
        Felt::new(9625384390925085478),
        Felt::new(1664893052631119222),
        Felt::new(7629576092524553570),
        Felt::new(3485239601103661425),
        Felt::new(9755891797164033838),
        Felt::new(15218148195153269027),
        Felt::new(16460604813734957368),
        Felt::new(9643968136937729763),
        Felt::new(3611348709641382851),
        Felt::new(18256379591337759196),
    ],
];
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/algebraic_sponge/rescue/mds/freq.rs | miden-crypto/src/hash/algebraic_sponge/rescue/mds/freq.rs | // FFT-BASED MDS MULTIPLICATION HELPER FUNCTIONS
// ================================================================================================
//! This module contains helper functions as well as constants used to perform the vector-matrix
//! multiplication step of the Rescue prime permutation. The special form of our MDS matrix
//! i.e. being circular, allows us to reduce the vector-matrix multiplication to a Hadamard product
//! of two vectors in "frequency domain". This follows from the simple fact that every circulant
//! matrix has the columns of the discrete Fourier transform matrix as orthogonal eigenvectors.
//! The implementation also avoids the use of 3-point FFTs, and 3-point iFFTs, and substitutes that
//! with explicit expressions. It also avoids, due to the form of our matrix in the frequency
//! domain, divisions by 2 and repeated modular reductions. This is because of our explicit choice
//! of an MDS matrix that has small powers of 2 entries in frequency domain.
//! The following implementation has benefited greatly from the discussions and insights of
//! Hamish Ivey-Law and Jacqueline Nabaglo of Polygon Zero and is base on Nabaglo's Plonky2
//! implementation.
// Rescue MDS matrix in frequency domain.
//
// More precisely, this is the output of the three 4-point (real) FFTs of the first column of
// the MDS matrix i.e. just before the multiplication with the appropriate twiddle factors
// and application of the final four 3-point FFT in order to get the full 12-point FFT.
// The entries have been scaled appropriately in order to avoid divisions by 2 in iFFT2 and iFFT4.
// The code to generate the matrix in frequency domain is based on an adaptation of a code, to
// generate MDS matrices efficiently in original domain, that was developed by the Polygon Zero
// team.
const MDS_FREQ_BLOCK_ONE: [i64; 3] = [16, 8, 16];
const MDS_FREQ_BLOCK_TWO: [(i64, i64); 3] = [(-1, 2), (-1, 1), (4, 8)];
const MDS_FREQ_BLOCK_THREE: [i64; 3] = [-8, 1, 1];
// We use split 3 x 4 FFT transform in order to transform our vectors into the frequency domain.
/// Multiplies `state` by the circulant MDS matrix using its frequency-domain
/// representation: three 4-point real FFTs, blockwise products against the
/// precomputed `MDS_FREQ_BLOCK_*` constants, and three inverse 4-point real
/// FFTs.
///
/// Arithmetic here is plain `i64`/`u64`; modular reduction is left to the
/// caller (see `apply_mds`).
#[inline(always)]
pub const fn mds_multiply_freq(state: [u64; 12]) -> [u64; 12] {
    let [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11] = state;
    let (u0, u1, u2) = fft4_real([s0, s3, s6, s9]);
    let (u4, u5, u6) = fft4_real([s1, s4, s7, s10]);
    let (u8, u9, u10) = fft4_real([s2, s5, s8, s11]);
    // This where the multiplication in frequency domain is done. More precisely, and with
    // the appropriate permutations in between, the sequence of
    // 3-point FFTs --> multiplication by twiddle factors --> Hadamard multiplication -->
    // 3 point iFFTs --> multiplication by (inverse) twiddle factors
    // is "squashed" into one step composed of the functions "block1", "block2" and "block3".
    // The expressions in the aforementioned functions are the result of explicit computations
    // combined with the Karatsuba trick for the multiplication of Complex numbers.
    let [v0, v4, v8] = block1([u0, u4, u8], MDS_FREQ_BLOCK_ONE);
    let [v1, v5, v9] = block2([u1, u5, u9], MDS_FREQ_BLOCK_TWO);
    let [v2, v6, v10] = block3([u2, u6, u10], MDS_FREQ_BLOCK_THREE);
    // The 4th block is not computed as it is similar to the 2nd one, up to complex conjugation,
    // and is, due to the use of the real FFT and iFFT, redundant.
    let [s0, s3, s6, s9] = ifft4_real((v0, v1, v2));
    let [s1, s4, s7, s10] = ifft4_real((v4, v5, v6));
    let [s2, s5, s8, s11] = ifft4_real((v8, v9, v10));
    [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11]
}
// We use the real FFT to avoid redundant computations. See https://www.mdpi.com/2076-3417/12/9/4700
#[inline(always)]
const fn fft2_real(x: [u64; 2]) -> [i64; 2] {
    // 2-point real DFT: the outputs are the sum and difference of the inputs.
    let a = x[0] as i64;
    let b = x[1] as i64;
    [a + b, a - b]
}
#[inline(always)]
const fn ifft2_real(y: [i64; 2]) -> [u64; 2] {
    // Inverse 2-point transform, scaled by 2: the halving normally required
    // here is folded into the MDS constants, so no division takes place.
    let first = (y[0] + y[1]) as u64;
    let second = (y[0] - y[1]) as u64;
    [first, second]
}
#[inline(always)]
const fn fft4_real(x: [u64; 4]) -> (i64, (i64, i64), i64) {
    // 4-point real DFT built from two inlined 2-point transforms over the
    // even- and odd-indexed inputs. The result packs (X0, re/im of X1, X2);
    // X3 is the complex conjugate of X1 and is therefore omitted.
    let even_sum = x[0] as i64 + x[2] as i64;
    let even_diff = x[0] as i64 - x[2] as i64;
    let odd_sum = x[1] as i64 + x[3] as i64;
    let odd_diff = x[1] as i64 - x[3] as i64;
    (even_sum + odd_sum, (even_diff, -odd_diff), even_sum - odd_sum)
}
#[inline(always)]
const fn ifft4_real(y: (i64, (i64, i64), i64)) -> [u64; 4] {
    // Inverse 4-point transform with the two inverse 2-point stages inlined.
    // Divisions by 2 are avoided thanks to the scaling baked into the MDS
    // matrix constants.
    let z0 = y.0 + y.2;
    let z1 = y.0 - y.2;
    let z2 = y.1.0;
    let z3 = -y.1.1;
    [(z0 + z2) as u64, (z1 + z3) as u64, (z0 - z2) as u64, (z1 - z3) as u64]
}
#[inline(always)]
const fn block1(x: [i64; 3], y: [i64; 3]) -> [i64; 3] {
    // 3-element circular convolution (the purely real block of the
    // frequency-domain product).
    [
        x[0] * y[0] + x[1] * y[2] + x[2] * y[1],
        x[0] * y[1] + x[1] * y[0] + x[2] * y[2],
        x[0] * y[2] + x[1] * y[1] + x[2] * y[0],
    ]
}
#[inline(always)]
const fn block2(x: [(i64, i64); 3], y: [(i64, i64); 3]) -> [(i64, i64); 3] {
    // Complex 3-element convolution block. Each complex product a*b uses the
    // 3-multiplication Karatsuba identity:
    //   re(ab) = ar*br - ai*bi
    //   im(ab) = (ar + ai)(br + bi) - ar*br - ai*bi
    let [(x0r, x0i), (x1r, x1i), (x2r, x2i)] = x;
    let [(y0r, y0i), (y1r, y1i), (y2r, y2i)] = y;
    // Precompute the (re + im) sums used by every Karatsuba product below.
    let x0s = x0r + x0i;
    let x1s = x1r + x1i;
    let x2s = x2r + x2i;
    let y0s = y0r + y0i;
    let y1s = y1r + y1i;
    let y2s = y2r + y2i;
    // Compute x0y0 − ix1y2 − ix2y1 using Karatsuba for complex numbers multiplication
    let m0 = (x0r * y0r, x0i * y0i);
    let m1 = (x1r * y2r, x1i * y2i);
    let m2 = (x2r * y1r, x2i * y1i);
    let z0r = (m0.0 - m0.1) + (x1s * y2s - m1.0 - m1.1) + (x2s * y1s - m2.0 - m2.1);
    let z0i = (x0s * y0s - m0.0 - m0.1) + (-m1.0 + m1.1) + (-m2.0 + m2.1);
    let z0 = (z0r, z0i);
    // Compute x0y1 + x1y0 − ix2y2 using Karatsuba for complex numbers multiplication
    let m0 = (x0r * y1r, x0i * y1i);
    let m1 = (x1r * y0r, x1i * y0i);
    let m2 = (x2r * y2r, x2i * y2i);
    let z1r = (m0.0 - m0.1) + (m1.0 - m1.1) + (x2s * y2s - m2.0 - m2.1);
    let z1i = (x0s * y1s - m0.0 - m0.1) + (x1s * y0s - m1.0 - m1.1) + (-m2.0 + m2.1);
    let z1 = (z1r, z1i);
    // Compute x0y2 + x1y1 + x2y0 using Karatsuba for complex numbers multiplication
    let m0 = (x0r * y2r, x0i * y2i);
    let m1 = (x1r * y1r, x1i * y1i);
    let m2 = (x2r * y0r, x2i * y0i);
    let z2r = (m0.0 - m0.1) + (m1.0 - m1.1) + (m2.0 - m2.1);
    let z2i = (x0s * y2s - m0.0 - m0.1) + (x1s * y1s - m1.0 - m1.1) + (x2s * y0s - m2.0 - m2.1);
    let z2 = (z2r, z2i);
    [z0, z1, z2]
}
#[inline(always)]
const fn block3(x: [i64; 3], y: [i64; 3]) -> [i64; 3] {
    // 3-element convolution in which the wrapped-around products enter with a
    // negative sign (compare with `block1`, where they are all added).
    [
        x[0] * y[0] - x[1] * y[2] - x[2] * y[1],
        x[0] * y[1] + x[1] * y[0] - x[2] * y[2],
        x[0] * y[2] + x[1] * y[1] + x[2] * y[0],
    ]
}
// TESTS
// ================================================================================================
#[cfg(test)]
mod tests {
    use proptest::prelude::*;
    use super::super::{Felt, MDS, ZERO, apply_mds};
    const STATE_WIDTH: usize = 12;
    /// Reference implementation: direct row-by-row matrix-vector product with
    /// the `MDS` matrix in the original (time) domain.
    #[inline(always)]
    fn apply_mds_naive(state: &mut [Felt; STATE_WIDTH]) {
        let mut result = [ZERO; STATE_WIDTH];
        result.iter_mut().zip(MDS.iter()).for_each(|(r, mds_row)| {
            state.iter().zip(mds_row).for_each(|(&s, &m)| {
                *r += m * s;
            });
        });
        *state = result;
    }
    proptest! {
        // The frequency-domain multiplication must agree with the naive
        // product on arbitrary random states.
        #[test]
        fn mds_freq_proptest(a in any::<[u64; STATE_WIDTH]>()) {
            let mut v1 = [ZERO; STATE_WIDTH];
            let mut v2;
            for i in 0..STATE_WIDTH {
                v1[i] = Felt::new(a[i]);
            }
            v2 = v1;
            apply_mds_naive(&mut v1);
            apply_mds(&mut v2);
            prop_assert_eq!(v1, v2);
        }
    }
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/algebraic_sponge/rescue/mds/mod.rs | miden-crypto/src/hash/algebraic_sponge/rescue/mds/mod.rs | use p3_field::PrimeField64;
use super::{Felt, STATE_WIDTH, ZERO};
mod freq;
pub use freq::mds_multiply_freq;
// MDS MULTIPLICATION
// ================================================================================================
/// Multiplies the state by the RPO MDS matrix, reducing the results into `Felt`.
#[inline(always)]
pub fn apply_mds(state: &mut [Felt; STATE_WIDTH]) {
    let mut result = [ZERO; STATE_WIDTH];
    // Using the linearity of the operations we can split the state into a low||high decomposition
    // and operate on each with no overflow and then combine/reduce the result to a field element.
    // The no overflow is guaranteed by the fact that the MDS matrix is a small powers of two in
    // frequency domain.
    let mut state_l = [0u64; STATE_WIDTH];
    let mut state_h = [0u64; STATE_WIDTH];
    for r in 0..STATE_WIDTH {
        // Split each canonical value into its 32-bit high and low halves.
        let s = state[r].as_canonical_u64();
        state_h[r] = s >> 32;
        state_l[r] = (s as u32) as u64;
    }
    let state_h = mds_multiply_freq(state_h);
    let state_l = mds_multiply_freq(state_l);
    for r in 0..STATE_WIDTH {
        // Recombine: the full value is low + 2^32 * high, held in 128 bits.
        let s = state_l[r] as u128 + ((state_h[r] as u128) << 32);
        let s_hi = (s >> 64) as u64;
        let s_lo = s as u64;
        // Reduce modulo p = 2^64 - 2^32 + 1 (Goldilocks): 2^64 ≡ 2^32 - 1 (mod p),
        // so the high 64 bits contribute (s_hi << 32) - s_hi.
        let z = (s_hi << 32) - s_hi;
        let (res, over) = s_lo.overflowing_add(z);
        // On overflow of the addition, fold the dropped 2^64 back in by adding
        // 2^32 - 1 (the mask below is 0xFFFF_FFFF when `over` is true, else 0).
        result[r] = Felt::new(res.wrapping_add(0u32.wrapping_sub(over as u32) as u64));
    }
    *state = result;
}
// MDS MATRIX
// ================================================================================================
/// RPO MDS matrix
pub const MDS: [[Felt; STATE_WIDTH]; STATE_WIDTH] = [
[
Felt::new(7),
Felt::new(23),
Felt::new(8),
Felt::new(26),
Felt::new(13),
Felt::new(10),
Felt::new(9),
Felt::new(7),
Felt::new(6),
Felt::new(22),
Felt::new(21),
Felt::new(8),
],
[
Felt::new(8),
Felt::new(7),
Felt::new(23),
Felt::new(8),
Felt::new(26),
Felt::new(13),
Felt::new(10),
Felt::new(9),
Felt::new(7),
Felt::new(6),
Felt::new(22),
Felt::new(21),
],
[
Felt::new(21),
Felt::new(8),
Felt::new(7),
Felt::new(23),
Felt::new(8),
Felt::new(26),
Felt::new(13),
Felt::new(10),
Felt::new(9),
Felt::new(7),
Felt::new(6),
Felt::new(22),
],
[
Felt::new(22),
Felt::new(21),
Felt::new(8),
Felt::new(7),
Felt::new(23),
Felt::new(8),
Felt::new(26),
Felt::new(13),
Felt::new(10),
Felt::new(9),
Felt::new(7),
Felt::new(6),
],
[
Felt::new(6),
Felt::new(22),
Felt::new(21),
Felt::new(8),
Felt::new(7),
Felt::new(23),
Felt::new(8),
Felt::new(26),
Felt::new(13),
Felt::new(10),
Felt::new(9),
Felt::new(7),
],
[
Felt::new(7),
Felt::new(6),
Felt::new(22),
Felt::new(21),
Felt::new(8),
Felt::new(7),
Felt::new(23),
Felt::new(8),
Felt::new(26),
Felt::new(13),
Felt::new(10),
Felt::new(9),
],
[
Felt::new(9),
Felt::new(7),
Felt::new(6),
Felt::new(22),
Felt::new(21),
Felt::new(8),
Felt::new(7),
Felt::new(23),
Felt::new(8),
Felt::new(26),
Felt::new(13),
Felt::new(10),
],
[
Felt::new(10),
Felt::new(9),
Felt::new(7),
Felt::new(6),
Felt::new(22),
Felt::new(21),
Felt::new(8),
Felt::new(7),
Felt::new(23),
Felt::new(8),
Felt::new(26),
Felt::new(13),
],
[
Felt::new(13),
Felt::new(10),
Felt::new(9),
Felt::new(7),
Felt::new(6),
Felt::new(22),
Felt::new(21),
Felt::new(8),
Felt::new(7),
Felt::new(23),
Felt::new(8),
Felt::new(26),
],
[
Felt::new(26),
Felt::new(13),
Felt::new(10),
Felt::new(9),
Felt::new(7),
Felt::new(6),
Felt::new(22),
Felt::new(21),
Felt::new(8),
Felt::new(7),
Felt::new(23),
Felt::new(8),
],
[
Felt::new(8),
Felt::new(26),
Felt::new(13),
Felt::new(10),
Felt::new(9),
Felt::new(7),
Felt::new(6),
Felt::new(22),
Felt::new(21),
Felt::new(8),
Felt::new(7),
Felt::new(23),
],
[
Felt::new(23),
Felt::new(8),
Felt::new(26),
Felt::new(13),
Felt::new(10),
Felt::new(9),
Felt::new(7),
Felt::new(6),
Felt::new(22),
Felt::new(21),
Felt::new(8),
Felt::new(7),
],
];
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/algebraic_sponge/rescue/rpo/tests.rs | miden-crypto/src/hash/algebraic_sponge/rescue/rpo/tests.rs | #![cfg(feature = "std")]
use alloc::{collections::BTreeSet, vec::Vec};
use proptest::prelude::*;
use super::{
super::{ALPHA, INV_ALPHA, apply_inv_sbox, apply_sbox},
Felt, Rpo256, STATE_WIDTH,
};
use crate::{
ONE, Word, ZERO,
field::{PrimeCharacteristicRing, PrimeField64},
hash::algebraic_sponge::{AlgebraicSponge, BINARY_CHUNK_SIZE, CAPACITY_RANGE, RATE_WIDTH},
rand::test_utils::rand_value,
};
#[test]
fn test_sbox() {
let state = [Felt::new(rand_value()); STATE_WIDTH];
let mut expected = state;
expected.iter_mut().for_each(|v| *v = v.exp_const_u64::<ALPHA>());
let mut actual = state;
apply_sbox(&mut actual);
assert_eq!(expected, actual);
}
#[test]
fn test_inv_sbox() {
let state = [Felt::new(rand_value()); STATE_WIDTH];
let mut expected = state;
expected.iter_mut().for_each(|v| *v = v.exp_const_u64::<INV_ALPHA>());
let mut actual = state;
apply_inv_sbox(&mut actual);
assert_eq!(expected, actual);
}
#[test]
fn hash_elements_vs_merge() {
let elements = [Felt::new(rand_value()); 8];
let digests: [Word; 2] = [
Word::new(elements[..4].try_into().unwrap()),
Word::new(elements[4..].try_into().unwrap()),
];
let m_result = Rpo256::merge(&digests);
let h_result = Rpo256::hash_elements(&elements);
assert_eq!(m_result, h_result);
}
#[test]
fn merge_vs_merge_in_domain() {
let elements = [Felt::new(rand_value()); 8];
let digests: [Word; 2] = [
Word::new(elements[..4].try_into().unwrap()),
Word::new(elements[4..].try_into().unwrap()),
];
let merge_result = Rpo256::merge(&digests);
// ------------- merge with domain = 0 -------------
// set domain to ZERO. This should not change the result.
let domain = ZERO;
let merge_in_domain_result = Rpo256::merge_in_domain(&digests, domain);
assert_eq!(merge_result, merge_in_domain_result);
// ------------- merge with domain = 1 -------------
// set domain to ONE. This should change the result.
let domain = ONE;
let merge_in_domain_result = Rpo256::merge_in_domain(&digests, domain);
assert_ne!(merge_result, merge_in_domain_result);
}
#[test]
fn hash_elements_vs_merge_with_int() {
let tmp = [Felt::new(rand_value()); 4];
let seed = Word::new(tmp);
// ----- value fits into a field element ------------------------------------------------------
let val: Felt = Felt::new(rand_value());
let m_result = <Rpo256 as AlgebraicSponge>::merge_with_int(seed, val.as_canonical_u64());
let mut elements = seed.as_elements().to_vec();
elements.push(val);
let h_result = Rpo256::hash_elements(&elements);
assert_eq!(m_result, h_result);
// ----- value does not fit into a field element ----------------------------------------------
let val = Felt::ORDER_U64 + 2;
let m_result = <Rpo256 as AlgebraicSponge>::merge_with_int(seed, val);
let mut elements = seed.as_elements().to_vec();
elements.push(Felt::new(val));
elements.push(ONE);
let h_result = Rpo256::hash_elements(&elements);
assert_eq!(m_result, h_result);
}
#[test]
fn hash_padding() {
// adding a zero bytes at the end of a byte string should result in a different hash
let r1 = Rpo256::hash(&[1_u8, 2, 3]);
let r2 = Rpo256::hash(&[1_u8, 2, 3, 0]);
assert_ne!(r1, r2);
// same as above but with bigger inputs
let r1 = Rpo256::hash(&[1_u8, 2, 3, 4, 5, 6]);
let r2 = Rpo256::hash(&[1_u8, 2, 3, 4, 5, 6, 0]);
assert_ne!(r1, r2);
// same as above but with input splitting over two elements
let r1 = Rpo256::hash(&[1_u8, 2, 3, 4, 5, 6, 7]);
let r2 = Rpo256::hash(&[1_u8, 2, 3, 4, 5, 6, 7, 0]);
assert_ne!(r1, r2);
// same as above but with multiple zeros
let r1 = Rpo256::hash(&[1_u8, 2, 3, 4, 5, 6, 7, 0, 0]);
let r2 = Rpo256::hash(&[1_u8, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0]);
assert_ne!(r1, r2);
}
#[test]
fn hash_padding_no_extra_permutation_call() {
use crate::hash::algebraic_sponge::DIGEST_RANGE;
// Implementation
let num_bytes = BINARY_CHUNK_SIZE * RATE_WIDTH;
let mut buffer = vec![0_u8; num_bytes];
*buffer.last_mut().unwrap() = 97;
let r1 = Rpo256::hash(&buffer);
// Expected
let final_chunk = [0_u8, 0, 0, 0, 0, 0, 97, 1];
let mut state = [ZERO; STATE_WIDTH];
// padding when hashing bytes
state[CAPACITY_RANGE.start] = Felt::from_u8(RATE_WIDTH as u8);
*state.last_mut().unwrap() = Felt::new(u64::from_le_bytes(final_chunk));
Rpo256::apply_permutation(&mut state);
assert_eq!(&r1[0..4], &state[DIGEST_RANGE]);
}
#[test]
fn hash_elements_padding() {
let e1 = [Felt::new(rand_value()); 2];
let e2 = [e1[0], e1[1], ZERO];
let r1 = Rpo256::hash_elements(&e1);
let r2 = Rpo256::hash_elements(&e2);
assert_ne!(r1, r2);
}
#[test]
fn hash_elements() {
let elements = [
ZERO,
ONE,
Felt::new(2),
Felt::new(3),
Felt::new(4),
Felt::new(5),
Felt::new(6),
Felt::new(7),
];
let digests: [Word; 2] = [
Word::new(elements[..4].try_into().unwrap()),
Word::new(elements[4..8].try_into().unwrap()),
];
let m_result = Rpo256::merge(&digests);
let h_result = Rpo256::hash_elements(&elements);
assert_eq!(m_result, h_result);
}
#[test]
fn hash_empty() {
let elements: Vec<Felt> = vec![];
let zero_digest = Word::default();
let h_result = Rpo256::hash_elements(&elements);
assert_eq!(zero_digest, h_result);
}
#[test]
fn hash_empty_bytes() {
let bytes: Vec<u8> = vec![];
let zero_digest = Word::default();
let h_result = Rpo256::hash(&bytes);
assert_eq!(zero_digest, h_result);
}
#[test]
fn hash_test_vectors() {
let elements = [
ZERO,
ONE,
Felt::new(2),
Felt::new(3),
Felt::new(4),
Felt::new(5),
Felt::new(6),
Felt::new(7),
Felt::new(8),
Felt::new(9),
Felt::new(10),
Felt::new(11),
Felt::new(12),
Felt::new(13),
Felt::new(14),
Felt::new(15),
Felt::new(16),
Felt::new(17),
Felt::new(18),
];
for i in 0..elements.len() {
let expected = Word::new(*EXPECTED[i]);
let result = Rpo256::hash_elements(&elements[..(i + 1)]);
assert_eq!(result, expected);
}
}
#[test]
fn sponge_bytes_with_remainder_length_wont_panic() {
// this test targets to assert that no panic will happen with the edge case of having an inputs
// with length that is not divisible by the used binary chunk size. 113 is a non-negligible
// input length that is prime; hence guaranteed to not be divisible by any choice of chunk
// size.
//
// this is a preliminary test to the fuzzy-stress of proptest.
Rpo256::hash(&[0; 113]);
}
#[test]
fn sponge_collision_for_wrapped_field_element() {
let a = Rpo256::hash(&[0; 8]);
let b = Rpo256::hash(&Felt::ORDER_U64.to_le_bytes());
assert_ne!(a, b);
}
#[test]
fn sponge_zeroes_collision() {
let mut zeroes = Vec::with_capacity(255);
let mut set = BTreeSet::new();
(0..255).for_each(|_| {
let hash = Rpo256::hash(&zeroes);
zeroes.push(0);
// panic if a collision was found
assert!(set.insert(hash));
});
}
proptest! {
#[test]
fn rpo256_wont_panic_with_arbitrary_input(ref bytes in any::<Vec<u8>>()) {
Rpo256::hash(bytes);
}
}
const EXPECTED: [Word; 19] = [
Word::new([
Felt::new(18126731724905382595),
Felt::new(7388557040857728717),
Felt::new(14290750514634285295),
Felt::new(7852282086160480146),
]),
Word::new([
Felt::new(10139303045932500183),
Felt::new(2293916558361785533),
Felt::new(15496361415980502047),
Felt::new(17904948502382283940),
]),
Word::new([
Felt::new(17457546260239634015),
Felt::new(803990662839494686),
Felt::new(10386005777401424878),
Felt::new(18168807883298448638),
]),
Word::new([
Felt::new(13072499238647455740),
Felt::new(10174350003422057273),
Felt::new(9201651627651151113),
Felt::new(6872461887313298746),
]),
Word::new([
Felt::new(2903803350580990546),
Felt::new(1838870750730563299),
Felt::new(4258619137315479708),
Felt::new(17334260395129062936),
]),
Word::new([
Felt::new(8571221005243425262),
Felt::new(3016595589318175865),
Felt::new(13933674291329928438),
Felt::new(678640375034313072),
]),
Word::new([
Felt::new(16314113978986502310),
Felt::new(14587622368743051587),
Felt::new(2808708361436818462),
Felt::new(10660517522478329440),
]),
Word::new([
Felt::new(2242391899857912644),
Felt::new(12689382052053305418),
Felt::new(235236990017815546),
Felt::new(5046143039268215739),
]),
Word::new([
Felt::new(5218076004221736204),
Felt::new(17169400568680971304),
Felt::new(8840075572473868990),
Felt::new(12382372614369863623),
]),
Word::new([
Felt::new(9783834557155203486),
Felt::new(12317263104955018849),
Felt::new(3933748931816109604),
Felt::new(1843043029836917214),
]),
Word::new([
Felt::new(14498234468286984551),
Felt::new(16837257669834682387),
Felt::new(6664141123711355107),
Felt::new(4590460158294697186),
]),
Word::new([
Felt::new(4661800562479916067),
Felt::new(11794407552792839953),
Felt::new(9037742258721863712),
Felt::new(6287820818064278819),
]),
Word::new([
Felt::new(7752693085194633729),
Felt::new(7379857372245835536),
Felt::new(9270229380648024178),
Felt::new(10638301488452560378),
]),
Word::new([
Felt::new(11542686762698783357),
Felt::new(15570714990728449027),
Felt::new(7518801014067819501),
Felt::new(12706437751337583515),
]),
Word::new([
Felt::new(9553923701032839042),
Felt::new(7281190920209838818),
Felt::new(2488477917448393955),
Felt::new(5088955350303368837),
]),
Word::new([
Felt::new(4935426252518736883),
Felt::new(12584230452580950419),
Felt::new(8762518969632303998),
Felt::new(18159875708229758073),
]),
Word::new([
Felt::new(12795429638314178838),
Felt::new(14360248269767567855),
Felt::new(3819563852436765058),
Felt::new(10859123583999067291),
]),
Word::new([
Felt::new(2695742617679420093),
Felt::new(9151515850666059759),
Felt::new(15855828029180595485),
Felt::new(17190029785471463210),
]),
Word::new([
Felt::new(13205273108219124830),
Felt::new(2524898486192849221),
Felt::new(14618764355375283547),
Felt::new(10615614265042186874),
]),
];
// PLONKY3 INTEGRATION TESTS
// ================================================================================================
mod p3_tests {
use p3_symmetric::{CryptographicHasher, Permutation, PseudoCompressionFunction};
use super::*;
use crate::hash::algebraic_sponge::rescue::rpo::{
RpoCompression, RpoHasher, RpoPermutation256,
};
#[test]
fn test_rpo_permutation_basic() {
let mut state = [Felt::new(0); STATE_WIDTH];
// Apply permutation
let perm = RpoPermutation256;
perm.permute_mut(&mut state);
// State should be different from all zeros after permutation
assert_ne!(state, [Felt::new(0); STATE_WIDTH]);
}
#[test]
fn test_rpo_permutation_consistency() {
let mut state1 = [Felt::new(0); STATE_WIDTH];
let mut state2 = [Felt::new(0); STATE_WIDTH];
// Apply permutation using the trait
let perm = RpoPermutation256;
perm.permute_mut(&mut state1);
// Apply permutation directly
RpoPermutation256::apply_permutation(&mut state2);
// Both should produce the same result
assert_eq!(state1, state2);
}
#[test]
fn test_rpo_permutation_deterministic() {
let input = [
Felt::new(1),
Felt::new(2),
Felt::new(3),
Felt::new(4),
Felt::new(5),
Felt::new(6),
Felt::new(7),
Felt::new(8),
Felt::new(9),
Felt::new(10),
Felt::new(11),
Felt::new(12),
];
let mut state1 = input;
let mut state2 = input;
let perm = RpoPermutation256;
perm.permute_mut(&mut state1);
perm.permute_mut(&mut state2);
// Same input should produce same output
assert_eq!(state1, state2);
}
#[test]
#[ignore] // TODO: Re-enable after migrating RPO state layout to match Plonky3
// Miden-crypto: capacity=[0-3], rate=[4-11]
// Plonky3: rate=[0-7], capacity=[8-11]
fn test_rpo_hasher_vs_hash_elements() {
// Test with empty input
let expected: [Felt; 4] = Rpo256::hash_elements::<Felt>(&[]).into();
let hasher = RpoHasher::new(RpoPermutation256);
let result = hasher.hash_iter([]);
assert_eq!(result, expected, "Empty input should produce same digest");
// Test with 4 elements (one digest worth)
let input4 = [Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)];
let expected: [Felt; 4] = Rpo256::hash_elements(&input4).into();
let result = hasher.hash_iter(input4);
assert_eq!(result, expected, "4 elements should produce same digest");
// Test with 8 elements (exactly one rate)
let input8 = [
Felt::new(1),
Felt::new(2),
Felt::new(3),
Felt::new(4),
Felt::new(5),
Felt::new(6),
Felt::new(7),
Felt::new(8),
];
let expected: [Felt; 4] = Rpo256::hash_elements(&input8).into();
let result = hasher.hash_iter(input8);
assert_eq!(result, expected, "8 elements (one rate) should produce same digest");
// Test with 12 elements (more than one rate)
let input12 = [
Felt::new(1),
Felt::new(2),
Felt::new(3),
Felt::new(4),
Felt::new(5),
Felt::new(6),
Felt::new(7),
Felt::new(8),
Felt::new(9),
Felt::new(10),
Felt::new(11),
Felt::new(12),
];
let expected: [Felt; 4] = Rpo256::hash_elements(&input12).into();
let result = hasher.hash_iter(input12);
assert_eq!(result, expected, "12 elements should produce same digest");
// Test with 16 elements (two rates)
let input16 = [
Felt::new(1),
Felt::new(2),
Felt::new(3),
Felt::new(4),
Felt::new(5),
Felt::new(6),
Felt::new(7),
Felt::new(8),
Felt::new(9),
Felt::new(10),
Felt::new(11),
Felt::new(12),
Felt::new(13),
Felt::new(14),
Felt::new(15),
Felt::new(16),
];
let expected: [Felt; 4] = Rpo256::hash_elements(&input16).into();
let result = hasher.hash_iter(input16);
assert_eq!(result, expected, "16 elements (two rates) should produce same digest");
}
#[test]
#[ignore] // TODO: Re-enable after migrating RPO state layout to match Plonky3
// Miden-crypto: capacity=[0-3], rate=[4-11]
// Plonky3: rate=[0-7], capacity=[8-11]
fn test_rpo_compression_vs_merge() {
let digest1 = [Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)];
let digest2 = [Felt::new(5), Felt::new(6), Felt::new(7), Felt::new(8)];
// Rpo256::merge expects &[Word; 2]
let expected: [Felt; 4] = Rpo256::merge(&[digest1.into(), digest2.into()]).into();
// RpoCompression expects [[Felt; 4]; 2]
let compress = RpoCompression::new(RpoPermutation256);
let result = compress.compress([digest1, digest2]);
assert_eq!(result, expected, "RpoCompression should match Rpo256::merge");
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/algebraic_sponge/rescue/rpo/mod.rs | miden-crypto/src/hash/algebraic_sponge/rescue/rpo/mod.rs | use super::{
ARK1, ARK2, AlgebraicSponge, CAPACITY_RANGE, DIGEST_RANGE, Felt, NUM_ROUNDS, RATE_RANGE, Range,
STATE_WIDTH, Word, add_constants, add_constants_and_apply_inv_sbox,
add_constants_and_apply_sbox, apply_inv_sbox, apply_mds, apply_sbox,
};
use crate::hash::algebraic_sponge::rescue::mds::MDS;
#[cfg(test)]
mod tests;
// HASHER IMPLEMENTATION
// ================================================================================================
/// Implementation of the Rescue Prime Optimized hash function with 256-bit output.
///
/// The hash function is implemented according to the Rescue Prime Optimized
/// [specifications](https://eprint.iacr.org/2022/1577) while the padding rule follows the one
/// described [here](https://eprint.iacr.org/2023/1045).
///
/// The parameters used to instantiate the function are:
/// * Field: 64-bit prime field with modulus p = 2^64 - 2^32 + 1.
/// * State width: 12 field elements.
/// * Rate size: r = 8 field elements.
/// * Capacity size: c = 4 field elements.
/// * Number of founds: 7.
/// * S-Box degree: 7.
///
/// The above parameters target a 128-bit security level. The digest consists of four field elements
/// and it can be serialized into 32 bytes (256 bits).
///
/// ## Hash output consistency
/// Functions [hash_elements()](Rpo256::hash_elements), [merge()](Rpo256::merge), and
/// [merge_with_int()](Rpo256::merge_with_int) are internally consistent. That is, computing
/// a hash for the same set of elements using these functions will always produce the same
/// result. For example, merging two digests using [merge()](Rpo256::merge) will produce the
/// same result as hashing 8 elements which make up these digests using
/// [hash_elements()](Rpo256::hash_elements) function.
///
/// However, [hash()](Rpo256::hash) function is not consistent with functions mentioned above.
/// For example, if we take two field elements, serialize them to bytes and hash them using
/// [hash()](Rpo256::hash), the result will differ from the result obtained by hashing these
/// elements directly using [hash_elements()](Rpo256::hash_elements) function. The reason for
/// this difference is that [hash()](Rpo256::hash) function needs to be able to handle
/// arbitrary binary strings, which may or may not encode valid field elements - and thus,
/// deserialization procedure used by this function is different from the procedure used to
/// deserialize valid field elements.
///
/// Thus, if the underlying data consists of valid field elements, it might make more sense
/// to deserialize them into field elements and then hash them using
/// [hash_elements()](Rpo256::hash_elements) function rather than hashing the serialized bytes
/// using [hash()](Rpo256::hash) function.
///
/// ## Domain separation
/// [merge_in_domain()](Rpo256::merge_in_domain) hashes two digests into one digest with some domain
/// identifier and the current implementation sets the second capacity element to the value of
/// this domain identifier. Using a similar argument to the one formulated for domain separation of
/// the RPX hash function in Appendix C of its [specification](https://eprint.iacr.org/2023/1045),
/// one sees that doing so degrades only pre-image resistance, from its initial bound of c.log_2(p),
/// by as much as the log_2 of the size of the domain identifier space. Since pre-image resistance
/// becomes the bottleneck for the security bound of the sponge in overwrite-mode only when it is
/// lower than 2^128, we see that the target 128-bit security level is maintained as long as
/// the size of the domain identifier space, including for padding, is less than 2^128.
///
/// ## Hashing of empty input
/// The current implementation hashes empty input to the zero digest [0, 0, 0, 0]. This has
/// the benefit of requiring no calls to the RPO permutation when hashing empty input.
#[allow(rustdoc::private_intra_doc_links)]
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct Rpo256();
impl AlgebraicSponge for Rpo256 {
// RESCUE PERMUTATION
// --------------------------------------------------------------------------------------------
/// Applies RPO permutation to the provided state.
#[inline(always)]
fn apply_permutation(state: &mut [Felt; STATE_WIDTH]) {
for i in 0..NUM_ROUNDS {
Self::apply_round(state, i);
}
}
}
impl Rpo256 {
// CONSTANTS
// --------------------------------------------------------------------------------------------
/// Target collision resistance level in bits.
pub const COLLISION_RESISTANCE: u32 = 128;
/// The number of rounds is set to 7 to target 128-bit security level.
pub const NUM_ROUNDS: usize = NUM_ROUNDS;
/// Sponge state is set to 12 field elements or 768 bytes; 8 elements are reserved for rate and
/// the remaining 4 elements are reserved for capacity.
pub const STATE_WIDTH: usize = STATE_WIDTH;
/// The rate portion of the state is located in elements 4 through 11 (inclusive).
pub const RATE_RANGE: Range<usize> = RATE_RANGE;
/// The capacity portion of the state is located in elements 0, 1, 2, and 3.
pub const CAPACITY_RANGE: Range<usize> = CAPACITY_RANGE;
/// The output of the hash function can be read from state elements 4, 5, 6, and 7.
pub const DIGEST_RANGE: Range<usize> = DIGEST_RANGE;
/// MDS matrix used for computing the linear layer in a RPO round.
pub const MDS: [[Felt; STATE_WIDTH]; STATE_WIDTH] = MDS;
/// Round constants added to the hasher state in the first half of the RPO round.
pub const ARK1: [[Felt; STATE_WIDTH]; NUM_ROUNDS] = ARK1;
/// Round constants added to the hasher state in the second half of the RPO round.
pub const ARK2: [[Felt; STATE_WIDTH]; NUM_ROUNDS] = ARK2;
// HASH FUNCTIONS
// --------------------------------------------------------------------------------------------
/// Returns a hash of the provided sequence of bytes.
#[inline(always)]
pub fn hash(bytes: &[u8]) -> Word {
<Self as AlgebraicSponge>::hash(bytes)
}
/// Returns a hash of the provided field elements.
#[inline(always)]
pub fn hash_elements<E: crate::field::BasedVectorSpace<Felt>>(elements: &[E]) -> Word {
<Self as AlgebraicSponge>::hash_elements(elements)
}
/// Returns a hash of two digests. This method is intended for use in construction of
/// Merkle trees and verification of Merkle paths.
#[inline(always)]
pub fn merge(values: &[Word; 2]) -> Word {
<Self as AlgebraicSponge>::merge(values)
}
/// Returns a hash of multiple digests.
#[inline(always)]
pub fn merge_many(values: &[Word]) -> Word {
<Self as AlgebraicSponge>::merge_many(values)
}
/// Returns a hash of a digest and a u64 value.
#[inline(always)]
pub fn merge_with_int(seed: Word, value: u64) -> Word {
<Self as AlgebraicSponge>::merge_with_int(seed, value)
}
/// Returns a hash of two digests and a domain identifier.
#[inline(always)]
pub fn merge_in_domain(values: &[Word; 2], domain: Felt) -> Word {
<Self as AlgebraicSponge>::merge_in_domain(values, domain)
}
// RESCUE PERMUTATION
// --------------------------------------------------------------------------------------------
/// Applies RPO permutation to the provided state.
#[inline(always)]
pub fn apply_permutation(state: &mut [Felt; STATE_WIDTH]) {
for i in 0..NUM_ROUNDS {
Self::apply_round(state, i);
}
}
/// RPO round function.
#[inline(always)]
pub fn apply_round(state: &mut [Felt; STATE_WIDTH], round: usize) {
// apply first half of RPO round
apply_mds(state);
if !add_constants_and_apply_sbox(state, &ARK1[round]) {
add_constants(state, &ARK1[round]);
apply_sbox(state);
}
// apply second half of RPO round
apply_mds(state);
if !add_constants_and_apply_inv_sbox(state, &ARK2[round]) {
add_constants(state, &ARK2[round]);
apply_inv_sbox(state);
}
}
}
// PLONKY3 INTEGRATION
// ================================================================================================
/// Plonky3-compatible RPO permutation implementation.
///
/// This module provides a Plonky3-compatible interface to the RPO256 hash function,
/// implementing the `Permutation` and `CryptographicPermutation` traits from Plonky3.
///
/// This allows RPO to be used with Plonky3's cryptographic infrastructure, including:
/// - PaddingFreeSponge for hashing
/// - TruncatedPermutation for compression
/// - DuplexChallenger for Fiat-Shamir transforms
use p3_challenger::DuplexChallenger;
use p3_symmetric::{
CryptographicPermutation, PaddingFreeSponge, Permutation, TruncatedPermutation,
};
// RPO PERMUTATION FOR PLONKY3
// ================================================================================================
/// Plonky3-compatible RPO permutation.
///
/// This struct wraps the RPO256 permutation and implements Plonky3's `Permutation` and
/// `CryptographicPermutation` traits, allowing RPO to be used within the Plonky3 ecosystem.
///
/// The permutation operates on a state of 12 field elements (STATE_WIDTH = 12), with:
/// - Rate: 8 elements (positions 4-11)
/// - Capacity: 4 elements (positions 0-3)
/// - Digest output: 4 elements (positions 4-7)
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct RpoPermutation256;
impl RpoPermutation256 {
// CONSTANTS
// --------------------------------------------------------------------------------------------
/// The number of rounds is set to 7 to target 128-bit security level.
pub const NUM_ROUNDS: usize = Rpo256::NUM_ROUNDS;
/// Sponge state is set to 12 field elements or 768 bytes; 8 elements are reserved for rate and
/// the remaining 4 elements are reserved for capacity.
pub const STATE_WIDTH: usize = STATE_WIDTH;
/// The rate portion of the state is located in elements 4 through 11 (inclusive).
pub const RATE_RANGE: Range<usize> = Rpo256::RATE_RANGE;
/// The capacity portion of the state is located in elements 0, 1, 2, and 3.
pub const CAPACITY_RANGE: Range<usize> = Rpo256::CAPACITY_RANGE;
/// The output of the hash function can be read from state elements 4, 5, 6, and 7.
pub const DIGEST_RANGE: Range<usize> = Rpo256::DIGEST_RANGE;
// RESCUE PERMUTATION
// --------------------------------------------------------------------------------------------
/// Applies RPO permutation to the provided state.
///
/// This delegates to the RPO256 implementation which applies 7 rounds of the
/// Rescue Prime Optimized permutation.
#[inline(always)]
pub fn apply_permutation(state: &mut [Felt; STATE_WIDTH]) {
Rpo256::apply_permutation(state);
}
}
// PLONKY3 TRAIT IMPLEMENTATIONS
// ================================================================================================
impl Permutation<[Felt; STATE_WIDTH]> for RpoPermutation256 {
fn permute_mut(&self, state: &mut [Felt; STATE_WIDTH]) {
Self::apply_permutation(state);
}
}
impl CryptographicPermutation<[Felt; STATE_WIDTH]> for RpoPermutation256 {}
// TYPE ALIASES FOR PLONKY3 INTEGRATION
// ================================================================================================
/// RPO-based hasher using Plonky3's PaddingFreeSponge.
///
/// This provides a sponge-based hash function with:
/// - WIDTH: 12 field elements (total state size)
/// - RATE: 8 field elements (input/output rate)
/// - OUT: 4 field elements (digest size)
pub type RpoHasher = PaddingFreeSponge<RpoPermutation256, 12, 8, 4>;
/// RPO-based compression function using Plonky3's TruncatedPermutation.
///
/// This provides a 2-to-1 compression function for Merkle tree construction with:
/// - CHUNK: 2 (number of input chunks - i.e., 2 digests of 4 elements each = 8 elements)
/// - N: 4 (output size in field elements)
/// - WIDTH: 12 (total state size)
///
/// The compression function takes 8 field elements (2 digests) as input and produces
/// 4 field elements (1 digest) as output.
pub type RpoCompression = TruncatedPermutation<RpoPermutation256, 2, 4, 12>;
/// RPO-based challenger using Plonky3's DuplexChallenger.
///
/// This provides a Fiat-Shamir transform implementation for interactive proof protocols,
/// with:
/// - F: Generic field type (typically the same as Felt)
/// - WIDTH: 12 field elements (sponge state size)
/// - RATE: 8 field elements (rate of absorption/squeezing)
pub type RpoChallenger<F> = DuplexChallenger<F, RpoPermutation256, 12, 8>;
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/algebraic_sponge/rescue/arch/x86_64_avx512.rs | miden-crypto/src/hash/algebraic_sponge/rescue/arch/x86_64_avx512.rs | use core::arch::x86_64::*;
/// 12×u64 state split as:
/// - 8 lanes in AVX-512 register (__m512i) for vector operations
/// - 4 lanes as scalar u64 array
type State = (__m512i, [u64; 4]);
// CONSTANTS
// ================================================================================================
const EPS_U64: u64 = 0xffff_ffff;
#[allow(clippy::useless_transmute)]
const LO_32_BITS_MASK: __mmask16 = unsafe { core::mem::transmute(0b0101_0101_0101_0101u16) };
// S-Box
// ================================================================================================
#[inline(always)]
unsafe fn mul64_64(x: __m512i, y: __m512i) -> (__m512i, __m512i) {
unsafe {
let eps = _mm512_set1_epi64(EPS_U64 as i64);
let x_hi = _mm512_castps_si512(_mm512_movehdup_ps(_mm512_castsi512_ps(x)));
let y_hi = _mm512_castps_si512(_mm512_movehdup_ps(_mm512_castsi512_ps(y)));
let mul_ll = _mm512_mul_epu32(x, y);
let mul_hh = _mm512_mul_epu32(x_hi, y_hi);
let mul_lh = _mm512_mul_epu32(x, y_hi);
let mul_hl = _mm512_mul_epu32(x_hi, y);
let mul_ll_hi = _mm512_srli_epi64::<32>(mul_ll);
let t0 = _mm512_add_epi64(mul_hl, mul_ll_hi);
let t0_lo = _mm512_and_si512(t0, eps);
let t0_hi = _mm512_srli_epi64::<32>(t0);
let t1 = _mm512_add_epi64(mul_lh, t0_lo);
let t2 = _mm512_add_epi64(mul_hh, t0_hi);
let t1_hi = _mm512_srli_epi64::<32>(t1);
let res_hi = _mm512_add_epi64(t2, t1_hi);
let t1_lo = _mm512_castps_si512(_mm512_moveldup_ps(_mm512_castsi512_ps(t1)));
let res_lo = _mm512_mask_blend_epi32(LO_32_BITS_MASK, t1_lo, mul_ll);
(res_lo, res_hi)
}
}
#[inline(always)]
unsafe fn square64(x: __m512i) -> (__m512i, __m512i) {
unsafe {
let x_hi = _mm512_castps_si512(_mm512_movehdup_ps(_mm512_castsi512_ps(x)));
let mul_ll = _mm512_mul_epu32(x, x);
let mul_hh = _mm512_mul_epu32(x_hi, x_hi);
let mul_lh = _mm512_mul_epu32(x, x_hi);
let mul_ll_hi = _mm512_srli_epi64::<33>(mul_ll);
let t0 = _mm512_add_epi64(mul_lh, mul_ll_hi);
let t0_hi = _mm512_srli_epi64::<31>(t0);
let res_hi = _mm512_add_epi64(mul_hh, t0_hi);
let mul_lh_lo = _mm512_slli_epi64::<33>(mul_lh);
let res_lo = _mm512_add_epi64(mul_ll, mul_lh_lo);
(res_lo, res_hi)
}
}
#[inline(always)]
unsafe fn reduce128(x: (__m512i, __m512i)) -> __m512i {
    // Lane-wise Goldilocks reduction of 128-bit values (lo, hi), mirroring the
    // scalar `reduce128_u64` below: result ≡ lo - (hi >> 32) + (hi & EPS) * EPS
    // (mod p). Values carry a `1 << 63` shift (suffix `_s`) so that the signed
    // AVX-512 comparisons emulate unsigned ones.
    unsafe {
        let (lo, hi) = x;
        let sign = _mm512_set1_epi64(i64::MIN);
        let eps = _mm512_set1_epi64(EPS_U64 as i64);
        let lo_s = _mm512_xor_si512(lo, sign);
        let hi_hi = _mm512_srli_epi64::<32>(hi);
        // NOTE: On some Zen4+ CPUs, mask-based conditional operations
        // (like the masked subtract below) can be slightly slower than the
        // scalar fixup approach used in the AVX2 path. This all-vector
        // implementation keeps the code branch-free and still yields the
        // expected speedup on Zen5, but future maintainers may want to revisit
        // this if they notice performance regressions on other architectures.
        let lo1_s = {
            let diff = _mm512_sub_epi64(lo_s, hi_hi);
            // Signed compare on shifted values detects an unsigned borrow.
            let mask: __mmask8 = _mm512_cmpgt_epi64_mask(diff, lo_s);
            if mask == 0 {
                diff
            } else {
                // Borrow: compensate by also subtracting EPS (wrapping),
                // which is equivalent modulo p.
                let adj = _mm512_maskz_set1_epi64(mask, EPS_U64 as i64);
                _mm512_sub_epi64(diff, adj)
            }
        };
        // Fold the low 32 bits of `hi` back in as (hi & EPS) * EPS; mul_epu32
        // multiplies the low 32-bit halves of each 64-bit lane.
        let t1 = _mm512_mul_epu32(hi, eps);
        let sum = _mm512_add_epi64(lo1_s, t1);
        // Signed compare on the shifted accumulator detects an unsigned carry;
        // on carry, add EPS to compensate for the wrap.
        let carry: __mmask8 = _mm512_cmplt_epi64_mask(sum, lo1_s);
        let adj = _mm512_maskz_set1_epi64(carry, EPS_U64 as i64);
        let lo2_s = _mm512_add_epi64(sum, adj);
        // Undo the 1 << 63 shift before returning.
        _mm512_xor_si512(lo2_s, sign)
    }
}
#[inline(always)]
fn reduce128_u64(lo: u64, hi: u64) -> u64 {
    // Reduces a 128-bit value (hi * 2^64 + lo) modulo the Goldilocks prime
    // p = 2^64 - 2^32 + 1 (assumes EPS_U64 == 2^32 - 1, consistent with its use
    // as a 32-bit mask here). The result may be non-canonical (>= p).
    let hi_top = hi >> 32;
    let hi_bot = hi & EPS_U64;
    // Subtract the top 32 bits of `hi`; on borrow, subtracting EPS (wrapping)
    // compensates, which is equivalent modulo p.
    let (diff, borrowed) = lo.overflowing_sub(hi_top);
    let folded = if borrowed { diff.wrapping_sub(EPS_U64) } else { diff };
    // Fold the bottom 32 bits of `hi` back in as hi_bot * EPS; on carry, add
    // EPS to account for the 2^64 wrap.
    let (sum, carried) = folded.overflowing_add(hi_bot.wrapping_mul(EPS_U64));
    if carried { sum.wrapping_add(EPS_U64) } else { sum }
}
#[inline(always)]
fn mul_reduce_u64(a: u64, b: u64) -> u64 {
    // Widening 64x64 -> 128-bit multiply followed by Goldilocks reduction.
    let wide = u128::from(a) * u128::from(b);
    reduce128_u64(wide as u64, (wide >> 64) as u64)
}
#[inline(always)]
fn square_reduce_u64(a: u64) -> u64 {
    // Squaring is multiplication by itself; reduce the 128-bit product.
    let wide = u128::from(a) * u128::from(a);
    reduce128_u64(wide as u64, (wide >> 64) as u64)
}
#[inline(always)]
unsafe fn mul_reduce(a: State, b: State) -> State {
    // Element-wise modular multiply of two 12-element states: the first 8
    // elements live in one AVX-512 vector, the remaining 4 in a scalar tail.
    unsafe {
        let vec = reduce128(mul64_64(a.0, b.0));
        let tail = core::array::from_fn(|i| mul_reduce_u64(a.1[i], b.1[i]));
        (vec, tail)
    }
}
#[inline(always)]
unsafe fn square_reduce(x: State) -> State {
    // Element-wise modular squaring: AVX-512 head plus scalar 4-element tail.
    unsafe {
        let vec = reduce128(square64(x.0));
        let tail = core::array::from_fn(|i| square_reduce_u64(x.1[i]));
        (vec, tail)
    }
}
#[inline(always)]
unsafe fn exp_acc(high: State, low: State, exp: usize) -> State {
    // Computes high^(2^exp) * low: `exp` modular squarings, one multiplication.
    unsafe {
        let mut acc = high;
        let mut remaining = exp;
        while remaining > 0 {
            acc = square_reduce(acc);
            remaining -= 1;
        }
        mul_reduce(acc, low)
    }
}
#[inline(always)]
unsafe fn do_apply_sbox(state: State) -> State {
    // S-box x^7 = x^3 * x^4, built from two squarings and two multiplications.
    unsafe {
        let x2 = square_reduce(state);
        let x3 = mul_reduce(x2, state);
        let x4 = square_reduce(x2);
        mul_reduce(x3, x4)
    }
}
#[inline(always)]
unsafe fn do_apply_inv_sbox(state: State) -> State {
    unsafe {
        // Inverse S-box: raises each element to the power 10540996611094048183
        // using the same 72-multiplication addition chain as the AVX2 path:
        // 10540996611094048183 = b1001001001001001001001001001000110110110110110110110110110110111
        // t1 = base^10
        let t1 = square_reduce(state);
        // t2 = base^100
        let t2 = square_reduce(t1);
        // t3 = base^100100
        let t3 = exp_acc(t2, t2, 3);
        // t4 = base^100100100100
        let t4 = exp_acc(t3, t3, 6);
        // t5 = base^100100100100100100100100
        let t5 = exp_acc(t4, t4, 12);
        // t6 = base^100100100100100100100100100100
        let t6 = exp_acc(t5, t3, 6);
        // t7 = base^1001001001001001001001001001000100100100100100100100100100100
        let t7 = exp_acc(t6, t6, 31);
        // Assemble the final exponent from t7, t6 and the low bits.
        let a = square_reduce(square_reduce(mul_reduce(square_reduce(t7), t6)));
        let b = mul_reduce(t1, mul_reduce(t2, state));
        mul_reduce(a, b)
    }
}
#[inline(always)]
unsafe fn load12(src: &[u64; 12]) -> State {
    // Loads a 12-element state: first 8 elements into one AVX-512 register
    // (unaligned load), last 4 into a scalar tail array.
    unsafe {
        let head = _mm512_loadu_si512(src.as_ptr().cast::<__m512i>());
        let tail = core::array::from_fn(|i| src[8 + i]);
        (head, tail)
    }
}
#[inline(always)]
unsafe fn store12(dst: &mut [u64; 12], s: State) {
    // Inverse of `load12`: vector head to elements 0..8, tail to 8..12.
    unsafe {
        let (head, tail) = s;
        _mm512_storeu_si512(dst.as_mut_ptr().cast::<__m512i>(), head);
        dst[8..].copy_from_slice(&tail);
    }
}
#[inline(always)]
pub unsafe fn apply_sbox(buf: &mut [u64; 12]) {
    // In-place S-box (x^7) over all 12 state elements: load, apply, store.
    unsafe { store12(buf, do_apply_sbox(load12(buf))) }
}
#[inline(always)]
pub unsafe fn apply_inv_sbox(buf: &mut [u64; 12]) {
    // In-place inverse S-box over all 12 state elements: load, apply, store.
    unsafe { store12(buf, do_apply_inv_sbox(load12(buf))) }
}
// RPX E-round
// ================================================================================================
/// The Goldilocks prime p = 2^64 - 2^32 + 1.
const P_U64: u64 = 0xffff_ffff_0000_0001;
#[inline(always)]
unsafe fn load_ext(buf: &[u64; 12]) -> (__m512i, __m512i, __m512i) {
    // Views the 12-element state as four degree-3 extension elements and
    // gathers coefficient i of each element into vector a_i:
    // a0 = [s0,s3,s6,s9], a1 = [s1,s4,s7,s10], a2 = [s2,s5,s8,s11].
    // Only the low 4 lanes of each 512-bit vector are used; the rest are zero.
    unsafe {
        let a0 = _mm512_setr_epi64(
            buf[0] as i64,
            buf[3] as i64,
            buf[6] as i64,
            buf[9] as i64,
            0,
            0,
            0,
            0,
        );
        let a1 = _mm512_setr_epi64(
            buf[1] as i64,
            buf[4] as i64,
            buf[7] as i64,
            buf[10] as i64,
            0,
            0,
            0,
            0,
        );
        let a2 = _mm512_setr_epi64(
            buf[2] as i64,
            buf[5] as i64,
            buf[8] as i64,
            buf[11] as i64,
            0,
            0,
            0,
            0,
        );
        (a0, a1, a2)
    }
}
#[inline(always)]
unsafe fn store_ext(buf: &mut [u64; 12], a0: __m512i, a1: __m512i, a2: __m512i) {
    // Inverse of `load_ext`: lane j of coefficient vector i is scattered back
    // to buf[3*j + i]. Only the low 4 lanes of each vector are meaningful.
    unsafe {
        let lanes: [[i64; 8]; 3] = [
            core::mem::transmute(a0),
            core::mem::transmute(a1),
            core::mem::transmute(a2),
        ];
        for (i, coeff) in lanes.iter().enumerate() {
            for (j, &v) in coeff.iter().take(4).enumerate() {
                buf[3 * j + i] = v as u64;
            }
        }
    }
}
#[inline(always)]
unsafe fn add_mod(z: __m512i, w: __m512i) -> __m512i {
    // Lane-wise addition modulo p for canonical inputs; result is canonical.
    unsafe {
        let p = _mm512_set1_epi64(P_U64 as i64);
        // Inputs are already canonical from reduce128, so no need to call canon()
        let s = _mm512_add_epi64(z, w);
        // If carry or s >= p, subtract p once. A wrapped sum is detected by the
        // unsigned compare s < z; subtracting p from the wrapped value yields
        // the correct residue because the wrap already removed 2^64.
        let carry = _mm512_cmp_epu64_mask(s, z, _MM_CMPINT_LT);
        let ge_p = _mm512_cmpge_epu64_mask(s, p);
        let need = carry | ge_p;
        _mm512_mask_sub_epi64(s, need, s, p)
    }
}
/// Doubles each lane modulo p (canonical in, canonical out).
#[inline(always)]
unsafe fn dbl_mod(z: __m512i) -> __m512i {
    unsafe { add_mod(z, z) }
}
#[inline(always)]
unsafe fn canonicalize_vec(x: __m512i) -> __m512i {
    // Maps a residue to canonical form [0, p) with one conditional subtraction.
    // Assumes the input is below 2p so a single subtraction suffices — holds
    // for `reduce128` outputs as used below; confirm for any new caller.
    unsafe {
        let p = _mm512_set1_epi64(P_U64 as i64);
        let ge = _mm512_cmpge_epu64_mask(x, p);
        _mm512_mask_sub_epi64(x, ge, x, p)
    }
}
/// Lane-wise modular multiplication returning canonical results.
#[inline(always)]
unsafe fn mul_reduce_vec(x: __m512i, y: __m512i) -> __m512i {
    unsafe { canonicalize_vec(reduce128(mul64_64(x, y))) }
}
/// Lane-wise modular squaring returning canonical results.
#[inline(always)]
unsafe fn square_reduce_vec(x: __m512i) -> __m512i {
    unsafe { canonicalize_vec(reduce128(square64(x))) }
}
#[inline(always)]
unsafe fn ext_square(a0: __m512i, a1: __m512i, a2: __m512i) -> (__m512i, __m512i, __m512i) {
    // Squares four degree-3 extension elements a0 + a1*x + a2*x^2 in parallel.
    // s0..s4 are the coefficients of the degree-4 square; the folds below apply
    // x^3 ≡ x + 1 and x^4 ≡ x^2 + x (i.e. modulus x^3 - x - 1, as implied by
    // the reduction pattern).
    unsafe {
        let s0 = square_reduce_vec(a0);
        let s1 = dbl_mod(mul_reduce_vec(a0, a1));
        let s2 = add_mod(dbl_mod(mul_reduce_vec(a0, a2)), square_reduce_vec(a1));
        let s3 = dbl_mod(mul_reduce_vec(a1, a2));
        let s4 = square_reduce_vec(a2);
        // Fold: s3*x^3 -> s3 + s3*x, s4*x^4 -> s4*x + s4*x^2.
        let out0 = add_mod(s0, s3);
        let out1 = add_mod(add_mod(s1, s3), s4);
        let out2 = add_mod(s2, s4);
        (out0, out1, out2)
    }
}
#[inline(always)]
unsafe fn ext_mul(
    a0: __m512i,
    a1: __m512i,
    a2: __m512i,
    b0: __m512i,
    b1: __m512i,
    b2: __m512i,
) -> (__m512i, __m512i, __m512i) {
    // Schoolbook product of degree-3 extension elements (a0 + a1*x + a2*x^2) *
    // (b0 + b1*x + b2*x^2); r0..r4 are the raw degree-4 coefficients, folded by
    // x^3 ≡ x + 1 and x^4 ≡ x^2 + x (same modulus as `ext_square`).
    unsafe {
        let a0b0 = mul_reduce_vec(a0, b0);
        let a1b1 = mul_reduce_vec(a1, b1);
        let a2b2 = mul_reduce_vec(a2, b2);
        let a0b1 = mul_reduce_vec(a0, b1);
        let a1b0 = mul_reduce_vec(a1, b0);
        let a0b2 = mul_reduce_vec(a0, b2);
        let a2b0 = mul_reduce_vec(a2, b0);
        let a1b2 = mul_reduce_vec(a1, b2);
        let a2b1 = mul_reduce_vec(a2, b1);
        let r0 = a0b0;
        let r1 = add_mod(a0b1, a1b0);
        let r2 = add_mod(add_mod(a0b2, a1b1), a2b0);
        let r3 = add_mod(a1b2, a2b1);
        let r4 = a2b2;
        // Fold: r3*x^3 -> r3 + r3*x, r4*x^4 -> r4*x + r4*x^2.
        let out0 = add_mod(r0, r3);
        let out1 = add_mod(add_mod(r1, r3), r4);
        let out2 = add_mod(r2, r4);
        (out0, out1, out2)
    }
}
#[inline(always)]
unsafe fn ext_exp7(a0: __m512i, a1: __m512i, a2: __m512i) -> (__m512i, __m512i, __m512i) {
    // a^7 = a^3 * a^4 in the cubic extension: two squarings, two multiplies.
    unsafe {
        let sq = ext_square(a0, a1, a2);
        let quad = ext_square(sq.0, sq.1, sq.2);
        let cube = ext_mul(sq.0, sq.1, sq.2, a0, a1, a2);
        ext_mul(cube.0, cube.1, cube.2, quad.0, quad.1, quad.2)
    }
}
#[inline(always)]
pub unsafe fn apply_ext_round(buf: &mut [u64; 12]) {
    // Views the state as four degree-3 extension elements and raises each to
    // the 7th power in place.
    unsafe {
        let (a0, a1, a2) = load_ext(buf);
        let (r0, r1, r2) = ext_exp7(a0, a1, a2);
        store_ext(buf, r0, r1, r2);
    }
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/algebraic_sponge/rescue/arch/mod.rs | miden-crypto/src/hash/algebraic_sponge/rescue/arch/mod.rs | // SVE OPTIMIZATIONS
// ================================================================================================
#[cfg(target_feature = "sve")]
pub mod optimized {
    //! ARM SVE backend, implemented in C and linked statically as `rpo_sve`.
    //! The C kernels fuse the add-round-constants step with the (inverse)
    //! S-box; each wrapper forwards the backend's `bool` result (whether it
    //! handled the call). Pointer casts assume `Felt` is layout-identical to
    //! `u64` — presumably `#[repr(transparent)]`; confirm against its decl.
    use crate::{Felt, hash::algebraic_sponge::rescue::STATE_WIDTH};
    mod ffi {
        #[link(name = "rpo_sve", kind = "static")]
        unsafe extern "C" {
            // NOTE(review): `c_ulong` is 64 bits only on LP64 targets —
            // presumably fine for aarch64+SVE, but confirm for other targets.
            pub fn add_constants_and_apply_sbox(
                state: *mut std::ffi::c_ulong,
                constants: *const std::ffi::c_ulong,
            ) -> bool;
            pub fn add_constants_and_apply_inv_sbox(
                state: *mut std::ffi::c_ulong,
                constants: *const std::ffi::c_ulong,
            ) -> bool;
        }
    }
    /// Adds round constants and applies the S-box via the SVE kernel.
    #[inline(always)]
    pub fn add_constants_and_apply_sbox(
        state: &mut [Felt; STATE_WIDTH],
        ark: &[Felt; STATE_WIDTH],
    ) -> bool {
        unsafe {
            ffi::add_constants_and_apply_sbox(
                state.as_mut_ptr() as *mut u64,
                ark.as_ptr() as *const u64,
            )
        }
    }
    /// Adds round constants and applies the inverse S-box via the SVE kernel.
    #[inline(always)]
    pub fn add_constants_and_apply_inv_sbox(
        state: &mut [Felt; STATE_WIDTH],
        ark: &[Felt; STATE_WIDTH],
    ) -> bool {
        unsafe {
            ffi::add_constants_and_apply_inv_sbox(
                state.as_mut_ptr() as *mut u64,
                ark.as_ptr() as *const u64,
            )
        }
    }
    /// No SVE kernel for the RPX E-round exists yet; always returns `false`
    /// so the caller falls back to the scalar implementation.
    #[inline(always)]
    pub fn add_constants_and_apply_ext_round(
        _state: &mut [Felt; STATE_WIDTH],
        _ark: &[Felt; STATE_WIDTH],
    ) -> bool {
        false
    }
}
// AVX2 OPTIMIZATIONS
// ================================================================================================
#[cfg(all(
    target_feature = "avx2",
    not(all(target_feature = "avx512f", target_feature = "avx512dq"))
))]
mod x86_64_avx2;
#[cfg(all(
    target_feature = "avx2",
    not(all(target_feature = "avx512f", target_feature = "avx512dq"))
))]
pub mod optimized {
    //! AVX2 backend: adds round constants with the shared scalar helper, then
    //! runs the vectorized kernels. Every entry point returns `true` to signal
    //! the accelerated path handled the call. The transmutes assume `Felt` is
    //! a transparent wrapper over `u64` and STATE_WIDTH == 12 — confirm repr.
    use super::x86_64_avx2::{apply_ext_round, apply_inv_sbox, apply_sbox};
    use crate::{
        Felt,
        hash::algebraic_sponge::rescue::{STATE_WIDTH, add_constants},
    };
    /// Adds round constants and applies the S-box (x^7) via AVX2.
    #[inline(always)]
    pub fn add_constants_and_apply_sbox(
        state: &mut [Felt; STATE_WIDTH],
        ark: &[Felt; STATE_WIDTH],
    ) -> bool {
        add_constants(state, ark);
        unsafe {
            apply_sbox(core::mem::transmute::<&mut [Felt; 12], &mut [u64; 12]>(state));
        }
        true
    }
    /// Adds round constants and applies the inverse S-box via AVX2.
    #[inline(always)]
    pub fn add_constants_and_apply_inv_sbox(
        state: &mut [Felt; STATE_WIDTH],
        ark: &[Felt; STATE_WIDTH],
    ) -> bool {
        add_constants(state, ark);
        unsafe {
            apply_inv_sbox(core::mem::transmute::<&mut [Felt; 12], &mut [u64; 12]>(state));
        }
        true
    }
    /// Adds round constants and applies the RPX E-round via AVX2.
    #[inline(always)]
    pub fn add_constants_and_apply_ext_round(
        state: &mut [Felt; STATE_WIDTH],
        ark: &[Felt; STATE_WIDTH],
    ) -> bool {
        add_constants(state, ark);
        unsafe {
            apply_ext_round(core::mem::transmute::<&mut [Felt; 12], &mut [u64; 12]>(state));
        }
        true
    }
}
// AVX512 OPTIMIZATIONS
// ================================================================================================
#[cfg(all(target_feature = "avx512f", target_feature = "avx512dq"))]
mod x86_64_avx512;
#[cfg(all(target_feature = "avx512f", target_feature = "avx512dq"))]
pub mod optimized {
    //! AVX-512 backend (requires both F and DQ): adds round constants with the
    //! shared scalar helper, then runs the vectorized kernels. Every entry
    //! point returns `true`. The transmutes assume `Felt` is a transparent
    //! wrapper over `u64` and STATE_WIDTH == 12 — confirm repr.
    use super::x86_64_avx512::{apply_ext_round, apply_inv_sbox, apply_sbox};
    use crate::{
        Felt,
        hash::algebraic_sponge::rescue::{STATE_WIDTH, add_constants},
    };
    /// Adds round constants and applies the S-box (x^7) via AVX-512.
    #[inline(always)]
    pub fn add_constants_and_apply_sbox(
        state: &mut [Felt; STATE_WIDTH],
        ark: &[Felt; STATE_WIDTH],
    ) -> bool {
        add_constants(state, ark);
        unsafe {
            apply_sbox(core::mem::transmute::<&mut [Felt; 12], &mut [u64; 12]>(state));
        }
        true
    }
    /// Adds round constants and applies the inverse S-box via AVX-512.
    #[inline(always)]
    pub fn add_constants_and_apply_inv_sbox(
        state: &mut [Felt; STATE_WIDTH],
        ark: &[Felt; STATE_WIDTH],
    ) -> bool {
        add_constants(state, ark);
        unsafe {
            apply_inv_sbox(core::mem::transmute::<&mut [Felt; 12], &mut [u64; 12]>(state));
        }
        true
    }
    /// Adds round constants and applies the RPX E-round via AVX-512.
    #[inline(always)]
    pub fn add_constants_and_apply_ext_round(
        state: &mut [Felt; STATE_WIDTH],
        ark: &[Felt; STATE_WIDTH],
    ) -> bool {
        add_constants(state, ark);
        unsafe {
            apply_ext_round(core::mem::transmute::<&mut [Felt; 12], &mut [u64; 12]>(state));
        }
        true
    }
}
// NO OPTIMIZATIONS
// ================================================================================================
#[cfg(not(any(
    target_feature = "avx2",
    target_feature = "avx512f",
    target_feature = "avx512dq",
    target_feature = "sve"
)))]
pub mod optimized {
    //! Portable fallback: no SIMD backend compiled in. Every entry point
    //! returns `false` so the caller uses the scalar implementation.
    //!
    //! NOTE(review): a hypothetical target with `avx512f` but neither
    //! `avx512dq` nor `avx2` would match *no* `optimized` module (this cfg
    //! excludes it, and the AVX-512 path requires F+DQ). No real CPU ships
    //! that combination, but the cfg sets are not exhaustive — confirm
    //! intentional.
    use crate::{Felt, hash::algebraic_sponge::rescue::STATE_WIDTH};
    /// Scalar fallback marker: nothing accelerated, returns `false`.
    #[inline(always)]
    pub fn add_constants_and_apply_sbox(
        _state: &mut [Felt; STATE_WIDTH],
        _ark: &[Felt; STATE_WIDTH],
    ) -> bool {
        false
    }
    /// Scalar fallback marker: nothing accelerated, returns `false`.
    #[inline(always)]
    pub fn add_constants_and_apply_inv_sbox(
        _state: &mut [Felt; STATE_WIDTH],
        _ark: &[Felt; STATE_WIDTH],
    ) -> bool {
        false
    }
    /// Scalar fallback marker: nothing accelerated, returns `false`.
    #[inline(always)]
    pub fn add_constants_and_apply_ext_round(
        _state: &mut [Felt; STATE_WIDTH],
        _ark: &[Felt; STATE_WIDTH],
    ) -> bool {
        false
    }
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/algebraic_sponge/rescue/arch/x86_64_avx2.rs | miden-crypto/src/hash/algebraic_sponge/rescue/arch/x86_64_avx2.rs | use core::arch::x86_64::*;
// The following AVX2 implementation has been copied from plonky2:
// https://github.com/0xPolygonZero/plonky2/blob/main/plonky2/src/hash/arch/x86_64/poseidon_goldilocks_avx2_bmi2.rs
// Preliminary notes:
// 1. AVX does not support addition with carry but 128-bit (2-word) addition can be easily emulated.
// The method recognizes that a + b overflowed iff (a + b) < a:
// 1. res_lo = a_lo + b_lo
// 2. carry_mask = res_lo < a_lo
// 3. res_hi = a_hi + b_hi - carry_mask
//
// Notice that carry_mask is subtracted, not added. This is because AVX comparison instructions
// return -1 (all bits 1) for true and 0 for false.
//
// 2. AVX does not have unsigned 64-bit comparisons. Those can be emulated with signed comparisons
// by recognizing that a <u b iff a + (1 << 63) <s b + (1 << 63), where the addition wraps around
// and the comparisons are unsigned and signed respectively. The shift function adds/subtracts 1
// << 63 to enable this trick. Addition with carry example:
// 1. a_lo_s = shift(a_lo)
// 2. res_lo_s = a_lo_s + b_lo
// 3. carry_mask = res_lo_s <s a_lo_s
// 4. res_lo = shift(res_lo_s)
// 5. res_hi = a_hi + b_hi - carry_mask
//
// The suffix _s denotes a value that has been shifted by 1 << 63. The result of addition
// is shifted if exactly one of the operands is shifted, as is the case on
// line 2. Line 3. performs a signed comparison res_lo_s <s a_lo_s on shifted values to
// emulate unsigned comparison res_lo <u a_lo on unshifted values. Finally, line 4. reverses the
// shift so the result can be returned.
//
// When performing a chain of calculations, we can often save instructions by letting
// the shift propagate through and only undoing it when necessary.
// For example, to compute the addition of three two-word (128-bit) numbers we can do:
// 1. a_lo_s = shift(a_lo)
// 2. tmp_lo_s = a_lo_s + b_lo
// 3. tmp_carry_mask = tmp_lo_s <s a_lo_s
// 4. tmp_hi = a_hi + b_hi - tmp_carry_mask
//    5. res_lo_s = tmp_lo_s + c_lo
// 6. res_carry_mask = res_lo_s <s tmp_lo_s
// 7. res_lo = shift(res_lo_s)
// 8. res_hi = tmp_hi + c_hi - res_carry_mask
//
// Notice that the above 3-value addition still only requires two calls to shift, just like our
// 2-value addition.
/// Emits an empty inline-assembly barrier as a hint that the current branch is
/// unlikely: it keeps the optimizer from merging the cold path into the hot
/// one. Pure no-op at runtime; compiles to nothing on unsupported arches.
#[inline(always)]
pub fn branch_hint() {
    // NOTE: These are the currently supported assembly architectures. See the
    // [nightly reference](https://doc.rust-lang.org/nightly/reference/inline-assembly.html) for
    // the most up-to-date list.
    #[cfg(any(
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "riscv32",
        target_arch = "riscv64",
        target_arch = "x86",
        target_arch = "x86_64",
    ))]
    unsafe {
        core::arch::asm!("", options(nomem, nostack, preserves_flags));
    }
}
/// Applies `$f` element-wise to triples of AVX2 vectors. Variants cover a
/// const-generic parameter (`::<$l>`), one or two triple operands, and `rep`
/// forms where a single (non-triple) operand is repeated for all three calls.
macro_rules! map3 {
    ($f:ident:: < $l:literal > , $v:ident) => {
        ($f::<$l>($v.0), $f::<$l>($v.1), $f::<$l>($v.2))
    };
    ($f:ident:: < $l:literal > , $v1:ident, $v2:ident) => {
        ($f::<$l>($v1.0, $v2.0), $f::<$l>($v1.1, $v2.1), $f::<$l>($v1.2, $v2.2))
    };
    ($f:ident, $v:ident) => {
        ($f($v.0), $f($v.1), $f($v.2))
    };
    ($f:ident, $v0:ident, $v1:ident) => {
        ($f($v0.0, $v1.0), $f($v0.1, $v1.1), $f($v0.2, $v1.2))
    };
    ($f:ident,rep $v0:ident, $v1:ident) => {
        ($f($v0, $v1.0), $f($v0, $v1.1), $f($v0, $v1.2))
    };
    ($f:ident, $v0:ident,rep $v1:ident) => {
        ($f($v0.0, $v1), $f($v0.1, $v1), $f($v0.2, $v1))
    };
}
/// Squares each 64-bit lane across three AVX2 vectors, returning the 128-bit
/// results as (low, high) halves. The symmetric cross term is computed once
/// and effectively shifted by 33 bits.
#[inline(always)]
unsafe fn square3(
    x: (__m256i, __m256i, __m256i),
) -> ((__m256i, __m256i, __m256i), (__m256i, __m256i, __m256i)) {
    unsafe {
        let x_hi = {
            // Move high bits to low position. The high bits of x_hi are ignored. Swizzle is faster
            // than bitshift. This instruction only has a floating-point flavor, so we
            // cast to/from float. This is safe and free.
            let x_ps = map3!(_mm256_castsi256_ps, x);
            let x_hi_ps = map3!(_mm256_movehdup_ps, x_ps);
            map3!(_mm256_castps_si256, x_hi_ps)
        };
        // All pairwise multiplications.
        let mul_ll = map3!(_mm256_mul_epu32, x, x);
        let mul_lh = map3!(_mm256_mul_epu32, x, x_hi);
        let mul_hh = map3!(_mm256_mul_epu32, x_hi, x_hi);
        // Bignum addition, but mul_lh is shifted by 33 bits (not 32).
        let mul_ll_hi = map3!(_mm256_srli_epi64::<33>, mul_ll);
        let t0 = map3!(_mm256_add_epi64, mul_lh, mul_ll_hi);
        let t0_hi = map3!(_mm256_srli_epi64::<31>, t0);
        let res_hi = map3!(_mm256_add_epi64, mul_hh, t0_hi);
        // Form low result by adding the mul_ll and the low 31 bits of mul_lh (shifted to the high
        // position).
        let mul_lh_lo = map3!(_mm256_slli_epi64::<33>, mul_lh);
        let res_lo = map3!(_mm256_add_epi64, mul_ll, mul_lh_lo);
        (res_lo, res_hi)
    }
}
/// Multiplies corresponding 64-bit lanes of `x` and `y` (three vectors each),
/// returning the full 128-bit products as (low, high) halves via four 32x32
/// partial products and bignum addition.
#[inline(always)]
unsafe fn mul3(
    x: (__m256i, __m256i, __m256i),
    y: (__m256i, __m256i, __m256i),
) -> ((__m256i, __m256i, __m256i), (__m256i, __m256i, __m256i)) {
    unsafe {
        let epsilon = _mm256_set1_epi64x(0xffffffff);
        let x_hi = {
            // Move high bits to low position. The high bits of x_hi are ignored. Swizzle is faster
            // than bitshift. This instruction only has a floating-point flavor, so we
            // cast to/from float. This is safe and free.
            let x_ps = map3!(_mm256_castsi256_ps, x);
            let x_hi_ps = map3!(_mm256_movehdup_ps, x_ps);
            map3!(_mm256_castps_si256, x_hi_ps)
        };
        let y_hi = {
            let y_ps = map3!(_mm256_castsi256_ps, y);
            let y_hi_ps = map3!(_mm256_movehdup_ps, y_ps);
            map3!(_mm256_castps_si256, y_hi_ps)
        };
        // All four pairwise multiplications
        let mul_ll = map3!(_mm256_mul_epu32, x, y);
        let mul_lh = map3!(_mm256_mul_epu32, x, y_hi);
        let mul_hl = map3!(_mm256_mul_epu32, x_hi, y);
        let mul_hh = map3!(_mm256_mul_epu32, x_hi, y_hi);
        // Bignum addition
        // Extract high 32 bits of mul_ll and add to mul_hl. This cannot overflow.
        let mul_ll_hi = map3!(_mm256_srli_epi64::<32>, mul_ll);
        let t0 = map3!(_mm256_add_epi64, mul_hl, mul_ll_hi);
        // Extract low 32 bits of t0 and add to mul_lh. Again, this cannot overflow.
        // Also, extract high 32 bits of t0 and add to mul_hh.
        let t0_lo = map3!(_mm256_and_si256, t0, rep epsilon);
        let t0_hi = map3!(_mm256_srli_epi64::<32>, t0);
        let t1 = map3!(_mm256_add_epi64, mul_lh, t0_lo);
        let t2 = map3!(_mm256_add_epi64, mul_hh, t0_hi);
        // Lastly, extract the high 32 bits of t1 and add to t2.
        let t1_hi = map3!(_mm256_srli_epi64::<32>, t1);
        let res_hi = map3!(_mm256_add_epi64, t2, t1_hi);
        // Form res_lo by combining the low half of mul_ll with the low half of t1 (shifted into
        // high position).
        let t1_lo = {
            let t1_ps = map3!(_mm256_castsi256_ps, t1);
            let t1_lo_ps = map3!(_mm256_moveldup_ps, t1_ps);
            map3!(_mm256_castps_si256, t1_lo_ps)
        };
        let res_lo = map3!(_mm256_blend_epi32::<0xaa>, mul_ll, t1_lo);
        (res_lo, res_hi)
    }
}
/// Addition, where the second operand is `0 <= y < 0xffffffff00000001`.
/// The first operand carries the `1 << 63` shift (`_s` suffix, see file header).
#[inline(always)]
unsafe fn add_small(
    x_s: (__m256i, __m256i, __m256i),
    y: (__m256i, __m256i, __m256i),
) -> (__m256i, __m256i, __m256i) {
    unsafe {
        let res_wrapped_s = map3!(_mm256_add_epi64, x_s, y);
        // A 32-bit compare of the high halves detects the 64-bit wrap here —
        // with x_s shifted and y bounded as documented above, an overflow
        // forces the high 32 bits of the result below those of x_s
        // (see the plonky2 original cited in the file header for the argument).
        let mask = map3!(_mm256_cmpgt_epi32, x_s, res_wrapped_s);
        let wrapback_amt = map3!(_mm256_srli_epi64::<32>, mask); // EPSILON if overflowed else 0.
        map3!(_mm256_add_epi64, res_wrapped_s, wrapback_amt)
    }
}
/// Applies the post-subtraction underflow fixup for `sub_tiny`: where `mask`
/// flags an underflowed lane, subtracts EPSILON (wrapping), branching on the
/// all-clear common case.
#[inline(always)]
unsafe fn maybe_adj_sub(res_wrapped_s: __m256i, mask: __m256i) -> __m256i {
    // The subtraction is very unlikely to overflow so we're best off branching.
    // The even u32s in `mask` are meaningless, so we want to ignore them. `_mm256_testz_pd`
    // branches depending on the sign bit of double-precision (64-bit) floats. Bit cast `mask` to
    // floating-point (this is free).
    unsafe {
        let mask_pd = _mm256_castsi256_pd(mask);
        // `_mm256_testz_pd(mask_pd, mask_pd) == 1` iff all sign bits are 0, meaning that underflow
        // did not occur for any of the vector elements.
        if _mm256_testz_pd(mask_pd, mask_pd) == 1 {
            res_wrapped_s
        } else {
            branch_hint();
            // Highly unlikely: underflow did occur. Find adjustment per element and apply it.
            let adj_amount = _mm256_srli_epi64::<32>(mask); // EPSILON if underflow.
            _mm256_sub_epi64(res_wrapped_s, adj_amount)
        }
    }
}
/// Subtraction, where the second operand is much smaller than `0xffffffff00000001`.
/// (The first operand carries the `1 << 63` shift; the previous doc comment
/// incorrectly said "Addition".)
#[inline(always)]
unsafe fn sub_tiny(
    x_s: (__m256i, __m256i, __m256i),
    y: (__m256i, __m256i, __m256i),
) -> (__m256i, __m256i, __m256i) {
    unsafe {
        let res_wrapped_s = map3!(_mm256_sub_epi64, x_s, y);
        // 32-bit compare of the high halves detects the 64-bit underflow
        // (y is tiny, so only the high half changes direction on wrap).
        let mask = map3!(_mm256_cmpgt_epi32, res_wrapped_s, x_s);
        map3!(maybe_adj_sub, res_wrapped_s, mask)
    }
}
#[inline(always)]
unsafe fn reduce3(
    (lo0, hi0): ((__m256i, __m256i, __m256i), (__m256i, __m256i, __m256i)),
) -> (__m256i, __m256i, __m256i) {
    // Goldilocks reduction of lane-wise 128-bit products:
    // result ≡ lo0 - (hi0 >> 32) + (hi0 & 0xffffffff) * EPSILON (mod p).
    // Intermediate values keep the `1 << 63` shift (`_s`) so the helpers can
    // use signed comparisons to emulate unsigned ones (see file header).
    unsafe {
        let sign_bit = _mm256_set1_epi64x(i64::MIN);
        let epsilon = _mm256_set1_epi64x(0xffffffff);
        let lo0_s = map3!(_mm256_xor_si256, lo0, rep sign_bit);
        let hi_hi0 = map3!(_mm256_srli_epi64::<32>, hi0);
        let lo1_s = sub_tiny(lo0_s, hi_hi0);
        // mul_epu32 multiplies the low 32-bit halves: (hi0 & 0xffffffff) * EPS.
        let t1 = map3!(_mm256_mul_epu32, hi0, rep epsilon);
        let lo2_s = add_small(lo1_s, t1);
        // Undo the shift before returning.
        map3!(_mm256_xor_si256, lo2_s, rep sign_bit)
    }
}
/// Lane-wise modular multiplication: full 128-bit products, then reduction.
#[inline(always)]
unsafe fn mul_reduce(
    a: (__m256i, __m256i, __m256i),
    b: (__m256i, __m256i, __m256i),
) -> (__m256i, __m256i, __m256i) {
    unsafe { reduce3(mul3(a, b)) }
}
/// Lane-wise modular squaring: full 128-bit squares, then reduction.
#[inline(always)]
unsafe fn square_reduce(state: (__m256i, __m256i, __m256i)) -> (__m256i, __m256i, __m256i) {
    unsafe { reduce3(square3(state)) }
}
#[inline(always)]
unsafe fn exp_acc(
    high: (__m256i, __m256i, __m256i),
    low: (__m256i, __m256i, __m256i),
    exp: usize,
) -> (__m256i, __m256i, __m256i) {
    // Computes high^(2^exp) * low: `exp` modular squarings, one multiplication.
    unsafe {
        let mut acc = high;
        let mut remaining = exp;
        while remaining > 0 {
            acc = square_reduce(acc);
            remaining -= 1;
        }
        mul_reduce(acc, low)
    }
}
#[inline(always)]
unsafe fn do_apply_sbox(state: (__m256i, __m256i, __m256i)) -> (__m256i, __m256i, __m256i) {
    // S-box x^7 = x^3 * x^4: two squarings and two multiplications, each
    // reduced immediately (same values as deferring the reductions).
    unsafe {
        let x2 = square_reduce(state);
        let x4 = reduce3(square3(x2));
        let x3 = reduce3(mul3(x2, state));
        reduce3(mul3(x3, x4))
    }
}
/// Inverse S-box: raises each lane to the power 10540996611094048183 via a
/// 72-multiplication addition chain.
#[inline(always)]
unsafe fn do_apply_inv_sbox(state: (__m256i, __m256i, __m256i)) -> (__m256i, __m256i, __m256i) {
    unsafe {
        // compute base^10540996611094048183 using 72 multiplications per array element
        // 10540996611094048183 = b1001001001001001001001001001000110110110110110110110110110110111
        // compute base^10
        let t1 = square_reduce(state);
        // compute base^100
        let t2 = square_reduce(t1);
        // compute base^100100
        let t3 = exp_acc(t2, t2, 3);
        // compute base^100100100100
        let t4 = exp_acc(t3, t3, 6);
        // compute base^100100100100100100100100
        let t5 = exp_acc(t4, t4, 12);
        // compute base^100100100100100100100100100100
        let t6 = exp_acc(t5, t3, 6);
        // compute base^1001001001001001001001001001000100100100100100100100100100100
        let t7 = exp_acc(t6, t6, 31);
        // compute base^1001001001001001001001001001000110110110110110110110110110110111
        let a = square_reduce(square_reduce(mul_reduce(square_reduce(t7), t6)));
        let b = mul_reduce(t1, mul_reduce(t2, state));
        mul_reduce(a, b)
    }
}
#[inline(always)]
unsafe fn avx2_load(state: &[u64; 12]) -> (__m256i, __m256i, __m256i) {
    // Three unaligned 256-bit loads covering the 12-element state.
    unsafe {
        let base = state.as_ptr();
        (
            _mm256_loadu_si256(base.cast::<__m256i>()),
            _mm256_loadu_si256(base.add(4).cast::<__m256i>()),
            _mm256_loadu_si256(base.add(8).cast::<__m256i>()),
        )
    }
}
#[inline(always)]
unsafe fn avx2_store(buf: &mut [u64; 12], state: (__m256i, __m256i, __m256i)) {
    // Inverse of `avx2_load`: three unaligned 256-bit stores.
    unsafe {
        let base = buf.as_mut_ptr();
        _mm256_storeu_si256(base.cast::<__m256i>(), state.0);
        _mm256_storeu_si256(base.add(4).cast::<__m256i>(), state.1);
        _mm256_storeu_si256(base.add(8).cast::<__m256i>(), state.2);
    }
}
#[inline(always)]
pub unsafe fn apply_sbox(buffer: &mut [u64; 12]) {
    // In-place S-box (x^7) over all 12 state elements: load, apply, store.
    unsafe { avx2_store(buffer, do_apply_sbox(avx2_load(buffer))) }
}
#[inline(always)]
pub unsafe fn apply_inv_sbox(buffer: &mut [u64; 12]) {
    // In-place inverse S-box over all 12 state elements: load, apply, store.
    unsafe { avx2_store(buffer, do_apply_inv_sbox(avx2_load(buffer))) }
}
// RPX E-round
// ================================================================================================
/// The Goldilocks prime p = 2^64 - 2^32 + 1.
const P_U64: u64 = 0xffff_ffff_0000_0001;
#[inline(always)]
unsafe fn load_ext(buf: &[u64; 12]) -> (__m256i, __m256i, __m256i) {
    // Views the state as four degree-3 extension elements and gathers
    // coefficient i of each element into vector a_i:
    // a0 = [s0,s3,s6,s9], a1 = [s1,s4,s7,s10], a2 = [s2,s5,s8,s11]
    unsafe {
        let a0 = _mm256_setr_epi64x(buf[0] as i64, buf[3] as i64, buf[6] as i64, buf[9] as i64);
        let a1 = _mm256_setr_epi64x(buf[1] as i64, buf[4] as i64, buf[7] as i64, buf[10] as i64);
        let a2 = _mm256_setr_epi64x(buf[2] as i64, buf[5] as i64, buf[8] as i64, buf[11] as i64);
        (a0, a1, a2)
    }
}
#[inline(always)]
unsafe fn store_ext(buf: &mut [u64; 12], a0: __m256i, a1: __m256i, a2: __m256i) {
    // Inverse of `load_ext`: lane j of coefficient vector i goes to buf[3*j + i].
    unsafe {
        let mut lanes = [[0i64; 4]; 3];
        _mm256_storeu_si256(lanes[0].as_mut_ptr().cast(), a0);
        _mm256_storeu_si256(lanes[1].as_mut_ptr().cast(), a1);
        _mm256_storeu_si256(lanes[2].as_mut_ptr().cast(), a2);
        for (i, coeff) in lanes.iter().enumerate() {
            for (j, &v) in coeff.iter().enumerate() {
                buf[3 * j + i] = v as u64;
            }
        }
    }
}
#[inline(always)]
unsafe fn add_mod(x: __m256i, y: __m256i) -> __m256i {
    // Lane-wise addition modulo p for canonical inputs; result is canonical.
    // AVX2 lacks unsigned 64-bit compares, so operands are XORed with 1 << 63
    // and compared signed (see file header).
    unsafe {
        let p = _mm256_set1_epi64x(P_U64 as i64);
        let sign = _mm256_set1_epi64x(i64::MIN);
        // Inputs are already canonical from reduce3, so no need to call canon()
        let sum = _mm256_add_epi64(x, y);
        let x_s = _mm256_xor_si256(x, sign);
        let sum_s = _mm256_xor_si256(sum, sign);
        // Wrap detected by (unsigned) sum < x.
        let carry_mask = _mm256_cmpgt_epi64(x_s, sum_s);
        let p_s = _mm256_xor_si256(p, sign);
        // ge_p = !(p > sum), i.e. sum >= p.
        let p_gt_sum = _mm256_cmpgt_epi64(p_s, sum_s);
        let ge_p_mask = _mm256_xor_si256(p_gt_sum, _mm256_set1_epi64x(-1));
        // Subtract p once where either condition holds; a wrapped sum already
        // lost 2^64, so the wrapping subtraction yields the correct residue.
        let need = _mm256_or_si256(carry_mask, ge_p_mask);
        let adj = _mm256_and_si256(need, p);
        _mm256_sub_epi64(sum, adj)
    }
}
/// Doubles each lane modulo p (canonical in, canonical out).
#[inline(always)]
unsafe fn dbl_mod(x: __m256i) -> __m256i {
    unsafe { add_mod(x, x) }
}
#[inline(always)]
unsafe fn canonicalize(x: (__m256i, __m256i, __m256i)) -> (__m256i, __m256i, __m256i) {
    // Maps residues to canonical form [0, p) with one conditional subtraction.
    // Assumes inputs are below 2p so a single subtraction suffices — holds for
    // `reduce3` outputs as used below; confirm for any new caller.
    unsafe {
        let sign_bit = _mm256_set1_epi64x(i64::MIN);
        let p = _mm256_set1_epi64x(P_U64 as i64);
        let all_ones = _mm256_set1_epi64x(-1);
        // Shift by 1 << 63 so signed compares emulate unsigned ones.
        let x_s = map3!(_mm256_xor_si256, x, rep sign_bit);
        let p_s = _mm256_xor_si256(p, sign_bit);
        // ge = !(p > x), i.e. x >= p; subtract p exactly where that holds.
        let p_gt = map3!(_mm256_cmpgt_epi64, rep p_s, x_s);
        let ge_mask = map3!(_mm256_xor_si256, p_gt, rep all_ones);
        let adj = map3!(_mm256_and_si256, ge_mask, rep p);
        map3!(_mm256_sub_epi64, x, adj)
    }
}
/// Modular multiply of a single vector pair, canonical result.
/// NOTE(review): replicating the operands into all three triple slots does the
/// multiply/reduce work three times and discards two results — reuses the
/// `map3`-based helpers at the cost of redundant lanes; consider a
/// single-vector variant if this shows up in profiles.
#[inline(always)]
unsafe fn mul_reduce_vec(a: __m256i, b: __m256i) -> __m256i {
    unsafe { canonicalize(mul_reduce((a, a, a), (b, b, b))).0 }
}
/// Modular square of a single vector, canonical result (same triple-slot
/// redundancy trade-off as `mul_reduce_vec`).
#[inline(always)]
unsafe fn square_reduce_vec(state: __m256i) -> __m256i {
    unsafe { canonicalize(square_reduce((state, state, state))).0 }
}
#[inline(always)]
unsafe fn ext_square(a0: __m256i, a1: __m256i, a2: __m256i) -> (__m256i, __m256i, __m256i) {
    // Squares four degree-3 extension elements a0 + a1*x + a2*x^2 in parallel.
    // s0..s4 are the coefficients of the degree-4 square; the folds below apply
    // x^3 ≡ x + 1 and x^4 ≡ x^2 + x (i.e. modulus x^3 - x - 1, as implied by
    // the reduction pattern).
    unsafe {
        let a0_sq = square_reduce_vec(a0);
        let a1_sq = square_reduce_vec(a1);
        let a2_sq = square_reduce_vec(a2);
        let a0a1 = mul_reduce_vec(a0, a1);
        let a0a2 = mul_reduce_vec(a0, a2);
        let a1a2 = mul_reduce_vec(a1, a2);
        let s0 = a0_sq;
        let s1 = dbl_mod(a0a1);
        let s2 = add_mod(dbl_mod(a0a2), a1_sq);
        let s3 = dbl_mod(a1a2);
        let s4 = a2_sq;
        // Fold: s3*x^3 -> s3 + s3*x, s4*x^4 -> s4*x + s4*x^2.
        let out0 = add_mod(s0, s3);
        let out1 = add_mod(add_mod(s1, s3), s4);
        let out2 = add_mod(s2, s4);
        (out0, out1, out2)
    }
}
#[inline(always)]
unsafe fn ext_mul(
    a0: __m256i,
    a1: __m256i,
    a2: __m256i,
    b0: __m256i,
    b1: __m256i,
    b2: __m256i,
) -> (__m256i, __m256i, __m256i) {
    // Schoolbook product of degree-3 extension elements; r0..r4 are the raw
    // degree-4 coefficients, folded by x^3 ≡ x + 1 and x^4 ≡ x^2 + x
    // (same modulus as `ext_square`).
    unsafe {
        let a0b0 = mul_reduce_vec(a0, b0);
        let a1b1 = mul_reduce_vec(a1, b1);
        let a2b2 = mul_reduce_vec(a2, b2);
        let a0b1 = mul_reduce_vec(a0, b1);
        let a1b0 = mul_reduce_vec(a1, b0);
        let a0b2 = mul_reduce_vec(a0, b2);
        let a2b0 = mul_reduce_vec(a2, b0);
        let a1b2 = mul_reduce_vec(a1, b2);
        let a2b1 = mul_reduce_vec(a2, b1);
        let r0 = a0b0;
        let r1 = add_mod(a0b1, a1b0);
        let r2 = add_mod(add_mod(a0b2, a1b1), a2b0);
        let r3 = add_mod(a1b2, a2b1);
        let r4 = a2b2;
        // Fold: r3*x^3 -> r3 + r3*x, r4*x^4 -> r4*x + r4*x^2.
        let out0 = add_mod(r0, r3);
        let out1 = add_mod(add_mod(r1, r3), r4);
        let out2 = add_mod(r2, r4);
        (out0, out1, out2)
    }
}
#[inline(always)]
unsafe fn ext_exp7(a0: __m256i, a1: __m256i, a2: __m256i) -> (__m256i, __m256i, __m256i) {
    // a^7 = a^3 * a^4 in the cubic extension: two squarings, two multiplies.
    unsafe {
        let sq = ext_square(a0, a1, a2);
        let quad = ext_square(sq.0, sq.1, sq.2);
        let cube = ext_mul(sq.0, sq.1, sq.2, a0, a1, a2);
        ext_mul(cube.0, cube.1, cube.2, quad.0, quad.1, quad.2)
    }
}
#[inline(always)]
pub unsafe fn apply_ext_round(buf: &mut [u64; 12]) {
    // Views the state as four degree-3 extension elements and raises each to
    // the 7th power in place.
    unsafe {
        let (a0, a1, a2) = load_ext(buf);
        let (r0, r1, r2) = ext_exp7(a0, a1, a2);
        store_ext(buf, r0, r1, r2);
    }
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/algebraic_sponge/rescue/rpx/tests.rs | miden-crypto/src/hash/algebraic_sponge/rescue/rpx/tests.rs | #![cfg(feature = "std")]
use alloc::{collections::BTreeSet, vec::Vec};
use p3_field::PrimeField64;
use proptest::prelude::*;
use super::{Felt, Rpx256};
use crate::{
ONE, Word, ZERO, hash::algebraic_sponge::AlgebraicSponge, rand::test_utils::rand_value,
};
// The number of iterations to run the `ext_round_matches_reference_many` test.
// NOTE: 5M random states makes this a long-running test; it only compiles when
// an x86_64 SIMD backend (AVX2, or AVX-512 F+DQ) is enabled at build time.
#[cfg(all(
    target_arch = "x86_64",
    any(
        target_feature = "avx2",
        all(target_feature = "avx512f", target_feature = "avx512dq")
    )
))]
const EXT_ROUND_TEST_ITERS: usize = 5_000_000;
#[test]
fn hash_elements_vs_merge() {
    // Hashing 8 field elements directly must agree with merging the two words
    // they form.
    //
    // BUGFIX: `[Felt::new(rand_value()); 8]` evaluates `rand_value()` once and
    // copies it, so all 8 "random" elements were identical. Use `from_fn` for
    // distinct values (as `ext_round_matches_reference_many` already does).
    let elements: [Felt; 8] = core::array::from_fn(|_| Felt::new(rand_value()));
    let digests: [Word; 2] = [
        Word::new(elements[..4].try_into().unwrap()),
        Word::new(elements[4..].try_into().unwrap()),
    ];
    let m_result = Rpx256::merge(&digests);
    let h_result = Rpx256::hash_elements(&elements);
    assert_eq!(m_result, h_result);
}
#[test]
fn merge_vs_merge_in_domain() {
    // `merge_in_domain` with a zero domain must match plain `merge`, and a
    // non-zero domain must change the digest.
    //
    // BUGFIX: `[Felt::new(rand_value()); 8]` evaluates `rand_value()` once and
    // copies it, so all 8 "random" elements were identical. Use `from_fn` for
    // distinct values.
    let elements: [Felt; 8] = core::array::from_fn(|_| Felt::new(rand_value()));
    let digests: [Word; 2] = [
        Word::new(elements[..4].try_into().unwrap()),
        Word::new(elements[4..].try_into().unwrap()),
    ];
    let merge_result = Rpx256::merge(&digests);
    // ----- merge with domain = 0 ----------------------------------------------------------------
    // set domain to ZERO. This should not change the result.
    let domain = ZERO;
    let merge_in_domain_result = Rpx256::merge_in_domain(&digests, domain);
    assert_eq!(merge_result, merge_in_domain_result);
    // ----- merge with domain = 1 ----------------------------------------------------------------
    // set domain to ONE. This should change the result.
    let domain = ONE;
    let merge_in_domain_result = Rpx256::merge_in_domain(&digests, domain);
    assert_ne!(merge_result, merge_in_domain_result);
}
#[test]
fn hash_elements_vs_merge_with_int() {
    // `merge_with_int` must agree with hashing the seed elements followed by
    // the integer's field encoding (plus ONE when the value overflows a Felt).
    //
    // BUGFIX: `[Felt::new(rand_value()); 4]` evaluates `rand_value()` once and
    // copies it, so the seed was four identical elements. Use `from_fn` for
    // distinct values.
    let seed = Word::new(core::array::from_fn(|_| Felt::new(rand_value())));
    // ----- value fits into a field element ------------------------------------------------------
    let val: Felt = Felt::new(rand_value());
    let m_result = <Rpx256 as AlgebraicSponge>::merge_with_int(seed, val.as_canonical_u64());
    let mut elements = seed.as_elements().to_vec();
    elements.push(val);
    let h_result = Rpx256::hash_elements(&elements);
    assert_eq!(m_result, h_result);
    // ----- value does not fit into a field element ----------------------------------------------
    let val = Felt::ORDER_U64 + 2;
    let m_result = <Rpx256 as AlgebraicSponge>::merge_with_int(seed, val);
    let mut elements = seed.as_elements().to_vec();
    elements.push(Felt::new(val));
    elements.push(ONE);
    let h_result = Rpx256::hash_elements(&elements);
    assert_eq!(m_result, h_result);
}
#[test]
fn hash_padding() {
    // Appending zero bytes to an input must change the digest — the padding
    // rule is injective. The cases cover: short input, input ending exactly on
    // an element boundary, input splitting over two elements, and inputs that
    // already end in zeros.
    let cases: [(&[u8], &[u8]); 4] = [
        (&[1_u8, 2, 3], &[1_u8, 2, 3, 0]),
        (&[1_u8, 2, 3, 4, 5, 6], &[1_u8, 2, 3, 4, 5, 6, 0]),
        (&[1_u8, 2, 3, 4, 5, 6, 7], &[1_u8, 2, 3, 4, 5, 6, 7, 0]),
        (&[1_u8, 2, 3, 4, 5, 6, 7, 0, 0], &[1_u8, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0]),
    ];
    for (short, padded) in cases {
        assert_ne!(Rpx256::hash(short), Rpx256::hash(padded));
    }
}
#[test]
fn hash_elements_padding() {
    // Appending ZERO to an element sequence must change the digest — element
    // padding is injective.
    //
    // BUGFIX: `[Felt::new(rand_value()); 2]` evaluates `rand_value()` once and
    // copies it, so both "random" elements were identical. Use `from_fn`.
    let e1: [Felt; 2] = core::array::from_fn(|_| Felt::new(rand_value()));
    let e2 = [e1[0], e1[1], ZERO];
    let r1 = Rpx256::hash_elements(&e1);
    let r2 = Rpx256::hash_elements(&e2);
    assert_ne!(r1, r2);
}
#[test]
fn hash_elements() {
    // For exactly 8 elements, `hash_elements` must coincide with `merge` on
    // the two words they form (fixed, non-random inputs 0..8).
    let elements = [
        ZERO,
        ONE,
        Felt::new(2),
        Felt::new(3),
        Felt::new(4),
        Felt::new(5),
        Felt::new(6),
        Felt::new(7),
    ];
    let (front, back) = elements.split_at(4);
    let digests: [Word; 2] =
        [Word::new(front.try_into().unwrap()), Word::new(back.try_into().unwrap())];
    assert_eq!(Rpx256::merge(&digests), Rpx256::hash_elements(&elements));
}
#[test]
fn hash_empty() {
let elements: Vec<Felt> = vec![];
let zero_digest = Word::default();
let h_result = Rpx256::hash_elements(&elements);
assert_eq!(zero_digest, h_result);
}
#[test]
fn hash_empty_bytes() {
let bytes: Vec<u8> = vec![];
let zero_digest = Word::default();
let h_result = Rpx256::hash(&bytes);
assert_eq!(zero_digest, h_result);
}
#[test]
fn sponge_bytes_with_remainder_length_wont_panic() {
// this test targets to assert that no panic will happen with the edge case of having an inputs
// with length that is not divisible by the used binary chunk size. 113 is a non-negligible
// input length that is prime; hence guaranteed to not be divisible by any choice of chunk
// size.
//
// this is a preliminary test to the fuzzy-stress of proptest.
Rpx256::hash(&[0; 113]);
}
#[test]
fn sponge_collision_for_wrapped_field_element() {
let a = Rpx256::hash(&[0; 8]);
let b = Rpx256::hash(&Felt::ORDER_U64.to_le_bytes());
assert_ne!(a, b);
}
#[test]
fn sponge_zeroes_collision() {
let mut zeroes = Vec::with_capacity(255);
let mut set = BTreeSet::new();
(0..255).for_each(|_| {
let hash = Rpx256::hash(&zeroes);
zeroes.push(0);
// panic if a collision was found
assert!(set.insert(hash));
});
}
/// Verifies that the optimized RPX (E) round (SIMD path) matches the
/// scalar reference implementation across many random states.
///
/// Compiles and runs only when we build an x86_64 target with AVX2 or AVX-512 enabled.
/// At runtime, if the host CPU lacks the compiled feature, the test returns early.
#[cfg(all(
target_arch = "x86_64",
any(
target_feature = "avx2",
all(target_feature = "avx512f", target_feature = "avx512dq")
)
))]
#[test]
fn ext_round_matches_reference_many() {
for i in 0..EXT_ROUND_TEST_ITERS {
let mut state = core::array::from_fn(|_| Felt::new(rand_value()));
for round in 0..7 {
let mut got = state;
let mut want = state;
// Optimized path (AVX2 or AVX-512 depending on build).
Rpx256::apply_ext_round(&mut got, round);
// Scalar reference path.
Rpx256::apply_ext_round_ref(&mut want, round);
assert_eq!(got, want, "mismatch at round {round} (iteration {i})");
state = got; // advance to catch chaining issues
}
}
}
proptest! {
#[test]
fn rpo256_wont_panic_with_arbitrary_input(ref bytes in any::<Vec<u8>>()) {
Rpx256::hash(bytes);
}
}
// PLONKY3 INTEGRATION TESTS
// ================================================================================================
mod p3_tests {
use p3_symmetric::{CryptographicHasher, Permutation, PseudoCompressionFunction};
use super::*;
use crate::hash::algebraic_sponge::rescue::rpx::{
RpxCompression, RpxHasher, RpxPermutation256, STATE_WIDTH, cubic_ext,
};
#[test]
fn test_cubic_ext_power7() {
use cubic_ext::*;
// Test with a simple element [1, 0, 0]
let x = [Felt::new(1), Felt::new(0), Felt::new(0)];
let x7 = power7(x);
assert_eq!(x7, x, "1^7 should equal 1");
// Test with [0, 1, 0] (just φ)
let phi = [Felt::new(0), Felt::new(1), Felt::new(0)];
let phi7 = power7(phi);
// φ^7 should be some combination - verify it's computed correctly
assert_ne!(phi7, phi, "φ^7 should not equal φ");
// Test with [1, 1, 1]
let x = [Felt::new(1), Felt::new(1), Felt::new(1)];
let x7 = power7(x);
assert_ne!(x7, x, "(1+φ+φ²)^7 should not equal 1+φ+φ²");
// Verify power7 is consistent
let x = [Felt::new(42), Felt::new(17), Felt::new(99)];
let x7_a = power7(x);
let x7_b = power7(x);
assert_eq!(x7_a, x7_b, "power7 should be deterministic");
}
#[test]
fn test_rpx_permutation_basic() {
let mut state = [Felt::new(0); STATE_WIDTH];
// Apply permutation
let perm = RpxPermutation256;
perm.permute_mut(&mut state);
// State should be different from all zeros after permutation
assert_ne!(state, [Felt::new(0); STATE_WIDTH]);
}
#[test]
fn test_rpx_permutation_consistency() {
let mut state1 = [Felt::new(0); STATE_WIDTH];
let mut state2 = [Felt::new(0); STATE_WIDTH];
// Apply permutation using the trait
let perm = RpxPermutation256;
perm.permute_mut(&mut state1);
// Apply permutation directly
RpxPermutation256::apply_permutation(&mut state2);
// Both should produce the same result
assert_eq!(state1, state2);
}
#[test]
fn test_rpx_permutation_deterministic() {
let input = [
Felt::new(1),
Felt::new(2),
Felt::new(3),
Felt::new(4),
Felt::new(5),
Felt::new(6),
Felt::new(7),
Felt::new(8),
Felt::new(9),
Felt::new(10),
Felt::new(11),
Felt::new(12),
];
let mut state1 = input;
let mut state2 = input;
let perm = RpxPermutation256;
perm.permute_mut(&mut state1);
perm.permute_mut(&mut state2);
// Same input should produce same output
assert_eq!(state1, state2);
}
#[test]
#[ignore] // TODO: Re-enable after migrating RPX state layout to match Plonky3
// Miden-crypto: capacity=[0-3], rate=[4-11]
// Plonky3: rate=[0-7], capacity=[8-11]
fn test_rpx_hasher_vs_hash_elements() {
// Test with empty input
let expected: [Felt; 4] = Rpx256::hash_elements::<Felt>(&[]).into();
let hasher = RpxHasher::new(RpxPermutation256);
let result = hasher.hash_iter([]);
assert_eq!(result, expected, "Empty input should produce same digest");
// Test with 4 elements (one digest worth)
let input4 = [Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)];
let expected: [Felt; 4] = Rpx256::hash_elements(&input4).into();
let result = hasher.hash_iter(input4);
assert_eq!(result, expected, "4 elements should produce same digest");
// Test with 8 elements (exactly one rate)
let input8 = [
Felt::new(1),
Felt::new(2),
Felt::new(3),
Felt::new(4),
Felt::new(5),
Felt::new(6),
Felt::new(7),
Felt::new(8),
];
let expected: [Felt; 4] = Rpx256::hash_elements(&input8).into();
let result = hasher.hash_iter(input8);
assert_eq!(result, expected, "8 elements (one rate) should produce same digest");
// Test with 16 elements (two rates)
let input16 = [
Felt::new(1),
Felt::new(2),
Felt::new(3),
Felt::new(4),
Felt::new(5),
Felt::new(6),
Felt::new(7),
Felt::new(8),
Felt::new(9),
Felt::new(10),
Felt::new(11),
Felt::new(12),
Felt::new(13),
Felt::new(14),
Felt::new(15),
Felt::new(16),
];
let expected: [Felt; 4] = Rpx256::hash_elements(&input16).into();
let result = hasher.hash_iter(input16);
assert_eq!(result, expected, "16 elements (two rates) should produce same digest");
}
#[test]
#[ignore] // TODO: Re-enable after migrating RPX state layout to match Plonky3
// Miden-crypto: capacity=[0-3], rate=[4-11]
// Plonky3: rate=[0-7], capacity=[8-11]
fn test_rpx_compression_vs_merge() {
let digest1 = [Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)];
let digest2 = [Felt::new(5), Felt::new(6), Felt::new(7), Felt::new(8)];
// Rpx256::merge expects &[Word; 2]
let expected: [Felt; 4] = Rpx256::merge(&[digest1.into(), digest2.into()]).into();
// RpxCompression expects [[Felt; 4]; 2]
let compress = RpxCompression::new(RpxPermutation256);
let result = compress.compress([digest1, digest2]);
assert_eq!(result, expected, "RpxCompression should match Rpx256::merge");
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/algebraic_sponge/rescue/rpx/mod.rs | miden-crypto/src/hash/algebraic_sponge/rescue/rpx/mod.rs | use super::{
ARK1, ARK2, CAPACITY_RANGE, DIGEST_RANGE, Felt, MDS, NUM_ROUNDS, RATE_RANGE, Range,
STATE_WIDTH, Word, add_constants, add_constants_and_apply_ext_round,
add_constants_and_apply_inv_sbox, add_constants_and_apply_sbox, apply_inv_sbox, apply_mds,
apply_sbox,
};
use crate::hash::algebraic_sponge::AlgebraicSponge;
#[cfg(test)]
mod tests;
// HASHER IMPLEMENTATION
// ================================================================================================
/// Implementation of the Rescue Prime eXtension hash function with 256-bit output.
///
/// The hash function is based on the XHash12 construction in [specifications](https://eprint.iacr.org/2023/1045)
///
/// The parameters used to instantiate the function are:
/// * Field: 64-bit prime field with modulus 2^64 - 2^32 + 1.
/// * State width: 12 field elements.
/// * Capacity size: 4 field elements.
/// * S-Box degree: 7.
/// * Rounds: There are 3 different types of rounds:
/// - (FB): `apply_mds` → `add_constants` → `apply_sbox` → `apply_mds` → `add_constants` →
/// `apply_inv_sbox`.
/// - (E): `add_constants` → `ext_sbox` (which is raising to power 7 in the degree 3 extension
/// field).
/// - (M): `apply_mds` → `add_constants`.
/// * Permutation: (FB) (E) (FB) (E) (FB) (E) (M).
///
/// The above parameters target a 128-bit security level. The digest consists of four field elements
/// and it can be serialized into 32 bytes (256 bits).
///
/// ## Hash output consistency
/// Functions [hash_elements()](Rpx256::hash_elements), [merge()](Rpx256::merge), and
/// [merge_with_int()](Rpx256::merge_with_int) are internally consistent. That is, computing
/// a hash for the same set of elements using these functions will always produce the same
/// result. For example, merging two digests using [merge()](Rpx256::merge) will produce the
/// same result as hashing 8 elements which make up these digests using
/// [hash_elements()](Rpx256::hash_elements) function.
///
/// However, [hash()](Rpx256::hash) function is not consistent with functions mentioned above.
/// For example, if we take two field elements, serialize them to bytes and hash them using
/// [hash()](Rpx256::hash), the result will differ from the result obtained by hashing these
/// elements directly using [hash_elements()](Rpx256::hash_elements) function. The reason for
/// this difference is that [hash()](Rpx256::hash) function needs to be able to handle
/// arbitrary binary strings, which may or may not encode valid field elements - and thus,
/// deserialization procedure used by this function is different from the procedure used to
/// deserialize valid field elements.
///
/// Thus, if the underlying data consists of valid field elements, it might make more sense
/// to deserialize them into field elements and then hash them using
/// [hash_elements()](Rpx256::hash_elements) function rather than hashing the serialized bytes
/// using [hash()](Rpx256::hash) function.
///
/// ## Domain separation
/// [merge_in_domain()](Rpx256::merge_in_domain) hashes two digests into one digest with some domain
/// identifier and the current implementation sets the second capacity element to the value of
/// this domain identifier. Using a similar argument to the one formulated for domain separation
/// in Appendix C of the [specifications](https://eprint.iacr.org/2023/1045), one sees that doing
/// so degrades only pre-image resistance, from its initial bound of c.log_2(p), by as much as
/// the log_2 of the size of the domain identifier space. Since pre-image resistance becomes
/// the bottleneck for the security bound of the sponge in overwrite-mode only when it is
/// lower than 2^128, we see that the target 128-bit security level is maintained as long as
/// the size of the domain identifier space, including for padding, is less than 2^128.
///
/// ## Hashing of empty input
/// The current implementation hashes empty input to the zero digest [0, 0, 0, 0]. This has
/// the benefit of requiring no calls to the RPX permutation when hashing empty input.
#[allow(rustdoc::private_intra_doc_links)]
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct Rpx256();
impl AlgebraicSponge for Rpx256 {
/// Applies RPX permutation to the provided state.
#[inline(always)]
fn apply_permutation(state: &mut [Felt; STATE_WIDTH]) {
Self::apply_fb_round(state, 0);
Self::apply_ext_round(state, 1);
Self::apply_fb_round(state, 2);
Self::apply_ext_round(state, 3);
Self::apply_fb_round(state, 4);
Self::apply_ext_round(state, 5);
Self::apply_final_round(state, 6);
}
}
impl Rpx256 {
// CONSTANTS
// --------------------------------------------------------------------------------------------
/// Target collision resistance level in bits.
pub const COLLISION_RESISTANCE: u32 = 128;
/// Sponge state is set to 12 field elements or 768 bytes; 8 elements are reserved for rate and
/// the remaining 4 elements are reserved for capacity.
pub const STATE_WIDTH: usize = STATE_WIDTH;
/// The rate portion of the state is located in elements 4 through 11 (inclusive).
pub const RATE_RANGE: Range<usize> = RATE_RANGE;
/// The capacity portion of the state is located in elements 0, 1, 2, and 3.
pub const CAPACITY_RANGE: Range<usize> = CAPACITY_RANGE;
/// The output of the hash function can be read from state elements 4, 5, 6, and 7.
pub const DIGEST_RANGE: Range<usize> = DIGEST_RANGE;
/// MDS matrix used for computing the linear layer in the (FB) and (E) rounds.
pub const MDS: [[Felt; STATE_WIDTH]; STATE_WIDTH] = MDS;
/// Round constants added to the hasher state in the first half of the round.
pub const ARK1: [[Felt; STATE_WIDTH]; NUM_ROUNDS] = ARK1;
/// Round constants added to the hasher state in the second half of the round.
pub const ARK2: [[Felt; STATE_WIDTH]; NUM_ROUNDS] = ARK2;
// HASH FUNCTIONS
// --------------------------------------------------------------------------------------------
/// Returns a hash of the provided sequence of bytes.
#[inline(always)]
pub fn hash(bytes: &[u8]) -> Word {
<Self as AlgebraicSponge>::hash(bytes)
}
/// Returns a hash of the provided field elements.
#[inline(always)]
pub fn hash_elements<E: crate::field::BasedVectorSpace<Felt>>(elements: &[E]) -> Word {
<Self as AlgebraicSponge>::hash_elements(elements)
}
/// Returns a hash of two digests. This method is intended for use in construction of
/// Merkle trees and verification of Merkle paths.
#[inline(always)]
pub fn merge(values: &[Word; 2]) -> Word {
<Self as AlgebraicSponge>::merge(values)
}
/// Returns a hash of multiple digests.
#[inline(always)]
pub fn merge_many(values: &[Word]) -> Word {
<Self as AlgebraicSponge>::merge_many(values)
}
/// Returns a hash of a digest and a u64 value.
#[inline(always)]
pub fn merge_with_int(seed: Word, value: u64) -> Word {
<Self as AlgebraicSponge>::merge_with_int(seed, value)
}
/// Returns a hash of two digests and a domain identifier.
#[inline(always)]
pub fn merge_in_domain(values: &[Word; 2], domain: Felt) -> Word {
<Self as AlgebraicSponge>::merge_in_domain(values, domain)
}
// RPX PERMUTATION
// --------------------------------------------------------------------------------------------
/// Applies RPX permutation to the provided state.
#[inline(always)]
pub fn apply_permutation(state: &mut [Felt; STATE_WIDTH]) {
Self::apply_fb_round(state, 0);
Self::apply_ext_round(state, 1);
Self::apply_fb_round(state, 2);
Self::apply_ext_round(state, 3);
Self::apply_fb_round(state, 4);
Self::apply_ext_round(state, 5);
Self::apply_final_round(state, 6);
}
// RPX PERMUTATION ROUND FUNCTIONS
// --------------------------------------------------------------------------------------------
/// (FB) round function.
#[inline(always)]
pub fn apply_fb_round(state: &mut [Felt; STATE_WIDTH], round: usize) {
apply_mds(state);
if !add_constants_and_apply_sbox(state, &ARK1[round]) {
add_constants(state, &ARK1[round]);
apply_sbox(state);
}
apply_mds(state);
if !add_constants_and_apply_inv_sbox(state, &ARK2[round]) {
add_constants(state, &ARK2[round]);
apply_inv_sbox(state);
}
}
/// (E) round function.
///
/// It first attempts to run the optimized (SIMD-accelerated) implementation.
/// If SIMD acceleration is not available for the current target it falls
/// back to the scalar reference implementation (`apply_ext_round_ref`).
#[inline(always)]
pub fn apply_ext_round(state: &mut [Felt; STATE_WIDTH], round: usize) {
if !add_constants_and_apply_ext_round(state, &ARK1[round]) {
Self::apply_ext_round_ref(state, round);
}
}
/// Scalar (reference) implementation of the (E) round function.
///
/// This version performs the round without SIMD acceleration and is used
/// as a fallback when optimized implementations are not available.
#[inline(always)]
fn apply_ext_round_ref(state: &mut [Felt; STATE_WIDTH], round: usize) {
// add constants
add_constants(state, &ARK1[round]);
// decompose the state into 4 elements in the cubic extension field and apply the power 7
// map to each of the elements using our custom cubic extension implementation
let [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11] = *state;
let ext0 = cubic_ext::power7([s0, s1, s2]);
let ext1 = cubic_ext::power7([s3, s4, s5]);
let ext2 = cubic_ext::power7([s6, s7, s8]);
let ext3 = cubic_ext::power7([s9, s10, s11]);
// write the results back into the state
state[0] = ext0[0];
state[1] = ext0[1];
state[2] = ext0[2];
state[3] = ext1[0];
state[4] = ext1[1];
state[5] = ext1[2];
state[6] = ext2[0];
state[7] = ext2[1];
state[8] = ext2[2];
state[9] = ext3[0];
state[10] = ext3[1];
state[11] = ext3[2];
}
/// (M) round function.
#[inline(always)]
pub fn apply_final_round(state: &mut [Felt; STATE_WIDTH], round: usize) {
apply_mds(state);
add_constants(state, &ARK1[round]);
}
}
// CUBIC EXTENSION FIELD OPERATIONS
// ================================================================================================
/// Helper functions for cubic extension field operations over the irreducible polynomial
/// x³ - x - 1. These are used for Plonky3 integration where we need explicit control
/// over the field arithmetic.
mod cubic_ext {
use super::Felt;
use crate::field::PrimeCharacteristicRing;
/// Multiplies two cubic extension field elements.
///
/// Element representation: [a0, a1, a2] = a0 + a1*φ + a2*φ²
/// where φ is a root of x³ - x - 1.
#[inline(always)]
pub fn mul(a: [Felt; 3], b: [Felt; 3]) -> [Felt; 3] {
let a0b0 = a[0] * b[0];
let a1b1 = a[1] * b[1];
let a2b2 = a[2] * b[2];
let a0b0_a0b1_a1b0_a1b1 = (a[0] + a[1]) * (b[0] + b[1]);
let a0b0_a0b2_a2b0_a2b2 = (a[0] + a[2]) * (b[0] + b[2]);
let a1b1_a1b2_a2b1_a2b2 = (a[1] + a[2]) * (b[1] + b[2]);
let a0b0_minus_a1b1 = a0b0 - a1b1;
let a0b0_a1b2_a2b1 = a1b1_a1b2_a2b1_a2b2 + a0b0_minus_a1b1 - a2b2;
let a0b1_a1b0_a1b2_a2b1_a2b2 =
a0b0_a0b1_a1b0_a1b1 + a1b1_a1b2_a2b1_a2b2 - a1b1.double() - a0b0;
let a0b2_a1b1_a2b0_a2b2 = a0b0_a0b2_a2b0_a2b2 - a0b0_minus_a1b1;
[a0b0_a1b2_a2b1, a0b1_a1b0_a1b2_a2b1_a2b2, a0b2_a1b1_a2b0_a2b2]
}
/// Squares a cubic extension field element.
#[inline(always)]
pub fn square(a: [Felt; 3]) -> [Felt; 3] {
let a0 = a[0];
let a1 = a[1];
let a2 = a[2];
let a2_sq = a2.square();
let a1_a2 = a1 * a2;
let out0 = a0.square() + a1_a2.double();
let out1 = (a0 * a1 + a1_a2).double() + a2_sq;
let out2 = (a0 * a2).double() + a1.square() + a2_sq;
[out0, out1, out2]
}
/// Computes the 7th power of a cubic extension field element.
///
/// Uses the addition chain: x → x² → x³ → x⁶ → x⁷
/// - x² (1 squaring)
/// - x³ = x² * x (1 multiplication)
/// - x⁶ = (x³)² (1 squaring)
/// - x⁷ = x⁶ * x (1 multiplication)
///
/// Total: 2 squarings + 2 multiplications
#[inline(always)]
pub fn power7(a: [Felt; 3]) -> [Felt; 3] {
let a2 = square(a);
let a3 = mul(a2, a);
let a6 = square(a3);
mul(a6, a)
}
}
// PLONKY3 INTEGRATION
// ================================================================================================
/// Plonky3-compatible RPX permutation implementation.
///
/// This module provides a Plonky3-compatible interface to the RPX256 hash function,
/// implementing the `Permutation` and `CryptographicPermutation` traits from Plonky3.
///
/// This allows RPX to be used with Plonky3's cryptographic infrastructure, including:
/// - PaddingFreeSponge for hashing
/// - TruncatedPermutation for compression
/// - DuplexChallenger for Fiat-Shamir transforms
use p3_challenger::DuplexChallenger;
use p3_symmetric::{
CryptographicPermutation, PaddingFreeSponge, Permutation, TruncatedPermutation,
};
// RPX PERMUTATION FOR PLONKY3
// ================================================================================================
/// Plonky3-compatible RPX permutation.
///
/// This struct wraps the RPX256 permutation and implements Plonky3's `Permutation` and
/// `CryptographicPermutation` traits, allowing RPX to be used within the Plonky3 ecosystem.
///
/// The permutation operates on a state of 12 field elements (STATE_WIDTH = 12), with:
/// - Rate: 8 elements (positions 4-11)
/// - Capacity: 4 elements (positions 0-3)
/// - Digest output: 4 elements (positions 4-7)
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct RpxPermutation256;
impl RpxPermutation256 {
// CONSTANTS
// --------------------------------------------------------------------------------------------
/// Sponge state is set to 12 field elements or 768 bytes; 8 elements are reserved for rate and
/// the remaining 4 elements are reserved for capacity.
pub const STATE_WIDTH: usize = STATE_WIDTH;
/// The rate portion of the state is located in elements 4 through 11 (inclusive).
pub const RATE_RANGE: Range<usize> = Rpx256::RATE_RANGE;
/// The capacity portion of the state is located in elements 0, 1, 2, and 3.
pub const CAPACITY_RANGE: Range<usize> = Rpx256::CAPACITY_RANGE;
/// The output of the hash function can be read from state elements 4, 5, 6, and 7.
pub const DIGEST_RANGE: Range<usize> = Rpx256::DIGEST_RANGE;
// RPX PERMUTATION
// --------------------------------------------------------------------------------------------
/// Applies RPX permutation to the provided state.
///
/// This delegates to the RPX256 implementation.
#[inline(always)]
pub fn apply_permutation(state: &mut [Felt; STATE_WIDTH]) {
Rpx256::apply_permutation(state);
}
}
// PLONKY3 TRAIT IMPLEMENTATIONS
// ================================================================================================
impl Permutation<[Felt; STATE_WIDTH]> for RpxPermutation256 {
fn permute_mut(&self, state: &mut [Felt; STATE_WIDTH]) {
Self::apply_permutation(state);
}
}
impl CryptographicPermutation<[Felt; STATE_WIDTH]> for RpxPermutation256 {}
// TYPE ALIASES FOR PLONKY3 INTEGRATION
// ================================================================================================
/// RPX-based hasher using Plonky3's PaddingFreeSponge.
///
/// This provides a sponge-based hash function with:
/// - WIDTH: 12 field elements (total state size)
/// - RATE: 8 field elements (input/output rate)
/// - OUT: 4 field elements (digest size)
pub type RpxHasher = PaddingFreeSponge<RpxPermutation256, 12, 8, 4>;
/// RPX-based compression function using Plonky3's TruncatedPermutation.
///
/// This provides a 2-to-1 compression function for Merkle tree construction with:
/// - CHUNK: 2 (number of input chunks - i.e., 2 digests of 4 elements each = 8 elements)
/// - N: 4 (output size in field elements)
/// - WIDTH: 12 (total state size)
///
/// The compression function takes 8 field elements (2 digests) as input and produces
/// 4 field elements (1 digest) as output.
pub type RpxCompression = TruncatedPermutation<RpxPermutation256, 2, 4, 12>;
/// RPX-based challenger using Plonky3's DuplexChallenger.
///
/// This provides a Fiat-Shamir transform implementation for interactive proof protocols,
/// with:
/// - F: Generic field type (typically the same as Felt)
/// - WIDTH: 12 field elements (sponge state size)
/// - RATE: 8 field elements (rate of absorption/squeezing)
pub type RpxChallenger<F> = DuplexChallenger<F, RpxPermutation256, 12, 8>;
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/algebraic_sponge/poseidon2/test.rs | miden-crypto/src/hash/algebraic_sponge/poseidon2/test.rs | use super::{Felt, ZERO};
use crate::hash::{algebraic_sponge::AlgebraicSponge, poseidon2::Poseidon2};
#[test]
fn permutation_test_vector() {
// tests that the current implementation is consistent with
// the reference [implementation](https://github.com/HorizenLabs/poseidon2) and uses
// the test vectors provided therein
let mut elements = [
ZERO,
Felt::new(1),
Felt::new(2),
Felt::new(3),
Felt::new(4),
Felt::new(5),
Felt::new(6),
Felt::new(7),
Felt::new(8),
Felt::new(9),
Felt::new(10),
Felt::new(11),
];
Poseidon2::apply_permutation(&mut elements);
let perm = elements;
assert_eq!(perm[0], Felt::new(0x01eaef96bdf1c0c1));
assert_eq!(perm[1], Felt::new(0x1f0d2cc525b2540c));
assert_eq!(perm[2], Felt::new(0x6282c1dfe1e0358d));
assert_eq!(perm[3], Felt::new(0xe780d721f698e1e6));
assert_eq!(perm[4], Felt::new(0x280c0b6f753d833b));
assert_eq!(perm[5], Felt::new(0x1b942dd5023156ab));
assert_eq!(perm[6], Felt::new(0x43f0df3fcccb8398));
assert_eq!(perm[7], Felt::new(0xe8e8190585489025));
assert_eq!(perm[8], Felt::new(0x56bdbf72f77ada22));
assert_eq!(perm[9], Felt::new(0x7911c32bf9dcd705));
assert_eq!(perm[10], Felt::new(0xec467926508fbe67));
assert_eq!(perm[11], Felt::new(0x6a50450ddf85a6ed));
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/algebraic_sponge/poseidon2/mod.rs | miden-crypto/src/hash/algebraic_sponge/poseidon2/mod.rs | use super::{
AlgebraicSponge, CAPACITY_RANGE, DIGEST_RANGE, Felt, RATE_RANGE, Range, STATE_WIDTH, Word, ZERO,
};
use crate::field::PrimeCharacteristicRing;
mod constants;
use constants::{
ARK_EXT_INITIAL, ARK_EXT_TERMINAL, ARK_INT, MAT_DIAG, NUM_EXTERNAL_ROUNDS_HALF,
NUM_INTERNAL_ROUNDS,
};
#[cfg(test)]
mod test;
/// Implementation of the Poseidon2 hash function with 256-bit output.
///
/// The implementation follows the original [specification](https://eprint.iacr.org/2023/323) and
/// its accompanying reference [implementation](https://github.com/HorizenLabs/poseidon2).
///
/// The parameters used to instantiate the function are:
/// * Field: 64-bit prime field with modulus 2^64 - 2^32 + 1.
/// * State width: 12 field elements.
/// * Capacity size: 4 field elements.
/// * S-Box degree: 7.
/// * Rounds: There are 2 different types of rounds, called internal and external, and are
/// structured as follows:
/// - Initial External rounds (IE): `add_constants` → `apply_sbox` → `apply_matmul_external`.
/// - Internal rounds: `add_constants` → `apply_sbox` → `apply_matmul_internal`, where the constant
/// addition and sbox application apply only to the first entry of the state.
/// - Terminal External rounds (TE): `add_constants` → `apply_sbox` → `apply_matmul_external`.
/// - An additional `apply_matmul_external` is inserted at the beginning in order to protect against
/// some recent attacks.
///
/// The above parameters target a 128-bit security level. The digest consists of four field elements
/// and it can be serialized into 32 bytes (256 bits).
///
/// ## Hash output consistency
/// Functions [hash_elements()](Poseidon2::hash_elements), [merge()](Poseidon2::merge), and
/// [merge_with_int()](Poseidon2::merge_with_int) are internally consistent. That is, computing
/// a hash for the same set of elements using these functions will always produce the same
/// result. For example, merging two digests using [merge()](Poseidon2::merge) will produce the
/// same result as hashing 8 elements which make up these digests using
/// [hash_elements()](Poseidon2::hash_elements) function.
///
/// However, [hash()](Poseidon2::hash) function is not consistent with functions mentioned above.
/// For example, if we take two field elements, serialize them to bytes and hash them using
/// [hash()](Poseidon2::hash), the result will differ from the result obtained by hashing these
/// elements directly using [hash_elements()](Poseidon2::hash_elements) function. The reason for
/// this difference is that [hash()](Poseidon2::hash) function needs to be able to handle
/// arbitrary binary strings, which may or may not encode valid field elements - and thus,
/// deserialization procedure used by this function is different from the procedure used to
/// deserialize valid field elements.
///
/// Thus, if the underlying data consists of valid field elements, it might make more sense
/// to deserialize them into field elements and then hash them using
/// [hash_elements()](Poseidon2::hash_elements) function rather than hashing the serialized bytes
/// using [hash()](Poseidon2::hash) function.
///
/// ## Domain separation
/// [merge_in_domain()](Poseidon2::merge_in_domain) hashes two digests into one digest with some
/// domain identifier and the current implementation sets the second capacity element to the value
/// of this domain identifier. Using a similar argument to the one formulated for domain separation
/// in Appendix C of the [specifications](https://eprint.iacr.org/2023/1045), one sees that doing
/// so degrades only pre-image resistance, from its initial bound of c.log_2(p), by as much as
/// the log_2 of the size of the domain identifier space. Since pre-image resistance becomes
/// the bottleneck for the security bound of the sponge in overwrite-mode only when it is
/// lower than 2^128, we see that the target 128-bit security level is maintained as long as
/// the size of the domain identifier space, including for padding, is less than 2^128.
///
/// ## Hashing of empty input
/// The current implementation hashes empty input to the zero digest [0, 0, 0, 0]. This has
/// the benefit of requiring no calls to the Poseidon2 permutation when hashing empty input.
#[allow(rustdoc::private_intra_doc_links)]
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct Poseidon2();
impl AlgebraicSponge for Poseidon2 {
fn apply_permutation(state: &mut [Felt; STATE_WIDTH]) {
// 1. Apply (external) linear layer to the input
Self::apply_matmul_external(state);
// 2. Apply initial external rounds to the state
Self::initial_external_rounds(state);
// 3. Apply internal rounds to the state
Self::internal_rounds(state);
// 4. Apply terminal external rounds to the state
Self::terminal_external_rounds(state);
}
}
impl Poseidon2 {
// CONSTANTS
// --------------------------------------------------------------------------------------------
/// Target collision resistance level in bits.
pub const COLLISION_RESISTANCE: u32 = 128;
/// Number of initial or terminal external rounds.
pub const NUM_EXTERNAL_ROUNDS_HALF: usize = NUM_EXTERNAL_ROUNDS_HALF;
/// Number of internal rounds.
pub const NUM_INTERNAL_ROUNDS: usize = NUM_INTERNAL_ROUNDS;
/// Sponge state is set to 12 field elements or 768 bytes; 8 elements are reserved for rate and
/// the remaining 4 elements are reserved for capacity.
pub const STATE_WIDTH: usize = STATE_WIDTH;
/// The rate portion of the state is located in elements 4 through 11 (inclusive).
pub const RATE_RANGE: Range<usize> = RATE_RANGE;
/// The capacity portion of the state is located in elements 0, 1, 2, and 3.
pub const CAPACITY_RANGE: Range<usize> = CAPACITY_RANGE;
/// The output of the hash function can be read from state elements 4, 5, 6, and 7.
pub const DIGEST_RANGE: Range<usize> = DIGEST_RANGE;
/// Matrix used for computing the linear layers of internal rounds.
pub const MAT_DIAG: [Felt; STATE_WIDTH] = MAT_DIAG;
/// Round constants added to the hasher state.
pub const ARK_EXT_INITIAL: [[Felt; STATE_WIDTH]; NUM_EXTERNAL_ROUNDS_HALF] = ARK_EXT_INITIAL;
pub const ARK_EXT_TERMINAL: [[Felt; STATE_WIDTH]; NUM_EXTERNAL_ROUNDS_HALF] = ARK_EXT_TERMINAL;
pub const ARK_INT: [Felt; NUM_INTERNAL_ROUNDS] = ARK_INT;
// HASH FUNCTIONS
// --------------------------------------------------------------------------------------------
/// Returns a hash of the provided sequence of bytes.
#[inline(always)]
pub fn hash(bytes: &[u8]) -> Word {
<Self as AlgebraicSponge>::hash(bytes)
}
/// Returns a hash of the provided field elements.
#[inline(always)]
pub fn hash_elements<E: crate::field::BasedVectorSpace<Felt>>(elements: &[E]) -> Word {
<Self as AlgebraicSponge>::hash_elements(elements)
}
/// Returns a hash of two digests. This method is intended for use in construction of
/// Merkle trees and verification of Merkle paths.
#[inline(always)]
pub fn merge(values: &[Word; 2]) -> Word {
<Self as AlgebraicSponge>::merge(values)
}
/// Returns a hash of multiple digests.
#[inline(always)]
pub fn merge_many(values: &[Word]) -> Word {
<Self as AlgebraicSponge>::merge_many(values)
}
/// Hashes a digest together with a `u64` value.
///
/// Delegates to the [`AlgebraicSponge`] implementation.
#[inline(always)]
pub fn merge_with_int(seed: Word, value: u64) -> Word {
    let digest: Word = <Self as AlgebraicSponge>::merge_with_int(seed, value);
    digest
}
/// Hashes two digests under a caller-supplied domain-separation identifier.
///
/// Delegates to the [`AlgebraicSponge`] implementation.
#[inline(always)]
pub fn merge_in_domain(values: &[Word; 2], domain: Felt) -> Word {
    let digest: Word = <Self as AlgebraicSponge>::merge_in_domain(values, domain);
    digest
}
// POSEIDON2 PERMUTATION
// --------------------------------------------------------------------------------------------
/// Applies the initial half of the external rounds of the permutation.
///
/// Each round adds the round constants, applies the x^7 sbox entry-wise, and then applies
/// the external linear layer M_E.
#[inline(always)]
fn initial_external_rounds(state: &mut [Felt; STATE_WIDTH]) {
    for round_constants in ARK_EXT_INITIAL.iter() {
        Self::add_rc(state, round_constants);
        Self::apply_sbox(state);
        Self::apply_matmul_external(state);
    }
}
/// Applies the internal rounds of the permutation.
///
/// Internal rounds are cheaper than external ones: the round constant and the x^7 sbox are
/// applied to the first state element only, followed by the internal linear layer M_I.
#[inline(always)]
fn internal_rounds(state: &mut [Felt; STATE_WIDTH]) {
    for &round_constant in ARK_INT.iter() {
        state[0] += round_constant;
        state[0] = state[0].exp_const_u64::<7>();
        Self::matmul_internal(state, MAT_DIAG);
    }
}
/// Applies the terminal half of the external rounds of the permutation.
///
/// Identical in structure to [`Self::initial_external_rounds`] but uses the terminal set of
/// round constants.
#[inline(always)]
fn terminal_external_rounds(state: &mut [Felt; STATE_WIDTH]) {
    for round_constants in ARK_EXT_TERMINAL.iter() {
        Self::add_rc(state, round_constants);
        Self::apply_sbox(state);
        Self::apply_matmul_external(state);
    }
}
/// Applies the M_E linear layer to the state.
///
/// This basically takes any 4 x 4 MDS matrix M and computes the matrix-vector product with
/// the matrix defined by `[[2M, M, ..., M], [M, 2M, ..., M], ..., [M, M, ..., 2M]]`.
///
/// Given the structure of the above matrix, we can compute the product of the state with
/// matrix `[M, M, ..., M]` and compute the final result using a few additions.
#[inline(always)]
fn apply_matmul_external(state: &mut [Felt; STATE_WIDTH]) {
    // multiply each 4-element block of the state by M
    Self::matmul_m4(state);
    // accumulate the column-wise sums across all blocks
    let mut column_sums = [ZERO; 4];
    for block in state.chunks_exact(4) {
        for (acc, &value) in column_sums.iter_mut().zip(block) {
            *acc += value;
        }
    }
    // add the appropriate column sum to every state element
    for (position, entry) in state.iter_mut().enumerate() {
        *entry += column_sums[position % 4];
    }
}
/// Multiplies the state block-wise with a 4 x 4 MDS matrix.
///
/// Uses the addition-chain evaluation of the Poseidon2 M4 matrix: only additions and
/// doublings are needed, no general multiplications.
#[inline(always)]
fn matmul_m4(state: &mut [Felt; STATE_WIDTH]) {
    for block in state.chunks_exact_mut(4) {
        let (a, b, c, d) = (block[0], block[1], block[2], block[3]);
        let t0 = a + b;
        let t1 = c + d;
        let t2 = b.double() + t1;
        let t3 = d.double() + t0;
        let t4 = t1.double().double() + t3;
        let t5 = t0.double().double() + t2;
        block[0] = t3 + t5;
        block[1] = t5;
        block[2] = t2 + t4;
        block[3] = t4;
    }
}
/// Applies the M_I linear layer to the state.
///
/// The matrix is given by its diagonal entries with the remaining entries set equal to 1.
/// Hence, given the sum of the state entries, the matrix-vector product is computed using
/// a multiply-and-add per state entry.
#[inline(always)]
fn matmul_internal(state: &mut [Felt; STATE_WIDTH], mat_diag: [Felt; 12]) {
    // sum of all state entries (the "all ones" part of the matrix)
    let sum = state.iter().fold(ZERO, |acc, &entry| acc + entry);
    // one multiply-and-add per entry for the diagonal part
    for (entry, &diag) in state.iter_mut().zip(mat_diag.iter()) {
        *entry = *entry * diag + sum;
    }
}
/// Adds the round constants to the state entry-wise during external rounds.
#[inline(always)]
fn add_rc(state: &mut [Felt; STATE_WIDTH], ark: &[Felt; 12]) {
    for (entry, &constant) in state.iter_mut().zip(ark.iter()) {
        *entry += constant;
    }
}
/// Applies the x^7 sbox to every entry of the state.
#[inline(always)]
fn apply_sbox(state: &mut [Felt; STATE_WIDTH]) {
    for entry in state.iter_mut() {
        *entry = (*entry).exp_const_u64::<7>();
    }
}
}
// PLONKY3 INTEGRATION
// ================================================================================================
/// Plonky3-compatible Poseidon2 permutation implementation.
///
/// This module provides a Plonky3-compatible interface to the Poseidon2 hash function,
/// implementing the `Permutation` and `CryptographicPermutation` traits from Plonky3.
///
/// This allows Poseidon2 to be used with Plonky3's cryptographic infrastructure, including:
/// - PaddingFreeSponge for hashing
/// - TruncatedPermutation for compression
/// - DuplexChallenger for Fiat-Shamir transforms
use p3_challenger::DuplexChallenger;
use p3_symmetric::{
CryptographicPermutation, PaddingFreeSponge, Permutation, TruncatedPermutation,
};
// POSEIDON2 PERMUTATION FOR PLONKY3
// ================================================================================================
/// Plonky3-compatible Poseidon2 permutation.
///
/// This struct wraps the Poseidon2 permutation and implements Plonky3's `Permutation` and
/// `CryptographicPermutation` traits, allowing Poseidon2 to be used within the Plonky3 ecosystem.
///
/// The permutation operates on a state of 12 field elements (STATE_WIDTH = 12), with:
/// - Rate: 8 elements (positions 4-11)
/// - Capacity: 4 elements (positions 0-3)
/// - Digest output: 4 elements (positions 4-7)
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct Poseidon2Permutation256;

impl Poseidon2Permutation256 {
    // CONSTANTS
    // --------------------------------------------------------------------------------------------
    /// Number of initial or terminal external rounds.
    pub const NUM_EXTERNAL_ROUNDS_HALF: usize = Poseidon2::NUM_EXTERNAL_ROUNDS_HALF;
    /// Number of internal rounds.
    pub const NUM_INTERNAL_ROUNDS: usize = Poseidon2::NUM_INTERNAL_ROUNDS;
    /// Sponge state is set to 12 field elements or 96 bytes (12 x 8-byte field elements);
    /// 8 elements are reserved for rate and the remaining 4 elements are reserved for capacity.
    pub const STATE_WIDTH: usize = STATE_WIDTH;
    /// The rate portion of the state is located in elements 4 through 11 (inclusive).
    pub const RATE_RANGE: Range<usize> = Poseidon2::RATE_RANGE;
    /// The capacity portion of the state is located in elements 0, 1, 2, and 3.
    pub const CAPACITY_RANGE: Range<usize> = Poseidon2::CAPACITY_RANGE;
    /// The output of the hash function can be read from state elements 4, 5, 6, and 7.
    pub const DIGEST_RANGE: Range<usize> = Poseidon2::DIGEST_RANGE;

    // POSEIDON2 PERMUTATION
    // --------------------------------------------------------------------------------------------
    /// Applies Poseidon2 permutation to the provided state.
    ///
    /// This delegates to the Poseidon2 implementation.
    #[inline(always)]
    pub fn apply_permutation(state: &mut [Felt; STATE_WIDTH]) {
        Poseidon2::apply_permutation(state);
    }
}
// PLONKY3 TRAIT IMPLEMENTATIONS
// ================================================================================================
impl Permutation<[Felt; STATE_WIDTH]> for Poseidon2Permutation256 {
    /// Applies the Poseidon2 permutation to the state in place (Plonky3 entry point).
    fn permute_mut(&self, state: &mut [Felt; STATE_WIDTH]) {
        Self::apply_permutation(state);
    }
}

// Marker impl declaring that the permutation above is suitable for cryptographic use
// (required by Plonky3 sponge/compression/challenger constructions).
impl CryptographicPermutation<[Felt; STATE_WIDTH]> for Poseidon2Permutation256 {}
// TYPE ALIASES FOR PLONKY3 INTEGRATION
// ================================================================================================
/// Poseidon2-based hasher using Plonky3's PaddingFreeSponge.
///
/// This provides a sponge-based hash function with:
/// - WIDTH: 12 field elements (total state size)
/// - RATE: 8 field elements (input/output rate)
/// - OUT: 4 field elements (digest size)
pub type Poseidon2Hasher = PaddingFreeSponge<Poseidon2Permutation256, 12, 8, 4>;

/// Poseidon2-based compression function using Plonky3's TruncatedPermutation.
///
/// This provides a 2-to-1 compression function for Merkle tree construction with:
/// - CHUNK: 2 (number of input chunks - i.e., 2 digests of 4 elements each = 8 elements)
/// - N: 4 (output size in field elements)
/// - WIDTH: 12 (total state size)
///
/// The compression function takes 8 field elements (2 digests) as input and produces
/// 4 field elements (1 digest) as output.
pub type Poseidon2Compression = TruncatedPermutation<Poseidon2Permutation256, 2, 4, 12>;

/// Poseidon2-based challenger using Plonky3's DuplexChallenger.
///
/// This provides a Fiat-Shamir transform implementation for interactive proof protocols,
/// with:
/// - F: Generic field type (typically the same as Felt)
/// - WIDTH: 12 field elements (sponge state size)
/// - RATE: 8 field elements (rate of absorption/squeezing)
pub type Poseidon2Challenger<F> = DuplexChallenger<F, Poseidon2Permutation256, 12, 8>;
#[cfg(test)]
mod p3_tests {
    use p3_symmetric::{CryptographicHasher, PseudoCompressionFunction};

    use super::*;

    /// Sanity check: permuting the all-zero state must change it.
    #[test]
    fn test_poseidon2_permutation_basic() {
        let mut state = [Felt::new(0); STATE_WIDTH];
        // Apply permutation
        let perm = Poseidon2Permutation256;
        perm.permute_mut(&mut state);
        // State should be different from all zeros after permutation
        assert_ne!(state, [Felt::new(0); STATE_WIDTH]);
    }

    /// The Plonky3 trait entry point and the inherent method must agree.
    #[test]
    fn test_poseidon2_permutation_consistency() {
        let mut state1 = [Felt::new(0); STATE_WIDTH];
        let mut state2 = [Felt::new(0); STATE_WIDTH];
        // Apply permutation using the trait
        let perm = Poseidon2Permutation256;
        perm.permute_mut(&mut state1);
        // Apply permutation directly
        Poseidon2Permutation256::apply_permutation(&mut state2);
        // Both should produce the same result
        assert_eq!(state1, state2);
    }

    /// The permutation is a pure function: identical inputs give identical outputs.
    #[test]
    fn test_poseidon2_permutation_deterministic() {
        let input = [
            Felt::new(1),
            Felt::new(2),
            Felt::new(3),
            Felt::new(4),
            Felt::new(5),
            Felt::new(6),
            Felt::new(7),
            Felt::new(8),
            Felt::new(9),
            Felt::new(10),
            Felt::new(11),
            Felt::new(12),
        ];
        let mut state1 = input;
        let mut state2 = input;
        let perm = Poseidon2Permutation256;
        perm.permute_mut(&mut state1);
        perm.permute_mut(&mut state2);
        // Same input should produce same output
        assert_eq!(state1, state2);
    }

    /// Cross-check Plonky3's PaddingFreeSponge against the native `hash_elements`.
    #[test]
    #[ignore] // TODO: Re-enable after migrating Poseidon2 state layout to match Plonky3
    // Miden-crypto: capacity=[0-3], rate=[4-11]
    // Plonky3: rate=[0-7], capacity=[8-11]
    fn test_poseidon2_hasher_vs_hash_elements() {
        // Test with empty input
        let expected: [Felt; 4] = Poseidon2::hash_elements::<Felt>(&[]).into();
        let hasher = Poseidon2Hasher::new(Poseidon2Permutation256);
        let result = hasher.hash_iter([]);
        assert_eq!(result, expected, "Empty input should produce same digest");
        // Test with 4 elements (one digest worth)
        let input4 = [Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)];
        let expected: [Felt; 4] = Poseidon2::hash_elements(&input4).into();
        let result = hasher.hash_iter(input4);
        assert_eq!(result, expected, "4 elements should produce same digest");
        // Test with 8 elements (exactly one rate)
        let input8 = [
            Felt::new(1),
            Felt::new(2),
            Felt::new(3),
            Felt::new(4),
            Felt::new(5),
            Felt::new(6),
            Felt::new(7),
            Felt::new(8),
        ];
        let expected: [Felt; 4] = Poseidon2::hash_elements(&input8).into();
        let result = hasher.hash_iter(input8);
        assert_eq!(result, expected, "8 elements (one rate) should produce same digest");
        // Test with 16 elements (two rates)
        let input16 = [
            Felt::new(1),
            Felt::new(2),
            Felt::new(3),
            Felt::new(4),
            Felt::new(5),
            Felt::new(6),
            Felt::new(7),
            Felt::new(8),
            Felt::new(9),
            Felt::new(10),
            Felt::new(11),
            Felt::new(12),
            Felt::new(13),
            Felt::new(14),
            Felt::new(15),
            Felt::new(16),
        ];
        let expected: [Felt; 4] = Poseidon2::hash_elements(&input16).into();
        let result = hasher.hash_iter(input16);
        assert_eq!(result, expected, "16 elements (two rates) should produce same digest");
    }

    /// Cross-check Plonky3's TruncatedPermutation against the native 2-to-1 `merge`.
    #[test]
    #[ignore] // TODO: Re-enable after migrating Poseidon2 state layout to match Plonky3
    // Miden-crypto: capacity=[0-3], rate=[4-11]
    // Plonky3: rate=[0-7], capacity=[8-11]
    fn test_poseidon2_compression_vs_merge() {
        let digest1 = [Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)];
        let digest2 = [Felt::new(5), Felt::new(6), Felt::new(7), Felt::new(8)];
        // Poseidon2::merge expects &[Word; 2]
        let expected: [Felt; 4] = Poseidon2::merge(&[digest1.into(), digest2.into()]).into();
        // Poseidon2Compression expects [[Felt; 4]; 2]
        let compress = Poseidon2Compression::new(Poseidon2Permutation256);
        let result = compress.compress([digest1, digest2]);
        assert_eq!(result, expected, "Poseidon2Compression should match Poseidon2::merge");
    }
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/algebraic_sponge/poseidon2/constants.rs | miden-crypto/src/hash/algebraic_sponge/poseidon2/constants.rs | use super::{Felt, STATE_WIDTH};
// HASH FUNCTION DEFINING CONSTANTS
// ================================================================================================
/// Number of external rounds.
pub(crate) const NUM_EXTERNAL_ROUNDS: usize = 8;
/// Number of either initial or terminal external rounds.
pub(crate) const NUM_EXTERNAL_ROUNDS_HALF: usize = NUM_EXTERNAL_ROUNDS / 2;
/// Number of internal rounds.
pub(crate) const NUM_INTERNAL_ROUNDS: usize = 22;
// DIAGONAL MATRIX USED IN INTERNAL ROUNDS
// ================================================================================================
/// Diagonal entries of the internal-round matrix M_I; all off-diagonal entries are 1.
pub(crate) const MAT_DIAG: [Felt; STATE_WIDTH] = [
    Felt::new(0xc3b6c08e23ba9300),
    Felt::new(0xd84b5de94a324fb6),
    Felt::new(0x0d0c371c5b35b84f),
    Felt::new(0x7964f570e7188037),
    Felt::new(0x5daf18bbd996604b),
    Felt::new(0x6743bc47b9595257),
    Felt::new(0x5528b9362c59bb70),
    Felt::new(0xac45e25b7127b68b),
    Felt::new(0xa2077d7dfbb606b5),
    Felt::new(0xf3faac6faee378ae),
    Felt::new(0x0c6388b51545e883),
    Felt::new(0xd27dbb6944917b60),
];
// ROUND CONSTANTS
// ================================================================================================
pub(crate) const ARK_EXT_INITIAL: [[Felt; 12]; 4] = [
[
Felt::new(0x13dcf33aba214f46),
Felt::new(0x30b3b654a1da6d83),
Felt::new(0x1fc634ada6159b56),
Felt::new(0x937459964dc03466),
Felt::new(0xedd2ef2ca7949924),
Felt::new(0xede9affde0e22f68),
Felt::new(0x8515b9d6bac9282d),
Felt::new(0x6b5c07b4e9e900d8),
Felt::new(0x1ec66368838c8a08),
Felt::new(0x9042367d80d1fbab),
Felt::new(0x400283564a3c3799),
Felt::new(0x4a00be0466bca75e),
],
[
Felt::new(0x7913beee58e3817f),
Felt::new(0xf545e88532237d90),
Felt::new(0x22f8cb8736042005),
Felt::new(0x6f04990e247a2623),
Felt::new(0xfe22e87ba37c38cd),
Felt::new(0xd20e32c85ffe2815),
Felt::new(0x117227674048fe73),
Felt::new(0x4e9fb7ea98a6b145),
Felt::new(0xe0866c232b8af08b),
Felt::new(0x00bbc77916884964),
Felt::new(0x7031c0fb990d7116),
Felt::new(0x240a9e87cf35108f),
],
[
Felt::new(0x2e6363a5a12244b3),
Felt::new(0x5e1c3787d1b5011c),
Felt::new(0x4132660e2a196e8b),
Felt::new(0x3a013b648d3d4327),
Felt::new(0xf79839f49888ea43),
Felt::new(0xfe85658ebafe1439),
Felt::new(0xb6889825a14240bd),
Felt::new(0x578453605541382b),
Felt::new(0x4508cda8f6b63ce9),
Felt::new(0x9c3ef35848684c91),
Felt::new(0x0812bde23c87178c),
Felt::new(0xfe49638f7f722c14),
],
[
Felt::new(0x8e3f688ce885cbf5),
Felt::new(0xb8e110acf746a87d),
Felt::new(0xb4b2e8973a6dabef),
Felt::new(0x9e714c5da3d462ec),
Felt::new(0x6438f9033d3d0c15),
Felt::new(0x24312f7cf1a27199),
Felt::new(0x23f843bb47acbf71),
Felt::new(0x9183f11a34be9f01),
Felt::new(0x839062fbb9d45dbf),
Felt::new(0x24b56e7e6c2e43fa),
Felt::new(0xe1683da61c962a72),
Felt::new(0xa95c63971a19bfa7),
],
];
pub(crate) const ARK_INT: [Felt; 22] = [
Felt::new(0x4adf842aa75d4316),
Felt::new(0xf8fbb871aa4ab4eb),
Felt::new(0x68e85b6eb2dd6aeb),
Felt::new(0x07a0b06b2d270380),
Felt::new(0xd94e0228bd282de4),
Felt::new(0x8bdd91d3250c5278),
Felt::new(0x209c68b88bba778f),
Felt::new(0xb5e18cdab77f3877),
Felt::new(0xb296a3e808da93fa),
Felt::new(0x8370ecbda11a327e),
Felt::new(0x3f9075283775dad8),
Felt::new(0xb78095bb23c6aa84),
Felt::new(0x3f36b9fe72ad4e5f),
Felt::new(0x69bc96780b10b553),
Felt::new(0x3f1d341f2eb7b881),
Felt::new(0x4e939e9815838818),
Felt::new(0xda366b3ae2a31604),
Felt::new(0xbc89db1e7287d509),
Felt::new(0x6102f411f9ef5659),
Felt::new(0x58725c5e7ac1f0ab),
Felt::new(0x0df5856c798883e7),
Felt::new(0xf7bb62a8da4c961b),
];
pub(crate) const ARK_EXT_TERMINAL: [[Felt; STATE_WIDTH]; 4] = [
[
Felt::new(0xc68be7c94882a24d),
Felt::new(0xaf996d5d5cdaedd9),
Felt::new(0x9717f025e7daf6a5),
Felt::new(0x6436679e6e7216f4),
Felt::new(0x8a223d99047af267),
Felt::new(0xbb512e35a133ba9a),
Felt::new(0xfbbf44097671aa03),
Felt::new(0xf04058ebf6811e61),
Felt::new(0x5cca84703fac7ffb),
Felt::new(0x9b55c7945de6469f),
Felt::new(0x8e05bf09808e934f),
Felt::new(0x2ea900de876307d7),
],
[
Felt::new(0x7748fff2b38dfb89),
Felt::new(0x6b99a676dd3b5d81),
Felt::new(0xac4bb7c627cf7c13),
Felt::new(0xadb6ebe5e9e2f5ba),
Felt::new(0x2d33378cafa24ae3),
Felt::new(0x1e5b73807543f8c2),
Felt::new(0x09208814bfebb10f),
Felt::new(0x782e64b6bb5b93dd),
Felt::new(0xadd5a48eac90b50f),
Felt::new(0xadd4c54c736ea4b1),
Felt::new(0xd58dbb86ed817fd8),
Felt::new(0x6d5ed1a533f34ddd),
],
[
Felt::new(0x28686aa3e36b7cb9),
Felt::new(0x591abd3476689f36),
Felt::new(0x047d766678f13875),
Felt::new(0xa2a11112625f5b49),
Felt::new(0x21fd10a3f8304958),
Felt::new(0xf9b40711443b0280),
Felt::new(0xd2697eb8b2bde88e),
Felt::new(0x3493790b51731b3f),
Felt::new(0x11caf9dd73764023),
Felt::new(0x7acfb8f72878164e),
Felt::new(0x744ec4db23cefc26),
Felt::new(0x1e00e58f422c6340),
],
[
Felt::new(0x21dd28d906a62dda),
Felt::new(0xf32a46ab5f465b5f),
Felt::new(0xbfce13201f3f7e6b),
Felt::new(0xf30d2e7adb5304e2),
Felt::new(0xecdf4ee4abad48e9),
Felt::new(0xf94e82182d395019),
Felt::new(0x4ee52e3744d887c5),
Felt::new(0xa1341c7cac0083b2),
Felt::new(0x2302fb26c30c834a),
Felt::new(0xaea3c587273bf7d3),
Felt::new(0xf798e24961823ec7),
Felt::new(0x962deba3e9a2cd94),
],
];
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/keccak/tests.rs | miden-crypto/src/hash/keccak/tests.rs | #![cfg(feature = "std")]
use alloc::vec::Vec;
use proptest::prelude::*;
use super::*;
use crate::rand::test_utils::rand_vector;
/// Checks `hash_elements` against an independently computed Keccak-256 of the
/// little-endian canonical byte encoding, for both aligned and unaligned element counts.
#[test]
fn keccak256_hash_elements() {
    // test multiple of 8
    let elements = rand_vector::<Felt>(16);
    let expected = compute_expected_element_hash(&elements);
    let actual: [u8; 32] = hash_elements(&elements);
    assert_eq!(&expected, &actual);
    // test not multiple of 8 (exercises the partial-buffer tail path)
    let elements = rand_vector::<Felt>(17);
    let expected = compute_expected_element_hash(&elements);
    let actual: [u8; 32] = hash_elements(&elements);
    assert_eq!(&expected, &actual);
}
proptest! {
    /// Hashing must not panic for any byte input.
    #[test]
    fn keccak256_wont_panic_with_arbitrary_input(ref vec in any::<Vec<u8>>()) {
        Keccak256::hash(vec);
    }

    /// `hash_iter` over slices must equal `hash` over their concatenation.
    #[test]
    fn keccak256_hash_iter_matches_hash(ref slices in any::<Vec<Vec<u8>>>()) {
        // Test that hash_iter produces the same result as concatenating all slices
        // Concatenate all slices to create the expected result using the original hash method
        let mut concatenated = Vec::new();
        for slice in slices.iter() {
            concatenated.extend_from_slice(slice);
        }
        let expected = Keccak256::hash(&concatenated);
        // Test with the original iterator of slices
        let actual = Keccak256::hash_iter(slices.iter().map(|v| v.as_slice()));
        assert_eq!(expected, actual);
        // Test with empty slices list (should produce hash of empty string)
        let empty_actual = Keccak256::hash_iter(core::iter::empty());
        let empty_expected = Keccak256::hash(b"");
        assert_eq!(empty_expected, empty_actual);
        // Test with single slice (should be identical to hash)
        if let Some(single_slice) = slices.first() {
            let single_actual = Keccak256::hash_iter(core::iter::once(single_slice.as_slice()));
            let single_expected = Keccak256::hash(single_slice);
            assert_eq!(single_expected, single_actual);
        }
    }
}
/// Verifies `Keccak256::hash` against the fixed NIST-style vectors below.
#[test]
fn test_nist_test_vectors() {
    for (i, vector) in NIST_TEST_VECTORS.iter().enumerate() {
        let result = Keccak256::hash(vector.input);
        let expected = hex::decode(vector.expected).unwrap();
        assert_eq!(
            result.to_vec(),
            expected,
            "NIST test vector {} failed: {}",
            i,
            vector.description
        );
    }
}
/// Verifies `Keccak256::hash` against Ethereum signed-message vectors.
#[test]
fn test_ethereum_test_vectors() {
    for (i, vector) in ETHEREUM_TEST_VECTORS.iter().enumerate() {
        let result = Keccak256::hash(vector.input);
        let expected = hex::decode(vector.expected).unwrap();
        assert_eq!(
            result.to_vec(),
            expected,
            "Ethereum test vector {} failed: {}",
            i,
            vector.description
        );
    }
}
// HELPER FUNCTION AND STRUCT
// ================================================================================================
/// Reference implementation: serializes each element as its canonical `u64` in
/// little-endian byte order and hashes the concatenation with Keccak-256.
fn compute_expected_element_hash(elements: &[Felt]) -> [u8; DIGEST_BYTES] {
    let bytes: Vec<u8> = elements
        .iter()
        .flat_map(|element| element.as_canonical_u64().to_le_bytes())
        .collect();
    let mut hasher = sha3::Keccak256::new();
    hasher.update(&bytes);
    hasher.finalize().into()
}
/// A single known-answer test case for `Keccak256::hash`.
struct TestVector {
    // raw message bytes to hash
    input: &'static [u8],
    // expected 32-byte digest, lowercase hex without "0x" prefix
    expected: &'static str,
    // human-readable label used in assertion messages
    description: &'static str,
}
// TEST VECTORS
// ================================================================================================
// Derived from the wrapped implementation
// Known-answer vectors for the original (pre-SHA-3 padding) Keccak-256.
const NIST_TEST_VECTORS: &[TestVector] = &[
    TestVector {
        input: b"",
        expected: "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
        description: "Empty input",
    },
    TestVector {
        input: b"a",
        expected: "3ac225168df54212a25c1c01fd35bebfea408fdac2e31ddd6f80a4bbf9a5f1cb",
        description: "Single byte 'a'",
    },
    TestVector {
        input: b"abc",
        expected: "4e03657aea45a94fc7d47ba826c8d667c0d1e6e33a64a036ec44f58fa12d6c45",
        description: "String 'abc'",
    },
];
// Fetched from https://docs.ethers.org/v5/api/utils/hashing/
// Inputs carry the EIP-191 "\x19Ethereum Signed Message:\n<len>" prefix inline.
const ETHEREUM_TEST_VECTORS: &[TestVector] = &[
    TestVector {
        input: b"\x19Ethereum Signed Message:\n11Hello World",
        expected: "a1de988600a42c4b4ab089b619297c17d53cffae5d5120d82d8a92d0bb3b78f2",
        description: "Ethereum signed message prefix: Hello World",
    },
    TestVector {
        input: b"\x19Ethereum Signed Message:\n40x42",
        expected: "f0d544d6e4a96e1c08adc3efabe2fcb9ec5e28db1ad6c33ace880ba354ab0fce",
        description: "Ethereum signed message prefix: `[0, x, 4, 2]` sequence of characters ",
    },
    TestVector {
        input: b"\x19Ethereum Signed Message:\n1B",
        expected: "d18c12b87124f9ceb7e1d3a5d06a5ac92ecab15931417e8d1558d9a263f99d63",
        description: "Ethereum signed message prefix: `0x42` byte in UTF-8",
    },
];
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/hash/keccak/mod.rs | miden-crypto/src/hash/keccak/mod.rs | use alloc::string::String;
use core::{
mem::size_of,
ops::Deref,
slice::{self, from_raw_parts},
};
use p3_field::BasedVectorSpace;
use sha3::Digest as Sha3Digest;
use super::{Felt, HasherExt};
use crate::{
field::PrimeField64,
utils::{
ByteReader, ByteWriter, Deserializable, DeserializationError, HexParseError, Serializable,
bytes_to_hex_string, hex_to_bytes,
},
};
#[cfg(test)]
mod tests;
// CONSTANTS
// ================================================================================================

/// Size of a Keccak-256 digest in bytes.
const DIGEST_BYTES: usize = 32;

// DIGEST
// ================================================================================================

/// Keccak digest: a 32-byte hash output.
///
/// `#[repr(transparent)]` guarantees this type has exactly the layout of `[u8; DIGEST_BYTES]`,
/// which `digests_as_bytes` (and `prepare_merge` below) rely on.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
#[repr(transparent)]
pub struct Keccak256Digest([u8; DIGEST_BYTES]);

impl Keccak256Digest {
    /// Returns the digest as a 32-byte array (copies; the type is `Copy`).
    pub fn as_bytes(&self) -> [u8; 32] {
        self.0
    }

    /// Reinterprets a slice of digests as one contiguous byte slice without copying.
    pub fn digests_as_bytes(digests: &[Keccak256Digest]) -> &[u8] {
        let p = digests.as_ptr();
        let len = digests.len() * DIGEST_BYTES;
        // SAFETY: `Keccak256Digest` is `#[repr(transparent)]` over `[u8; DIGEST_BYTES]`, so a
        // slice of digests is a single contiguous, initialized run of `len` bytes; the borrow
        // of `digests` keeps the memory alive for the returned lifetime.
        unsafe { slice::from_raw_parts(p as *const u8, len) }
    }
}
impl Default for Keccak256Digest {
    /// The all-zero digest.
    fn default() -> Self {
        Self([0; DIGEST_BYTES])
    }
}

impl Deref for Keccak256Digest {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl From<Keccak256Digest> for [u8; DIGEST_BYTES] {
    fn from(value: Keccak256Digest) -> Self {
        value.0
    }
}

impl From<[u8; DIGEST_BYTES]> for Keccak256Digest {
    fn from(value: [u8; DIGEST_BYTES]) -> Self {
        Self(value)
    }
}

impl From<Keccak256Digest> for String {
    /// Hex-encodes the digest.
    fn from(value: Keccak256Digest) -> Self {
        bytes_to_hex_string(value.as_bytes())
    }
}

impl TryFrom<&str> for Keccak256Digest {
    type Error = HexParseError;

    /// Parses a hex string into a digest; fails on malformed hex or wrong length.
    fn try_from(value: &str) -> Result<Self, Self::Error> {
        hex_to_bytes(value).map(|v| v.into())
    }
}

impl Serializable for Keccak256Digest {
    fn write_into<W: ByteWriter>(&self, target: &mut W) {
        target.write_bytes(&self.0);
    }
}

impl Deserializable for Keccak256Digest {
    fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
        source.read_array().map(Self)
    }
}
// KECCAK256 HASHER
// ================================================================================================

/// Keccak256 hash function (wrapper around the `sha3` crate's `Keccak256`).
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct Keccak256;

impl HasherExt for Keccak256 {
    type Digest = Keccak256Digest;

    /// Hashes the concatenation of all provided byte slices in a single pass.
    fn hash_iter<'a>(slices: impl Iterator<Item = &'a [u8]>) -> Self::Digest {
        let mut hasher = sha3::Keccak256::new();
        for slice in slices {
            hasher.update(slice);
        }
        Keccak256Digest(hasher.finalize().into())
    }
}
impl Keccak256 {
    /// Keccak256 collision resistance is 128-bits for 32-bytes output.
    pub const COLLISION_RESISTANCE: u32 = 128;

    /// Hashes the provided byte sequence.
    pub fn hash(bytes: &[u8]) -> Keccak256Digest {
        let mut hasher = sha3::Keccak256::new();
        hasher.update(bytes);
        Keccak256Digest(hasher.finalize().into())
    }

    /// Hashes the concatenation of two digests (2-to-1 compression).
    pub fn merge(values: &[Keccak256Digest; 2]) -> Keccak256Digest {
        Self::hash(prepare_merge(values))
    }

    /// Hashes the concatenation of an arbitrary number of digests.
    pub fn merge_many(values: &[Keccak256Digest]) -> Keccak256Digest {
        let data = Keccak256Digest::digests_as_bytes(values);
        let mut hasher = sha3::Keccak256::new();
        hasher.update(data);
        Keccak256Digest(hasher.finalize().into())
    }

    /// Hashes a digest followed by a `u64` value in little-endian byte order.
    pub fn merge_with_int(seed: Keccak256Digest, value: u64) -> Keccak256Digest {
        let mut hasher = sha3::Keccak256::new();
        hasher.update(seed.0);
        hasher.update(value.to_le_bytes());
        Keccak256Digest(hasher.finalize().into())
    }

    /// Returns a hash of the provided field elements.
    #[inline(always)]
    pub fn hash_elements<E>(elements: &[E]) -> Keccak256Digest
    where
        E: BasedVectorSpace<Felt>,
    {
        hash_elements(elements).into()
    }

    /// Hashes an iterator of byte slices.
    ///
    /// Inherent convenience wrapper over the [`HasherExt`] implementation above.
    #[inline(always)]
    pub fn hash_iter<'a>(slices: impl Iterator<Item = &'a [u8]>) -> Keccak256Digest {
        <Self as HasherExt>::hash_iter(slices)
    }
}
// HELPER FUNCTIONS
// ================================================================================================

/// Hash the elements into bytes and shrink the output.
///
/// Each base-field coefficient is serialized as its canonical `u64` in little-endian order;
/// bytes are staged in a rate-sized buffer so the underlying hasher is fed in 136-byte
/// chunks rather than 8 bytes at a time.
fn hash_elements<E>(elements: &[E]) -> [u8; DIGEST_BYTES]
where
    E: BasedVectorSpace<Felt>,
{
    // don't leak assumptions from felt and check its actual implementation.
    let digest = {
        const FELT_BYTES: usize = size_of::<u64>();
        // compile-time guard: the buffer arithmetic below steps in 8-byte units
        const { assert!(FELT_BYTES == 8, "buffer arithmetic assumes 8-byte field elements") };
        let mut hasher = sha3::Keccak256::new();
        // Keccak256 rate: 1600 bits (state) - 512 bits (capacity) = 1088 bits = 136 bytes
        let mut buf = [0_u8; 136];
        let mut buf_offset = 0;
        for elem in elements.iter() {
            for &felt in E::as_basis_coefficients_slice(elem) {
                buf[buf_offset..buf_offset + FELT_BYTES]
                    .copy_from_slice(&felt.as_canonical_u64().to_le_bytes());
                buf_offset += FELT_BYTES;
                // flush a full rate-sized block (136 is a multiple of 8, so offsets align)
                if buf_offset == 136 {
                    hasher.update(buf);
                    buf_offset = 0;
                }
            }
        }
        // flush any remaining partial block
        if buf_offset > 0 {
            hasher.update(&buf[..buf_offset]);
        }
        hasher.finalize()
    };
    digest.into()
}
// Cast the slice into contiguous bytes.
//
// NOTE(review): this computes the byte length as `size_of::<D>() * N`, which is only correct
// when `D` stores its byte content inline with no indirection or padding — true for
// `Keccak256Digest` (`#[repr(transparent)]` over `[u8; 32]`), but a `D` such as `Vec<u8>`
// would make the cast unsound. The `debug_assert` below only spot-checks the first element;
// confirm any new `D` used here has the same transparent layout.
fn prepare_merge<const N: usize, D>(args: &[D; N]) -> &[u8]
where
    D: Deref<Target = [u8]>,
{
    // compile-time assertion
    assert!(N > 0, "N shouldn't represent an empty slice!");
    let values = args.as_ptr() as *const u8;
    let len = size_of::<D>() * N;
    // safety: the values are tested to be contiguous
    let bytes = unsafe { from_raw_parts(values, len) };
    debug_assert_eq!(args[0].deref(), &bytes[..len / N]);
    bytes
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/word/tests.rs | miden-crypto/src/word/tests.rs | #![cfg(feature = "std")]
use alloc::string::String;
use p3_field::PrimeCharacteristicRing;
use super::{Deserializable, Felt, Serializable, WORD_SIZE_BYTES, WORD_SIZE_FELT, Word};
use crate::{rand::test_utils::rand_value, utils::SliceReader, word};
// TESTS
// ================================================================================================
/// Round-trips a random `Word` through serialization and checks the encoded size.
#[test]
fn word_serialization() {
    let e1 = Felt::new(rand_value());
    let e2 = Felt::new(rand_value());
    let e3 = Felt::new(rand_value());
    let e4 = Felt::new(rand_value());
    let d1 = Word([e1, e2, e3, e4]);
    let mut bytes = vec![];
    d1.write_into(&mut bytes);
    // encoded form is exactly WORD_SIZE_BYTES and matches the size hint
    assert_eq!(WORD_SIZE_BYTES, bytes.len());
    assert_eq!(bytes.len(), d1.get_size_hint());
    let mut reader = SliceReader::new(&bytes);
    let d2 = Word::read_from(&mut reader).unwrap();
    assert_eq!(d1, d2);
}
/// Round-trips a random `Word` through its hex-string encoding.
#[test]
fn word_encoding() {
    let word = Word([
        Felt::new(rand_value()),
        Felt::new(rand_value()),
        Felt::new(rand_value()),
        Felt::new(rand_value()),
    ]);
    let string: String = word.into();
    let round_trip: Word = string.try_into().expect("decoding failed");
    assert_eq!(word, round_trip);
}
/// Exercises every `From`/`TryFrom` conversion on `Word`, both by value and by reference.
#[test]
fn test_conversions() {
    let word = Word([
        Felt::new(rand_value()),
        Felt::new(rand_value()),
        Felt::new(rand_value()),
        Felt::new(rand_value()),
    ]);

    // BY VALUE
    // ----------------------------------------------------------------------------------------
    let v: [bool; WORD_SIZE_FELT] = [true, false, true, true];
    let v2: Word = v.into();
    assert_eq!(v, <[bool; WORD_SIZE_FELT]>::try_from(v2).unwrap());

    let v: [u8; WORD_SIZE_FELT] = [0_u8, 1_u8, 2_u8, 3_u8];
    let v2: Word = v.into();
    assert_eq!(v, <[u8; WORD_SIZE_FELT]>::try_from(v2).unwrap());

    let v: [u16; WORD_SIZE_FELT] = [0_u16, 1_u16, 2_u16, 3_u16];
    let v2: Word = v.into();
    assert_eq!(v, <[u16; WORD_SIZE_FELT]>::try_from(v2).unwrap());

    let v: [u32; WORD_SIZE_FELT] = [0_u32, 1_u32, 2_u32, 3_u32];
    let v2: Word = v.into();
    assert_eq!(v, <[u32; WORD_SIZE_FELT]>::try_from(v2).unwrap());

    let v: [u64; WORD_SIZE_FELT] = word.into();
    let v2: Word = v.try_into().unwrap();
    assert_eq!(word, v2);

    let v: [Felt; WORD_SIZE_FELT] = word.into();
    let v2: Word = v.into();
    assert_eq!(word, v2);

    let v: [u8; WORD_SIZE_BYTES] = word.into();
    let v2: Word = v.try_into().unwrap();
    assert_eq!(word, v2);

    let v: String = word.into();
    let v2: Word = v.try_into().unwrap();
    assert_eq!(word, v2);

    // BY REF
    // ----------------------------------------------------------------------------------------
    let v: [bool; WORD_SIZE_FELT] = [true, false, true, true];
    let v2: Word = (&v).into();
    assert_eq!(v, <[bool; WORD_SIZE_FELT]>::try_from(&v2).unwrap());

    let v: [u8; WORD_SIZE_FELT] = [0_u8, 1_u8, 2_u8, 3_u8];
    let v2: Word = (&v).into();
    assert_eq!(v, <[u8; WORD_SIZE_FELT]>::try_from(&v2).unwrap());

    let v: [u16; WORD_SIZE_FELT] = [0_u16, 1_u16, 2_u16, 3_u16];
    let v2: Word = (&v).into();
    assert_eq!(v, <[u16; WORD_SIZE_FELT]>::try_from(&v2).unwrap());

    let v: [u32; WORD_SIZE_FELT] = [0_u32, 1_u32, 2_u32, 3_u32];
    let v2: Word = (&v).into();
    assert_eq!(v, <[u32; WORD_SIZE_FELT]>::try_from(&v2).unwrap());

    let v: [u64; WORD_SIZE_FELT] = (&word).into();
    let v2: Word = (&v).try_into().unwrap();
    assert_eq!(word, v2);

    let v: [Felt; WORD_SIZE_FELT] = (&word).into();
    let v2: Word = (&v).into();
    assert_eq!(word, v2);

    let v: [u8; WORD_SIZE_BYTES] = (&word).into();
    let v2: Word = (&v).try_into().unwrap();
    assert_eq!(word, v2);

    let v: String = (&word).into();
    let v2: Word = (&v).try_into().unwrap();
    assert_eq!(word, v2);
}
/// `Index` returns the element at each of the four positions.
#[test]
fn test_index() {
    let word = Word::new([
        Felt::from_u32(1_u32),
        Felt::from_u32(2_u32),
        Felt::from_u32(3_u32),
        Felt::from_u32(4_u32),
    ]);
    assert_eq!(word[0], Felt::from_u32(1_u32));
    assert_eq!(word[1], Felt::from_u32(2_u32));
    assert_eq!(word[2], Felt::from_u32(3_u32));
    assert_eq!(word[3], Felt::from_u32(4_u32));
}
/// `IndexMut` allows overwriting each of the four positions.
#[test]
fn test_index_mut() {
    let mut word = Word::new([
        Felt::from_u32(1_u32),
        Felt::from_u32(2_u32),
        Felt::from_u32(3_u32),
        Felt::from_u32(4_u32),
    ]);
    word[0] = Felt::from_u32(5_u32);
    word[1] = Felt::from_u32(6_u32);
    word[2] = Felt::from_u32(7_u32);
    word[3] = Felt::from_u32(8_u32);
    assert_eq!(word[0], Felt::from_u32(5_u32));
    assert_eq!(word[1], Felt::from_u32(6_u32));
    assert_eq!(word[2], Felt::from_u32(7_u32));
    assert_eq!(word[3], Felt::from_u32(8_u32));
}
#[test]
fn test_index_mut_range() {
let mut word = Word::new([
Felt::from_u32(1_u32),
Felt::from_u32(2_u32),
Felt::from_u32(3_u32),
Felt::from_u32(4_u32),
]);
word[1..3].copy_from_slice(&[Felt::from_u32(6_u32), Felt::from_u32(7_u32)]);
assert_eq!(word[1], Felt::from_u32(6_u32));
assert_eq!(word[2], Felt::from_u32(7_u32));
}
#[rstest::rstest]
#[case::missing_prefix("1234")]
#[case::invalid_character("1234567890abcdefg")]
#[case::too_long("0xx00000000000000000000000000000000000000000000000000000000000000001")]
#[case::overflow_felt0("0x01000000ffffffff000000000000000000000000000000000000000000000000")]
#[case::overflow_felt1("0x000000000000000001000000ffffffff00000000000000000000000000000000")]
#[case::overflow_felt2("0x0000000000000000000000000000000001000000ffffffff0000000000000000")]
#[case::overflow_felt3("0x00000000000000000000000000000000000000000000000001000000ffffffff")]
#[should_panic]
fn word_macro_invalid(#[case] bad_input: &str) {
word!(bad_input);
}
#[rstest::rstest]
#[case::each_digit("0x1234567890abcdef")]
#[case::empty("0x")]
#[case::zero("0x0")]
#[case::zero_full("0x0000000000000000000000000000000000000000000000000000000000000000")]
#[case::one_lsb("0x1")]
#[case::one_msb("0x0000000000000000000000000000000000000000000000000000000000000001")]
#[case::one_partial("0x0001")]
#[case::odd("0x123")]
#[case::even("0x1234")]
#[case::touch_each_felt("0x00000000000123450000000000067890000000000000abcd00000000000000ef")]
#[case::unique_felt("0x111111111111111155555555555555559999999999999999cccccccccccccccc")]
#[case::digits_on_repeat("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef")]
fn word_macro(#[case] input: &str) {
let uut = word!(input);
// Right pad to 64 hex digits (66 including prefix). This is required by the
// Word::try_from(String) implementation.
let padded_input = format!("{input:<66}").replace(" ", "0");
let expected = crate::Word::try_from(padded_input.as_str()).unwrap();
assert_eq!(uut, expected);
}
#[rstest::rstest]
#[case::first_nibble("0x1000000000000000000000000000000000000000000000000000000000000000", crate::Word::new([Felt::new(16), Felt::new(0), Felt::new(0), Felt::new(0)]))]
#[case::second_nibble("0x0100000000000000000000000000000000000000000000000000000000000000", crate::Word::new([Felt::new(1), Felt::new(0), Felt::new(0), Felt::new(0)]))]
#[case::all_first_nibbles("0x1000000000000000100000000000000010000000000000001000000000000000", crate::Word::new([Felt::new(16), Felt::new(16), Felt::new(16), Felt::new(16)]))]
#[case::all_first_nibbles_asc("0x1000000000000000200000000000000030000000000000004000000000000000", crate::Word::new([Felt::new(16), Felt::new(32), Felt::new(48), Felt::new(64)]))]
fn word_macro_endianness(#[case] input: &str, #[case] expected: crate::Word) {
let uut = word!(input);
assert_eq!(uut, expected);
}
#[test]
fn word_ord_respects_partialeq() {
use core::cmp::Ordering;
// Test that Word::cmp() respects the PartialEq invariant:
// if a == b, then a.cmp(b) must equal Ordering::Equal
let test_cases = vec![
Word::new([Felt::new(2), Felt::new(0), Felt::new(0), Felt::new(0)]),
Word::new([Felt::new(0), Felt::new(0), Felt::new(0), Felt::new(0)]),
Word::new([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]),
Word::new([Felt::new(100), Felt::new(200), Felt::new(300), Felt::new(400)]),
];
for word in test_cases {
let word_copy = word;
assert_eq!(word, word_copy, "Word should be equal to itself");
assert_eq!(
word.cmp(&word_copy),
Ordering::Equal,
"Word::cmp() should return Ordering::Equal for equal words: {:?}",
word
);
}
}
#[test]
fn word_ord_btreemap_usage() {
use alloc::collections::BTreeMap;
// Test that Word works correctly as a BTreeMap key
// This will fail if Ord and PartialEq are inconsistent
let mut map = BTreeMap::new();
let key1 = Word::new([Felt::new(2), Felt::new(0), Felt::new(0), Felt::new(0)]);
let key2 = Word::new([Felt::new(2), Felt::new(0), Felt::new(0), Felt::new(0)]);
map.insert(key1, "value1");
// key2 should be equal to key1
assert_eq!(key1, key2);
// So map should contain key2
assert!(map.contains_key(&key2), "BTreeMap should find key2 since it's equal to key1");
// And getting by key2 should return the same value
assert_eq!(map.get(&key2), Some(&"value1"));
// Inserting with key2 should update the existing entry
map.insert(key2, "value2");
assert_eq!(map.len(), 1, "Map should still have only one entry");
assert_eq!(map.get(&key1), Some(&"value2"));
}
#[test]
fn word_ord_consistency_with_partialeq() {
use core::cmp::Ordering;
// Comprehensive test that Ord is consistent with PartialEq
// This is required by Rust's trait contract: if a == b, then a.cmp(b) == Ordering::Equal
let test_pairs = vec![
// Same values
(
Word::new([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]),
Word::new([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]),
Ordering::Equal,
),
// Different first element
(
Word::new([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]),
Word::new([Felt::new(2), Felt::new(2), Felt::new(3), Felt::new(4)]),
Ordering::Less,
),
// Different last element
(
Word::new([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]),
Word::new([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(3)]),
Ordering::Greater,
),
];
for (w1, w2, expected_ordering) in test_pairs {
let actual_ordering = w1.cmp(&w2);
assert_eq!(
actual_ordering, expected_ordering,
"Word::cmp mismatch: {:?}.cmp({:?}) returned {:?}, expected {:?}",
w1, w2, actual_ordering, expected_ordering
);
// Verify consistency with PartialEq
match expected_ordering {
Ordering::Equal => {
assert_eq!(w1, w2, "Words should be equal when cmp returns Equal");
},
Ordering::Less => {
assert_ne!(w1, w2, "Words should not be equal when cmp returns Less");
},
Ordering::Greater => {
assert_ne!(w1, w2, "Words should not be equal when cmp returns Greater");
},
}
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/word/lexicographic.rs | miden-crypto/src/word/lexicographic.rs | use core::cmp::Ordering;
use p3_field::PrimeField64;
use super::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable};
use crate::{Felt, WORD_SIZE, Word};
// LEXICOGRAPHIC WORD
// ================================================================================================
/// A [`Word`] wrapper with lexicographic ordering.
///
/// This is a wrapper around any [`Word`] convertible type that overrides the equality and ordering
/// implementations with a lexigographic one based on the wrapped type's [`Word`] representation.
#[derive(Debug, Clone, Copy)]
pub struct LexicographicWord<T: Into<Word> = Word>(T);
impl<T: Into<Word>> LexicographicWord<T> {
/// Wraps the provided value into a new [`LexicographicWord`].
pub fn new(inner: T) -> Self {
Self(inner)
}
/// Returns a reference to the inner value.
pub fn inner(&self) -> &T {
&self.0
}
/// Consumes self and returns the inner value.
pub fn into_inner(self) -> T {
self.0
}
}
impl From<[Felt; WORD_SIZE]> for LexicographicWord {
fn from(value: [Felt; WORD_SIZE]) -> Self {
Self(value.into())
}
}
impl From<Word> for LexicographicWord {
fn from(word: Word) -> Self {
Self(word)
}
}
impl<T: Into<Word>> From<LexicographicWord<T>> for Word {
fn from(key: LexicographicWord<T>) -> Self {
key.0.into()
}
}
impl<T: Into<Word> + Copy> PartialEq for LexicographicWord<T> {
fn eq(&self, other: &Self) -> bool {
let self_word: Word = self.0.into();
let other_word: Word = other.0.into();
self_word == other_word
}
}
impl<T: Into<Word> + Copy> Eq for LexicographicWord<T> {}
impl<T: Into<Word> + Copy> PartialOrd for LexicographicWord<T> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<T: Into<Word> + Copy> Ord for LexicographicWord<T> {
fn cmp(&self, other: &Self) -> Ordering {
let self_word: Word = self.0.into();
let other_word: Word = other.0.into();
for (felt0, felt1) in self_word
.iter()
.rev()
.map(Felt::as_canonical_u64)
.zip(other_word.iter().rev().map(Felt::as_canonical_u64))
{
let ordering = felt0.cmp(&felt1);
if let Ordering::Less | Ordering::Greater = ordering {
return ordering;
}
}
Ordering::Equal
}
}
// SERIALIZATION
// ================================================================================================
impl<T: Into<Word> + Copy> Serializable for LexicographicWord<T> {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
self.0.into().write_into(target);
}
fn get_size_hint(&self) -> usize {
self.0.into().get_size_hint()
}
}
impl<T: Into<Word> + From<Word>> Deserializable for LexicographicWord<T> {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
let word = Word::read_from(source)?;
Ok(Self::new(T::from(word)))
}
}
// TESTS
// ================================================================================================
#[cfg(test)]
mod tests {
use p3_field::PrimeCharacteristicRing;
use super::*;
#[derive(Debug, Clone, Copy)]
struct NoteId(Word);
impl From<Word> for NoteId {
fn from(value: Word) -> Self {
Self(value)
}
}
impl From<NoteId> for Word {
fn from(value: NoteId) -> Self {
value.0
}
}
#[test]
fn lexicographic_word_ordering() {
for (expected, key0, key1) in [
(Ordering::Equal, [0, 0, 0, 0u32], [0, 0, 0, 0u32]),
(Ordering::Greater, [1, 0, 0, 0u32], [0, 0, 0, 0u32]),
(Ordering::Greater, [0, 1, 0, 0u32], [0, 0, 0, 0u32]),
(Ordering::Greater, [0, 0, 1, 0u32], [0, 0, 0, 0u32]),
(Ordering::Greater, [0, 0, 0, 1u32], [0, 0, 0, 0u32]),
(Ordering::Less, [0, 0, 0, 0u32], [1, 0, 0, 0u32]),
(Ordering::Less, [0, 0, 0, 0u32], [0, 1, 0, 0u32]),
(Ordering::Less, [0, 0, 0, 0u32], [0, 0, 1, 0u32]),
(Ordering::Less, [0, 0, 0, 0u32], [0, 0, 0, 1u32]),
(Ordering::Greater, [0, 0, 0, 1u32], [1, 1, 1, 0u32]),
(Ordering::Greater, [0, 0, 1, 0u32], [1, 1, 0, 0u32]),
(Ordering::Less, [1, 1, 1, 0u32], [0, 0, 0, 1u32]),
(Ordering::Less, [1, 1, 0, 0u32], [0, 0, 1, 0u32]),
] {
assert_eq!(
LexicographicWord::from(key0.map(Felt::from_u32))
.cmp(&LexicographicWord::from(key1.map(Felt::from_u32))),
expected
);
}
}
#[test]
fn lexicographic_serialization() {
let word = Word::from([1u64, 2, 3, 4].map(Felt::new));
let key = LexicographicWord::new(word);
let bytes = key.to_bytes();
let deserialized_key = LexicographicWord::<Word>::read_from_bytes(&bytes).unwrap();
assert_eq!(key, deserialized_key);
let note_id = NoteId::from(word);
let key = LexicographicWord::new(note_id);
let bytes = key.to_bytes();
let deserialized_key = LexicographicWord::<NoteId>::read_from_bytes(&bytes).unwrap();
assert_eq!(key, deserialized_key);
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/word/mod.rs | miden-crypto/src/word/mod.rs | //! A [Word] type used in the Miden protocol and associated utilities.
use alloc::{string::String, vec::Vec};
use core::{
cmp::Ordering,
fmt::Display,
hash::{Hash, Hasher},
ops::{Deref, DerefMut, Index, IndexMut, Range},
slice,
};
use thiserror::Error;
const WORD_SIZE_FELT: usize = 4;
const WORD_SIZE_BYTES: usize = 32;
use p3_field::integers::QuotientMap;
use super::{Felt, ZERO};
use crate::{
field::{PrimeCharacteristicRing, PrimeField64},
rand::Randomizable,
utils::{
ByteReader, ByteWriter, Deserializable, DeserializationError, HexParseError, Serializable,
bytes_to_hex_string, hex_to_bytes,
},
};
mod lexicographic;
pub use lexicographic::LexicographicWord;
#[cfg(test)]
mod tests;
// WORD
// ================================================================================================
/// A unit of data consisting of 4 field elements.
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
#[cfg_attr(feature = "serde", serde(into = "String", try_from = "&str"))]
pub struct Word([Felt; WORD_SIZE_FELT]);
impl Word {
/// The serialized size of the word in bytes.
pub const SERIALIZED_SIZE: usize = WORD_SIZE_BYTES;
/// Creates a new [`Word`] from the given field elements.
pub const fn new(value: [Felt; WORD_SIZE_FELT]) -> Self {
Self(value)
}
/// Parses a hex string into a new [`Word`].
///
/// The input must contain valid hex prefixed with `0x`. The input after the prefix
/// must contain between 0 and 64 characters (inclusive).
///
/// The input is interpreted to have little-endian byte ordering. Nibbles are interpreted
/// to have big-endian ordering so that "0x10" represents Felt::new(16), not Felt::new(1).
///
/// This function is usually used via the `word!` macro.
///
/// ```
/// use miden_crypto::{Felt, Word, word};
/// let word = word!("0x1000000000000000200000000000000030000000000000004000000000000000");
/// assert_eq!(word, Word::new([Felt::new(16), Felt::new(32), Felt::new(48), Felt::new(64)]));
/// ```
pub const fn parse(hex: &str) -> Result<Self, &'static str> {
const fn parse_hex_digit(digit: u8) -> Result<u8, &'static str> {
match digit {
b'0'..=b'9' => Ok(digit - b'0'),
b'A'..=b'F' => Ok(digit - b'A' + 0x0a),
b'a'..=b'f' => Ok(digit - b'a' + 0x0a),
_ => Err("Invalid hex character"),
}
}
// Enforce and skip the '0x' prefix.
let hex_bytes = match hex.as_bytes() {
[b'0', b'x', rest @ ..] => rest,
_ => return Err("Hex string must have a \"0x\" prefix"),
};
if hex_bytes.len() > 64 {
return Err("Hex string has more than 64 characters");
}
let mut felts = [0u64; 4];
let mut i = 0;
while i < hex_bytes.len() {
let hex_digit = match parse_hex_digit(hex_bytes[i]) {
// SAFETY: u8 cast to u64 is safe. We cannot use u64::from in const context so we
// are forced to cast.
Ok(v) => v as u64,
Err(e) => return Err(e),
};
// This digit's nibble offset within the felt. We need to invert the nibbles per
// byte to ensure little-endian ordering i.e. ABCD -> BADC.
let inibble = if i.is_multiple_of(2) {
(i + 1) % 16
} else {
(i - 1) % 16
};
let value = hex_digit << (inibble * 4);
felts[i / 2 / 8] += value;
i += 1;
}
// Ensure each felt is within bounds as `Felt::new` silently wraps around.
// This matches the behavior of `Word::try_from(String)`.
let mut idx = 0;
while idx < felts.len() {
if felts[idx] >= Felt::ORDER_U64 {
return Err("Felt overflow");
}
idx += 1;
}
Ok(Self::new([
Felt::new(felts[0]),
Felt::new(felts[1]),
Felt::new(felts[2]),
Felt::new(felts[3]),
]))
}
/// Returns a new [Word] consisting of four ZERO elements.
pub const fn empty() -> Self {
Self([Felt::ZERO; WORD_SIZE_FELT])
}
/// Returns true if the word consists of four ZERO elements.
pub fn is_empty(&self) -> bool {
self.0[0] == Felt::ZERO
&& self.0[1] == Felt::ZERO
&& self.0[2] == Felt::ZERO
&& self.0[3] == Felt::ZERO
}
/// Returns the word as a slice of field elements.
pub fn as_elements(&self) -> &[Felt] {
self.as_ref()
}
/// Returns the word as a byte array.
pub fn as_bytes(&self) -> [u8; WORD_SIZE_BYTES] {
let mut result = [0; WORD_SIZE_BYTES];
result[..8].copy_from_slice(&self.0[0].as_canonical_u64().to_le_bytes());
result[8..16].copy_from_slice(&self.0[1].as_canonical_u64().to_le_bytes());
result[16..24].copy_from_slice(&self.0[2].as_canonical_u64().to_le_bytes());
result[24..].copy_from_slice(&self.0[3].as_canonical_u64().to_le_bytes());
result
}
/// Returns an iterator over the elements of multiple words.
pub(crate) fn words_as_elements_iter<'a, I>(words: I) -> impl Iterator<Item = &'a Felt>
where
I: Iterator<Item = &'a Self>,
{
words.flat_map(|d| d.0.iter())
}
/// Returns all elements of multiple words as a slice.
pub fn words_as_elements(words: &[Self]) -> &[Felt] {
let p = words.as_ptr();
let len = words.len() * WORD_SIZE_FELT;
unsafe { slice::from_raw_parts(p as *const Felt, len) }
}
/// Returns hexadecimal representation of this word prefixed with `0x`.
pub fn to_hex(&self) -> String {
bytes_to_hex_string(self.as_bytes())
}
/// Returns internal elements of this word as a vector.
pub fn to_vec(&self) -> Vec<Felt> {
self.0.to_vec()
}
}
impl Hash for Word {
fn hash<H: Hasher>(&self, state: &mut H) {
state.write(&self.as_bytes());
}
}
impl Deref for Word {
type Target = [Felt; WORD_SIZE_FELT];
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for Word {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl Index<usize> for Word {
type Output = Felt;
fn index(&self, index: usize) -> &Self::Output {
&self.0[index]
}
}
impl IndexMut<usize> for Word {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
&mut self.0[index]
}
}
impl Index<Range<usize>> for Word {
type Output = [Felt];
fn index(&self, index: Range<usize>) -> &Self::Output {
&self.0[index]
}
}
impl IndexMut<Range<usize>> for Word {
fn index_mut(&mut self, index: Range<usize>) -> &mut Self::Output {
&mut self.0[index]
}
}
impl Ord for Word {
fn cmp(&self, other: &Self) -> Ordering {
// Compare the canonical u64 representation of both elements.
//
// It will iterate the elements and will return the first computation different than
// `Equal`. Otherwise, the ordering is equal.
//
// We use `as_canonical_u64()` to ensure we're comparing the actual field element values
// in their canonical form (that is, `x in [0,p)`). P3's Goldilocks field uses unreduced
// representation (not Montgomery form), meaning internal values may be in [0, 2^64) even
// though the field order is p = 2^64 - 2^32 + 1. This method canonicalizes to [0, p).
//
// We must iterate over and compare each element individually. A simple bytestring
// comparison would be inappropriate because the `Word`s are represented in
// "lexicographical" order.
self.0
.iter()
.map(Felt::as_canonical_u64)
.zip(other.0.iter().map(Felt::as_canonical_u64))
.fold(Ordering::Equal, |ord, (a, b)| match ord {
Ordering::Equal => a.cmp(&b),
_ => ord,
})
}
}
impl PartialOrd for Word {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Display for Word {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "{}", self.to_hex())
}
}
impl Randomizable for Word {
const VALUE_SIZE: usize = WORD_SIZE_BYTES;
fn from_random_bytes(bytes: &[u8]) -> Option<Self> {
let bytes_array: Option<[u8; 32]> = bytes.try_into().ok();
if let Some(bytes_array) = bytes_array {
Self::try_from(bytes_array).ok()
} else {
None
}
}
}
// CONVERSIONS: FROM WORD
// ================================================================================================
/// Errors that can occur when working with a [Word].
#[derive(Debug, Error)]
pub enum WordError {
/// Hex-encoded field elements parsed are invalid.
#[error("hex encoded values of a word are invalid")]
HexParse(#[from] HexParseError),
/// Field element conversion failed due to invalid value.
#[error("failed to convert to field element: {0}")]
InvalidFieldElement(String),
/// Failed to convert a slice to an array of expected length.
#[error("invalid input length: expected {1} {0}, but received {2}")]
InvalidInputLength(&'static str, usize, usize),
/// Failed to convert the word's field elements to the specified type.
#[error("failed to convert the word's field elements to type {0}")]
TypeConversion(&'static str),
}
impl TryFrom<&Word> for [bool; WORD_SIZE_FELT] {
type Error = WordError;
fn try_from(value: &Word) -> Result<Self, Self::Error> {
(*value).try_into()
}
}
impl TryFrom<Word> for [bool; WORD_SIZE_FELT] {
type Error = WordError;
fn try_from(value: Word) -> Result<Self, Self::Error> {
fn to_bool(v: u64) -> Option<bool> {
if v <= 1 { Some(v == 1) } else { None }
}
Ok([
to_bool(value.0[0].as_canonical_u64()).ok_or(WordError::TypeConversion("bool"))?,
to_bool(value.0[1].as_canonical_u64()).ok_or(WordError::TypeConversion("bool"))?,
to_bool(value.0[2].as_canonical_u64()).ok_or(WordError::TypeConversion("bool"))?,
to_bool(value.0[3].as_canonical_u64()).ok_or(WordError::TypeConversion("bool"))?,
])
}
}
impl TryFrom<&Word> for [u8; WORD_SIZE_FELT] {
type Error = WordError;
fn try_from(value: &Word) -> Result<Self, Self::Error> {
(*value).try_into()
}
}
impl TryFrom<Word> for [u8; WORD_SIZE_FELT] {
type Error = WordError;
fn try_from(value: Word) -> Result<Self, Self::Error> {
Ok([
value.0[0]
.as_canonical_u64()
.try_into()
.map_err(|_| WordError::TypeConversion("u8"))?,
value.0[1]
.as_canonical_u64()
.try_into()
.map_err(|_| WordError::TypeConversion("u8"))?,
value.0[2]
.as_canonical_u64()
.try_into()
.map_err(|_| WordError::TypeConversion("u8"))?,
value.0[3]
.as_canonical_u64()
.try_into()
.map_err(|_| WordError::TypeConversion("u8"))?,
])
}
}
impl TryFrom<&Word> for [u16; WORD_SIZE_FELT] {
type Error = WordError;
fn try_from(value: &Word) -> Result<Self, Self::Error> {
(*value).try_into()
}
}
impl TryFrom<Word> for [u16; WORD_SIZE_FELT] {
type Error = WordError;
fn try_from(value: Word) -> Result<Self, Self::Error> {
Ok([
value.0[0]
.as_canonical_u64()
.try_into()
.map_err(|_| WordError::TypeConversion("u16"))?,
value.0[1]
.as_canonical_u64()
.try_into()
.map_err(|_| WordError::TypeConversion("u16"))?,
value.0[2]
.as_canonical_u64()
.try_into()
.map_err(|_| WordError::TypeConversion("u16"))?,
value.0[3]
.as_canonical_u64()
.try_into()
.map_err(|_| WordError::TypeConversion("u16"))?,
])
}
}
impl TryFrom<&Word> for [u32; WORD_SIZE_FELT] {
type Error = WordError;
fn try_from(value: &Word) -> Result<Self, Self::Error> {
(*value).try_into()
}
}
impl TryFrom<Word> for [u32; WORD_SIZE_FELT] {
type Error = WordError;
fn try_from(value: Word) -> Result<Self, Self::Error> {
Ok([
value.0[0]
.as_canonical_u64()
.try_into()
.map_err(|_| WordError::TypeConversion("u32"))?,
value.0[1]
.as_canonical_u64()
.try_into()
.map_err(|_| WordError::TypeConversion("u32"))?,
value.0[2]
.as_canonical_u64()
.try_into()
.map_err(|_| WordError::TypeConversion("u32"))?,
value.0[3]
.as_canonical_u64()
.try_into()
.map_err(|_| WordError::TypeConversion("u32"))?,
])
}
}
impl From<&Word> for [u64; WORD_SIZE_FELT] {
fn from(value: &Word) -> Self {
(*value).into()
}
}
impl From<Word> for [u64; WORD_SIZE_FELT] {
fn from(value: Word) -> Self {
[
value.0[0].as_canonical_u64(),
value.0[1].as_canonical_u64(),
value.0[2].as_canonical_u64(),
value.0[3].as_canonical_u64(),
]
}
}
impl From<&Word> for [Felt; WORD_SIZE_FELT] {
fn from(value: &Word) -> Self {
(*value).into()
}
}
impl From<Word> for [Felt; WORD_SIZE_FELT] {
fn from(value: Word) -> Self {
value.0
}
}
impl From<&Word> for [u8; WORD_SIZE_BYTES] {
fn from(value: &Word) -> Self {
(*value).into()
}
}
impl From<Word> for [u8; WORD_SIZE_BYTES] {
fn from(value: Word) -> Self {
value.as_bytes()
}
}
impl From<&Word> for String {
/// The returned string starts with `0x`.
fn from(value: &Word) -> Self {
(*value).into()
}
}
impl From<Word> for String {
/// The returned string starts with `0x`.
fn from(value: Word) -> Self {
value.to_hex()
}
}
// CONVERSIONS: TO WORD
// ================================================================================================
impl From<&[bool; WORD_SIZE_FELT]> for Word {
fn from(value: &[bool; WORD_SIZE_FELT]) -> Self {
(*value).into()
}
}
impl From<[bool; WORD_SIZE_FELT]> for Word {
fn from(value: [bool; WORD_SIZE_FELT]) -> Self {
[value[0] as u32, value[1] as u32, value[2] as u32, value[3] as u32].into()
}
}
impl From<&[u8; WORD_SIZE_FELT]> for Word {
fn from(value: &[u8; WORD_SIZE_FELT]) -> Self {
(*value).into()
}
}
impl From<[u8; WORD_SIZE_FELT]> for Word {
fn from(value: [u8; WORD_SIZE_FELT]) -> Self {
Self([
Felt::from_u8(value[0]),
Felt::from_u8(value[1]),
Felt::from_u8(value[2]),
Felt::from_u8(value[3]),
])
}
}
impl From<&[u16; WORD_SIZE_FELT]> for Word {
fn from(value: &[u16; WORD_SIZE_FELT]) -> Self {
(*value).into()
}
}
impl From<[u16; WORD_SIZE_FELT]> for Word {
fn from(value: [u16; WORD_SIZE_FELT]) -> Self {
Self([
Felt::from_u16(value[0]),
Felt::from_u16(value[1]),
Felt::from_u16(value[2]),
Felt::from_u16(value[3]),
])
}
}
impl From<&[u32; WORD_SIZE_FELT]> for Word {
fn from(value: &[u32; WORD_SIZE_FELT]) -> Self {
(*value).into()
}
}
impl From<[u32; WORD_SIZE_FELT]> for Word {
fn from(value: [u32; WORD_SIZE_FELT]) -> Self {
Self([
Felt::from_u32(value[0]),
Felt::from_u32(value[1]),
Felt::from_u32(value[2]),
Felt::from_u32(value[3]),
])
}
}
impl TryFrom<&[u64; WORD_SIZE_FELT]> for Word {
type Error = WordError;
fn try_from(value: &[u64; WORD_SIZE_FELT]) -> Result<Self, WordError> {
(*value).try_into()
}
}
impl TryFrom<[u64; WORD_SIZE_FELT]> for Word {
type Error = WordError;
fn try_from(value: [u64; WORD_SIZE_FELT]) -> Result<Self, WordError> {
let err = || WordError::InvalidFieldElement("value >= field modulus".into());
Ok(Self([
Felt::from_canonical_checked(value[0]).ok_or_else(err)?,
Felt::from_canonical_checked(value[1]).ok_or_else(err)?,
Felt::from_canonical_checked(value[2]).ok_or_else(err)?,
Felt::from_canonical_checked(value[3]).ok_or_else(err)?,
]))
}
}
impl From<&[Felt; WORD_SIZE_FELT]> for Word {
fn from(value: &[Felt; WORD_SIZE_FELT]) -> Self {
Self(*value)
}
}
impl From<[Felt; WORD_SIZE_FELT]> for Word {
fn from(value: [Felt; WORD_SIZE_FELT]) -> Self {
Self(value)
}
}
impl TryFrom<&[u8; WORD_SIZE_BYTES]> for Word {
type Error = WordError;
fn try_from(value: &[u8; WORD_SIZE_BYTES]) -> Result<Self, Self::Error> {
(*value).try_into()
}
}
impl TryFrom<[u8; WORD_SIZE_BYTES]> for Word {
type Error = WordError;
fn try_from(value: [u8; WORD_SIZE_BYTES]) -> Result<Self, Self::Error> {
// Note: the input length is known, the conversion from slice to array must succeed so the
// `unwrap`s below are safe
let a = u64::from_le_bytes(value[0..8].try_into().unwrap());
let b = u64::from_le_bytes(value[8..16].try_into().unwrap());
let c = u64::from_le_bytes(value[16..24].try_into().unwrap());
let d = u64::from_le_bytes(value[24..32].try_into().unwrap());
let err = || WordError::InvalidFieldElement("value >= field modulus".into());
let a: Felt = Felt::from_canonical_checked(a).ok_or_else(err)?;
let b: Felt = Felt::from_canonical_checked(b).ok_or_else(err)?;
let c: Felt = Felt::from_canonical_checked(c).ok_or_else(err)?;
let d: Felt = Felt::from_canonical_checked(d).ok_or_else(err)?;
Ok(Word([a, b, c, d]))
}
}
impl TryFrom<&[u8]> for Word {
type Error = WordError;
fn try_from(value: &[u8]) -> Result<Self, Self::Error> {
let value: [u8; WORD_SIZE_BYTES] = value
.try_into()
.map_err(|_| WordError::InvalidInputLength("bytes", WORD_SIZE_BYTES, value.len()))?;
value.try_into()
}
}
impl TryFrom<&[Felt]> for Word {
type Error = WordError;
fn try_from(value: &[Felt]) -> Result<Self, Self::Error> {
let value: [Felt; WORD_SIZE_FELT] = value
.try_into()
.map_err(|_| WordError::InvalidInputLength("elements", WORD_SIZE_FELT, value.len()))?;
Ok(value.into())
}
}
impl TryFrom<&str> for Word {
type Error = WordError;
/// Expects the string to start with `0x`.
fn try_from(value: &str) -> Result<Self, Self::Error> {
hex_to_bytes::<WORD_SIZE_BYTES>(value)
.map_err(WordError::HexParse)
.and_then(Word::try_from)
}
}
impl TryFrom<String> for Word {
type Error = WordError;
/// Expects the string to start with `0x`.
fn try_from(value: String) -> Result<Self, Self::Error> {
value.as_str().try_into()
}
}
impl TryFrom<&String> for Word {
type Error = WordError;
/// Expects the string to start with `0x`.
fn try_from(value: &String) -> Result<Self, Self::Error> {
value.as_str().try_into()
}
}
// SERIALIZATION / DESERIALIZATION
// ================================================================================================
impl Serializable for Word {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
target.write_bytes(&self.as_bytes());
}
fn get_size_hint(&self) -> usize {
Self::SERIALIZED_SIZE
}
}
impl Deserializable for Word {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
let mut inner: [Felt; WORD_SIZE_FELT] = [ZERO; WORD_SIZE_FELT];
for inner in inner.iter_mut() {
let e = source.read_u64()?;
if e >= Felt::ORDER_U64 {
return Err(DeserializationError::InvalidValue(String::from(
"value not in the appropriate range",
)));
}
*inner = Felt::new(e);
}
Ok(Self(inner))
}
}
// ITERATORS
// ================================================================================================
impl IntoIterator for Word {
type Item = Felt;
type IntoIter = <[Felt; 4] as IntoIterator>::IntoIter;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
// MACROS
// ================================================================================================
/// Construct a new [Word](super::Word) from a hex value.
///
/// Expects a '0x' prefixed hex string followed by up to 64 hex digits.
#[macro_export]
macro_rules! word {
($hex:expr) => {{
let word: Word = match $crate::word::Word::parse($hex) {
Ok(v) => v,
Err(e) => panic!("{}", e),
};
word
}};
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/node.rs | miden-crypto/src/merkle/node.rs | use super::Word;
/// Representation of a node with two children used for iterating over containers.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
#[cfg_attr(test, derive(PartialOrd, Ord))]
pub struct InnerNodeInfo {
pub value: Word,
pub left: Word,
pub right: Word,
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/path.rs | miden-crypto/src/merkle/path.rs | use alloc::vec::Vec;
use core::{
num::NonZero,
ops::{Deref, DerefMut},
};
use super::{InnerNodeInfo, MerkleError, NodeIndex, Rpo256, Word};
use crate::utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable};
// MERKLE PATH
// ================================================================================================
/// A merkle path container, composed of a sequence of nodes of a Merkle tree.
///
/// Indexing into this type starts at the deepest part of the path and gets shallower. That is,
/// the node at index `0` is deeper than the node at index `self.len() - 1`.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct MerklePath {
nodes: Vec<Word>,
}
impl MerklePath {
    // CONSTRUCTORS
    // --------------------------------------------------------------------------------------------

    /// Creates a new Merkle path from a list of nodes.
    ///
    /// The list must be in order of deepest to shallowest.
    ///
    /// # Panics
    /// Panics if more than [`u8::MAX`] (255) nodes are provided, since [`Self::depth()`] reports
    /// the path length as a `u8`.
    pub fn new(nodes: Vec<Word>) -> Self {
        // The check is `len <= u8::MAX`, i.e. at most 255 items; the previous message
        // incorrectly claimed 256.
        assert!(nodes.len() <= u8::MAX.into(), "MerklePath may have at most 255 items");
        Self { nodes }
    }

    // PROVIDERS
    // --------------------------------------------------------------------------------------------

    /// Returns a reference to the path node at the specified depth.
    ///
    /// The `depth` parameter is defined in terms of `self.depth()`. Merkle paths conventionally do
    /// not include the root, so the shallowest depth is `1`, and the deepest depth is
    /// `self.depth()`. Returns `None` when `depth` exceeds `self.depth()`.
    pub fn at_depth(&self, depth: NonZero<u8>) -> Option<Word> {
        // Nodes are stored deepest-first, so the node at depth `d` lives at index `depth() - d`.
        let index = u8::checked_sub(self.depth(), depth.get())?;
        self.nodes.get(index as usize).copied()
    }

    /// Returns the depth in which this Merkle path proof is valid.
    pub fn depth(&self) -> u8 {
        self.nodes.len() as u8
    }

    /// Returns a reference to the [MerklePath]'s nodes, in order of deepest to shallowest.
    pub fn nodes(&self) -> &[Word] {
        &self.nodes
    }

    /// Computes the merkle root for this opening.
    ///
    /// # Errors
    /// Returns an error if `index` is not a valid leaf value for a tree of depth `self.depth()`.
    pub fn compute_root(&self, index: u64, node: Word) -> Result<Word, MerkleError> {
        let mut index = NodeIndex::new(self.depth(), index)?;
        let root = self.nodes.iter().copied().fold(node, |node, sibling| {
            // compute the parent node and move one level up for the next iteration
            let input = index.build_node(node, sibling);
            index.move_up();
            Rpo256::merge(&input)
        });
        Ok(root)
    }

    /// Verifies the Merkle opening proof towards the provided root.
    ///
    /// # Errors
    /// Returns an error if:
    /// - provided node index is invalid.
    /// - root calculated during the verification differs from the provided one.
    pub fn verify(&self, index: u64, node: Word, root: &Word) -> Result<(), MerkleError> {
        let computed_root = self.compute_root(index, node)?;
        if &computed_root != root {
            return Err(MerkleError::ConflictingRoots {
                expected_root: *root,
                actual_root: computed_root,
            });
        }
        Ok(())
    }

    /// Given the node this path opens to, return an iterator of all the nodes that are known via
    /// this path.
    ///
    /// Each item in the iterator is an [InnerNodeInfo], containing the hash of a node as `.value`,
    /// and its two children as `.left` and `.right`. The very first item in that iterator will be
    /// the parent of `node_to_prove`; either `left` or `right` will be `node_to_prove` itself, and
    /// the other child will be its sibling as stored in this [MerklePath].
    ///
    /// From there, the iterator will continue to yield every further parent and both of its
    /// children, up to and including the root node.
    ///
    /// If `node_to_prove` is not the node this path is an opening to, or `index` is not the
    /// correct index for that node, the returned nodes will be meaningless.
    ///
    /// # Errors
    /// Returns an error if the specified index is not valid for this path.
    pub fn authenticated_nodes(
        &self,
        index: u64,
        node_to_prove: Word,
    ) -> Result<InnerNodeIterator<'_>, MerkleError> {
        Ok(InnerNodeIterator {
            nodes: &self.nodes,
            index: NodeIndex::new(self.depth(), index)?,
            value: node_to_prove,
        })
    }
}
// CONVERSIONS
// ================================================================================================
impl From<MerklePath> for Vec<Word> {
fn from(path: MerklePath) -> Self {
path.nodes
}
}
impl From<Vec<Word>> for MerklePath {
fn from(path: Vec<Word>) -> Self {
Self::new(path)
}
}
impl From<&[Word]> for MerklePath {
    /// Copies the slice (ordered deepest to shallowest) into a new [MerklePath].
    fn from(path: &[Word]) -> Self {
        Self::new(path.to_owned())
    }
}
impl Deref for MerklePath {
    // `Vec` (rather than a slice) is deliberately exposed so that vector mutation helpers
    // (for example `Vec::remove`) can be called directly on a merkle path.
    type Target = Vec<Word>;

    fn deref(&self) -> &Self::Target {
        let Self { nodes } = self;
        nodes
    }
}
impl DerefMut for MerklePath {
    fn deref_mut(&mut self) -> &mut Self::Target {
        let Self { nodes } = self;
        nodes
    }
}
// ITERATORS
// ================================================================================================
impl FromIterator<Word> for MerklePath {
    /// Collects nodes (deepest first) into a new [MerklePath].
    fn from_iter<T: IntoIterator<Item = Word>>(iter: T) -> Self {
        let nodes: Vec<Word> = iter.into_iter().collect();
        Self::new(nodes)
    }
}
impl IntoIterator for MerklePath {
    type Item = Word;
    type IntoIter = alloc::vec::IntoIter<Word>;

    /// Yields the path's nodes from deepest to shallowest.
    fn into_iter(self) -> Self::IntoIter {
        let nodes = self.nodes;
        nodes.into_iter()
    }
}
/// An iterator over internal nodes of a [MerklePath]. See [`MerklePath::authenticated_nodes()`]
pub struct InnerNodeIterator<'a> {
    // Sibling nodes of the path, deepest first.
    nodes: &'a Vec<Word>,
    // Index of the node whose parent is yielded next; moves one level up per iteration.
    index: NodeIndex,
    // Hash of the current node; replaced by the parent hash on each iteration.
    value: Word,
}
impl Iterator for InnerNodeIterator<'_> {
    type Item = InnerNodeInfo;

    fn next(&mut self) -> Option<Self::Item> {
        // The iterator is exhausted once the walk reaches the root.
        if self.index.is_root() {
            return None;
        }

        // Siblings are stored deepest-first, so the entry for the current level sits
        // `depth` positions from the end of the node list.
        let sibling = self.nodes[self.nodes.len() - self.index.depth() as usize];
        let (left, right) = match self.index.is_value_odd() {
            // odd value => current node is a right child
            true => (sibling, self.value),
            false => (self.value, sibling),
        };

        self.value = Rpo256::merge(&[left, right]);
        self.index.move_up();
        Some(InnerNodeInfo { value: self.value, left, right })
    }
}
// MERKLE PATH CONTAINERS
// ================================================================================================
/// A container for a [crate::Word] value and its [MerklePath] opening.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct MerkleProof {
    /// The node value opening for `path`.
    pub value: Word,
    /// The path from `value` to `root` (exclusive), ordered deepest to shallowest.
    pub path: MerklePath,
}
impl MerkleProof {
/// Returns a new [MerkleProof] instantiated from the specified value and path.
pub fn new(value: Word, path: MerklePath) -> Self {
Self { value, path }
}
}
impl From<(MerklePath, Word)> for MerkleProof {
fn from((path, value): (MerklePath, Word)) -> Self {
MerkleProof::new(value, path)
}
}
/// A container for a [MerklePath] and its [crate::Word] root.
///
/// This structure does not provide any guarantees regarding the correctness of the path to the
/// root. For more information, check [MerklePath::verify].
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct RootPath {
    /// The root the `path` is expected to open to.
    pub root: Word,
    /// The path from the opened value to `root` (exclusive).
    pub path: MerklePath,
}
// SERIALIZATION
// ================================================================================================
impl Serializable for MerklePath {
    /// Writes a one-byte node count followed by the nodes themselves.
    fn write_into<W: ByteWriter>(&self, target: &mut W) {
        let len = self.nodes.len();
        assert!(len <= u8::MAX.into(), "Length enforced in the constructor");
        target.write_u8(len as u8);
        target.write_many(&self.nodes);
    }
}
impl Deserializable for MerklePath {
    /// Reads a one-byte node count followed by that many nodes.
    fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
        let count = source.read_u8()?.into();
        source.read_many::<Word>(count).map(|nodes| Self { nodes })
    }
}
impl Serializable for MerkleProof {
    /// Writes the opened value followed by its path.
    fn write_into<W: ByteWriter>(&self, target: &mut W) {
        let Self { value, path } = self;
        value.write_into(target);
        path.write_into(target);
    }
}
impl Deserializable for MerkleProof {
    /// Reads the opened value followed by its path.
    fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
        let value = Word::read_from(source)?;
        MerklePath::read_from(source).map(|path| Self { value, path })
    }
}
impl Serializable for RootPath {
    /// Writes the root followed by the path.
    fn write_into<W: ByteWriter>(&self, target: &mut W) {
        let Self { root, path } = self;
        root.write_into(target);
        path.write_into(target);
    }
}
impl Deserializable for RootPath {
    /// Reads the root followed by the path.
    fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
        let root = Word::read_from(source)?;
        MerklePath::read_from(source).map(|path| Self { root, path })
    }
}
// TESTS
// ================================================================================================
#[cfg(test)]
mod tests {
    use crate::merkle::{MerklePath, int_to_node};

    /// Checks that the last node yielded by `authenticated_nodes` is the root computed by
    /// `compute_root` for the same opening.
    #[test]
    fn test_inner_nodes() {
        let merkle_path: MerklePath =
            vec![int_to_node(1), int_to_node(2), int_to_node(3), int_to_node(4)].into();
        let index = 6;
        let leaf = int_to_node(5);

        let expected_root = merkle_path.compute_root(index, leaf).unwrap();
        let last_inner = merkle_path.authenticated_nodes(index, leaf).unwrap().last().unwrap();
        assert_eq!(expected_root, last_inner.value);
    }
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/index.rs | miden-crypto/src/merkle/index.rs | use core::fmt::Display;
use p3_field::PrimeField64;
use super::{Felt, MerkleError, Word};
use crate::utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable};
// NODE INDEX
// ================================================================================================
/// Address to an arbitrary node in a binary tree using level order form.
///
/// The position is represented by the pair `(depth, pos)`, where for a given depth `d` elements
/// are numbered from $0..(2^d)-1$. Example:
///
/// ```text
/// depth
/// 0             0
/// 1         0        1
/// 2      0    1    2    3
/// 3     0 1  2 3  4 5  6 7
/// ```
///
/// The root is represented by the pair $(0, 0)$, its left child is $(1, 0)$ and its right child
/// $(1, 1)$.
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct NodeIndex {
    // Distance from the root; the root itself is at depth 0. At most 64.
    depth: u8,
    // Position within the level, in `0..2^depth` (enforced by `NodeIndex::new`).
    value: u64,
}
impl NodeIndex {
    // CONSTRUCTORS
    // --------------------------------------------------------------------------------------------

    /// Creates a new node index.
    ///
    /// # Errors
    /// Returns an error if:
    /// - `depth` is greater than 64.
    /// - `value` is greater than or equal to 2^{depth}.
    pub const fn new(depth: u8, value: u64) -> Result<Self, MerkleError> {
        if depth > 64 {
            Err(MerkleError::DepthTooBig(depth as u64))
        } else if (64 - value.leading_zeros()) > depth as u32 {
            // `64 - leading_zeros` is the bit length of `value`; `value < 2^depth` holds exactly
            // when the bit length does not exceed `depth`.
            Err(MerkleError::InvalidNodeIndex { depth, value })
        } else {
            Ok(Self { depth, value })
        }
    }

    /// Creates a new node index without checking its validity.
    ///
    /// The same invariants as [`Self::new`] are still asserted in debug builds.
    pub const fn new_unchecked(depth: u8, value: u64) -> Self {
        debug_assert!(depth <= 64);
        debug_assert!((64 - value.leading_zeros()) <= depth as u32);
        Self { depth, value }
    }

    /// Creates a new node index for testing purposes.
    ///
    /// # Panics
    /// Panics if the `value` is greater than or equal to 2^{depth}.
    #[cfg(test)]
    pub fn make(depth: u8, value: u64) -> Self {
        Self::new(depth, value).unwrap()
    }

    /// Creates a node index from a pair of field elements representing the depth and value.
    ///
    /// # Errors
    /// Returns an error if:
    /// - `depth` is greater than 64.
    /// - `value` is greater than or equal to 2^{depth}.
    pub fn from_elements(depth: &Felt, value: &Felt) -> Result<Self, MerkleError> {
        let depth = depth.as_canonical_u64();
        // Any canonical depth above u8::MAX is rejected before the stricter check in `new`.
        let depth = u8::try_from(depth).map_err(|_| MerkleError::DepthTooBig(depth))?;
        let value = value.as_canonical_u64();
        Self::new(depth, value)
    }

    /// Creates a new node index pointing to the root of the tree.
    pub const fn root() -> Self {
        Self { depth: 0, value: 0 }
    }

    /// Computes sibling index of the current node.
    ///
    /// Flipping the lowest bit swaps a left child with its right sibling (and vice versa); the
    /// depth is unchanged. Note that applying this to the root yields the pair `(0, 1)`, which
    /// [`Self::new`] would reject.
    pub const fn sibling(mut self) -> Self {
        self.value ^= 1;
        self
    }

    /// Returns left child index of the current node.
    pub const fn left_child(mut self) -> Self {
        self.depth += 1;
        self.value <<= 1;
        self
    }

    /// Returns right child index of the current node.
    pub const fn right_child(mut self) -> Self {
        self.depth += 1;
        self.value = (self.value << 1) + 1;
        self
    }

    /// Returns the parent of the current node. This is the same as [`Self::move_up()`], but returns
    /// a new value instead of mutating `self`.
    ///
    /// The root is its own parent (the depth saturates at 0).
    pub const fn parent(mut self) -> Self {
        self.depth = self.depth.saturating_sub(1);
        self.value >>= 1;
        self
    }

    // PROVIDERS
    // --------------------------------------------------------------------------------------------

    /// Builds a node to be used as input of a hash function when computing a Merkle path.
    ///
    /// Will evaluate the parity of the current instance to define the result: an odd value means
    /// this node is a right child, so the sibling is placed on the left.
    pub const fn build_node(&self, slf: Word, sibling: Word) -> [Word; 2] {
        if self.is_value_odd() {
            [sibling, slf]
        } else {
            [slf, sibling]
        }
    }

    /// Returns the scalar representation of the depth/value pair.
    ///
    /// It is computed as `2^depth + value`.
    ///
    /// NOTE(review): for `depth == 64` (which [`Self::new`] accepts) the shift `1 << 64`
    /// overflows a `u64` (panics in debug builds), so the result is only meaningful for
    /// `depth < 64` — TODO confirm intended usage.
    pub const fn to_scalar_index(&self) -> u64 {
        (1 << self.depth as u64) + self.value
    }

    /// Returns the depth of the current instance.
    pub const fn depth(&self) -> u8 {
        self.depth
    }

    /// Returns the value of this index.
    pub const fn value(&self) -> u64 {
        self.value
    }

    /// Returns `true` if the current instance points to a right sibling node.
    pub const fn is_value_odd(&self) -> bool {
        (self.value & 1) == 1
    }

    /// Returns `true` if the n-th node on the path points to a right child.
    pub const fn is_nth_bit_odd(&self, n: u8) -> bool {
        (self.value >> n) & 1 == 1
    }

    /// Returns `true` if the depth is `0`.
    pub const fn is_root(&self) -> bool {
        self.depth == 0
    }

    // STATE MUTATORS
    // --------------------------------------------------------------------------------------------

    /// Traverses one level towards the root, decrementing the depth by `1`.
    ///
    /// Calling this on the root is a no-op (the depth saturates at 0).
    pub fn move_up(&mut self) {
        self.depth = self.depth.saturating_sub(1);
        self.value >>= 1;
    }

    /// Traverses towards the root until the specified depth is reached.
    ///
    /// Assumes that the specified depth is smaller than the current depth; this precondition is
    /// only checked in debug builds.
    pub fn move_up_to(&mut self, depth: u8) {
        debug_assert!(depth < self.depth);
        let delta = self.depth.saturating_sub(depth);
        self.depth = self.depth.saturating_sub(delta);
        self.value >>= delta as u32;
    }

    // ITERATORS
    // --------------------------------------------------------------------------------------------

    /// Return an iterator of the indices required for a Merkle proof of inclusion of a node at
    /// `self`.
    ///
    /// This is *exclusive* on both ends: neither `self` nor the root index are included in the
    /// returned iterator. The iterator yields the sibling of `self` first, then the sibling of
    /// each successive ancestor on the way to the root.
    pub fn proof_indices(&self) -> impl ExactSizeIterator<Item = NodeIndex> + use<> {
        ProofIter { next_index: self.sibling() }
    }
}
impl Display for NodeIndex {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let Self { depth, value } = self;
        write!(f, "depth={}, value={}", depth, value)
    }
}
impl Serializable for NodeIndex {
    /// Writes the depth (1 byte) followed by the value (8 bytes).
    fn write_into<W: ByteWriter>(&self, target: &mut W) {
        let Self { depth, value } = *self;
        target.write_u8(depth);
        target.write_u64(value);
    }
}
impl Deserializable for NodeIndex {
    /// Reads a depth byte and a value `u64`, validating them via [`NodeIndex::new`].
    fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
        let depth = source.read_u8()?;
        let value = source.read_u64()?;
        match NodeIndex::new(depth, value) {
            Ok(index) => Ok(index),
            Err(_) => Err(DeserializationError::InvalidValue("Invalid index".into())),
        }
    }
}
/// Implementation for [`NodeIndex::proof_indices()`].
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, Hash)]
struct ProofIter {
    // The next sibling index to yield; the iterator is exhausted once this reaches the root.
    next_index: NodeIndex,
}
impl Iterator for ProofIter {
    type Item = NodeIndex;

    fn next(&mut self) -> Option<NodeIndex> {
        match self.next_index.is_root() {
            // The root carries no sibling, so the walk stops here.
            true => None,
            false => {
                let current = self.next_index;
                // The next proof index is the sibling of the current node's parent.
                self.next_index = current.parent().sibling();
                Some(current)
            },
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let n = ExactSizeIterator::len(self);
        (n, Some(n))
    }
}
impl ExactSizeIterator for ProofIter {
    /// One sibling remains per level between the current position and the root.
    fn len(&self) -> usize {
        usize::from(self.next_index.depth())
    }
}
#[cfg(test)]
mod tests {
    use assert_matches::assert_matches;
    use proptest::prelude::*;

    use super::*;

    /// Boundary checks for `NodeIndex::new`: the largest valid value at each depth is accepted
    /// and the smallest invalid one is rejected.
    #[test]
    fn test_node_index_value_too_high() {
        assert_eq!(NodeIndex::new(0, 0).unwrap(), NodeIndex { depth: 0, value: 0 });
        let err = NodeIndex::new(0, 1).unwrap_err();
        assert_matches!(err, MerkleError::InvalidNodeIndex { depth: 0, value: 1 });
        assert_eq!(NodeIndex::new(1, 1).unwrap(), NodeIndex { depth: 1, value: 1 });
        let err = NodeIndex::new(1, 2).unwrap_err();
        assert_matches!(err, MerkleError::InvalidNodeIndex { depth: 1, value: 2 });
        assert_eq!(NodeIndex::new(2, 3).unwrap(), NodeIndex { depth: 2, value: 3 });
        let err = NodeIndex::new(2, 4).unwrap_err();
        assert_matches!(err, MerkleError::InvalidNodeIndex { depth: 2, value: 4 });
        assert_eq!(NodeIndex::new(3, 7).unwrap(), NodeIndex { depth: 3, value: 7 });
        let err = NodeIndex::new(3, 8).unwrap_err();
        assert_matches!(err, MerkleError::InvalidNodeIndex { depth: 3, value: 8 });
    }

    #[test]
    fn test_node_index_can_represent_depth_64() {
        assert!(NodeIndex::new(64, u64::MAX).is_ok());
    }

    prop_compose! {
        fn node_index()(value in 0..2u64.pow(u64::BITS - 1)) -> NodeIndex {
            // The minimal valid depth for `value` is its bit length, i.e. the number of bits
            // needed to represent it (0 for `value == 0`). The previous `ilog2`-based round-up
            // panicked for `value == 0` (`ilog2` is undefined at zero) and computed a depth one
            // too small for exact powers of two (e.g. `value == 2` produced depth 1, which
            // `NodeIndex::new` rejects), so the `unwrap` below could panic.
            //
            // unwrap never panics: `value < 2^depth` holds by construction, and `depth <= 63`
            // because `value < 2^63`.
            let depth = (u64::BITS - value.leading_zeros()) as u8;
            NodeIndex::new(depth, value).unwrap()
        }
    }

    proptest! {
        #[test]
        fn arbitrary_index_wont_panic_on_move_up(
            mut index in node_index(),
            count in prop::num::u8::ANY,
        ) {
            for _ in 0..count {
                index.move_up();
            }
        }
    }
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/empty_roots.rs | miden-crypto/src/merkle/empty_roots.rs | use core::slice;
use super::{EMPTY_WORD, Felt, Word, smt::InnerNode};
// EMPTY NODES SUBTREES
// ================================================================================================
/// Contains precomputed roots of empty subtrees in a Merkle tree.
pub struct EmptySubtreeRoots;

impl EmptySubtreeRoots {
    /// Returns a static slice with roots of empty subtrees of a Merkle tree starting at the
    /// specified depth.
    ///
    /// The returned slice has `tree_depth + 1` entries; entry `d` equals
    /// [`Self::entry`]`(tree_depth, d)`.
    pub const fn empty_hashes(tree_depth: u8) -> &'static [Word] {
        // Slice starts at index `255 - tree_depth` and spans `tree_depth + 1` entries, ending
        // exactly at index 255 of the 256-entry table, so the range is always in bounds.
        let ptr = &EMPTY_SUBTREES[255 - tree_depth as usize] as *const Word;
        // Safety: this is a static/constant array, so it will never be outlived. If we attempt to
        // use regular slices, this wouldn't be a `const` function, meaning we won't be able to use
        // the returned value for static/constant definitions.
        unsafe { slice::from_raw_parts(ptr, tree_depth as usize + 1) }
    }

    /// Returns the node's digest for a sub-tree with all its leaves set to the empty word.
    ///
    /// # Panics
    /// Panics (compile-time error in const contexts) if `node_depth > tree_depth`.
    pub const fn entry(tree_depth: u8, node_depth: u8) -> &'static Word {
        assert!(node_depth <= tree_depth);
        // Entries are indexed so that index 255 corresponds to a node at leaf depth; cannot
        // underflow because `node_depth <= tree_depth <= 255`.
        let pos = 255 - tree_depth + node_depth;
        &EMPTY_SUBTREES[pos as usize]
    }

    /// Returns a sparse Merkle tree [`InnerNode`] with two empty children.
    ///
    /// # Note
    /// `node_depth` is the depth of the **parent** to have empty children. That is, `node_depth`
    /// and the depth of the returned [`InnerNode`] are the same, and thus the empty hashes are for
    /// subtrees of depth `node_depth + 1`.
    pub(crate) const fn get_inner_node(tree_depth: u8, node_depth: u8) -> InnerNode {
        // Both children of an all-empty subtree share the same (empty-subtree) digest.
        let &child = Self::entry(tree_depth, node_depth + 1);
        InnerNode { left: child, right: child }
    }
}
const EMPTY_SUBTREES: [Word; 256] = [
Word::new([
Felt::new(0xee3d94db86d48dc4),
Felt::new(0x3d13166c7aba0368),
Felt::new(0x282e861f2936aa7),
Felt::new(0xf0328a1745537b4),
]),
Word::new([
Felt::new(0x33174b312b730760),
Felt::new(0x9d1e00c5b50352b2),
Felt::new(0x16bf9ec4acef2e42),
Felt::new(0x4263877e63c4cbe7),
]),
Word::new([
Felt::new(0xa8e039042672a8bc),
Felt::new(0x1010d951d941d9d4),
Felt::new(0xd3e8f0ecc866ac3b),
Felt::new(0xb2dbbbb80da232ba),
]),
Word::new([
Felt::new(0xa0d2c1a3e455f299),
Felt::new(0x648e8e13867dc8eb),
Felt::new(0xe79f94ea61189847),
Felt::new(0xb88a89e1f2765a31),
]),
Word::new([
Felt::new(0xe168133fd9ab570d),
Felt::new(0x6fc7d0295ac5a3a2),
Felt::new(0xc973ea026e9411c),
Felt::new(0x63c29ea04552b532),
]),
Word::new([
Felt::new(0x27e57ecc0f9f196),
Felt::new(0xc02f6e29fd19b059),
Felt::new(0x4a42fbc436efb0b6),
Felt::new(0xbff30574a98a1b29),
]),
Word::new([
Felt::new(0x18c32592a4f4d0b),
Felt::new(0x6b9a08797252d5d5),
Felt::new(0xdbfe48f00a088a2),
Felt::new(0x16b4c3e485b173e3),
]),
Word::new([
Felt::new(0x26c8902938b831a5),
Felt::new(0x66ee91b36943f92e),
Felt::new(0x4e8deeafef9f5725),
Felt::new(0xbb35751d5dfb0a33),
]),
Word::new([
Felt::new(0x74af678f8e020ff4),
Felt::new(0xd4784cda0beed295),
Felt::new(0x4380949d841d793c),
Felt::new(0xdf587011d09d3bbb),
]),
Word::new([
Felt::new(0xa211d1da76aaef98),
Felt::new(0xd904ccc6435e268),
Felt::new(0x1c6f16a5d03b8416),
Felt::new(0x87800f7f5da9c93),
]),
Word::new([
Felt::new(0xa00bbad0a52adeff),
Felt::new(0xe22179c651da9d76),
Felt::new(0x474f10493a3723f4),
Felt::new(0x84397e6bd34a1f5b),
]),
Word::new([
Felt::new(0xe8f440afef4d082b),
Felt::new(0x14fff8e329613cc9),
Felt::new(0x78e984bc8b40f4f1),
Felt::new(0x6ed8f02e5be1bab2),
]),
Word::new([
Felt::new(0xda824edf085b5f9f),
Felt::new(0xc8a8f1c1b86d349e),
Felt::new(0xe1bf6975afb7b2de),
Felt::new(0xd7df51ea51028489),
]),
Word::new([
Felt::new(0xf64873d31456de99),
Felt::new(0x1fc9cb920b6c72b),
Felt::new(0x96613d9d71af4373),
Felt::new(0x61d607eb097e76c9),
]),
Word::new([
Felt::new(0xca304d2b3b778719),
Felt::new(0xa54d8602f37eed39),
Felt::new(0xb4574db6dc09bcf2),
Felt::new(0x5e42cd4f1de9587c),
]),
Word::new([
Felt::new(0x17575dfa689d8a07),
Felt::new(0x1db9d374d7436444),
Felt::new(0x21d1e8dca296f38d),
Felt::new(0xbc4aad43a9d93f54),
]),
Word::new([
Felt::new(0x9fa0697330c054cd),
Felt::new(0xd5d57fbf059452e8),
Felt::new(0xe848fafb1c43414c),
Felt::new(0xacb7754fd77c9d52),
]),
Word::new([
Felt::new(0x406af89b918e596c),
Felt::new(0xb735a2c588ee87df),
Felt::new(0xb40ff1dd1c3c6599),
Felt::new(0x675a582b4c8a68ac),
]),
Word::new([
Felt::new(0x530ff6be0c86a2f6),
Felt::new(0x5541fabfefd34c91),
Felt::new(0x4af1579d212149ae),
Felt::new(0x23962b7df862f27c),
]),
Word::new([
Felt::new(0x1676b694f41cfc0d),
Felt::new(0x59b165ea5f354fd8),
Felt::new(0x5b45ee14e2501f08),
Felt::new(0xd0c8ca7bc2e01e18),
]),
Word::new([
Felt::new(0x2cff5d1e629ddc7b),
Felt::new(0x5062be34e4351fe),
Felt::new(0xfd76495b9d8ea67),
Felt::new(0xb96453b1c8060ca8),
]),
Word::new([
Felt::new(0x860b00517d3de1ef),
Felt::new(0xd609c82af07b9dad),
Felt::new(0xa54a528b8f1cbddc),
Felt::new(0xb4fff658ed97e635),
]),
Word::new([
Felt::new(0xd002cea8f347c347),
Felt::new(0xa135cebffdf3ec10),
Felt::new(0xb0200ea08da2cef4),
Felt::new(0x4e6f2f75d627b137),
]),
Word::new([
Felt::new(0xc1983ce677cabbf4),
Felt::new(0x58f0143480f44788),
Felt::new(0xf8c23e4f84b6c6c1),
Felt::new(0xc9ce41371c4900b8),
]),
Word::new([
Felt::new(0x837d99979bc9a5e6),
Felt::new(0x7621559aa4af175a),
Felt::new(0x6986737347c799c2),
Felt::new(0x8cee99eb47c3e702),
]),
Word::new([
Felt::new(0x42e17ba02508a41f),
Felt::new(0xb95e349bd55ba61f),
Felt::new(0xcc2bfeb29c4c68b2),
Felt::new(0xf268f57860a446b1),
]),
Word::new([
Felt::new(0xd3ffd4ccc6dda508),
Felt::new(0x81db1910ef04ca07),
Felt::new(0x5c698ee6c3aeab97),
Felt::new(0x2ac1e2c2c5f237de),
]),
Word::new([
Felt::new(0x1f42a1ef25bd0aad),
Felt::new(0x81b0f63e2760b8db),
Felt::new(0xe9607c7061b018f9),
Felt::new(0xf02a88202294a700),
]),
Word::new([
Felt::new(0xea5da09b39b60468),
Felt::new(0xe48ea41d94fe91a7),
Felt::new(0x24dde954ce08b32b),
Felt::new(0xe1bb6e41bd0613e6),
]),
Word::new([
Felt::new(0xc5e9f7188b43a24f),
Felt::new(0x8d7132abc9d901e4),
Felt::new(0xdc09a33ff4d0eb03),
Felt::new(0xa119bb1db594b4cf),
]),
Word::new([
Felt::new(0x589002afcbd4a233),
Felt::new(0xe4eae44d3c2a308d),
Felt::new(0x8bc0bca14b6b4dde),
Felt::new(0x3716e0e86a7aaa6c),
]),
Word::new([
Felt::new(0xaa4ba9602230007e),
Felt::new(0x2b2c3e14b888a3d4),
Felt::new(0x90a36fb42ec2ba19),
Felt::new(0x2e07ef26b078c4a7),
]),
Word::new([
Felt::new(0x32307da7aad33113),
Felt::new(0x343ed87928b9ab0c),
Felt::new(0x1c01d79482c021f0),
Felt::new(0x6f866afccc595439),
]),
Word::new([
Felt::new(0x9780804b58b0d066),
Felt::new(0x1329929c6dc19c09),
Felt::new(0xc04add06dbaef6bf),
Felt::new(0xf494a28db17c5c4),
]),
Word::new([
Felt::new(0xe9dbb1c64d55571f),
Felt::new(0x663f0f716f28734),
Felt::new(0x7285fd4b8e87a78c),
Felt::new(0x2e152a4595b7597e),
]),
Word::new([
Felt::new(0x5531fabfa5960807),
Felt::new(0x8afe79be96d903a4),
Felt::new(0x24321cce4f1942f8),
Felt::new(0xb1829ec9d60aac8f),
]),
Word::new([
Felt::new(0x9f7afc6634a82d1),
Felt::new(0x496e26bc17af352b),
Felt::new(0x8216f090e1d13381),
Felt::new(0x610cf5a3b3e190f9),
]),
Word::new([
Felt::new(0xb5f8c141a9acd007),
Felt::new(0x4430345ace970576),
Felt::new(0x64d97e5533db3170),
Felt::new(0x95c016d769b0fc2d),
]),
Word::new([
Felt::new(0x88820d6a7ba5a94a),
Felt::new(0x27b614d79eb7b30b),
Felt::new(0xff2751e904085d5f),
Felt::new(0x752509a0860b37d),
]),
Word::new([
Felt::new(0x1070bc84bb53a855),
Felt::new(0x1edad3d5da84e59b),
Felt::new(0x8efd48a13e4dfe0d),
Felt::new(0x3ab20af6203aba62),
]),
Word::new([
Felt::new(0xb4d6d3cc85438d08),
Felt::new(0x5592639fb2792724),
Felt::new(0x5939996ea4c52176),
Felt::new(0xaa83a79236367ee7),
]),
Word::new([
Felt::new(0x4c08ac735aa1925a),
Felt::new(0x84951e177ac84e86),
Felt::new(0xd5b2657778d3271a),
Felt::new(0x375f75333654a77c),
]),
Word::new([
Felt::new(0x2fcbd8fcd125e5),
Felt::new(0xd8f711ed1b369d43),
Felt::new(0x9688301695b6bcd4),
Felt::new(0x52a010319401179),
]),
Word::new([
Felt::new(0x1c67f8fde4c9c070),
Felt::new(0x438ccdf9d82b3a3f),
Felt::new(0xb9324515d5547ff5),
Felt::new(0x85ff37504c8230f0),
]),
Word::new([
Felt::new(0xcf8b6fabda4621f3),
Felt::new(0x1df94bb4ea8aeb6d),
Felt::new(0x8efffb7e8996b9e5),
Felt::new(0xa9aef575e8a86c4d),
]),
Word::new([
Felt::new(0x6e20862a64baaaef),
Felt::new(0xc54fbbfa034d6f1b),
Felt::new(0x16d9fd099f5bba71),
Felt::new(0xe4ac4cf3186fae83),
]),
Word::new([
Felt::new(0x12914625293d7f84),
Felt::new(0xd3b46add4f77be8),
Felt::new(0xaac8846e6eeb9acd),
Felt::new(0xab6a69452b4b167b),
]),
Word::new([
Felt::new(0x69652e812cdfe03d),
Felt::new(0x22731622b139de96),
Felt::new(0xd7226e9a887f368d),
Felt::new(0xe9bbf6ad8f51ee13),
]),
Word::new([
Felt::new(0xc39a01964af141d7),
Felt::new(0xb5ab2062263dcaa2),
Felt::new(0x1d7fbcd9204cbd34),
Felt::new(0xd48c517d5543c163),
]),
Word::new([
Felt::new(0x44118fda0c2b4af2),
Felt::new(0x487d307ce7444bb2),
Felt::new(0x171b7c6a17d734b2),
Felt::new(0xd9a737ddf65949d9),
]),
Word::new([
Felt::new(0xc2cdc1b940450fec),
Felt::new(0x29864b9632eff0cd),
Felt::new(0x9ae31f150850e78c),
Felt::new(0xf9f9d0ef1092be87),
]),
Word::new([
Felt::new(0x1703dd34002f3862),
Felt::new(0xf04b44446be81ea1),
Felt::new(0x8da51598849beb99),
Felt::new(0x8112e155f7f856a0),
]),
Word::new([
Felt::new(0x3d4da8351f41dc1c),
Felt::new(0x682e55817f56f30b),
Felt::new(0xf20cc7fe5b98b951),
Felt::new(0x8297d3de042785d4),
]),
Word::new([
Felt::new(0x1f9d07a435a6d13e),
Felt::new(0x789a1330825c199a),
Felt::new(0x6e058e9dbc30f3a0),
Felt::new(0xb09be46b59290984),
]),
Word::new([
Felt::new(0xaf2d49c9a3975d21),
Felt::new(0xebd4d399fc30a751),
Felt::new(0x224a3884ca353e5d),
Felt::new(0xbebba344bbe055a7),
]),
Word::new([
Felt::new(0xdf576dc16b0abc3f),
Felt::new(0x40439af403c36338),
Felt::new(0x317b1f2308849c53),
Felt::new(0x91e5c9d14107cb04),
]),
Word::new([
Felt::new(0x93af916aa15f97e2),
Felt::new(0x50d4aec3e408fba7),
Felt::new(0xd16bd5f71b6d6915),
Felt::new(0x27b96db871be03ef),
]),
Word::new([
Felt::new(0x72fce6dd7d54e348),
Felt::new(0x632a2e8b6177c670),
Felt::new(0xefd897bebdc4ec2b),
Felt::new(0xfe66bfe440033790),
]),
Word::new([
Felt::new(0xc581364aef408d6a),
Felt::new(0xfcc7efb35cccae32),
Felt::new(0xee0a97dded065fbf),
Felt::new(0x2b1eb2c45fd0e633),
]),
Word::new([
Felt::new(0x9e460e8159152a88),
Felt::new(0xcc5a2946f03bf507),
Felt::new(0x95535e9cf29e4ab9),
Felt::new(0x29b23d31ffe6df18),
]),
Word::new([
Felt::new(0xbae2c405d8ba715d),
Felt::new(0xb886f0545ae16153),
Felt::new(0x728d5965a4cdfc0b),
Felt::new(0x86bd552048f3ebc4),
]),
Word::new([
Felt::new(0x3a4c6dbaa6feda93),
Felt::new(0x8a32917885a3f22c),
Felt::new(0xd6016ba7fc1a0717),
Felt::new(0x3bfd41569497b156),
]),
Word::new([
Felt::new(0xa907fad371653f15),
Felt::new(0x6be9ce6ac746f5bc),
Felt::new(0x1bee5ac8750d2444),
Felt::new(0x16050d83d4f7a90c),
]),
Word::new([
Felt::new(0x4b194182aa7e9324),
Felt::new(0x813af49c845cea5e),
Felt::new(0x6886f4d8628bab16),
Felt::new(0xe3b6ef1419e2432c),
]),
Word::new([
Felt::new(0x3edc103de28f1fac),
Felt::new(0xb6a05b8802d6ed5c),
Felt::new(0xf320c3f130a175c8),
Felt::new(0x326c8bb02f9a51f6),
]),
Word::new([
Felt::new(0x5b1ac27a49b5d1da),
Felt::new(0x9e1fa75b04da7545),
Felt::new(0x9a522396a1cd68af),
Felt::new(0x91a4d435f3fcd43f),
]),
Word::new([
Felt::new(0x318ac5d8f1e489ce),
Felt::new(0x339e7a0b2aec5843),
Felt::new(0x38f15bf9832a2c28),
Felt::new(0x5e3fef94216f72f1),
]),
Word::new([
Felt::new(0xc43e0723d2a7e79c),
Felt::new(0xa06167cc0ebdf1e5),
Felt::new(0xe62f10089af57ba6),
Felt::new(0x838c863d60b859a2),
]),
Word::new([
Felt::new(0xd10456af5f30e5d5),
Felt::new(0x235df7fe21fb912c),
Felt::new(0xe5acc29d13d80779),
Felt::new(0x580b83247a1f6524),
]),
Word::new([
Felt::new(0x2a8b1bf7e9bc5675),
Felt::new(0x9e523f2d659a3e30),
Felt::new(0x3ecfdb1615666b74),
Felt::new(0xf53746b86fedee7f),
]),
Word::new([
Felt::new(0xa12095b3b22680a9),
Felt::new(0x3010ad751585161d),
Felt::new(0xfb9c0ea33c7437b2),
Felt::new(0x9225d8151ec724a8),
]),
Word::new([
Felt::new(0x1b09eac8ad815107),
Felt::new(0x33cb241ad41b562d),
Felt::new(0xa04f457b4cd1ece9),
Felt::new(0x84f27a45985d700e),
]),
Word::new([
Felt::new(0xe5598d92d1507185),
Felt::new(0x84aa2bf7d87a26e8),
Felt::new(0x158f0e13550dec2a),
Felt::new(0x54d699e5eb65ee63),
]),
Word::new([
Felt::new(0x902e89f122f8f8f7),
Felt::new(0xc2da7127af8c699a),
Felt::new(0x75762e75b77a1662),
Felt::new(0x7e683b3c116af130),
]),
Word::new([
Felt::new(0xabc2aa2ecd2316dd),
Felt::new(0x44558fa721857f00),
Felt::new(0xf61dd475fdbc23d0),
Felt::new(0x22ba84332065a9e8),
]),
Word::new([
Felt::new(0x5aa94e045e4bb7ae),
Felt::new(0xf6ddadbdd8747728),
Felt::new(0xeeab65efab2a1d2),
Felt::new(0xd12cc579c49b9db5),
]),
Word::new([
Felt::new(0x71ea68262a73196a),
Felt::new(0x9612483af09f1bde),
Felt::new(0x7fe5fd69bbf241a4),
Felt::new(0x34de27c57b37975d),
]),
Word::new([
Felt::new(0xf29bc8ba140714f6),
Felt::new(0xf0b44caca4f6561e),
Felt::new(0x742695d702446774),
Felt::new(0x7e1437b52ee16c0c),
]),
Word::new([
Felt::new(0x13f6180493eaa129),
Felt::new(0x8fa2e77f499c911c),
Felt::new(0x1223e5ccda975bf),
Felt::new(0xc2a362e5449eac8b),
]),
Word::new([
Felt::new(0xcf1254ec733c8fb0),
Felt::new(0x34359ae1e2272fc9),
Felt::new(0xce928a65262d59d5),
Felt::new(0xc84e1f72e2e78101),
]),
Word::new([
Felt::new(0x8841b659676a2df5),
Felt::new(0x4c808c965135ff8f),
Felt::new(0x374d574fd96ee7d1),
Felt::new(0xa0ae0e5765bc8716),
]),
Word::new([
Felt::new(0xba3692cf34a6eb7a),
Felt::new(0x384dce8b1fd8fcd5),
Felt::new(0x248f1c83f6cf6055),
Felt::new(0xbf50ca14b3c5b022),
]),
Word::new([
Felt::new(0x18611824fa468341),
Felt::new(0xaab4187ff224ec04),
Felt::new(0x4ad742d8a070d084),
Felt::new(0xfa3bb42df7d86480),
]),
Word::new([
Felt::new(0x2ab25bf43fc462b5),
Felt::new(0x6ac0cc243f54b796),
Felt::new(0x2401eabf391a2199),
Felt::new(0x62a71dae211b983),
]),
Word::new([
Felt::new(0xbc5e568df9f18772),
Felt::new(0xee864850b75a99ba),
Felt::new(0x2a53e3e6776ae456),
Felt::new(0x8eb51bedbe483d7c),
]),
Word::new([
Felt::new(0xce8161f4c705bfbb),
Felt::new(0xf1071a4e343a37e9),
Felt::new(0xddc4878a9e5de00f),
Felt::new(0xee33d737cd3c5dc8),
]),
Word::new([
Felt::new(0x9eadd43aebfcd43d),
Felt::new(0xf35cec43429c0a95),
Felt::new(0xcad253fc16b63e5a),
Felt::new(0xea25dc9baaf21d38),
]),
Word::new([
Felt::new(0xa85a87fbf220f449),
Felt::new(0x1db1c09109882161),
Felt::new(0xab5139cb30eb2c88),
Felt::new(0xe62f2ade31d95b14),
]),
Word::new([
Felt::new(0xad3fae6f7f635376),
Felt::new(0x21e5dba9b8e21ac8),
Felt::new(0x86506eeeba6c7151),
Felt::new(0x6bf71fdffc8d9ae7),
]),
Word::new([
Felt::new(0x37ec52a9396f4574),
Felt::new(0xf19404a514aa9285),
Felt::new(0x3ed5ae669769c4e7),
Felt::new(0x2286b493b85c9481),
]),
Word::new([
Felt::new(0xc37fc37b83940bd2),
Felt::new(0xe3d67417540b620b),
Felt::new(0x1495f7a7848dde0a),
Felt::new(0xeaf4f9c053465ff),
]),
Word::new([
Felt::new(0x80131752569df8f0),
Felt::new(0x30720a862b82f732),
Felt::new(0xabed5fb95dbe678b),
Felt::new(0x6cf7da37075ad45e),
]),
Word::new([
Felt::new(0xa318ea66909473fe),
Felt::new(0x4a6c6ebc4bee8b3c),
Felt::new(0xf0d622f04ce1b02e),
Felt::new(0x92c2f8e192c000a1),
]),
Word::new([
Felt::new(0xb39d728756dca017),
Felt::new(0x4f66acee5bcd7d98),
Felt::new(0xf623331bed29e125),
Felt::new(0xbcfc777f0eb03793),
]),
Word::new([
Felt::new(0x6cdabd98e067b039),
Felt::new(0xd6356a27c3df3ddc),
Felt::new(0xd5afb88820db9d2f),
Felt::new(0x8203a7adfa667bfc),
]),
Word::new([
Felt::new(0x1ddef8e482da50e0),
Felt::new(0x7fa3c9c0865609ec),
Felt::new(0x6ca762886d4d6227),
Felt::new(0x9a95160f2a4fe5d9),
]),
Word::new([
Felt::new(0x607230c3b366dbd5),
Felt::new(0x5b996a7d876b7602),
Felt::new(0xf61df5d15469c8ea),
Felt::new(0x9bb4f5c06ac49403),
]),
Word::new([
Felt::new(0x6a27c9e7082595e7),
Felt::new(0xbf93eb89e2090438),
Felt::new(0xd2db18139bedc636),
Felt::new(0x79710c33a1f1f612),
]),
Word::new([
Felt::new(0xf54e4461aa09608b),
Felt::new(0x898a7b52804d88c9),
Felt::new(0xbc548fab0257ea25),
Felt::new(0xe783017a62b49474),
]),
Word::new([
Felt::new(0xf7efdb376a7734c9),
Felt::new(0x2d4ded56d9ef2076),
Felt::new(0xa17d90a509b879d0),
Felt::new(0xcf012a20045b29e1),
]),
Word::new([
Felt::new(0x37e40a30232a4f06),
Felt::new(0xfbd9877fb761052e),
Felt::new(0xc4c41f56a70377cd),
Felt::new(0x631e942f6680d4cc),
]),
Word::new([
Felt::new(0xcf868b6d54b515a5),
Felt::new(0xa522edf7c43f7aee),
Felt::new(0x66057652f34d479),
Felt::new(0x59f4a86223bc80bd),
]),
Word::new([
Felt::new(0xb7214ce5a0ba8dfd),
Felt::new(0x5c7a6e583e4e255e),
Felt::new(0xabc8369f8bf38a1c),
Felt::new(0xb5db79ae07f0689c),
]),
Word::new([
Felt::new(0x18c980169ef2d0bb),
Felt::new(0x6526b64df8eb4eac),
Felt::new(0xfe4d8327ca5bd91a),
Felt::new(0xe36d607069c7dd85),
]),
Word::new([
Felt::new(0x602a97209948e5cc),
Felt::new(0xb7d19db914da726),
Felt::new(0xe4e43672c24d376c),
Felt::new(0x8bb9f7465e019213),
]),
Word::new([
Felt::new(0x187bff077d393e3d),
Felt::new(0x17fb9a97c5055580),
Felt::new(0x618469c060eb2719),
Felt::new(0xfc7be4b58477e5ac),
]),
Word::new([
Felt::new(0x1d40fcbc7a25cc97),
Felt::new(0xaee142f7cebadbd5),
Felt::new(0x22dbaed94300ddf8),
Felt::new(0xe069c36278753a06),
]),
Word::new([
Felt::new(0xcd1e21c5f02ce44d),
Felt::new(0x3b0ddbaa04daff25),
Felt::new(0xbb55cd14f54818c7),
Felt::new(0xc57f1b84ed302102),
]),
Word::new([
Felt::new(0x5c8e1f56cbdb0f87),
Felt::new(0xeeeb31b4d317cf1d),
Felt::new(0x8bf45cd3659a6d1),
Felt::new(0x9e179aa20693175a),
]),
Word::new([
Felt::new(0x10f58975fbb0fca),
Felt::new(0x5f35c19eb0f615c1),
Felt::new(0x9870cdafe46a3d),
Felt::new(0xcec9d9f3925df88b),
]),
Word::new([
Felt::new(0x89e90b2f029b50c0),
Felt::new(0xd78a4223d0036c8a),
Felt::new(0x996b326a1d5cd76d),
Felt::new(0x5b314d29bb1694e3),
]),
Word::new([
Felt::new(0x1be6e6955ba0f3a8),
Felt::new(0xc7e07c49076315ef),
Felt::new(0x93e91de5c7849fb2),
Felt::new(0xe81bc86fc641596f),
]),
Word::new([
Felt::new(0x5320464735f18522),
Felt::new(0x1a741214432ca63d),
Felt::new(0xaf3ed59d324bdbe8),
Felt::new(0x2493eb414c91ac94),
]),
Word::new([
Felt::new(0x35897b61f231fa86),
Felt::new(0xb1531e954332f229),
Felt::new(0x92e950b1c1f874a),
Felt::new(0x469de0412ca52491),
]),
Word::new([
Felt::new(0x1ecea76deca59ec5),
Felt::new(0xe884b570f5d54e45),
Felt::new(0x58939f3a1b5bc7e1),
Felt::new(0xf14eab10f926958f),
]),
Word::new([
Felt::new(0x26251aa927a69723),
Felt::new(0xb1808fe0795ab008),
Felt::new(0xd195fe923d1944c9),
Felt::new(0x2334a61c28dc63c),
]),
Word::new([
Felt::new(0xe4b659081d9cf4e4),
Felt::new(0xf1174a5f72916819),
Felt::new(0x1de902b42b3b4054),
Felt::new(0xbe2bc215120367d0),
]),
Word::new([
Felt::new(0xfc87b8043d32428f),
Felt::new(0x8f8cb244e3ddf6da),
Felt::new(0xc7539186ece143a7),
Felt::new(0xf28008f902075229),
]),
Word::new([
Felt::new(0xf76c24c9f86c44d3),
Felt::new(0x97c7abcbb6d07d35),
Felt::new(0x9d8e37a1697a0d4),
Felt::new(0xa3f818e48770f5fa),
]),
Word::new([
Felt::new(0x885686c79c1cd95e),
Felt::new(0xcdebe76fd203c23e),
Felt::new(0xdf9b7cd5099673ed),
Felt::new(0xe60438536ad13270),
]),
Word::new([
Felt::new(0x7790809942b9389d),
Felt::new(0xa3d82432c31de99),
Felt::new(0xaea11fece88c7d27),
Felt::new(0x5cc764da96d0b2f0),
]),
Word::new([
Felt::new(0x80e555c41170427f),
Felt::new(0x87e68144276d79c8),
Felt::new(0xebdc63f28aa58a53),
Felt::new(0x168dd22672627819),
]),
Word::new([
Felt::new(0xea1dc59c29da5b6c),
Felt::new(0xa33188c0a077761),
Felt::new(0xabd3c84cddbe1477),
Felt::new(0xd28244bc92f36e0f),
]),
Word::new([
Felt::new(0xdadc2beb7ccfe3fa),
Felt::new(0x218532461f981fb4),
Felt::new(0xf0455f1d4e2f9732),
Felt::new(0xa7338b43d2b7e62d),
]),
Word::new([
Felt::new(0x195d8bc1cfe2711a),
Felt::new(0x44e392ba7e259f47),
Felt::new(0x480120d41e18ab3c),
Felt::new(0x2056ffb29c2d89d1),
]),
Word::new([
Felt::new(0x382e33ba5fe6ada3),
Felt::new(0x45402a8903efebc9),
Felt::new(0xb9b0d63a59c70da),
Felt::new(0x7afebd4726d8cfe5),
]),
Word::new([
Felt::new(0xbf60bf6b45a4c9d),
Felt::new(0xfb5b9b553646f19c),
Felt::new(0x9949b60ce7639da3),
Felt::new(0x9c62552c0d1868ff),
]),
Word::new([
Felt::new(0xdb2a0aba0fc5e4f8),
Felt::new(0x8ee4f01d4b0fa49e),
Felt::new(0xd70a17a77b5c4a03),
Felt::new(0x57aaaa5b48fea66e),
]),
Word::new([
Felt::new(0x6d635940443564cb),
Felt::new(0xc7fbf0e26b5e3ff6),
Felt::new(0xa45bce664368b65e),
Felt::new(0xd6c5c1a92be0c60d),
]),
Word::new([
Felt::new(0x6ea62d6033fb2dd3),
Felt::new(0x1a37910cf90ec6d8),
Felt::new(0x83d826e9933760b5),
Felt::new(0xf8387c90d9c6b5a9),
]),
Word::new([
Felt::new(0x134766f1da2fbc91),
Felt::new(0xcfaeea545df2c757),
Felt::new(0xd0accefaed1eaa0f),
Felt::new(0xec38d4053f84b163),
]),
Word::new([
Felt::new(0xb02ad1e757380aee),
Felt::new(0x4538b8ea13112d4),
Felt::new(0xb2d761fe842a2a85),
Felt::new(0x8e98d58adf5a1f29),
]),
Word::new([
Felt::new(0x44603d9549ddee64),
Felt::new(0x43de72d570967bbb),
Felt::new(0x4a3e71144e62d0fa),
Felt::new(0xffb2fdcb48965939),
]),
Word::new([
Felt::new(0x606f3ee12fe9ec0c),
Felt::new(0xe7d494ab8e483d87),
Felt::new(0x3b47f7c0d316cd4a),
Felt::new(0x86f941c7fa834581),
]),
Word::new([
Felt::new(0x30c2385facf08b86),
Felt::new(0x4446168e25ac2c21),
Felt::new(0x61c6db1c3f283b21),
Felt::new(0x2fdf6bc360bf803),
]),
Word::new([
Felt::new(0xeec8d9cc3e46d243),
Felt::new(0x65bcae511dcce39),
Felt::new(0xd3da5bbfdbd09cd3),
Felt::new(0xe7c35fc3d11216a5),
]),
Word::new([
Felt::new(0x841fb6fb35e7b49b),
Felt::new(0xfc4e2e1239caa7b8),
Felt::new(0x37cb93ec88f102e5),
Felt::new(0xa707a1556032152c),
]),
Word::new([
Felt::new(0x37c67bd7b7cef984),
Felt::new(0x75bbe46da2ee5c90),
Felt::new(0x3a5c568d1f71cab1),
Felt::new(0x36939cdca2dc0b55),
]),
Word::new([
Felt::new(0x4f76756a55f3a644),
Felt::new(0xd30f8fa45394aff4),
Felt::new(0x65c55096158b202f),
Felt::new(0x368a5fb0b0d475d0),
]),
Word::new([
Felt::new(0xa9b9acd256cabb0f),
Felt::new(0xd8b1170f301208c7),
Felt::new(0xab152f908d46bf8),
Felt::new(0x1b7a10556730ec16),
]),
Word::new([
Felt::new(0xd967a72076e3059c),
Felt::new(0xbd1015a08ffe8881),
Felt::new(0xf72f186dde0c6e78),
Felt::new(0xa58910205352895a),
]),
Word::new([
Felt::new(0x130333f2fd400a4d),
Felt::new(0xf20104837a118d6e),
Felt::new(0xda1e5d608fb9062c),
Felt::new(0xb8ac5c76d60950b8),
]),
Word::new([
Felt::new(0x65d0deae6fb0c6cb),
Felt::new(0x1b442ae344dcd9e7),
Felt::new(0x1eedabab8fc07fa4),
Felt::new(0xb0dc89b96f256189),
]),
Word::new([
Felt::new(0xef88de626968c17a),
Felt::new(0x569a01072cdbbc2b),
Felt::new(0xc99bbba6d083c68f),
Felt::new(0x9ed4a176fe341849),
]),
Word::new([
Felt::new(0x5d49d1e9d17448a6),
Felt::new(0x6974d510bc47ee66),
Felt::new(0xbcbea4dec0b68586),
Felt::new(0xdaa5457e0cfd3e61),
]),
Word::new([
Felt::new(0x9fceba739503cda0),
Felt::new(0xb9daf271ac42c8ba),
Felt::new(0x10fe3e8de8680d83),
Felt::new(0xd7e1dc73ced7730b),
]),
Word::new([
Felt::new(0x93ec6c422d4271ea),
Felt::new(0x73923813232b0e70),
Felt::new(0xbbe6a4441a900b65),
Felt::new(0x36b2164f37c9319b),
]),
Word::new([
Felt::new(0xce3ecb2eed624694),
Felt::new(0xb7e1d75fff7a454c),
Felt::new(0x86c24aa3a8d92d2b),
Felt::new(0xb1ba74cafa9ce649),
]),
Word::new([
Felt::new(0xb5fae724eb357479),
Felt::new(0x359532ddc4840cb9),
Felt::new(0x4b111251e037e9fa),
Felt::new(0xfcdab1cdd314c1d9),
]),
Word::new([
Felt::new(0xb3a89464d21c9ff1),
Felt::new(0x8136e1b457a59ca8),
Felt::new(0x88b0fa606b53c4d5),
Felt::new(0x89645f8a9dfe97a2),
]),
Word::new([
Felt::new(0xfe115ef35b814cbf),
Felt::new(0x63de467fb93b6851),
Felt::new(0x17c73b03c9f44ad8),
Felt::new(0x53742721f568b5be),
]),
Word::new([
Felt::new(0xd8110ea6e905cc2),
Felt::new(0xd67b3c7cea25100),
Felt::new(0x9e49b38ed51d1c60),
Felt::new(0xe9e24f9b597c9bfd),
]),
Word::new([
Felt::new(0xefe9086b5bb5a504),
Felt::new(0x991f92a90c9346a3),
Felt::new(0xe4fab215a20f453b),
Felt::new(0x4e4d4dde9146d61a),
]),
Word::new([
Felt::new(0xaa998c3b26497ffa),
Felt::new(0x985bd5cf4ccefb3c),
Felt::new(0xce44e80aa02424bb),
Felt::new(0x75158a37503aed75),
]),
Word::new([
Felt::new(0xdb61760c917116f1),
Felt::new(0xf378c9645174a832),
Felt::new(0x1216aa71b73e7fac),
Felt::new(0x8a4e7f0591a129fd),
]),
Word::new([
Felt::new(0xaf11a04daaf4ed67),
Felt::new(0xd3e59f0d7dad9064),
Felt::new(0x30c206089a2c294d),
Felt::new(0xe104db59761e8a99),
]),
Word::new([
Felt::new(0x70b545ba7a6d447),
Felt::new(0x6ac0e423ddf68913),
Felt::new(0xf9b50997257bb033),
Felt::new(0xdac37c7b1c18b48a),
]),
Word::new([
Felt::new(0xd182b9dff0fcd5c0),
Felt::new(0xf87619ae86b6eb02),
Felt::new(0x6838c1b612b17cb5),
Felt::new(0x9b705d5b6bcf92c),
]),
Word::new([
Felt::new(0xfba622b3026c6193),
Felt::new(0xdacde486f8129b96),
Felt::new(0xd5acd22a7c2cf6aa),
Felt::new(0xf5beb40535e6c0f2),
]),
Word::new([
Felt::new(0x59bde17b2d501969),
Felt::new(0xb4abe1389123d3b9),
Felt::new(0x683d8dd8635d9a67),
Felt::new(0x347e01da4c07833),
]),
Word::new([
Felt::new(0x4e28956ab7162a06),
Felt::new(0xccfcc7358f48c727),
Felt::new(0x7b3485f20c979144),
Felt::new(0xeeb27fa694f1c8fd),
]),
Word::new([
Felt::new(0x275b2c0ee883807b),
Felt::new(0x8f68f2016c1391cd),
Felt::new(0xb59fdccb20322765),
Felt::new(0xeb9b902c5351d5d4),
]),
Word::new([
Felt::new(0xb767d8cb8816cc8e),
Felt::new(0xbd29bb02cdcbc9af),
Felt::new(0xeb1dca9bfebee6f),
Felt::new(0x57597da8109c0354),
]),
Word::new([
Felt::new(0xeb32a8db8cf216),
Felt::new(0xeb5532ac68f304c1),
Felt::new(0x9bca72ffccb957ee),
Felt::new(0x33d4b152ebedb841),
]),
Word::new([
Felt::new(0x439b20dce9810169),
Felt::new(0x2b693e2530a1b88c),
Felt::new(0x36b8898f4e900c7a),
Felt::new(0x7bf5064dde3a0da1),
]),
Word::new([
Felt::new(0x8794201ce6158fe0),
Felt::new(0xfcc53644557471f3),
Felt::new(0xa66d87f6ae6f64d0),
Felt::new(0x4e876d9d933b2ad0),
]),
Word::new([
Felt::new(0x6ff8f4900e43bab6),
Felt::new(0x40014f298cb7b9a3),
Felt::new(0x9d6b252ff946ee3d),
Felt::new(0xb014d99ab8508072),
]),
Word::new([
Felt::new(0x9cdd5a4a37511cae),
Felt::new(0x684444122d770c18),
Felt::new(0x8982944b22a22577),
Felt::new(0x50a58d944629de54),
]),
Word::new([
Felt::new(0x853f5b8ad557fac3),
Felt::new(0xdab1743c03b8da56),
Felt::new(0xc70d6683d4f4c086),
Felt::new(0x2f1d0f67a5dfae4c),
]),
Word::new([
Felt::new(0xf3b6fe76eb11284),
Felt::new(0xbeb9e98b146c63a8),
Felt::new(0xc7e8824fce7777ad),
Felt::new(0x5229918b04410d6a),
]),
Word::new([
Felt::new(0xc170c46601ffc4f3),
Felt::new(0x1258e8e47103c39b),
Felt::new(0x612e99da984aac99),
Felt::new(0xc82fcfcf56d6dd94),
]),
Word::new([
Felt::new(0xf793819d04d5679d),
Felt::new(0xb738b97ec0a52dd3),
Felt::new(0x4df897389119a098),
Felt::new(0xa5af45eb0d007785),
]),
Word::new([
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | true |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/sparse_path.rs | miden-crypto/src/merkle/sparse_path.rs | use alloc::{borrow::Cow, vec::Vec};
use core::{
iter::{self, FusedIterator},
num::NonZero,
};
use super::{
EmptySubtreeRoots, InnerNodeInfo, MerkleError, MerklePath, NodeIndex, Word, smt::SMT_MAX_DEPTH,
};
use crate::{
hash::rpo::Rpo256,
utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable},
};
/// A different representation of [`MerklePath`] designed for memory efficiency for Merkle paths
/// with empty nodes.
///
/// Empty nodes in the path are stored only as their position, represented with a bitmask. A
/// maximum of 64 nodes (`SMT_MAX_DEPTH`) can be stored (empty and non-empty). The more nodes in a
/// path are empty, the less memory this struct will use. This type calculates empty nodes on-demand
/// when iterated through, converted to a [MerklePath], or an empty node is retrieved with
/// [`SparseMerklePath::at_depth()`], which will incur overhead.
///
/// NOTE: This type assumes that Merkle paths always span from the root of the tree to a leaf.
/// Partial paths are not supported.
// NOTE(review): the serde derives below build the struct field-by-field and therefore skip the
// consistency checks done in `from_parts` — confirm whether serde inputs are always trusted.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct SparseMerklePath {
    /// A bitmask representing empty nodes. The set bit corresponds to the depth of an empty node.
    /// The least significant bit (bit 0) describes depth 1 node (root's children).
    /// The `bit index + 1` is equal to node's depth.
    empty_nodes_mask: u64,
    /// The non-empty nodes, stored in depth-order, but not contiguous across depth.
    nodes: Vec<Word>,
}
impl SparseMerklePath {
    /// Constructs a new sparse Merkle path from a bitmask of empty nodes and a vector of non-empty
    /// nodes.
    ///
    /// The `empty_nodes_mask` is a bitmask where each set bit indicates that the node at that
    /// depth is empty. The least significant bit (bit 0) describes depth 1 node (root's children).
    /// The `bit index + 1` is equal to node's depth.
    /// The `nodes` vector must contain the non-empty nodes in depth order.
    ///
    /// # Errors
    /// - [MerkleError::InvalidPathLength] if the provided `nodes` vector is shorter than the
    ///   minimum length required by the `empty_nodes_mask`.
    /// - [MerkleError::DepthTooBig] if the total depth of the path (calculated from the
    ///   `empty_nodes_mask` and `nodes`) is greater than [SMT_MAX_DEPTH].
    pub fn from_parts(empty_nodes_mask: u64, nodes: Vec<Word>) -> Result<Self, MerkleError> {
        // The most significant set bit in the mask marks the minimum length of the path.
        // For every zero bit before the first set bit, there must be a corresponding node in
        // `nodes`.
        // For example, if the mask is `0b1100`, this means that the first two nodes
        // (depths 1 and 2) are non-empty, and the next two nodes (depths 3 and 4) are empty.
        // The minimum length of the path is 4, and the `nodes` vector must contain at least 2
        // nodes to account for the first two zeroes in the mask (depths 1 and 2).
        let min_path_len = u64::BITS - empty_nodes_mask.leading_zeros();
        let empty_nodes_count = empty_nodes_mask.count_ones();
        let min_non_empty_nodes = (min_path_len - empty_nodes_count) as usize;
        if nodes.len() < min_non_empty_nodes {
            return Err(MerkleError::InvalidPathLength(min_non_empty_nodes));
        }

        // Validate the total depth in `usize` BEFORE narrowing to `u8`: casting first would
        // silently truncate large `nodes` lengths (e.g. 256 nodes with an empty mask would wrap
        // to depth 0 and incorrectly pass this check).
        let depth = Self::depth_from_parts(empty_nodes_mask, &nodes);
        if depth > SMT_MAX_DEPTH as usize {
            return Err(MerkleError::DepthTooBig(depth as u64));
        }

        Ok(Self { empty_nodes_mask, nodes })
    }

    /// Constructs a sparse Merkle path from an iterator over Merkle nodes that also knows its
    /// exact size (such as iterators created with [Vec::into_iter]). The iterator must be in order
    /// of deepest to shallowest.
    ///
    /// Knowing the size is necessary to calculate the depth of the tree, which is needed to detect
    /// which nodes are empty nodes.
    ///
    /// # Errors
    /// Returns [MerkleError::DepthTooBig] if `tree_depth` is greater than [SMT_MAX_DEPTH].
    pub fn from_sized_iter<I>(iterator: I) -> Result<Self, MerkleError>
    where
        I: IntoIterator<IntoIter: ExactSizeIterator, Item = Word>,
    {
        let iterator = iterator.into_iter();
        // Check the length in `usize` before narrowing to `u8`, so oversized iterators
        // (len > 255) are rejected instead of having their length silently truncated.
        let path_len = iterator.len();
        if path_len > SMT_MAX_DEPTH as usize {
            return Err(MerkleError::DepthTooBig(path_len as u64));
        }
        let tree_depth = path_len as u8;

        let mut empty_nodes_mask: u64 = 0;
        let mut nodes: Vec<Word> = Default::default();
        // A node equal to the empty-subtree root at its depth is stored only as a mask bit.
        for (depth, node) in iter::zip(path_depth_iter(tree_depth), iterator) {
            let &equivalent_empty_node = EmptySubtreeRoots::entry(tree_depth, depth.get());
            let is_empty = node == equivalent_empty_node;
            let node = if is_empty { None } else { Some(node) };
            match node {
                Some(node) => nodes.push(node),
                None => empty_nodes_mask |= Self::bitmask_for_depth(depth),
            }
        }

        Ok(SparseMerklePath { nodes, empty_nodes_mask })
    }

    /// Returns the total depth of this path, i.e., the number of nodes this path represents.
    pub fn depth(&self) -> u8 {
        // Safe to narrow: construction guarantees the total depth never exceeds SMT_MAX_DEPTH.
        Self::depth_from_parts(self.empty_nodes_mask, &self.nodes) as u8
    }

    /// Get a specific node in this path at a given depth.
    ///
    /// The `depth` parameter is defined in terms of `self.depth()`. Merkle paths conventionally do
    /// not include the root, so the shallowest depth is `1`, and the deepest depth is
    /// `self.depth()`.
    ///
    /// # Errors
    /// Returns [MerkleError::DepthTooBig] if `node_depth` is greater than the total depth of this
    /// path.
    pub fn at_depth(&self, node_depth: NonZero<u8>) -> Result<Word, MerkleError> {
        if node_depth.get() > self.depth() {
            return Err(MerkleError::DepthTooBig(node_depth.get().into()));
        }

        // Empty nodes are not materialized; compute their hash from the empty-subtree table.
        let node = if let Some(nonempty_index) = self.get_nonempty_index(node_depth) {
            self.nodes[nonempty_index]
        } else {
            *EmptySubtreeRoots::entry(self.depth(), node_depth.get())
        };

        Ok(node)
    }

    /// Deconstructs this path into its component parts.
    ///
    /// Returns a tuple containing:
    /// - a bitmask where each set bit indicates that the node at that depth is empty. The least
    ///   significant bit (bit 0) describes depth 1 node (root's children).
    /// - a vector of non-empty nodes in depth order.
    pub fn into_parts(self) -> (u64, Vec<Word>) {
        (self.empty_nodes_mask, self.nodes)
    }

    // PROVIDERS
    // ============================================================================================

    /// Constructs a borrowing iterator over the nodes in this path.
    /// Starts from the leaf and iterates toward the root (excluding the root).
    pub fn iter(&self) -> impl ExactSizeIterator<Item = Word> {
        self.into_iter()
    }

    /// Computes the Merkle root for this opening.
    pub fn compute_root(&self, index: u64, node_to_prove: Word) -> Result<Word, MerkleError> {
        let mut index = NodeIndex::new(self.depth(), index)?;
        let root = self.iter().fold(node_to_prove, |node, sibling| {
            // Compute the node and move to the next iteration.
            let children = index.build_node(node, sibling);
            index.move_up();
            Rpo256::merge(&children)
        });

        Ok(root)
    }

    /// Verifies the Merkle opening proof towards the provided root.
    ///
    /// # Errors
    /// Returns an error if:
    /// - provided node index is invalid.
    /// - root calculated during the verification differs from the provided one.
    pub fn verify(&self, index: u64, node: Word, &expected_root: &Word) -> Result<(), MerkleError> {
        let computed_root = self.compute_root(index, node)?;
        if computed_root != expected_root {
            return Err(MerkleError::ConflictingRoots {
                expected_root,
                actual_root: computed_root,
            });
        }

        Ok(())
    }

    /// Given the node this path opens to, return an iterator of all the nodes that are known via
    /// this path.
    ///
    /// Each item in the iterator is an [InnerNodeInfo], containing the hash of a node as `.value`,
    /// and its two children as `.left` and `.right`. The very first item in that iterator will be
    /// the parent of `node_to_prove` as stored in this [SparseMerklePath].
    ///
    /// From there, the iterator will continue to yield every further parent and both of its
    /// children, up to and including the root node.
    ///
    /// If `node_to_prove` is not the node this path is an opening to, or `index` is not the
    /// correct index for that node, the returned nodes will be meaningless.
    ///
    /// # Errors
    /// Returns an error if the specified index is not valid for this path.
    pub fn authenticated_nodes(
        &self,
        index: u64,
        node_to_prove: Word,
    ) -> Result<InnerNodeIterator<'_>, MerkleError> {
        let index = NodeIndex::new(self.depth(), index)?;

        Ok(InnerNodeIterator { path: self, index, value: node_to_prove })
    }

    // PRIVATE HELPERS
    // ============================================================================================

    /// Returns the mask bit corresponding to a node depth.
    const fn bitmask_for_depth(node_depth: NonZero<u8>) -> u64 {
        // - 1 because paths do not include the root.
        1 << (node_depth.get() - 1)
    }

    /// Returns true if the node at the given depth is an empty node.
    const fn is_depth_empty(&self, node_depth: NonZero<u8>) -> bool {
        (self.empty_nodes_mask & Self::bitmask_for_depth(node_depth)) != 0
    }

    /// Index of the non-empty node in the `self.nodes` vector. If the specified depth is
    /// empty, None is returned.
    fn get_nonempty_index(&self, node_depth: NonZero<u8>) -> Option<usize> {
        if self.is_depth_empty(node_depth) {
            return None;
        }

        let bit_index = node_depth.get() - 1;
        let without_shallower = self.empty_nodes_mask >> bit_index;
        let empty_deeper = without_shallower.count_ones() as usize;
        // The vec index we would use if we didn't have any empty nodes to account for...
        let normal_index = (self.depth() - node_depth.get()) as usize;
        // subtracted by the number of empty nodes that are deeper than us.
        Some(normal_index - empty_deeper)
    }

    /// Returns the total depth of this path from its parts (non-empty node count plus the
    /// number of set bits in the empty-node mask).
    fn depth_from_parts(empty_nodes_mask: u64, nodes: &[Word]) -> usize {
        nodes.len() + empty_nodes_mask.count_ones() as usize
    }
}
// SERIALIZATION
// ================================================================================================
impl Serializable for SparseMerklePath {
    /// Serialized layout: depth (u8), then the empty-node bitmask (u64), then the
    /// non-empty nodes in storage order.
    fn write_into<W: ByteWriter>(&self, target: &mut W) {
        let path_depth = self.depth();
        target.write_u8(path_depth);
        target.write_u64(self.empty_nodes_mask);
        target.write_many(&self.nodes);
    }
}
impl Deserializable for SparseMerklePath {
    /// Reads a path from the layout written by `write_into`: depth (u8), empty-node
    /// bitmask (u64), then the non-empty nodes.
    fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
        let depth = source.read_u8()?;
        if depth > SMT_MAX_DEPTH {
            return Err(DeserializationError::InvalidValue(format!(
                "SparseMerklePath max depth exceeded ({depth} > {SMT_MAX_DEPTH})",
            )));
        }
        let empty_nodes_mask = source.read_u64()?;
        let empty_nodes_count = empty_nodes_mask.count_ones();

        if empty_nodes_count > depth as u32 {
            return Err(DeserializationError::InvalidValue(format!(
                "SparseMerklePath has more empty nodes ({empty_nodes_count}) than its full length ({depth})",
            )));
        }

        let count = depth as u32 - empty_nodes_count;
        let nodes = source.read_many::<Word>(count as usize)?;

        // Construct through `from_parts` so structurally inconsistent inputs (e.g. a mask
        // with set bits above the declared depth) are rejected instead of producing a path
        // that violates this type's invariants.
        Self::from_parts(empty_nodes_mask, nodes).map_err(|err| {
            DeserializationError::InvalidValue(format!("invalid SparseMerklePath: {err:?}"))
        })
    }
}
// CONVERSIONS
// ================================================================================================
impl From<SparseMerklePath> for MerklePath {
fn from(sparse_path: SparseMerklePath) -> Self {
MerklePath::from_iter(sparse_path)
}
}
impl TryFrom<MerklePath> for SparseMerklePath {
type Error = MerkleError;
/// # Errors
///
/// This conversion returns [MerkleError::DepthTooBig] if the path length is greater than
/// [`SMT_MAX_DEPTH`].
fn try_from(path: MerklePath) -> Result<Self, MerkleError> {
SparseMerklePath::from_sized_iter(path)
}
}
impl From<SparseMerklePath> for Vec<Word> {
    /// Collects every node of the path (empty ones included, hashed on demand) into a vector.
    fn from(path: SparseMerklePath) -> Self {
        path.into_iter().collect()
    }
}
// ITERATORS
// ================================================================================================
/// Iterator for [`SparseMerklePath`]. Starts from the leaf and iterates toward the root (excluding
/// the root).
pub struct SparseMerklePathIter<'p> {
    /// The "inner" value we're iterating over.
    // Cow lets the same iterator type serve both the owned (`IntoIterator for SparseMerklePath`)
    // and borrowed (`IntoIterator for &SparseMerklePath`) cases.
    path: Cow<'p, SparseMerklePath>,

    /// The depth a `next()` call will get. `next_depth == 0` indicates that the iterator has been
    /// exhausted.
    next_depth: u8,
}
impl Iterator for SparseMerklePathIter<'_> {
    type Item = Word;

    fn next(&mut self) -> Option<Word> {
        // Paths don't include the root, so a stored depth of 0 means we are exhausted and
        // keep returning `None`.
        let current_depth = NonZero::new(self.next_depth)?;
        self.next_depth -= 1;

        // `current_depth` is only ever decreasing, so it can't ever exceed `self.path.depth()`.
        let node = self
            .path
            .at_depth(current_depth)
            .expect("current depth should never exceed the path depth");
        Some(node)
    }

    // SparseMerkleIter always knows its exact size.
    fn size_hint(&self) -> (usize, Option<usize>) {
        let remaining = self.len();
        (remaining, Some(remaining))
    }
}
impl ExactSizeIterator for SparseMerklePathIter<'_> {
    fn len(&self) -> usize {
        // `next_depth` is exactly the number of nodes still to be yielded.
        usize::from(self.next_depth)
    }
}

impl FusedIterator for SparseMerklePathIter<'_> {}
// TODO: impl DoubleEndedIterator.
impl IntoIterator for SparseMerklePath {
    type IntoIter = SparseMerklePathIter<'static>;
    type Item = <Self::IntoIter as Iterator>::Item;

    /// Consuming iteration: the iterator owns the path and starts at its deepest node.
    fn into_iter(self) -> SparseMerklePathIter<'static> {
        let next_depth = self.depth();
        SparseMerklePathIter { path: Cow::Owned(self), next_depth }
    }
}
impl<'p> IntoIterator for &'p SparseMerklePath {
    type Item = <SparseMerklePathIter<'p> as Iterator>::Item;
    type IntoIter = SparseMerklePathIter<'p>;

    /// Borrowing iteration: the iterator borrows the path and starts at its deepest node.
    fn into_iter(self) -> SparseMerklePathIter<'p> {
        let next_depth = self.depth();
        SparseMerklePathIter { path: Cow::Borrowed(self), next_depth }
    }
}
/// An iterator over nodes known by a [SparseMerklePath]. See
/// [`SparseMerklePath::authenticated_nodes()`].
pub struct InnerNodeIterator<'p> {
    /// The path whose siblings are merged with `value` on the way up.
    path: &'p SparseMerklePath,
    /// Index of the node most recently folded into `value`; moves up one level per step.
    index: NodeIndex,
    /// The running hash: initially the node the path opens to, then each successive parent.
    value: Word,
}
impl Iterator for InnerNodeIterator<'_> {
    type Item = InnerNodeInfo;

    fn next(&mut self) -> Option<Self::Item> {
        // Once we have merged all the way up to the root there is nothing left to yield.
        if self.index.is_root() {
            return None;
        }

        let depth = NonZero::new(self.index.depth()).expect("non-root depth cannot be 0");
        let sibling = self.path.at_depth(depth).unwrap();

        // Pair the running value with its sibling, hash them into the parent, and step up.
        let children = self.index.build_node(self.value, sibling);
        self.value = Rpo256::merge(&children);
        self.index.move_up();

        Some(InnerNodeInfo {
            value: self.value,
            left: children[0],
            right: children[1],
        })
    }
}
// COMPARISONS
// ================================================================================================
impl PartialEq<MerklePath> for SparseMerklePath {
    /// Paths of different lengths never compare equal; otherwise nodes are compared
    /// pairwise from the leaf upward, short-circuiting on the first mismatch.
    fn eq(&self, rhs: &MerklePath) -> bool {
        self.depth() == rhs.depth()
            && iter::zip(self, rhs.iter()).all(|(node, &rhs_node)| node == rhs_node)
    }
}
impl PartialEq<SparseMerklePath> for MerklePath {
    /// Delegates to `SparseMerklePath: PartialEq<MerklePath>` to keep equality symmetric.
    fn eq(&self, rhs: &SparseMerklePath) -> bool {
        rhs.eq(self)
    }
}
// HELPERS
// ================================================================================================
/// Iterator for path depths, which start at the deepest part of the tree and go the shallowest
/// depth before the root (depth 1).
fn path_depth_iter(tree_depth: u8) -> impl ExactSizeIterator<Item = NonZero<u8>> {
    // Build the top-down range first, reverse it into bottom-up order, and only then wrap
    // each depth in `NonZero`.
    (1..=tree_depth).rev().map(|depth| {
        // SAFETY: the range starts at 1, so `depth` can never be 0. Even if `tree_depth` is 0,
        // the range `1..=0` yields no values and this closure is never invoked.
        unsafe { NonZero::new_unchecked(depth) }
    })
}
// TESTS
// ================================================================================================
#[cfg(test)]
mod tests {
use alloc::vec::Vec;
use core::num::NonZero;
use assert_matches::assert_matches;
use p3_field::PrimeCharacteristicRing;
use super::SparseMerklePath;
use crate::{
Felt, ONE, Word,
merkle::{
EmptySubtreeRoots, MerkleError, MerklePath, MerkleTree, NodeIndex,
smt::{LeafIndex, SMT_MAX_DEPTH, SimpleSmt, Smt, SparseMerkleTree},
sparse_path::path_depth_iter,
},
};
/// Builds an `Smt` with `pair_count` key-value pairs whose last key element spreads the
/// entries over leaf indices in [0, 255] (so leaves may collide for larger counts).
fn make_smt(pair_count: u64) -> Smt {
    let entries: Vec<(Word, Word)> = (0..pair_count)
        .map(|n| {
            // Scale n into the [0, 255] leaf-index range.
            let leaf_index = ((n as f64 / pair_count as f64) * 255.0) as u64;
            let key = Word::new([ONE, ONE, Felt::new(n), Felt::new(leaf_index)]);
            let value = Word::new([ONE, ONE, ONE, ONE]);
            (key, value)
        })
        .collect();
    Smt::with_entries(entries).unwrap()
}
/// Manually test the exact bit patterns for a sample path of 8 nodes, including both empty and
/// non-empty nodes.
///
/// This also offers an overview of what each part of the bit-math involved means and
/// represents.
#[test]
fn test_sparse_bits() {
    const DEPTH: u8 = 8;
    // Raw path nodes, ordered deepest (depth 8) to shallowest (depth 1); the root is excluded.
    let raw_nodes: [Word; DEPTH as usize] = [
        // Depth 8.
        ([8u8, 8, 8, 8].into()),
        // Depth 7.
        *EmptySubtreeRoots::entry(DEPTH, 7),
        // Depth 6.
        *EmptySubtreeRoots::entry(DEPTH, 6),
        // Depth 5.
        [5u8, 5, 5, 5].into(),
        // Depth 4.
        [4u8, 4, 4, 4].into(),
        // Depth 3.
        *EmptySubtreeRoots::entry(DEPTH, 3),
        // Depth 2.
        *EmptySubtreeRoots::entry(DEPTH, 2),
        // Depth 1.
        *EmptySubtreeRoots::entry(DEPTH, 1),
        // Root is not included.
    ];
    // The same path with empty nodes replaced by `None` — the expected sparse view.
    let sparse_nodes: [Option<Word>; DEPTH as usize] = [
        // Depth 8.
        Some([8u8, 8, 8, 8].into()),
        // Depth 7.
        None,
        // Depth 6.
        None,
        // Depth 5.
        Some([5u8, 5, 5, 5].into()),
        // Depth 4.
        Some([4u8, 4, 4, 4].into()),
        // Depth 3.
        None,
        // Depth 2.
        None,
        // Depth 1.
        None,
        // Root is not included.
    ];
    // Bit i set means the node at depth i + 1 is empty: depths 1, 2, 3, 6 and 7 here.
    const EMPTY_BITS: u64 = 0b0110_0111;
    let sparse_path = SparseMerklePath::from_sized_iter(raw_nodes).unwrap();
    assert_eq!(sparse_path.empty_nodes_mask, EMPTY_BITS);
    // Keep track of how many non-empty nodes we have seen
    let mut nonempty_idx = 0;
    // Test starting from the deepest nodes (depth 8)
    for depth in (1..=8).rev() {
        // Array index for this depth (the arrays above are ordered deepest-first).
        let idx = (sparse_path.depth() - depth) as usize;
        let bit = 1 << (depth - 1);
        // Check that the depth bit is set correctly...
        let is_set = (sparse_path.empty_nodes_mask & bit) != 0;
        assert_eq!(is_set, sparse_nodes.get(idx).unwrap().is_none());
        if is_set {
            // Check that we don't return digests for empty nodes
            let &test_node = sparse_nodes.get(idx).unwrap();
            assert_eq!(test_node, None);
        } else {
            // Check that we can calculate non-empty indices correctly.
            let control_node = raw_nodes.get(idx).unwrap();
            assert_eq!(
                sparse_path.get_nonempty_index(NonZero::new(depth).unwrap()).unwrap(),
                nonempty_idx
            );
            let test_node = sparse_path.nodes.get(nonempty_idx).unwrap();
            assert_eq!(test_node, control_node);
            nonempty_idx += 1;
        }
    }
}
#[test]
fn from_parts() {
    const DEPTH: u8 = 8;
    // Reference path used to cross-check `from_parts` against `from_sized_iter`.
    let raw_nodes: [Word; DEPTH as usize] = [
        // Depth 8.
        ([8u8, 8, 8, 8].into()),
        // Depth 7.
        *EmptySubtreeRoots::entry(DEPTH, 7),
        // Depth 6.
        *EmptySubtreeRoots::entry(DEPTH, 6),
        // Depth 5.
        [5u8, 5, 5, 5].into(),
        // Depth 4.
        [4u8, 4, 4, 4].into(),
        // Depth 3.
        *EmptySubtreeRoots::entry(DEPTH, 3),
        // Depth 2.
        *EmptySubtreeRoots::entry(DEPTH, 2),
        // Depth 1.
        *EmptySubtreeRoots::entry(DEPTH, 1),
        // Root is not included.
    ];
    // Depths 1, 2, 3, 6 and 7 are empty; depths 4, 5 and 8 carry the nodes below.
    let empty_nodes_mask = 0b0110_0111;
    let nodes = vec![[8u8, 8, 8, 8].into(), [5u8, 5, 5, 5].into(), [4u8, 4, 4, 4].into()];
    // The mask's 7-bit span has two zero bits (depths 4 and 5), so at least 2 non-empty
    // nodes are required; a single node must be rejected.
    let insufficient_nodes = vec![[4u8, 4, 4, 4].into()];
    let error = SparseMerklePath::from_parts(empty_nodes_mask, insufficient_nodes).unwrap_err();
    assert_matches!(error, MerkleError::InvalidPathLength(2));
    // Both construction routes must produce the same path.
    let iter_sparse_path = SparseMerklePath::from_sized_iter(raw_nodes).unwrap();
    let sparse_path = SparseMerklePath::from_parts(empty_nodes_mask, nodes).unwrap();
    assert_eq!(sparse_path, iter_sparse_path);
}
#[test]
fn from_sized_iter() {
    let tree = make_smt(8192);
    for (key, _value) in tree.entries() {
        let index = NodeIndex::from(Smt::key_to_leaf_index(key));
        // Every node yielded by the sparse path must match the node the tree reports at the
        // corresponding proof index (zip_eq also asserts both sequences have equal length).
        let sparse_path = tree.get_path(key);
        for (sparse_node, proof_idx) in
            itertools::zip_eq(sparse_path.clone(), index.proof_indices())
        {
            let proof_node = tree.get_node_hash(proof_idx);
            assert_eq!(sparse_node, proof_node);
        }
    }
}
#[test]
fn test_zero_sized() {
    let nodes: Vec<Word> = Default::default();
    // Sparse paths that don't actually contain any nodes should still be well behaved.
    let sparse_path = SparseMerklePath::from_sized_iter(nodes).unwrap();
    // Depth is 0, any depth lookup errors, and both iterators are immediately exhausted.
    assert_eq!(sparse_path.depth(), 0);
    assert_matches!(
        sparse_path.at_depth(NonZero::new(1).unwrap()),
        Err(MerkleError::DepthTooBig(1))
    );
    assert_eq!(sparse_path.iter().next(), None);
    assert_eq!(sparse_path.into_iter().next(), None);
}
use proptest::prelude::*;
// Arbitrary instance for Word
impl Arbitrary for Word {
    type Parameters = ();
    type Strategy = BoxedStrategy<Self>;

    fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
        // Four random u64 limbs mapped to field elements; `no_shrink` because shrinking
        // individual limbs does not produce meaningfully "smaller" digests.
        prop::collection::vec(any::<u64>(), 4)
            .prop_map(|vals| {
                Word::new([
                    Felt::new(vals[0]),
                    Felt::new(vals[1]),
                    Felt::new(vals[2]),
                    Felt::new(vals[3]),
                ])
            })
            .no_shrink()
            .boxed()
    }
}

// Arbitrary instance for MerklePath
impl Arbitrary for MerklePath {
    type Parameters = ();
    type Strategy = BoxedStrategy<Self>;

    fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
        // Paths of any length from 0 up to the maximum SMT depth.
        prop::collection::vec(any::<Word>(), 0..=SMT_MAX_DEPTH as usize)
            .prop_map(MerklePath::new)
            .boxed()
    }
}

// Arbitrary instance for SparseMerklePath
impl Arbitrary for SparseMerklePath {
    type Parameters = ();
    type Strategy = BoxedStrategy<Self>;

    fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
        // Pick a depth, then a mask restricted to that depth, then exactly enough
        // non-empty nodes so that `from_parts` accepts the combination.
        (0..=SMT_MAX_DEPTH as usize)
            .prop_flat_map(|depth| {
                // Generate a bitmask for empty nodes - avoid overflow
                let max_mask = if depth > 0 && depth < 64 {
                    (1u64 << depth) - 1
                } else if depth == 64 {
                    u64::MAX
                } else {
                    0
                };
                let empty_nodes_mask =
                    prop::num::u64::ANY.prop_map(move |mask| mask & max_mask);
                // Generate non-empty nodes based on the mask
                empty_nodes_mask.prop_flat_map(move |mask| {
                    let empty_count = mask.count_ones() as usize;
                    let non_empty_count = depth.saturating_sub(empty_count);
                    prop::collection::vec(any::<Word>(), non_empty_count).prop_map(
                        move |nodes| SparseMerklePath::from_parts(mask, nodes).unwrap(),
                    )
                })
            })
            .boxed()
    }
}
proptest! {
    // Round-trip: MerklePath -> SparseMerklePath -> MerklePath must be lossless for
    // representable depths, and must fail cleanly beyond SMT_MAX_DEPTH.
    #[test]
    fn sparse_merkle_path_roundtrip_equivalence(path in any::<MerklePath>()) {
        // Convert MerklePath to SparseMerklePath and back
        let sparse_result = SparseMerklePath::try_from(path.clone());
        if path.depth() <= SMT_MAX_DEPTH {
            let sparse = sparse_result.unwrap();
            let reconstructed = MerklePath::from(sparse);
            prop_assert_eq!(path, reconstructed);
        } else {
            prop_assert!(sparse_result.is_err());
        }
    }
}

proptest! {
    // Round-trip in the opposite direction: SparseMerklePath -> MerklePath -> SparseMerklePath.
    #[test]
    fn merkle_path_roundtrip_equivalence(sparse in any::<SparseMerklePath>()) {
        // Convert SparseMerklePath to MerklePath and back
        let merkle = MerklePath::from(sparse.clone());
        let reconstructed = SparseMerklePath::try_from(merkle.clone()).unwrap();
        prop_assert_eq!(sparse, reconstructed);
    }
}
proptest! {
    // Cross-checks that a MerklePath and its sparse form agree on depth, per-depth node
    // access, iteration order, and equality semantics.
    #[test]
    fn path_equivalence_tests(path in any::<MerklePath>(), path2 in any::<MerklePath>()) {
        if path.depth() > SMT_MAX_DEPTH {
            // Not representable sparsely; nothing to compare.
            return Ok(());
        }
        let sparse = SparseMerklePath::try_from(path.clone()).unwrap();
        // Depth consistency
        prop_assert_eq!(path.depth(), sparse.depth());
        // Node access consistency including path_depth_iter
        if path.depth() > 0 {
            for depth in path_depth_iter(path.depth()) {
                let merkle_node = path.at_depth(depth);
                let sparse_node = sparse.at_depth(depth);
                match (merkle_node, sparse_node) {
                    (Some(m), Ok(s)) => prop_assert_eq!(m, s),
                    (None, Err(_)) => {},
                    _ => prop_assert!(false, "Inconsistent node access at depth {}", depth.get()),
                }
            }
        }
        // Iterator consistency
        if path.depth() > 0 {
            let merkle_nodes: Vec<_> = path.iter().collect();
            let sparse_nodes: Vec<_> = sparse.iter().collect();
            prop_assert_eq!(merkle_nodes.len(), sparse_nodes.len());
            for (m, s) in merkle_nodes.iter().zip(sparse_nodes.iter()) {
                prop_assert_eq!(*m, s);
            }
        }
        // Test equality between different representations
        if path2.depth() <= SMT_MAX_DEPTH {
            let sparse2 = SparseMerklePath::try_from(path2.clone()).unwrap();
            prop_assert_eq!(path == path2, sparse == sparse2);
            prop_assert_eq!(path == sparse2, sparse == path2);
        }
    }
}
// rather heavy tests
proptest! {
#![proptest_config(ProptestConfig::with_cases(100))]
#[test]
fn compute_root_consistency(
tree_data in any::<RandomMerkleTree>(),
node in any::<Word>()
) {
let RandomMerkleTree { tree, leaves: _, indices } = tree_data;
for &leaf_index in indices.iter() {
let path = tree.get_path(NodeIndex::new(tree.depth(), leaf_index).unwrap()).unwrap();
let sparse = SparseMerklePath::from_sized_iter(path.clone().into_iter()).unwrap();
let merkle_root = path.compute_root(leaf_index, node);
let sparse_root = sparse.compute_root(leaf_index, node);
match (merkle_root, sparse_root) {
(Ok(m), Ok(s)) => prop_assert_eq!(m, s),
(Err(e1), Err(e2)) => {
// Both should have the same error type
prop_assert_eq!(format!("{:?}", e1), format!("{:?}", e2));
},
_ => prop_assert!(false, "Inconsistent compute_root results"),
}
}
}
#[test]
fn verify_consistency(
tree_data in any::<RandomMerkleTree>(),
node in any::<Word>()
) {
let RandomMerkleTree { tree, leaves, indices } = tree_data;
for (i, &leaf_index) in indices.iter().enumerate() {
let leaf = leaves[i];
let path = tree.get_path(NodeIndex::new(tree.depth(), leaf_index).unwrap()).unwrap();
let sparse = SparseMerklePath::from_sized_iter(path.clone().into_iter()).unwrap();
let root = tree.root();
let merkle_verify = path.verify(leaf_index, leaf, &root);
let sparse_verify = sparse.verify(leaf_index, leaf, &root);
match (merkle_verify, sparse_verify) {
(Ok(()), Ok(())) => {},
(Err(e1), Err(e2)) => {
// Both should have the same error type
prop_assert_eq!(format!("{:?}", e1), format!("{:?}", e2));
},
_ => prop_assert!(false, "Inconsistent verify results"),
}
// Test with wrong node - both should fail
let wrong_verify = path.verify(leaf_index, node, &root);
let wrong_sparse_verify = sparse.verify(leaf_index, node, &root);
match (wrong_verify, wrong_sparse_verify) {
(Ok(()), Ok(())) => prop_assert!(false, "Verification should have failed with wrong node"),
(Err(_), Err(_)) => {},
_ => prop_assert!(false, "Inconsistent verification results with wrong node"),
}
}
}
#[test]
fn authenticated_nodes_consistency(
tree_data in any::<RandomMerkleTree>()
) {
let RandomMerkleTree { tree, leaves, indices } = tree_data;
for (i, &leaf_index) in indices.iter().enumerate() {
let leaf = leaves[i];
let path = tree.get_path(NodeIndex::new(tree.depth(), leaf_index).unwrap()).unwrap();
let sparse = SparseMerklePath::from_sized_iter(path.clone().into_iter()).unwrap();
let merkle_result = path.authenticated_nodes(leaf_index, leaf);
let sparse_result = sparse.authenticated_nodes(leaf_index, leaf);
match (merkle_result, sparse_result) {
(Ok(m_iter), Ok(s_iter)) => {
let merkle_nodes: Vec<_> = m_iter.collect();
let sparse_nodes: Vec<_> = s_iter.collect();
prop_assert_eq!(merkle_nodes.len(), sparse_nodes.len());
for (m, s) in merkle_nodes.iter().zip(sparse_nodes.iter()) {
prop_assert_eq!(m, s);
}
},
(Err(e1), Err(e2)) => {
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | true |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/error.rs | miden-crypto/src/merkle/error.rs | use alloc::string::String;
use thiserror::Error;
use super::{NodeIndex, Word, smt::MAX_LEAF_ENTRIES};
#[derive(Debug, Error)]
pub enum MerkleError {
#[error("expected merkle root {expected_root} found {actual_root}")]
ConflictingRoots { expected_root: Word, actual_root: Word },
#[error("provided merkle tree depth {0} is too small")]
DepthTooSmall(u8),
#[error("provided merkle tree depth {0} is too big")]
DepthTooBig(u64),
#[error("multiple values provided for merkle tree index {0}")]
DuplicateValuesForIndex(u64),
#[error("entry {node} is not a leaf")]
EntryIsNotLeaf { node: NodeIndex },
#[error("node index value {value} is not valid for depth {depth}")]
InvalidNodeIndex { depth: u8, value: u64 },
#[error("provided node index depth {provided} does not match expected depth {expected}")]
InvalidNodeIndexDepth { expected: u8, provided: u8 },
#[error("provided node list should have a minimum length of {0}")]
InvalidPathLength(usize),
#[error("merkle subtree depth {subtree_depth} exceeds merkle tree depth {tree_depth}")]
SubtreeDepthExceedsDepth { subtree_depth: u8, tree_depth: u8 },
#[error("number of entries in the merkle tree exceeds the maximum of 2^{0}")]
TooManyEntries(u8),
#[error("number of entries in a leaf ({actual}) exceeds the maximum of ({MAX_LEAF_ENTRIES})")]
TooManyLeafEntries { actual: usize },
#[error("node index `{0}` not found in the tree")]
NodeIndexNotFoundInTree(NodeIndex),
#[error("node {0:?} with index `{1}` not found in the store")]
NodeIndexNotFoundInStore(Word, NodeIndex),
#[error("number of provided merkle tree leaves {0} is not a power of two")]
NumLeavesNotPowerOfTwo(usize),
#[error("root {0:?} is not in the store")]
RootNotInStore(Word),
#[error("partial smt does not track the merkle path for key {0}")]
UntrackedKey(Word),
#[error("internal error: {0}")]
InternalError(String),
}
#[cfg(feature = "concurrent")]
impl From<crate::merkle::smt::LargeSmtError> for MerkleError {
fn from(err: crate::merkle::smt::LargeSmtError) -> Self {
use alloc::string::ToString;
match err {
crate::merkle::smt::LargeSmtError::Merkle(me) => me,
crate::merkle::smt::LargeSmtError::Storage(se) => {
MerkleError::InternalError(se.to_string())
},
crate::merkle::smt::LargeSmtError::RootMismatch { expected, actual } => {
MerkleError::ConflictingRoots {
expected_root: expected,
actual_root: actual,
}
},
crate::merkle::smt::LargeSmtError::StorageNotEmpty => {
MerkleError::InternalError("storage is not empty".into())
},
}
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/mod.rs | miden-crypto/src/merkle/mod.rs | //! Data structures related to Merkle trees based on RPO256 hash function.
use super::{EMPTY_WORD, Felt, Word, hash::rpo::Rpo256};
// SUBMODULES
// ================================================================================================
mod empty_roots;
mod error;
mod index;
mod merkle_tree;
mod node;
mod partial_mt;
mod path;
mod sparse_path;
pub mod mmr;
pub mod smt;
pub mod store;
// REEXPORTS
// ================================================================================================
pub use empty_roots::EmptySubtreeRoots;
pub use error::MerkleError;
pub use index::NodeIndex;
pub use merkle_tree::{MerkleTree, path_to_text, tree_to_text};
pub use node::InnerNodeInfo;
pub use partial_mt::PartialMerkleTree;
pub use path::{MerklePath, MerkleProof, RootPath};
pub use sparse_path::SparseMerklePath;
// HELPER FUNCTIONS
// ================================================================================================
#[cfg(test)]
const fn int_to_node(value: u64) -> Word {
use super::ZERO;
Word::new([Felt::new(value), ZERO, ZERO, ZERO])
}
#[cfg(test)]
const fn int_to_leaf(value: u64) -> Word {
use super::ZERO;
Word::new([Felt::new(value), ZERO, ZERO, ZERO])
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/merkle_tree.rs | miden-crypto/src/merkle/merkle_tree.rs | use alloc::{string::String, vec::Vec};
use core::{fmt, slice};
use super::{InnerNodeInfo, MerkleError, MerklePath, NodeIndex, Rpo256, Word};
use crate::utils::{uninit_vector, word_to_hex};
// MERKLE TREE
// ================================================================================================
/// A fully-balanced binary Merkle tree (i.e., a tree where the number of leaves is a power of two).
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct MerkleTree {
nodes: Vec<Word>,
}
impl MerkleTree {
// CONSTRUCTOR
// --------------------------------------------------------------------------------------------
/// Returns a Merkle tree instantiated from the provided leaves.
///
/// # Errors
/// Returns an error if the number of leaves is smaller than two or is not a power of two.
pub fn new<T>(leaves: T) -> Result<Self, MerkleError>
where
T: AsRef<[Word]>,
{
let leaves = leaves.as_ref();
let n = leaves.len();
if n <= 1 {
return Err(MerkleError::DepthTooSmall(n as u8));
} else if !n.is_power_of_two() {
return Err(MerkleError::NumLeavesNotPowerOfTwo(n));
}
// create un-initialized vector to hold all tree nodes
let mut nodes = unsafe { uninit_vector(2 * n) };
nodes[0] = Word::default();
// copy leaves into the second part of the nodes vector
nodes[n..].iter_mut().zip(leaves).for_each(|(node, leaf)| {
*node = *leaf;
});
// re-interpret nodes as an array of two nodes fused together
// Safety: `nodes` will never move here as it is not bound to an external lifetime (i.e.
// `self`).
let ptr = nodes.as_ptr() as *const [Word; 2];
let pairs = unsafe { slice::from_raw_parts(ptr, n) };
// calculate all internal tree nodes
for i in (1..n).rev() {
nodes[i] = Rpo256::merge(&pairs[i]);
}
Ok(Self { nodes })
}
// PUBLIC ACCESSORS
// --------------------------------------------------------------------------------------------
/// Returns the root of this Merkle tree.
pub fn root(&self) -> Word {
self.nodes[1]
}
/// Returns the depth of this Merkle tree.
///
/// Merkle tree of depth 1 has two leaves, depth 2 has four leaves etc.
pub fn depth(&self) -> u8 {
(self.nodes.len() / 2).ilog2() as u8
}
/// Returns a node at the specified depth and index value.
///
/// # Errors
/// Returns an error if:
/// * The specified depth is greater than the depth of the tree.
/// * The specified index is not valid for the specified depth.
pub fn get_node(&self, index: NodeIndex) -> Result<Word, MerkleError> {
if index.is_root() {
return Err(MerkleError::DepthTooSmall(index.depth()));
} else if index.depth() > self.depth() {
return Err(MerkleError::DepthTooBig(index.depth() as u64));
}
let pos = index.to_scalar_index() as usize;
Ok(self.nodes[pos])
}
/// Returns a Merkle path to the node at the specified depth and index value. The node itself
/// is not included in the path.
///
/// # Errors
/// Returns an error if:
/// * The specified depth is greater than the depth of the tree.
/// * The specified value is not valid for the specified depth.
pub fn get_path(&self, index: NodeIndex) -> Result<MerklePath, MerkleError> {
if index.is_root() {
return Err(MerkleError::DepthTooSmall(index.depth()));
} else if index.depth() > self.depth() {
return Err(MerkleError::DepthTooBig(index.depth() as u64));
}
Ok(MerklePath::from(Vec::from_iter(
index.proof_indices().map(|index| self.get_node(index).unwrap()),
)))
}
// ITERATORS
// --------------------------------------------------------------------------------------------
/// Returns an iterator over the leaves of this [MerkleTree].
pub fn leaves(&self) -> impl Iterator<Item = (u64, &Word)> {
let leaves_start = self.nodes.len() / 2;
self.nodes.iter().skip(leaves_start).enumerate().map(|(i, v)| (i as u64, v))
}
/// Returns n iterator over every inner node of this [MerkleTree].
///
/// The iterator order is unspecified.
pub fn inner_nodes(&self) -> InnerNodeIterator<'_> {
InnerNodeIterator {
nodes: &self.nodes,
index: 1, // index 0 is just padding, start at 1
}
}
// STATE MUTATORS
// --------------------------------------------------------------------------------------------
/// Replaces the leaf at the specified index with the provided value.
///
/// # Errors
/// Returns an error if the specified index value is not a valid leaf value for this tree.
pub fn update_leaf<'a>(&'a mut self, index_value: u64, value: Word) -> Result<(), MerkleError> {
let mut index = NodeIndex::new(self.depth(), index_value)?;
// we don't need to copy the pairs into a new address as we are logically guaranteed to not
// overlap write instructions. however, it's important to bind the lifetime of pairs to
// `self.nodes` so the compiler will never move one without moving the other.
debug_assert_eq!(self.nodes.len() & 1, 0);
let n = self.nodes.len() / 2;
// Safety: the length of nodes is guaranteed to contain pairs of words; hence, pairs of
// digests. we explicitly bind the lifetime here so we add an extra layer of guarantee that
// `self.nodes` will be moved only if `pairs` is moved as well. also, the algorithm is
// logically guaranteed to not overlap write positions as the write index is always half
// the index from which we read the digest input.
let ptr = self.nodes.as_ptr() as *const [Word; 2];
let pairs: &'a [[Word; 2]] = unsafe { slice::from_raw_parts(ptr, n) };
// update the current node
let pos = index.to_scalar_index() as usize;
self.nodes[pos] = value;
// traverse to the root, updating each node with the merged values of its parents
for _ in 0..index.depth() {
index.move_up();
let pos = index.to_scalar_index() as usize;
let value = Rpo256::merge(&pairs[pos]);
self.nodes[pos] = value;
}
Ok(())
}
}
// CONVERSIONS
// ================================================================================================
impl TryFrom<&[Word]> for MerkleTree {
type Error = MerkleError;
fn try_from(value: &[Word]) -> Result<Self, Self::Error> {
MerkleTree::new(value)
}
}
// ITERATORS
// ================================================================================================
/// An iterator over every inner node of the [MerkleTree].
///
/// Use this to extract the data of the tree, there is no guarantee on the order of the elements.
pub struct InnerNodeIterator<'a> {
nodes: &'a Vec<Word>,
index: usize,
}
impl Iterator for InnerNodeIterator<'_> {
type Item = InnerNodeInfo;
fn next(&mut self) -> Option<Self::Item> {
if self.index < self.nodes.len() / 2 {
let value = self.index;
let left = self.index * 2;
let right = left + 1;
self.index += 1;
Some(InnerNodeInfo {
value: self.nodes[value],
left: self.nodes[left],
right: self.nodes[right],
})
} else {
None
}
}
}
// UTILITY FUNCTIONS
// ================================================================================================
/// Utility to visualize a [MerkleTree] in text.
pub fn tree_to_text(tree: &MerkleTree) -> Result<String, fmt::Error> {
let indent = " ";
let mut s = String::new();
s.push_str(&word_to_hex(&tree.root())?);
s.push('\n');
for d in 1..=tree.depth() {
let entries = 2u64.pow(d.into());
for i in 0..entries {
let index = NodeIndex::new(d, i).expect("The index must always be valid");
let node = tree.get_node(index).expect("The node must always be found");
for _ in 0..d {
s.push_str(indent);
}
s.push_str(&word_to_hex(&node)?);
s.push('\n');
}
}
Ok(s)
}
/// Utility to visualize a [MerklePath] in text.
pub fn path_to_text(path: &MerklePath) -> Result<String, fmt::Error> {
let mut s = String::new();
s.push('[');
for el in path.iter() {
s.push_str(&word_to_hex(el)?);
s.push_str(", ");
}
// remove the last ", "
if !path.is_empty() {
s.pop();
s.pop();
}
s.push(']');
Ok(s)
}
// TESTS
// ================================================================================================
#[cfg(test)]
mod tests {
use core::mem::size_of;
use proptest::prelude::*;
use super::*;
use crate::{
Felt, WORD_SIZE,
merkle::{int_to_leaf, int_to_node},
};
const LEAVES4: [Word; WORD_SIZE] =
[int_to_node(1), int_to_node(2), int_to_node(3), int_to_node(4)];
const LEAVES8: [Word; 8] = [
int_to_node(1),
int_to_node(2),
int_to_node(3),
int_to_node(4),
int_to_node(5),
int_to_node(6),
int_to_node(7),
int_to_node(8),
];
#[test]
fn build_merkle_tree() {
let tree = super::MerkleTree::new(LEAVES4).unwrap();
assert_eq!(8, tree.nodes.len());
// leaves were copied correctly
for (a, b) in tree.nodes.iter().skip(4).zip(LEAVES4.iter()) {
assert_eq!(a, b);
}
let (root, node2, node3) = compute_internal_nodes();
assert_eq!(root, tree.nodes[1]);
assert_eq!(node2, tree.nodes[2]);
assert_eq!(node3, tree.nodes[3]);
assert_eq!(root, tree.root());
}
#[test]
fn get_leaf() {
let tree = super::MerkleTree::new(LEAVES4).unwrap();
// check depth 2
assert_eq!(LEAVES4[0], tree.get_node(NodeIndex::make(2, 0)).unwrap());
assert_eq!(LEAVES4[1], tree.get_node(NodeIndex::make(2, 1)).unwrap());
assert_eq!(LEAVES4[2], tree.get_node(NodeIndex::make(2, 2)).unwrap());
assert_eq!(LEAVES4[3], tree.get_node(NodeIndex::make(2, 3)).unwrap());
// check depth 1
let (_, node2, node3) = compute_internal_nodes();
assert_eq!(node2, tree.get_node(NodeIndex::make(1, 0)).unwrap());
assert_eq!(node3, tree.get_node(NodeIndex::make(1, 1)).unwrap());
}
#[test]
fn get_path() {
let tree = super::MerkleTree::new(LEAVES4).unwrap();
let (_, node2, node3) = compute_internal_nodes();
// check depth 2
assert_eq!(vec![LEAVES4[1], node3], *tree.get_path(NodeIndex::make(2, 0)).unwrap());
assert_eq!(vec![LEAVES4[0], node3], *tree.get_path(NodeIndex::make(2, 1)).unwrap());
assert_eq!(vec![LEAVES4[3], node2], *tree.get_path(NodeIndex::make(2, 2)).unwrap());
assert_eq!(vec![LEAVES4[2], node2], *tree.get_path(NodeIndex::make(2, 3)).unwrap());
// check depth 1
assert_eq!(vec![node3], *tree.get_path(NodeIndex::make(1, 0)).unwrap());
assert_eq!(vec![node2], *tree.get_path(NodeIndex::make(1, 1)).unwrap());
}
#[test]
fn update_leaf() {
let mut tree = super::MerkleTree::new(LEAVES8).unwrap();
// update one leaf
let value = 3;
let new_node = int_to_leaf(9);
let mut expected_leaves = LEAVES8.to_vec();
expected_leaves[value as usize] = new_node;
let expected_tree = super::MerkleTree::new(expected_leaves.clone()).unwrap();
tree.update_leaf(value, new_node).unwrap();
assert_eq!(expected_tree.nodes, tree.nodes);
// update another leaf
let value = 6;
let new_node = int_to_leaf(10);
expected_leaves[value as usize] = new_node;
let expected_tree = super::MerkleTree::new(expected_leaves.clone()).unwrap();
tree.update_leaf(value, new_node).unwrap();
assert_eq!(expected_tree.nodes, tree.nodes);
}
#[test]
fn nodes() -> Result<(), MerkleError> {
let tree = super::MerkleTree::new(LEAVES4).unwrap();
let root = tree.root();
let l1n0 = tree.get_node(NodeIndex::make(1, 0))?;
let l1n1 = tree.get_node(NodeIndex::make(1, 1))?;
let l2n0 = tree.get_node(NodeIndex::make(2, 0))?;
let l2n1 = tree.get_node(NodeIndex::make(2, 1))?;
let l2n2 = tree.get_node(NodeIndex::make(2, 2))?;
let l2n3 = tree.get_node(NodeIndex::make(2, 3))?;
let nodes: Vec<InnerNodeInfo> = tree.inner_nodes().collect();
let expected = vec![
InnerNodeInfo { value: root, left: l1n0, right: l1n1 },
InnerNodeInfo { value: l1n0, left: l2n0, right: l2n1 },
InnerNodeInfo { value: l1n1, left: l2n2, right: l2n3 },
];
assert_eq!(nodes, expected);
Ok(())
}
proptest! {
#[test]
fn arbitrary_word_can_be_represented_as_digest(
a in prop::num::u64::ANY,
b in prop::num::u64::ANY,
c in prop::num::u64::ANY,
d in prop::num::u64::ANY,
) {
// this test will assert the memory equivalence between word and digest.
// it is used to safeguard the `[MerkleTee::update_leaf]` implementation
// that assumes this equivalence.
// build a word and copy it to another address as digest
let word = [Felt::new(a), Felt::new(b), Felt::new(c), Felt::new(d)];
let digest = Word::from(word);
// assert the addresses are different
let word_ptr = word.as_ptr() as *const u8;
let digest_ptr = digest.as_ptr() as *const u8;
assert_ne!(word_ptr, digest_ptr);
// compare the bytes representation
let word_bytes = unsafe { slice::from_raw_parts(word_ptr, size_of::<Word>()) };
let digest_bytes = unsafe { slice::from_raw_parts(digest_ptr, size_of::<Word>()) };
assert_eq!(word_bytes, digest_bytes);
}
}
// HELPER FUNCTIONS
// --------------------------------------------------------------------------------------------
fn compute_internal_nodes() -> (Word, Word, Word) {
let node2 = Rpo256::hash_elements(&[*LEAVES4[0], *LEAVES4[1]].concat());
let node3 = Rpo256::hash_elements(&[*LEAVES4[2], *LEAVES4[3]].concat());
let root = Rpo256::merge(&[node2, node3]);
(root, node2, node3)
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/mmr/proof.rs | miden-crypto/src/merkle/mmr/proof.rs | /// The representation of a single Merkle path.
use super::super::MerklePath;
use super::forest::Forest;
use crate::Word;
// MMR PROOF
// ================================================================================================
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct MmrPath {
/// The state of the MMR when the MMR path was created.
forest: Forest,
/// The position of the leaf value within the MMR.
position: usize,
/// The Merkle opening, starting from the value's sibling up to and excluding the root of the
/// responsible tree.
merkle_path: MerklePath,
}
impl MmrPath {
/// Creates a new `MmrPath` with the given forest, position, and merkle path.
pub fn new(forest: Forest, position: usize, merkle_path: MerklePath) -> Self {
Self { forest, position, merkle_path }
}
/// Returns the state of the MMR when the MMR path was created.
pub fn forest(&self) -> Forest {
self.forest
}
/// Returns the position of the leaf value within the MMR.
pub fn position(&self) -> usize {
self.position
}
/// Returns the Merkle opening, starting from the value's sibling up to and excluding the root
/// of the responsible tree.
pub fn merkle_path(&self) -> &MerklePath {
&self.merkle_path
}
/// Converts the leaf global position into a local position that can be used to verify the
/// Merkle path.
pub fn relative_pos(&self) -> usize {
self.forest
.leaf_relative_position(self.position)
.expect("position must be part of the forest")
}
/// Returns index of the MMR peak against which the Merkle path in this proof can be verified.
pub fn peak_index(&self) -> usize {
self.forest.tree_index(self.position)
}
}
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct MmrProof {
/// The Merkle path data describing how to authenticate the leaf.
path: MmrPath,
/// The leaf value that was opened.
leaf: Word,
}
impl MmrProof {
/// Creates a new `MmrProof` with the given path and leaf.
pub fn new(path: MmrPath, leaf: Word) -> Self {
Self { path, leaf }
}
/// Returns the Merkle path data describing how to authenticate the leaf.
pub fn path(&self) -> &MmrPath {
&self.path
}
/// Returns the leaf value that was opened.
pub fn leaf(&self) -> Word {
self.leaf
}
/// Returns the state of the MMR when the proof was created.
pub fn forest(&self) -> Forest {
self.path.forest()
}
/// Returns the position of the leaf value within the MMR.
pub fn position(&self) -> usize {
self.path.position()
}
/// Returns the Merkle opening, starting from the value's sibling up to and excluding the root
/// of the responsible tree.
pub fn merkle_path(&self) -> &MerklePath {
self.path.merkle_path()
}
/// Converts the leaf global position into a local position that can be used to verify the
/// merkle_path.
pub fn relative_pos(&self) -> usize {
self.path.relative_pos()
}
/// Returns index of the MMR peak against which the Merkle path in this proof can be verified.
pub fn peak_index(&self) -> usize {
self.path.peak_index()
}
}
// TESTS
// ================================================================================================
#[cfg(test)]
mod tests {
use super::{MerklePath, MmrPath, MmrProof};
use crate::{Word, merkle::mmr::forest::Forest};
#[test]
fn test_peak_index() {
// --- single peak forest ---------------------------------------------
let forest = Forest::new(11);
// the first 4 leaves belong to peak 0
for position in 0..8 {
let proof = make_dummy_proof(forest, position);
assert_eq!(proof.peak_index(), 0);
}
// --- forest with non-consecutive peaks ------------------------------
let forest = Forest::new(11);
// the first 8 leaves belong to peak 0
for position in 0..8 {
let proof = make_dummy_proof(forest, position);
assert_eq!(proof.peak_index(), 0);
}
// the next 2 leaves belong to peak 1
for position in 8..10 {
let proof = make_dummy_proof(forest, position);
assert_eq!(proof.peak_index(), 1);
}
// the last leaf is the peak 2
let proof = make_dummy_proof(forest, 10);
assert_eq!(proof.peak_index(), 2);
// --- forest with consecutive peaks ----------------------------------
let forest = Forest::new(7);
// the first 4 leaves belong to peak 0
for position in 0..4 {
let proof = make_dummy_proof(forest, position);
assert_eq!(proof.peak_index(), 0);
}
// the next 2 leaves belong to peak 1
for position in 4..6 {
let proof = make_dummy_proof(forest, position);
assert_eq!(proof.peak_index(), 1);
}
// the last leaf is the peak 2
let proof = make_dummy_proof(forest, 6);
assert_eq!(proof.peak_index(), 2);
}
fn make_dummy_proof(forest: Forest, position: usize) -> MmrProof {
let path = MmrPath::new(forest, position, MerklePath::default());
MmrProof::new(path, Word::empty())
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/mmr/tests.rs | miden-crypto/src/merkle/mmr/tests.rs | use alloc::vec::Vec;
use assert_matches::assert_matches;
use super::{
super::{InnerNodeInfo, Rpo256, Word},
Mmr, MmrError, MmrPeaks, PartialMmr,
};
use crate::{
Felt,
merkle::{
MerklePath, MerkleTree, NodeIndex, int_to_node,
mmr::{
InOrderIndex, MmrPath, MmrProof,
forest::{Forest, TreeSizeIterator, high_bitmask},
},
},
};
#[test]
fn tests_empty_mmr_peaks() {
let peaks = MmrPeaks::default();
assert_eq!(peaks.num_peaks(), 0);
assert_eq!(peaks.num_leaves(), 0);
}
#[test]
fn test_empty_partial_mmr() {
let mmr = PartialMmr::default();
assert_eq!(mmr.num_leaves(), 0);
assert_eq!(mmr.forest(), Forest::empty());
assert_eq!(mmr.peaks(), MmrPeaks::default());
assert!(mmr.nodes.is_empty());
assert!(!mmr.track_latest);
}
#[test]
fn test_position_equal_or_higher_than_leaves_is_never_contained() {
let empty_forest = 0;
for pos in 1..1024 {
// pos is index, 0 based
// tree is a length counter, 1 based
// so a valid pos is always smaller, not equal, to tree
assert_eq!(leaf_to_corresponding_tree(pos, pos), None);
assert_eq!(leaf_to_corresponding_tree(pos, pos - 1), None);
// and empty forest has no trees, so no position is valid
assert_eq!(leaf_to_corresponding_tree(pos, empty_forest), None);
}
}
#[test]
fn test_position_zero_is_always_contained_by_the_highest_tree() {
for leaves in 1..1024usize {
let tree = leaves.ilog2();
assert_eq!(leaf_to_corresponding_tree(0, leaves), Some(tree));
}
}
#[test]
fn test_leaf_to_corresponding_tree() {
assert_eq!(leaf_to_corresponding_tree(0, 0b0001), Some(0));
assert_eq!(leaf_to_corresponding_tree(0, 0b0010), Some(1));
assert_eq!(leaf_to_corresponding_tree(0, 0b0011), Some(1));
assert_eq!(leaf_to_corresponding_tree(0, 0b1011), Some(3));
// position one is always owned by the left-most tree
assert_eq!(leaf_to_corresponding_tree(1, 0b0010), Some(1));
assert_eq!(leaf_to_corresponding_tree(1, 0b0011), Some(1));
assert_eq!(leaf_to_corresponding_tree(1, 0b1011), Some(3));
// position two starts as its own root, and then it is merged with the left-most tree
assert_eq!(leaf_to_corresponding_tree(2, 0b0011), Some(0));
assert_eq!(leaf_to_corresponding_tree(2, 0b0100), Some(2));
assert_eq!(leaf_to_corresponding_tree(2, 0b1011), Some(3));
// position tree is merged on the left-most tree
assert_eq!(leaf_to_corresponding_tree(3, 0b0011), None);
assert_eq!(leaf_to_corresponding_tree(3, 0b0100), Some(2));
assert_eq!(leaf_to_corresponding_tree(3, 0b1011), Some(3));
assert_eq!(leaf_to_corresponding_tree(4, 0b0101), Some(0));
assert_eq!(leaf_to_corresponding_tree(4, 0b0110), Some(1));
assert_eq!(leaf_to_corresponding_tree(4, 0b0111), Some(1));
assert_eq!(leaf_to_corresponding_tree(4, 0b1000), Some(3));
assert_eq!(leaf_to_corresponding_tree(12, 0b01101), Some(0));
assert_eq!(leaf_to_corresponding_tree(12, 0b01110), Some(1));
assert_eq!(leaf_to_corresponding_tree(12, 0b01111), Some(1));
assert_eq!(leaf_to_corresponding_tree(12, 0b10000), Some(4));
}
#[test]
fn test_high_bitmask() {
assert_eq!(high_bitmask(0), Forest::new(usize::MAX));
assert_eq!(high_bitmask(1), Forest::new(usize::MAX << 1));
assert_eq!(high_bitmask(usize::BITS - 2), Forest::new(0b11usize.rotate_right(2)));
assert_eq!(high_bitmask(usize::BITS - 1), Forest::new(0b1usize.rotate_right(1)));
assert_eq!(high_bitmask(usize::BITS), Forest::empty(), "overflow should be handled");
}
#[test]
fn test_nodes_in_forest() {
assert_eq!(nodes_in_forest(0b0000), 0);
assert_eq!(nodes_in_forest(0b0001), 1);
assert_eq!(nodes_in_forest(0b0010), 3);
assert_eq!(nodes_in_forest(0b0011), 4);
assert_eq!(nodes_in_forest(0b0100), 7);
assert_eq!(nodes_in_forest(0b0101), 8);
assert_eq!(nodes_in_forest(0b0110), 10);
assert_eq!(nodes_in_forest(0b0111), 11);
assert_eq!(nodes_in_forest(0b1000), 15);
assert_eq!(nodes_in_forest(0b1001), 16);
assert_eq!(nodes_in_forest(0b1010), 18);
assert_eq!(nodes_in_forest(0b1011), 19);
}
#[test]
fn test_nodes_in_forest_single_bit() {
assert_eq!(nodes_in_forest(2usize.pow(0)), 2usize.pow(1) - 1);
assert_eq!(nodes_in_forest(2usize.pow(1)), 2usize.pow(2) - 1);
assert_eq!(nodes_in_forest(2usize.pow(2)), 2usize.pow(3) - 1);
assert_eq!(nodes_in_forest(2usize.pow(3)), 2usize.pow(4) - 1);
for bit in 0..(usize::BITS - 1) {
let size = 2usize.pow(bit + 1) - 1;
assert_eq!(nodes_in_forest(1usize << bit), size);
}
}
#[test]
fn test_forest_largest_smallest_tree() {
// largest_tree and smallest_tree return correct results
let forest = Forest::new(0b1101_0100);
let largest = Forest::new(0b1000_0000);
let smallest = Forest::new(0b0000_0100);
assert_eq!(forest.largest_tree(), largest);
assert_eq!(forest.smallest_tree(), smallest);
// no trees in an empty forest
let empty_forest = Forest::new(0);
assert_eq!(empty_forest.largest_tree(), empty_forest);
assert_eq!(empty_forest.smallest_tree(), empty_forest);
}
#[test]
fn test_forest_to_root_index() {
fn idx(pos: usize) -> InOrderIndex {
InOrderIndex::new(pos.try_into().unwrap())
}
// When there is a single tree in the forest, the index is equivalent to the number of
// leaves in that tree, which is `2^n`.
assert_eq!(Forest::new(0b0001).root_in_order_index(), idx(1));
assert_eq!(Forest::new(0b0010).root_in_order_index(), idx(2));
assert_eq!(Forest::new(0b0100).root_in_order_index(), idx(4));
assert_eq!(Forest::new(0b1000).root_in_order_index(), idx(8));
assert_eq!(Forest::new(0b0011).root_in_order_index(), idx(5));
assert_eq!(Forest::new(0b0101).root_in_order_index(), idx(9));
assert_eq!(Forest::new(0b1001).root_in_order_index(), idx(17));
assert_eq!(Forest::new(0b0111).root_in_order_index(), idx(13));
assert_eq!(Forest::new(0b1011).root_in_order_index(), idx(21));
assert_eq!(Forest::new(0b1111).root_in_order_index(), idx(29));
assert_eq!(Forest::new(0b0110).root_in_order_index(), idx(10));
assert_eq!(Forest::new(0b1010).root_in_order_index(), idx(18));
assert_eq!(Forest::new(0b1100).root_in_order_index(), idx(20));
assert_eq!(Forest::new(0b1110).root_in_order_index(), idx(26));
}
#[test]
fn test_forest_to_rightmost_index() {
    fn idx(pos: usize) -> InOrderIndex {
        InOrderIndex::new(pos.try_into().unwrap())
    }
    // In-order indices of leaves are always odd numbers.
    for forest in 1..256 {
        assert!(
            Forest::new(forest).rightmost_in_order_index().inner() % 2 == 1,
            "Leaves are always odd"
        );
    }
    // Table of (forest bit pattern, expected in-order index of the
    // rightmost leaf).
    let cases = [
        (0b0001, 1),
        (0b0010, 3),
        (0b0011, 5),
        (0b0100, 7),
        (0b0101, 9),
        (0b0110, 11),
        (0b0111, 13),
        (0b1000, 15),
        (0b1001, 17),
        (0b1010, 19),
        (0b1011, 21),
        (0b1100, 23),
        (0b1101, 25),
        (0b1110, 27),
        (0b1111, 29),
    ];
    for (forest, expected) in cases {
        assert_eq!(Forest::new(forest).rightmost_in_order_index(), idx(expected));
    }
}
#[test]
fn test_bit_position_iterator() {
    // `TreeSizeIterator` yields one single-tree forest per set bit,
    // smallest tree first; `rev()` walks from the largest tree down.
    //
    // An empty forest yields nothing in either direction.
    assert_eq!(TreeSizeIterator::new(Forest::empty()).count(), 0);
    assert_eq!(TreeSizeIterator::new(Forest::empty()).rev().count(), 0);
    // Single-tree forests yield exactly that tree, forward or reversed.
    assert_eq!(
        TreeSizeIterator::new(Forest::new(1)).collect::<Vec<Forest>>(),
        vec![Forest::new(1)]
    );
    assert_eq!(
        TreeSizeIterator::new(Forest::new(1)).rev().collect::<Vec<Forest>>(),
        vec![Forest::new(1)],
    );
    assert_eq!(
        TreeSizeIterator::new(Forest::new(2)).collect::<Vec<Forest>>(),
        vec![Forest::new(2)]
    );
    assert_eq!(
        TreeSizeIterator::new(Forest::new(2)).rev().collect::<Vec<Forest>>(),
        vec![Forest::new(2)],
    );
    // Two-tree forest: ascending size order forward ...
    assert_eq!(
        TreeSizeIterator::new(Forest::new(3)).collect::<Vec<Forest>>(),
        vec![Forest::new(1), Forest::new(2)],
    );
    // ... and descending order when reversed.
    assert_eq!(
        TreeSizeIterator::new(Forest::new(3)).rev().collect::<Vec<Forest>>(),
        vec![Forest::new(2), Forest::new(1)],
    );
    // Multi-tree forest: each yielded forest holds a single tree, one per
    // set bit, visited from lowest to highest bit.
    assert_eq!(
        TreeSizeIterator::new(Forest::new(0b11010101)).collect::<Vec<Forest>>(),
        vec![0, 2, 4, 6, 7]
            .into_iter()
            .map(|bit| Forest::new(1 << bit))
            .collect::<Vec<_>>()
    );
    assert_eq!(
        TreeSizeIterator::new(Forest::new(0b11010101)).rev().collect::<Vec<Forest>>(),
        vec![7, 6, 4, 2, 0]
            .into_iter()
            .map(|bit| Forest::new(1 << bit))
            .collect::<Vec<_>>()
    );
    // Walk a concrete forest and check the properties of each yielded tree;
    // the caret below each pattern marks the bit being visited.
    let forest = Forest::new(0b1101_0101);
    let mut it = TreeSizeIterator::new(forest);
    // 0b1101_0101
    //           ^
    let smallest = it.next().unwrap();
    assert_eq!(smallest.smallest_tree_unchecked(), smallest);
    assert_eq!(smallest.num_leaves(), 0b0000_0001);
    assert_eq!(smallest.num_nodes(), 1);
    assert_eq!(smallest.num_trees(), 1);
    // 0b1101_0101
    //         ^
    let next_smallest = it.next().unwrap();
    assert_eq!(next_smallest.smallest_tree_unchecked(), next_smallest);
    assert_eq!(next_smallest.num_leaves(), 0b0000_0100);
    assert_eq!(next_smallest.num_nodes(), 0b0000_0111);
    assert_eq!(next_smallest.num_trees(), 1);
    // 0b1101_0101
    //      ^
    let next_smallest = it.next().unwrap();
    assert_eq!(next_smallest.smallest_tree_unchecked(), next_smallest);
    assert_eq!(next_smallest.num_leaves(), 0b0001_0000);
    assert_eq!(next_smallest.num_nodes(), 0b0001_1111);
    assert_eq!(next_smallest.num_trees(), 1);
    // 0b1101_0101
    //    ^
    let next_smallest = it.next().unwrap();
    assert_eq!(next_smallest.smallest_tree_unchecked(), next_smallest);
    assert_eq!(next_smallest.num_leaves(), 0b0100_0000);
    assert_eq!(next_smallest.num_nodes(), 0b0111_1111);
    assert_eq!(next_smallest.num_trees(), 1);
    // 0b1101_0101
    //   ^
    let next_smallest = it.next().unwrap();
    assert_eq!(next_smallest.smallest_tree_unchecked(), next_smallest);
    assert_eq!(next_smallest.num_leaves(), 0b1000_0000);
    assert_eq!(next_smallest.num_nodes(), 0b1111_1111);
    assert_eq!(next_smallest.num_trees(), 1);
    // All five set bits have been visited; the iterator is exhausted.
    assert_eq!(it.next(), None);
}
/// Shared test fixture: seven distinct leaves derived from the integers 0..=6.
/// Seven leaves (0b111) produce an MMR with trees of 4, 2, and 1 leaves.
const LEAVES: [Word; 7] = [
    int_to_node(0),
    int_to_node(1),
    int_to_node(2),
    int_to_node(3),
    int_to_node(4),
    int_to_node(5),
    int_to_node(6),
];
#[test]
fn test_mmr_simple() {
    // Expected node storage in post-order: leaves interleaved with the
    // parents created whenever two equal-sized trees merge.
    let mut postorder = vec![
        LEAVES[0],
        LEAVES[1],
        merge(LEAVES[0], LEAVES[1]),
        LEAVES[2],
        LEAVES[3],
        merge(LEAVES[2], LEAVES[3]),
    ];
    postorder.push(merge(postorder[2], postorder[5]));
    postorder.push(LEAVES[4]);
    postorder.push(LEAVES[5]);
    postorder.push(merge(LEAVES[4], LEAVES[5]));
    postorder.push(LEAVES[6]);
    // A freshly created MMR is empty.
    let mut mmr = Mmr::new();
    assert_eq!(mmr.forest().num_leaves(), 0);
    assert_eq!(mmr.nodes.len(), 0);
    // After every add: the node storage must be a prefix of the expected
    // post-order list, and the peaks must match the forest structure.
    //
    // 1 leaf: single peak, single node.
    mmr.add(LEAVES[0]);
    assert_eq!(mmr.forest().num_leaves(), 1);
    assert_eq!(mmr.nodes.len(), 1);
    assert_eq!(mmr.nodes.as_slice(), &postorder[0..mmr.nodes.len()]);
    let acc = mmr.peaks();
    assert_eq!(acc.num_leaves(), 1);
    assert_eq!(acc.peaks(), &[postorder[0]]);
    // 2 leaves: the two leaves merge into a 3-node tree with one peak.
    mmr.add(LEAVES[1]);
    assert_eq!(mmr.forest().num_leaves(), 2);
    assert_eq!(mmr.nodes.len(), 3);
    assert_eq!(mmr.nodes.as_slice(), &postorder[0..mmr.nodes.len()]);
    let acc = mmr.peaks();
    assert_eq!(acc.num_leaves(), 2);
    assert_eq!(acc.peaks(), &[postorder[2]]);
    // 3 leaves (0b11): two peaks — the 2-leaf tree and the new leaf.
    mmr.add(LEAVES[2]);
    assert_eq!(mmr.forest().num_leaves(), 3);
    assert_eq!(mmr.nodes.len(), 4);
    assert_eq!(mmr.nodes.as_slice(), &postorder[0..mmr.nodes.len()]);
    let acc = mmr.peaks();
    assert_eq!(acc.num_leaves(), 3);
    assert_eq!(acc.peaks(), &[postorder[2], postorder[3]]);
    // 4 leaves (0b100): cascading merge into a single 7-node tree.
    mmr.add(LEAVES[3]);
    assert_eq!(mmr.forest().num_leaves(), 4);
    assert_eq!(mmr.nodes.len(), 7);
    assert_eq!(mmr.nodes.as_slice(), &postorder[0..mmr.nodes.len()]);
    let acc = mmr.peaks();
    assert_eq!(acc.num_leaves(), 4);
    assert_eq!(acc.peaks(), &[postorder[6]]);
    // 5 leaves (0b101): the 4-leaf tree plus a lone leaf.
    mmr.add(LEAVES[4]);
    assert_eq!(mmr.forest().num_leaves(), 5);
    assert_eq!(mmr.nodes.len(), 8);
    assert_eq!(mmr.nodes.as_slice(), &postorder[0..mmr.nodes.len()]);
    let acc = mmr.peaks();
    assert_eq!(acc.num_leaves(), 5);
    assert_eq!(acc.peaks(), &[postorder[6], postorder[7]]);
    // 6 leaves (0b110): leaves 4 and 5 merge into a 2-leaf tree.
    mmr.add(LEAVES[5]);
    assert_eq!(mmr.forest().num_leaves(), 6);
    assert_eq!(mmr.nodes.len(), 10);
    assert_eq!(mmr.nodes.as_slice(), &postorder[0..mmr.nodes.len()]);
    let acc = mmr.peaks();
    assert_eq!(acc.num_leaves(), 6);
    assert_eq!(acc.peaks(), &[postorder[6], postorder[9]]);
    // 7 leaves (0b111): three peaks — 4-leaf, 2-leaf, and 1-leaf trees.
    mmr.add(LEAVES[6]);
    assert_eq!(mmr.forest().num_leaves(), 7);
    assert_eq!(mmr.nodes.len(), 11);
    assert_eq!(mmr.nodes.as_slice(), &postorder[0..mmr.nodes.len()]);
    let acc = mmr.peaks();
    assert_eq!(acc.num_leaves(), 7);
    assert_eq!(acc.peaks(), &[postorder[6], postorder[9], postorder[10]]);
}
/// Tests `Mmr::open` on a 7-leaf MMR (trees with 4, 2, and 1 leaves).
///
/// Fixes from review: the failure message claimed the result "should be
/// None", but `open` returns a `Result` — the expectation is an error.
/// Also corrects the "detph" typo and the misleading "pos 7 is the root"
/// comment (position 7 is simply out of range).
#[test]
fn test_mmr_open() {
    let mmr: Mmr = LEAVES.into();
    let h01 = merge(LEAVES[0], LEAVES[1]);
    let h23 = merge(LEAVES[2], LEAVES[3]);
    // position 7 is out of range: the MMR only contains leaves 0..=6
    assert!(mmr.open(7).is_err(), "Element 7 is not in the tree, result should be an error");
    // the node at pos 6 is a single-leaf tree, so its merkle path is empty
    let empty: MerklePath = MerklePath::new(vec![]);
    let opening = mmr
        .open(6)
        .expect("Element 6 is contained in the tree, expected an opening result.");
    assert_eq!(opening.path().merkle_path(), &empty);
    assert_eq!(opening.path().forest(), mmr.forest);
    assert_eq!(opening.path().position(), 6);
    mmr.peaks().verify(LEAVES[6], opening).unwrap();
    // nodes 4,5 are depth 1
    let root_to_path = MerklePath::new(vec![LEAVES[4]]);
    let opening = mmr
        .open(5)
        .expect("Element 5 is contained in the tree, expected an opening result.");
    assert_eq!(opening.path().merkle_path(), &root_to_path);
    assert_eq!(opening.path().forest(), mmr.forest);
    assert_eq!(opening.path().position(), 5);
    mmr.peaks().verify(LEAVES[5], opening).unwrap();
    let root_to_path = MerklePath::new(vec![LEAVES[5]]);
    let opening = mmr
        .open(4)
        .expect("Element 4 is contained in the tree, expected an opening result.");
    assert_eq!(opening.path().merkle_path(), &root_to_path);
    assert_eq!(opening.path().forest(), mmr.forest);
    assert_eq!(opening.path().position(), 4);
    mmr.peaks().verify(LEAVES[4], opening).unwrap();
    // nodes 0,1,2,3 are depth 2
    let root_to_path = MerklePath::new(vec![LEAVES[2], h01]);
    let opening = mmr
        .open(3)
        .expect("Element 3 is contained in the tree, expected an opening result.");
    assert_eq!(opening.path().merkle_path(), &root_to_path);
    assert_eq!(opening.path().forest(), mmr.forest);
    assert_eq!(opening.path().position(), 3);
    mmr.peaks().verify(LEAVES[3], opening).unwrap();
    let root_to_path = MerklePath::new(vec![LEAVES[3], h01]);
    let opening = mmr
        .open(2)
        .expect("Element 2 is contained in the tree, expected an opening result.");
    assert_eq!(opening.path().merkle_path(), &root_to_path);
    assert_eq!(opening.path().forest(), mmr.forest);
    assert_eq!(opening.path().position(), 2);
    mmr.peaks().verify(LEAVES[2], opening).unwrap();
    let root_to_path = MerklePath::new(vec![LEAVES[0], h23]);
    let opening = mmr
        .open(1)
        .expect("Element 1 is contained in the tree, expected an opening result.");
    assert_eq!(opening.path().merkle_path(), &root_to_path);
    assert_eq!(opening.path().forest(), mmr.forest);
    assert_eq!(opening.path().position(), 1);
    mmr.peaks().verify(LEAVES[1], opening).unwrap();
    let root_to_path = MerklePath::new(vec![LEAVES[1], h23]);
    let opening = mmr
        .open(0)
        .expect("Element 0 is contained in the tree, expected an opening result.");
    assert_eq!(opening.path().merkle_path(), &root_to_path);
    assert_eq!(opening.path().forest(), mmr.forest);
    assert_eq!(opening.path().position(), 0);
    mmr.peaks().verify(LEAVES[0], opening).unwrap();
}
#[test]
fn test_mmr_open_older_version() {
    let mmr: Mmr = LEAVES.into();
    // An even position is always the latest, still-unpaired leaf of the
    // forest version `pos + 1`.
    fn is_even(v: &usize) -> bool {
        v & 1 == 0
    }
    // merkle path of a node is empty if there are no elements to pair with it
    for pos in (0..mmr.forest().num_leaves()).filter(is_even) {
        let forest = Forest::new(pos + 1);
        let proof = mmr.open_at(pos, forest).unwrap();
        assert_eq!(proof.path().forest(), forest);
        assert_eq!(proof.path().merkle_path().nodes(), []);
        assert_eq!(proof.path().position(), pos);
    }
    // openings match that of a merkle tree
    // (the 4-leaf tree is complete from forest version 4 onwards, so its
    // openings are stable across all later versions)
    let mtree: MerkleTree = LEAVES[..4].try_into().unwrap();
    for forest in 4..=LEAVES.len() {
        let forest = Forest::new(forest);
        for pos in 0..4 {
            let idx = NodeIndex::new(2, pos).unwrap();
            let path = mtree.get_path(idx).unwrap();
            let proof = mmr.open_at(pos as usize, forest).unwrap();
            assert_eq!(path, *proof.path().merkle_path());
        }
    }
    // (the 2-leaf tree over leaves 4..6 is complete from version 6 onwards)
    let mtree: MerkleTree = LEAVES[4..6].try_into().unwrap();
    for forest in 6..=LEAVES.len() {
        let forest = Forest::new(forest);
        for pos in 0..2 {
            let idx = NodeIndex::new(1, pos).unwrap();
            let path = mtree.get_path(idx).unwrap();
            // account for the bigger tree with 4 elements
            let mmr_pos = (pos + 4) as usize;
            let proof = mmr.open_at(mmr_pos, forest).unwrap();
            assert_eq!(path, *proof.path().merkle_path());
        }
    }
}
/// Tests the openings of a simple Mmr with a single tree of depth 8.
///
/// Refactored from eight copy-pasted, per-position segments into a single
/// loop over all leaf positions; the assertions are unchanged.
#[test]
fn test_mmr_open_eight() {
    let leaves = [
        int_to_node(0),
        int_to_node(1),
        int_to_node(2),
        int_to_node(3),
        int_to_node(4),
        int_to_node(5),
        int_to_node(6),
        int_to_node(7),
    ];
    let mtree: MerkleTree = leaves.as_slice().try_into().unwrap();
    let forest = Forest::new(leaves.len());
    let mmr: Mmr = leaves.into();
    let root = mtree.root();
    // Every leaf of the single 8-leaf tree must open to a proof whose
    // Merkle path (depth 3 in the reference tree) matches the equivalent
    // `MerkleTree` path and hashes back to the tree root.
    for position in 0..leaves.len() {
        let proof = mmr.open(position).unwrap();
        let merkle_path = mtree.get_path(NodeIndex::new(3, position as u64).unwrap()).unwrap();
        assert_eq!(
            proof,
            MmrProof::new(MmrPath::new(forest, position, merkle_path), leaves[position])
        );
        assert_eq!(
            proof
                .path()
                .merkle_path()
                .compute_root(position as u64, leaves[position])
                .unwrap(),
            root
        );
    }
}
/// Tests the openings of Mmr with a 3 trees of depths 4, 2, and 1.
#[test]
fn test_mmr_open_seven() {
    // Reference trees for the two multi-leaf subtrees of the 7-leaf MMR.
    let mtree1: MerkleTree = LEAVES[..4].try_into().unwrap();
    let mtree2: MerkleTree = LEAVES[4..6].try_into().unwrap();
    let forest = Forest::new(LEAVES.len());
    let mmr: Mmr = LEAVES.into();
    // Positions 0..=3 live in the 4-leaf tree; their paths have depth 2 and
    // hash back to `mtree1`'s root.
    let position = 0;
    let proof = mmr.open(position).unwrap();
    let merkle_path: MerklePath =
        mtree1.get_path(NodeIndex::new(2, position as u64).unwrap()).unwrap();
    assert_eq!(
        proof,
        MmrProof::new(MmrPath::new(forest, position, merkle_path), LEAVES[position])
    );
    assert_eq!(proof.path().merkle_path().compute_root(0, LEAVES[0]).unwrap(), mtree1.root());
    let position = 1;
    let proof = mmr.open(position).unwrap();
    let merkle_path: MerklePath =
        mtree1.get_path(NodeIndex::new(2, position as u64).unwrap()).unwrap();
    assert_eq!(
        proof,
        MmrProof::new(MmrPath::new(forest, position, merkle_path), LEAVES[position])
    );
    assert_eq!(proof.path().merkle_path().compute_root(1, LEAVES[1]).unwrap(), mtree1.root());
    let position = 2;
    let proof = mmr.open(position).unwrap();
    let merkle_path: MerklePath =
        mtree1.get_path(NodeIndex::new(2, position as u64).unwrap()).unwrap();
    assert_eq!(
        proof,
        MmrProof::new(MmrPath::new(forest, position, merkle_path), LEAVES[position])
    );
    assert_eq!(proof.path().merkle_path().compute_root(2, LEAVES[2]).unwrap(), mtree1.root());
    let position = 3;
    let proof = mmr.open(position).unwrap();
    let merkle_path: MerklePath =
        mtree1.get_path(NodeIndex::new(2, position as u64).unwrap()).unwrap();
    assert_eq!(
        proof,
        MmrProof::new(MmrPath::new(forest, position, merkle_path), LEAVES[position])
    );
    assert_eq!(proof.path().merkle_path().compute_root(3, LEAVES[3]).unwrap(), mtree1.root());
    // Positions 4..=5 live in the 2-leaf tree; note the position inside the
    // subtree (0 or 1) differs from the MMR position (4 or 5).
    let position = 4;
    let proof = mmr.open(position).unwrap();
    let merkle_path: MerklePath = mtree2.get_path(NodeIndex::new(1, 0u64).unwrap()).unwrap();
    assert_eq!(
        proof,
        MmrProof::new(MmrPath::new(forest, position, merkle_path), LEAVES[position])
    );
    assert_eq!(proof.path().merkle_path().compute_root(0, LEAVES[4]).unwrap(), mtree2.root());
    let position = 5;
    let proof = mmr.open(position).unwrap();
    let merkle_path: MerklePath = mtree2.get_path(NodeIndex::new(1, 1u64).unwrap()).unwrap();
    assert_eq!(
        proof,
        MmrProof::new(MmrPath::new(forest, position, merkle_path), LEAVES[position])
    );
    assert_eq!(proof.path().merkle_path().compute_root(1, LEAVES[5]).unwrap(), mtree2.root());
    // Position 6 is a single-leaf tree: empty path, the leaf is its own root.
    let position = 6;
    let proof = mmr.open(position).unwrap();
    let merkle_path: MerklePath = [].as_ref().into();
    assert_eq!(
        proof,
        MmrProof::new(MmrPath::new(forest, position, merkle_path), LEAVES[position])
    );
    assert_eq!(proof.path().merkle_path().compute_root(0, LEAVES[6]).unwrap(), LEAVES[6]);
}
#[test]
fn test_mmr_get() {
    let mmr: Mmr = LEAVES.into();
    // Every valid position must return the leaf it was built from.
    for (pos, leaf) in LEAVES.iter().enumerate() {
        assert_eq!(mmr.get(pos).unwrap(), *leaf, "value at pos {pos} must correspond");
    }
    // One past the last leaf is rejected.
    assert!(mmr.get(LEAVES.len()).is_err());
}
/// Checks structural invariants of the MMR after every single insertion.
#[test]
fn test_mmr_invariants() {
    let mut mmr = Mmr::new();
    for v in 1..=1028 {
        mmr.add(int_to_node(v));
        let accumulator = mmr.peaks();
        // the forest version equals the number of leaves added so far
        assert_eq!(
            v as usize,
            mmr.forest().num_leaves(),
            "MMR leaf count must increase by one on every add"
        );
        assert_eq!(
            v as usize,
            accumulator.num_leaves(),
            "MMR and its accumulator must match leaves count"
        );
        // one peak per tree, i.e. per set bit of the leaf count
        assert_eq!(
            accumulator.num_leaves().count_ones() as usize,
            accumulator.peaks().len(),
            "bits on leaves must match the number of peaks"
        );
        // the node storage must account for every node of every tree
        let expected_nodes: usize =
            TreeSizeIterator::new(mmr.forest()).map(|tree| tree.num_nodes()).sum();
        assert_eq!(
            expected_nodes,
            mmr.nodes.len(),
            "the sum of every tree size must be equal to the number of nodes in the MMR (forest: {:b})",
            mmr.forest(),
        );
    }
}
#[test]
fn test_mmr_inner_nodes() {
    let mmr: Mmr = LEAVES.into();
    let actual: Vec<InnerNodeInfo> = mmr.inner_nodes().collect();

    // Rebuild the expected inner nodes by hand: the 7-leaf MMR has four
    // inner nodes, visited in post-order.
    let h01 = Rpo256::merge(&[LEAVES[0], LEAVES[1]]);
    let h23 = Rpo256::merge(&[LEAVES[2], LEAVES[3]]);
    let h0123 = Rpo256::merge(&[h01, h23]);
    let h45 = Rpo256::merge(&[LEAVES[4], LEAVES[5]]);
    let expected = vec![
        InnerNodeInfo { value: h01, left: LEAVES[0], right: LEAVES[1] },
        InnerNodeInfo { value: h23, left: LEAVES[2], right: LEAVES[3] },
        InnerNodeInfo { value: h0123, left: h01, right: h23 },
        InnerNodeInfo { value: h45, left: LEAVES[4], right: LEAVES[5] },
    ];
    assert_eq!(expected, actual);
}
#[test]
fn test_mmr_peaks() {
    let mmr: Mmr = LEAVES.into();
    // (forest bit pattern, indices into `mmr.nodes` of the expected peaks,
    // largest tree first).
    let cases: [(usize, &[usize]); 7] = [
        (0b0001, &[0]),
        (0b0010, &[2]),
        (0b0011, &[2, 3]),
        (0b0100, &[6]),
        (0b0101, &[6, 7]),
        (0b0110, &[6, 9]),
        (0b0111, &[6, 9, 10]),
    ];
    for (bits, peak_indices) in cases {
        let forest = Forest::new(bits);
        let acc = mmr.peaks_at(forest).unwrap();
        assert_eq!(acc.num_leaves(), forest.num_leaves());
        let expected: Vec<_> = peak_indices.iter().map(|&i| mmr.nodes[i]).collect();
        assert_eq!(acc.peaks(), expected.as_slice());
    }
}
#[test]
fn test_mmr_hash_peaks() {
    let mmr: Mmr = LEAVES.into();
    let peaks = mmr.peaks();
    // With 7 leaves (0b111) the forest has three peaks: the 4-leaf tree's
    // root, the 2-leaf tree's root, and the lone leaf itself.
    let first_peak = Rpo256::merge(&[
        Rpo256::merge(&[LEAVES[0], LEAVES[1]]),
        Rpo256::merge(&[LEAVES[2], LEAVES[3]]),
    ]);
    let second_peak = Rpo256::merge(&[LEAVES[4], LEAVES[5]]);
    let third_peak = LEAVES[6];
    // minimum length is 16: the peak list is zero-padded before hashing
    let mut expected_peaks = [first_peak, second_peak, third_peak].to_vec();
    expected_peaks.resize(16, Word::default());
    assert_eq!(peaks.hash_peaks(), Rpo256::hash_elements(&digests_to_elements(&expected_peaks)));
}
#[test]
fn test_mmr_peaks_hash_less_than_16() {
    let mut peaks = Vec::new();
    // Build accumulators with 1 to 16 peaks and check the hash always pads
    // the peak list to length 16.
    for i in 0..16 {
        peaks.push(int_to_node(i));
        // A forest with exactly `peaks.len()` trees: all bits below
        // `1 << peaks.len()` set.
        let forest = Forest::new(1 << peaks.len()).all_smaller_trees().unwrap();
        let accumulator = MmrPeaks::new(forest, peaks.clone()).unwrap();
        // minimum length is 16
        let mut expected_peaks = peaks.clone();
        expected_peaks.resize(16, Word::default());
        assert_eq!(
            accumulator.hash_peaks(),
            Rpo256::hash_elements(&digests_to_elements(&expected_peaks))
        );
    }
}
#[test]
fn test_mmr_peaks_hash_odd() {
    // 18 peaks: more than the 16-element minimum, and even after padding.
    let peaks: Vec<_> = (0..=17).map(int_to_node).collect();
    let forest = Forest::new(1 << peaks.len()).all_smaller_trees_unchecked();
    let accumulator = MmrPeaks::new(forest, peaks.clone()).unwrap();
    // odd length bigger than 16 is padded to the next even number
    let mut expected_peaks = peaks;
    expected_peaks.resize(18, Word::default());
    assert_eq!(
        accumulator.hash_peaks(),
        Rpo256::hash_elements(&digests_to_elements(&expected_peaks))
    );
}
#[test]
fn test_mmr_delta() {
let mmr: Mmr = LEAVES.into();
let acc = mmr.peaks();
// original_forest can't have more elements
assert!(
mmr.get_delta(Forest::new(LEAVES.len() + 1), mmr.forest()).is_err(),
"Can not provide updates for a newer Mmr"
);
// if the number of elements is the same there is no change
assert!(
mmr.get_delta(Forest::new(LEAVES.len()), mmr.forest()).unwrap().data.is_empty(),
"There are no updates for the same Mmr version"
);
// missing the last element added, which is itself a tree peak
assert_eq!(
mmr.get_delta(Forest::new(6), mmr.forest()).unwrap().data,
vec![acc.peaks()[2]],
"one peak"
);
// missing the sibling to complete the tree of depth 2, and the last element
assert_eq!(
mmr.get_delta(Forest::new(5), mmr.forest()).unwrap().data,
vec![LEAVES[5], acc.peaks()[2]],
"one sibling, one peak"
);
// missing the whole last two trees, only send the peaks
assert_eq!(
mmr.get_delta(Forest::new(4), mmr.forest()).unwrap().data,
vec![acc.peaks()[1], acc.peaks()[2]],
"two peaks"
);
// missing the sibling to complete the first tree, and the two last trees
assert_eq!(
mmr.get_delta(Forest::new(3), mmr.forest()).unwrap().data,
vec![LEAVES[3], acc.peaks()[1], acc.peaks()[2]],
"one sibling, two peaks"
);
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | true |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/mmr/delta.rs | miden-crypto/src/merkle/mmr/delta.rs | use alloc::vec::Vec;
use super::{super::Word, forest::Forest};
/// Container for the update data of a [super::PartialMmr]
#[derive(Debug)]
pub struct MmrDelta {
    /// The new version of the [super::Mmr]
    ///
    /// (i.e. the forest describing the MMR after applying `data`).
    pub forest: Forest,
    /// Update data.
    ///
    /// The data is packed as follows:
    /// 1. All the elements needed to perform authentication path updates. These are the right
    /// siblings required to perform tree merges on the [super::PartialMmr].
    /// 2. The new peaks.
    pub data: Vec<Word>,
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/mmr/forest.rs | miden-crypto/src/merkle/mmr/forest.rs | use core::{
fmt::{Binary, Display},
ops::{BitAnd, BitOr, BitXor, BitXorAssign},
};
use super::InOrderIndex;
use crate::{
Felt,
field::PrimeField64,
utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable},
};
/// A compact representation of trees in a forest. Used in the Merkle forest (MMR).
///
/// Each active bit of the stored number represents a disjoint tree with number of leaves
/// equal to the bit position.
///
/// The forest value has the following interpretations:
/// - its value is the number of leaves in the forest
/// - the version number (MMR is append only so the number of leaves always increases)
/// - bit count corresponds to the number of trees in the forest
/// - each true bit position determines the depth of a tree in the forest
///
/// Examples:
/// - `Forest(0)` is a forest with no trees.
/// - `Forest(0b01)` is a forest with a single leaf/node (the smallest tree possible).
/// - `Forest(0b10)` is a forest with a single binary tree with 2 leaves (3 nodes).
/// - `Forest(0b11)` is a forest with two trees: one with 1 leaf (1 node), and one with 2 leaves (3
///   nodes).
/// - `Forest(0b1010)` is a forest with two trees: one with 8 leaves (15 nodes), one with 2 leaves
///   (3 nodes).
/// - `Forest(0b1000)` is a forest with one tree, which has 8 leaves (15 nodes).
#[derive(Debug, Copy, Clone, Default, PartialEq, Eq, PartialOrd, Ord)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct Forest(usize);
impl Forest {
    /// Creates an empty forest (no trees).
    ///
    /// Equivalent to `Forest::new(0)`.
    pub const fn empty() -> Self {
        Self(0)
    }
    /// Creates a forest with `num_leaves` leaves.
    ///
    /// Each set bit of `num_leaves` corresponds to one complete binary tree in the forest
    /// (see the type-level documentation).
    pub const fn new(num_leaves: usize) -> Self {
        Self(num_leaves)
    }
    /// Creates a forest with a given height.
    ///
    /// This is equivalent to `Forest::new(1 << height)`, i.e. a forest holding exactly one
    /// tree of the given height.
    ///
    /// # Panics
    ///
    /// This will panic if `height` is greater than `usize::BITS - 1`.
    pub const fn with_height(height: usize) -> Self {
        // Guard the shift: `1 << height` is only defined for height < usize::BITS.
        assert!(height < usize::BITS as usize);
        Self::new(1 << height)
    }
    /// Returns true if there are no trees in the forest.
    pub fn is_empty(self) -> bool {
        self.0 == 0
    }
    /// Adds exactly one more leaf to the capacity of this forest.
    ///
    /// Some smaller trees might be merged together.
    // NOTE(review): increments the leaf count; in debug builds this panics if the count
    // would overflow `usize` — unreachable for realistic forests.
    pub fn append_leaf(&mut self) {
        self.0 += 1;
    }
    /// Returns a count of leaves in the entire underlying forest (MMR).
    pub fn num_leaves(self) -> usize {
        self.0
    }
    /// Return the total number of nodes of a given forest.
    ///
    /// A complete binary tree with `l` leaves has `2*l - 1` nodes, so summing over all trees
    /// yields `2 * num_leaves - num_trees`.
    ///
    /// # Panics
    ///
    /// This will panic if the forest has size greater than `usize::MAX / 2 + 1`.
    pub const fn num_nodes(self) -> usize {
        assert!(self.0 <= usize::MAX / 2 + 1);
        if self.0 <= usize::MAX / 2 {
            self.0 * 2 - self.num_trees()
        } else {
            // If `self.0 > usize::MAX / 2` then we need 128-bit math to double it.
            let (inner, num_trees) = (self.0 as u128, self.num_trees() as u128);
            (inner * 2 - num_trees) as usize
        }
    }
    /// Return the total number of trees of a given forest (the number of active bits).
    pub const fn num_trees(self) -> usize {
        self.0.count_ones() as usize
    }
    /// Returns the height (bit position) of the largest tree in the forest.
    ///
    /// # Panics
    ///
    /// This will panic if the forest is empty (`ilog2` of zero).
    pub fn largest_tree_height_unchecked(self) -> usize {
        // ilog2 is computed with leading zeros, which itself is computed with the intrinsic ctlz.
        // [Rust 1.67.0] x86 uses the `bsr` instruction. AArch64 uses the `clz` instruction.
        self.0.ilog2() as usize
    }
/// Returns the height (bit position) of the largest tree in the forest.
///
/// If the forest cannot be empty, use [`largest_tree_height_unchecked`] for performance.
///
/// [`largest_tree_height_unchecked`]: Self::largest_tree_height_unchecked
pub fn largest_tree_height(self) -> Option<usize> {
if self.is_empty() {
return None;
}
Some(self.largest_tree_height_unchecked())
}
    /// Returns a forest with only the largest tree present.
    ///
    /// # Panics
    ///
    /// This will panic if the forest is empty.
    pub fn largest_tree_unchecked(self) -> Self {
        // Isolate the highest set bit by rebuilding a single-tree forest at that height.
        Self::with_height(self.largest_tree_height_unchecked())
    }
/// Returns a forest with only the largest tree present.
///
/// If forest cannot be empty, use `largest_tree` for better performance.
pub fn largest_tree(self) -> Self {
if self.is_empty() {
return Self::empty();
}
self.largest_tree_unchecked()
}
    /// Returns the height (bit position) of the smallest tree in the forest.
    ///
    /// # Panics
    ///
    /// This will panic if the forest is empty.
    // NOTE(review): `trailing_zeros` itself returns `usize::BITS` for an empty forest rather
    // than panicking; callers downstream (e.g. `with_height`) are what reject that value.
    pub fn smallest_tree_height_unchecked(self) -> usize {
        // Trailing_zeros is computed with the intrinsic cttz. [Rust 1.67.0] x86 uses the `bsf`
        // instruction. AArch64 uses the `rbit clz` instructions.
        self.0.trailing_zeros() as usize
    }
/// Returns the height (bit position) of the smallest tree in the forest.
///
/// If the forest cannot be empty, use [`smallest_tree_height_unchecked`] for better
/// performance.
///
/// [`smallest_tree_height_unchecked`]: Self::smallest_tree_height_unchecked
pub fn smallest_tree_height(self) -> Option<usize> {
if self.is_empty() {
return None;
}
Some(self.smallest_tree_height_unchecked())
}
    /// Returns a forest with only the smallest tree present.
    ///
    /// # Panics
    ///
    /// This will panic if the forest is empty.
    pub fn smallest_tree_unchecked(self) -> Self {
        // Isolate the lowest set bit by rebuilding a single-tree forest at that height.
        Self::with_height(self.smallest_tree_height_unchecked())
    }
/// Returns a forest with only the smallest tree present.
///
/// If forest cannot be empty, use `smallest_tree` for performance.
pub fn smallest_tree(self) -> Self {
if self.is_empty() {
return Self::empty();
}
self.smallest_tree_unchecked()
}
    /// Keeps only trees larger than the reference tree.
    ///
    /// For example, if we start with the bit pattern `0b0101_0110`, and keep only the trees larger
    /// than tree index 1, that targets this bit:
    /// ```text
    /// Forest(0b0101_0110).trees_larger_than(1)
    ///                ^
    /// Becomes: 0b0101_0100
    ///                    ^
    /// ```
    /// And keeps only trees *after* that bit, meaning that the tree at `tree_idx` is also removed,
    /// resulting in `0b0101_0100`.
    ///
    /// ```
    /// # use miden_crypto::merkle::mmr::Forest;
    /// let range = Forest::new(0b0101_0110);
    /// assert_eq!(range.trees_larger_than(1), Forest::new(0b0101_0100));
    /// ```
    // NOTE(review): `tree_idx + 1` would overflow for `tree_idx == u32::MAX` — unreachable for
    // realistic forests since tree indices are bit positions (< usize::BITS).
    pub fn trees_larger_than(self, tree_idx: u32) -> Self {
        // Mask away bit `tree_idx` and everything below it.
        self & high_bitmask(tree_idx + 1)
    }
    /// Creates a new forest with all possible trees smaller than the smallest tree in this
    /// forest.
    ///
    /// This forest must have exactly one tree.
    ///
    /// # Panics
    /// With debug assertions enabled, this function panics if this forest does not have
    /// exactly one tree.
    ///
    /// For a non-panicking version of this function, see [`Forest::all_smaller_trees()`].
    pub fn all_smaller_trees_unchecked(self) -> Self {
        // The single-tree precondition is only enforced in debug builds.
        debug_assert_eq!(self.num_trees(), 1);
        // With exactly one set bit, subtracting 1 clears it and sets every lower bit,
        // i.e. one tree of every smaller size.
        Self::new(self.0 - 1)
    }
/// Creates a new forest with all possible trees smaller than the smallest tree in this
/// forest, or returns `None` if this forest has more or less than one tree.
///
/// If the forest cannot have more or less than one tree, use
/// [`Forest::all_smaller_trees_unchecked()`] for performance.
pub fn all_smaller_trees(self) -> Option<Forest> {
if self.num_trees() != 1 {
return None;
}
Some(self.all_smaller_trees_unchecked())
}
    /// Returns a forest with exactly one tree, one size (depth) larger than the current one.
    ///
    /// The forest must contain exactly one tree; this is only checked in debug builds.
    pub fn next_larger_tree(self) -> Self {
        debug_assert_eq!(self.num_trees(), 1);
        // Shifting the single set bit left doubles the leaf count, i.e. one extra level.
        Forest(self.0 << 1)
    }
    /// Returns true if the forest contains a single-node tree.
    ///
    /// The single-leaf tree is represented by bit 0.
    pub fn has_single_leaf_tree(self) -> bool {
        self.0 & 1 != 0
    }
    /// Add a single-node tree if not already present in the forest.
    ///
    /// Sets bit 0; a no-op if it is already set.
    pub fn with_single_leaf(self) -> Self {
        Self::new(self.0 | 1)
    }
/// Remove the single-node tree if present in the forest.
pub fn without_single_leaf(self) -> Self {
Self::new(self.0 & (usize::MAX - 1))
}
    /// Returns a new forest that does not have the trees that `other` has.
    // NOTE(review): implemented as XOR, so this only *removes* trees when `other`'s trees are
    // a subset of `self`'s; trees present only in `other` would be added instead — confirm
    // callers uphold the subset invariant.
    pub fn without_trees(self, other: Forest) -> Self {
        self ^ other
    }
/// Returns index of the forest tree for a specified leaf index.
pub fn tree_index(&self, leaf_idx: usize) -> usize {
let root = self
.leaf_to_corresponding_tree(leaf_idx)
.expect("position must be part of the forest");
let smaller_tree_mask = Self::new(2_usize.pow(root) - 1);
let num_smaller_trees = (*self & smaller_tree_mask).num_trees();
self.num_trees() - num_smaller_trees - 1
}
    /// Returns the smallest tree's root element as an [InOrderIndex].
    ///
    /// This function takes the smallest tree in this forest, "pretends" that it is a subtree of a
    /// fully balanced binary tree, and returns the the in-order index of that balanced tree's root
    /// node.
    ///
    /// # Panics
    ///
    /// Panics if the forest is empty (`smallest_tree_unchecked` requires at least one tree, and
    /// `num_trees() - 1` would underflow).
    pub fn root_in_order_index(&self) -> InOrderIndex {
        // Count total size of all trees in the forest.
        let nodes = self.num_nodes();
        // Add the count for the parent nodes that separate each tree. These are allocated but
        // currently empty, and correspond to the nodes that will be used once the trees are merged.
        let open_trees = self.num_trees() - 1;
        // Remove the leaf-count of the rightmost subtree. The target tree root index comes before
        // the subtree, for the in-order tree walk.
        let right_subtree_count = self.smallest_tree_unchecked().num_leaves() - 1;
        let idx = nodes + open_trees - right_subtree_count;
        InOrderIndex::new(idx.try_into().unwrap())
    }
/// Returns the in-order index of the rightmost element (the smallest tree).
pub fn rightmost_in_order_index(&self) -> InOrderIndex {
// Count total size of all trees in the forest.
let nodes = self.num_nodes();
// Add the count for the parent nodes that separate each tree. These are allocated but
// currently empty, and correspond to the nodes that will be used once the trees are merged.
let open_trees = self.num_trees() - 1;
let idx = nodes + open_trees;
InOrderIndex::new(idx.try_into().unwrap())
}
/// Given a leaf index in the current forest, return the tree number responsible for the
/// leaf.
///
/// Note:
/// The result is a tree position `p`, it has the following interpretations:
/// - `p+1` is the depth of the tree.
/// - Because the root element is not part of the proof, `p` is the length of the authentication
/// path.
/// - `2^p` is equal to the number of leaves in this particular tree.
/// - And `2^(p+1)-1` corresponds to the size of the tree.
///
/// For example, given a forest with 6 leaves whose forest is `0b110`:
/// ```text
/// __ tree 2 __
/// / \
/// ____ ____ _ tree 1 _
/// / \ / \ / \
/// 0 1 2 3 4 5
/// ```
///
/// Leaf indices `0..=3` are in the tree at index 2 and leaf indices `4..=5` are in the tree at
/// index 1.
pub fn leaf_to_corresponding_tree(self, leaf_idx: usize) -> Option<u32> {
let forest = self.0;
if leaf_idx >= forest {
None
} else {
// - each bit in the forest is a unique tree and the bit position is its power-of-two
// size
// - each tree is associated to a consecutive range of positions equal to its size from
// left-to-right
// - this means the first tree owns from `0` up to the `2^k_0` first positions, where
// `k_0` is the highest set bit position, the second tree from `2^k_0 + 1` up to
// `2^k_1` where `k_1` is the second highest bit, so on.
// - this means the highest bits work as a category marker, and the position is owned by
// the first tree which doesn't share a high bit with the position
let before = forest & leaf_idx;
let after = forest ^ before;
let tree_idx = after.ilog2();
Some(tree_idx)
}
}
/// Given a leaf index in the current forest, return the leaf index in the tree to which
/// the leaf belongs.
pub(super) fn leaf_relative_position(self, leaf_idx: usize) -> Option<usize> {
let tree_idx = self.leaf_to_corresponding_tree(leaf_idx)?;
let forest_before = self & high_bitmask(tree_idx + 1);
Some(leaf_idx - forest_before.0)
}
}
impl Display for Forest {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "{}", self.0)
}
}
impl Binary for Forest {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "{:b}", self.0)
}
}
impl BitAnd<Forest> for Forest {
type Output = Self;
fn bitand(self, rhs: Self) -> Self::Output {
Self::new(self.0 & rhs.0)
}
}
impl BitOr<Forest> for Forest {
type Output = Self;
fn bitor(self, rhs: Self) -> Self::Output {
Self::new(self.0 | rhs.0)
}
}
impl BitXor<Forest> for Forest {
type Output = Self;
fn bitxor(self, rhs: Self) -> Self::Output {
Self::new(self.0 ^ rhs.0)
}
}
impl BitXorAssign<Forest> for Forest {
fn bitxor_assign(&mut self, rhs: Self) {
self.0 ^= rhs.0;
}
}
impl From<Felt> for Forest {
fn from(value: Felt) -> Self {
Self::new(value.as_canonical_u64() as usize)
}
}
impl From<Forest> for Felt {
fn from(value: Forest) -> Self {
Felt::new(value.0 as u64)
}
}
/// Return a bitmask for the bits including and above the given position.
pub(crate) const fn high_bitmask(bit: u32) -> Forest {
if bit > usize::BITS - 1 {
Forest::empty()
} else {
Forest::new(usize::MAX << bit)
}
}
// SERIALIZATION
// ================================================================================================
impl Serializable for Forest {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
self.0.write_into(target);
}
}
impl Deserializable for Forest {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
let value = source.read_usize()?;
Ok(Self::new(value))
}
}
// TREE SIZE ITERATOR
// ================================================================================================
/// Iterate over the trees within this `Forest`, from smallest to largest.
///
/// Each item is a "sub-forest", containing only one tree.
pub struct TreeSizeIterator {
inner: Forest,
}
impl TreeSizeIterator {
pub fn new(value: Forest) -> TreeSizeIterator {
TreeSizeIterator { inner: value }
}
}
impl Iterator for TreeSizeIterator {
type Item = Forest;
fn next(&mut self) -> Option<<Self as Iterator>::Item> {
let tree = self.inner.smallest_tree();
if tree.is_empty() {
None
} else {
self.inner = self.inner.without_trees(tree);
Some(tree)
}
}
}
impl DoubleEndedIterator for TreeSizeIterator {
fn next_back(&mut self) -> Option<<Self as Iterator>::Item> {
let tree = self.inner.largest_tree();
if tree.is_empty() {
None
} else {
self.inner = self.inner.without_trees(tree);
Some(tree)
}
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/mmr/error.rs | miden-crypto/src/merkle/mmr/error.rs | use alloc::string::String;
use thiserror::Error;
use crate::merkle::MerkleError;
#[derive(Debug, Error)]
pub enum MmrError {
#[error("mmr does not contain position {0}")]
PositionNotFound(usize),
#[error("mmr peaks are invalid: {0}")]
InvalidPeaks(String),
#[error("mmr forest is out of bounds: requested {0} > current {1}")]
ForestOutOfBounds(usize, usize),
#[error("mmr peak does not match the computed merkle root of the provided authentication path")]
PeakPathMismatch,
#[error("requested peak index is {peak_idx} but the number of peaks is {peaks_len}")]
PeakOutOfBounds { peak_idx: usize, peaks_len: usize },
#[error("invalid mmr update")]
InvalidUpdate,
#[error("mmr does not contain a peak with depth {0}")]
UnknownPeak(u8),
#[error("invalid merkle path")]
InvalidMerklePath(#[source] MerkleError),
#[error("merkle root computation failed")]
MerkleRootComputationFailed(#[source] MerkleError),
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/mmr/mod.rs | miden-crypto/src/merkle/mmr/mod.rs | //! Merkle Mountain Range (MMR) data structures.
mod delta;
mod error;
mod forest;
mod full;
mod inorder;
mod partial;
mod peaks;
mod proof;
#[cfg(test)]
mod tests;
// REEXPORTS
// ================================================================================================
pub use delta::MmrDelta;
pub use error::MmrError;
pub use forest::Forest;
pub use full::Mmr;
pub use inorder::InOrderIndex;
pub use partial::PartialMmr;
pub use peaks::MmrPeaks;
pub use proof::{MmrPath, MmrProof};
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/mmr/partial.rs | miden-crypto/src/merkle/mmr/partial.rs | use alloc::{
collections::{BTreeMap, BTreeSet},
vec::Vec,
};
use super::{MmrDelta, MmrPath};
use crate::{
Word,
merkle::{
InnerNodeInfo, MerklePath, Rpo256,
mmr::{InOrderIndex, MmrError, MmrPeaks, forest::Forest},
},
utils::{ByteReader, ByteWriter, Deserializable, Serializable},
};
// TYPE ALIASES
// ================================================================================================
type NodeMap = BTreeMap<InOrderIndex, Word>;
// PARTIAL MERKLE MOUNTAIN RANGE
// ================================================================================================
/// Partially materialized Merkle Mountain Range (MMR), used to efficiently store and update the
/// authentication paths for a subset of the elements in a full MMR.
///
/// This structure store only the authentication path for a value, the value itself is stored
/// separately.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct PartialMmr {
/// The version of the MMR.
///
/// This value serves the following purposes:
///
/// - The forest is a counter for the total number of elements in the MMR.
/// - Since the MMR is an append-only structure, every change to it causes a change to the
/// `forest`, so this value has a dual purpose as a version tag.
/// - The bits in the forest also corresponds to the count and size of every perfect binary
/// tree that composes the MMR structure, which server to compute indexes and perform
/// validation.
pub(crate) forest: Forest,
/// The MMR peaks.
///
/// The peaks are used for two reasons:
///
/// 1. It authenticates the addition of an element to the [PartialMmr], ensuring only valid
/// elements are tracked.
/// 2. During a MMR update peaks can be merged by hashing the left and right hand sides. The
/// peaks are used as the left hand.
///
/// All the peaks of every tree in the MMR forest. The peaks are always ordered by number of
/// leaves, starting from the peak with most children, to the one with least.
pub(crate) peaks: Vec<Word>,
/// Authentication nodes used to construct merkle paths for a subset of the MMR's leaves.
///
/// This does not include the MMR's peaks nor the tracked nodes, only the elements required to
/// construct their authentication paths. This property is used to detect when elements can be
/// safely removed, because they are no longer required to authenticate any element in the
/// [PartialMmr].
///
/// The elements in the MMR are referenced using a in-order tree index. This indexing scheme
/// permits for easy computation of the relative nodes (left/right children, sibling, parent),
/// which is useful for traversal. The indexing is also stable, meaning that merges to the
/// trees in the MMR can be represented without rewrites of the indexes.
pub(crate) nodes: NodeMap,
/// Flag indicating if the odd element should be tracked.
///
/// This flag is necessary because the sibling of the odd doesn't exist yet, so it can not be
/// added into `nodes` to signal the value is being tracked.
pub(crate) track_latest: bool,
}
impl Default for PartialMmr {
/// Creates a new [PartialMmr] with default values.
fn default() -> Self {
let forest = Forest::empty();
let peaks = Vec::new();
let nodes = BTreeMap::new();
let track_latest = false;
Self { forest, peaks, nodes, track_latest }
}
}
impl PartialMmr {
// CONSTRUCTORS
// --------------------------------------------------------------------------------------------
/// Returns a new [PartialMmr] instantiated from the specified peaks.
pub fn from_peaks(peaks: MmrPeaks) -> Self {
let forest = peaks.forest();
let peaks = peaks.into();
let nodes = BTreeMap::new();
let track_latest = false;
Self { forest, peaks, nodes, track_latest }
}
/// Returns a new [PartialMmr] instantiated from the specified components.
///
/// This constructor does not check the consistency between peaks and nodes. If the specified
/// peaks are nodes are inconsistent, the returned partial MMR may exhibit undefined behavior.
pub fn from_parts(peaks: MmrPeaks, nodes: NodeMap, track_latest: bool) -> Self {
let forest = peaks.forest();
let peaks = peaks.into();
Self { forest, peaks, nodes, track_latest }
}
// PUBLIC ACCESSORS
// --------------------------------------------------------------------------------------------
/// Returns the current `forest` of this [PartialMmr].
///
/// This value corresponds to the version of the [PartialMmr] and the number of leaves in the
/// underlying MMR.
pub fn forest(&self) -> Forest {
self.forest
}
/// Returns the number of leaves in the underlying MMR for this [PartialMmr].
pub fn num_leaves(&self) -> usize {
self.forest.num_leaves()
}
/// Returns the peaks of the MMR for this [PartialMmr].
pub fn peaks(&self) -> MmrPeaks {
// expect() is OK here because the constructor ensures that MMR peaks can be constructed
// correctly
MmrPeaks::new(self.forest, self.peaks.clone()).expect("invalid MMR peaks")
}
/// Returns true if this partial MMR tracks an authentication path for the leaf at the
/// specified position.
pub fn is_tracked(&self, pos: usize) -> bool {
let leaves = self.forest.num_leaves();
if pos >= leaves {
return false;
} else if pos == leaves - 1 && self.forest.has_single_leaf_tree() {
// if the number of leaves in the MMR is odd and the position is for the last leaf
// whether the leaf is tracked is defined by the `track_latest` flag
return self.track_latest;
}
let leaf_index = InOrderIndex::from_leaf_pos(pos);
self.is_tracked_node(&leaf_index)
}
/// Given a leaf position, returns the Merkle path to its corresponding peak, or None if this
/// partial MMR does not track an authentication paths for the specified leaf.
///
/// Note: The leaf position is the 0-indexed number corresponding to the order the leaves were
/// added, this corresponds to the MMR size _prior_ to adding the element. So the 1st element
/// has position 0, the second position 1, and so on.
///
/// # Errors
/// Returns an error if the specified position is greater-or-equal than the number of leaves
/// in the underlying MMR.
pub fn open(&self, pos: usize) -> Result<Option<MmrPath>, MmrError> {
let tree_bit = self
.forest
.leaf_to_corresponding_tree(pos)
.ok_or(MmrError::PositionNotFound(pos))?;
let depth = tree_bit as usize;
let mut nodes = Vec::with_capacity(depth);
let mut idx = InOrderIndex::from_leaf_pos(pos);
while let Some(node) = self.nodes.get(&idx.sibling()) {
nodes.push(*node);
idx = idx.parent();
}
// If there are nodes then the path must be complete, otherwise it is a bug
debug_assert!(nodes.is_empty() || nodes.len() == depth);
if nodes.len() != depth {
// The requested `pos` is not being tracked.
Ok(None)
} else {
Ok(Some(MmrPath::new(self.forest, pos, MerklePath::new(nodes))))
}
}
// ITERATORS
// --------------------------------------------------------------------------------------------
/// Returns an iterator nodes of all authentication paths of this [PartialMmr].
pub fn nodes(&self) -> impl Iterator<Item = (&InOrderIndex, &Word)> {
self.nodes.iter()
}
/// Returns an iterator over inner nodes of this [PartialMmr] for the specified leaves.
///
/// The order of iteration is not defined. If a leaf is not presented in this partial MMR it
/// is silently ignored.
pub fn inner_nodes<'a, I: Iterator<Item = (usize, Word)> + 'a>(
&'a self,
mut leaves: I,
) -> impl Iterator<Item = InnerNodeInfo> + 'a {
let stack = if let Some((pos, leaf)) = leaves.next() {
let idx = InOrderIndex::from_leaf_pos(pos);
vec![(idx, leaf)]
} else {
Vec::new()
};
InnerNodeIterator {
nodes: &self.nodes,
leaves,
stack,
seen_nodes: BTreeSet::new(),
}
}
// STATE MUTATORS
// --------------------------------------------------------------------------------------------
/// Adds a new peak and optionally track it. Returns a vector of the authentication nodes
/// inserted into this [PartialMmr] as a result of this operation.
///
/// When `track` is `true` the new leaf is tracked.
pub fn add(&mut self, leaf: Word, track: bool) -> Vec<(InOrderIndex, Word)> {
self.forest.append_leaf();
// We just incremented the forest, so this cannot panic.
let merges = self.forest.smallest_tree_height_unchecked();
let mut new_nodes = Vec::with_capacity(merges);
let peak = if merges == 0 {
self.track_latest = track;
leaf
} else {
let mut track_right = track;
let mut track_left = self.track_latest;
let mut right = leaf;
let mut right_idx = self.forest.rightmost_in_order_index();
for _ in 0..merges {
let left = self.peaks.pop().expect("Missing peak");
let left_idx = right_idx.sibling();
if track_right {
let old = self.nodes.insert(left_idx, left);
new_nodes.push((left_idx, left));
debug_assert!(
old.is_none(),
"Idx {left_idx:?} already contained an element {old:?}",
);
};
if track_left {
let old = self.nodes.insert(right_idx, right);
new_nodes.push((right_idx, right));
debug_assert!(
old.is_none(),
"Idx {right_idx:?} already contained an element {old:?}",
);
};
// Update state for the next iteration.
// --------------------------------------------------------------------------------
// This layer is merged, go up one layer.
right_idx = right_idx.parent();
// Merge the current layer. The result is either the right element of the next
// merge, or a new peak.
right = Rpo256::merge(&[left, right]);
// This iteration merged the left and right nodes, the new value is always used as
// the next iteration's right node. Therefore the tracking flags of this iteration
// have to be merged into the right side only.
track_right = track_right || track_left;
// On the next iteration, a peak will be merged. If any of its children are tracked,
// then we have to track the left side
track_left = self.is_tracked_node(&right_idx.sibling());
}
right
};
self.peaks.push(peak);
new_nodes
}
/// Adds the authentication path represented by [MerklePath] if it is valid.
///
/// The `leaf_pos` refers to the global position of the leaf in the MMR, these are 0-indexed
/// values assigned in a strictly monotonic fashion as elements are inserted into the MMR,
/// this value corresponds to the values used in the MMR structure.
///
/// The `leaf` corresponds to the value at `leaf_pos`, and `path` is the authentication path for
/// that element up to its corresponding Mmr peak. The `leaf` is only used to compute the root
/// from the authentication path to valid the data, only the authentication data is saved in
/// the structure. If the value is required it should be stored out-of-band.
pub fn track(
&mut self,
leaf_pos: usize,
leaf: Word,
path: &MerklePath,
) -> Result<(), MmrError> {
// Checks there is a tree with same depth as the authentication path, if not the path is
// invalid.
let tree = Forest::new(1 << path.depth());
if (tree & self.forest).is_empty() {
return Err(MmrError::UnknownPeak(path.depth()));
};
if leaf_pos + 1 == self.forest.num_leaves()
&& path.depth() == 0
&& self.peaks.last().is_some_and(|v| *v == leaf)
{
self.track_latest = true;
return Ok(());
}
// ignore the trees smaller than the target (these elements are position after the current
// target and don't affect the target leaf_pos)
let target_forest = self.forest ^ (self.forest & tree.all_smaller_trees_unchecked());
let peak_pos = target_forest.num_trees() - 1;
// translate from mmr leaf_pos to merkle path
let path_idx = leaf_pos - (target_forest ^ tree).num_leaves();
// Compute the root of the authentication path, and check it matches the current version of
// the PartialMmr.
let computed = path
.compute_root(path_idx as u64, leaf)
.map_err(MmrError::MerkleRootComputationFailed)?;
if self.peaks[peak_pos] != computed {
return Err(MmrError::PeakPathMismatch);
}
let mut idx = InOrderIndex::from_leaf_pos(leaf_pos);
for leaf in path.nodes() {
self.nodes.insert(idx.sibling(), *leaf);
idx = idx.parent();
}
Ok(())
}
/// Removes a leaf of the [PartialMmr] and the unused nodes from the authentication path.
///
/// Returns a vector of the authentication nodes removed from this [PartialMmr] as a result
/// of this operation. This is useful for client-side pruning, where the caller needs to know
/// which nodes can be deleted from storage.
///
/// Note: `leaf_pos` corresponds to the position in the MMR and not on an individual tree.
pub fn untrack(&mut self, leaf_pos: usize) -> Vec<(InOrderIndex, Word)> {
let mut idx = InOrderIndex::from_leaf_pos(leaf_pos);
let mut removed = Vec::new();
// `idx` represent the element that can be computed by the authentication path, because
// these elements can be computed they are not saved for the authentication of the current
// target. In other words, if the idx is present it was added for the authentication of
// another element, and no more elements should be removed otherwise it would remove that
// element's authentication data.
while let Some(word) = self.nodes.remove(&idx.sibling()) {
removed.push((idx.sibling(), word));
if self.nodes.contains_key(&idx) {
break;
}
idx = idx.parent();
}
removed
}
/// Applies updates to this [PartialMmr] and returns a vector of new authentication nodes
/// inserted into the partial MMR.
pub fn apply(&mut self, delta: MmrDelta) -> Result<Vec<(InOrderIndex, Word)>, MmrError> {
if delta.forest < self.forest {
return Err(MmrError::InvalidPeaks(format!(
"forest of mmr delta {} is less than current forest {}",
delta.forest, self.forest
)));
}
let mut inserted_nodes = Vec::new();
if delta.forest == self.forest {
if !delta.data.is_empty() {
return Err(MmrError::InvalidUpdate);
}
return Ok(inserted_nodes);
}
// find the tree merges
let changes = self.forest ^ delta.forest;
// `largest_tree_unchecked()` panics if `changes` is empty. `changes` cannot be empty
// unless `self.forest == delta.forest`, which is guarded against above.
let largest = changes.largest_tree_unchecked();
// The largest tree itself also cannot be an empty forest, so this cannot panic either.
let merges = self.forest & largest.all_smaller_trees_unchecked();
debug_assert!(
!self.track_latest || merges.has_single_leaf_tree(),
"if there is an odd element, a merge is required"
);
// count the number elements needed to produce largest from the current state
let (merge_count, new_peaks) = if !merges.is_empty() {
let depth = largest.smallest_tree_height_unchecked();
// `merges` also cannot be an empty forest, so this cannot panic either.
let skipped = merges.smallest_tree_height_unchecked();
let computed = merges.num_trees() - 1;
let merge_count = depth - skipped - computed;
let new_peaks = delta.forest & largest.all_smaller_trees_unchecked();
(merge_count, new_peaks)
} else {
(0, changes)
};
// verify the delta size
if delta.data.len() != merge_count + new_peaks.num_trees() {
return Err(MmrError::InvalidUpdate);
}
// keeps track of how many data elements from the update have been consumed
let mut update_count = 0;
if !merges.is_empty() {
// starts at the smallest peak and follows the merged peaks
let mut peak_idx = self.forest.root_in_order_index();
// match order of the update data while applying it
self.peaks.reverse();
// set to true when the data is needed for authentication paths updates
let mut track = self.track_latest;
self.track_latest = false;
let mut peak_count = 0;
let mut target = merges.smallest_tree_unchecked();
let mut new = delta.data[0];
update_count += 1;
while target < largest {
// check if either the left or right subtrees have saved for authentication paths.
// If so, turn tracking on to update those paths.
if target != Forest::new(1) && !track {
track = self.is_tracked_node(&peak_idx);
}
// update data only contains the nodes from the right subtrees, left nodes are
// either previously known peaks or computed values
let (left, right) = if !(target & merges).is_empty() {
let peak = self.peaks[peak_count];
let sibling_idx = peak_idx.sibling();
// if the sibling peak is tracked, add this peaks to the set of
// authentication nodes
if self.is_tracked_node(&sibling_idx) {
self.nodes.insert(peak_idx, new);
inserted_nodes.push((peak_idx, new));
}
peak_count += 1;
(peak, new)
} else {
let update = delta.data[update_count];
update_count += 1;
(new, update)
};
if track {
let sibling_idx = peak_idx.sibling();
if peak_idx.is_left_child() {
self.nodes.insert(sibling_idx, right);
inserted_nodes.push((sibling_idx, right));
} else {
self.nodes.insert(sibling_idx, left);
inserted_nodes.push((sibling_idx, left));
}
}
peak_idx = peak_idx.parent();
new = Rpo256::merge(&[left, right]);
target = target.next_larger_tree();
}
debug_assert!(peak_count == merges.num_trees());
// restore the peaks order
self.peaks.reverse();
// remove the merged peaks
self.peaks.truncate(self.peaks.len() - peak_count);
// add the newly computed peak, the result of the merges
self.peaks.push(new);
}
// The rest of the update data is composed of peaks. None of these elements can contain
// tracked elements because the peaks were unknown, and it is not possible to add elements
// for tacking without authenticating it to a peak.
self.peaks.extend_from_slice(&delta.data[update_count..]);
self.forest = delta.forest;
debug_assert!(self.peaks.len() == self.forest.num_trees());
Ok(inserted_nodes)
}
// HELPER METHODS
// --------------------------------------------------------------------------------------------
/// Returns true if this [PartialMmr] tracks authentication path for the node at the specified
/// index.
fn is_tracked_node(&self, node_index: &InOrderIndex) -> bool {
if node_index.is_leaf() {
self.nodes.contains_key(&node_index.sibling())
} else {
let left_child = node_index.left_child();
let right_child = node_index.right_child();
self.nodes.contains_key(&left_child) | self.nodes.contains_key(&right_child)
}
}
}
// CONVERSIONS
// ================================================================================================
impl From<MmrPeaks> for PartialMmr {
fn from(peaks: MmrPeaks) -> Self {
Self::from_peaks(peaks)
}
}
impl From<PartialMmr> for MmrPeaks {
fn from(partial_mmr: PartialMmr) -> Self {
// Safety: the [PartialMmr] maintains the constraints the number of true bits in the forest
// matches the number of peaks, as required by the [MmrPeaks]
MmrPeaks::new(partial_mmr.forest, partial_mmr.peaks).unwrap()
}
}
impl From<&MmrPeaks> for PartialMmr {
fn from(peaks: &MmrPeaks) -> Self {
Self::from_peaks(peaks.clone())
}
}
impl From<&PartialMmr> for MmrPeaks {
fn from(partial_mmr: &PartialMmr) -> Self {
// Safety: the [PartialMmr] maintains the constraints the number of true bits in the forest
// matches the number of peaks, as required by the [MmrPeaks]
MmrPeaks::new(partial_mmr.forest, partial_mmr.peaks.clone()).unwrap()
}
}
// ITERATORS
// ================================================================================================
/// An iterator over every inner node of the [PartialMmr].
pub struct InnerNodeIterator<'a, I: Iterator<Item = (usize, Word)>> {
nodes: &'a NodeMap,
leaves: I,
stack: Vec<(InOrderIndex, Word)>,
seen_nodes: BTreeSet<InOrderIndex>,
}
impl<I: Iterator<Item = (usize, Word)>> Iterator for InnerNodeIterator<'_, I> {
type Item = InnerNodeInfo;
fn next(&mut self) -> Option<Self::Item> {
while let Some((idx, node)) = self.stack.pop() {
let parent_idx = idx.parent();
let new_node = self.seen_nodes.insert(parent_idx);
// if we haven't seen this node's parent before, and the node has a sibling, return
// the inner node defined by the parent of this node, and move up the branch
if new_node && let Some(sibling) = self.nodes.get(&idx.sibling()) {
let (left, right) = if parent_idx.left_child() == idx {
(node, *sibling)
} else {
(*sibling, node)
};
let parent = Rpo256::merge(&[left, right]);
let inner_node = InnerNodeInfo { value: parent, left, right };
self.stack.push((parent_idx, parent));
return Some(inner_node);
}
// the previous leaf has been processed, try to process the next leaf
if let Some((pos, leaf)) = self.leaves.next() {
let idx = InOrderIndex::from_leaf_pos(pos);
self.stack.push((idx, leaf));
}
}
None
}
}
impl Serializable for PartialMmr {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
self.forest.num_leaves().write_into(target);
self.peaks.write_into(target);
self.nodes.write_into(target);
target.write_bool(self.track_latest);
}
}
impl Deserializable for PartialMmr {
fn read_from<R: ByteReader>(
source: &mut R,
) -> Result<Self, crate::utils::DeserializationError> {
let forest = Forest::new(usize::read_from(source)?);
let peaks = Vec::<Word>::read_from(source)?;
let nodes = NodeMap::read_from(source)?;
let track_latest = source.read_bool()?;
Ok(Self { forest, peaks, nodes, track_latest })
}
}
// TESTS
// ================================================================================================
#[cfg(test)]
mod tests {
use alloc::{collections::BTreeSet, vec::Vec};
use super::{MmrPeaks, PartialMmr};
use crate::{
Word,
merkle::{
NodeIndex, int_to_node,
mmr::{Mmr, forest::Forest},
store::MerkleStore,
},
utils::{Deserializable, Serializable},
};
// Note: This function works around the fact that P3 constructors are not const.
// Once upstream Plonky3 releases our const constructor changes, this should be
// reverted to `const LEAVES: [Word; 7] = [...]`. See issue #731.
fn leaves() -> [Word; 7] {
[
int_to_node(0),
int_to_node(1),
int_to_node(2),
int_to_node(3),
int_to_node(4),
int_to_node(5),
int_to_node(6),
]
}
#[test]
fn test_partial_mmr_apply_delta() {
// build an MMR with 10 nodes (2 peaks) and a partial MMR based on it
let mut mmr = Mmr::default();
(0..10).for_each(|i| mmr.add(int_to_node(i)));
let mut partial_mmr: PartialMmr = mmr.peaks().into();
// add authentication path for position 1 and 8
{
let node = mmr.get(1).unwrap();
let proof = mmr.open(1).unwrap();
partial_mmr.track(1, node, proof.path().merkle_path()).unwrap();
}
{
let node = mmr.get(8).unwrap();
let proof = mmr.open(8).unwrap();
partial_mmr.track(8, node, proof.path().merkle_path()).unwrap();
}
// add 2 more nodes into the MMR and validate apply_delta()
(10..12).for_each(|i| mmr.add(int_to_node(i)));
validate_apply_delta(&mmr, &mut partial_mmr);
// add 1 more node to the MMR, validate apply_delta() and start tracking the node
mmr.add(int_to_node(12));
validate_apply_delta(&mmr, &mut partial_mmr);
{
let node = mmr.get(12).unwrap();
let proof = mmr.open(12).unwrap();
partial_mmr.track(12, node, proof.path().merkle_path()).unwrap();
assert!(partial_mmr.track_latest);
}
// by this point we are tracking authentication paths for positions: 1, 8, and 12
// add 3 more nodes to the MMR (collapses to 1 peak) and validate apply_delta()
(13..16).for_each(|i| mmr.add(int_to_node(i)));
validate_apply_delta(&mmr, &mut partial_mmr);
}
fn validate_apply_delta(mmr: &Mmr, partial: &mut PartialMmr) {
let tracked_leaves = partial
.nodes
.iter()
.filter(|&(index, _)| index.is_leaf())
.map(|(index, _)| index.sibling())
.collect::<Vec<_>>();
let nodes_before = partial.nodes.clone();
// compute and apply delta
let delta = mmr.get_delta(partial.forest(), mmr.forest()).unwrap();
let nodes_delta = partial.apply(delta).unwrap();
// new peaks were computed correctly
assert_eq!(mmr.peaks(), partial.peaks());
let mut expected_nodes = nodes_before;
for (key, value) in nodes_delta {
// nodes should not be duplicated
assert!(expected_nodes.insert(key, value).is_none());
}
// new nodes should be a combination of original nodes and delta
assert_eq!(expected_nodes, partial.nodes);
// make sure tracked leaves open to the same proofs as in the underlying MMR
for index in tracked_leaves {
let pos = index.inner() / 2;
let proof1 = partial.open(pos).unwrap().unwrap();
let proof2 = mmr.open(pos).unwrap();
assert_eq!(proof1, *proof2.path());
}
}
#[test]
fn test_partial_mmr_inner_nodes_iterator() {
// build the MMR
let mmr: Mmr = leaves().into();
let first_peak = mmr.peaks().peaks()[0];
// -- test single tree ----------------------------
// get path and node for position 1
let node1 = mmr.get(1).unwrap();
let proof1 = mmr.open(1).unwrap();
// create partial MMR and add authentication path to node at position 1
let mut partial_mmr: PartialMmr = mmr.peaks().into();
partial_mmr.track(1, node1, proof1.path().merkle_path()).unwrap();
// empty iterator should have no nodes
assert_eq!(partial_mmr.inner_nodes([].iter().cloned()).next(), None);
// build Merkle store from authentication paths in partial MMR
let mut store: MerkleStore = MerkleStore::new();
store.extend(partial_mmr.inner_nodes([(1, node1)].iter().cloned()));
let index1 = NodeIndex::new(2, 1).unwrap();
let path1 = store.get_path(first_peak, index1).unwrap().path;
assert_eq!(path1, *proof1.path().merkle_path());
// -- test no duplicates --------------------------
// build the partial MMR
let mut partial_mmr: PartialMmr = mmr.peaks().into();
let node0 = mmr.get(0).unwrap();
let proof0 = mmr.open(0).unwrap();
let node2 = mmr.get(2).unwrap();
let proof2 = mmr.open(2).unwrap();
partial_mmr.track(0, node0, proof0.path().merkle_path()).unwrap();
partial_mmr.track(1, node1, proof1.path().merkle_path()).unwrap();
partial_mmr.track(2, node2, proof2.path().merkle_path()).unwrap();
// make sure there are no duplicates
let leaves = [(0, node0), (1, node1), (2, node2)];
let mut nodes = BTreeSet::new();
for node in partial_mmr.inner_nodes(leaves.iter().cloned()) {
assert!(nodes.insert(node.value));
}
// and also that the store is still be built correctly
store.extend(partial_mmr.inner_nodes(leaves.iter().cloned()));
let index0 = NodeIndex::new(2, 0).unwrap();
let index1 = NodeIndex::new(2, 1).unwrap();
let index2 = NodeIndex::new(2, 2).unwrap();
let path0 = store.get_path(first_peak, index0).unwrap().path;
let path1 = store.get_path(first_peak, index1).unwrap().path;
let path2 = store.get_path(first_peak, index2).unwrap().path;
assert_eq!(path0, *proof0.path().merkle_path());
assert_eq!(path1, *proof1.path().merkle_path());
assert_eq!(path2, *proof2.path().merkle_path());
// -- test multiple trees -------------------------
// build the partial MMR
let mut partial_mmr: PartialMmr = mmr.peaks().into();
let node5 = mmr.get(5).unwrap();
let proof5 = mmr.open(5).unwrap();
partial_mmr.track(1, node1, proof1.path().merkle_path()).unwrap();
partial_mmr.track(5, node5, proof5.path().merkle_path()).unwrap();
// build Merkle store from authentication paths in partial MMR
let mut store: MerkleStore = MerkleStore::new();
store.extend(partial_mmr.inner_nodes([(1, node1), (5, node5)].iter().cloned()));
let index1 = NodeIndex::new(2, 1).unwrap();
let index5 = NodeIndex::new(1, 1).unwrap();
let second_peak = mmr.peaks().peaks()[1];
let path1 = store.get_path(first_peak, index1).unwrap().path;
let path5 = store.get_path(second_peak, index5).unwrap().path;
assert_eq!(path1, *proof1.path().merkle_path());
assert_eq!(path5, *proof5.path().merkle_path());
}
#[test]
fn test_partial_mmr_add_without_track() {
let mut mmr = Mmr::default();
let empty_peaks = MmrPeaks::new(Forest::empty(), vec![]).unwrap();
let mut partial_mmr = PartialMmr::from_peaks(empty_peaks);
for el in (0..256).map(int_to_node) {
mmr.add(el);
partial_mmr.add(el, false);
assert_eq!(mmr.peaks(), partial_mmr.peaks());
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | true |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/mmr/peaks.rs | miden-crypto/src/merkle/mmr/peaks.rs | use alloc::vec::Vec;
use crate::{
Felt, Word, ZERO,
hash::rpo::Rpo256,
merkle::mmr::{Forest, MmrError, MmrProof},
};
// MMR PEAKS
// ================================================================================================
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct MmrPeaks {
/// The number of leaves (represented by [`Forest`]) is used to differentiate MMRs that have
/// the same number of peaks. This happens because the number of peaks goes up-and-down as
/// the structure is used causing existing trees to be merged and new ones to be created.
/// As an example, every time the MMR has a power-of-two number of leaves there is a single
/// peak.
///
/// Every tree in the MMR forest has a distinct power-of-two size, this means only the right-
/// most tree can have an odd number of elements (i.e. `1`). Additionally this means that the
/// bits in `num_leaves` conveniently encode the size of each individual tree.
///
/// Examples:
///
/// - With 5 leaves, the binary `0b101`. The number of set bits is equal the number of peaks,
/// in this case there are 2 peaks. The 0-indexed least-significant position of the bit
/// determines the number of elements of a tree, so the rightmost tree has `2**0` elements
/// and the left most has `2**2`.
/// - With 12 leaves, the binary is `0b1100`, this case also has 2 peaks, the leftmost tree has
/// `2**3=8` elements, and the right most has `2**2=4` elements.
forest: Forest,
/// All the peaks of every tree in the MMR forest. The peaks are always ordered by number of
/// leaves, starting from the peak with most children, to the one with least.
///
/// Invariant: The length of `peaks` must be equal to the number of true bits in `num_leaves`.
peaks: Vec<Word>,
}
impl Default for MmrPeaks {
/// Returns new [`MmrPeaks`] instantiated from an empty vector of peaks and 0 leaves.
fn default() -> Self {
Self {
forest: Forest::empty(),
peaks: Vec::new(),
}
}
}
impl MmrPeaks {
// CONSTRUCTOR
// --------------------------------------------------------------------------------------------
/// Returns new [MmrPeaks] instantiated from the provided vector of peaks and the number of
/// leaves in the underlying MMR.
///
/// # Errors
/// Returns an error if the number of leaves and the number of peaks are inconsistent.
pub fn new(forest: Forest, peaks: Vec<Word>) -> Result<Self, MmrError> {
if forest.num_trees() != peaks.len() {
return Err(MmrError::InvalidPeaks(format!(
"number of one bits in leaves is {} which does not equal peak length {}",
forest.num_trees(),
peaks.len()
)));
}
Ok(Self { forest, peaks })
}
// ACCESSORS
// --------------------------------------------------------------------------------------------
/// Returns the underlying forest (a set of mountain range peaks).
pub fn forest(&self) -> Forest {
self.forest
}
/// Returns a count of leaves in the underlying MMR.
pub fn num_leaves(&self) -> usize {
self.forest.num_leaves()
}
/// Returns the number of peaks of the underlying MMR.
pub fn num_peaks(&self) -> usize {
self.peaks.len()
}
/// Returns the list of peaks of the underlying MMR.
pub fn peaks(&self) -> &[Word] {
&self.peaks
}
/// Returns the peak by the provided index.
///
/// # Errors
/// Returns an error if the provided peak index is greater or equal to the current number of
/// peaks in the Mmr.
pub fn get_peak(&self, peak_idx: usize) -> Result<&Word, MmrError> {
self.peaks
.get(peak_idx)
.ok_or(MmrError::PeakOutOfBounds { peak_idx, peaks_len: self.peaks.len() })
}
/// Converts this [MmrPeaks] into its components: number of leaves (represented as a [`Forest`])
/// and a vector of peaks of the underlying MMR.
pub fn into_parts(self) -> (Forest, Vec<Word>) {
(self.forest, self.peaks)
}
/// Hashes the peaks.
///
/// The procedure will:
/// - Flatten and pad the peaks to a vector of Felts.
/// - Hash the vector of Felts.
pub fn hash_peaks(&self) -> Word {
Rpo256::hash_elements(&self.flatten_and_pad_peaks())
}
/// Verifies the Merkle opening proof.
///
/// # Errors
/// Returns an error if:
/// - provided opening proof is invalid.
/// - Mmr root value computed using the provided leaf value differs from the actual one.
pub fn verify(&self, value: Word, opening: MmrProof) -> Result<(), MmrError> {
let root = self.get_peak(opening.peak_index())?;
opening
.path()
.merkle_path()
.verify(opening.relative_pos() as u64, value, root)
.map_err(MmrError::InvalidMerklePath)
}
/// Flattens and pads the peaks to make hashing inside of the Miden VM easier.
///
/// The procedure will:
/// - Flatten the vector of Words into a vector of Felts.
/// - Pad the peaks with ZERO to an even number of words, this removes the need to handle RPO
/// padding.
/// - Pad the peaks to a minimum length of 16 words, which reduces the constant cost of hashing.
pub fn flatten_and_pad_peaks(&self) -> Vec<Felt> {
let num_peaks = self.peaks.len();
// To achieve the padding rules above we calculate the length of the final vector.
// This is calculated as the number of field elements. Each peak is 4 field elements.
// The length is calculated as follows:
// - If there are less than 16 peaks, the data is padded to 16 peaks and as such requires 64
// field elements.
// - If there are more than 16 peaks and the number of peaks is odd, the data is padded to
// an even number of peaks and as such requires `(num_peaks + 1) * 4` field elements.
// - If there are more than 16 peaks and the number of peaks is even, the data is not padded
// and as such requires `num_peaks * 4` field elements.
let len = if num_peaks < 16 {
64
} else if num_peaks % 2 == 1 {
(num_peaks + 1) * 4
} else {
num_peaks * 4
};
let mut elements = Vec::with_capacity(len);
elements.extend_from_slice(
&self
.peaks
.as_slice()
.iter()
.map(|digest| digest.as_slice())
.collect::<Vec<_>>()
.concat(),
);
elements.resize(len, ZERO);
elements
}
}
impl From<MmrPeaks> for Vec<Word> {
fn from(peaks: MmrPeaks) -> Self {
peaks.peaks
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/mmr/inorder.rs | miden-crypto/src/merkle/mmr/inorder.rs | //! Index for nodes of a binary tree based on an in-order tree walk.
//!
//! In-order walks have the parent node index split its left and right subtrees. All the left
//! children have indexes lower than the parent, meanwhile all the right subtree higher indexes.
//! This property makes it is easy to compute changes to the index by adding or subtracting the
//! leaves count.
use core::num::NonZeroUsize;
use crate::utils::{ByteReader, ByteWriter, Deserializable, Serializable};
// IN-ORDER INDEX
// ================================================================================================
/// Index of nodes in a perfectly balanced binary tree based on an in-order tree walk.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct InOrderIndex {
idx: usize,
}
impl InOrderIndex {
// CONSTRUCTORS
// --------------------------------------------------------------------------------------------
/// Returns a new [InOrderIndex] instantiated from the provided value.
pub fn new(idx: NonZeroUsize) -> InOrderIndex {
InOrderIndex { idx: idx.get() }
}
/// Return a new [InOrderIndex] instantiated from the specified leaf position.
///
/// # Panics:
/// If `leaf` is higher than or equal to `usize::MAX / 2`.
pub fn from_leaf_pos(leaf: usize) -> InOrderIndex {
// Convert the position from 0-indexed to 1-indexed, since the bit manipulation in this
// implementation only works 1-indexed counting.
let pos = leaf + 1;
InOrderIndex { idx: pos * 2 - 1 }
}
// PUBLIC ACCESSORS
// --------------------------------------------------------------------------------------------
/// True if the index is pointing at a leaf.
///
/// Every odd number represents a leaf.
pub fn is_leaf(&self) -> bool {
self.idx & 1 == 1
}
/// Returns true if this note is a left child of its parent.
pub fn is_left_child(&self) -> bool {
self.parent().left_child() == *self
}
/// Returns the level of the index.
///
/// Starts at level zero for leaves and increases by one for each parent.
pub fn level(&self) -> u32 {
self.idx.trailing_zeros()
}
/// Returns the index of the left child.
///
/// # Panics:
/// If the index corresponds to a leaf.
pub fn left_child(&self) -> InOrderIndex {
// The left child is itself a parent, with an index that splits its left/right subtrees. To
// go from the parent index to its left child, it is only necessary to subtract the count
// of elements on the child's right subtree + 1.
let els = 1 << (self.level() - 1);
InOrderIndex { idx: self.idx - els }
}
/// Returns the index of the right child.
///
/// # Panics:
/// If the index corresponds to a leaf.
pub fn right_child(&self) -> InOrderIndex {
// To compute the index of the parent of the right subtree it is sufficient to add the size
// of its left subtree + 1.
let els = 1 << (self.level() - 1);
InOrderIndex { idx: self.idx + els }
}
/// Returns the index of the parent node.
pub fn parent(&self) -> InOrderIndex {
// If the current index corresponds to a node in a left tree, to go up a level it is
// required to add the number of nodes of the right sibling, analogously if the node is a
// right child, going up requires subtracting the number of nodes in its left subtree.
//
// Both of the above operations can be performed by bitwise manipulation. Below the mask
// sets the number of trailing zeros to be equal the new level of the index, and the bit
// marks the parent.
let target = self.level() + 1;
let bit = 1 << target;
let mask = bit - 1;
let idx = self.idx ^ (self.idx & mask);
InOrderIndex { idx: idx | bit }
}
/// Returns the index of the sibling node.
pub fn sibling(&self) -> InOrderIndex {
let parent = self.parent();
if *self > parent {
parent.left_child()
} else {
parent.right_child()
}
}
/// Returns the inner value of this [InOrderIndex].
pub fn inner(&self) -> usize {
self.idx
}
}
impl Serializable for InOrderIndex {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
target.write_usize(self.idx);
}
}
impl Deserializable for InOrderIndex {
fn read_from<R: ByteReader>(
source: &mut R,
) -> Result<Self, crate::utils::DeserializationError> {
let idx = source.read_usize()?;
Ok(InOrderIndex { idx })
}
}
// CONVERSIONS FROM IN-ORDER INDEX
// ------------------------------------------------------------------------------------------------
impl From<InOrderIndex> for usize {
fn from(index: InOrderIndex) -> Self {
index.idx
}
}
// TESTS
// ================================================================================================
#[cfg(test)]
mod test {
use proptest::prelude::*;
use super::InOrderIndex;
use crate::utils::{Deserializable, Serializable};
proptest! {
#[test]
fn proptest_inorder_index_random(count in 1..1000usize) {
let left_pos = count * 2;
let right_pos = count * 2 + 1;
let left = InOrderIndex::from_leaf_pos(left_pos);
let right = InOrderIndex::from_leaf_pos(right_pos);
assert!(left.is_leaf());
assert!(right.is_leaf());
assert_eq!(left.parent(), right.parent());
assert_eq!(left.parent().right_child(), right);
assert_eq!(left, right.parent().left_child());
assert_eq!(left.sibling(), right);
assert_eq!(left, right.sibling());
}
}
#[test]
fn test_inorder_index_basic() {
let left = InOrderIndex::from_leaf_pos(0);
let right = InOrderIndex::from_leaf_pos(1);
assert!(left.is_leaf());
assert!(right.is_leaf());
assert_eq!(left.parent(), right.parent());
assert_eq!(left.parent().right_child(), right);
assert_eq!(left, right.parent().left_child());
assert_eq!(left.sibling(), right);
assert_eq!(left, right.sibling());
}
#[test]
fn test_inorder_index_serialization() {
let index = InOrderIndex::from_leaf_pos(5);
let bytes = index.to_bytes();
let index2 = InOrderIndex::read_from_bytes(&bytes).unwrap();
assert_eq!(index, index2);
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/mmr/full.rs | miden-crypto/src/merkle/mmr/full.rs | //! A fully materialized Merkle mountain range (MMR).
//!
//! A MMR is a forest structure, i.e. it is an ordered set of disjoint rooted trees. The trees are
//! ordered by size, from the most to least number of leaves. Every tree is a perfect binary tree,
//! meaning a tree has all its leaves at the same depth, and every inner node has a branch-factor
//! of 2 with both children set.
//!
//! Additionally the structure only supports adding leaves to the right-most tree, the one with the
//! least number of leaves. The structure preserves the invariant that each tree has different
//! depths, i.e. as part of adding a new element to the forest the trees with same depth are
//! merged, creating a new tree with depth d+1, this process is continued until the property is
//! reestablished.
use alloc::vec::Vec;
use super::{
super::{InnerNodeInfo, MerklePath},
MmrDelta, MmrError, MmrPath, MmrPeaks, MmrProof,
forest::{Forest, TreeSizeIterator},
};
use crate::{
Word,
merkle::Rpo256,
utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable},
};
// MMR
// ===============================================================================================
/// A fully materialized Merkle Mountain Range, with every tree in the forest and all their
/// elements.
///
/// Since this is a full representation of the MMR, elements are never removed and the MMR will
/// grow roughly `O(2n)` in number of leaf elements.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct Mmr {
/// Refer to the `forest` method documentation for details of the semantics of this value.
pub(super) forest: Forest,
/// Contains every element of the forest.
///
/// The trees are in postorder sequential representation. This representation allows for all
/// the elements of every tree in the forest to be stored in the same sequential buffer. It
/// also means new elements can be added to the forest, and merging of trees is very cheap with
/// no need to copy elements.
pub(super) nodes: Vec<Word>,
}
impl Default for Mmr {
fn default() -> Self {
Self::new()
}
}
impl Mmr {
// CONSTRUCTORS
// ============================================================================================
/// Constructor for an empty `Mmr`.
pub fn new() -> Mmr {
Mmr {
forest: Forest::empty(),
nodes: Vec::new(),
}
}
// ACCESSORS
// ============================================================================================
/// Returns the MMR forest representation. See [`Forest`].
pub const fn forest(&self) -> Forest {
self.forest
}
// FUNCTIONALITY
// ============================================================================================
/// Returns an [MmrProof] for the leaf at the specified position.
///
/// Note: The leaf position is the 0-indexed number corresponding to the order the leaves were
/// added, this corresponds to the MMR size _prior_ to adding the element. So the 1st element
/// has position 0, the second position 1, and so on.
///
/// # Errors
/// Returns an error if the specified leaf position is out of bounds for this MMR.
pub fn open(&self, pos: usize) -> Result<MmrProof, MmrError> {
self.open_at(pos, self.forest)
}
/// Returns an [MmrProof] for the leaf at the specified position using the state of the MMR
/// at the specified `forest`.
///
/// Note: The leaf position is the 0-indexed number corresponding to the order the leaves were
/// added, this corresponds to the MMR size _prior_ to adding the element. So the 1st element
/// has position 0, the second position 1, and so on.
///
/// # Errors
/// Returns an error if:
/// - The specified leaf position is out of bounds for this MMR.
/// - The specified `forest` value is not valid for this MMR.
pub fn open_at(&self, pos: usize, forest: Forest) -> Result<MmrProof, MmrError> {
if forest > self.forest {
return Err(MmrError::ForestOutOfBounds(forest.num_leaves(), self.forest.num_leaves()));
}
let (leaf, path) = self.collect_merkle_path_and_value(pos, forest)?;
let path = MmrPath::new(forest, pos, MerklePath::new(path));
Ok(MmrProof::new(path, leaf))
}
/// Returns the leaf value at position `pos`.
///
/// Note: The leaf position is the 0-indexed number corresponding to the order the leaves were
/// added, this corresponds to the MMR size _prior_ to adding the element. So the 1st element
/// has position 0, the second position 1, and so on.
pub fn get(&self, pos: usize) -> Result<Word, MmrError> {
let (value, _) = self.collect_merkle_path_and_value(pos, self.forest)?;
Ok(value)
}
/// Adds a new element to the MMR.
pub fn add(&mut self, el: Word) {
// Note: every node is also a tree of size 1, adding an element to the forest creates a new
// rooted-tree of size 1. This may temporarily break the invariant that every tree in the
// forest has different sizes, the loop below will eagerly merge trees of same size and
// restore the invariant.
self.nodes.push(el);
let mut left_offset = self.nodes.len().saturating_sub(2);
let mut right = el;
let mut left_tree = 1;
while !(self.forest & Forest::new(left_tree)).is_empty() {
right = Rpo256::merge(&[self.nodes[left_offset], right]);
self.nodes.push(right);
left_offset = left_offset.saturating_sub(Forest::new(left_tree).num_nodes());
left_tree <<= 1;
}
self.forest.append_leaf();
}
/// Returns the current peaks of the MMR.
pub fn peaks(&self) -> MmrPeaks {
self.peaks_at(self.forest).expect("failed to get peaks at current forest")
}
/// Returns the peaks of the MMR at the state specified by `forest`.
///
/// # Errors
/// Returns an error if the specified `forest` value is not valid for this MMR.
pub fn peaks_at(&self, forest: Forest) -> Result<MmrPeaks, MmrError> {
if forest > self.forest {
return Err(MmrError::ForestOutOfBounds(forest.num_leaves(), self.forest.num_leaves()));
}
let peaks: Vec<Word> = TreeSizeIterator::new(forest)
.rev()
.map(|tree| tree.num_nodes())
.scan(0, |offset, el| {
*offset += el;
Some(*offset)
})
.map(|offset| self.nodes[offset - 1])
.collect();
// Safety: the invariant is maintained by the [Mmr]
let peaks = MmrPeaks::new(forest, peaks)?;
Ok(peaks)
}
/// Compute the required update to `original_forest`.
///
/// The result is a packed sequence of the authentication elements required to update the trees
/// that have been merged together, followed by the new peaks of the [Mmr].
pub fn get_delta(&self, from_forest: Forest, to_forest: Forest) -> Result<MmrDelta, MmrError> {
if to_forest > self.forest {
return Err(MmrError::ForestOutOfBounds(
to_forest.num_leaves(),
self.forest.num_leaves(),
));
}
if from_forest > to_forest {
return Err(MmrError::ForestOutOfBounds(
from_forest.num_leaves(),
to_forest.num_leaves(),
));
}
if from_forest == to_forest {
return Ok(MmrDelta { forest: to_forest, data: Vec::new() });
}
let mut result = Vec::new();
// Find the largest tree in this [Mmr] which is new to `from_forest`.
let candidate_trees = to_forest ^ from_forest;
let mut new_high = candidate_trees.largest_tree_unchecked();
// Collect authentication nodes used for tree merges
// ----------------------------------------------------------------------------------------
// Find the trees from `from_forest` that have been merged into `new_high`.
let mut merges = from_forest & new_high.all_smaller_trees_unchecked();
// Find the peaks that are common to `from_forest` and this [Mmr]
let common_trees = from_forest ^ merges;
if !merges.is_empty() {
// Skip the smallest trees unknown to `from_forest`.
let mut target = merges.smallest_tree_unchecked();
// Collect siblings required to computed the merged tree's peak
while target < new_high {
// Computes the offset to the smallest know peak
// - common_trees: peaks unchanged in the current update, target comes after these.
// - merges: peaks that have not been merged so far, target comes after these.
// - target: tree from which to load the sibling. On the first iteration this is a
// value known by the partial mmr, on subsequent iterations this value is to be
// computed from the known peaks and provided authentication nodes.
let known = (common_trees | merges | target).num_nodes();
let sibling = target.num_nodes();
result.push(self.nodes[known + sibling - 1]);
// Update the target and account for tree merges
target = target.next_larger_tree();
while !(merges & target).is_empty() {
target = target.next_larger_tree();
}
// Remove the merges done so far
merges ^= merges & target.all_smaller_trees_unchecked();
}
} else {
// The new high tree may not be the result of any merges, if it is smaller than all the
// trees of `from_forest`.
new_high = Forest::empty();
}
// Collect the new [Mmr] peaks
// ----------------------------------------------------------------------------------------
let mut new_peaks = to_forest ^ common_trees ^ new_high;
let old_peaks = to_forest ^ new_peaks;
let mut offset = old_peaks.num_nodes();
while !new_peaks.is_empty() {
let target = new_peaks.largest_tree_unchecked();
offset += target.num_nodes();
result.push(self.nodes[offset - 1]);
new_peaks ^= target;
}
Ok(MmrDelta { forest: to_forest, data: result })
}
/// An iterator over inner nodes in the MMR. The order of iteration is unspecified.
pub fn inner_nodes(&self) -> MmrNodes<'_> {
MmrNodes {
mmr: self,
forest: 0,
last_right: 0,
index: 0,
}
}
// UTILITIES
// ============================================================================================
/// Internal function used to collect the leaf value and its Merkle path.
///
/// The arguments are relative to the target tree. To compute the opening of the second leaf
/// for a tree with depth 2 in the forest `0b110`:
///
/// - `leaf_idx`: Position corresponding to the order the leaves were added.
/// - `forest`: State of the MMR.
fn collect_merkle_path_and_value(
&self,
leaf_idx: usize,
forest: Forest,
) -> Result<(Word, Vec<Word>), MmrError> {
// find the target tree responsible for the MMR position
let tree_bit = forest
.leaf_to_corresponding_tree(leaf_idx)
.ok_or(MmrError::PositionNotFound(leaf_idx))?;
// isolate the trees before the target
let forest_before = forest.trees_larger_than(tree_bit);
let index_offset = forest_before.num_nodes();
// update the value position from global to the target tree
let relative_pos = leaf_idx - forest_before.num_leaves();
// see documentation of `leaf_to_corresponding_tree` for details
let tree_depth = (tree_bit + 1) as usize;
let mut path = Vec::with_capacity(tree_depth);
// The tree walk below goes from the root to the leaf, compute the root index to start
let mut forest_target: usize = 1usize << tree_bit;
let mut index = Forest::new(forest_target).num_nodes() - 1;
// Loop until the leaf is reached
while forest_target > 1 {
// Update the depth of the tree to correspond to a subtree
forest_target >>= 1;
// compute the indices of the right and left subtrees based on the post-order
let right_offset = index - 1;
let left_offset = right_offset - Forest::new(forest_target).num_nodes();
let left_or_right = relative_pos & forest_target;
let sibling = if left_or_right != 0 {
// going down the right subtree, the right child becomes the new root
index = right_offset;
// and the left child is the authentication
self.nodes[index_offset + left_offset]
} else {
index = left_offset;
self.nodes[index_offset + right_offset]
};
path.push(sibling);
}
debug_assert!(path.len() == tree_depth - 1);
// the rest of the codebase has the elements going from leaf to root, adjust it here for
// easy of use/consistency sake
path.reverse();
let value = self.nodes[index_offset + index];
Ok((value, path))
}
}
// CONVERSIONS
// ================================================================================================
impl<T> From<T> for Mmr
where
T: IntoIterator<Item = Word>,
{
fn from(values: T) -> Self {
let mut mmr = Mmr::new();
for v in values {
mmr.add(v)
}
mmr
}
}
// SERIALIZATION
// ================================================================================================
impl Serializable for Mmr {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
self.forest.write_into(target);
self.nodes.write_into(target);
}
}
impl Deserializable for Mmr {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
let forest = Forest::read_from(source)?;
let nodes = Vec::<Word>::read_from(source)?;
Ok(Self { forest, nodes })
}
}
// ITERATOR
// ===============================================================================================
/// Yields inner nodes of the [Mmr].
pub struct MmrNodes<'a> {
/// [Mmr] being yielded, when its `forest` value is matched, the iterations is finished.
mmr: &'a Mmr,
/// Keeps track of the left nodes yielded so far waiting for a right pair, this matches the
/// semantics of the [Mmr]'s forest attribute, since that too works as a buffer of left nodes
/// waiting for a pair to be hashed together.
forest: usize,
/// Keeps track of the last right node yielded, after this value is set, the next iteration
/// will be its parent with its corresponding left node that has been yield already.
last_right: usize,
/// The current index in the `nodes` vector.
index: usize,
}
impl Iterator for MmrNodes<'_> {
type Item = InnerNodeInfo;
fn next(&mut self) -> Option<Self::Item> {
debug_assert!(self.last_right.count_ones() <= 1, "last_right tracks zero or one element");
// only parent nodes are emitted, remove the single node tree from the forest
let target = self.mmr.forest.without_single_leaf().num_leaves();
if self.forest < target {
if self.last_right == 0 {
// yield the left leaf
debug_assert!(self.last_right == 0, "left must be before right");
self.forest |= 1;
self.index += 1;
// yield the right leaf
debug_assert!((self.forest & 1) == 1, "right must be after left");
self.last_right |= 1;
self.index += 1;
};
debug_assert!(
self.forest & self.last_right != 0,
"parent requires both a left and right",
);
// compute the number of nodes in the right tree, this is the offset to the
// previous left parent
let right_nodes = Forest::new(self.last_right).num_nodes();
// the next parent position is one above the position of the pair
let parent = self.last_right << 1;
// the left node has been paired and the current parent yielded, removed it from the
// forest
self.forest ^= self.last_right;
if self.forest & parent == 0 {
// this iteration yielded the left parent node
debug_assert!(self.forest & 1 == 0, "next iteration yields a left leaf");
self.last_right = 0;
self.forest ^= parent;
} else {
// the left node of the parent level has been yielded already, this iteration
// was the right parent. Next iteration yields their parent.
self.last_right = parent;
}
// yields a parent
let value = self.mmr.nodes[self.index];
let right = self.mmr.nodes[self.index - 1];
let left = self.mmr.nodes[self.index - 1 - right_nodes];
self.index += 1;
let node = InnerNodeInfo { value, left, right };
Some(node)
} else {
None
}
}
}
// TESTS
// ================================================================================================
#[cfg(test)]
mod tests {
use alloc::vec::Vec;
use crate::{
Felt, Word, ZERO,
merkle::mmr::Mmr,
utils::{Deserializable, Serializable},
};
#[test]
fn test_serialization() {
let nodes = (0u64..128u64)
.map(|value| Word::new([ZERO, ZERO, ZERO, Felt::new(value)]))
.collect::<Vec<_>>();
let mmr = Mmr::from(nodes);
let serialized = mmr.to_bytes();
let deserialized = Mmr::read_from_bytes(&serialized).unwrap();
assert_eq!(mmr.forest, deserialized.forest);
assert_eq!(mmr.nodes, deserialized.nodes);
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/store/tests.rs | miden-crypto/src/merkle/store/tests.rs | #![cfg(feature = "std")]
use assert_matches::assert_matches;
use seq_macro::seq;
#[cfg(feature = "std")]
use {
super::{Deserializable, Serializable},
alloc::boxed::Box,
std::error::Error,
};
use super::{
EmptySubtreeRoots, MerkleError, MerklePath, MerkleStore, NodeIndex, PartialMerkleTree, Rpo256,
Word,
};
use crate::{
Felt, ONE, WORD_SIZE, ZERO,
merkle::{
MerkleTree, int_to_leaf, int_to_node,
smt::{LeafIndex, SMT_MAX_DEPTH, SimpleSmt},
},
};
// TEST DATA
// ================================================================================================
const KEYS4: [u64; 4] = [0, 1, 2, 3];
const VALUES4: [Word; 4] = [int_to_node(1), int_to_node(2), int_to_node(3), int_to_node(4)];
const VALUES8: [Word; 8] = [
int_to_node(1),
int_to_node(2),
int_to_node(3),
int_to_node(4),
int_to_node(5),
int_to_node(6),
int_to_node(7),
int_to_node(8),
];
// TESTS
// ================================================================================================
#[test]
fn test_root_not_in_store() -> Result<(), MerkleError> {
let mtree = MerkleTree::new(VALUES4)?;
let store = MerkleStore::from(&mtree);
assert_matches!(
store.get_node(VALUES4[0], NodeIndex::make(mtree.depth(), 0)),
Err(MerkleError::RootNotInStore(root)) if root == VALUES4[0],
"Leaf 0 is not a root"
);
assert_matches!(
store.get_path(VALUES4[0], NodeIndex::make(mtree.depth(), 0)),
Err(MerkleError::RootNotInStore(root)) if root == VALUES4[0],
"Leaf 0 is not a root"
);
assert!(
!store.has_path(VALUES4[0], NodeIndex::make(mtree.depth(), 0)),
"Leaf 0 is not a root"
);
Ok(())
}
#[test]
fn test_merkle_tree() -> Result<(), MerkleError> {
let mtree = MerkleTree::new(VALUES4)?;
let store = MerkleStore::from(&mtree);
// STORE LEAVES ARE CORRECT -------------------------------------------------------------------
// checks the leaves in the store corresponds to the expected values
assert_eq!(
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 0)).unwrap(),
VALUES4[0],
"node 0 must be in the tree"
);
assert_eq!(
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 1)).unwrap(),
VALUES4[1],
"node 1 must be in the tree"
);
assert_eq!(
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 2)).unwrap(),
VALUES4[2],
"node 2 must be in the tree"
);
assert_eq!(
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 3)).unwrap(),
VALUES4[3],
"node 3 must be in the tree"
);
// STORE LEAVES MATCH TREE --------------------------------------------------------------------
// sanity check the values returned by the store and the tree
assert_eq!(
mtree.get_node(NodeIndex::make(mtree.depth(), 0)).unwrap(),
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 0)).unwrap(),
"node 0 must be the same for both MerkleTree and MerkleStore"
);
assert_eq!(
mtree.get_node(NodeIndex::make(mtree.depth(), 1)).unwrap(),
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 1)).unwrap(),
"node 1 must be the same for both MerkleTree and MerkleStore"
);
assert_eq!(
mtree.get_node(NodeIndex::make(mtree.depth(), 2)).unwrap(),
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 2)).unwrap(),
"node 2 must be the same for both MerkleTree and MerkleStore"
);
assert_eq!(
mtree.get_node(NodeIndex::make(mtree.depth(), 3)).unwrap(),
store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 3)).unwrap(),
"node 3 must be the same for both MerkleTree and MerkleStore"
);
// STORE MERKLE PATH MATCHES ==============================================================
// assert the merkle path returned by the store is the same as the one in the tree
let result = store.get_path(mtree.root(), NodeIndex::make(mtree.depth(), 0)).unwrap();
assert_eq!(
VALUES4[0], result.value,
"Value for merkle path at index 0 must match leaf value"
);
assert_eq!(
mtree.get_path(NodeIndex::make(mtree.depth(), 0)).unwrap(),
result.path,
"merkle path for index 0 must be the same for the MerkleTree and MerkleStore"
);
assert!(
store.has_path(mtree.root(), NodeIndex::make(mtree.depth(), 0)),
"path for index 0 must exist"
);
let result = store.get_path(mtree.root(), NodeIndex::make(mtree.depth(), 1)).unwrap();
assert_eq!(
VALUES4[1], result.value,
"Value for merkle path at index 0 must match leaf value"
);
assert_eq!(
mtree.get_path(NodeIndex::make(mtree.depth(), 1)).unwrap(),
result.path,
"merkle path for index 1 must be the same for the MerkleTree and MerkleStore"
);
assert!(
store.has_path(mtree.root(), NodeIndex::make(mtree.depth(), 1)),
"path for index 1 must exist"
);
let result = store.get_path(mtree.root(), NodeIndex::make(mtree.depth(), 2)).unwrap();
assert_eq!(
VALUES4[2], result.value,
"Value for merkle path at index 0 must match leaf value"
);
assert_eq!(
mtree.get_path(NodeIndex::make(mtree.depth(), 2)).unwrap(),
result.path,
"merkle path for index 0 must be the same for the MerkleTree and MerkleStore"
);
assert!(
store.has_path(mtree.root(), NodeIndex::make(mtree.depth(), 2)),
"path for index 2 must exist"
);
let result = store.get_path(mtree.root(), NodeIndex::make(mtree.depth(), 3)).unwrap();
assert_eq!(
VALUES4[3], result.value,
"Value for merkle path at index 0 must match leaf value"
);
assert_eq!(
mtree.get_path(NodeIndex::make(mtree.depth(), 3)).unwrap(),
result.path,
"merkle path for index 0 must be the same for the MerkleTree and MerkleStore"
);
assert!(
store.has_path(mtree.root(), NodeIndex::make(mtree.depth(), 3)),
"path for index 3 must exist"
);
Ok(())
}
#[test]
fn test_empty_roots() {
let store = MerkleStore::default();
let mut root = Word::default();
for depth in 0..255 {
root = Rpo256::merge(&[root; 2]);
assert!(
store.get_node(root, NodeIndex::make(0, 0)).is_ok(),
"The root of the empty tree of depth {depth} must be registered"
);
}
}
#[test]
fn test_leaf_paths_for_empty_trees() -> Result<(), MerkleError> {
let store = MerkleStore::default();
// Starts at 1 because leaves are not included in the store.
// Ends at 64 because it is not possible to represent an index of a depth greater than 64,
// because a u64 is used to index the leaf.
seq!(DEPTH in 1_u8..64_u8 {
let smt = SimpleSmt::<DEPTH>::new()?;
let index = NodeIndex::make(DEPTH, 0);
let store_path = store.get_path(smt.root(), index)?;
let smt_path = smt.open(&LeafIndex::<DEPTH>::new(0)?).path;
assert_eq!(
store_path.value,
Word::default(),
"the leaf of an empty tree is always ZERO"
);
assert_eq!(
store_path.path, smt_path,
"the returned merkle path does not match the computed values"
);
assert_eq!(
store_path.path.compute_root(DEPTH.into(), Word::default()).unwrap(),
smt.root(),
"computed root from the path must match the empty tree root"
);
assert!(store.has_path(smt.root(), index), "path for index 0 at depth {} must exist", DEPTH);
});
Ok(())
}
#[test]
fn test_get_invalid_node() {
let mtree = MerkleTree::new(VALUES4).expect("creating a merkle tree must work");
let store = MerkleStore::from(&mtree);
let _ = store.get_node(mtree.root(), NodeIndex::make(mtree.depth(), 3));
}
#[test]
fn test_add_sparse_merkle_tree_one_level() -> Result<(), MerkleError> {
let keys2: [u64; 2] = [0, 1];
let leaves2: [Word; 2] = [int_to_leaf(1), int_to_leaf(2)];
let smt = SimpleSmt::<1>::with_leaves(keys2.into_iter().zip(leaves2)).unwrap();
let store = MerkleStore::from(&smt);
let idx = NodeIndex::make(1, 0);
assert_eq!(smt.get_node(idx).unwrap(), leaves2[0]);
assert_eq!(store.get_node(smt.root(), idx).unwrap(), smt.get_node(idx).unwrap());
let idx = NodeIndex::make(1, 1);
assert_eq!(smt.get_node(idx).unwrap(), leaves2[1]);
assert_eq!(store.get_node(smt.root(), idx).unwrap(), smt.get_node(idx).unwrap());
Ok(())
}
#[test]
fn test_sparse_merkle_tree() -> Result<(), MerkleError> {
let smt =
SimpleSmt::<SMT_MAX_DEPTH>::with_leaves(KEYS4.into_iter().zip(VALUES4.to_vec())).unwrap();
let store = MerkleStore::from(&smt);
// STORE LEAVES ARE CORRECT ==============================================================
// checks the leaves in the store corresponds to the expected values
assert_eq!(
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 0)).unwrap(),
VALUES4[0],
"node 0 must be in the tree"
);
assert_eq!(
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 1)).unwrap(),
VALUES4[1],
"node 1 must be in the tree"
);
assert_eq!(
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 2)).unwrap(),
VALUES4[2],
"node 2 must be in the tree"
);
assert_eq!(
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 3)).unwrap(),
VALUES4[3],
"node 3 must be in the tree"
);
assert_eq!(
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 4)).unwrap(),
Word::default(),
"unmodified node 4 must be ZERO"
);
// STORE LEAVES MATCH TREE ===============================================================
// sanity check the values returned by the store and the tree
assert_eq!(
smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 0)).unwrap(),
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 0)).unwrap(),
"node 0 must be the same for both SparseMerkleTree and MerkleStore"
);
assert_eq!(
smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 1)).unwrap(),
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 1)).unwrap(),
"node 1 must be the same for both SparseMerkleTree and MerkleStore"
);
assert_eq!(
smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 2)).unwrap(),
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 2)).unwrap(),
"node 2 must be the same for both SparseMerkleTree and MerkleStore"
);
assert_eq!(
smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 3)).unwrap(),
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 3)).unwrap(),
"node 3 must be the same for both SparseMerkleTree and MerkleStore"
);
assert_eq!(
smt.get_node(NodeIndex::make(SMT_MAX_DEPTH, 4)).unwrap(),
store.get_node(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 4)).unwrap(),
"node 4 must be the same for both SparseMerkleTree and MerkleStore"
);
// STORE MERKLE PATH MATCHES ==============================================================
// assert the merkle path returned by the store is the same as the one in the tree
let result = store.get_path(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 0)).unwrap();
assert_eq!(
VALUES4[0], result.value,
"Value for merkle path at index 0 must match leaf value"
);
assert_eq!(
smt.open(&LeafIndex::<SMT_MAX_DEPTH>::new(0).unwrap()).path,
result.path,
"merkle path for index 0 must be the same for the MerkleTree and MerkleStore"
);
assert!(
store.has_path(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 0)),
"path for index 0 must exist"
);
let result = store.get_path(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 1)).unwrap();
assert_eq!(
VALUES4[1], result.value,
"Value for merkle path at index 1 must match leaf value"
);
assert_eq!(
smt.open(&LeafIndex::<SMT_MAX_DEPTH>::new(1).unwrap()).path,
result.path,
"merkle path for index 1 must be the same for the MerkleTree and MerkleStore"
);
assert!(
store.has_path(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 1)),
"path for index 1 must exist"
);
let result = store.get_path(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 2)).unwrap();
assert_eq!(
VALUES4[2], result.value,
"Value for merkle path at index 2 must match leaf value"
);
assert_eq!(
smt.open(&LeafIndex::<SMT_MAX_DEPTH>::new(2).unwrap()).path,
result.path,
"merkle path for index 2 must be the same for the MerkleTree and MerkleStore"
);
assert!(
store.has_path(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 2)),
"path for index 2 must exist"
);
let result = store.get_path(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 3)).unwrap();
assert_eq!(
VALUES4[3], result.value,
"Value for merkle path at index 3 must match leaf value"
);
assert_eq!(
smt.open(&LeafIndex::<SMT_MAX_DEPTH>::new(3).unwrap()).path,
result.path,
"merkle path for index 3 must be the same for the MerkleTree and MerkleStore"
);
assert!(
store.has_path(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 3)),
"path for index 3 must exist"
);
let result = store.get_path(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 4)).unwrap();
assert_eq!(
Word::default(),
result.value,
"Value for merkle path at index 4 must match leaf value"
);
assert_eq!(
smt.open(&LeafIndex::<SMT_MAX_DEPTH>::new(4).unwrap()).path,
result.path,
"merkle path for index 4 must be the same for the MerkleTree and MerkleStore"
);
assert!(
store.has_path(smt.root(), NodeIndex::make(SMT_MAX_DEPTH, 4)),
"path for index 4 must exist"
);
Ok(())
}
#[test]
fn test_add_merkle_paths() -> Result<(), MerkleError> {
let mtree = MerkleTree::new(VALUES4)?;
let i0 = 0;
let p0 = mtree.get_path(NodeIndex::make(2, i0)).unwrap();
let i1 = 1;
let p1 = mtree.get_path(NodeIndex::make(2, i1)).unwrap();
let i2 = 2;
let p2 = mtree.get_path(NodeIndex::make(2, i2)).unwrap();
let i3 = 3;
let p3 = mtree.get_path(NodeIndex::make(2, i3)).unwrap();
let paths = [
(i0, VALUES4[i0 as usize], p0),
(i1, VALUES4[i1 as usize], p1),
(i2, VALUES4[i2 as usize], p2),
(i3, VALUES4[i3 as usize], p3),
];
let mut store = MerkleStore::default();
store.add_merkle_paths(paths.clone()).expect("the valid paths must work");
let pmt = PartialMerkleTree::with_paths(paths).unwrap();
// STORE LEAVES ARE CORRECT ==============================================================
// checks the leaves in the store corresponds to the expected values
assert_eq!(
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 0)).unwrap(),
VALUES4[0],
"node 0 must be in the pmt"
);
assert_eq!(
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 1)).unwrap(),
VALUES4[1],
"node 1 must be in the pmt"
);
assert_eq!(
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 2)).unwrap(),
VALUES4[2],
"node 2 must be in the pmt"
);
assert_eq!(
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 3)).unwrap(),
VALUES4[3],
"node 3 must be in the pmt"
);
// STORE LEAVES MATCH PMT ================================================================
// sanity check the values returned by the store and the pmt
assert_eq!(
pmt.get_node(NodeIndex::make(pmt.max_depth(), 0)).unwrap(),
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 0)).unwrap(),
"node 0 must be the same for both PartialMerkleTree and MerkleStore"
);
assert_eq!(
pmt.get_node(NodeIndex::make(pmt.max_depth(), 1)).unwrap(),
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 1)).unwrap(),
"node 1 must be the same for both PartialMerkleTree and MerkleStore"
);
assert_eq!(
pmt.get_node(NodeIndex::make(pmt.max_depth(), 2)).unwrap(),
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 2)).unwrap(),
"node 2 must be the same for both PartialMerkleTree and MerkleStore"
);
assert_eq!(
pmt.get_node(NodeIndex::make(pmt.max_depth(), 3)).unwrap(),
store.get_node(pmt.root(), NodeIndex::make(pmt.max_depth(), 3)).unwrap(),
"node 3 must be the same for both PartialMerkleTree and MerkleStore"
);
// STORE MERKLE PATH MATCHES ==============================================================
// assert the merkle path returned by the store is the same as the one in the pmt
let result = store.get_path(pmt.root(), NodeIndex::make(pmt.max_depth(), 0)).unwrap();
assert_eq!(
VALUES4[0], result.value,
"Value for merkle path at index 0 must match leaf value"
);
assert_eq!(
pmt.get_path(NodeIndex::make(pmt.max_depth(), 0)).unwrap(),
result.path,
"merkle path for index 0 must be the same for the MerkleTree and MerkleStore"
);
assert!(
store.has_path(pmt.root(), NodeIndex::make(pmt.max_depth(), 0)),
"path for index 0 must exist"
);
let result = store.get_path(pmt.root(), NodeIndex::make(pmt.max_depth(), 1)).unwrap();
assert_eq!(
VALUES4[1], result.value,
"Value for merkle path at index 0 must match leaf value"
);
assert_eq!(
pmt.get_path(NodeIndex::make(pmt.max_depth(), 1)).unwrap(),
result.path,
"merkle path for index 1 must be the same for the MerkleTree and MerkleStore"
);
assert!(
store.has_path(pmt.root(), NodeIndex::make(pmt.max_depth(), 1)),
"path for index 1 must exist"
);
let result = store.get_path(pmt.root(), NodeIndex::make(pmt.max_depth(), 2)).unwrap();
assert_eq!(
VALUES4[2], result.value,
"Value for merkle path at index 0 must match leaf value"
);
assert_eq!(
pmt.get_path(NodeIndex::make(pmt.max_depth(), 2)).unwrap(),
result.path,
"merkle path for index 0 must be the same for the MerkleTree and MerkleStore"
);
assert!(
store.has_path(pmt.root(), NodeIndex::make(pmt.max_depth(), 2)),
"path for index 2 must exist"
);
let result = store.get_path(pmt.root(), NodeIndex::make(pmt.max_depth(), 3)).unwrap();
assert_eq!(
VALUES4[3], result.value,
"Value for merkle path at index 0 must match leaf value"
);
assert_eq!(
pmt.get_path(NodeIndex::make(pmt.max_depth(), 3)).unwrap(),
result.path,
"merkle path for index 0 must be the same for the MerkleTree and MerkleStore"
);
assert!(
store.has_path(pmt.root(), NodeIndex::make(pmt.max_depth(), 3)),
"path for index 3 must exist"
);
Ok(())
}
#[test]
fn wont_open_to_different_depth_root() {
let empty = EmptySubtreeRoots::empty_hashes(64);
let a = Word::new([ONE; 4]);
let b = Word::new([Felt::new(2); 4]);
// Compute the root for a different depth. We cherry-pick this specific depth to prevent a
// regression to a bug in the past that allowed the user to fetch a node at a depth lower than
// the inserted path of a Merkle tree.
let mut root = Rpo256::merge(&[a, b]);
for depth in (1..=63).rev() {
root = Rpo256::merge(&[root, empty[depth]]);
}
// For this example, the depth of the Merkle tree is 1, as we have only two leaves. Here we
// attempt to fetch a node on the maximum depth, and it should fail because the root shouldn't
// exist for the set.
let mtree = MerkleTree::new(vec![a, b]).unwrap();
let store = MerkleStore::from(&mtree);
let index = NodeIndex::root();
let err = store.get_node(root, index).err().unwrap();
assert_matches!(err, MerkleError::RootNotInStore(err_root) if err_root == root);
}
#[test]
fn store_path_opens_from_leaf() {
let a = Word::new([ONE; 4]);
let b = Word::new([Felt::new(2); 4]);
let c = Word::new([Felt::new(3); 4]);
let d = Word::new([Felt::new(4); 4]);
let e = Word::new([Felt::new(5); 4]);
let f = Word::new([Felt::new(6); 4]);
let g = Word::new([Felt::new(7); 4]);
let h = Word::new([Felt::new(8); 4]);
let i = Rpo256::merge(&[a, b]);
let j = Rpo256::merge(&[c, d]);
let k = Rpo256::merge(&[e, f]);
let l = Rpo256::merge(&[g, h]);
let m = Rpo256::merge(&[i, j]);
let n = Rpo256::merge(&[k, l]);
let root = Rpo256::merge(&[m, n]);
let mtree = MerkleTree::new(vec![a, b, c, d, e, f, g, h]).unwrap();
let store = MerkleStore::from(&mtree);
let path = store.get_path(root, NodeIndex::make(3, 1)).unwrap().path;
let expected = MerklePath::new([a, j, n].to_vec());
assert_eq!(path, expected);
}
#[test]
fn test_set_node() -> Result<(), MerkleError> {
let mtree = MerkleTree::new(VALUES4)?;
let mut store = MerkleStore::from(&mtree);
let value = int_to_node(42);
let index = NodeIndex::make(mtree.depth(), 0);
let new_root = store.set_node(mtree.root(), index, value)?.root;
assert_eq!(store.get_node(new_root, index).unwrap(), value, "value must have changed");
Ok(())
}
#[test]
fn test_constructors() -> Result<(), MerkleError> {
let mtree = MerkleTree::new(VALUES4)?;
let store = MerkleStore::from(&mtree);
let depth = mtree.depth();
let leaves = 2u64.pow(depth.into());
for index in 0..leaves {
let index = NodeIndex::make(depth, index);
let value_path = store.get_path(mtree.root(), index)?;
assert_eq!(mtree.get_path(index)?, value_path.path);
assert!(
store.has_path(mtree.root(), index),
"path for index {} at depth {} must exist",
index.value(),
depth
);
}
const DEPTH: u8 = 32;
let smt = SimpleSmt::<DEPTH>::with_leaves(KEYS4.into_iter().zip(VALUES4)).unwrap();
let store = MerkleStore::from(&smt);
for key in KEYS4 {
let index = NodeIndex::make(DEPTH, key);
let value_path = store.get_path(smt.root(), index)?;
assert_eq!(smt.open(&LeafIndex::<DEPTH>::new(key).unwrap()).path, value_path.path);
assert!(
store.has_path(smt.root(), index),
"path for key {} at depth {} must exist",
key,
DEPTH
);
}
let d = 2;
let paths = [
(0, VALUES4[0], mtree.get_path(NodeIndex::make(d, 0)).unwrap()),
(1, VALUES4[1], mtree.get_path(NodeIndex::make(d, 1)).unwrap()),
(2, VALUES4[2], mtree.get_path(NodeIndex::make(d, 2)).unwrap()),
(3, VALUES4[3], mtree.get_path(NodeIndex::make(d, 3)).unwrap()),
];
let mut store1 = MerkleStore::default();
store1.add_merkle_paths(paths.clone())?;
let mut store2 = MerkleStore::default();
store2.add_merkle_path(0, VALUES4[0], mtree.get_path(NodeIndex::make(d, 0))?)?;
store2.add_merkle_path(1, VALUES4[1], mtree.get_path(NodeIndex::make(d, 1))?)?;
store2.add_merkle_path(2, VALUES4[2], mtree.get_path(NodeIndex::make(d, 2))?)?;
store2.add_merkle_path(3, VALUES4[3], mtree.get_path(NodeIndex::make(d, 3))?)?;
let pmt = PartialMerkleTree::with_paths(paths).unwrap();
for key in [0, 1, 2, 3] {
let index = NodeIndex::make(d, key);
let value_path1 = store1.get_path(pmt.root(), index)?;
let value_path2 = store2.get_path(pmt.root(), index)?;
assert_eq!(value_path1, value_path2);
let index = NodeIndex::make(d, key);
assert_eq!(pmt.get_path(index)?, value_path1.path);
assert!(
store1.has_path(pmt.root(), index),
"path for key {} at depth {} must exist in store1",
key,
d
);
assert!(
store2.has_path(pmt.root(), index),
"path for key {} at depth {} must exist in store2",
key,
d
);
}
Ok(())
}
#[test]
fn node_path_should_be_truncated_by_midtier_insert() {
let key = 0b11010010_11001100_11001100_11001100_11001100_11001100_11001100_11001100_u64;
let mut store = MerkleStore::new();
let root: Word = EmptySubtreeRoots::empty_hashes(64)[0];
// insert first node - works as expected
let depth = 64;
let node = Word::from([Felt::new(key); WORD_SIZE]);
let index = NodeIndex::new(depth, key).unwrap();
let root = store.set_node(root, index, node).unwrap().root;
let result = store.get_node(root, index).unwrap();
let path = store.get_path(root, index).unwrap().path;
assert_eq!(node, result);
assert_eq!(path.depth(), depth);
assert!(path.verify(index.value(), result, &root).is_ok());
assert!(store.has_path(root, index), "path for first inserted node must exist");
// flip the first bit of the key and insert the second node on a different depth
let key = key ^ (1 << 63);
let key = key >> 8;
let depth = 56;
let node = Word::from([Felt::new(key); WORD_SIZE]);
let index = NodeIndex::new(depth, key).unwrap();
let root = store.set_node(root, index, node).unwrap().root;
let result = store.get_node(root, index).unwrap();
let path = store.get_path(root, index).unwrap().path;
assert_eq!(node, result);
assert_eq!(path.depth(), depth);
assert!(path.verify(index.value(), result, &root).is_ok());
assert!(store.has_path(root, index), "path for second inserted node must exist");
// attempt to fetch a path of the second node to depth 64
// should fail because the previously inserted node will remove its sub-tree from the set
let key = key << 8;
let index = NodeIndex::new(64, key).unwrap();
assert!(store.get_node(root, index).is_err());
}
// LEAF TRAVERSAL
// ================================================================================================
#[test]
fn get_leaf_depth_works_depth_64() {
let mut store = MerkleStore::new();
let mut root: Word = EmptySubtreeRoots::empty_hashes(64)[0];
let key = u64::MAX;
// this will create a rainbow tree and test all opening to depth 64
for d in 0..64 {
let k = key & (u64::MAX >> d);
let node = Word::from([Felt::new(k); WORD_SIZE]);
let index = NodeIndex::new(64, k).unwrap();
// assert the leaf doesn't exist before the insert. the returned depth should always
// increment with the paths count of the set, as they are intersecting one another up to
// the first bits of the used key.
assert_eq!(d, store.get_leaf_depth(root, 64, k).unwrap());
// insert and assert the correct depth
root = store.set_node(root, index, node).unwrap().root;
assert_eq!(64, store.get_leaf_depth(root, 64, k).unwrap());
}
}
#[test]
fn get_leaf_depth_works_with_incremental_depth() {
let mut store = MerkleStore::new();
let mut root: Word = EmptySubtreeRoots::empty_hashes(64)[0];
// insert some path to the left of the root and assert it
let key = 0b01001011_10110110_00001101_01110100_00111011_10101101_00000100_01000001_u64;
assert_eq!(0, store.get_leaf_depth(root, 64, key).unwrap());
let depth = 64;
let index = NodeIndex::new(depth, key).unwrap();
let node = Word::from([Felt::new(key); WORD_SIZE]);
root = store.set_node(root, index, node).unwrap().root;
assert_eq!(depth, store.get_leaf_depth(root, 64, key).unwrap());
// flip the key to the right of the root and insert some content on depth 16
let key = 0b11001011_10110110_00000000_00000000_00000000_00000000_00000000_00000000_u64;
assert_eq!(1, store.get_leaf_depth(root, 64, key).unwrap());
let depth = 16;
let index = NodeIndex::new(depth, key >> (64 - depth)).unwrap();
let node = Word::from([Felt::new(key); WORD_SIZE]);
root = store.set_node(root, index, node).unwrap().root;
assert_eq!(depth, store.get_leaf_depth(root, 64, key).unwrap());
// attempt the sibling of the previous leaf
let key = 0b11001011_10110111_00000000_00000000_00000000_00000000_00000000_00000000_u64;
assert_eq!(16, store.get_leaf_depth(root, 64, key).unwrap());
let index = NodeIndex::new(depth, key >> (64 - depth)).unwrap();
let node = Word::from([Felt::new(key); WORD_SIZE]);
root = store.set_node(root, index, node).unwrap().root;
assert_eq!(depth, store.get_leaf_depth(root, 64, key).unwrap());
// move down to the next depth and assert correct behavior
let key = 0b11001011_10110100_00000000_00000000_00000000_00000000_00000000_00000000_u64;
assert_eq!(15, store.get_leaf_depth(root, 64, key).unwrap());
let depth = 17;
let index = NodeIndex::new(depth, key >> (64 - depth)).unwrap();
let node = Word::from([Felt::new(key); WORD_SIZE]);
root = store.set_node(root, index, node).unwrap().root;
assert_eq!(depth, store.get_leaf_depth(root, 64, key).unwrap());
}
#[test]
fn get_leaf_depth_works_with_depth_8() {
let mut store = MerkleStore::new();
let mut root: Word = EmptySubtreeRoots::empty_hashes(8)[0];
// insert some random, 8 depth keys. `a` diverges from the first bit
let a = 0b01101001_u64;
let b = 0b10011001_u64;
let c = 0b10010110_u64;
let d = 0b11110110_u64;
for k in [a, b, c, d] {
let index = NodeIndex::new(8, k).unwrap();
let node = Word::from([Felt::new(k); WORD_SIZE]);
root = store.set_node(root, index, node).unwrap().root;
}
// assert all leaves returns the inserted depth
for k in [a, b, c, d] {
assert_eq!(8, store.get_leaf_depth(root, 8, k).unwrap());
}
// flip last bit of a and expect it to return the same depth, but for an empty node
assert_eq!(8, store.get_leaf_depth(root, 8, 0b01101000_u64).unwrap());
// flip fourth bit of a and expect an empty node on depth 4
assert_eq!(4, store.get_leaf_depth(root, 8, 0b01111001_u64).unwrap());
// flip third bit of a and expect an empty node on depth 3
assert_eq!(3, store.get_leaf_depth(root, 8, 0b01001001_u64).unwrap());
// flip second bit of a and expect an empty node on depth 2
assert_eq!(2, store.get_leaf_depth(root, 8, 0b00101001_u64).unwrap());
// flip fourth bit of c and expect an empty node on depth 4
assert_eq!(4, store.get_leaf_depth(root, 8, 0b10000110_u64).unwrap());
// flip second bit of d and expect an empty node on depth 3 as depth 2 conflicts with b and c
assert_eq!(3, store.get_leaf_depth(root, 8, 0b10110110_u64).unwrap());
// duplicate the tree on `a` and assert the depth is short-circuited by such sub-tree
let index = NodeIndex::new(8, a).unwrap();
root = store.set_node(root, index, root).unwrap().root;
assert_matches!(store.get_leaf_depth(root, 8, a).unwrap_err(), MerkleError::DepthTooBig(9));
}
#[test]
fn find_lone_leaf() {
let mut store = MerkleStore::new();
let empty = EmptySubtreeRoots::empty_hashes(64);
let mut root: Word = empty[0];
// insert a single leaf into the store at depth 64
let key_a = 0b01010101_10101010_00001111_01110100_00111011_10101101_00000100_01000001_u64;
let idx_a = NodeIndex::make(64, key_a);
let val_a = Word::from([ONE, ONE, ONE, ONE]);
root = store.set_node(root, idx_a, val_a).unwrap().root;
// for every ancestor of A, A should be a long leaf
for depth in 1..64 {
let parent_index = NodeIndex::make(depth, key_a >> (64 - depth));
let parent = store.get_node(root, parent_index).unwrap();
let res = store.find_lone_leaf(parent, parent_index, 64).unwrap();
assert_eq!(res, Some((idx_a, val_a)));
}
// insert another leaf into the store such that it has the same 8 bit prefix as A
let key_b = 0b01010101_01111010_00001111_01110100_00111011_10101101_00000100_01000001_u64;
let idx_b = NodeIndex::make(64, key_b);
let val_b = Word::from([ONE, ONE, ONE, ZERO]);
root = store.set_node(root, idx_b, val_b).unwrap().root;
// for any node which is common between A and B, find_lone_leaf() should return None as the
// node has two descendants
for depth in 1..9 {
let parent_index = NodeIndex::make(depth, key_a >> (64 - depth));
let parent = store.get_node(root, parent_index).unwrap();
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | true |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/store/mod.rs | miden-crypto/src/merkle/store/mod.rs | //! Merkle store for efficiently storing multiple Merkle trees with common subtrees.
use alloc::vec::Vec;
use core::borrow::Borrow;
use super::{
EmptySubtreeRoots, InnerNodeInfo, MerkleError, MerklePath, MerkleProof, MerkleTree, NodeIndex,
PartialMerkleTree, RootPath, Rpo256, Word,
mmr::Mmr,
smt::{SimpleSmt, Smt},
};
use crate::{
Map,
utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable},
};
#[cfg(test)]
mod tests;
// MERKLE STORE
// ================================================================================================
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct StoreNode {
left: Word,
right: Word,
}
/// An in-memory data store for Merkelized data.
///
/// This is a in memory data store for Merkle trees, this store allows all the nodes of multiple
/// trees to live as long as necessary and without duplication, this allows the implementation of
/// space efficient persistent data structures.
///
/// Example usage:
///
/// ```rust
/// # use miden_crypto::{ZERO, Felt, Word};
/// # use miden_crypto::merkle::{NodeIndex, MerkleTree, store::MerkleStore};
/// # use miden_crypto::hash::rpo::Rpo256;
/// # use miden_crypto::field::PrimeCharacteristicRing;
/// # const fn int_to_node(value: u64) -> Word {
/// # Word::new([Felt::new(value), ZERO, ZERO, ZERO])
/// # }
/// # let A = int_to_node(1);
/// # let B = int_to_node(2);
/// # let C = int_to_node(3);
/// # let D = int_to_node(4);
/// # let E = int_to_node(5);
/// # let F = int_to_node(6);
/// # let G = int_to_node(7);
/// # let H0 = int_to_node(8);
/// # let H1 = int_to_node(9);
/// # let T0 = MerkleTree::new([A, B, C, D, E, F, G, H0].to_vec()).expect("even number of leaves provided");
/// # let T1 = MerkleTree::new([A, B, C, D, E, F, G, H1].to_vec()).expect("even number of leaves provided");
/// # let ROOT0 = T0.root();
/// # let ROOT1 = T1.root();
/// let mut store: MerkleStore = MerkleStore::new();
///
/// // the store is initialized with the SMT empty nodes
/// assert_eq!(store.num_internal_nodes(), 255);
///
/// let tree1 = MerkleTree::new(vec![A, B, C, D, E, F, G, H0]).unwrap();
/// let tree2 = MerkleTree::new(vec![A, B, C, D, E, F, G, H1]).unwrap();
///
/// // populates the store with two merkle trees, common nodes are shared
/// store.extend(tree1.inner_nodes());
/// store.extend(tree2.inner_nodes());
///
/// // every leaf except the last are the same
/// for i in 0..7 {
/// let idx0 = NodeIndex::new(3, i).unwrap();
/// let d0 = store.get_node(ROOT0, idx0).unwrap();
/// let idx1 = NodeIndex::new(3, i).unwrap();
/// let d1 = store.get_node(ROOT1, idx1).unwrap();
/// assert_eq!(d0, d1, "Both trees have the same leaf at pos {i}");
/// }
///
/// // The leaves A-B-C-D are the same for both trees, so are their 2 immediate parents
/// for i in 0..4 {
/// let idx0 = NodeIndex::new(3, i).unwrap();
/// let d0 = store.get_path(ROOT0, idx0).unwrap();
/// let idx1 = NodeIndex::new(3, i).unwrap();
/// let d1 = store.get_path(ROOT1, idx1).unwrap();
/// assert_eq!(d0.path[0..2], d1.path[0..2], "Both sub-trees are equal up to two levels");
/// }
///
/// // Common internal nodes are shared, the two added trees have a total of 30, but the store has
/// // only 10 new entries, corresponding to the 10 unique internal nodes of these trees.
/// assert_eq!(store.num_internal_nodes() - 255, 10);
/// ```
#[derive(Debug, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct MerkleStore {
nodes: Map<Word, StoreNode>,
}
impl Default for MerkleStore {
fn default() -> Self {
Self::new()
}
}
impl MerkleStore {
// CONSTRUCTORS
// --------------------------------------------------------------------------------------------
/// Creates an empty `MerkleStore` instance.
pub fn new() -> MerkleStore {
// pre-populate the store with the empty hashes
let nodes = empty_hashes().collect();
MerkleStore { nodes }
}
// PUBLIC ACCESSORS
// --------------------------------------------------------------------------------------------
/// Return a count of the non-leaf nodes in the store.
pub fn num_internal_nodes(&self) -> usize {
self.nodes.len()
}
/// Returns the node at `index` rooted on the tree `root`.
///
/// # Errors
/// This method can return the following errors:
/// - `RootNotInStore` if the `root` is not present in the store.
/// - `NodeNotInStore` if a node needed to traverse from `root` to `index` is not present in the
/// store.
pub fn get_node(&self, root: Word, index: NodeIndex) -> Result<Word, MerkleError> {
let mut hash = root;
// corner case: check the root is in the store when called with index `NodeIndex::root()`
self.nodes.get(&hash).ok_or(MerkleError::RootNotInStore(hash))?;
for i in (0..index.depth()).rev() {
let node = self
.nodes
.get(&hash)
.ok_or(MerkleError::NodeIndexNotFoundInStore(hash, index))?;
let is_right = index.is_nth_bit_odd(i);
hash = if is_right { node.right } else { node.left };
}
Ok(hash)
}
/// Returns the node at the specified `index` and its opening to the `root`.
///
/// The path starts at the sibling of the target leaf.
///
/// # Errors
/// This method can return the following errors:
/// - `RootNotInStore` if the `root` is not present in the store.
/// - `NodeNotInStore` if a node needed to traverse from `root` to `index` is not present in the
/// store.
pub fn get_path(&self, root: Word, index: NodeIndex) -> Result<MerkleProof, MerkleError> {
let mut hash = root;
let mut path = Vec::with_capacity(index.depth().into());
// corner case: check the root is in the store when called with index `NodeIndex::root()`
self.nodes.get(&hash).ok_or(MerkleError::RootNotInStore(hash))?;
for i in (0..index.depth()).rev() {
let node = self
.nodes
.get(&hash)
.ok_or(MerkleError::NodeIndexNotFoundInStore(hash, index))?;
let is_right = index.is_nth_bit_odd(i);
hash = if is_right {
path.push(node.left);
node.right
} else {
path.push(node.right);
node.left
}
}
// the path is computed from root to leaf, so it must be reversed
path.reverse();
Ok(MerkleProof::new(hash, MerklePath::new(path)))
}
/// Returns `true` if a valid path exists from `root` to the specified `index`, `false`
/// otherwise.
///
/// This method checks if all nodes needed to traverse from `root` to `index` are present in the
/// store, without building the actual path. It is more efficient than `get_path` when only
/// existence verification is needed.
pub fn has_path(&self, root: Word, index: NodeIndex) -> bool {
// check if the root exists
if !self.nodes.contains_key(&root) {
return false;
}
// traverse from root to index
let mut hash = root;
for i in (0..index.depth()).rev() {
let node = match self.nodes.get(&hash) {
Some(node) => node,
None => return false,
};
let is_right = index.is_nth_bit_odd(i);
hash = if is_right { node.right } else { node.left };
}
true
}
// LEAF TRAVERSAL
// --------------------------------------------------------------------------------------------
/// Returns the depth of the first leaf or an empty node encountered while traversing the tree
/// from the specified root down according to the provided index.
///
/// The `tree_depth` parameter specifies the depth of the tree rooted at `root`. The
/// maximum value the argument accepts is [u64::BITS].
///
/// # Errors
/// Will return an error if:
/// - The provided root is not found.
/// - The provided `tree_depth` is greater than 64.
/// - The provided `index` is not valid for a depth equivalent to `tree_depth`.
/// - No leaf or an empty node was found while traversing the tree down to `tree_depth`.
pub fn get_leaf_depth(
&self,
root: Word,
tree_depth: u8,
index: u64,
) -> Result<u8, MerkleError> {
// validate depth and index
if tree_depth > 64 {
return Err(MerkleError::DepthTooBig(tree_depth as u64));
}
NodeIndex::new(tree_depth, index)?;
// check if the root exists, providing the proper error report if it doesn't
let empty = EmptySubtreeRoots::empty_hashes(tree_depth);
let mut hash = root;
if !self.nodes.contains_key(&hash) {
return Err(MerkleError::RootNotInStore(hash));
}
// we traverse from root to leaf, so the path is reversed
let mut path = (index << (64 - tree_depth)).reverse_bits();
// iterate every depth and reconstruct the path from root to leaf
for depth in 0..=tree_depth {
// we short-circuit if an empty node has been found
if hash == empty[depth as usize] {
return Ok(depth);
}
// fetch the children pair, mapped by its parent hash
let children = match self.nodes.get(&hash) {
Some(node) => node,
None => return Ok(depth),
};
// traverse down
hash = if path & 1 == 0 { children.left } else { children.right };
path >>= 1;
}
// return an error because we exhausted the index but didn't find either a leaf or an
// empty node
Err(MerkleError::DepthTooBig(tree_depth as u64 + 1))
}
/// Returns index and value of a leaf node which is the only leaf node in a subtree defined by
/// the provided root. If the subtree contains zero or more than one leaf nodes None is
/// returned.
///
/// The `tree_depth` parameter specifies the depth of the parent tree such that `root` is
/// located in this tree at `root_index`. The maximum value the argument accepts is
/// [u64::BITS].
///
/// # Errors
/// Will return an error if:
/// - The provided root is not found.
/// - The provided `tree_depth` is greater than 64.
/// - The provided `root_index` has depth greater than `tree_depth`.
/// - A lone node at depth `tree_depth` is not a leaf node.
pub fn find_lone_leaf(
&self,
root: Word,
root_index: NodeIndex,
tree_depth: u8,
) -> Result<Option<(NodeIndex, Word)>, MerkleError> {
// we set max depth at u64::BITS as this is the largest meaningful value for a 64-bit index
const MAX_DEPTH: u8 = u64::BITS as u8;
if tree_depth > MAX_DEPTH {
return Err(MerkleError::DepthTooBig(tree_depth as u64));
}
let empty = EmptySubtreeRoots::empty_hashes(MAX_DEPTH);
let mut node = root;
if !self.nodes.contains_key(&node) {
return Err(MerkleError::RootNotInStore(node));
}
let mut index = root_index;
if index.depth() > tree_depth {
return Err(MerkleError::DepthTooBig(index.depth() as u64));
}
// traverse down following the path of single non-empty nodes; this works because if a
// node has two empty children it cannot contain a lone leaf. similarly if a node has
// two non-empty children it must contain at least two leaves.
for depth in index.depth()..tree_depth {
// if the node is a leaf, return; otherwise, examine the node's children
let children = match self.nodes.get(&node) {
Some(node) => node,
None => return Ok(Some((index, node))),
};
let empty_node = empty[depth as usize + 1];
node = if children.left != empty_node && children.right == empty_node {
index = index.left_child();
children.left
} else if children.left == empty_node && children.right != empty_node {
index = index.right_child();
children.right
} else {
return Ok(None);
};
}
// if we are here, we got to `tree_depth`; thus, either the current node is a leaf node,
// and so we return it, or it is an internal node, and then we return an error
if self.nodes.contains_key(&node) {
Err(MerkleError::DepthTooBig(tree_depth as u64 + 1))
} else {
Ok(Some((index, node)))
}
}
// DATA EXTRACTORS
// --------------------------------------------------------------------------------------------
/// Returns a subset of this Merkle store such that the returned Merkle store contains all
/// nodes which are descendants of the specified roots.
///
/// The roots for which no descendants exist in this Merkle store are ignored.
pub fn subset<I, R>(&self, roots: I) -> MerkleStore
where
I: Iterator<Item = R>,
R: Borrow<Word>,
{
let mut store = MerkleStore::new();
for root in roots {
let root = *root.borrow();
store.clone_tree_from(root, self);
}
store
}
/// Iterator over the inner nodes of the [MerkleStore].
pub fn inner_nodes(&self) -> impl Iterator<Item = InnerNodeInfo> + '_ {
self.nodes
.iter()
.map(|(r, n)| InnerNodeInfo { value: *r, left: n.left, right: n.right })
}
/// Iterator over the non-empty leaves of the Merkle tree associated with the specified `root`
/// and `max_depth`.
pub fn non_empty_leaves(
&self,
root: Word,
max_depth: u8,
) -> impl Iterator<Item = (NodeIndex, Word)> + '_ {
let empty_roots = EmptySubtreeRoots::empty_hashes(max_depth);
let mut stack = Vec::new();
stack.push((NodeIndex::new_unchecked(0, 0), root));
core::iter::from_fn(move || {
while let Some((index, node_hash)) = stack.pop() {
// if we are at the max depth then we have reached a leaf
if index.depth() == max_depth {
return Some((index, node_hash));
}
// fetch the nodes children and push them onto the stack if they are not the roots
// of empty subtrees
if let Some(node) = self.nodes.get(&node_hash) {
if !empty_roots.contains(&node.left) {
stack.push((index.left_child(), node.left));
}
if !empty_roots.contains(&node.right) {
stack.push((index.right_child(), node.right));
}
// if the node is not in the store assume it is a leaf
} else {
return Some((index, node_hash));
}
}
None
})
}
// STATE MUTATORS
// --------------------------------------------------------------------------------------------
/// Adds all the nodes of a Merkle path represented by `path`, opening to `node`. Returns the
/// new root.
///
/// This will compute the sibling elements determined by the Merkle `path` and `node`, and
/// include all the nodes into the store.
pub fn add_merkle_path(
&mut self,
index: u64,
node: Word,
path: MerklePath,
) -> Result<Word, MerkleError> {
let root = path.authenticated_nodes(index, node)?.fold(Word::default(), |_, node| {
let value: Word = node.value;
let left: Word = node.left;
let right: Word = node.right;
debug_assert_eq!(Rpo256::merge(&[left, right]), value);
self.nodes.insert(value, StoreNode { left, right });
node.value
});
Ok(root)
}
/// Adds all the nodes of multiple Merkle paths into the store.
///
/// This will compute the sibling elements for each Merkle `path` and include all the nodes
/// into the store.
///
/// For further reference, check [MerkleStore::add_merkle_path].
pub fn add_merkle_paths<I>(&mut self, paths: I) -> Result<(), MerkleError>
where
I: IntoIterator<Item = (u64, Word, MerklePath)>,
{
for (index_value, node, path) in paths.into_iter() {
self.add_merkle_path(index_value, node, path)?;
}
Ok(())
}
/// Sets a node to `value`.
///
/// # Errors
/// This method can return the following errors:
/// - `RootNotInStore` if the `root` is not present in the store.
/// - `NodeNotInStore` if a node needed to traverse from `root` to `index` is not present in the
/// store.
pub fn set_node(
&mut self,
mut root: Word,
index: NodeIndex,
value: Word,
) -> Result<RootPath, MerkleError> {
let node = value;
let MerkleProof { value, path } = self.get_path(root, index)?;
// performs the update only if the node value differs from the opening
if node != value {
root = self.add_merkle_path(index.value(), node, path.clone())?;
}
Ok(RootPath { root, path })
}
/// Merges two elements and adds the resulting node into the store.
///
/// Merges arbitrary values. They may be leaves, nodes, or a mixture of both.
pub fn merge_roots(&mut self, left_root: Word, right_root: Word) -> Result<Word, MerkleError> {
let parent = Rpo256::merge(&[left_root, right_root]);
self.nodes.insert(parent, StoreNode { left: left_root, right: right_root });
Ok(parent)
}
// HELPER METHODS
// --------------------------------------------------------------------------------------------
/// Returns the inner storage of this MerkleStore while consuming `self`.
pub fn into_inner(self) -> Map<Word, StoreNode> {
self.nodes
}
/// Recursively clones a tree with the specified root from the specified source into self.
///
/// If the source store does not contain a tree with the specified root, this is a noop.
fn clone_tree_from(&mut self, root: Word, source: &Self) {
// process the node only if it is in the source
if let Some(node) = source.nodes.get(&root) {
// if the node has already been inserted, no need to process it further as all of its
// descendants should be already cloned from the source store
if self.nodes.insert(root, *node).is_none() {
self.clone_tree_from(node.left, source);
self.clone_tree_from(node.right, source);
}
}
}
}
// CONVERSIONS
// ================================================================================================
impl From<&MerkleTree> for MerkleStore {
fn from(value: &MerkleTree) -> Self {
let nodes = combine_nodes_with_empty_hashes(value.inner_nodes()).collect();
Self { nodes }
}
}
impl<const DEPTH: u8> From<&SimpleSmt<DEPTH>> for MerkleStore {
fn from(value: &SimpleSmt<DEPTH>) -> Self {
let nodes = combine_nodes_with_empty_hashes(value.inner_nodes()).collect();
Self { nodes }
}
}
impl From<&Smt> for MerkleStore {
fn from(value: &Smt) -> Self {
let nodes = combine_nodes_with_empty_hashes(value.inner_nodes()).collect();
Self { nodes }
}
}
impl From<&Mmr> for MerkleStore {
fn from(value: &Mmr) -> Self {
let nodes = combine_nodes_with_empty_hashes(value.inner_nodes()).collect();
Self { nodes }
}
}
impl From<&PartialMerkleTree> for MerkleStore {
fn from(value: &PartialMerkleTree) -> Self {
let nodes = combine_nodes_with_empty_hashes(value.inner_nodes()).collect();
Self { nodes }
}
}
impl FromIterator<InnerNodeInfo> for MerkleStore {
fn from_iter<I: IntoIterator<Item = InnerNodeInfo>>(iter: I) -> Self {
let nodes = combine_nodes_with_empty_hashes(iter).collect();
Self { nodes }
}
}
impl FromIterator<(Word, StoreNode)> for MerkleStore {
fn from_iter<I: IntoIterator<Item = (Word, StoreNode)>>(iter: I) -> Self {
let nodes = iter.into_iter().chain(empty_hashes()).collect();
Self { nodes }
}
}
// ITERATORS
// ================================================================================================
impl Extend<InnerNodeInfo> for MerkleStore {
fn extend<I: IntoIterator<Item = InnerNodeInfo>>(&mut self, iter: I) {
self.nodes.extend(
iter.into_iter()
.map(|info| (info.value, StoreNode { left: info.left, right: info.right })),
);
}
}
// SERIALIZATION
// ================================================================================================
impl Serializable for StoreNode {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
self.left.write_into(target);
self.right.write_into(target);
}
}
impl Deserializable for StoreNode {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
let left = Word::read_from(source)?;
let right = Word::read_from(source)?;
Ok(StoreNode { left, right })
}
}
impl Serializable for MerkleStore {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
target.write_u64(self.nodes.len() as u64);
for (k, v) in self.nodes.iter() {
k.write_into(target);
v.write_into(target);
}
}
}
impl Deserializable for MerkleStore {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
let len = source.read_u64()?;
let mut nodes: Vec<(Word, StoreNode)> = Vec::with_capacity(len as usize);
for _ in 0..len {
let key = Word::read_from(source)?;
let value = StoreNode::read_from(source)?;
nodes.push((key, value));
}
Ok(nodes.into_iter().collect())
}
}
// HELPER FUNCTIONS
// ================================================================================================
/// Creates empty hashes for all the subtrees of a tree with a max depth of 255.
fn empty_hashes() -> impl Iterator<Item = (Word, StoreNode)> {
let subtrees = EmptySubtreeRoots::empty_hashes(255);
subtrees
.iter()
.rev()
.copied()
.zip(subtrees.iter().rev().skip(1).copied())
.map(|(child, parent)| (parent, StoreNode { left: child, right: child }))
}
/// Consumes an iterator of [InnerNodeInfo] and returns an iterator of `(value, node)` tuples
/// which includes the nodes associate with roots of empty subtrees up to a depth of 255.
fn combine_nodes_with_empty_hashes(
nodes: impl IntoIterator<Item = InnerNodeInfo>,
) -> impl Iterator<Item = (Word, StoreNode)> {
nodes
.into_iter()
.map(|info| (info.value, StoreNode { left: info.left, right: info.right }))
.chain(empty_hashes())
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/partial_mt/tests.rs | miden-crypto/src/merkle/partial_mt/tests.rs | use alloc::{collections::BTreeMap, vec::Vec};
use super::{
super::{
MerkleError, MerkleTree, NodeIndex, PartialMerkleTree, int_to_node, store::MerkleStore,
},
Deserializable, InnerNodeInfo, MerkleProof, Serializable, Word,
};
// TEST DATA
// ================================================================================================
const NODE10: NodeIndex = NodeIndex::new_unchecked(1, 0);
const NODE11: NodeIndex = NodeIndex::new_unchecked(1, 1);
const NODE20: NodeIndex = NodeIndex::new_unchecked(2, 0);
const NODE21: NodeIndex = NodeIndex::new_unchecked(2, 1);
const NODE22: NodeIndex = NodeIndex::new_unchecked(2, 2);
const NODE23: NodeIndex = NodeIndex::new_unchecked(2, 3);
const NODE30: NodeIndex = NodeIndex::new_unchecked(3, 0);
const NODE31: NodeIndex = NodeIndex::new_unchecked(3, 1);
const NODE32: NodeIndex = NodeIndex::new_unchecked(3, 2);
const NODE33: NodeIndex = NodeIndex::new_unchecked(3, 3);
const VALUES8: [Word; 8] = [
int_to_node(30),
int_to_node(31),
int_to_node(32),
int_to_node(33),
int_to_node(34),
int_to_node(35),
int_to_node(36),
int_to_node(37),
];
// TESTS
// ================================================================================================
// For the Partial Merkle Tree tests we will use parts of the Merkle Tree which full form is
// illustrated below:
//
// __________ root __________
// / \
// ____ 10 ____ ____ 11 ____
// / \ / \
// 20 21 22 23
// / \ / \ / \ / \
// (30) (31) (32) (33) (34) (35) (36) (37)
//
// Where node number is a concatenation of its depth and index. For example, node with
// NodeIndex(3, 5) will be labeled as `35`. Leaves of the tree are shown as nodes with parenthesis
// (33).
/// Checks that creation of the PMT with `with_leaves()` constructor is working correctly.
#[test]
fn with_leaves() {
let mt = MerkleTree::new(VALUES8).unwrap();
let expected_root = mt.root();
let leaf_nodes_vec = vec![
(NODE20, mt.get_node(NODE20).unwrap()),
(NODE32, mt.get_node(NODE32).unwrap()),
(NODE33, mt.get_node(NODE33).unwrap()),
(NODE22, mt.get_node(NODE22).unwrap()),
(NODE23, mt.get_node(NODE23).unwrap()),
];
let leaf_nodes: BTreeMap<NodeIndex, Word> = leaf_nodes_vec.into_iter().collect();
let pmt = PartialMerkleTree::with_leaves(leaf_nodes).unwrap();
assert_eq!(expected_root, pmt.root())
}
/// Checks that `with_leaves()` function returns an error when using incomplete set of nodes.
#[test]
fn err_with_leaves() {
// NODE22 is missing
let leaf_nodes_vec = vec![
(NODE20, int_to_node(20)),
(NODE32, int_to_node(32)),
(NODE33, int_to_node(33)),
(NODE23, int_to_node(23)),
];
let leaf_nodes: BTreeMap<NodeIndex, Word> = leaf_nodes_vec.into_iter().collect();
assert!(PartialMerkleTree::with_leaves(leaf_nodes).is_err());
}
/// Tests that `with_leaves()` returns `EntryIsNotLeaf` error when an entry
/// is an ancestor of another entry.
#[test]
fn err_with_leaves_entry_is_not_leaf() {
// Provide all 8 leaves at depth 3
let mut entries: BTreeMap<NodeIndex, Word> = (0u64..8)
.map(|i| (NodeIndex::new(3, i).unwrap(), VALUES8[i as usize]))
.collect();
// Add an entry at depth 1 - this is an ancestor of some depth-3 entries
entries.insert(NodeIndex::new(1, 0).unwrap(), int_to_node(999));
// Verify we get EntryIsNotLeaf error
match PartialMerkleTree::with_leaves(entries) {
Err(MerkleError::EntryIsNotLeaf { node }) => {
assert_eq!(node.depth(), 1);
assert_eq!(node.value(), 0);
},
other => panic!("Expected EntryIsNotLeaf error, got {:?}", other),
}
}
/// Checks that root returned by `root()` function is equal to the expected one.
#[test]
fn get_root() {
let mt = MerkleTree::new(VALUES8).unwrap();
let expected_root = mt.root();
let ms = MerkleStore::from(&mt);
let path33 = ms.get_path(expected_root, NODE33).unwrap();
let pmt = PartialMerkleTree::with_paths([(3, path33.value, path33.path)]).unwrap();
assert_eq!(expected_root, pmt.root());
}
/// This test checks correctness of the `add_path()` and `get_path()` functions. First it creates a
/// PMT using `add_path()` by adding Merkle Paths from node 33 and node 22 to the empty PMT. Then
/// it checks that paths returned by `get_path()` function are equal to the expected ones.
#[test]
fn add_and_get_paths() {
let mt = MerkleTree::new(VALUES8).unwrap();
let expected_root = mt.root();
let ms = MerkleStore::from(&mt);
let expected_path33 = ms.get_path(expected_root, NODE33).unwrap();
let expected_path22 = ms.get_path(expected_root, NODE22).unwrap();
let mut pmt = PartialMerkleTree::new();
pmt.add_path(3, expected_path33.value, expected_path33.path.clone()).unwrap();
pmt.add_path(2, expected_path22.value, expected_path22.path.clone()).unwrap();
let path33 = pmt.get_path(NODE33).unwrap();
let path22 = pmt.get_path(NODE22).unwrap();
let actual_root = pmt.root();
assert_eq!(expected_path33.path, path33);
assert_eq!(expected_path22.path, path22);
assert_eq!(expected_root, actual_root);
}
/// Checks that function `get_node` used on nodes 10 and 32 returns expected values.
#[test]
fn get_node() {
let mt = MerkleTree::new(VALUES8).unwrap();
let expected_root = mt.root();
let ms = MerkleStore::from(&mt);
let path33 = ms.get_path(expected_root, NODE33).unwrap();
let pmt = PartialMerkleTree::with_paths([(3, path33.value, path33.path)]).unwrap();
assert_eq!(ms.get_node(expected_root, NODE32).unwrap(), pmt.get_node(NODE32).unwrap());
assert_eq!(ms.get_node(expected_root, NODE10).unwrap(), pmt.get_node(NODE10).unwrap());
}
/// Updates leaves of the PMT using `update_leaf()` function and checks that new root of the tree
/// is equal to the expected one.
#[test]
fn update_leaf() {
let mt = MerkleTree::new(VALUES8).unwrap();
let root = mt.root();
let mut ms = MerkleStore::from(&mt);
let path33 = ms.get_path(root, NODE33).unwrap();
let mut pmt = PartialMerkleTree::with_paths([(3, path33.value, path33.path)]).unwrap();
let new_value32 = int_to_node(132);
let expected_root = ms.set_node(root, NODE32, new_value32).unwrap().root;
pmt.update_leaf(2, new_value32).unwrap();
let actual_root = pmt.root();
assert_eq!(expected_root, actual_root);
let new_value20 = int_to_node(120);
let expected_root = ms.set_node(expected_root, NODE20, new_value20).unwrap().root;
pmt.update_leaf(0, new_value20).unwrap();
let actual_root = pmt.root();
assert_eq!(expected_root, actual_root);
let new_value11 = int_to_node(111);
let expected_root = ms.set_node(expected_root, NODE11, new_value11).unwrap().root;
pmt.update_leaf(6, new_value11).unwrap();
let actual_root = pmt.root();
assert_eq!(expected_root, actual_root);
}
/// Checks that paths of the PMT returned by `paths()` function are equal to the expected ones.
#[test]
fn get_paths() {
let mt = MerkleTree::new(VALUES8).unwrap();
let expected_root = mt.root();
let ms = MerkleStore::from(&mt);
let path33 = ms.get_path(expected_root, NODE33).unwrap();
let path22 = ms.get_path(expected_root, NODE22).unwrap();
let mut pmt = PartialMerkleTree::new();
pmt.add_path(3, path33.value, path33.path).unwrap();
pmt.add_path(2, path22.value, path22.path).unwrap();
// After PMT creation with path33 (33; 32, 20, 11) and path22 (22; 23, 10) we will have this
// tree:
//
// ______root______
// / \
// ___10___ ___11___
// / \ / \
// (20) 21 (22) (23)
// / \
// (32) (33)
//
// Which have leaf nodes 20, 22, 23, 32 and 33. Hence overall we will have 5 paths -- one path
// for each leaf.
let leaves = [NODE20, NODE22, NODE23, NODE32, NODE33];
let expected_paths: Vec<(NodeIndex, MerkleProof)> = leaves
.iter()
.map(|&leaf| {
(
leaf,
MerkleProof {
value: mt.get_node(leaf).unwrap(),
path: mt.get_path(leaf).unwrap(),
},
)
})
.collect();
let actual_paths = pmt.to_paths();
assert_eq!(expected_paths, actual_paths);
}
// Checks correctness of leaves determination when using the `leaves()` function.
#[test]
fn leaves() {
let mt = MerkleTree::new(VALUES8).unwrap();
let expected_root = mt.root();
let ms = MerkleStore::from(&mt);
let path33 = ms.get_path(expected_root, NODE33).unwrap();
let path22 = ms.get_path(expected_root, NODE22).unwrap();
let mut pmt = PartialMerkleTree::with_paths([(3, path33.value, path33.path)]).unwrap();
// After PMT creation with path33 (33; 32, 20, 11) we will have this tree:
//
// ______root______
// / \
// ___10___ (11)
// / \
// (20) 21
// / \
// (32) (33)
//
// Which have leaf nodes 11, 20, 32 and 33.
let value11 = mt.get_node(NODE11).unwrap();
let value20 = mt.get_node(NODE20).unwrap();
let value32 = mt.get_node(NODE32).unwrap();
let value33 = mt.get_node(NODE33).unwrap();
let leaves = [(NODE11, value11), (NODE20, value20), (NODE32, value32), (NODE33, value33)];
let expected_leaves = leaves.iter().copied();
assert!(expected_leaves.eq(pmt.leaves()));
pmt.add_path(2, path22.value, path22.path).unwrap();
// After adding the path22 (22; 23, 10) to the existing PMT we will have this tree:
//
// ______root______
// / \
// ___10___ ___11___
// / \ / \
// (20) 21 (22) (23)
// / \
// (32) (33)
//
// Which have leaf nodes 20, 22, 23, 32 and 33.
let value20 = mt.get_node(NODE20).unwrap();
let value22 = mt.get_node(NODE22).unwrap();
let value23 = mt.get_node(NODE23).unwrap();
let value32 = mt.get_node(NODE32).unwrap();
let value33 = mt.get_node(NODE33).unwrap();
let leaves = vec![
(NODE20, value20),
(NODE22, value22),
(NODE23, value23),
(NODE32, value32),
(NODE33, value33),
];
let expected_leaves = leaves.iter().copied();
assert!(expected_leaves.eq(pmt.leaves()));
}
/// Checks that nodes of the PMT returned by `inner_nodes()` function are equal to the expected
/// ones.
#[test]
fn test_inner_node_iterator() {
let mt = MerkleTree::new(VALUES8).unwrap();
let expected_root = mt.root();
let ms = MerkleStore::from(&mt);
let path33 = ms.get_path(expected_root, NODE33).unwrap();
let path22 = ms.get_path(expected_root, NODE22).unwrap();
let mut pmt = PartialMerkleTree::with_paths([(3, path33.value, path33.path)]).unwrap();
// get actual inner nodes
let actual: Vec<InnerNodeInfo> = pmt.inner_nodes().collect();
let expected_n00 = mt.root();
let expected_n10 = mt.get_node(NODE10).unwrap();
let expected_n11 = mt.get_node(NODE11).unwrap();
let expected_n20 = mt.get_node(NODE20).unwrap();
let expected_n21 = mt.get_node(NODE21).unwrap();
let expected_n32 = mt.get_node(NODE32).unwrap();
let expected_n33 = mt.get_node(NODE33).unwrap();
// create vector of the expected inner nodes
let mut expected = vec![
InnerNodeInfo {
value: expected_n00,
left: expected_n10,
right: expected_n11,
},
InnerNodeInfo {
value: expected_n10,
left: expected_n20,
right: expected_n21,
},
InnerNodeInfo {
value: expected_n21,
left: expected_n32,
right: expected_n33,
},
];
assert_eq!(actual, expected);
// add another path to the Partial Merkle Tree
pmt.add_path(2, path22.value, path22.path).unwrap();
// get new actual inner nodes
let actual: Vec<InnerNodeInfo> = pmt.inner_nodes().collect();
let expected_n22 = mt.get_node(NODE22).unwrap();
let expected_n23 = mt.get_node(NODE23).unwrap();
let info_11 = InnerNodeInfo {
value: expected_n11,
left: expected_n22,
right: expected_n23,
};
// add new inner node to the existing vertor
expected.insert(2, info_11);
assert_eq!(actual, expected);
}
/// Checks that serialization and deserialization implementations for the PMT are working
/// correctly.
#[test]
fn serialization() {
let mt = MerkleTree::new(VALUES8).unwrap();
let expected_root = mt.root();
let ms = MerkleStore::from(&mt);
let path33 = ms.get_path(expected_root, NODE33).unwrap();
let path22 = ms.get_path(expected_root, NODE22).unwrap();
let pmt = PartialMerkleTree::with_paths([
(3, path33.value, path33.path),
(2, path22.value, path22.path),
])
.unwrap();
let serialized_pmt = pmt.to_bytes();
let deserialized_pmt = PartialMerkleTree::read_from_bytes(&serialized_pmt).unwrap();
assert_eq!(deserialized_pmt, pmt);
}
/// Checks that deserialization fails with incorrect data.
#[test]
fn err_deserialization() {
let mut tree_bytes: Vec<u8> = vec![5];
tree_bytes.append(&mut NODE20.to_bytes());
tree_bytes.append(&mut int_to_node(20).to_bytes());
tree_bytes.append(&mut NODE21.to_bytes());
tree_bytes.append(&mut int_to_node(21).to_bytes());
// node with depth 1 could have index 0 or 1, but it has 2
tree_bytes.append(&mut vec![1, 2]);
tree_bytes.append(&mut int_to_node(11).to_bytes());
assert!(PartialMerkleTree::read_from_bytes(&tree_bytes).is_err());
}
/// Checks that addition of the path with different root will cause an error.
#[test]
fn err_add_path() {
let path33 = vec![int_to_node(1), int_to_node(2), int_to_node(3)].into();
let path22 = vec![int_to_node(4), int_to_node(5)].into();
let mut pmt = PartialMerkleTree::new();
pmt.add_path(3, int_to_node(6), path33).unwrap();
assert!(pmt.add_path(2, int_to_node(7), path22).is_err());
}
/// Checks that the request of the node which is not in the PMT will cause an error.
#[test]
fn err_get_node() {
let mt = MerkleTree::new(VALUES8).unwrap();
let expected_root = mt.root();
let ms = MerkleStore::from(&mt);
let path33 = ms.get_path(expected_root, NODE33).unwrap();
let pmt = PartialMerkleTree::with_paths([(3, path33.value, path33.path)]).unwrap();
assert!(pmt.get_node(NODE22).is_err());
assert!(pmt.get_node(NODE23).is_err());
assert!(pmt.get_node(NODE30).is_err());
assert!(pmt.get_node(NODE31).is_err());
}
/// Checks that the request of the path from the leaf which is not in the PMT will cause an error.
#[test]
fn err_get_path() {
let mt = MerkleTree::new(VALUES8).unwrap();
let expected_root = mt.root();
let ms = MerkleStore::from(&mt);
let path33 = ms.get_path(expected_root, NODE33).unwrap();
let pmt = PartialMerkleTree::with_paths([(3, path33.value, path33.path)]).unwrap();
assert!(pmt.get_path(NODE22).is_err());
assert!(pmt.get_path(NODE23).is_err());
assert!(pmt.get_path(NODE30).is_err());
assert!(pmt.get_path(NODE31).is_err());
}
#[test]
fn err_update_leaf() {
let mt = MerkleTree::new(VALUES8).unwrap();
let expected_root = mt.root();
let ms = MerkleStore::from(&mt);
let path33 = ms.get_path(expected_root, NODE33).unwrap();
let mut pmt = PartialMerkleTree::with_paths([(3, path33.value, path33.path)]).unwrap();
assert!(pmt.update_leaf(8, int_to_node(38)).is_err());
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/partial_mt/mod.rs | miden-crypto/src/merkle/partial_mt/mod.rs | use alloc::{
collections::{BTreeMap, BTreeSet},
string::String,
vec::Vec,
};
use core::fmt;
use super::{
EMPTY_WORD, InnerNodeInfo, MerkleError, MerklePath, MerkleProof, NodeIndex, Rpo256, Word,
};
use crate::utils::{
ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable, word_to_hex,
};
#[cfg(test)]
mod tests;
// CONSTANTS
// ================================================================================================
/// Index of the root node.
const ROOT_INDEX: NodeIndex = NodeIndex::root();
/// An Word consisting of 4 ZERO elements.
const EMPTY_DIGEST: Word = EMPTY_WORD;
// PARTIAL MERKLE TREE
// ================================================================================================
/// A partial Merkle tree with NodeIndex keys and 4-element [Word] leaf values. Partial Merkle
/// Tree allows to create Merkle Tree by providing Merkle paths of different lengths.
///
/// The root of the tree is recomputed on each new leaf update.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct PartialMerkleTree {
max_depth: u8,
nodes: BTreeMap<NodeIndex, Word>,
leaves: BTreeSet<NodeIndex>,
}
impl Default for PartialMerkleTree {
fn default() -> Self {
Self::new()
}
}
impl PartialMerkleTree {
// CONSTANTS
// --------------------------------------------------------------------------------------------
/// Minimum supported depth.
pub const MIN_DEPTH: u8 = 1;
/// Maximum supported depth.
pub const MAX_DEPTH: u8 = 64;
// CONSTRUCTORS
// --------------------------------------------------------------------------------------------
/// Returns a new empty [PartialMerkleTree].
pub fn new() -> Self {
PartialMerkleTree {
max_depth: 0,
nodes: BTreeMap::new(),
leaves: BTreeSet::new(),
}
}
/// Appends the provided paths iterator into the set.
///
/// Analogous to [Self::add_path].
pub fn with_paths<I>(paths: I) -> Result<Self, MerkleError>
where
I: IntoIterator<Item = (u64, Word, MerklePath)>,
{
// create an empty tree
let tree = PartialMerkleTree::new();
paths.into_iter().try_fold(tree, |mut tree, (index, value, path)| {
tree.add_path(index, value, path)?;
Ok(tree)
})
}
    /// Returns a new [PartialMerkleTree] instantiated with leaves map as specified by the provided
    /// entries.
    ///
    /// # Errors
    /// Returns an error if:
    /// - If the depth is 0 or is greater than 64.
    /// - The number of entries exceeds the maximum tree capacity, that is 2^{depth}.
    /// - The provided entries contain an insufficient set of nodes.
    /// - Any entry is an ancestor of another entry (creates hash ambiguity).
    pub fn with_leaves<R, I>(entries: R) -> Result<Self, MerkleError>
    where
        R: IntoIterator<IntoIter = I>,
        I: Iterator<Item = (NodeIndex, Word)> + ExactSizeIterator,
    {
        let mut layers: BTreeMap<u8, Vec<u64>> = BTreeMap::new();
        let mut leaves = BTreeSet::new();
        let mut nodes = BTreeMap::new();
        // add data to the leaves and nodes maps and also fill layers map, where the key is the
        // depth of the node and value is its index.
        for (node_index, hash) in entries.into_iter() {
            leaves.insert(node_index);
            nodes.insert(node_index, hash);
            layers
                .entry(node_index.depth())
                .and_modify(|layer_vec| layer_vec.push(node_index.value()))
                .or_insert(vec![node_index.value()]);
        }
        // make sure the depth of the last layer is 64 or smaller
        if let Some(last_layer) = layers.last_entry() {
            let last_layer_depth = *last_layer.key();
            if last_layer_depth > 64 {
                return Err(MerkleError::TooManyEntries(last_layer_depth));
            }
        }
        // Get maximum depth
        let max_depth = *layers.keys().next_back().unwrap_or(&0);
        // fill layers without nodes with empty vector so the bottom-up walk below can assume
        // one entry per depth
        for depth in 0..max_depth {
            layers.entry(depth).or_default();
        }
        // walk the layers bottom-up, hashing each pair of siblings into its parent
        let mut layer_iter = layers.into_values().rev();
        let mut parent_layer = layer_iter.next().unwrap();
        let mut current_layer;
        for depth in (1..max_depth + 1).rev() {
            // set current_layer = parent_layer and parent_layer = layer_iter.next()
            current_layer = layer_iter.next().unwrap();
            core::mem::swap(&mut current_layer, &mut parent_layer);
            for index_value in current_layer {
                // get the parent node index
                let parent_node = NodeIndex::new(depth - 1, index_value / 2)?;
                // If parent already exists, check if it's user-provided (invalid) or computed
                // (skip)
                if parent_layer.contains(&parent_node.value()) {
                    // If the parent was provided as a leaf, that's invalid - we can't have both
                    // a node and its descendant in the input set.
                    if leaves.contains(&parent_node) {
                        return Err(MerkleError::EntryIsNotLeaf { node: parent_node });
                    }
                    continue;
                }
                // create current node index
                let index = NodeIndex::new(depth, index_value)?;
                // get hash of the current node
                let node = nodes.get(&index).ok_or(MerkleError::NodeIndexNotFoundInTree(index))?;
                // get hash of the sibling node; a missing sibling means the entry set is
                // insufficient to compute the parent
                let sibling = nodes
                    .get(&index.sibling())
                    .ok_or(MerkleError::NodeIndexNotFoundInTree(index.sibling()))?;
                // get parent hash
                let parent = Rpo256::merge(&index.build_node(*node, *sibling));
                // add index value of the calculated node to the parents layer
                parent_layer.push(parent_node.value());
                // add index and hash to the nodes map
                nodes.insert(parent_node, parent);
            }
        }
        Ok(PartialMerkleTree { max_depth, nodes, leaves })
    }
// PUBLIC ACCESSORS
// --------------------------------------------------------------------------------------------
/// Returns the root of this Merkle tree.
pub fn root(&self) -> Word {
self.nodes.get(&ROOT_INDEX).cloned().unwrap_or(EMPTY_DIGEST)
}
/// Returns the depth of this Merkle tree.
pub fn max_depth(&self) -> u8 {
self.max_depth
}
/// Returns a node at the specified NodeIndex.
///
/// # Errors
/// Returns an error if the specified NodeIndex is not contained in the nodes map.
pub fn get_node(&self, index: NodeIndex) -> Result<Word, MerkleError> {
self.nodes
.get(&index)
.ok_or(MerkleError::NodeIndexNotFoundInTree(index))
.copied()
}
/// Returns true if provided index contains in the leaves set, false otherwise.
pub fn is_leaf(&self, index: NodeIndex) -> bool {
self.leaves.contains(&index)
}
/// Returns a vector of paths from every leaf to the root.
pub fn to_paths(&self) -> Vec<(NodeIndex, MerkleProof)> {
let mut paths = Vec::new();
self.leaves.iter().for_each(|&leaf| {
paths.push((
leaf,
MerkleProof {
value: self.get_node(leaf).expect("Failed to get leaf node"),
path: self.get_path(leaf).expect("Failed to get path"),
},
));
});
paths
}
/// Returns a Merkle path from the node at the specified index to the root.
///
/// The node itself is not included in the path.
///
/// # Errors
/// Returns an error if:
/// - the specified index has depth set to 0 or the depth is greater than the depth of this
/// Merkle tree.
/// - the specified index is not contained in the nodes map.
pub fn get_path(&self, mut index: NodeIndex) -> Result<MerklePath, MerkleError> {
if index.is_root() {
return Err(MerkleError::DepthTooSmall(index.depth()));
} else if index.depth() > self.max_depth() {
return Err(MerkleError::DepthTooBig(index.depth() as u64));
}
if !self.nodes.contains_key(&index) {
return Err(MerkleError::NodeIndexNotFoundInTree(index));
}
let mut path = Vec::new();
for _ in 0..index.depth() {
let sibling_index = index.sibling();
index.move_up();
let sibling =
self.nodes.get(&sibling_index).cloned().expect("Sibling node not in the map");
path.push(sibling);
}
Ok(MerklePath::new(path))
}
// ITERATORS
// --------------------------------------------------------------------------------------------
/// Returns an iterator over the leaves of this [PartialMerkleTree].
pub fn leaves(&self) -> impl Iterator<Item = (NodeIndex, Word)> + '_ {
self.leaves.iter().map(|&leaf| {
(
leaf,
self.get_node(leaf)
.unwrap_or_else(|_| panic!("Leaf with {leaf} is not in the nodes map")),
)
})
}
/// Returns an iterator over the inner nodes of this Merkle tree.
pub fn inner_nodes(&self) -> impl Iterator<Item = InnerNodeInfo> + '_ {
let inner_nodes = self.nodes.iter().filter(|(index, _)| !self.leaves.contains(index));
inner_nodes.map(|(index, digest)| {
let left_hash =
self.nodes.get(&index.left_child()).expect("Failed to get left child hash");
let right_hash =
self.nodes.get(&index.right_child()).expect("Failed to get right child hash");
InnerNodeInfo {
value: *digest,
left: *left_hash,
right: *right_hash,
}
})
}
    // STATE MUTATORS
    // --------------------------------------------------------------------------------------------
    /// Adds the nodes of the specified Merkle path to this [PartialMerkleTree]. The `index_value`
    /// and `value` parameters specify the leaf node at which the path starts.
    ///
    /// # Errors
    /// Returns an error if:
    /// - The depth of the specified node_index is greater than 64 or smaller than 1.
    /// - The specified path is not consistent with other paths in the set (i.e., resolves to a
    ///   different root).
    pub fn add_path(
        &mut self,
        index_value: u64,
        value: Word,
        path: MerklePath,
    ) -> Result<(), MerkleError> {
        // the length of the path determines the depth of the leaf it opens
        let index_value = NodeIndex::new(path.len() as u8, index_value)?;
        Self::check_depth(index_value.depth())?;
        self.update_depth(index_value.depth());
        // add provided node and its sibling to the leaves set
        self.leaves.insert(index_value);
        let sibling_node_index = index_value.sibling();
        self.leaves.insert(sibling_node_index);
        // add provided node and its sibling to the nodes map
        self.nodes.insert(index_value, value);
        self.nodes.insert(sibling_node_index, path[0]);
        // traverse to the root, updating the nodes
        let mut index_value = index_value;
        let node = Rpo256::merge(&index_value.build_node(value, path[0]));
        let root = path.iter().skip(1).copied().fold(node, |node, hash| {
            index_value.move_up();
            // insert calculated node to the nodes map
            self.nodes.insert(index_value, node);
            // if the calculated node was a leaf, remove it from leaves set.
            self.leaves.remove(&index_value);
            let sibling_node = index_value.sibling();
            // Insert node from Merkle path to the nodes map. This sibling node becomes a leaf only
            // if it is a new node (it wasn't in nodes map).
            // Node can be in 3 states: internal node, leaf of the tree and not a tree node at all.
            // - Internal node can only stay in this state -- addition of a new path can't make it
            //   a leaf or remove it from the tree.
            // - Leaf node can stay in the same state (remain a leaf) or can become an internal
            //   node. In the first case we don't need to do anything, and the second case is handled
            //   by the call of `self.leaves.remove(&index_value);`
            // - New node can be a calculated node or a "sibling" node from a Merkle Path:
            // --- Calculated node, obviously, never can be a leaf.
            // --- Sibling node can be only a leaf, because otherwise it is not a new node.
            if self.nodes.insert(sibling_node, hash).is_none() {
                self.leaves.insert(sibling_node);
            }
            Rpo256::merge(&index_value.build_node(node, hash))
        });
        // if the path set is empty (the root is all ZEROs), set the root to the root of the added
        // path; otherwise, the root of the added path must be identical to the current root
        if self.root() == EMPTY_DIGEST {
            self.nodes.insert(ROOT_INDEX, root);
        } else if self.root() != root {
            return Err(MerkleError::ConflictingRoots {
                expected_root: self.root(),
                actual_root: root,
            });
        }
        Ok(())
    }
/// Updates value of the leaf at the specified index returning the old leaf value.
///
/// By default the specified index is assumed to belong to the deepest layer. If the considered
/// node does not belong to the tree, the first node on the way to the root will be changed.
///
/// This also recomputes all hashes between the leaf and the root, updating the root itself.
///
/// # Errors
/// Returns an error if:
/// - No entry exists at the specified index.
/// - The specified index is greater than the maximum number of nodes on the deepest layer.
pub fn update_leaf(&mut self, index: u64, value: Word) -> Result<Word, MerkleError> {
let mut node_index = NodeIndex::new(self.max_depth(), index)?;
// proceed to the leaf
for _ in 0..node_index.depth() {
if !self.leaves.contains(&node_index) {
node_index.move_up();
}
}
// add node value to the nodes Map
let old_value = self
.nodes
.insert(node_index, value)
.ok_or(MerkleError::NodeIndexNotFoundInTree(node_index))?;
// if the old value and new value are the same, there is nothing to update
if value == old_value {
return Ok(old_value);
}
let mut value = value;
for _ in 0..node_index.depth() {
let sibling = self.nodes.get(&node_index.sibling()).expect("sibling should exist");
value = Rpo256::merge(&node_index.build_node(value, *sibling));
node_index.move_up();
self.nodes.insert(node_index, value);
}
Ok(old_value)
}
    // UTILITY FUNCTIONS
    // --------------------------------------------------------------------------------------------
    /// Utility to visualize a [PartialMerkleTree] in text.
    ///
    /// Prints the root followed by every stored node, one per line, indented by depth.
    pub fn print(&self) -> Result<String, fmt::Error> {
        let indent = " ";
        let mut s = String::new();
        s.push_str("root: ");
        s.push_str(&word_to_hex(&self.root())?);
        s.push('\n');
        // walk layer by layer; only nodes actually stored in this partial tree are printed
        for d in 1..=self.max_depth() {
            let entries = 2u64.pow(d.into());
            for i in 0..entries {
                let index = NodeIndex::new(d, i).expect("The index must always be valid");
                let node = self.get_node(index);
                // absent nodes are simply skipped
                let node = match node {
                    Err(_) => continue,
                    Ok(node) => node,
                };
                // indent proportionally to the node's depth
                for _ in 0..d {
                    s.push_str(indent);
                }
                s.push_str(&format!("({}, {}): ", index.depth(), index.value()));
                s.push_str(&word_to_hex(&node)?);
                s.push('\n');
            }
        }
        Ok(s)
    }
// HELPER METHODS
// --------------------------------------------------------------------------------------------
/// Updates depth value with the maximum of current and provided depth.
fn update_depth(&mut self, new_depth: u8) {
self.max_depth = new_depth.max(self.max_depth);
}
/// Returns an error if the depth is 0 or is greater than 64.
fn check_depth(depth: u8) -> Result<(), MerkleError> {
// validate the range of the depth.
if depth < Self::MIN_DEPTH {
return Err(MerkleError::DepthTooSmall(depth));
} else if Self::MAX_DEPTH < depth {
return Err(MerkleError::DepthTooBig(depth as u64));
}
Ok(())
}
}
// SERIALIZATION
// ================================================================================================
impl Serializable for PartialMerkleTree {
    /// Serializes this tree as the number of leaves followed by one `(index, hash)` record per
    /// leaf.
    fn write_into<W: ByteWriter>(&self, target: &mut W) {
        target.write_u64(self.leaves.len() as u64);
        for leaf_index in &self.leaves {
            leaf_index.write_into(target);
            let node = self.get_node(*leaf_index).expect("Leaf hash not found");
            node.write_into(target);
        }
    }
}
impl Deserializable for PartialMerkleTree {
    /// Deserializes a [PartialMerkleTree] from the `(index, hash)` leaf records written by
    /// [Serializable::write_into], rebuilding the tree via [PartialMerkleTree::with_leaves].
    fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
        let leaves_len = source.read_u64()? as usize;
        // cap the up-front allocation so a corrupt or malicious length prefix cannot trigger
        // a huge pre-allocation; the vector still grows as needed while records are read
        let mut leaf_nodes = Vec::with_capacity(leaves_len.min(1024));
        // add leaf nodes to the vector
        for _ in 0..leaves_len {
            let index = NodeIndex::read_from(source)?;
            let hash = Word::read_from(source)?;
            leaf_nodes.push((index, hash));
        }
        // preserve the underlying error so callers can tell why reconstruction failed
        PartialMerkleTree::with_leaves(leaf_nodes).map_err(|err| {
            DeserializationError::InvalidValue(format!(
                "Invalid data for PartialMerkleTree creation: {err:?}"
            ))
        })
    }
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/mod.rs | miden-crypto/src/merkle/smt/mod.rs | //! Sparse Merkle Tree (SMT) data structures.
use alloc::vec::Vec;
use core::{
fmt::{self, Display},
hash::Hash,
};
use super::{EmptySubtreeRoots, InnerNodeInfo, MerkleError, NodeIndex, SparseMerklePath};
use crate::{
EMPTY_WORD, Map, Word,
hash::rpo::Rpo256,
utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable},
};
mod full;
pub use full::{MAX_LEAF_ENTRIES, SMT_DEPTH, Smt, SmtLeaf, SmtLeafError, SmtProof, SmtProofError};
#[cfg(feature = "concurrent")]
mod large;
#[cfg(feature = "internal")]
pub use full::concurrent::{SubtreeLeaf, build_subtree_for_bench};
#[cfg(feature = "concurrent")]
pub use large::{
LargeSmt, LargeSmtError, MemoryStorage, SmtStorage, StorageUpdateParts, StorageUpdates,
Subtree, SubtreeError,
};
#[cfg(feature = "rocksdb")]
pub use large::{RocksDbConfig, RocksDbStorage};
mod large_forest;
pub use large_forest::{History, HistoryError, HistoryView, LargeSmtForestError};
mod simple;
pub use simple::{SimpleSmt, SimpleSmtProof};
mod partial;
pub use partial::PartialSmt;
mod forest;
pub use forest::SmtForest;
// CONSTANTS
// ================================================================================================
/// Minimum supported depth.
pub const SMT_MIN_DEPTH: u8 = 1;
/// Maximum supported depth.
pub const SMT_MAX_DEPTH: u8 = 64;
// SPARSE MERKLE TREE
// ================================================================================================
// Maps a node index to the inner node stored at that index.
type InnerNodes = Map<NodeIndex, InnerNode>;
// Maps a leaf position to the leaf stored there.
type Leaves<T> = Map<u64, T>;
// Maps a node index to the pending mutation (addition or removal) at that index.
type NodeMutations = Map<NodeIndex, NodeMutation>;
/// An abstract description of a sparse Merkle tree.
///
/// A sparse Merkle tree is a key-value map which also supports proving that a given value is indeed
/// stored at a given key in the tree. It is viewed as always being fully populated. If a leaf's
/// value was not explicitly set, then its value is the default value. Typically, the vast majority
/// of leaves will store the default value (hence it is "sparse"), and therefore the internal
/// representation of the tree will only keep track of the leaves that have a different value from
/// the default.
///
/// All leaves sit at the same depth. The deeper the tree, the more leaves it has; but also the
/// longer its proofs are - of exactly `log(depth)` size. A tree cannot have depth 0, since such a
/// tree is just a single value, and is probably a programming mistake.
///
/// Every key maps to one leaf. If there are as many keys as there are leaves, then
/// [Self::Leaf] should be the same type as [Self::Value], as is the case with
/// [crate::merkle::SimpleSmt]. However, if there are more keys than leaves, then [`Self::Leaf`]
/// must accommodate all keys that map to the same leaf.
///
/// [SparseMerkleTree] currently doesn't support optimizations that compress Merkle proofs.
pub(crate) trait SparseMerkleTree<const DEPTH: u8> {
    /// The type for a key.
    ///
    /// Note that multiple keys may map to the same leaf (see [Self::key_to_leaf_index]).
    type Key: Clone + Ord + Eq + Hash;
    /// The type for a value
    type Value: Clone + PartialEq;
    /// The type for a leaf
    type Leaf: Clone;
    /// The type for an opening (i.e. a "proof") of a leaf
    type Opening;
    /// The default value used to compute the hash of empty leaves
    const EMPTY_VALUE: Self::Value;
    /// The root of the empty tree with provided DEPTH
    const EMPTY_ROOT: Word;
// PROVIDED METHODS
// ---------------------------------------------------------------------------------------------
/// Returns a [SparseMerklePath] to the specified key.
///
/// Mostly this is an implementation detail of [`Self::open()`].
fn get_path(&self, key: &Self::Key) -> SparseMerklePath {
let index = NodeIndex::from(Self::key_to_leaf_index(key));
// SAFETY: this is guaranteed to have depth <= SMT_MAX_DEPTH
SparseMerklePath::from_sized_iter(
index.proof_indices().map(|index| self.get_node_hash(index)),
)
.expect("failed to convert to SparseMerklePath")
}
/// Get the hash of a node at an arbitrary index, including the root or leaf hashes.
///
/// The root index simply returns [`Self::root()`]. Other hashes are retrieved by calling
/// [`Self::get_inner_node()`] on the parent, and returning the respective child hash.
fn get_node_hash(&self, index: NodeIndex) -> Word {
if index.is_root() {
return self.root();
}
let InnerNode { left, right } = self.get_inner_node(index.parent());
let index_is_right = index.is_value_odd();
if index_is_right { right } else { left }
}
/// Returns an opening of the leaf associated with `key`. Conceptually, an opening is a sparse
/// Merkle path to the leaf, as well as the leaf itself.
fn open(&self, key: &Self::Key) -> Self::Opening {
let leaf = self.get_leaf(key);
let merkle_path = self.get_path(key);
Self::path_and_leaf_to_opening(merkle_path, leaf)
}
    /// Inserts a value at the specified key, returning the previous value associated with that key.
    /// Recall that by definition, any key that hasn't been updated is associated with
    /// [`Self::EMPTY_VALUE`].
    ///
    /// This also recomputes all hashes between the leaf (associated with the key) and the root,
    /// updating the root itself.
    fn insert(&mut self, key: Self::Key, value: Self::Value) -> Result<Self::Value, MerkleError> {
        let old_value = self.insert_value(key.clone(), value.clone())?.unwrap_or(Self::EMPTY_VALUE);
        // if the old value and new value are the same, there is nothing to update
        if value == old_value {
            return Ok(value);
        }
        // re-hash the affected leaf and propagate the change up to the root
        let leaf = self.get_leaf(&key);
        let node_index = {
            let leaf_index: LeafIndex<DEPTH> = Self::key_to_leaf_index(&key);
            leaf_index.into()
        };
        self.recompute_nodes_from_index_to_root(node_index, Self::hash_leaf(&leaf));
        Ok(old_value)
    }
    /// Recomputes the branch nodes (including the root) from `index` all the way to the root.
    /// `node_hash_at_index` is the hash of the node stored at index.
    fn recompute_nodes_from_index_to_root(
        &mut self,
        mut index: NodeIndex,
        node_hash_at_index: Word,
    ) {
        let mut node_hash = node_hash_at_index;
        for node_depth in (0..index.depth()).rev() {
            let is_right = index.is_value_odd();
            index.move_up();
            let InnerNode { left, right } = self.get_inner_node(index);
            // replace the child on the side we came from with the freshly computed hash
            let (left, right) = if is_right {
                (left, node_hash)
            } else {
                (node_hash, right)
            };
            node_hash = Rpo256::merge(&[left, right]);
            if node_hash == *EmptySubtreeRoots::entry(DEPTH, node_depth) {
                // If a subtree is empty, then can remove the inner node, since it's equal to the
                // default value
                self.remove_inner_node(index);
            } else {
                self.insert_inner_node(index, InnerNode { left, right });
            }
        }
        self.set_root(node_hash);
    }
    /// Computes what changes are necessary to insert the specified key-value pairs into this Merkle
    /// tree, allowing for validation before applying those changes.
    ///
    /// This method returns a [`MutationSet`], which contains all the information for inserting
    /// `kv_pairs` into this Merkle tree already calculated, including the new root hash, which can
    /// be queried with [`MutationSet::root()`]. Once a mutation set is returned,
    /// [`SparseMerkleTree::apply_mutations()`] can be called in order to commit these changes to
    /// the Merkle tree, or [`drop()`] to discard them.
    ///
    /// # Errors
    /// If mutations would exceed [`MAX_LEAF_ENTRIES`] (1024 entries) in a leaf, returns
    /// [`MerkleError::TooManyLeafEntries`].
    fn compute_mutations(
        &self,
        kv_pairs: impl IntoIterator<Item = (Self::Key, Self::Value)>,
    ) -> Result<MutationSet<DEPTH, Self::Key, Self::Value>, MerkleError> {
        // delegates to the sequential implementation; implementors may override this method
        self.compute_mutations_sequential(kv_pairs)
    }
    /// Sequential version of [`SparseMerkleTree::compute_mutations()`].
    /// This is the default implementation.
    ///
    /// Processes pairs one at a time, overlaying already-computed mutations so later pairs see
    /// the effect of earlier ones without mutating the tree.
    fn compute_mutations_sequential(
        &self,
        kv_pairs: impl IntoIterator<Item = (Self::Key, Self::Value)>,
    ) -> Result<MutationSet<DEPTH, Self::Key, Self::Value>, MerkleError> {
        use NodeMutation::*;
        let mut new_root = self.root();
        let mut new_pairs: Map<Self::Key, Self::Value> = Default::default();
        let mut node_mutations: NodeMutations = Default::default();
        for (key, value) in kv_pairs {
            // If the old value and the new value are the same, there is nothing to update.
            // For the unusual case that kv_pairs has multiple values at the same key, we'll have
            // to check the key-value pairs we've already seen to get the "effective" old value.
            let old_value = new_pairs.get(&key).cloned().unwrap_or_else(|| self.get_value(&key));
            if value == old_value {
                continue;
            }
            let leaf_index = Self::key_to_leaf_index(&key);
            let mut node_index = NodeIndex::from(leaf_index);
            // We need the current leaf's hash to calculate the new leaf, but in the rare case that
            // `kv_pairs` has multiple pairs that go into the same leaf, then those pairs are also
            // part of the "current leaf".
            let old_leaf = {
                let pairs_at_index = new_pairs
                    .iter()
                    .filter(|&(new_key, _)| Self::key_to_leaf_index(new_key) == leaf_index);
                pairs_at_index.fold(self.get_leaf(&key), |acc, (k, v)| {
                    // Most of the time `pairs_at_index` should only contain a single entry (or
                    // none at all), as multi-leaves should be really rare.
                    let existing_leaf = acc.clone();
                    self.construct_prospective_leaf(existing_leaf, k, v)
                        .expect("current leaf should be valid")
                })
            };
            let new_leaf =
                self.construct_prospective_leaf(old_leaf, &key, &value).map_err(|e| match e {
                    SmtLeafError::TooManyLeafEntries { actual } => {
                        MerkleError::TooManyLeafEntries { actual }
                    },
                    other => panic!("unexpected SmtLeaf::insert error: {:?}", other),
                })?;
            let mut new_child_hash = Self::hash_leaf(&new_leaf);
            // walk from the leaf's parent to the root, overlaying any mutations already
            // recorded for earlier pairs
            for node_depth in (0..node_index.depth()).rev() {
                // Whether the node we're replacing is the right child or the left child.
                let is_right = node_index.is_value_odd();
                node_index.move_up();
                let old_node = node_mutations
                    .get(&node_index)
                    .map(|mutation| match mutation {
                        Addition(node) => node.clone(),
                        Removal => EmptySubtreeRoots::get_inner_node(DEPTH, node_depth),
                    })
                    .unwrap_or_else(|| self.get_inner_node(node_index));
                let new_node = if is_right {
                    InnerNode {
                        left: old_node.left,
                        right: new_child_hash,
                    }
                } else {
                    InnerNode {
                        left: new_child_hash,
                        right: old_node.right,
                    }
                };
                // The next iteration will operate on this new node's hash.
                new_child_hash = new_node.hash();
                let &equivalent_empty_hash = EmptySubtreeRoots::entry(DEPTH, node_depth);
                let is_removal = new_child_hash == equivalent_empty_hash;
                let new_entry = if is_removal { Removal } else { Addition(new_node) };
                node_mutations.insert(node_index, new_entry);
            }
            // Once we're at depth 0, the last node we made is the new root.
            new_root = new_child_hash;
            // And then we're done with this pair; on to the next one.
            new_pairs.insert(key, value);
        }
        Ok(MutationSet {
            old_root: self.root(),
            new_root,
            node_mutations,
            new_pairs,
        })
    }
    /// Applies the prospective mutations computed with [`SparseMerkleTree::compute_mutations()`] to
    /// this tree.
    ///
    /// # Errors
    /// If `mutations` was computed on a tree with a different root than this one, returns
    /// [`MerkleError::ConflictingRoots`] with a two-item [`Vec`]. The first item is the root hash
    /// the `mutations` were computed against, and the second item is the actual current root of
    /// this tree.
    /// If mutations would exceed [`MAX_LEAF_ENTRIES`] (1024 entries) in a leaf, returns
    /// [`MerkleError::TooManyLeafEntries`].
    fn apply_mutations(
        &mut self,
        mutations: MutationSet<DEPTH, Self::Key, Self::Value>,
    ) -> Result<(), MerkleError>
    where
        Self: Sized,
    {
        use NodeMutation::*;
        let MutationSet {
            old_root,
            node_mutations,
            new_pairs,
            new_root,
        } = mutations;
        // Guard against accidentally trying to apply mutations that were computed against a
        // different tree, including a stale version of this tree.
        if old_root != self.root() {
            return Err(MerkleError::ConflictingRoots {
                expected_root: self.root(),
                actual_root: old_root,
            });
        }
        // apply inner-node changes, then leaf values, then the precomputed root
        for (index, mutation) in node_mutations {
            match mutation {
                Removal => {
                    self.remove_inner_node(index);
                },
                Addition(node) => {
                    self.insert_inner_node(index, node);
                },
            }
        }
        for (key, value) in new_pairs {
            self.insert_value(key, value)?;
        }
        self.set_root(new_root);
        Ok(())
    }
    /// Applies the prospective mutations computed with [`SparseMerkleTree::compute_mutations()`] to
    /// this tree and returns the reverse mutation set. Applying the reverse mutation sets to the
    /// updated tree will revert the changes.
    ///
    /// # Errors
    /// If `mutations` was computed on a tree with a different root than this one, returns
    /// [`MerkleError::ConflictingRoots`] with a two-item [`Vec`]. The first item is the root hash
    /// the `mutations` were computed against, and the second item is the actual current root of
    /// this tree.
    fn apply_mutations_with_reversion(
        &mut self,
        mutations: MutationSet<DEPTH, Self::Key, Self::Value>,
    ) -> Result<MutationSet<DEPTH, Self::Key, Self::Value>, MerkleError>
    where
        Self: Sized,
    {
        use NodeMutation::*;
        let MutationSet {
            old_root,
            node_mutations,
            new_pairs,
            new_root,
        } = mutations;
        // Guard against accidentally trying to apply mutations that were computed against a
        // different tree, including a stale version of this tree.
        if old_root != self.root() {
            return Err(MerkleError::ConflictingRoots {
                expected_root: self.root(),
                actual_root: old_root,
            });
        }
        // for every inner node we touch, record the mutation that would undo the change
        let mut reverse_mutations = NodeMutations::new();
        for (index, mutation) in node_mutations {
            match mutation {
                Removal => {
                    if let Some(node) = self.remove_inner_node(index) {
                        reverse_mutations.insert(index, Addition(node));
                    }
                },
                Addition(node) => {
                    if let Some(old_node) = self.insert_inner_node(index, node) {
                        reverse_mutations.insert(index, Addition(old_node));
                    } else {
                        reverse_mutations.insert(index, Removal);
                    }
                },
            }
        }
        // record the previous value of every key we overwrite
        let mut reverse_pairs = Map::new();
        for (key, value) in new_pairs {
            match self.insert_value(key.clone(), value)? {
                Some(old_value) => {
                    reverse_pairs.insert(key, old_value);
                },
                None => {
                    reverse_pairs.insert(key, Self::EMPTY_VALUE);
                },
            }
        }
        self.set_root(new_root);
        // the reverse set maps the new root back to the old root
        Ok(MutationSet {
            old_root: new_root,
            node_mutations: reverse_mutations,
            new_pairs: reverse_pairs,
            new_root: old_root,
        })
    }
    // REQUIRED METHODS
    // ---------------------------------------------------------------------------------------------
    /// Construct this type from already computed leaves and nodes. The caller ensures passed
    /// arguments are correct and consistent with each other.
    fn from_raw_parts(
        inner_nodes: InnerNodes,
        leaves: Leaves<Self::Leaf>,
        root: Word,
    ) -> Result<Self, MerkleError>
    where
        Self: Sized;
    /// The root of the tree
    fn root(&self) -> Word;
    /// Sets the root of the tree
    fn set_root(&mut self, root: Word);
    /// Retrieves an inner node at the given index
    fn get_inner_node(&self, index: NodeIndex) -> InnerNode;
    /// Inserts an inner node at the given index, returning the node previously stored at that
    /// index, if any.
    fn insert_inner_node(&mut self, index: NodeIndex, inner_node: InnerNode) -> Option<InnerNode>;
    /// Removes an inner node at the given index, returning the removed node, if any.
    fn remove_inner_node(&mut self, index: NodeIndex) -> Option<InnerNode>;
    /// Inserts a leaf node, and returns the value at the key if already exists
    fn insert_value(
        &mut self,
        key: Self::Key,
        value: Self::Value,
    ) -> Result<Option<Self::Value>, MerkleError>;
    /// Returns the value at the specified key. Recall that by definition, any key that hasn't been
    /// updated is associated with [`Self::EMPTY_VALUE`].
    fn get_value(&self, key: &Self::Key) -> Self::Value;
    /// Returns the leaf at the specified index.
    fn get_leaf(&self, key: &Self::Key) -> Self::Leaf;
    /// Returns the hash of a leaf
    fn hash_leaf(leaf: &Self::Leaf) -> Word;
    /// Returns what a leaf would look like if a key-value pair were inserted into the tree, without
    /// mutating the tree itself. The existing leaf can be empty.
    ///
    /// To get a prospective leaf based on the current state of the tree, use `self.get_leaf(key)`
    /// as the argument for `existing_leaf`. The return value from this function can be chained back
    /// into this function as the first argument to continue making prospective changes.
    ///
    /// # Invariants
    /// Because this method is for a prospective key-value insertion into a specific leaf,
    /// `existing_leaf` must have the same leaf index as `key` (as determined by
    /// [`SparseMerkleTree::key_to_leaf_index()`]), or the result will be meaningless.
    ///
    /// # Errors
    /// If inserting the key-value pair would exceed [`MAX_LEAF_ENTRIES`] (1024 entries) in a leaf,
    /// returns [`SmtLeafError::TooManyLeafEntries`].
    fn construct_prospective_leaf(
        &self,
        existing_leaf: Self::Leaf,
        key: &Self::Key,
        value: &Self::Value,
    ) -> Result<Self::Leaf, SmtLeafError>;
    /// Maps a key to a leaf index
    fn key_to_leaf_index(key: &Self::Key) -> LeafIndex<DEPTH>;
    /// Maps a (SparseMerklePath, Self::Leaf) to an opening.
    ///
    /// The length `path` is guaranteed to be equal to `DEPTH`
    fn path_and_leaf_to_opening(path: SparseMerklePath, leaf: Self::Leaf) -> Self::Opening;
}
// INNER NODE
// ================================================================================================
/// This struct is public so functions returning it can be used in `benches/`, but is otherwise not
/// part of the public API.
///
/// Holds the hashes of an inner node's two children.
#[doc(hidden)]
#[derive(Debug, Default, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct InnerNode {
    pub left: Word,
    pub right: Word,
}
impl InnerNode {
    /// Returns the hash of this inner node, i.e. the merge of its two child hashes.
    pub fn hash(&self) -> Word {
        Rpo256::merge(&[self.left, self.right])
    }
}
// LEAF INDEX
// ================================================================================================
/// The index of a leaf, at a depth known at compile-time.
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct LeafIndex<const DEPTH: u8> {
    // Underlying node index; constructed so that its depth equals the DEPTH const parameter.
    index: NodeIndex,
}
impl<const DEPTH: u8> LeafIndex<DEPTH> {
    /// Creates a new `LeafIndex` with the specified value.
    ///
    /// # Errors
    ///
    /// Returns an error if the provided depth is less than the minimum supported depth.
    pub fn new(value: u64) -> Result<Self, MerkleError> {
        if DEPTH < SMT_MIN_DEPTH {
            return Err(MerkleError::DepthTooSmall(DEPTH));
        }
        let index = NodeIndex::new(DEPTH, value)?;
        Ok(LeafIndex { index })
    }
    /// Returns the numeric value of this leaf index.
    pub fn value(&self) -> u64 {
        self.index.value()
    }
}
impl LeafIndex<SMT_MAX_DEPTH> {
    /// Creates a new `LeafIndex` at the maximum supported depth without validation.
    ///
    /// Infallible: uses [NodeIndex::new_unchecked], so no depth/value checks are performed.
    pub const fn new_max_depth(value: u64) -> Self {
        LeafIndex {
            index: NodeIndex::new_unchecked(SMT_MAX_DEPTH, value),
        }
    }
}
/// Extracts the underlying [NodeIndex] from a [LeafIndex].
impl<const DEPTH: u8> From<LeafIndex<DEPTH>> for NodeIndex {
    fn from(leaf_index: LeafIndex<DEPTH>) -> Self {
        leaf_index.index
    }
}
/// Converts a [NodeIndex] into a [LeafIndex], failing when the index's depth differs from
/// `DEPTH`.
impl<const DEPTH: u8> TryFrom<NodeIndex> for LeafIndex<DEPTH> {
    type Error = MerkleError;
    fn try_from(node_index: NodeIndex) -> Result<Self, Self::Error> {
        let provided = node_index.depth();
        if provided != DEPTH {
            return Err(MerkleError::InvalidNodeIndexDepth { expected: DEPTH, provided });
        }
        Self::new(node_index.value())
    }
}
impl<const DEPTH: u8> Serializable for LeafIndex<DEPTH> {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
self.index.write_into(target);
}
}
impl<const DEPTH: u8> Deserializable for LeafIndex<DEPTH> {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
Ok(Self { index: source.read()? })
}
}
impl<const DEPTH: u8> Display for LeafIndex<DEPTH> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "DEPTH={}, value={}", DEPTH, self.value())
}
}
// MUTATIONS
// ================================================================================================
/// A change to an inner node of a sparse Merkle tree that hasn't yet been applied.
/// [`MutationSet`] stores this type in relation to a [`NodeIndex`] to keep track of what changes
/// need to occur at which node indices.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum NodeMutation {
/// Node needs to be removed.
Removal,
/// Node needs to be inserted.
Addition(InnerNode),
}
/// Represents a group of prospective mutations to a `SparseMerkleTree`, created by
/// `SparseMerkleTree::compute_mutations()`, and that can be applied with
/// `SparseMerkleTree::apply_mutations()`.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct MutationSet<const DEPTH: u8, K: Eq + Hash, V> {
/// The root of the Merkle tree this MutationSet is for, recorded at the time
/// [`SparseMerkleTree::compute_mutations()`] was called. Exists to guard against applying
/// mutations to the wrong tree or applying stale mutations to a tree that has since changed.
old_root: Word,
/// The set of nodes that need to be removed or added. The "effective" node at an index is the
/// Merkle tree's existing node at that index, with the [`NodeMutation`] in this map at that
/// index overlaid, if any. Each [`NodeMutation::Addition`] corresponds to a
/// [`SparseMerkleTree::insert_inner_node()`] call, and each [`NodeMutation::Removal`]
/// corresponds to a [`SparseMerkleTree::remove_inner_node()`] call.
node_mutations: NodeMutations,
/// The set of top-level key-value pairs we're prospectively adding to the tree, including
/// adding empty values. The "effective" value for a key is the value in this Map, falling
/// back to the existing value in the Merkle tree. Each entry corresponds to a
/// [`SparseMerkleTree::insert_value()`] call.
new_pairs: Map<K, V>,
/// The calculated root for the Merkle tree, given these mutations. Publicly retrievable with
/// [`MutationSet::root()`]. Corresponds to a [`SparseMerkleTree::set_root()`]. call.
new_root: Word,
}
impl<const DEPTH: u8, K: Eq + Hash, V> MutationSet<DEPTH, K, V> {
/// Returns the SMT root that was calculated during `SparseMerkleTree::compute_mutations()`. See
/// that method for more information.
pub fn root(&self) -> Word {
self.new_root
}
/// Returns the SMT root before the mutations were applied.
pub fn old_root(&self) -> Word {
self.old_root
}
/// Returns the set of inner nodes that need to be removed or added.
pub fn node_mutations(&self) -> &NodeMutations {
&self.node_mutations
}
/// Returns the set of top-level key-value pairs that need to be added, updated or deleted
/// (i.e. set to `EMPTY_WORD`).
pub fn new_pairs(&self) -> &Map<K, V> {
&self.new_pairs
}
}
// SERIALIZATION
// ================================================================================================
impl Serializable for InnerNode {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
target.write(self.left);
target.write(self.right);
}
}
impl Deserializable for InnerNode {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
let left = source.read()?;
let right = source.read()?;
Ok(Self { left, right })
}
}
impl Serializable for NodeMutation {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
match self {
NodeMutation::Removal => target.write_bool(false),
NodeMutation::Addition(inner_node) => {
target.write_bool(true);
inner_node.write_into(target);
},
}
}
}
impl Deserializable for NodeMutation {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
if source.read_bool()? {
let inner_node = source.read()?;
return Ok(NodeMutation::Addition(inner_node));
}
Ok(NodeMutation::Removal)
}
}
impl<const DEPTH: u8, K: Serializable + Eq + Hash, V: Serializable> Serializable
for MutationSet<DEPTH, K, V>
{
fn write_into<W: ByteWriter>(&self, target: &mut W) {
target.write(self.old_root);
target.write(self.new_root);
let inner_removals: Vec<_> = self
.node_mutations
.iter()
.filter(|(_, value)| matches!(value, NodeMutation::Removal))
.map(|(key, _)| key)
.collect();
let inner_additions: Vec<_> = self
.node_mutations
.iter()
.filter_map(|(key, value)| match value {
NodeMutation::Addition(node) => Some((key, node)),
_ => None,
})
.collect();
target.write(inner_removals);
target.write(inner_additions);
target.write_usize(self.new_pairs.len());
target.write_many(&self.new_pairs);
}
}
impl<const DEPTH: u8, K: Deserializable + Ord + Eq + Hash, V: Deserializable> Deserializable
for MutationSet<DEPTH, K, V>
{
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
let old_root = source.read()?;
let new_root = source.read()?;
let inner_removals: Vec<NodeIndex> = source.read()?;
let inner_additions: Vec<(NodeIndex, InnerNode)> = source.read()?;
let node_mutations = NodeMutations::from_iter(
inner_removals.into_iter().map(|index| (index, NodeMutation::Removal)).chain(
inner_additions
.into_iter()
.map(|(index, node)| (index, NodeMutation::Addition(node))),
),
);
let num_new_pairs = source.read_usize()?;
let new_pairs = source.read_many(num_new_pairs)?;
let new_pairs = Map::from_iter(new_pairs);
Ok(Self {
old_root,
node_mutations,
new_pairs,
new_root,
})
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/partial/tests.rs | miden-crypto/src/merkle/smt/partial/tests.rs | use alloc::collections::{BTreeMap, BTreeSet};
use assert_matches::assert_matches;
use super::{PartialSmt, SMT_DEPTH};
#[cfg(any(test, feature = "std"))]
use crate::rand::test_utils::{rand_array, rand_value};
use crate::{
EMPTY_WORD, Felt, ONE, Word, ZERO,
merkle::{
EmptySubtreeRoots, MerkleError,
smt::{Smt, SmtLeaf},
},
utils::{Deserializable, Serializable},
};
/// Tests that a partial SMT constructed from a root is well behaved and returns expected
/// values.
#[test]
fn partial_smt_new_with_no_entries() {
let key0 = Word::from(rand_array::<Felt, 4>());
let value0 = Word::from(rand_array::<Felt, 4>());
let full = Smt::with_entries([(key0, value0)]).unwrap();
let partial_smt = PartialSmt::new(full.root());
assert!(!partial_smt.tracks_leaves());
assert_eq!(partial_smt.num_entries(), 0);
assert_eq!(partial_smt.num_leaves(), 0);
assert_eq!(partial_smt.entries().count(), 0);
assert_eq!(partial_smt.leaves().count(), 0);
assert_eq!(partial_smt.root(), full.root());
}
/// Tests that a PartialSmt with a non-empty root but no proofs cannot track or update keys.
#[test]
fn partial_smt_non_empty_root_no_proofs() {
let key: Word = rand_value();
let value: Word = rand_value();
let full = Smt::with_entries([(key, value)]).unwrap();
// Create partial with non-empty root but don't add any proofs
let mut partial = PartialSmt::new(full.root());
// Can't get value for key with value - not trackable without proofs
assert!(partial.get_value(&key).is_err());
// Can't insert for key with value - not trackable
assert!(partial.insert(key, value).is_err());
// Can't get value for empty key either - still not trackable
let empty_key: Word = rand_value();
assert!(partial.get_value(&empty_key).is_err());
// Can't insert at empty key - not trackable
assert!(partial.insert(empty_key, value).is_err());
}
/// Tests that a basic PartialSmt can be built from a full one and that inserting or removing
/// values whose merkle path were added to the partial SMT results in the same root as the
/// equivalent update in the full tree.
#[test]
fn partial_smt_insert_and_remove() {
let key0 = Word::from(rand_array::<Felt, 4>());
let key1 = Word::from(rand_array::<Felt, 4>());
let key2 = Word::from(rand_array::<Felt, 4>());
// A key for which we won't add a value so it will be empty.
let key_empty = Word::from(rand_array::<Felt, 4>());
let value0 = Word::from(rand_array::<Felt, 4>());
let value1 = Word::from(rand_array::<Felt, 4>());
let value2 = Word::from(rand_array::<Felt, 4>());
let mut kv_pairs = vec![(key0, value0), (key1, value1), (key2, value2)];
// Add more random leaves.
kv_pairs.reserve(1000);
for _ in 0..1000 {
let key = Word::from(rand_array::<Felt, 4>());
let value = Word::from(rand_array::<Felt, 4>());
kv_pairs.push((key, value));
}
let mut full = Smt::with_entries(kv_pairs).unwrap();
// Constructing a partial SMT from proofs succeeds.
// ----------------------------------------------------------------------------------------
let proof0 = full.open(&key0);
let proof2 = full.open(&key2);
let proof_empty = full.open(&key_empty);
assert!(proof_empty.leaf().is_empty());
let mut partial = PartialSmt::from_proofs([proof0, proof2, proof_empty]).unwrap();
assert_eq!(full.root(), partial.root());
assert_eq!(partial.get_value(&key0).unwrap(), value0);
let error = partial.get_value(&key1).unwrap_err();
assert_matches!(error, MerkleError::UntrackedKey(_));
assert_eq!(partial.get_value(&key2).unwrap(), value2);
// Insert new values for added keys with empty and non-empty values.
// ----------------------------------------------------------------------------------------
let new_value0 = Word::from(rand_array::<Felt, 4>());
let new_value2 = Word::from(rand_array::<Felt, 4>());
// A non-empty value for the key that was previously empty.
let new_value_empty_key = Word::from(rand_array::<Felt, 4>());
full.insert(key0, new_value0).unwrap();
full.insert(key2, new_value2).unwrap();
full.insert(key_empty, new_value_empty_key).unwrap();
partial.insert(key0, new_value0).unwrap();
partial.insert(key2, new_value2).unwrap();
// This updates a key whose value was previously empty.
partial.insert(key_empty, new_value_empty_key).unwrap();
assert_eq!(full.root(), partial.root());
assert_eq!(partial.get_value(&key0).unwrap(), new_value0);
assert_eq!(partial.get_value(&key2).unwrap(), new_value2);
assert_eq!(partial.get_value(&key_empty).unwrap(), new_value_empty_key);
// Remove an added key.
// ----------------------------------------------------------------------------------------
full.insert(key0, EMPTY_WORD).unwrap();
partial.insert(key0, EMPTY_WORD).unwrap();
assert_eq!(full.root(), partial.root());
assert_eq!(partial.get_value(&key0).unwrap(), EMPTY_WORD);
// Check if returned openings are the same in partial and full SMT.
// ----------------------------------------------------------------------------------------
// This is a key whose value is empty since it was removed.
assert_eq!(full.open(&key0), partial.open(&key0).unwrap());
// This is a key whose value is non-empty.
assert_eq!(full.open(&key2), partial.open(&key2).unwrap());
// Attempting to update a key whose merkle path was not added is an error.
// ----------------------------------------------------------------------------------------
let error = partial.clone().insert(key1, Word::from(rand_array::<Felt, 4>())).unwrap_err();
assert_matches!(error, MerkleError::UntrackedKey(_));
let error = partial.insert(key1, EMPTY_WORD).unwrap_err();
assert_matches!(error, MerkleError::UntrackedKey(_));
}
/// Test that we can add an SmtLeaf::Multiple variant to a partial SMT.
#[test]
fn partial_smt_multiple_leaf_success() {
// key0 and key1 have the same felt at index 3 so they will be placed in the same leaf.
let key0 = Word::from([ZERO, ZERO, ZERO, ONE]);
let key1 = Word::from([ONE, ONE, ONE, ONE]);
let key2 = Word::from(rand_array::<Felt, 4>());
let value0 = Word::from(rand_array::<Felt, 4>());
let value1 = Word::from(rand_array::<Felt, 4>());
let value2 = Word::from(rand_array::<Felt, 4>());
let full = Smt::with_entries([(key0, value0), (key1, value1), (key2, value2)]).unwrap();
// Make sure our assumption about the leaf being a multiple is correct.
let SmtLeaf::Multiple(_) = full.get_leaf(&key0) else {
panic!("expected full tree to produce multiple leaf")
};
let proof0 = full.open(&key0);
let proof2 = full.open(&key2);
let partial = PartialSmt::from_proofs([proof0, proof2]).unwrap();
assert_eq!(partial.root(), full.root());
assert_eq!(partial.get_leaf(&key0).unwrap(), full.get_leaf(&key0));
// key1 is present in the partial tree because it is part of the proof of key0.
assert_eq!(partial.get_leaf(&key1).unwrap(), full.get_leaf(&key1));
assert_eq!(partial.get_leaf(&key2).unwrap(), full.get_leaf(&key2));
}
/// Tests that adding proofs to a partial SMT whose roots are not the same will result in an
/// error.
///
/// This test uses only empty values in the partial SMT.
#[test]
fn partial_smt_root_mismatch_on_empty_values() {
let key0 = Word::from(rand_array::<Felt, 4>());
let key1 = Word::from(rand_array::<Felt, 4>());
let key2 = Word::from(rand_array::<Felt, 4>());
let value0 = EMPTY_WORD;
let value1 = Word::from(rand_array::<Felt, 4>());
let value2 = EMPTY_WORD;
let kv_pairs = vec![(key0, value0)];
let mut full = Smt::with_entries(kv_pairs).unwrap();
// This proof will become stale after the tree is modified.
let stale_proof = full.open(&key2);
// Insert a non-empty value so the root actually changes.
full.insert(key1, value1).unwrap();
full.insert(key2, value2).unwrap();
// Construct a partial SMT against the latest root.
let mut partial = PartialSmt::new(full.root());
// Adding the stale proof should fail as its root is different.
let err = partial.add_proof(stale_proof).unwrap_err();
assert_matches!(err, MerkleError::ConflictingRoots { .. });
}
/// Tests that adding proofs to a partial SMT whose roots are not the same will result in an
/// error.
///
/// This test uses only non-empty values in the partial SMT.
#[test]
fn partial_smt_root_mismatch_on_non_empty_values() {
let key0 = Word::new(rand_array());
let key1 = Word::new(rand_array());
let key2 = Word::new(rand_array());
let value0 = Word::new(rand_array());
let value1 = Word::new(rand_array());
let value2 = Word::new(rand_array());
let kv_pairs = vec![(key0, value0), (key1, value1)];
let mut full = Smt::with_entries(kv_pairs).unwrap();
// This proof will become stale after the tree is modified.
let stale_proof = full.open(&key0);
// Insert a value so the root changes.
full.insert(key2, value2).unwrap();
// Construct a partial SMT against the latest root.
let mut partial = PartialSmt::new(full.root());
// Adding the stale proof should fail as its root is different.
let err = partial.add_proof(stale_proof).unwrap_err();
assert_matches!(err, MerkleError::ConflictingRoots { .. });
}
/// Tests that from_proofs fails when the proofs roots do not match.
#[test]
fn partial_smt_from_proofs_fails_on_root_mismatch() {
let key0 = Word::new(rand_array());
let key1 = Word::new(rand_array());
let value0 = Word::new(rand_array());
let value1 = Word::new(rand_array());
let mut full = Smt::with_entries([(key0, value0)]).unwrap();
// This proof will become stale after the tree is modified.
let stale_proof = full.open(&key0);
// Insert a value so the root changes.
full.insert(key1, value1).unwrap();
// Construct a partial SMT against the latest root.
let err = PartialSmt::from_proofs([full.open(&key1), stale_proof]).unwrap_err();
assert_matches!(err, MerkleError::ConflictingRoots { .. });
}
/// Tests that a basic PartialSmt's iterator APIs return the expected values.
#[test]
fn partial_smt_iterator_apis() {
let key0 = Word::new(rand_array());
let key1 = Word::new(rand_array());
let key2 = Word::new(rand_array());
// A key for which we won't add a value so it will be empty.
let key_empty = Word::new(rand_array());
let value0 = Word::new(rand_array());
let value1 = Word::new(rand_array());
let value2 = Word::new(rand_array());
let mut kv_pairs = vec![(key0, value0), (key1, value1), (key2, value2)];
// Add more random leaves.
kv_pairs.reserve(1000);
for _ in 0..1000 {
let key = Word::new(rand_array());
let value = Word::new(rand_array());
kv_pairs.push((key, value));
}
let full = Smt::with_entries(kv_pairs).unwrap();
// Construct a partial SMT from proofs.
// ----------------------------------------------------------------------------------------
let proof0 = full.open(&key0);
let proof2 = full.open(&key2);
let proof_empty = full.open(&key_empty);
assert!(proof_empty.leaf().is_empty());
let proofs = [proof0, proof2, proof_empty];
let partial = PartialSmt::from_proofs(proofs.clone()).unwrap();
assert!(partial.tracks_leaves());
assert_eq!(full.root(), partial.root());
// There should be 2 non-empty entries.
assert_eq!(partial.num_entries(), 2);
// There should be 2 leaves (empty leaves are not stored).
assert_eq!(partial.num_leaves(), 2);
// The leaves API should only return tracked but non-empty leaves.
// ----------------------------------------------------------------------------------------
// Construct the sorted vector of leaves that should be yielded by the partial SMT.
let expected_leaves: BTreeMap<_, _> =
[SmtLeaf::new_single(key0, value0), SmtLeaf::new_single(key2, value2)]
.into_iter()
.map(|leaf| (leaf.index(), leaf))
.collect();
let actual_leaves = partial
.leaves()
.map(|(idx, leaf)| (idx, leaf.clone()))
.collect::<BTreeMap<_, _>>();
assert_eq!(actual_leaves.len(), expected_leaves.len());
assert_eq!(actual_leaves, expected_leaves);
// The num_leaves API should return the count of explicitly stored leaves.
// ----------------------------------------------------------------------------------------
// We added 3 proofs but empty leaves are not stored, so num_leaves should be 2.
assert_eq!(partial.num_leaves(), 2);
// The entries of the merkle paths from the proofs should exist as children of inner nodes
// in the partial SMT.
// ----------------------------------------------------------------------------------------
let partial_inner_nodes: BTreeSet<_> =
partial.inner_nodes().flat_map(|node| [node.left, node.right]).collect();
let empty_subtree_roots: BTreeSet<_> = (0..SMT_DEPTH)
.map(|depth| *EmptySubtreeRoots::entry(SMT_DEPTH, depth))
.collect();
for merkle_path in proofs.into_iter().map(|proof| proof.into_parts().0) {
for (idx, digest) in merkle_path.into_iter().enumerate() {
assert!(
partial_inner_nodes.contains(&digest) || empty_subtree_roots.contains(&digest),
"failed at idx {idx}"
);
}
}
}
/// Test that the default partial SMT's tracks_leaves method returns `false`.
#[test]
fn partial_smt_tracks_leaves() {
assert!(!PartialSmt::default().tracks_leaves());
}
/// `PartialSmt` serde round-trip when constructed from just a root.
#[test]
fn partial_smt_with_empty_leaves_serialization_roundtrip() {
let partial_smt = PartialSmt::new(rand_value());
assert_eq!(partial_smt, PartialSmt::read_from_bytes(&partial_smt.to_bytes()).unwrap());
}
/// `PartialSmt` serde round-trip. Also tests conversion from SMT.
#[test]
fn partial_smt_serialization_roundtrip() {
let key = rand_value();
let val = rand_value();
let key_1 = rand_value();
let val_1 = rand_value();
let key_2 = rand_value();
let val_2 = rand_value();
let smt: Smt = Smt::with_entries([(key, val), (key_1, val_1), (key_2, val_2)]).unwrap();
let partial_smt = PartialSmt::from_proofs([smt.open(&key)]).unwrap();
assert_eq!(partial_smt.root(), smt.root());
assert_matches!(partial_smt.open(&key_1), Err(MerkleError::UntrackedKey(_)));
assert_matches!(partial_smt.open(&key), Ok(_));
let bytes = partial_smt.to_bytes();
let decoded = PartialSmt::read_from_bytes(&bytes).unwrap();
assert_eq!(partial_smt, decoded);
}
/// Tests that add_path correctly updates num_entries for increasing entry counts.
///
/// Note that decreasing counts are not possible with the current API.
#[test]
fn partial_smt_add_proof_num_entries() {
// key0 and key1 have the same felt at index 3 so they will be placed in the same leaf.
let key0 = Word::from([ZERO, ZERO, ZERO, ONE]);
let key1 = Word::from([ONE, ONE, ONE, ONE]);
let key2 = Word::from([ONE, ONE, ONE, Felt::new(5)]);
let value0 = Word::from(rand_array::<Felt, 4>());
let value1 = Word::from(rand_array::<Felt, 4>());
let value2 = Word::from(rand_array::<Felt, 4>());
let full = Smt::with_entries([(key0, value0), (key1, value1), (key2, value2)]).unwrap();
let mut partial = PartialSmt::new(full.root());
// Add the multi-entry leaf
partial.add_proof(full.open(&key0)).unwrap();
assert_eq!(partial.num_entries(), 2);
// Add the single-entry leaf
partial.add_proof(full.open(&key2)).unwrap();
assert_eq!(partial.num_entries(), 3);
// Setting a value to the empty word removes decreases the number of entries.
partial.insert(key0, Word::empty()).unwrap();
assert_eq!(partial.num_entries(), 2);
}
/// Tests implicit tracking of empty subtrees based on the visualization from PR #375.
///
/// ```text
/// g (root)
/// / \
/// e f
/// / \ / \
/// a b c d
/// /\ /\ /\ /\
/// 0 1 2 3 4 5 6 7
/// ```
///
/// State:
/// - Subtree f is entirely empty.
/// - Key 1 has a value and a proof in the partial SMT.
/// - Key 3 has a value but is missing from the partial SMT (making node b non-empty).
/// - Keys 0, 2, 4, 5, 6, 7 are empty.
///
/// Expected:
/// - Key 1: CAN update (explicitly tracked via proof)
/// - Key 0: CAN update (under same parent 'a' as key 1, provably empty)
/// - Keys 4, 5, 6, 7: CAN update (in empty subtree f, provably empty)
/// - Keys 2, 3: CANNOT update (under non-empty node b, only have its hash)
#[test]
fn partial_smt_tracking_visualization() {
// Situation in the diagram mapped to depth-64 SMT.
const LEAF_0: u64 = 0;
const LEAF_1: u64 = 1 << 61;
const LEAF_2: u64 = 1 << 62;
const LEAF_3: u64 = (1 << 62) | (1 << 61);
const LEAF_4: u64 = 1 << 63;
const LEAF_5: u64 = (1 << 63) | (1 << 61);
const LEAF_6: u64 = (1 << 63) | (1 << 62);
const LEAF_7: u64 = (1 << 63) | (1 << 62) | (1 << 61);
let key_0 = Word::from([ZERO, ZERO, ZERO, Felt::new(LEAF_0)]);
let key_1 = Word::from([ZERO, ZERO, ZERO, Felt::new(LEAF_1)]);
let key_2 = Word::from([ZERO, ZERO, ZERO, Felt::new(LEAF_2)]);
let key_3 = Word::from([ZERO, ZERO, ZERO, Felt::new(LEAF_3)]);
let key_4 = Word::from([ZERO, ZERO, ZERO, Felt::new(LEAF_4)]);
let key_5 = Word::from([ZERO, ZERO, ZERO, Felt::new(LEAF_5)]);
let key_6 = Word::from([ZERO, ZERO, ZERO, Felt::new(LEAF_6)]);
let key_7 = Word::from([ZERO, ZERO, ZERO, Felt::new(LEAF_7)]);
// Create full SMT with keys 1 and 3 (key_3 makes node b non-empty)
let mut full = Smt::with_entries([(key_1, rand_value()), (key_3, rand_value())]).unwrap();
// Create partial SMT with ONLY the proof for key 1
let proof_1 = full.open(&key_1);
let mut partial = PartialSmt::from_proofs([proof_1]).unwrap();
assert_eq!(full.root(), partial.root());
// Key 1: CAN update (explicitly tracked via proof)
let new_value_1: Word = rand_value();
full.insert(key_1, new_value_1).unwrap();
partial.insert(key_1, new_value_1).unwrap();
assert_eq!(full.root(), partial.root());
// Key 0: CAN update (under same parent 'a' as key 1, empty)
let value_0: Word = rand_value();
full.insert(key_0, value_0).unwrap();
partial.insert(key_0, value_0).unwrap();
assert_eq!(full.root(), partial.root());
// Key 4: CAN update (in empty subtree f)
let value_4: Word = rand_value();
full.insert(key_4, value_4).unwrap();
partial.insert(key_4, value_4).unwrap();
assert_eq!(full.root(), partial.root());
// Note: After inserting key 4, subtree f is no longer empty, but keys 5, 6, 7
// remain trackable through the inner nodes created by previous inserts.
// Key 5: CAN update
let value_5: Word = rand_value();
full.insert(key_5, value_5).unwrap();
partial.insert(key_5, value_5).unwrap();
assert_eq!(full.root(), partial.root());
// Key 6: CAN update
let value_6: Word = rand_value();
full.insert(key_6, value_6).unwrap();
partial.insert(key_6, value_6).unwrap();
assert_eq!(full.root(), partial.root());
// Key 7: CAN update
let value_7: Word = rand_value();
full.insert(key_7, value_7).unwrap();
partial.insert(key_7, value_7).unwrap();
assert_eq!(full.root(), partial.root());
// Key 2: CANNOT update (under non-empty node b, only have its hash)
let result = partial.insert(key_2, rand_value());
assert_matches!(result, Err(MerkleError::UntrackedKey(_)));
// Key 3: CANNOT update (has data but no proof in partial SMT)
let result = partial.insert(key_3, rand_value());
assert_matches!(result, Err(MerkleError::UntrackedKey(_)));
// Verify roots still match (failed inserts should not modify partial SMT)
assert_eq!(full.root(), partial.root());
}
#[test]
fn partial_smt_implicit_empty_tree() {
let mut full = Smt::new();
let mut partial = PartialSmt::new(full.root());
let key: Word = rand_value();
let value: Word = rand_value();
full.insert(key, value).unwrap();
// Can insert into empty partial SMT (implicitly tracked)
partial.insert(key, value).unwrap();
assert_eq!(full.root(), partial.root());
assert_eq!(partial.get_value(&key).unwrap(), value);
}
#[test]
fn partial_smt_implicit_insert_and_remove() {
let mut full = Smt::new();
let mut partial = PartialSmt::new(full.root());
let key: Word = rand_value();
let value: Word = rand_value();
// Insert into implicitly tracked leaf
full.insert(key, value).unwrap();
partial.insert(key, value).unwrap();
assert_eq!(full.root(), partial.root());
// Remove the value we just inserted
full.insert(key, EMPTY_WORD).unwrap();
partial.insert(key, EMPTY_WORD).unwrap();
assert_eq!(full.root(), partial.root());
assert_eq!(partial.get_value(&key).unwrap(), EMPTY_WORD);
assert_eq!(partial.num_entries(), 0);
// Empty leaves are removed from storage
assert_eq!(partial.num_leaves(), 0);
}
/// Tests that deserialization fails when an inner node hash is inconsistent with its parent.
#[test]
fn partial_smt_deserialize_invalid_inner_node() {
let key: Word = rand_value();
let value: Word = rand_value();
let smt = Smt::with_entries([(key, value)]).unwrap();
let proof = smt.open(&key);
let mut partial = PartialSmt::new(smt.root());
partial.add_proof(proof).unwrap();
// Serialize and tamper with inner node data
let mut bytes = partial.to_bytes();
// The inner node data is at the end of the serialization.
// Flip a byte in the inner node section to corrupt it.
let last_idx = bytes.len() - 1;
bytes[last_idx] ^= 0xff;
let result = PartialSmt::read_from_bytes(&bytes);
assert!(result.is_err());
}
/// Tests that deserialization fails when a leaf hash is inconsistent with its parent inner
/// node.
#[test]
fn partial_smt_deserialize_invalid_leaf() {
let key: Word = rand_value();
let value: Word = rand_value();
let smt = Smt::with_entries([(key, value)]).unwrap();
let proof = smt.open(&key);
let mut partial = PartialSmt::new(smt.root());
partial.add_proof(proof).unwrap();
// Serialize the partial SMT
let bytes = partial.to_bytes();
// Find where the leaf data starts (after root and leaves count).
// Root is 32 bytes, leaves count is 8 bytes, leaf position is 8 bytes.
// Tamper with leaf value data (after position).
// Byte position to flip.
let leaf_value_offset = 32 + 8 + 8 + 10;
let mut tampered_bytes = bytes.clone();
// Flip a byte in the leaf value data to corrupt it.
tampered_bytes[leaf_value_offset] ^= 0xff;
let result = PartialSmt::read_from_bytes(&tampered_bytes);
assert!(result.is_err());
}
/// Tests that deserialization fails when the root is inconsistent with the inner nodes.
#[test]
fn partial_smt_deserialize_invalid_root() {
let key: Word = rand_value();
let value: Word = rand_value();
let smt = Smt::with_entries([(key, value)]).unwrap();
let proof = smt.open(&key);
let mut partial = PartialSmt::new(smt.root());
partial.add_proof(proof).unwrap();
// Serialize and tamper with root (first 32 bytes)
let mut bytes = partial.to_bytes();
bytes[0] ^= 0xff;
let result = PartialSmt::read_from_bytes(&bytes);
assert!(result.is_err());
}
/// Tests that deserialization fails when leaves count is tampered to be smaller.
#[test]
fn partial_smt_deserialize_leaves_count_smaller() {
let key: Word = rand_value();
let value: Word = rand_value();
let smt = Smt::with_entries([(key, value)]).unwrap();
let proof = smt.open(&key);
let mut partial = PartialSmt::new(smt.root());
partial.add_proof(proof).unwrap();
let mut bytes = partial.to_bytes();
// Tamper the leaves count to be smaller by one
let leaves_count_offset = 32;
let count =
u64::from_le_bytes(bytes[leaves_count_offset..leaves_count_offset + 8].try_into().unwrap());
let tampered_count = count.saturating_sub(1);
bytes[leaves_count_offset..leaves_count_offset + 8]
.copy_from_slice(&tampered_count.to_le_bytes());
let result = PartialSmt::read_from_bytes(&bytes);
assert!(result.is_err());
}
/// Tests that deserialization fails when leaves count is tampered to be larger.
#[test]
fn partial_smt_deserialize_leaves_count_larger() {
let key: Word = rand_value();
let value: Word = rand_value();
let smt = Smt::with_entries([(key, value)]).unwrap();
let proof = smt.open(&key);
let mut partial = PartialSmt::new(smt.root());
partial.add_proof(proof).unwrap();
let mut bytes = partial.to_bytes();
// Tamper the leaves count to be larger by one
let leaves_count_offset = 32;
let count =
u64::from_le_bytes(bytes[leaves_count_offset..leaves_count_offset + 8].try_into().unwrap());
let tampered_count = count + 1;
bytes[leaves_count_offset..leaves_count_offset + 8]
.copy_from_slice(&tampered_count.to_le_bytes());
let result = PartialSmt::read_from_bytes(&bytes);
assert!(result.is_err());
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/partial/mod.rs | miden-crypto/src/merkle/smt/partial/mod.rs | use super::{EmptySubtreeRoots, LeafIndex, SMT_DEPTH};
use crate::{
EMPTY_WORD, Word,
merkle::{
InnerNodeInfo, MerkleError, NodeIndex, SparseMerklePath,
smt::{InnerNode, InnerNodes, Leaves, SmtLeaf, SmtLeafError, SmtProof},
},
utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable},
};
#[cfg(all(test, feature = "std"))]
mod tests;
/// A partial version of an [`super::Smt`].
///
/// This type can track a subset of the key-value pairs of a full [`super::Smt`] and allows for
/// updating those pairs to compute the new root of the tree, as if the updates had been done on the
/// full tree. This is useful so that not all leaves have to be present and loaded into memory to
/// compute an update.
///
/// A key is considered "tracked" if either:
/// 1. Its merkle path was explicitly added to the tree (via [`PartialSmt::add_path`] or
/// [`PartialSmt::add_proof`]), or
/// 2. The path from the leaf to the root goes through empty subtrees that are consistent with the
/// stored inner nodes (provably empty with zero hash computations).
///
/// The second condition allows updating keys in empty subtrees without explicitly adding their
/// merkle paths. This is verified by walking up from the leaf and checking that any stored
/// inner node has an empty subtree root as the child on our path.
///
/// An important caveat is that only tracked keys can be updated. Attempting to update an
/// untracked key will result in an error. See [`PartialSmt::insert`] for more details.
///
/// Once a partial SMT has been constructed, its root is set in stone. All subsequently added proofs
/// or merkle paths must match that root, otherwise an error is returned.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct PartialSmt {
root: Word,
num_entries: usize,
leaves: Leaves<SmtLeaf>,
inner_nodes: InnerNodes,
}
impl PartialSmt {
// CONSTANTS
// --------------------------------------------------------------------------------------------
/// The default value used to compute the hash of empty leaves.
pub const EMPTY_VALUE: Word = EMPTY_WORD;
/// The root of an empty tree.
pub const EMPTY_ROOT: Word = *EmptySubtreeRoots::entry(SMT_DEPTH, 0);
// CONSTRUCTORS
// --------------------------------------------------------------------------------------------
/// Constructs a [`PartialSmt`] from a root.
///
/// All subsequently added proofs or paths must have the same root.
pub fn new(root: Word) -> Self {
Self {
root,
num_entries: 0,
leaves: Leaves::<SmtLeaf>::default(),
inner_nodes: InnerNodes::default(),
}
}
/// Instantiates a new [`PartialSmt`] by calling [`PartialSmt::add_proof`] for all [`SmtProof`]s
/// in the provided iterator.
///
/// If the provided iterator is empty, an empty [`PartialSmt`] is returned.
///
/// # Errors
///
/// Returns an error if:
/// - the roots of the provided proofs are not the same.
pub fn from_proofs<I>(proofs: I) -> Result<Self, MerkleError>
where
I: IntoIterator<Item = SmtProof>,
{
let mut proofs = proofs.into_iter();
let Some(first_proof) = proofs.next() else {
return Ok(Self::default());
};
// Add the first path to an empty partial SMT without checking that the existing root
// matches the new one. This sets the expected root to the root of the first proof and all
// subsequently added proofs must match it.
let mut partial_smt = Self::default();
let (path, leaf) = first_proof.into_parts();
let path_root = partial_smt.add_path_unchecked(leaf, path);
partial_smt.root = path_root;
for proof in proofs {
partial_smt.add_proof(proof)?;
}
Ok(partial_smt)
}
// PUBLIC ACCESSORS
// --------------------------------------------------------------------------------------------
/// Returns the root of the tree.
pub fn root(&self) -> Word {
self.root
}
/// Returns an opening of the leaf associated with `key`. Conceptually, an opening is a Merkle
/// path to the leaf, as well as the leaf itself.
///
/// # Errors
///
/// Returns an error if:
/// - the key is not tracked by this partial SMT.
pub fn open(&self, key: &Word) -> Result<SmtProof, MerkleError> {
let leaf = self.get_leaf(key)?;
let merkle_path = self.get_path(key);
Ok(SmtProof::new_unchecked(merkle_path, leaf))
}
/// Returns the leaf to which `key` maps.
///
/// # Errors
///
/// Returns an error if:
/// - the key is not tracked by this partial SMT.
pub fn get_leaf(&self, key: &Word) -> Result<SmtLeaf, MerkleError> {
self.get_tracked_leaf(key).ok_or(MerkleError::UntrackedKey(*key))
}
/// Returns the value associated with `key`.
///
/// # Errors
///
/// Returns an error if:
/// - the key is not tracked by this partial SMT.
pub fn get_value(&self, key: &Word) -> Result<Word, MerkleError> {
self.get_tracked_leaf(key)
.map(|leaf| leaf.get_value(key).unwrap_or_default())
.ok_or(MerkleError::UntrackedKey(*key))
}
/// Returns an iterator over the inner nodes of the [`PartialSmt`].
pub fn inner_nodes(&self) -> impl Iterator<Item = InnerNodeInfo> + '_ {
self.inner_nodes.values().map(|e| InnerNodeInfo {
value: e.hash(),
left: e.left,
right: e.right,
})
}
/// Returns an iterator over the [`InnerNode`] and the respective [`NodeIndex`] of the
/// [`PartialSmt`].
pub fn inner_node_indices(&self) -> impl Iterator<Item = (NodeIndex, InnerNode)> + '_ {
self.inner_nodes.iter().map(|(idx, inner)| (*idx, inner.clone()))
}
/// Returns an iterator over the explicitly stored leaves of the [`PartialSmt`] in arbitrary
/// order.
///
/// Note: This only returns leaves that were explicitly added via [`Self::add_path`] or
/// [`Self::add_proof`], or created through [`Self::insert`]. It does not include implicitly
/// trackable leaves in empty subtrees.
pub fn leaves(&self) -> impl Iterator<Item = (LeafIndex<SMT_DEPTH>, &SmtLeaf)> {
self.leaves
.iter()
.map(|(leaf_index, leaf)| (LeafIndex::new_max_depth(*leaf_index), leaf))
}
/// Returns an iterator over the tracked, non-empty key-value pairs of the [`PartialSmt`] in
/// arbitrary order.
pub fn entries(&self) -> impl Iterator<Item = &(Word, Word)> {
self.leaves().flat_map(|(_, leaf)| leaf.entries())
}
/// Returns the number of non-empty leaves in this tree.
///
/// Note that this may return a different value from [Self::num_entries()] as a single leaf may
/// contain more than one key-value pair.
pub fn num_leaves(&self) -> usize {
self.leaves.len()
}
/// Returns the number of tracked, non-empty key-value pairs in this tree.
///
/// Note that this may return a different value from [Self::num_leaves()] as a single leaf may
/// contain more than one key-value pair.
pub fn num_entries(&self) -> usize {
self.num_entries
}
/// Returns a boolean value indicating whether the [`PartialSmt`] tracks any leaves.
///
/// Note that if a partial SMT does not track leaves, its root is not necessarily the empty SMT
/// root, since it could have been constructed from a different root but without tracking any
/// leaves.
pub fn tracks_leaves(&self) -> bool {
!self.leaves.is_empty()
}
// STATE MUTATORS
// --------------------------------------------------------------------------------------------
/// Inserts a value at the specified key, returning the previous value associated with that key.
/// Recall that by definition, any key that hasn't been updated is associated with
/// [`Self::EMPTY_VALUE`].
///
/// This also recomputes all hashes between the leaf (associated with the key) and the root,
/// updating the root itself.
///
/// # Errors
///
/// Returns an error if:
/// - the key is not tracked (see the type documentation for the definition of "tracked"). If an
/// error is returned the tree is in the same state as before.
/// - inserting the key-value pair would exceed [`super::MAX_LEAF_ENTRIES`] (1024 entries) in
/// the leaf.
pub fn insert(&mut self, key: Word, value: Word) -> Result<Word, MerkleError> {
let current_leaf = self.get_tracked_leaf(&key).ok_or(MerkleError::UntrackedKey(key))?;
let leaf_index = current_leaf.index();
let previous_value = current_leaf.get_value(&key).unwrap_or(EMPTY_WORD);
let prev_entries = current_leaf.num_entries();
let leaf = self
.leaves
.entry(leaf_index.value())
.or_insert_with(|| SmtLeaf::new_empty(leaf_index));
if value != EMPTY_WORD {
leaf.insert(key, value).map_err(|e| match e {
SmtLeafError::TooManyLeafEntries { actual } => {
MerkleError::TooManyLeafEntries { actual }
},
other => panic!("unexpected SmtLeaf::insert error: {:?}", other),
})?;
} else {
leaf.remove(key);
}
let current_entries = leaf.num_entries();
let new_leaf_hash = leaf.hash();
self.num_entries = self.num_entries + current_entries - prev_entries;
// Remove empty leaf
if current_entries == 0 {
self.leaves.remove(&leaf_index.value());
}
// Recompute the path from leaf to root
self.recompute_nodes_from_leaf_to_root(leaf_index, new_leaf_hash);
Ok(previous_value)
}
/// Adds an [`SmtProof`] to this [`PartialSmt`].
///
/// This is a convenience method which calls [`Self::add_path`] on the proof. See its
/// documentation for details on errors.
pub fn add_proof(&mut self, proof: SmtProof) -> Result<(), MerkleError> {
let (path, leaf) = proof.into_parts();
self.add_path(leaf, path)
}
/// Adds a leaf and its sparse merkle path to this [`PartialSmt`].
///
/// If this function was called, any key that is part of the `leaf` can subsequently be updated
/// to a new value and produce a correct new tree root.
///
/// # Errors
///
/// Returns an error if:
/// - the new root after the insertion of the leaf and the path does not match the existing
/// root. If an error is returned, the tree is left in an inconsistent state.
pub fn add_path(&mut self, leaf: SmtLeaf, path: SparseMerklePath) -> Result<(), MerkleError> {
let path_root = self.add_path_unchecked(leaf, path);
// Check if the newly added merkle path is consistent with the existing tree. If not, the
// merkle path was invalid or computed against another tree.
if self.root() != path_root {
return Err(MerkleError::ConflictingRoots {
expected_root: self.root(),
actual_root: path_root,
});
}
Ok(())
}
// PRIVATE HELPERS
// --------------------------------------------------------------------------------------------
/// Adds a leaf and its sparse merkle path to this [`PartialSmt`] and returns the root of the
/// inserted path.
///
/// This does not check that the path root matches the existing root of the tree and if so, the
/// tree is left in an inconsistent state. This state can be made consistent again by setting
/// the root of the SMT to the path root.
fn add_path_unchecked(&mut self, leaf: SmtLeaf, path: SparseMerklePath) -> Word {
let mut current_index = leaf.index().index;
let mut node_hash_at_current_index = leaf.hash();
let prev_entries = self
.leaves
.get(¤t_index.value())
.map(|leaf| leaf.num_entries())
.unwrap_or(0);
let current_entries = leaf.num_entries();
// Only store non-empty leaves
if current_entries > 0 {
self.leaves.insert(current_index.value(), leaf);
} else {
self.leaves.remove(¤t_index.value());
}
// Guaranteed not to over/underflow. All variables are <= MAX_LEAF_ENTRIES and result > 0.
self.num_entries = self.num_entries + current_entries - prev_entries;
for sibling_hash in path {
// Find the index of the sibling node and compute whether it is a left or right child.
let is_sibling_right = current_index.sibling().is_value_odd();
// Move the index up so it points to the parent of the current index and the sibling.
current_index.move_up();
// Construct the new parent node from the child that was updated and the sibling from
// the merkle path.
let new_parent_node = if is_sibling_right {
InnerNode {
left: node_hash_at_current_index,
right: sibling_hash,
}
} else {
InnerNode {
left: sibling_hash,
right: node_hash_at_current_index,
}
};
node_hash_at_current_index = new_parent_node.hash();
self.insert_inner_node(current_index, new_parent_node);
}
node_hash_at_current_index
}
/// Returns the leaf for a key if it can be tracked.
///
/// A key is trackable if:
/// 1. It was explicitly added via `add_path`/`add_proof`, OR
/// 2. The path to the leaf goes through empty subtrees (provably empty)
///
/// Returns `None` if the key cannot be tracked (path goes through non-empty
/// subtrees we don't have data for).
fn get_tracked_leaf(&self, key: &Word) -> Option<SmtLeaf> {
let leaf_index = Self::key_to_leaf_index(key);
// Explicitly stored leaves are always trackable
if let Some(leaf) = self.leaves.get(&leaf_index.value()) {
return Some(leaf.clone());
}
// Empty tree - all leaves implicitly trackable
if self.root == Self::EMPTY_ROOT {
return Some(SmtLeaf::new_empty(leaf_index));
}
// Walk from root down towards the leaf
let target: NodeIndex = leaf_index.into();
let mut index = NodeIndex::root();
for i in (0..SMT_DEPTH).rev() {
let inner_node = self.get_inner_node(index)?;
let is_right = target.is_nth_bit_odd(i);
let child_hash = if is_right { inner_node.right } else { inner_node.left };
// If child is empty subtree root, leaf is implicitly trackable
if child_hash == *EmptySubtreeRoots::entry(SMT_DEPTH, SMT_DEPTH - i) {
return Some(SmtLeaf::new_empty(leaf_index));
}
index = if is_right {
index.right_child()
} else {
index.left_child()
};
}
// Reached leaf level without finding empty subtree - can't track
None
}
/// Converts a key to a leaf index.
fn key_to_leaf_index(key: &Word) -> LeafIndex<SMT_DEPTH> {
let most_significant_felt = key[3];
LeafIndex::new_max_depth(most_significant_felt.as_int())
}
/// Returns the inner node at the specified index, or `None` if not stored.
fn get_inner_node(&self, index: NodeIndex) -> Option<InnerNode> {
self.inner_nodes.get(&index).cloned()
}
/// Returns the inner node at the specified index, falling back to the empty subtree root
/// if not stored.
fn get_inner_node_or_empty(&self, index: NodeIndex) -> InnerNode {
self.get_inner_node(index)
.unwrap_or_else(|| EmptySubtreeRoots::get_inner_node(SMT_DEPTH, index.depth()))
}
/// Inserts an inner node at the specified index, or removes it if it equals the empty
/// subtree root.
fn insert_inner_node(&mut self, index: NodeIndex, inner_node: InnerNode) {
if inner_node == EmptySubtreeRoots::get_inner_node(SMT_DEPTH, index.depth()) {
self.inner_nodes.remove(&index);
} else {
self.inner_nodes.insert(index, inner_node);
}
}
/// Returns the merkle path for a key by walking up the tree from the leaf.
fn get_path(&self, key: &Word) -> SparseMerklePath {
let index = NodeIndex::from(Self::key_to_leaf_index(key));
// Use proof_indices to get sibling indices from leaf to root,
// and get each sibling's hash
SparseMerklePath::from_sized_iter(index.proof_indices().map(|idx| self.get_node_hash(idx)))
.expect("path should be valid since it's from a valid SMT")
}
/// Get the hash of a node at an arbitrary index, including the root or leaf hashes.
///
/// The root index simply returns the root. Other hashes are retrieved by looking at
/// the parent inner node and returning the respective child hash.
fn get_node_hash(&self, index: NodeIndex) -> Word {
if index.is_root() {
return self.root;
}
let InnerNode { left, right } = self.get_inner_node_or_empty(index.parent());
if index.is_value_odd() { right } else { left }
}
/// Recomputes all inner nodes from a leaf up to the root after a leaf value change.
fn recompute_nodes_from_leaf_to_root(
&mut self,
leaf_index: LeafIndex<SMT_DEPTH>,
leaf_hash: Word,
) {
use crate::hash::rpo::Rpo256;
let mut index: NodeIndex = leaf_index.into();
let mut node_hash = leaf_hash;
for _ in (0..index.depth()).rev() {
let is_right = index.is_value_odd();
index.move_up();
let InnerNode { left, right } = self.get_inner_node_or_empty(index);
let (left, right) = if is_right {
(left, node_hash)
} else {
(node_hash, right)
};
node_hash = Rpo256::merge(&[left, right]);
// insert_inner_node handles removing empty subtree roots
self.insert_inner_node(index, InnerNode { left, right });
}
self.root = node_hash;
}
/// Validates the internal structure during deserialization.
///
/// Checks that:
/// - Each inner node's hash is consistent with its parent.
/// - Each leaf's hash is consistent with its parent inner node's left/right child.
fn validate(&self) -> Result<(), DeserializationError> {
// Validate each inner node is consistent with its parent
for (&idx, node) in &self.inner_nodes {
let node_hash = node.hash();
let expected_hash = self.get_node_hash(idx);
if node_hash != expected_hash {
return Err(DeserializationError::InvalidValue(
"inner node hash is inconsistent with parent".into(),
));
}
}
// Validate each leaf's hash is consistent with its parent inner node
for (&leaf_pos, leaf) in &self.leaves {
let leaf_index = LeafIndex::<SMT_DEPTH>::new_max_depth(leaf_pos);
let node_index: NodeIndex = leaf_index.into();
let leaf_hash = leaf.hash();
let expected_hash = self.get_node_hash(node_index);
if leaf_hash != expected_hash {
return Err(DeserializationError::InvalidValue(
"leaf hash is inconsistent with parent inner node".into(),
));
}
}
Ok(())
}
}
impl Default for PartialSmt {
/// Returns a new, empty [`PartialSmt`].
///
/// All leaves in the returned tree are set to [`Self::EMPTY_VALUE`].
fn default() -> Self {
Self::new(Self::EMPTY_ROOT)
}
}
// CONVERSIONS
// ================================================================================================
impl From<super::Smt> for PartialSmt {
fn from(smt: super::Smt) -> Self {
Self {
root: smt.root(),
num_entries: smt.num_entries(),
leaves: smt.leaves().map(|(idx, leaf)| (idx.value(), leaf.clone())).collect(),
inner_nodes: smt.inner_node_indices().collect(),
}
}
}
// SERIALIZATION
// ================================================================================================
impl Serializable for PartialSmt {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
target.write(self.root());
target.write_usize(self.leaves.len());
for (i, leaf) in &self.leaves {
target.write_u64(*i);
target.write(leaf);
}
target.write_usize(self.inner_nodes.len());
for (idx, node) in &self.inner_nodes {
target.write(idx);
target.write(node);
}
}
}
impl Deserializable for PartialSmt {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
let root: Word = source.read()?;
let mut leaves = Leaves::<SmtLeaf>::default();
for _ in 0..source.read_usize()? {
let pos: u64 = source.read()?;
let leaf: SmtLeaf = source.read()?;
leaves.insert(pos, leaf);
}
let mut inner_nodes = InnerNodes::default();
for _ in 0..source.read_usize()? {
let idx: NodeIndex = source.read()?;
let node: InnerNode = source.read()?;
inner_nodes.insert(idx, node);
}
let num_entries = leaves.values().map(|leaf| leaf.num_entries()).sum();
let partial = Self { root, num_entries, leaves, inner_nodes };
partial.validate()?;
Ok(partial)
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/forest/store.rs | miden-crypto/src/merkle/smt/forest/store.rs | use alloc::vec::Vec;
use crate::{
Map, Word,
hash::rpo::Rpo256,
merkle::{EmptySubtreeRoots, MerkleError, MerklePath, MerkleProof, NodeIndex, smt::SMT_DEPTH},
};
// SMT FOREST STORE
// ================================================================================================
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq)]
struct ForestInnerNode {
left: Word,
right: Word,
rc: usize,
}
impl ForestInnerNode {
pub fn hash(&self) -> Word {
Rpo256::merge(&[self.left, self.right])
}
}
/// An in-memory data store for SmtForest data.
///
/// This is an internal memory data store for SmtForest data. Similarly to the `MerkleStore`, it
/// allows all the nodes of multiple trees to live as long as necessary and without duplication,
/// this allows the implementation of space efficient persistent data structures.
///
/// Unlike `MerkleStore`, unused nodes can be easily removed from the store by leveraing
/// reference counting.
#[derive(Debug, Default, Clone, Eq, PartialEq)]
pub(super) struct SmtStore {
nodes: Map<Word, ForestInnerNode>,
}
impl SmtStore {
/// Creates a new, empty in-memory store for SmtForest data.
pub fn new() -> Self {
// pre-populate the store with the empty hashes
let nodes = empty_hashes().collect();
Self { nodes }
}
// PUBLIC ACCESSORS
// --------------------------------------------------------------------------------------------
/// Returns the node at `index` rooted on the tree `root`.
///
/// # Errors
/// This method can return the following errors:
/// - `RootNotInStore` if the `root` is not present in the store.
/// - `NodeIndexNotFoundInStore` if a node needed to traverse from `root` to `index` is not
/// present in the store.
pub fn get_node(&self, root: Word, index: NodeIndex) -> Result<Word, MerkleError> {
let mut hash = root;
// corner case: check the root is in the store when called with index `NodeIndex::root()`
self.nodes.get(&hash).ok_or(MerkleError::RootNotInStore(hash))?;
// traverse from root to index
for i in (0..index.depth()).rev() {
let node = self
.nodes
.get(&hash)
.ok_or(MerkleError::NodeIndexNotFoundInStore(hash, index))?;
hash = if index.is_nth_bit_odd(i) { node.right } else { node.left }
}
Ok(hash)
}
/// Returns the node at the specified `index` and its opening to the `root`.
///
/// The path starts at the sibling of the target leaf.
///
/// # Errors
/// This method can return the following errors:
/// - `RootNotInStore` if the `root` is not present in the store.
/// - `NodeIndexNotFoundInStore` if a node needed to traverse from `root` to `index` is not
/// present in the store.
pub fn get_path(&self, root: Word, index: NodeIndex) -> Result<MerkleProof, MerkleError> {
let IndexedPath { value, path } = self.get_indexed_path(root, index)?;
let path_iter = path.into_iter().rev().map(|(_, value)| value);
Ok(MerkleProof::new(value, MerklePath::from_iter(path_iter)))
}
/// Returns the node at the specified `index` and its opening to the `root`.
///
/// The path starts below the root and contains all nodes in the opening
/// all the way to the sibling of the target leaf.
///
/// # Errors
/// This method can return the following errors:
/// - `RootNotInStore` if the `root` is not present in the store.
/// - `NodeIndexNotFoundInStore` if a node needed to traverse from `root` to `index` is not
/// present in the store.
fn get_indexed_path(&self, root: Word, index: NodeIndex) -> Result<IndexedPath, MerkleError> {
let mut hash = root;
let mut path = Vec::with_capacity(index.depth().into());
// corner case: check the root is in the store when called with index `NodeIndex::root()`
self.nodes.get(&hash).ok_or(MerkleError::RootNotInStore(hash))?;
// Build sibling node index at each level as we traverse from root to leaf
let mut current_index = NodeIndex::root();
for i in (0..index.depth()).rev() {
let node = self
.nodes
.get(&hash)
.ok_or(MerkleError::NodeIndexNotFoundInStore(hash, index))?;
hash = if index.is_nth_bit_odd(i) {
path.push((current_index.left_child(), node.left));
current_index = current_index.right_child();
node.right
} else {
path.push((current_index.right_child(), node.right));
current_index = current_index.left_child();
node.left
}
}
Ok(IndexedPath { value: hash, path })
}
// STATE MUTATORS
// --------------------------------------------------------------------------------------------
/// Sets multiple leaf values at once with a single root transition.
///
/// # Errors
/// This method can return the following errors:
/// - `RootNotInStore` if the `root` is not present in the store.
/// - `NodeIndexNotFoundInStore` if a node needed to traverse from `root` to `index` is not
/// present in the store.
pub fn set_leaves(
&mut self,
root: Word,
leaves: impl IntoIterator<Item = (NodeIndex, Word)>,
) -> Result<Word, MerkleError> {
self.nodes.get(&root).ok_or(MerkleError::RootNotInStore(root))?;
// Collect opening nodes and updated leaves
let mut nodes_by_index = Map::<NodeIndex, Word>::new();
let mut leaves_by_index = Map::<NodeIndex, Word>::new();
for (index, leaf_hash) in leaves {
// Record all sibling nodes along the path from root to this index
let indexed_path = self.get_indexed_path(root, index)?;
// See if we are actually updating the leaf value. If not, we can skip processing it.
if indexed_path.value == leaf_hash {
continue;
}
nodes_by_index.extend(indexed_path.path);
// Record the updated leaf value at this index
leaves_by_index.insert(index, leaf_hash);
}
if leaves_by_index.is_empty() {
// No leaves were updated, return the original root
return Ok(root);
}
#[allow(unused_mut)]
let mut sorted_leaf_indices = leaves_by_index.keys().cloned().collect::<Vec<_>>();
#[cfg(feature = "hashmaps")]
// Sort leaves by NodeIndex to easily detect when leaves share a parent (only neighboring
// leaves can share a parent). Hashbrown::HashMap doesn't maintain key ordering, so
// we need to sort the indices.
sorted_leaf_indices.sort();
// Ensure new leaf values override current opening values.
nodes_by_index.extend(leaves_by_index);
// Keep track of affected ancestors to avoid recomputing nodes multiple times
let mut ancestors: Vec<NodeIndex> = Vec::new();
// Start with a guard value, all ancestors have depth < SMT_DEPTH
let mut last_ancestor = NodeIndex::new_unchecked(SMT_DEPTH, 0);
for leaf_index in sorted_leaf_indices {
let parent = leaf_index.parent();
// Check if we already processed the sibling of this leaf. If so, the parent is already
// added to the ancestors list. This works because leaves are sorted by index.
if parent != last_ancestor {
last_ancestor = parent;
ancestors.push(last_ancestor);
}
}
// Gather all ancestors up to the root (deduplicated)
// `ancestors` behaves as both a BFS queue (starting at all updated leaves' parents) and
// provides a way of checking if we are not processing the same ancestor multiple times.
let mut index = 0;
while index < ancestors.len() {
let node = ancestors[index];
if node.is_root() {
break;
}
// if we haven't processed node's sibling yet, it will be a new parent
let parent = node.parent();
if parent != last_ancestor {
last_ancestor = parent;
ancestors.push(last_ancestor);
}
index += 1;
}
// Stash all new nodes until we know there are no errors
let mut new_nodes: Map<Word, ForestInnerNode> = Map::new();
for index in ancestors {
let left_index = index.left_child();
let right_index = index.right_child();
let left_value = *nodes_by_index
.get(&left_index)
.ok_or(MerkleError::NodeIndexNotFoundInTree(left_index))?;
let right_value = *nodes_by_index
.get(&right_index)
.ok_or(MerkleError::NodeIndexNotFoundInTree(right_index))?;
let node = ForestInnerNode {
left: left_value,
right: right_value,
rc: 0,
};
let new_key = node.hash();
new_nodes.insert(new_key, node);
nodes_by_index.insert(index, new_key);
}
let new_root = nodes_by_index
.get(&NodeIndex::root())
.cloned()
.ok_or(MerkleError::NodeIndexNotFoundInStore(root, NodeIndex::root()))?;
// The update was computed successfully, update ref counts and insert into the store
fn dfs(
node: Word,
store: &mut Map<Word, ForestInnerNode>,
new_nodes: &mut Map<Word, ForestInnerNode>,
) {
if node == Word::empty() {
return;
}
if let Some(node) = store.get_mut(&node) {
// This node already exists in the store, increase its reference count.
// Stops the dfs descent here to leave children ref counts unchanged.
node.rc += 1;
} else if let Some(mut smt_node) = new_nodes.remove(&node) {
// This is a non-leaf node, insert it into the store and process its children.
smt_node.rc = 1;
store.insert(node, smt_node);
dfs(smt_node.left, store, new_nodes);
dfs(smt_node.right, store, new_nodes);
}
}
dfs(new_root, &mut self.nodes, &mut new_nodes);
Ok(new_root)
}
/// Decreases the reference count of the specified node and releases memory if the count
/// reached zero.
///
/// Returns the terminal nodes (leaves) that were removed.
fn remove_node(&mut self, node: Word) -> Vec<Word> {
if node == Word::empty() {
return vec![];
}
let Some(smt_node) = self.nodes.get_mut(&node) else {
return vec![node];
};
smt_node.rc -= 1;
if smt_node.rc > 0 {
return vec![];
}
let left = smt_node.left;
let right = smt_node.right;
let mut result = Vec::new();
result.extend(self.remove_node(left));
result.extend(self.remove_node(right));
result
}
/// Removes the specified roots from the store and releases memory used by now
/// unreachable nodes.
///
/// Returns the terminal nodes (leaves) that were removed.
pub fn remove_roots(&mut self, roots: impl IntoIterator<Item = Word>) -> Vec<Word> {
let mut removed_leaves = Vec::new();
for root in roots {
removed_leaves.extend(self.remove_node(root));
}
removed_leaves
}
}
// HELPER FUNCTIONS
// ================================================================================================
/// Creates empty hashes for all the subtrees of a tree with a max depth of [`SMT_DEPTH`].
fn empty_hashes() -> impl Iterator<Item = (Word, ForestInnerNode)> {
let subtrees = EmptySubtreeRoots::empty_hashes(SMT_DEPTH);
subtrees
.iter()
.rev()
.copied()
.zip(subtrees.iter().rev().skip(1).copied())
.map(|(child, parent)| (parent, ForestInnerNode { left: child, right: child, rc: 1 }))
}
/// A Merkle opening that starts below the root and ends at the sibling of the target leaf.
/// Indexed by the NodeIndex at each level to efficiently query all the hashes needed for a batch
/// update.
struct IndexedPath {
value: Word,
path: Vec<(NodeIndex, Word)>,
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/forest/tests.rs | miden-crypto/src/merkle/smt/forest/tests.rs | use assert_matches::assert_matches;
use itertools::Itertools;
use super::{EmptySubtreeRoots, MerkleError, SmtForest, Word};
use crate::{
Felt, ONE, WORD_SIZE, ZERO,
merkle::{
int_to_node,
smt::{SMT_DEPTH, SmtProofError},
},
};
// TESTS
// ================================================================================================
#[test]
fn test_insert_root_not_in_store() -> Result<(), MerkleError> {
let mut forest = SmtForest::new();
let word = Word::new([ONE; WORD_SIZE]);
assert_matches!(
forest.insert(word, word, word),
Err(MerkleError::RootNotInStore(_)),
"The forest is empty, so only empty root is valid"
);
Ok(())
}
#[test]
fn test_insert_root_empty() -> Result<(), MerkleError> {
let mut forest = SmtForest::new();
let empty_tree_root = *EmptySubtreeRoots::entry(SMT_DEPTH, 0);
let key = Word::new([ZERO; WORD_SIZE]);
let value = Word::new([ONE; WORD_SIZE]);
assert_eq!(
forest.insert(empty_tree_root, key, value)?,
Word::new([
Felt::new(10376354645124572258),
Felt::new(13808228093617896354),
Felt::new(4835829334388921262),
Felt::new(2144113770050911180)
]),
);
Ok(())
}
#[test]
fn test_insert_multiple_values() -> Result<(), MerkleError> {
let mut forest = SmtForest::new();
let empty_tree_root = *EmptySubtreeRoots::entry(SMT_DEPTH, 0);
let key = Word::new([ZERO; WORD_SIZE]);
let value = Word::new([ONE; WORD_SIZE]);
let new_root = forest.insert(empty_tree_root, key, value)?;
assert_eq!(
new_root,
Word::new([
Felt::new(10376354645124572258),
Felt::new(13808228093617896354),
Felt::new(4835829334388921262),
Felt::new(2144113770050911180)
]),
);
let new_root = forest.insert(new_root, key, value)?;
assert_eq!(
new_root,
Word::new([
Felt::new(10376354645124572258),
Felt::new(13808228093617896354),
Felt::new(4835829334388921262),
Felt::new(2144113770050911180)
]),
);
// Inserting the same key-value pair again should return the same root
let root_duplicate = forest.insert(new_root, key, value)?;
assert_eq!(new_root, root_duplicate);
let key2 = Word::new([ZERO, ONE, ZERO, ONE]);
let new_root = forest.insert(new_root, key2, value)?;
assert_eq!(
new_root,
Word::new([
Felt::new(1600265794710932756),
Felt::new(4102884415474859847),
Felt::new(7916203901318401823),
Felt::new(9187865964280213047)
])
);
Ok(())
}
#[test]
fn test_batch_insert() -> Result<(), MerkleError> {
let forest = SmtForest::new();
let empty_tree_root = *EmptySubtreeRoots::entry(SMT_DEPTH, 0);
let values = vec![
(Word::new([ZERO; WORD_SIZE]), Word::new([ONE; WORD_SIZE])),
(Word::new([ZERO, ONE, ZERO, ONE]), Word::new([ONE; WORD_SIZE])),
(Word::new([ZERO, ONE, ZERO, ZERO]), Word::new([ONE; WORD_SIZE])),
];
values.into_iter().permutations(3).for_each(|values| {
let mut forest = forest.clone();
let new_root = forest.batch_insert(empty_tree_root, values.clone()).unwrap();
assert_eq!(
new_root,
Word::new([
Felt::new(7086678883692273722),
Felt::new(12292668811816691012),
Felt::new(10126815404170194367),
Felt::new(1147037274136690014)
])
);
for (key, value) in values {
let proof = forest.open(new_root, key).unwrap();
proof.verify_presence(&key, &value, &new_root).unwrap();
}
});
Ok(())
}
#[test]
fn test_open_root_not_in_store() -> Result<(), MerkleError> {
let forest = SmtForest::new();
let word = Word::new([ONE; WORD_SIZE]);
assert_matches!(
forest.open(word, word),
Err(MerkleError::RootNotInStore(_)),
"The forest is empty, so only empty root is valid"
);
Ok(())
}
#[test]
fn test_open_root_in_store() -> Result<(), MerkleError> {
let mut forest = SmtForest::new();
let root = *EmptySubtreeRoots::entry(SMT_DEPTH, 0);
let root = forest.insert(
root,
Word::new([Felt::new(0), Felt::new(0), Felt::new(0), Felt::new(0)]),
int_to_node(1),
)?;
let root = forest.insert(
root,
Word::new([Felt::new(0), Felt::new(0), Felt::new(0), Felt::new(1)]),
int_to_node(2),
)?;
let root = forest.insert(
root,
Word::new([Felt::new(0), Felt::new(0), Felt::new(0), Felt::new(2)]),
int_to_node(3),
)?;
let proof =
forest.open(root, Word::new([Felt::new(0), Felt::new(0), Felt::new(0), Felt::new(2)]))?;
proof
.verify_presence(
&Word::new([Felt::new(0), Felt::new(0), Felt::new(0), Felt::new(2)]),
&int_to_node(3),
&root,
)
.expect("proof should verify membership");
Ok(())
}
#[test]
fn test_multiple_versions_of_same_key() -> Result<(), MerkleError> {
// Verify that when we insert multiple values for the same key,
// we can still open valid proofs for all historical roots.
let mut forest = SmtForest::new();
let empty_tree_root = *EmptySubtreeRoots::entry(SMT_DEPTH, 0);
let key = Word::new([ZERO; WORD_SIZE]);
// Insert the same key with different values, creating multiple roots
let value1 = Word::new([ONE; WORD_SIZE]);
let root1 = forest.insert(empty_tree_root, key, value1)?;
let value2 = Word::new([Felt::new(2); WORD_SIZE]);
let root2 = forest.insert(root1, key, value2)?;
let value3 = Word::new([Felt::new(3); WORD_SIZE]);
let root3 = forest.insert(root2, key, value3)?;
// All three roots should be different
assert_ne!(root1, root2);
assert_ne!(root2, root3);
assert_ne!(root1, root3);
// Open proofs for each historical root and verify them
let proof1 = forest.open(root1, key)?;
proof1
.verify_presence(&key, &value1, &root1)
.expect("Proof for root1 should verify with value1");
let proof2 = forest.open(root2, key)?;
proof2
.verify_presence(&key, &value2, &root2)
.expect("Proof for root2 should verify with value2");
let proof3 = forest.open(root3, key)?;
proof3
.verify_presence(&key, &value3, &root3)
.expect("Proof for root3 should verify with value3");
// Wrong values cannot be verified - should return ValueMismatch
assert_matches!(
proof1.verify_presence(&key, &value2, &root1),
Err(SmtProofError::ValueMismatch { .. }),
"Proof for root1 should not verify with value2"
);
assert_matches!(
proof3.verify_presence(&key, &value1, &root3),
Err(SmtProofError::ValueMismatch { .. }),
"Proof for root3 should not verify with value1"
);
Ok(())
}
#[test]
fn test_pop_roots() -> Result<(), MerkleError> {
let mut forest = SmtForest::new();
let empty_tree_root = *EmptySubtreeRoots::entry(SMT_DEPTH, 0);
let key = Word::new([ZERO; WORD_SIZE]);
let value = Word::new([ONE; WORD_SIZE]);
let root = forest.insert(empty_tree_root, key, value)?;
assert_eq!(forest.roots.len(), 1);
assert_eq!(forest.leaves.len(), 1);
forest.pop_smts(vec![root]);
assert_eq!(forest.roots.len(), 0);
assert_eq!(forest.leaves.len(), 0);
Ok(())
}
#[test]
fn test_removing_empty_smt_from_forest() {
let mut forest = SmtForest::new();
let empty_tree_root = *EmptySubtreeRoots::entry(SMT_DEPTH, 0);
let non_empty_root = Word::new([ONE; WORD_SIZE]);
// Popping zero SMTs from forest should be a no-op (no panic or error)
forest.pop_smts(vec![]);
// Popping a non-existent root should be a no-op (no panic or error)
forest.pop_smts(vec![non_empty_root]);
// Popping the empty root should be a no-op (no panic or error)
forest.pop_smts(vec![empty_tree_root]);
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/forest/mod.rs | miden-crypto/src/merkle/smt/forest/mod.rs | use alloc::{collections::BTreeSet, vec::Vec};
use super::{EmptySubtreeRoots, MerkleError, NodeIndex, SmtLeaf, SmtProof, Word};
use crate::{
Map,
merkle::smt::{LeafIndex, SMT_DEPTH, SmtLeafError, SmtProofError, forest::store::SmtStore},
};
mod store;
#[cfg(test)]
mod tests;
// SPARSE MERKLE TREE FOREST
// ================================================================================================
/// An in-memory data collection of sparse Merkle trees (SMTs).
///
/// Each SMT in the forest is identified by its root hash. The forest stores all leaves of all SMTs
/// in the forest, as well as all Merkle paths required to prove membership of any leaf in any SMT.
///
/// An empty tree root is always present in the forest.
///
/// Example usage:
///
/// ```rust
/// use miden_crypto::{
/// Felt, ONE, WORD_SIZE, Word, ZERO,
/// merkle::{
/// EmptySubtreeRoots,
/// smt::{MAX_LEAF_ENTRIES, SMT_DEPTH, SmtForest},
/// },
/// };
///
/// // Create a new SMT forest
/// let mut forest = SmtForest::new();
///
/// // Insert a key-value pair into an SMT with an empty root
/// let empty_tree_root = *EmptySubtreeRoots::entry(SMT_DEPTH, 0);
/// let key = Word::new([ZERO; WORD_SIZE]);
/// let value = Word::new([ONE; WORD_SIZE]);
/// let new_root = forest.insert(empty_tree_root, key, value).unwrap();
///
/// // Insert multiple key-value pairs
/// let mut entries = Vec::new();
/// for i in 0..MAX_LEAF_ENTRIES {
/// let key = Word::new([Felt::new(i as u64); WORD_SIZE]);
/// let value = Word::new([Felt::new((i + 1) as u64); WORD_SIZE]);
/// entries.push((key, value));
/// }
/// let new_root = forest.batch_insert(new_root, entries.into_iter()).unwrap();
///
/// // Open a proof for the inserted key
/// let proof = forest.open(new_root, key).unwrap();
///
/// // Prune SMTs to release memory used by their nodes and leaves
/// forest.pop_smts(vec![new_root]);
/// ```
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct SmtForest {
/// Roots of all SMTs in this forest. Any time an SMT in this forest is updated, we add a new
/// root to this set.
roots: BTreeSet<Word>,
/// Stores Merkle paths for all SMTs in this forest.
store: SmtStore,
/// Leaves of all SMTs stored in this forest
leaves: Map<Word, SmtLeaf>,
}
impl Default for SmtForest {
fn default() -> Self {
Self::new()
}
}
impl SmtForest {
// CONSTRUCTORS
// --------------------------------------------------------------------------------------------
/// Creates an empty `SmtForest` instance.
pub fn new() -> SmtForest {
let roots = BTreeSet::new();
let store = SmtStore::new();
let leaves = Map::new();
SmtForest { roots, store, leaves }
}
// DATA EXTRACTORS
// --------------------------------------------------------------------------------------------
/// Returns an opening for the specified key in the SMT with the specified root.
///
/// Returns an error if an SMT with this root is not in the forest, or if the forest does
/// not have sufficient data to provide an opening for the specified key.
pub fn open(&self, root: Word, key: Word) -> Result<SmtProof, MerkleError> {
if !self.contains_root(root) {
return Err(MerkleError::RootNotInStore(root));
}
let leaf_index = NodeIndex::from(LeafIndex::from(key));
let proof = self.store.get_path(root, leaf_index)?;
let path = proof.path.try_into()?;
let leaf = proof.value;
let Some(leaf) = self.leaves.get(&leaf).cloned() else {
return Err(MerkleError::UntrackedKey(key));
};
SmtProof::new(path, leaf).map_err(|error| match error {
SmtProofError::InvalidMerklePathLength(depth) => MerkleError::InvalidPathLength(depth),
// These variants are only returned by verification methods, not by SmtProof::new()
SmtProofError::InvalidKeyForProof
| SmtProofError::ValueMismatch { .. }
| SmtProofError::ConflictingRoots { .. }
| SmtProofError::ValuePresent { .. } => unreachable!(),
})
}
// STATE MUTATORS
// --------------------------------------------------------------------------------------------
/// Inserts the specified key-value pair into an SMT with the specified root. This will also
/// add a new root to the forest. Returns the new root.
///
/// Returns an error if an SMT with the specified root is not in the forest, these is not
/// enough data in the forest to perform the insert, or if the insert would create a leaf
/// with too many entries.
pub fn insert(&mut self, root: Word, key: Word, value: Word) -> Result<Word, MerkleError> {
self.batch_insert(root, vec![(key, value)])
}
/// Inserts the specified key-value pairs into an SMT with the specified root. This will also
/// add a single new root to the forest for the entire batch of inserts. Returns the new root.
///
/// Returns an error if an SMT with the specified root is not in the forest, these is not
/// enough data in the forest to perform the insert, or if the insert would create a leaf
/// with too many entries.
pub fn batch_insert(
&mut self,
root: Word,
entries: impl IntoIterator<Item = (Word, Word)> + Clone,
) -> Result<Word, MerkleError> {
if !self.contains_root(root) {
return Err(MerkleError::RootNotInStore(root));
}
// Find all affected leaf indices
let indices = entries
.clone()
.into_iter()
.map(|(key, _)| LeafIndex::from(key))
.collect::<BTreeSet<_>>();
// Create new SmtLeaf objects for updated key-value pairs
let mut new_leaves = Map::new();
for index in indices {
let node_index = NodeIndex::from(index);
let current_hash = self.store.get_node(root, node_index)?;
let current_leaf = self
.leaves
.get(¤t_hash)
.cloned()
.unwrap_or_else(|| SmtLeaf::new_empty(index));
new_leaves.insert(index, (current_hash, current_leaf));
}
for (key, value) in entries {
let index = LeafIndex::from(key);
let (_old_hash, leaf) = new_leaves.get_mut(&index).unwrap();
leaf.insert(key, value).map_err(to_merkle_error)?;
}
// Calculate new leaf hashes, skip processing unchanged leaves
new_leaves = new_leaves
.into_iter()
.filter_map(|(key, (old_hash, leaf))| {
let new_hash = leaf.hash();
if new_hash == old_hash {
None
} else {
Some((key, (new_hash, leaf)))
}
})
.collect();
// Update SmtStore with new leaf hashes
let new_leaf_entries =
new_leaves.iter().map(|(index, leaf)| (NodeIndex::from(*index), leaf.0));
let new_root = self.store.set_leaves(root, new_leaf_entries)?;
// Update successful, insert new leaves into the forest
for (leaf_hash, leaf) in new_leaves.into_values() {
self.leaves.insert(leaf_hash, leaf);
}
self.roots.insert(new_root);
Ok(new_root)
}
/// Removes the specified SMTs (identified by their roots) from the forest.
/// Releases memory used by nodes and leaves that are no longer reachable.
/// Roots not in the forest and empty trees are ignored.
pub fn pop_smts(&mut self, roots: impl IntoIterator<Item = Word>) {
let roots = roots
.into_iter()
.filter(|root| {
// don't use self.contains_root here because we don't remove empty trees
self.roots.contains(root)
})
.collect::<Vec<_>>();
for root in &roots {
self.roots.remove(root);
}
for leaf in self.store.remove_roots(roots) {
self.leaves.remove(&leaf);
}
}
// HELPER METHODS
// --------------------------------------------------------------------------------------------
/// Checks if the forest contains the specified root or if it is the empty tree root
/// (always present in the forest).
fn contains_root(&self, root: Word) -> bool {
self.roots.contains(&root) || *EmptySubtreeRoots::entry(SMT_DEPTH, 0) == root
}
}
fn to_merkle_error(err: SmtLeafError) -> MerkleError {
match err {
SmtLeafError::TooManyLeafEntries { actual } => MerkleError::TooManyLeafEntries { actual },
_ => unreachable!("other SmtLeafError variants should not be possible here"),
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/full/leaf.rs | miden-crypto/src/merkle/smt/full/leaf.rs | use alloc::{string::ToString, vec::Vec};
use core::cmp::Ordering;
use super::EMPTY_WORD;
use crate::{
Felt, Word,
field::PrimeField64,
hash::rpo::Rpo256,
merkle::smt::{LeafIndex, MAX_LEAF_ENTRIES, SMT_DEPTH, SmtLeafError},
utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable},
};
/// Represents a leaf node in the Sparse Merkle Tree.
///
/// A leaf can be empty, hold a single key-value pair, or multiple key-value pairs.
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub enum SmtLeaf {
/// An empty leaf at the specified index.
Empty(LeafIndex<SMT_DEPTH>),
/// A leaf containing a single key-value pair.
Single((Word, Word)),
/// A leaf containing multiple key-value pairs.
Multiple(Vec<(Word, Word)>),
}
impl SmtLeaf {
// CONSTRUCTORS
// ---------------------------------------------------------------------------------------------
/// Returns a new leaf with the specified entries
///
/// # Errors
/// - Returns an error if 2 keys in `entries` map to a different leaf index
/// - Returns an error if 1 or more keys in `entries` map to a leaf index different from
/// `leaf_index`
pub fn new(
entries: Vec<(Word, Word)>,
leaf_index: LeafIndex<SMT_DEPTH>,
) -> Result<Self, SmtLeafError> {
match entries.len() {
0 => Ok(Self::new_empty(leaf_index)),
1 => {
let (key, value) = entries[0];
let computed_index = LeafIndex::<SMT_DEPTH>::from(key);
if computed_index != leaf_index {
return Err(SmtLeafError::InconsistentSingleLeafIndices {
key,
expected_leaf_index: leaf_index,
actual_leaf_index: computed_index,
});
}
Ok(Self::new_single(key, value))
},
_ => {
let leaf = Self::new_multiple(entries)?;
// `new_multiple()` checked that all keys map to the same leaf index. We still need
// to ensure that leaf index is `leaf_index`.
if leaf.index() != leaf_index {
Err(SmtLeafError::InconsistentMultipleLeafIndices {
leaf_index_from_keys: leaf.index(),
leaf_index_supplied: leaf_index,
})
} else {
Ok(leaf)
}
},
}
}
/// Returns a new empty leaf with the specified leaf index
pub fn new_empty(leaf_index: LeafIndex<SMT_DEPTH>) -> Self {
Self::Empty(leaf_index)
}
/// Returns a new single leaf with the specified entry. The leaf index is derived from the
/// entry's key.
pub fn new_single(key: Word, value: Word) -> Self {
Self::Single((key, value))
}
/// Returns a new multiple leaf with the specified entries. The leaf index is derived from the
/// entries' keys.
///
/// # Errors
/// - Returns an error if 2 keys in `entries` map to a different leaf index
/// - Returns an error if the number of entries exceeds [`MAX_LEAF_ENTRIES`]
pub fn new_multiple(entries: Vec<(Word, Word)>) -> Result<Self, SmtLeafError> {
if entries.len() < 2 {
return Err(SmtLeafError::MultipleLeafRequiresTwoEntries(entries.len()));
}
if entries.len() > MAX_LEAF_ENTRIES {
return Err(SmtLeafError::TooManyLeafEntries { actual: entries.len() });
}
// Check that all keys map to the same leaf index
{
let mut keys = entries.iter().map(|(key, _)| key);
let first_key = *keys.next().expect("ensured at least 2 entries");
let first_leaf_index: LeafIndex<SMT_DEPTH> = first_key.into();
for &next_key in keys {
let next_leaf_index: LeafIndex<SMT_DEPTH> = next_key.into();
if next_leaf_index != first_leaf_index {
return Err(SmtLeafError::InconsistentMultipleLeafKeys {
key_1: first_key,
key_2: next_key,
});
}
}
}
Ok(Self::Multiple(entries))
}
// PUBLIC ACCESSORS
// ---------------------------------------------------------------------------------------------
/// Returns true if the leaf is empty
pub fn is_empty(&self) -> bool {
matches!(self, Self::Empty(_))
}
/// Returns the leaf's index in the [`super::Smt`]
pub fn index(&self) -> LeafIndex<SMT_DEPTH> {
match self {
SmtLeaf::Empty(leaf_index) => *leaf_index,
SmtLeaf::Single((key, _)) => (*key).into(),
SmtLeaf::Multiple(entries) => {
// Note: All keys are guaranteed to have the same leaf index
let (first_key, _) = entries[0];
first_key.into()
},
}
}
/// Returns the number of entries stored in the leaf
pub fn num_entries(&self) -> usize {
match self {
SmtLeaf::Empty(_) => 0,
SmtLeaf::Single(_) => 1,
SmtLeaf::Multiple(entries) => entries.len(),
}
}
/// Computes the hash of the leaf
pub fn hash(&self) -> Word {
match self {
SmtLeaf::Empty(_) => EMPTY_WORD,
SmtLeaf::Single((key, value)) => Rpo256::merge(&[*key, *value]),
SmtLeaf::Multiple(kvs) => {
let elements: Vec<Felt> = kvs.iter().copied().flat_map(kv_to_elements).collect();
Rpo256::hash_elements(&elements)
},
}
}
// ITERATORS
// ---------------------------------------------------------------------------------------------
/// Returns a slice with key-value pairs in the leaf.
pub fn entries(&self) -> &[(Word, Word)] {
match self {
SmtLeaf::Empty(_) => &[],
SmtLeaf::Single(kv_pair) => core::slice::from_ref(kv_pair),
SmtLeaf::Multiple(kv_pairs) => kv_pairs,
}
}
// CONVERSIONS
// ---------------------------------------------------------------------------------------------
/// Converts a leaf to a list of field elements
pub fn to_elements(&self) -> Vec<Felt> {
self.clone().into_elements()
}
/// Converts a leaf to a list of field elements
pub fn into_elements(self) -> Vec<Felt> {
self.into_entries().into_iter().flat_map(kv_to_elements).collect()
}
/// Converts a leaf the key-value pairs in the leaf
pub fn into_entries(self) -> Vec<(Word, Word)> {
match self {
SmtLeaf::Empty(_) => Vec::new(),
SmtLeaf::Single(kv_pair) => vec![kv_pair],
SmtLeaf::Multiple(kv_pairs) => kv_pairs,
}
}
// HELPERS
// ---------------------------------------------------------------------------------------------
/// Returns the value associated with `key` in the leaf, or `None` if `key` maps to another
/// leaf.
pub(in crate::merkle::smt) fn get_value(&self, key: &Word) -> Option<Word> {
// Ensure that `key` maps to this leaf
if self.index() != (*key).into() {
return None;
}
match self {
SmtLeaf::Empty(_) => Some(EMPTY_WORD),
SmtLeaf::Single((key_in_leaf, value_in_leaf)) => {
if key == key_in_leaf {
Some(*value_in_leaf)
} else {
Some(EMPTY_WORD)
}
},
SmtLeaf::Multiple(kv_pairs) => {
for (key_in_leaf, value_in_leaf) in kv_pairs {
if key == key_in_leaf {
return Some(*value_in_leaf);
}
}
Some(EMPTY_WORD)
},
}
}
/// Inserts key-value pair into the leaf; returns the previous value associated with `key`, if
/// any.
///
/// The caller needs to ensure that `key` has the same leaf index as all other keys in the leaf
///
/// # Errors
/// Returns an error if inserting the key-value pair would exceed [`MAX_LEAF_ENTRIES`] (1024
/// entries) in the leaf.
pub(in crate::merkle::smt) fn insert(
&mut self,
key: Word,
value: Word,
) -> Result<Option<Word>, SmtLeafError> {
match self {
SmtLeaf::Empty(_) => {
*self = SmtLeaf::new_single(key, value);
Ok(None)
},
SmtLeaf::Single(kv_pair) => {
if kv_pair.0 == key {
// the key is already in this leaf. Update the value and return the previous
// value
let old_value = kv_pair.1;
kv_pair.1 = value;
Ok(Some(old_value))
} else {
// Another entry is present in this leaf. Transform the entry into a list
// entry, and make sure the key-value pairs are sorted by key
// This stays within MAX_LEAF_ENTRIES limit. We're only adding one entry to a
// single leaf
let mut pairs = vec![*kv_pair, (key, value)];
pairs.sort_by(|(key_1, _), (key_2, _)| cmp_keys(*key_1, *key_2));
*self = SmtLeaf::Multiple(pairs);
Ok(None)
}
},
SmtLeaf::Multiple(kv_pairs) => {
match kv_pairs.binary_search_by(|kv_pair| cmp_keys(kv_pair.0, key)) {
Ok(pos) => {
let old_value = kv_pairs[pos].1;
kv_pairs[pos].1 = value;
Ok(Some(old_value))
},
Err(pos) => {
if kv_pairs.len() >= MAX_LEAF_ENTRIES {
return Err(SmtLeafError::TooManyLeafEntries {
actual: kv_pairs.len() + 1,
});
}
kv_pairs.insert(pos, (key, value));
Ok(None)
},
}
},
}
}
/// Removes key-value pair from the leaf stored at key; returns the previous value associated
/// with `key`, if any. Also returns an `is_empty` flag, indicating whether the leaf became
/// empty, and must be removed from the data structure it is contained in.
pub(in crate::merkle::smt) fn remove(&mut self, key: Word) -> (Option<Word>, bool) {
match self {
SmtLeaf::Empty(_) => (None, false),
SmtLeaf::Single((key_at_leaf, value_at_leaf)) => {
if *key_at_leaf == key {
// our key was indeed stored in the leaf, so we return the value that was stored
// in it, and indicate that the leaf should be removed
let old_value = *value_at_leaf;
// Note: this is not strictly needed, since the caller is expected to drop this
// `SmtLeaf` object.
*self = SmtLeaf::new_empty(key.into());
(Some(old_value), true)
} else {
// another key is stored at leaf; nothing to update
(None, false)
}
},
SmtLeaf::Multiple(kv_pairs) => {
match kv_pairs.binary_search_by(|kv_pair| cmp_keys(kv_pair.0, key)) {
Ok(pos) => {
let old_value = kv_pairs[pos].1;
let _ = kv_pairs.remove(pos);
debug_assert!(!kv_pairs.is_empty());
if kv_pairs.len() == 1 {
// convert the leaf into `Single`
*self = SmtLeaf::Single(kv_pairs[0]);
}
(Some(old_value), false)
},
Err(_) => {
// other keys are stored at leaf; nothing to update
(None, false)
},
}
},
}
}
}
impl Serializable for SmtLeaf {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
// Write: num entries
self.num_entries().write_into(target);
// Write: leaf index
let leaf_index: u64 = self.index().value();
leaf_index.write_into(target);
// Write: entries
for (key, value) in self.entries() {
key.write_into(target);
value.write_into(target);
}
}
}
impl Deserializable for SmtLeaf {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
// Read: num entries
let num_entries = source.read_usize()?;
// Read: leaf index
let leaf_index: LeafIndex<SMT_DEPTH> = {
let value = source.read_u64()?;
LeafIndex::new_max_depth(value)
};
// Read: entries
let mut entries: Vec<(Word, Word)> = Vec::new();
for _ in 0..num_entries {
let key: Word = source.read()?;
let value: Word = source.read()?;
entries.push((key, value));
}
Self::new(entries, leaf_index)
.map_err(|err| DeserializationError::InvalidValue(err.to_string()))
}
}
// HELPER FUNCTIONS
// ================================================================================================
/// Converts a key-value tuple to an iterator of `Felt`s
pub(crate) fn kv_to_elements((key, value): (Word, Word)) -> impl Iterator<Item = Felt> {
let key_elements = key.into_iter();
let value_elements = value.into_iter();
key_elements.chain(value_elements)
}
/// Compares two keys, compared element-by-element using their integer representations starting with
/// the most significant element.
pub(crate) fn cmp_keys(key_1: Word, key_2: Word) -> Ordering {
for (v1, v2) in key_1.iter().zip(key_2.iter()).rev() {
let v1 = (*v1).as_canonical_u64();
let v2 = (*v2).as_canonical_u64();
if v1 != v2 {
return v1.cmp(&v2);
}
}
Ordering::Equal
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/full/proof.rs | miden-crypto/src/merkle/smt/full/proof.rs | use alloc::string::ToString;
use super::{SMT_DEPTH, SmtLeaf, SmtProofError, SparseMerklePath, Word};
use crate::{
merkle::InnerNodeInfo,
utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable},
};
/// A proof which can be used to assert presence (or absence) of key-value pairs
/// in a [`super::Smt`] (Sparse Merkle Tree).
///
/// The proof consists of a sparse Merkle path and a leaf, which describes the node located at
/// the base of the path.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct SmtProof {
/// The sparse Merkle path from the leaf to the root.
path: SparseMerklePath,
/// The leaf node containing one or more key-value pairs.
leaf: SmtLeaf,
}
impl SmtProof {
// CONSTRUCTOR
// --------------------------------------------------------------------------------------------
/// Returns a new instance of [`SmtProof`] instantiated from the specified path and leaf.
///
/// # Errors
/// Returns an error if the path length does not match the expected [`SMT_DEPTH`],
/// which would make the proof invalid.
pub fn new(path: SparseMerklePath, leaf: SmtLeaf) -> Result<Self, SmtProofError> {
let depth = path.depth();
if depth != SMT_DEPTH {
return Err(SmtProofError::InvalidMerklePathLength(depth as usize));
}
Ok(Self { path, leaf })
}
/// Returns a new instance of [`SmtProof`] instantiated from the specified path and leaf.
///
/// The length of the path is not checked. Reserved for internal use.
pub(in crate::merkle::smt) fn new_unchecked(path: SparseMerklePath, leaf: SmtLeaf) -> Self {
Self { path, leaf }
}
// PROOF VERIFIER
// --------------------------------------------------------------------------------------------
/// Verifies that a [`super::Smt`] with the specified root contains the provided key-value pair.
///
/// # Errors
/// - [`SmtProofError::InvalidKeyForProof`] if the key maps to a different leaf index than this
/// proof's leaf.
/// - [`SmtProofError::ConflictingRoots`] if the computed root doesn't match the expected root.
/// - [`SmtProofError::ValueMismatch`] if the value doesn't match the value in the leaf.
pub fn verify_presence(
&self,
key: &Word,
value: &Word,
root: &Word,
) -> Result<(), SmtProofError> {
let value_in_leaf = self.leaf.get_value(key).ok_or(SmtProofError::InvalidKeyForProof)?;
// Check root before value so that ValueMismatch implies the proof is valid
let computed_root = self.compute_root();
if computed_root != *root {
return Err(SmtProofError::ConflictingRoots {
expected_root: *root,
actual_root: computed_root,
});
}
if value_in_leaf != *value {
return Err(SmtProofError::ValueMismatch { expected: *value, actual: value_in_leaf });
}
Ok(())
}
/// Verifies that a [`super::Smt`] with the specified root does not contain any value
/// for the provided key (i.e., the key is unset).
///
/// This is equivalent to calling `verify_presence(key, &EMPTY_WORD, root)`, but makes
/// the intent clearer.
///
/// # Errors
/// - [`SmtProofError::InvalidKeyForProof`] if the key maps to a different leaf index than this
/// proof's leaf.
/// - [`SmtProofError::ConflictingRoots`] if the computed root doesn't match the expected root.
/// - [`SmtProofError::ValueMismatch`] if the key has a value in the tree (i.e., is not empty).
pub fn verify_unset(&self, key: &Word, root: &Word) -> Result<(), SmtProofError> {
self.verify_presence(key, &super::EMPTY_WORD, root)
}
/// Verifies that a specific key-value pair is not in the tree.
///
/// This succeeds if the key exists with a different value, or if the key is unset.
///
/// # Errors
/// - [`SmtProofError::InvalidKeyForProof`] if the key maps to a different leaf index than this
/// proof's leaf.
/// - [`SmtProofError::ConflictingRoots`] if the computed root doesn't match the expected root.
/// - [`SmtProofError::ValuePresent`] if the key-value pair exists in the tree.
pub fn verify_absence(
&self,
key: &Word,
value: &Word,
root: &Word,
) -> Result<(), SmtProofError> {
match self.verify_presence(key, value, root) {
// The key-value pair exists - absence verification fails
Ok(()) => Err(SmtProofError::ValuePresent { key: *key, value: *value }),
// Value is different - the pair is absent, success
Err(SmtProofError::ValueMismatch { .. }) => Ok(()),
// Other errors propagate as-is
Err(e) => Err(e),
}
}
// PUBLIC ACCESSORS
// --------------------------------------------------------------------------------------------
/// Returns the value associated with the specific key according to this proof, or None if
/// this proof does not contain a value for the specified key.
///
/// A key-value pair generated by using this method should pass the `verify_presence()` check.
pub fn get(&self, key: &Word) -> Option<Word> {
self.leaf.get_value(key)
}
/// Computes the root of a [`super::Smt`] to which this proof resolves.
pub fn compute_root(&self) -> Word {
self.path
.compute_root(self.leaf.index().value(), self.leaf.hash())
.expect("failed to compute Merkle path root")
}
/// Returns the proof's sparse Merkle path.
pub fn path(&self) -> &SparseMerklePath {
&self.path
}
/// Returns the leaf associated with the proof.
pub fn leaf(&self) -> &SmtLeaf {
&self.leaf
}
/// Returns an iterator over every inner node of this proof's merkle path.
pub fn authenticated_nodes(&self) -> impl Iterator<Item = InnerNodeInfo> + '_ {
self.path
.authenticated_nodes(self.leaf.index().value(), self.leaf.hash())
.expect("leaf index is u64 and should be less than 2^SMT_DEPTH")
}
/// Consume the proof and returns its parts.
pub fn into_parts(self) -> (SparseMerklePath, SmtLeaf) {
(self.path, self.leaf)
}
}
impl Serializable for SmtProof {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
self.path.write_into(target);
self.leaf.write_into(target);
}
}
impl Deserializable for SmtProof {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
let path = SparseMerklePath::read_from(source)?;
let leaf = SmtLeaf::read_from(source)?;
Self::new(path, leaf).map_err(|err| DeserializationError::InvalidValue(err.to_string()))
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/full/tests.rs | miden-crypto/src/merkle/smt/full/tests.rs | use alloc::vec::Vec;
use assert_matches::assert_matches;
use p3_field::PrimeCharacteristicRing;
use super::{EMPTY_WORD, LeafIndex, NodeIndex, SMT_DEPTH, Smt, SmtLeaf};
use crate::{
Felt, ONE, WORD_SIZE, Word,
hash::rpo::Rpo256,
merkle::{
EmptySubtreeRoots,
smt::{
Map, MutationSet, NodeMutation, SmtLeafError, SmtProofError, SparseMerkleTree,
full::MAX_LEAF_ENTRIES,
},
store::MerkleStore,
},
utils::{Deserializable, Serializable},
};
// SMT
// --------------------------------------------------------------------------------------------
/// This test checks that inserting twice at the same key functions as expected. The test covers
/// only the case where the key is alone in its leaf
#[test]
fn test_smt_insert_at_same_key() {
let mut smt = Smt::default();
let mut store: MerkleStore = MerkleStore::default();
assert_eq!(smt.root(), *EmptySubtreeRoots::entry(SMT_DEPTH, 0));
let key_1: Word = {
let raw = 0b_01101001_01101100_00011111_11111111_10010110_10010011_11100000_00000000_u64;
Word::from([ONE, ONE, ONE, Felt::new(raw)])
};
let key_1_index: NodeIndex = LeafIndex::<SMT_DEPTH>::from(key_1).into();
let value_1 = Word::new([ONE; WORD_SIZE]);
let value_2 = Word::new([ONE + ONE; WORD_SIZE]);
// Insert value 1 and ensure root is as expected
{
let leaf_node = build_empty_or_single_leaf_node(key_1, value_1);
let tree_root = store.set_node(smt.root(), key_1_index, leaf_node).unwrap().root;
let old_value_1 = smt.insert(key_1, value_1).unwrap();
assert_eq!(old_value_1, EMPTY_WORD);
assert_eq!(smt.root(), tree_root);
}
// Insert value 2 and ensure root is as expected
{
let leaf_node = build_empty_or_single_leaf_node(key_1, value_2);
let tree_root = store.set_node(smt.root(), key_1_index, leaf_node).unwrap().root;
let old_value_2 = smt.insert(key_1, value_2).unwrap();
assert_eq!(old_value_2, value_1);
assert_eq!(smt.root(), tree_root);
}
}
/// This test checks that inserting twice at the same key functions as expected. The test covers
/// only the case where the leaf type is `SmtLeaf::Multiple`
#[test]
fn test_smt_insert_at_same_key_2() {
// The most significant u64 used for both keys (to ensure they map to the same leaf)
let key_msb: u64 = 42;
let key_already_present = Word::from([2_u64, 2_u64, 2_u64, key_msb].map(Felt::new));
let key_already_present_index: NodeIndex =
LeafIndex::<SMT_DEPTH>::from(key_already_present).into();
let value_already_present = Word::new([ONE + ONE + ONE; WORD_SIZE]);
let mut smt =
Smt::with_entries(core::iter::once((key_already_present, value_already_present))).unwrap();
let mut store: MerkleStore = {
let mut store = MerkleStore::default();
let leaf_node = build_empty_or_single_leaf_node(key_already_present, value_already_present);
store
.set_node(*EmptySubtreeRoots::entry(SMT_DEPTH, 0), key_already_present_index, leaf_node)
.unwrap();
store
};
let key_1: Word = Word::from([ONE, ONE, ONE, Felt::new(key_msb)]);
let key_1_index: NodeIndex = LeafIndex::<SMT_DEPTH>::from(key_1).into();
assert_eq!(key_1_index, key_already_present_index);
let value_1 = Word::new([ONE; WORD_SIZE]);
let value_2 = Word::new([ONE + ONE; WORD_SIZE]);
// Insert value 1 and ensure root is as expected
{
// Note: key_1 comes first because it is smaller
let leaf_node = build_multiple_leaf_node(&[
(key_1, value_1),
(key_already_present, value_already_present),
]);
let tree_root = store.set_node(smt.root(), key_1_index, leaf_node).unwrap().root;
let old_value_1 = smt.insert(key_1, value_1).unwrap();
assert_eq!(old_value_1, EMPTY_WORD);
assert_eq!(smt.root(), tree_root);
}
// Insert value 2 and ensure root is as expected
{
let leaf_node = build_multiple_leaf_node(&[
(key_1, value_2),
(key_already_present, value_already_present),
]);
let tree_root = store.set_node(smt.root(), key_1_index, leaf_node).unwrap().root;
let old_value_2 = smt.insert(key_1, value_2).unwrap();
assert_eq!(old_value_2, value_1);
assert_eq!(smt.root(), tree_root);
}
}
/// This test ensures that the root of the tree is as expected when we add/remove 3 items at 3
/// different keys. This also tests that the merkle paths produced are as expected.
#[test]
fn test_smt_insert_and_remove_multiple_values() {
    /// Inserts each pair into both `smt` and the reference `store`, asserting after every
    /// insertion that the two roots agree and that `smt` opens the same Merkle path that the
    /// store reports for the affected leaf.
    fn insert_values_and_assert_path(
        smt: &mut Smt,
        store: &mut MerkleStore,
        key_values: &[(Word, Word)],
    ) {
        for &(key, value) in key_values {
            let key_index: NodeIndex = LeafIndex::<SMT_DEPTH>::from(key).into();
            let leaf_node = build_empty_or_single_leaf_node(key, value);
            // Apply the same change to the reference store first; its returned root is the
            // expected root for the SMT after the insertion below.
            let tree_root = store.set_node(smt.root(), key_index, leaf_node).unwrap().root;
            smt.insert(key, value).unwrap();
            assert_eq!(smt.root(), tree_root);
            let expected_path = store.get_path(tree_root, key_index).unwrap();
            assert_eq!(smt.open(&key).into_parts().0, expected_path.path);
        }
    }
    let mut smt = Smt::default();
    let mut store: MerkleStore = MerkleStore::default();
    assert_eq!(smt.root(), *EmptySubtreeRoots::entry(SMT_DEPTH, 0));
    // The three keys differ in the most significant felt, so each maps to a distinct leaf.
    let key_1: Word = {
        let raw = 0b_01101001_01101100_00011111_11111111_10010110_10010011_11100000_00000000_u64;
        Word::from([ONE, ONE, ONE, Felt::new(raw)])
    };
    let key_2: Word = {
        let raw = 0b_11111111_11111111_11111111_11111111_11111111_11111111_11111111_11111111_u64;
        Word::from([ONE, ONE, ONE, Felt::new(raw)])
    };
    let key_3: Word = {
        let raw = 0b_00000000_00000000_00000000_00000000_00000000_00000000_00000000_00000000_u64;
        Word::from([ONE, ONE, ONE, Felt::new(raw)])
    };
    let value_1 = Word::new([ONE; WORD_SIZE]);
    let value_2 = Word::new([ONE + ONE; WORD_SIZE]);
    let value_3 = Word::new([ONE + ONE + ONE; WORD_SIZE]);
    // Insert values in the tree
    let key_values = [(key_1, value_1), (key_2, value_2), (key_3, value_3)];
    insert_values_and_assert_path(&mut smt, &mut store, &key_values);
    // Remove values from the tree (inserting EMPTY_WORD is removal)
    let key_empty_values = [(key_1, EMPTY_WORD), (key_2, EMPTY_WORD), (key_3, EMPTY_WORD)];
    insert_values_and_assert_path(&mut smt, &mut store, &key_empty_values);
    let empty_root = *EmptySubtreeRoots::entry(SMT_DEPTH, 0);
    assert_eq!(smt.root(), empty_root);
    // an empty tree should have no leaves or inner nodes
    assert!(smt.leaves.is_empty());
    assert!(smt.inner_nodes.is_empty());
}
/// Verify that `insert_inner_node` never persists empty subtrees.
#[test]
fn test_smt_dont_store_empty_subtrees() {
    use crate::merkle::smt::InnerNode;

    let mut smt = Smt::default();
    let index = NodeIndex::new(10, 42).unwrap();
    let empty_node = EmptySubtreeRoots::get_inner_node(SMT_DEPTH, index.depth());

    // Inserting the canonical empty subtree node into an empty slot is a no-op.
    assert!(!smt.inner_nodes.contains_key(&index));
    assert_eq!(smt.insert_inner_node(index, empty_node.clone()), None);
    assert!(!smt.inner_nodes.contains_key(&index));

    // Overwriting a non-empty node with the empty subtree node removes the stored entry.
    let occupied = InnerNode {
        left: Word::new([ONE; 4]),
        right: Word::new([ONE + ONE; 4]),
    };
    smt.insert_inner_node(index, occupied.clone());
    assert_eq!(smt.insert_inner_node(index, empty_node.clone()), Some(occupied));
    assert!(!smt.inner_nodes.contains_key(&index));

    // Lookups of the unstored slot still yield the canonical empty subtree node.
    assert_eq!(smt.get_inner_node(index), empty_node);
}
/// This tests that inserting the empty value does indeed remove the key-value contained at the
/// leaf. We insert & remove 3 values at the same leaf to ensure that all cases are covered (empty,
/// single, multiple).
#[test]
fn test_smt_removal() {
    let mut smt = Smt::default();
    // All three keys share the same most significant felt, so they map to the same leaf.
    let raw = 0b_01101001_01101100_00011111_11111111_10010110_10010011_11100000_00000000_u64;
    let key_1: Word = Word::from([ONE, ONE, ONE, Felt::new(raw)]);
    let key_2: Word = Word::from([Felt::new(2), Felt::new(2), Felt::new(2), Felt::new(raw)]);
    let key_3: Word = Word::from([
        Felt::from_u32(3_u32),
        Felt::from_u32(3_u32),
        Felt::from_u32(3_u32),
        Felt::new(raw),
    ]);
    let value_1 = Word::new([ONE; WORD_SIZE]);
    let value_2 = Word::new([Felt::from_u32(2_u32); WORD_SIZE]);
    let value_3 = Word::new([Felt::from_u32(3_u32); WORD_SIZE]);
    // insert key-value 1: leaf transitions from empty to single
    {
        let old_value_1 = smt.insert(key_1, value_1).unwrap();
        assert_eq!(old_value_1, EMPTY_WORD);
        assert_eq!(smt.get_leaf(&key_1), SmtLeaf::Single((key_1, value_1)));
    }
    // insert key-value 2: leaf transitions from single to multiple
    {
        let old_value_2 = smt.insert(key_2, value_2).unwrap();
        assert_eq!(old_value_2, EMPTY_WORD);
        assert_eq!(
            smt.get_leaf(&key_2),
            SmtLeaf::Multiple(vec![(key_1, value_1), (key_2, value_2)])
        );
    }
    // insert key-value 3: multiple leaf grows; entries appear ordered by key
    {
        let old_value_3 = smt.insert(key_3, value_3).unwrap();
        assert_eq!(old_value_3, EMPTY_WORD);
        assert_eq!(
            smt.get_leaf(&key_3),
            SmtLeaf::Multiple(vec![(key_1, value_1), (key_2, value_2), (key_3, value_3)])
        );
    }
    // remove key 3: removal returns the previous value and shrinks the multiple leaf
    {
        let old_value_3 = smt.insert(key_3, EMPTY_WORD).unwrap();
        assert_eq!(old_value_3, value_3);
        assert_eq!(
            smt.get_leaf(&key_3),
            SmtLeaf::Multiple(vec![(key_1, value_1), (key_2, value_2)])
        );
    }
    // remove key 2: leaf collapses from multiple back to single
    {
        let old_value_2 = smt.insert(key_2, EMPTY_WORD).unwrap();
        assert_eq!(old_value_2, value_2);
        assert_eq!(smt.get_leaf(&key_2), SmtLeaf::Single((key_1, value_1)));
    }
    // remove key 1: leaf becomes empty again
    {
        let old_value_1 = smt.insert(key_1, EMPTY_WORD).unwrap();
        assert_eq!(old_value_1, value_1);
        assert_eq!(smt.get_leaf(&key_1), SmtLeaf::new_empty(key_1.into()));
    }
}
/// This tests that we can correctly calculate prospective leaves -- that is, we can construct
/// correct [`SmtLeaf`] values for a theoretical insertion on a Merkle tree without mutating or
/// cloning the tree.
#[test]
fn test_prospective_hash() {
    /// Computes the prospective leaf hash for inserting `(key, value)`, performs the real
    /// insertion, and asserts the prospective hash matches the actual leaf hash.
    fn insert_and_assert_prospective(smt: &mut Smt, key: Word, value: Word) {
        let prospective =
            smt.construct_prospective_leaf(smt.get_leaf(&key), &key, &value).unwrap().hash();
        smt.insert(key, value).unwrap();
        let leaf = smt.get_leaf(&key);
        assert_eq!(
            prospective,
            leaf.hash(),
            "prospective hash for leaf {leaf:?} did not match actual hash",
        );
    }

    /// Removes `key` (asserting it previously held `expected_value`), then checks that
    /// prospectively re-adding the removed value reconstructs a leaf with the original hash.
    fn remove_and_assert_prospective(smt: &mut Smt, key: Word, expected_value: Word) {
        let old_leaf = smt.get_leaf(&key);
        let old_value = smt.insert(key, EMPTY_WORD).unwrap();
        assert_eq!(old_value, expected_value);
        let prospective_leaf =
            smt.construct_prospective_leaf(smt.get_leaf(&key), &key, &old_value).unwrap();
        assert_eq!(
            old_leaf.hash(),
            prospective_leaf.hash(),
            "removing and prospectively re-adding a leaf didn't yield the original leaf:\
            \n original leaf: {old_leaf:?}\
            \n prospective leaf: {prospective_leaf:?}",
        );
    }

    let mut smt = Smt::default();
    // All keys share the most significant felt, so they land in the same leaf.
    let raw = 0b_01101001_01101100_00011111_11111111_10010110_10010011_11100000_00000000_u64;
    let key_1: Word = Word::from([ONE, ONE, ONE, Felt::new(raw)]);
    let key_2: Word = Word::from([Felt::new(2), Felt::new(2), Felt::new(2), Felt::new(raw)]);
    // Sort key_3 before key_1, to test non-append insertion.
    let key_3: Word = Word::from([
        Felt::from_u32(0_u32),
        Felt::from_u32(0_u32),
        Felt::from_u32(0_u32),
        Felt::new(raw),
    ]);
    let value_1 = Word::new([ONE; WORD_SIZE]);
    let value_2 = Word::new([Felt::from_u32(2_u32); WORD_SIZE]);
    let value_3 = Word::new([Felt::from_u32(3_u32); WORD_SIZE]);

    // insert key-values 1, 2, 3 (covers empty -> single -> multiple leaf transitions)
    insert_and_assert_prospective(&mut smt, key_1, value_1);
    insert_and_assert_prospective(&mut smt, key_2, value_2);
    insert_and_assert_prospective(&mut smt, key_3, value_3);

    // remove keys 3, 2, 1 in reverse insertion order
    remove_and_assert_prospective(&mut smt, key_3, value_3);
    remove_and_assert_prospective(&mut smt, key_2, value_2);
    remove_and_assert_prospective(&mut smt, key_1, value_1);
}
/// This tests that we can perform prospective changes correctly.
#[test]
fn test_prospective_insertion() {
    let mut smt = Smt::default();
    let raw = 0b_01101001_01101100_00011111_11111111_10010110_10010011_11100000_00000000_u64;
    let key_1: Word = Word::from([ONE, ONE, ONE, Felt::new(raw)]);
    let key_2: Word = Word::from([Felt::new(2), Felt::new(2), Felt::new(2), Felt::new(raw)]);
    // Sort key_3 before key_1, to test non-append insertion.
    let key_3: Word = Word::from([
        Felt::from_u32(0_u32),
        Felt::from_u32(0_u32),
        Felt::from_u32(0_u32),
        Felt::new(raw),
    ]);
    let value_1 = Word::new([ONE; WORD_SIZE]);
    let value_2 = Word::new([Felt::from_u32(2_u32); WORD_SIZE]);
    let value_3 = Word::new([Felt::from_u32(3_u32); WORD_SIZE]);
    // Record reference roots by performing the insertions for real on a scratch tree; the
    // tree is reset below and the same states are then reached via mutation sets.
    let root_empty = smt.root();
    let root_1 = {
        smt.insert(key_1, value_1).unwrap();
        smt.root()
    };
    let root_2 = {
        smt.insert(key_2, value_2).unwrap();
        smt.root()
    };
    let root_3 = {
        smt.insert(key_3, value_3).unwrap();
        smt.root()
    };
    // Test incremental updates.
    let mut smt = Smt::default();
    let mutations = smt.compute_mutations(vec![(key_1, value_1)]).unwrap();
    assert_eq!(mutations.root(), root_1, "prospective root 1 did not match actual root 1");
    let revert = apply_mutations(&mut smt, mutations);
    assert_eq!(smt.root(), root_1, "mutations before and after apply did not match");
    assert_eq!(revert.old_root, smt.root(), "reverse mutations old root did not match");
    assert_eq!(revert.root(), root_empty, "reverse mutations new root did not match");
    assert_eq!(
        revert.new_pairs,
        Map::from_iter([(key_1, EMPTY_WORD)]),
        "reverse mutations pairs did not match"
    );
    // Undoing the only insertion must remove every inner node that insertion created.
    assert_eq!(
        revert.node_mutations,
        smt.inner_nodes.keys().map(|key| (*key, NodeMutation::Removal)).collect(),
        "reverse mutations inner nodes did not match"
    );
    let mutations = smt.compute_mutations(vec![(key_2, value_2)]).unwrap();
    assert_eq!(mutations.root(), root_2, "prospective root 2 did not match actual root 2");
    // The mutation set above is not applied; this one, computed on the same tree, is used
    // instead and exercises multiple updates to the same key within one mutation set.
    let mutations = smt
        .compute_mutations(vec![(key_3, EMPTY_WORD), (key_2, value_2), (key_3, value_3)])
        .unwrap();
    assert_eq!(mutations.root(), root_3, "mutations before and after apply did not match");
    let old_root = smt.root();
    let revert = apply_mutations(&mut smt, mutations);
    assert_eq!(revert.old_root, smt.root(), "reverse mutations old root did not match");
    assert_eq!(revert.root(), old_root, "reverse mutations new root did not match");
    assert_eq!(
        revert.new_pairs,
        Map::from_iter([(key_2, EMPTY_WORD), (key_3, EMPTY_WORD)]),
        "reverse mutations pairs did not match"
    );
    // Edge case: multiple values at the same key, where a later pair restores the original value.
    let mutations = smt.compute_mutations(vec![(key_3, EMPTY_WORD), (key_3, value_3)]).unwrap();
    assert_eq!(mutations.root(), root_3);
    let old_root = smt.root();
    let revert = apply_mutations(&mut smt, mutations);
    assert_eq!(smt.root(), root_3);
    assert_eq!(revert.old_root, smt.root(), "reverse mutations old root did not match");
    assert_eq!(revert.root(), old_root, "reverse mutations new root did not match");
    assert_eq!(
        revert.new_pairs,
        Map::from_iter([(key_3, value_3)]),
        "reverse mutations pairs did not match"
    );
    // Test batch updates, and that the order doesn't matter.
    let pairs =
        vec![(key_3, value_2), (key_2, EMPTY_WORD), (key_1, EMPTY_WORD), (key_3, EMPTY_WORD)];
    let mutations = smt.compute_mutations(pairs).unwrap();
    assert_eq!(
        mutations.root(),
        root_empty,
        "prospective root for batch removal did not match actual root",
    );
    let old_root = smt.root();
    let revert = apply_mutations(&mut smt, mutations);
    assert_eq!(smt.root(), root_empty, "mutations before and after apply did not match");
    assert_eq!(revert.old_root, smt.root(), "reverse mutations old root did not match");
    assert_eq!(revert.root(), old_root, "reverse mutations new root did not match");
    assert_eq!(
        revert.new_pairs,
        Map::from_iter([(key_1, value_1), (key_2, value_2), (key_3, value_3)]),
        "reverse mutations pairs did not match"
    );
    // Re-insert everything and confirm the tree returns to the root_3 state.
    let pairs = vec![(key_3, value_3), (key_1, value_1), (key_2, value_2)];
    let mutations = smt.compute_mutations(pairs).unwrap();
    assert_eq!(mutations.root(), root_3);
    smt.apply_mutations(mutations).unwrap();
    assert_eq!(smt.root(), root_3);
}
/// Re-applying a tree's existing entries must produce an empty mutation set with an
/// unchanged root.
#[test]
fn test_mutations_no_mutations() {
    let entries = [(Word::from([ONE, ONE, ONE, ONE]), Word::new([ONE; WORD_SIZE]))];
    let tree = Smt::with_entries(entries).unwrap();

    let mutations = tree.compute_mutations(entries).unwrap();

    assert_eq!(mutations.root(), mutations.old_root(), "Root should not change");
    assert!(mutations.node_mutations().is_empty(), "Node mutations should be empty");
    assert!(mutations.new_pairs().is_empty(), "There should be no new pairs");
}
/// Mutations applied with reversion can be undone: applying the returned revert set restores
/// the tree to its exact prior state.
#[test]
fn test_mutations_revert() {
    let mut tree = Smt::default();

    let k1: Word = Word::from([ONE, ONE, ONE, Felt::new(1)]);
    let k2: Word = Word::from([Felt::new(2), Felt::new(2), Felt::new(2), Felt::new(2)]);
    let k3: Word = Word::from([
        Felt::from_u32(0_u32),
        Felt::from_u32(0_u32),
        Felt::from_u32(0_u32),
        Felt::new(3),
    ]);
    let v1 = Word::new([ONE; WORD_SIZE]);
    let v2 = Word::new([Felt::from_u32(2_u32); WORD_SIZE]);
    let v3 = Word::new([Felt::from_u32(3_u32); WORD_SIZE]);

    tree.insert(k1, v1).unwrap();
    tree.insert(k2, v2).unwrap();

    // One removal, one update, and one fresh insertion.
    let mutations =
        tree.compute_mutations(vec![(k1, EMPTY_WORD), (k2, v1), (k3, v3)]).unwrap();

    let snapshot = tree.clone();
    let revert = tree.apply_mutations_with_reversion(mutations).unwrap();
    assert_eq!(revert.old_root, tree.root(), "reverse mutations old root did not match");
    assert_eq!(revert.root(), snapshot.root(), "reverse mutations new root did not match");

    tree.apply_mutations(revert).unwrap();
    assert_eq!(tree, snapshot, "SMT with applied revert mutations did not match original SMT");
}
/// A mutation set (and its reversion) must survive a serialization round trip unchanged.
#[test]
fn test_mutation_set_serialization() {
    let mut tree = Smt::default();

    let k1: Word = Word::from([ONE, ONE, ONE, Felt::new(1)]);
    let k2: Word = Word::from([Felt::new(2), Felt::new(2), Felt::new(2), Felt::new(2)]);
    let k3: Word = Word::from([
        Felt::from_u32(0_u32),
        Felt::from_u32(0_u32),
        Felt::from_u32(0_u32),
        Felt::new(3),
    ]);
    let v1 = Word::new([ONE; WORD_SIZE]);
    let v2 = Word::new([Felt::from_u32(2_u32); WORD_SIZE]);
    let v3 = Word::new([Felt::from_u32(3_u32); WORD_SIZE]);

    tree.insert(k1, v1).unwrap();
    tree.insert(k2, v2).unwrap();

    let mutations =
        tree.compute_mutations(vec![(k1, EMPTY_WORD), (k2, v1), (k3, v3)]).unwrap();

    // Round-trip the forward mutation set.
    let round_tripped =
        MutationSet::<SMT_DEPTH, Word, Word>::read_from_bytes(&mutations.to_bytes()).unwrap();
    assert_eq!(round_tripped, mutations, "deserialized mutations did not match original");

    // Round-trip the reversion mutation set as well.
    let revert = tree.apply_mutations_with_reversion(mutations).unwrap();
    let round_tripped =
        MutationSet::<SMT_DEPTH, Word, Word>::read_from_bytes(&revert.to_bytes()).unwrap();
    assert_eq!(round_tripped, revert, "deserialized mutations did not match original");
}
/// Tests that 2 key-value pairs stored in the same leaf have the same path
#[test]
fn test_smt_path_to_keys_in_same_leaf_are_equal() {
    // Both keys share the most significant felt, so they land in the same leaf.
    let raw = 0b_01101001_01101100_00011111_11111111_10010110_10010011_11100000_00000000_u64;
    let shared_msb = Felt::new(raw);
    let key_a: Word = Word::from([ONE, ONE, ONE, shared_msb]);
    let key_b: Word = Word::from([Felt::new(2), Felt::new(2), Felt::new(2), shared_msb]);

    let tree = Smt::with_entries([
        (key_a, Word::new([ONE; WORD_SIZE])),
        (key_b, Word::new([Felt::from_u32(2_u32); WORD_SIZE])),
    ])
    .unwrap();

    assert_eq!(tree.open(&key_a), tree.open(&key_b));
}
/// Tests that an empty leaf hashes to the empty word
#[test]
fn test_empty_leaf_hash() {
    // In the default tree every leaf is empty, including the one for the zero key.
    let tree = Smt::default();
    assert_eq!(tree.get_leaf(&Word::default()).hash(), EMPTY_WORD);
}
/// Tests that `get_value()` works as expected
#[test]
fn test_smt_get_value() {
    let key_1: Word = Word::from([ONE, ONE, ONE, ONE]);
    let key_2: Word = Word::from([2_u32, 2_u32, 2_u32, 2_u32]);
    let value_1 = Word::new([ONE; WORD_SIZE]);
    let value_2 = Word::new([Felt::from_u32(2_u32); WORD_SIZE]);
    let tree = Smt::with_entries([(key_1, value_1), (key_2, value_2)]).unwrap();

    // Stored keys return their values.
    assert_eq!(tree.get_value(&key_1), value_1);
    assert_eq!(tree.get_value(&key_2), value_2);

    // A key that was never inserted returns the empty word.
    let absent_key = Word::from([42_u32, 42_u32, 42_u32, 42_u32]);
    assert_eq!(tree.get_value(&absent_key), EMPTY_WORD);
}
/// Tests that `entries()` works as expected
#[test]
fn test_smt_entries() {
    let entries = [
        (Word::from([ONE, ONE, ONE, ONE]), Word::new([ONE; WORD_SIZE])),
        (Word::from([2_u32, 2_u32, 2_u32, 2_u32]), Word::new([Felt::from_u32(2_u32); WORD_SIZE])),
    ];
    let tree = Smt::with_entries(entries).unwrap();

    // Compare both sides as key-sorted lists so the comparison does not depend on
    // iteration order.
    let mut expected = Vec::from_iter(entries);
    expected.sort_by_key(|(k, _)| *k);
    let mut actual: Vec<_> = tree.entries().cloned().collect();
    actual.sort_by_key(|(k, _)| *k);

    assert_eq!(actual, expected);
}
/// Tests that `EMPTY_ROOT` constant generated in the `Smt` equals to the root of the empty tree of
/// depth 64
#[test]
fn test_smt_check_empty_root_constant() {
    // recompute the root of an empty depth-64 tree and compare against the constant
    assert_eq!(EmptySubtreeRoots::empty_hashes(64)[0], Smt::EMPTY_ROOT);
}
// SMT LEAF
// --------------------------------------------------------------------------------------------
/// An empty leaf survives a serialization round trip, even with trailing garbage bytes.
#[test]
fn test_empty_smt_leaf_serialization() {
    let leaf = SmtLeaf::new_empty(LeafIndex::new_max_depth(42));

    // Deserialization must ignore bytes past the encoded leaf.
    let mut bytes = leaf.to_bytes();
    bytes.extend([1, 2, 3, 4, 5]);

    assert_eq!(leaf, SmtLeaf::read_from_bytes(&bytes).unwrap());
}
/// A single-entry leaf survives a serialization round trip, even with trailing garbage bytes.
#[test]
fn test_single_smt_leaf_serialization() {
    let leaf = SmtLeaf::new_single(
        Word::from([10_u32, 11_u32, 12_u32, 13_u32]),
        Word::new([
            Felt::from_u32(1_u32),
            Felt::from_u32(2_u32),
            Felt::from_u32(3_u32),
            Felt::from_u32(4_u32),
        ]),
    );

    // Deserialization must ignore bytes past the encoded leaf.
    let mut bytes = leaf.to_bytes();
    bytes.extend([1, 2, 3, 4, 5]);

    assert_eq!(leaf, SmtLeaf::read_from_bytes(&bytes).unwrap());
}
/// A multiple-entry leaf survives a serialization round trip, even with trailing garbage bytes.
#[test]
fn test_multiple_smt_leaf_serialization_success() {
    // Both keys share the most significant felt (13), so they belong to the same leaf.
    let leaf = SmtLeaf::new_multiple(vec![
        (
            Word::from([10_u32, 11_u32, 12_u32, 13_u32]),
            Word::new([
                Felt::from_u32(1_u32),
                Felt::from_u32(2_u32),
                Felt::from_u32(3_u32),
                Felt::from_u32(4_u32),
            ]),
        ),
        (
            Word::from([100_u32, 101_u32, 102_u32, 13_u32]),
            Word::new([
                Felt::from_u32(11_u32),
                Felt::from_u32(12_u32),
                Felt::from_u32(13_u32),
                Felt::from_u32(14_u32),
            ]),
        ),
    ])
    .unwrap();

    // Deserialization must ignore bytes past the encoded leaf.
    let mut bytes = leaf.to_bytes();
    bytes.extend([1, 2, 3, 4, 5]);

    assert_eq!(leaf, SmtLeaf::read_from_bytes(&bytes).unwrap());
}
/// Test that creating a multiple leaf with exactly MAX_LEAF_ENTRIES works
/// and that constructing a leaf with MAX_LEAF_ENTRIES + 1 returns an error.
#[test]
fn test_max_leaf_entries_validation() {
    // All generated keys share the same most significant felt, so they share one leaf.
    let entry_at = |i: u64| {
        (Word::new([ONE, ONE, Felt::new(i), ONE]), Word::new([ONE, ONE, ONE, Felt::new(i)]))
    };
    let mut entries: Vec<_> = (0..MAX_LEAF_ENTRIES as u64).map(entry_at).collect();

    // Exactly at the limit: accepted.
    assert!(
        SmtLeaf::new_multiple(entries.clone()).is_ok(),
        "Should allow exactly MAX_LEAF_ENTRIES entries"
    );

    // One past the limit: rejected.
    entries.push(entry_at(MAX_LEAF_ENTRIES as u64));
    assert_matches!(
        SmtLeaf::new_multiple(entries).unwrap_err(),
        SmtLeafError::TooManyLeafEntries { .. },
        "should reject more than MAX_LEAF_ENTRIES entries"
    );
}
/// Tests that verify_presence returns InvalidKeyForProof when key maps to different leaf index
#[test]
fn test_smt_proof_error_invalid_key_for_proof() {
    let key = Word::from([ONE, ONE, ONE, Felt::new(42)]);
    let value = Word::new([ONE; WORD_SIZE]);
    let tree = Smt::with_entries([(key, value)]).unwrap();
    let proof = tree.open(&key);

    // A key with a different most significant felt maps to a different leaf index, so the
    // proof cannot vouch for it.
    let foreign_key = Word::from([ONE, ONE, ONE, Felt::new(999)]);
    assert_matches!(
        proof.verify_presence(&foreign_key, &value, &tree.root()),
        Err(SmtProofError::InvalidKeyForProof)
    );
}
/// Tests that verify_presence returns ValueMismatch when value doesn't match
#[test]
fn test_smt_proof_error_value_mismatch() {
    let key = Word::from([ONE, ONE, ONE, Felt::new(42)]);
    let stored_value = Word::new([ONE; WORD_SIZE]);
    let tree = Smt::with_entries([(key, stored_value)]).unwrap();
    let proof = tree.open(&key);

    // Right key, wrong value: the error reports both the claimed and the stored value.
    let claimed_value = Word::new([Felt::new(999); WORD_SIZE]);
    assert_matches!(
        proof.verify_presence(&key, &claimed_value, &tree.root()),
        Err(SmtProofError::ValueMismatch { expected, actual })
            if expected == claimed_value && actual == stored_value
    );
}
/// Tests that verify_presence returns ConflictingRoots when root doesn't match
#[test]
fn test_smt_proof_error_conflicting_roots() {
    let key = Word::from([ONE, ONE, ONE, Felt::new(42)]);
    let value = Word::new([ONE; WORD_SIZE]);
    let tree = Smt::with_entries([(key, value)]).unwrap();
    let proof = tree.open(&key);

    // Verifying against a bogus root reports both the requested and the computed root.
    let bogus_root = Word::new([Felt::new(999); WORD_SIZE]);
    assert_matches!(
        proof.verify_presence(&key, &value, &bogus_root),
        Err(SmtProofError::ConflictingRoots { expected_root, actual_root: got_root })
            if expected_root == bogus_root && got_root == tree.root()
    );
}
/// Tests that verify_unset returns Ok for keys with no value
#[test]
fn test_smt_proof_verify_unset_success() {
    // In the default (empty) tree every key is unset.
    let tree = Smt::default();
    let key = Word::from([ONE, ONE, ONE, Felt::new(42)]);
    tree.open(&key).verify_unset(&key, &tree.root()).unwrap();
}
/// Tests that verify_unset returns ValueMismatch when key has a value
#[test]
fn test_smt_proof_verify_unset_fails_when_value_exists() {
    let key = Word::from([ONE, ONE, ONE, Felt::new(42)]);
    let tree = Smt::with_entries([(key, Word::new([ONE; WORD_SIZE]))]).unwrap();
    let proof = tree.open(&key);

    // The key holds a value, so a non-membership claim must be rejected.
    assert_matches!(
        proof.verify_unset(&key, &tree.root()),
        Err(SmtProofError::ValueMismatch { .. })
    );
}
/// Tests that verify_absence returns Ok when the key has a different value
#[test]
fn test_smt_proof_verify_absence_success_different_value() {
    let key = Word::from([ONE, ONE, ONE, Felt::new(42)]);
    let stored_value = Word::new([ONE; WORD_SIZE]);
    let tree = Smt::with_entries([(key, stored_value)]).unwrap();
    let proof = tree.open(&key);

    // The key holds a different value, so this particular pair is absent.
    let other_value = Word::new([Felt::new(999); WORD_SIZE]);
    proof.verify_absence(&key, &other_value, &tree.root()).unwrap();
}
/// Tests that verify_absence returns Ok when the key is unset
#[test]
fn test_smt_proof_verify_absence_success_key_unset() {
    // In an empty tree the key is unset, so any non-empty pair is absent.
    let tree = Smt::default();
    let key = Word::from([ONE, ONE, ONE, Felt::new(42)]);
    let value = Word::new([ONE; WORD_SIZE]);
    tree.open(&key).verify_absence(&key, &value, &tree.root()).unwrap();
}
/// Tests that verify_absence returns ValuePresent when the key-value pair exists
#[test]
fn test_smt_proof_error_value_present() {
    let key = Word::from([ONE, ONE, ONE, Felt::new(42)]);
    let value = Word::new([ONE; WORD_SIZE]);
    let tree = Smt::with_entries([(key, value)]).unwrap();
    let proof = tree.open(&key);

    // The exact pair is stored, so claiming its absence fails with the pair echoed back.
    assert_matches!(
        proof.verify_absence(&key, &value, &tree.root()),
        Err(SmtProofError::ValuePresent { key: k, value: v }) if k == key && v == value
    );
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | true |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/full/error.rs | miden-crypto/src/merkle/smt/full/error.rs | use thiserror::Error;
use crate::{
Word,
merkle::smt::{LeafIndex, MAX_LEAF_ENTRIES, SMT_DEPTH},
};
// SMT LEAF ERROR
// =================================================================================================
/// Errors that can occur when constructing or validating SMT leaves.
#[derive(Debug, Error)]
pub enum SmtLeafError {
    /// Keys map to different leaf indices in a multiple-leaf structure.
    #[error(
        "multiple leaf requires all keys to map to the same leaf index but key1 {key_1} and key2 {key_2} map to different indices"
    )]
    InconsistentMultipleLeafKeys { key_1: Word, key_2: Word },
    /// A single leaf key maps to a different index than expected.
    //
    // NOTE: this doc comment was previously placed between the `#[error]` attribute and the
    // `InconsistentMultipleLeafKeys` variant above, attaching it to the wrong variant.
    #[error(
        "single leaf key {key} maps to leaf {actual_leaf_index} but was expected to map to leaf {expected_leaf_index}"
    )]
    InconsistentSingleLeafIndices {
        key: Word,
        expected_leaf_index: LeafIndex<SMT_DEPTH>,
        actual_leaf_index: LeafIndex<SMT_DEPTH>,
    },
    /// Supplied leaf index does not match the expected index for the provided keys.
    #[error(
        "supplied leaf index {leaf_index_supplied:?} does not match {leaf_index_from_keys:?} for multiple leaf"
    )]
    InconsistentMultipleLeafIndices {
        leaf_index_from_keys: LeafIndex<SMT_DEPTH>,
        leaf_index_supplied: LeafIndex<SMT_DEPTH>,
    },
    /// Multiple leaf requires at least two entries, but fewer were provided.
    #[error("multiple leaf requires at least two entries but only {0} were given")]
    MultipleLeafRequiresTwoEntries(usize),
    /// Multiple leaf contains more entries than the maximum allowed.
    #[error(
        "multiple leaf contains {actual} entries but the maximum allowed is {MAX_LEAF_ENTRIES}"
    )]
    TooManyLeafEntries { actual: usize },
}
// SMT PROOF ERROR
// =================================================================================================
/// Errors that can occur when validating SMT proofs.
#[derive(Debug, Error, PartialEq, Eq)]
pub enum SmtProofError {
    /// The length of the provided Merkle path is not [`SMT_DEPTH`].
    #[error("merkle path length {0} does not match SMT depth {SMT_DEPTH}")]
    InvalidMerklePathLength(usize),
    /// The key maps to a different leaf index than the proof's leaf.
    #[error("key maps to a different leaf index than the proof")]
    InvalidKeyForProof,
    /// The value does not match the value in the leaf for the given key.
    ///
    /// `expected` is the value the caller claimed; `actual` is the value found in the leaf.
    #[error("value mismatch: expected {expected}, got {actual}")]
    ValueMismatch { expected: Word, actual: Word },
    /// The computed root does not match the expected root.
    ///
    /// `expected_root` is the root the caller verified against; `actual_root` is the root
    /// computed from the proof.
    #[error("expected merkle root {expected_root} found {actual_root}")]
    ConflictingRoots { expected_root: Word, actual_root: Word },
    /// The key-value pair exists in the tree (returned when proving absence of the pair).
    #[error("key-value pair exists in the tree: key {key}, value {value}")]
    ValuePresent { key: Word, value: Word },
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/full/mod.rs | miden-crypto/src/merkle/smt/full/mod.rs | use alloc::{string::ToString, vec::Vec};
use super::{
EMPTY_WORD, EmptySubtreeRoots, InnerNode, InnerNodeInfo, InnerNodes, LeafIndex, MerkleError,
MutationSet, NodeIndex, SparseMerklePath, SparseMerkleTree, Word,
};
use crate::field::PrimeField64;
mod error;
pub use error::{SmtLeafError, SmtProofError};
mod leaf;
pub use leaf::SmtLeaf;
mod proof;
pub use proof::SmtProof;
use crate::utils::{ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable};
// Concurrent implementation
#[cfg(feature = "concurrent")]
pub(in crate::merkle::smt) mod concurrent;
#[cfg(test)]
mod tests;
// CONSTANTS
// ================================================================================================
/// The depth of the sparse Merkle tree.
///
/// All leaves in this SMT are located at depth 64.
pub const SMT_DEPTH: u8 = 64;
/// The maximum number of entries allowed in a multiple leaf.
pub const MAX_LEAF_ENTRIES: usize = 1024;
// SMT
// ================================================================================================
/// Container for the tree's leaves, specialized to [`SmtLeaf`].
type Leaves = super::Leaves<SmtLeaf>;
/// Sparse Merkle tree mapping 256-bit keys to 256-bit values. Both keys and values are represented
/// by 4 field elements.
///
/// All leaves sit at depth 64. The most significant element of the key is used to identify the leaf
/// to which the key maps.
///
/// A leaf is either empty, or holds one or more key-value pairs. An empty leaf hashes to the empty
/// word. Otherwise, a leaf hashes to the hash of its key-value pairs, ordered by key first, value
/// second.
///
/// ```text
/// depth
/// T 0 Root
/// │ . / \
/// │ 1 left right
/// │ . / \ / \
/// │
/// │ .. .. .. .. .. .. .. ..
/// │
/// │ 63
/// │ / \ / \ \
/// │ ↓ / \ / \ \
/// │ 64 Leaf₀ Leaf₁ Leaf₂ Leaf₃ ... Leaf₂⁶⁴₋₂³²
/// 0x0..0 0x0..1 0x0..2 0x0..3 0xFFFFFFFF00000000
///
/// The digest is 256 bits, or 4 field elements:
/// [elem₀, elem₁, elem₂, elem₃]
/// ↑
/// Most significant element determines leaf
/// index, mapping into the actual Leaf lookup
/// table where the values are stored.
///
/// Zooming into a leaf, i.e. Leaf₁:
/// ┌─────────────────────────────────────────────────┐
/// │ Leaf₁ (index: 0x0..1) │
/// ├─────────────────────────────────────────────────┤
/// │ Possible states: │
/// │ │
/// │ 1. Empty leaf: │
/// │ └─ hash = EMPTY_WORD │
/// │ │
/// │ 2. Single entry: │
/// │ └─ (key₁, value₁) │
/// │ └─ hash = H(key₁, value₁) │
/// │ │
/// │ 3. Multiple entries: │
/// │ └─ (key₁, value₁) │
/// │ └─ (key₂, value₂) │
/// │ └─ ... │
/// │ └─ hash = H(key₁, value₁, key₂, value₂, ...) │
/// └─────────────────────────────────────────────────┘
///
/// Leaf states:
/// - Empty: hashes to EMPTY_WORD
/// - Non-empty: contains (key, value) pairs
/// hash = H(key₁, value₁, key₂, value₂, ...)
/// ```
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct Smt {
    // Hash of the root node.
    root: Word,
    // Number of key-value entries stored in the tree — presumably across all leaves;
    // TODO(review): confirm against the update paths.
    num_entries: usize,
    // Leaf storage; being sparse, empty leaves are not stored (see `with_entries_sequential`).
    leaves: Leaves,
    // Inner-node storage; nodes equal to the empty-subtree node are not stored
    // (see `test_smt_dont_store_empty_subtrees`).
    inner_nodes: InnerNodes,
}
impl Smt {
// CONSTANTS
// --------------------------------------------------------------------------------------------
/// The default value used to compute the hash of empty leaves.
///
/// Re-exposed from this type's [`SparseMerkleTree`] trait implementation.
pub const EMPTY_VALUE: Word = <Self as SparseMerkleTree<SMT_DEPTH>>::EMPTY_VALUE;
// CONSTRUCTORS
// --------------------------------------------------------------------------------------------
/// Returns a new [Smt].
///
/// All leaves in the returned tree are set to [Self::EMPTY_VALUE].
pub fn new() -> Self {
    Self {
        root: *EmptySubtreeRoots::entry(SMT_DEPTH, 0),
        num_entries: 0,
        leaves: Default::default(),
        inner_nodes: Default::default(),
    }
}
/// Returns a new [Smt] instantiated with leaves set as specified by the provided entries.
///
/// If the `concurrent` feature is enabled, this function uses a parallel implementation to
/// process the entries efficiently, otherwise it defaults to the sequential implementation.
///
/// All leaves omitted from the entries list are set to [Self::EMPTY_VALUE].
///
/// # Errors
/// Returns an error if:
/// - the provided entries contain multiple values for the same key.
/// - inserting a key-value pair would exceed [`MAX_LEAF_ENTRIES`] (1024 entries) in a leaf.
pub fn with_entries(
    entries: impl IntoIterator<Item = (Word, Word)>,
) -> Result<Self, MerkleError> {
    // The two cfg branches are mutually exclusive; exactly one is compiled in.
    #[cfg(feature = "concurrent")]
    {
        Self::with_entries_concurrent(entries)
    }
    #[cfg(not(feature = "concurrent"))]
    {
        Self::with_entries_sequential(entries)
    }
}
/// Similar to `with_entries` but avoids the overhead of sorting if the entries are already
/// sorted.
///
/// This only applies if the "concurrent" feature is enabled. Without the feature, the behavior
/// is equivalent to `with_entries`.
///
/// # Errors
/// Returns an error if inserting a key-value pair would exceed [`MAX_LEAF_ENTRIES`] (1024
/// entries) in a leaf.
pub fn with_sorted_entries(
    entries: impl IntoIterator<Item = (Word, Word)>,
) -> Result<Self, MerkleError> {
    #[cfg(feature = "concurrent")]
    {
        Self::with_sorted_entries_concurrent(entries)
    }
    #[cfg(not(feature = "concurrent"))]
    {
        // No sorted-specific path without the feature; fall back to the sequential build.
        Self::with_entries_sequential(entries)
    }
}
/// Returns a new [Smt] instantiated with leaves set as specified by the provided entries.
///
/// This sequential implementation processes entries one at a time to build the tree.
/// All leaves omitted from the entries list are set to [Self::EMPTY_VALUE].
///
/// # Errors
/// Returns an error if:
/// - the provided entries contain multiple values for the same key.
/// - inserting a key-value pair would exceed [`MAX_LEAF_ENTRIES`] (1024 entries) in a leaf.
#[cfg(any(not(feature = "concurrent"), fuzzing, feature = "fuzzing", test))]
fn with_entries_sequential(
entries: impl IntoIterator<Item = (Word, Word)>,
) -> Result<Self, MerkleError> {
use alloc::collections::BTreeSet;
// create an empty tree
let mut tree = Self::new();
// This being a sparse data structure, the EMPTY_WORD is not assigned to the `BTreeMap`, so
// entries with the empty value need additional tracking.
let mut key_set_to_zero = BTreeSet::new();
for (key, value) in entries {
let old_value = tree.insert(key, value)?;
if old_value != EMPTY_WORD || key_set_to_zero.contains(&key) {
return Err(MerkleError::DuplicateValuesForIndex(
LeafIndex::<SMT_DEPTH>::from(key).value(),
));
}
if value == EMPTY_WORD {
key_set_to_zero.insert(key);
};
}
Ok(tree)
}
/// Returns a new [`Smt`] instantiated from already computed leaves and nodes.
///
/// This function performs minimal consistency checking. It is the caller's responsibility to
/// ensure the passed arguments are correct and consistent with each other.
///
/// # Panics
/// With debug assertions on, this function panics if `root` does not match the root node in
/// `inner_nodes`.
pub fn from_raw_parts(inner_nodes: InnerNodes, leaves: Leaves, root: Word) -> Self {
// Our particular implementation of `from_raw_parts()` never returns `Err`.
<Self as SparseMerkleTree<SMT_DEPTH>>::from_raw_parts(inner_nodes, leaves, root).unwrap()
}
// PUBLIC ACCESSORS
// --------------------------------------------------------------------------------------------
/// Returns the depth of the tree
pub const fn depth(&self) -> u8 {
SMT_DEPTH
}
/// Returns the root of the tree
pub fn root(&self) -> Word {
<Self as SparseMerkleTree<SMT_DEPTH>>::root(self)
}
/// Returns the number of non-empty leaves in this tree.
///
/// Note that this may return a different value from [Self::num_entries()] as a single leaf may
/// contain more than one key-value pair.
pub fn num_leaves(&self) -> usize {
self.leaves.len()
}
/// Returns the number of key-value pairs with non-default values in this tree.
///
/// Note that this may return a different value from [Self::num_leaves()] as a single leaf may
/// contain more than one key-value pair.
pub fn num_entries(&self) -> usize {
self.num_entries
}
/// Returns the leaf to which `key` maps
pub fn get_leaf(&self, key: &Word) -> SmtLeaf {
<Self as SparseMerkleTree<SMT_DEPTH>>::get_leaf(self, key)
}
/// Returns the value associated with `key`
pub fn get_value(&self, key: &Word) -> Word {
<Self as SparseMerkleTree<SMT_DEPTH>>::get_value(self, key)
}
/// Returns an opening of the leaf associated with `key`. Conceptually, an opening is a Merkle
/// path to the leaf, as well as the leaf itself.
pub fn open(&self, key: &Word) -> SmtProof {
<Self as SparseMerkleTree<SMT_DEPTH>>::open(self, key)
}
/// Returns a boolean value indicating whether the SMT is empty.
pub fn is_empty(&self) -> bool {
debug_assert_eq!(self.leaves.is_empty(), self.root == Self::EMPTY_ROOT);
self.root == Self::EMPTY_ROOT
}
// ITERATORS
// --------------------------------------------------------------------------------------------
/// Returns an iterator over the leaves of this [`Smt`] in arbitrary order.
pub fn leaves(&self) -> impl Iterator<Item = (LeafIndex<SMT_DEPTH>, &SmtLeaf)> {
self.leaves
.iter()
.map(|(leaf_index, leaf)| (LeafIndex::new_max_depth(*leaf_index), leaf))
}
/// Returns an iterator over the key-value pairs of this [Smt] in arbitrary order.
pub fn entries(&self) -> impl Iterator<Item = &(Word, Word)> {
self.leaves().flat_map(|(_, leaf)| leaf.entries())
}
/// Returns an iterator over the inner nodes of this [Smt].
pub fn inner_nodes(&self) -> impl Iterator<Item = InnerNodeInfo> + '_ {
self.inner_nodes.values().map(|e| InnerNodeInfo {
value: e.hash(),
left: e.left,
right: e.right,
})
}
/// Returns an iterator over the [`InnerNode`] and the respective [`NodeIndex`] of the [`Smt`].
pub fn inner_node_indices(&self) -> impl Iterator<Item = (NodeIndex, InnerNode)> + '_ {
self.inner_nodes.iter().map(|(idx, inner)| (*idx, inner.clone()))
}
// STATE MUTATORS
// --------------------------------------------------------------------------------------------
/// Inserts a value at the specified key, returning the previous value associated with that key.
/// Recall that by definition, any key that hasn't been updated is associated with
/// [`Self::EMPTY_VALUE`].
///
/// This also recomputes all hashes between the leaf (associated with the key) and the root,
/// updating the root itself.
///
/// # Errors
/// Returns an error if inserting the key-value pair would exceed [`MAX_LEAF_ENTRIES`] (1024
/// entries) in the leaf.
pub fn insert(&mut self, key: Word, value: Word) -> Result<Word, MerkleError> {
<Self as SparseMerkleTree<SMT_DEPTH>>::insert(self, key, value)
}
/// Computes what changes are necessary to insert the specified key-value pairs into this Merkle
/// tree, allowing for validation before applying those changes.
///
/// This method returns a [`MutationSet`], which contains all the information for inserting
/// `kv_pairs` into this Merkle tree already calculated, including the new root hash, which can
/// be queried with [`MutationSet::root()`]. Once a mutation set is returned,
/// [`Smt::apply_mutations()`] can be called in order to commit these changes to the Merkle
/// tree, or [`drop()`] to discard them.
///
/// # Example
/// ```
/// # use miden_crypto::{Felt, Word};
/// # use miden_crypto::merkle::{EmptySubtreeRoots, smt::{Smt, SMT_DEPTH}};
/// let mut smt = Smt::new();
/// let pair = (Word::default(), Word::default());
/// let mutations = smt.compute_mutations(vec![pair]).unwrap();
/// assert_eq!(mutations.root(), *EmptySubtreeRoots::entry(SMT_DEPTH, 0));
/// smt.apply_mutations(mutations).unwrap();
/// assert_eq!(smt.root(), *EmptySubtreeRoots::entry(SMT_DEPTH, 0));
/// ```
pub fn compute_mutations(
&self,
kv_pairs: impl IntoIterator<Item = (Word, Word)>,
) -> Result<MutationSet<SMT_DEPTH, Word, Word>, MerkleError> {
#[cfg(feature = "concurrent")]
{
self.compute_mutations_concurrent(kv_pairs)
}
#[cfg(not(feature = "concurrent"))]
{
<Self as SparseMerkleTree<SMT_DEPTH>>::compute_mutations(self, kv_pairs)
}
}
/// Applies the prospective mutations computed with [`Smt::compute_mutations()`] to this tree.
///
/// # Errors
/// If `mutations` was computed on a tree with a different root than this one, returns
/// [`MerkleError::ConflictingRoots`] with a two-item [`Vec`]. The first item is the root hash
/// the `mutations` were computed against, and the second item is the actual current root of
/// this tree.
pub fn apply_mutations(
&mut self,
mutations: MutationSet<SMT_DEPTH, Word, Word>,
) -> Result<(), MerkleError> {
<Self as SparseMerkleTree<SMT_DEPTH>>::apply_mutations(self, mutations)
}
/// Applies the prospective mutations computed with [`Smt::compute_mutations()`] to this tree
/// and returns the reverse mutation set.
///
/// Applying the reverse mutation sets to the updated tree will revert the changes.
///
/// # Errors
/// If `mutations` was computed on a tree with a different root than this one, returns
/// [`MerkleError::ConflictingRoots`] with a two-item [`Vec`]. The first item is the root hash
/// the `mutations` were computed against, and the second item is the actual current root of
/// this tree.
pub fn apply_mutations_with_reversion(
&mut self,
mutations: MutationSet<SMT_DEPTH, Word, Word>,
) -> Result<MutationSet<SMT_DEPTH, Word, Word>, MerkleError> {
<Self as SparseMerkleTree<SMT_DEPTH>>::apply_mutations_with_reversion(self, mutations)
}
// HELPERS
// --------------------------------------------------------------------------------------------
/// Inserts `value` at leaf index pointed to by `key`. `value` is guaranteed to not be the empty
/// value, such that this is indeed an insertion.
///
/// # Errors
/// Returns an error if inserting the key-value pair would exceed [`MAX_LEAF_ENTRIES`] (1024
/// entries) in the leaf.
fn perform_insert(&mut self, key: Word, value: Word) -> Result<Option<Word>, MerkleError> {
debug_assert_ne!(value, Self::EMPTY_VALUE);
let leaf_index: LeafIndex<SMT_DEPTH> = Self::key_to_leaf_index(&key);
match self.leaves.get_mut(&leaf_index.value()) {
Some(leaf) => {
let prev_entries = leaf.num_entries();
let result = leaf.insert(key, value).map_err(|e| match e {
SmtLeafError::TooManyLeafEntries { actual } => {
MerkleError::TooManyLeafEntries { actual }
},
other => panic!("unexpected SmtLeaf::insert error: {:?}", other),
})?;
let current_entries = leaf.num_entries();
self.num_entries += current_entries - prev_entries;
Ok(result)
},
None => {
self.leaves.insert(leaf_index.value(), SmtLeaf::Single((key, value)));
self.num_entries += 1;
Ok(None)
},
}
}
/// Removes key-value pair at leaf index pointed to by `key` if it exists.
fn perform_remove(&mut self, key: Word) -> Option<Word> {
let leaf_index: LeafIndex<SMT_DEPTH> = Self::key_to_leaf_index(&key);
if let Some(leaf) = self.leaves.get_mut(&leaf_index.value()) {
let prev_entries = leaf.num_entries();
let (old_value, is_empty) = leaf.remove(key);
let current_entries = leaf.num_entries();
self.num_entries -= prev_entries - current_entries;
if is_empty {
self.leaves.remove(&leaf_index.value());
}
old_value
} else {
// there's nothing stored at the leaf; nothing to update
None
}
}
}
impl SparseMerkleTree<SMT_DEPTH> for Smt {
    type Key = Word;
    type Value = Word;
    type Leaf = SmtLeaf;
    type Opening = SmtProof;

    const EMPTY_VALUE: Self::Value = EMPTY_WORD;
    const EMPTY_ROOT: Word = *EmptySubtreeRoots::entry(SMT_DEPTH, 0);

    fn from_raw_parts(
        inner_nodes: InnerNodes,
        leaves: Leaves,
        root: Word,
    ) -> Result<Self, MerkleError> {
        // Debug-only sanity check: the provided `root` must match the root node stored in
        // `inner_nodes` (or the empty-tree root if no root node is stored).
        if cfg!(debug_assertions) {
            let root_node_hash = inner_nodes
                .get(&NodeIndex::root())
                .map(InnerNode::hash)
                .unwrap_or(Self::EMPTY_ROOT);
            assert_eq!(root_node_hash, root);
        }
        // Recompute the entry counter from the provided leaves.
        let num_entries = leaves.values().map(|leaf| leaf.num_entries()).sum();
        Ok(Self { root, inner_nodes, leaves, num_entries })
    }

    fn root(&self) -> Word {
        self.root
    }

    fn set_root(&mut self, root: Word) {
        self.root = root;
    }

    fn get_inner_node(&self, index: NodeIndex) -> InnerNode {
        // Nodes absent from the map are, by construction, empty-subtree nodes.
        self.inner_nodes
            .get(&index)
            .cloned()
            .unwrap_or_else(|| EmptySubtreeRoots::get_inner_node(SMT_DEPTH, index.depth()))
    }

    fn insert_inner_node(&mut self, index: NodeIndex, inner_node: InnerNode) -> Option<InnerNode> {
        // Storing an empty-subtree node would defeat sparseness, so treat it as a removal.
        if inner_node == EmptySubtreeRoots::get_inner_node(SMT_DEPTH, index.depth()) {
            self.remove_inner_node(index)
        } else {
            self.inner_nodes.insert(index, inner_node)
        }
    }

    fn remove_inner_node(&mut self, index: NodeIndex) -> Option<InnerNode> {
        self.inner_nodes.remove(&index)
    }

    fn insert_value(
        &mut self,
        key: Self::Key,
        value: Self::Value,
    ) -> Result<Option<Self::Value>, MerkleError> {
        // inserting an `EMPTY_VALUE` is equivalent to removing any value associated with `key`
        if value != Self::EMPTY_VALUE {
            self.perform_insert(key, value)
        } else {
            Ok(self.perform_remove(key))
        }
    }

    fn get_value(&self, key: &Self::Key) -> Self::Value {
        let leaf_pos = LeafIndex::<SMT_DEPTH>::from(*key).value();
        match self.leaves.get(&leaf_pos) {
            // The leaf may hold multiple entries; absent keys map to the empty value.
            Some(leaf) => leaf.get_value(key).unwrap_or_default(),
            None => EMPTY_WORD,
        }
    }

    fn get_leaf(&self, key: &Word) -> Self::Leaf {
        let leaf_pos = LeafIndex::<SMT_DEPTH>::from(*key).value();
        match self.leaves.get(&leaf_pos) {
            Some(leaf) => leaf.clone(),
            None => SmtLeaf::new_empty((*key).into()),
        }
    }

    fn hash_leaf(leaf: &Self::Leaf) -> Word {
        leaf.hash()
    }

    fn construct_prospective_leaf(
        &self,
        mut existing_leaf: SmtLeaf,
        key: &Word,
        value: &Word,
    ) -> Result<SmtLeaf, SmtLeafError> {
        debug_assert_eq!(existing_leaf.index(), Self::key_to_leaf_index(key));
        // Note: this operates on a copy of the leaf and does not mutate the tree itself.
        match existing_leaf {
            SmtLeaf::Empty(_) => Ok(SmtLeaf::new_single(*key, *value)),
            _ => {
                // An empty value denotes removal; anything else is an insert/update.
                if *value != EMPTY_WORD {
                    existing_leaf.insert(*key, *value)?;
                } else {
                    existing_leaf.remove(*key);
                }
                Ok(existing_leaf)
            },
        }
    }

    fn key_to_leaf_index(key: &Word) -> LeafIndex<SMT_DEPTH> {
        // The most significant felt of the key determines the leaf position.
        let most_significant_felt = key[3];
        LeafIndex::new_max_depth(most_significant_felt.as_canonical_u64())
    }

    fn path_and_leaf_to_opening(path: SparseMerklePath, leaf: SmtLeaf) -> SmtProof {
        SmtProof::new_unchecked(path, leaf)
    }
}
impl Default for Smt {
fn default() -> Self {
Self::new()
}
}
// CONVERSIONS
// ================================================================================================
impl From<Word> for LeafIndex<SMT_DEPTH> {
fn from(value: Word) -> Self {
// We use the most significant `Felt` of a `Word` as the leaf index.
Self::new_max_depth(value[3].as_canonical_u64())
}
}
// SERIALIZATION
// ================================================================================================
impl Serializable for Smt {
    /// Serializes this tree as the number of key-value entries followed by each
    /// `(key, value)` pair. The tree structure itself is not serialized; it is
    /// rebuilt from the entries on deserialization.
    fn write_into<W: ByteWriter>(&self, target: &mut W) {
        // Write the number of key-value entries in this Smt. Use the maintained O(1)
        // counter instead of `self.entries().count()`, which would iterate every leaf
        // an extra time just to count them. (Note: this is the entry count, not the
        // leaf count -- a single leaf may contain multiple entries.)
        target.write_usize(self.num_entries());
        // Write each (key, value) pair
        for (key, value) in self.entries() {
            target.write(key);
            target.write(value);
        }
    }

    /// Returns the expected serialized size in bytes, matching `write_into()`.
    fn get_size_hint(&self) -> usize {
        let entries_count = self.num_entries();
        // Each entry is the size of a digest plus a word.
        entries_count.get_size_hint()
            + entries_count * (Word::SERIALIZED_SIZE + EMPTY_WORD.get_size_hint())
    }
}
impl Deserializable for Smt {
    /// Reads an entry count followed by that many `(key, value)` pairs, then rebuilds
    /// the tree from them. Inverse of [`Serializable::write_into`].
    fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
        // Read the number of key-value entries serialized for this Smt.
        let num_entries = source.read_usize()?;

        // Read each (key, value) pair in serialization order.
        let mut entries = Vec::with_capacity(num_entries);
        for _ in 0..num_entries {
            let key = source.read()?;
            let value = source.read()?;
            entries.push((key, value));
        }

        // Rebuild the tree; surface any construction failure (e.g. duplicate keys) as a
        // deserialization error.
        Self::with_entries(entries)
            .map_err(|err| DeserializationError::InvalidValue(err.to_string()))
    }
}
// FUZZING
// ================================================================================================
// Fuzzing-only entry points: these re-export the private sequential implementations so a
// fuzz harness can compare them against the concurrent code paths.
#[cfg(any(fuzzing, feature = "fuzzing"))]
impl Smt {
    /// Fuzzing wrapper around the private sequential constructor.
    pub fn fuzz_with_entries_sequential(
        entries: impl IntoIterator<Item = (Word, Word)>,
    ) -> Result<Smt, MerkleError> {
        Self::with_entries_sequential(entries)
    }

    /// Fuzzing wrapper around the sequential `compute_mutations` implementation.
    pub fn fuzz_compute_mutations_sequential(
        &self,
        kv_pairs: impl IntoIterator<Item = (Word, Word)>,
    ) -> MutationSet<SMT_DEPTH, Word, Word> {
        <Self as SparseMerkleTree<SMT_DEPTH>>::compute_mutations(self, kv_pairs)
            .expect("Failed to compute mutations in fuzzing")
    }
}
// TESTS
// ================================================================================================
#[cfg(test)]
use crate::Felt;
// Verifies that serialization round-trips for both an empty tree and a populated one,
// and that `get_size_hint()` matches the actual serialized length.
#[test]
fn test_smt_serialization_deserialization() {
    // Smt for default types (empty map)
    let smt_default = Smt::default();
    let bytes = smt_default.to_bytes();
    assert_eq!(smt_default, Smt::read_from_bytes(&bytes).unwrap());
    assert_eq!(bytes.len(), smt_default.get_size_hint());

    // Smt with values
    let smt_leaves_2: [(Word, Word); 2] = [
        (
            Word::new([Felt::new(105), Felt::new(106), Felt::new(107), Felt::new(108)]),
            [Felt::new(5_u64), Felt::new(6_u64), Felt::new(7_u64), Felt::new(8_u64)].into(),
        ),
        (
            Word::new([Felt::new(101), Felt::new(102), Felt::new(103), Felt::new(104)]),
            [Felt::new(1_u64), Felt::new(2_u64), Felt::new(3_u64), Felt::new(4_u64)].into(),
        ),
    ];
    let smt = Smt::with_entries(smt_leaves_2).unwrap();
    let bytes = smt.to_bytes();
    assert_eq!(smt, Smt::read_from_bytes(&bytes).unwrap());
    assert_eq!(bytes.len(), smt.get_size_hint());
}
// Verifies that the sorted-entries constructor produces a tree identical to the
// general-purpose `with_entries` constructor when given already-sorted input.
#[test]
fn smt_with_sorted_entries() {
    // Smt with sorted values (keys are in ascending leaf-index order)
    let smt_leaves_2: [(Word, Word); 2] = [
        (
            Word::new([Felt::new(101), Felt::new(102), Felt::new(103), Felt::new(104)]),
            [Felt::new(1_u64), Felt::new(2_u64), Felt::new(3_u64), Felt::new(4_u64)].into(),
        ),
        (
            Word::new([Felt::new(105), Felt::new(106), Felt::new(107), Felt::new(108)]),
            [Felt::new(5_u64), Felt::new(6_u64), Felt::new(7_u64), Felt::new(8_u64)].into(),
        ),
    ];
    let smt = Smt::with_sorted_entries(smt_leaves_2).unwrap();
    let expected_smt = Smt::with_entries(smt_leaves_2).unwrap();
    assert_eq!(smt, expected_smt);
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/full/concurrent/tests.rs | miden-crypto/src/merkle/smt/full/concurrent/tests.rs | #![cfg(feature = "std")]
use alloc::{
collections::{BTreeMap, BTreeSet},
vec::Vec,
};
use assert_matches::assert_matches;
use proptest::prelude::*;
use rand::{Rng, prelude::IteratorRandom, rng};
use super::{
COLS_PER_SUBTREE, InnerNode, Map, NodeIndex, NodeMutations, PairComputations, SMT_DEPTH,
SUBTREE_DEPTH, Smt, SmtLeaf, SparseMerkleTree, SubtreeLeaf, SubtreeLeavesIter, Word,
build_subtree,
};
use crate::{
EMPTY_WORD, Felt, ONE, ZERO,
merkle::{MerkleError, smt::LeafIndex},
};
/// Projects an [`SmtLeaf`] into the lightweight [`SubtreeLeaf`] form used by the
/// subtree-building helpers: its column (leaf index) and its hash.
fn smtleaf_to_subtree_leaf(leaf: &SmtLeaf) -> SubtreeLeaf {
    let col = leaf.index().index.value();
    let hash = leaf.hash();
    SubtreeLeaf { col, hash }
}
// Verifies that `Smt::sorted_pairs_to_leaves` groups sorted (key, value) pairs into the
// correct per-subtree leaf lists, handles leaf-index collisions (multiple keys in one
// leaf), and produces leaf nodes matching a sequentially-built control tree.
#[test]
fn test_sorted_pairs_to_leaves() {
    let entries: Vec<(Word, Word)> = vec![
        // Subtree 0.
        ([ONE, ONE, ONE, Felt::new(16)].into(), [ONE; 4].into()),
        ([ONE, ONE, ONE, Felt::new(17)].into(), [ONE; 4].into()),
        // Leaf index collision.
        ([ONE, ONE, Felt::new(10), Felt::new(20)].into(), [ONE; 4].into()),
        ([ONE, ONE, Felt::new(20), Felt::new(20)].into(), [ONE; 4].into()),
        // Subtree 1. Normal single leaf again.
        ([ONE, ONE, ONE, Felt::new(400)].into(), [ONE; 4].into()), // Subtree boundary.
        ([ONE, ONE, ONE, Felt::new(401)].into(), [ONE; 4].into()),
        // Subtree 2. Another normal leaf.
        ([ONE, ONE, ONE, Felt::new(1024)].into(), [ONE; 4].into()),
    ];

    // Sequentially-built tree used as the source of truth throughout this test.
    let control = Smt::with_entries_sequential(entries.clone()).unwrap();

    // Build the expected leaves by consuming `entries` in order; the two colliding keys
    // collapse into one multi-entry leaf.
    let control_leaves: Vec<SmtLeaf> = {
        let mut entries_iter = entries.iter().cloned();
        let mut next_entry = || entries_iter.next().unwrap();
        let control_leaves = vec![
            // Subtree 0.
            SmtLeaf::Single(next_entry()),
            SmtLeaf::Single(next_entry()),
            SmtLeaf::new_multiple(vec![next_entry(), next_entry()]).unwrap(),
            // Subtree 1.
            SmtLeaf::Single(next_entry()),
            SmtLeaf::Single(next_entry()),
            // Subtree 2.
            SmtLeaf::Single(next_entry()),
        ];
        assert_eq!(entries_iter.next(), None);
        control_leaves
    };

    // Group the expected leaves into their subtrees.
    let control_subtree_leaves: Vec<Vec<SubtreeLeaf>> = {
        let mut control_leaves_iter = control_leaves.iter();
        let mut next_leaf = || control_leaves_iter.next().unwrap();
        let control_subtree_leaves: Vec<Vec<SubtreeLeaf>> = [
            // Subtree 0.
            vec![next_leaf(), next_leaf(), next_leaf()],
            // Subtree 1.
            vec![next_leaf(), next_leaf()],
            // Subtree 2.
            vec![next_leaf()],
        ]
        .map(|subtree| subtree.into_iter().map(smtleaf_to_subtree_leaf).collect())
        .to_vec();
        assert_eq!(control_leaves_iter.next(), None);
        control_subtree_leaves
    };

    let subtrees: PairComputations<u64, SmtLeaf> = Smt::sorted_pairs_to_leaves(entries).unwrap();
    // This will check that the hashes, columns, and subtree assignments all match.
    assert_eq!(subtrees.leaves, control_subtree_leaves);

    // Flattening and re-separating out the leaves into subtrees should have the same result.
    let mut all_leaves: Vec<SubtreeLeaf> = subtrees.leaves.clone().into_iter().flatten().collect();
    let re_grouped: Vec<Vec<_>> = SubtreeLeavesIter::from_leaves(&mut all_leaves).collect();
    assert_eq!(subtrees.leaves, re_grouped);

    // Then finally we might as well check the computed leaf nodes too.
    let control_leaves: BTreeMap<u64, SmtLeaf> = control
        .leaves()
        .map(|(index, value)| (index.index.value(), value.clone()))
        .collect();

    for (column, test_leaf) in subtrees.nodes {
        if test_leaf.is_empty() {
            continue;
        }
        let control_leaf = control_leaves
            .get(&column)
            .unwrap_or_else(|| panic!("no leaf node found for column {column}"));
        assert_eq!(control_leaf, &test_leaf);
    }
}
// Helper for the below tests. Generates `pair_count` entries with distinct keys; the key
// embeds both the sequence number `i` and the derived leaf index, and the value embeds `i`.
fn generate_entries(pair_count: u64) -> Vec<(Word, Word)> {
    (0..pair_count)
        .map(|i| {
            // NOTE(review): (i / pair_count) * pair_count rounds back to ~i in f64, so this
            // looks like a leftover from a different scaling factor -- confirm intent before
            // simplifying to `i` (the `as u64` truncation could differ on rounding edges).
            let leaf_index = ((i as f64 / pair_count as f64) * (pair_count as f64)) as u64;
            let key = Word::new([ONE, ONE, Felt::new(i), Felt::new(leaf_index)]);
            let value = Word::new([ONE, ONE, ONE, Felt::new(i)]);
            (key, value)
        })
        .collect()
}
// Picks `updates` random keys from `entries` and assigns each either the empty word
// (~20% of the time, i.e. a removal) or a fresh random value. Returns the updates sorted
// by leaf index, as required by the sorted-pairs code paths under test.
fn generate_updates(entries: Vec<(Word, Word)>, updates: usize) -> Vec<(Word, Word)> {
    // Probability that a selected key is turned into a removal (empty value).
    const REMOVAL_PROBABILITY: f64 = 0.2;
    let mut rng = rng();

    // Assertion to ensure input keys are unique
    assert!(
        entries.iter().map(|(key, _)| key).collect::<BTreeSet<_>>().len() == entries.len(),
        "Input entries contain duplicate keys!"
    );

    let mut sorted_entries: Vec<(Word, Word)> = entries
        .into_iter()
        .choose_multiple(&mut rng, updates)
        .into_iter()
        .map(|(key, _)| {
            let value = if rng.random_bool(REMOVAL_PROBABILITY) {
                EMPTY_WORD
            } else {
                Word::new([ONE, ONE, ONE, Felt::new(rng.random())])
            };
            (key, value)
        })
        .collect();
    sorted_entries.sort_by_key(|(key, _)| Smt::key_to_leaf_index(key).value());
    sorted_entries
}
// Verifies `build_subtree()` on exactly one subtree's worth of leaves: every computed
// inner node and the returned subtree root must match a sequentially-built control tree.
#[test]
fn test_single_subtree() {
    // A single subtree's worth of leaves.
    const PAIR_COUNT: u64 = COLS_PER_SUBTREE;
    let entries = generate_entries(PAIR_COUNT);
    let control = Smt::with_entries_sequential(entries.clone()).unwrap();

    // `entries` should already be sorted by nature of how we constructed it.
    let leaves = Smt::sorted_pairs_to_leaves(entries).unwrap().leaves;
    let leaves = leaves.into_iter().next().unwrap();

    let (first_subtree, subtree_root) = build_subtree(leaves, SMT_DEPTH, SMT_DEPTH);
    assert!(!first_subtree.is_empty());

    // The inner nodes computed from that subtree should match the nodes in our control tree.
    for (index, node) in first_subtree.into_iter() {
        let control = control.get_inner_node(index);
        assert_eq!(
            control, node,
            "subtree-computed node at index {index:?} does not match control",
        );
    }

    // The root returned should also match the equivalent node in the control tree.
    let control_root_index =
        NodeIndex::new(SMT_DEPTH - SUBTREE_DEPTH, subtree_root.col).expect("Valid root index");
    let control_root_node = control.get_inner_node(control_root_index);
    let control_hash = control_root_node.hash();
    assert_eq!(
        control_hash, subtree_root.hash,
        "Subtree-computed root at index {control_root_index:?} does not match control"
    );
}
// Test that not just can we compute a subtree correctly, but we can feed the results of one
// subtree into computing another. In other words, test that `build_subtree()` is correctly
// composable.
#[test]
fn test_two_subtrees() {
    // Two subtrees' worth of leaves.
    const PAIR_COUNT: u64 = COLS_PER_SUBTREE * 2;
    let entries = generate_entries(PAIR_COUNT);
    let control = Smt::with_entries_sequential(entries.clone()).unwrap();

    let PairComputations { leaves, .. } = Smt::sorted_pairs_to_leaves(entries).unwrap();
    // With two subtrees' worth of leaves, we should have exactly two subtrees.
    let [first, second]: [Vec<_>; 2] = leaves.try_into().unwrap();
    assert_eq!(first.len() as u64, PAIR_COUNT / 2);
    assert_eq!(first.len(), second.len());

    let mut current_depth = SMT_DEPTH;
    let mut next_leaves: Vec<SubtreeLeaf> = Default::default();

    // Build each bottom-level subtree; their roots become the leaves of the next level up.
    let (first_nodes, first_root) = build_subtree(first, SMT_DEPTH, current_depth);
    next_leaves.push(first_root);
    let (second_nodes, second_root) = build_subtree(second, SMT_DEPTH, current_depth);
    next_leaves.push(second_root);

    // All new inner nodes + the new subtree-leaves should be 512, for one depth-cycle.
    let total_computed = first_nodes.len() + second_nodes.len() + next_leaves.len();
    assert_eq!(total_computed as u64, PAIR_COUNT);

    // Verify the computed nodes of both subtrees.
    let computed_nodes = first_nodes.clone().into_iter().chain(second_nodes);
    for (index, test_node) in computed_nodes {
        let control_node = control.get_inner_node(index);
        assert_eq!(
            control_node, test_node,
            "subtree-computed node at index {index:?} does not match control",
        );
    }

    // Compose: feed the two subtree roots into the next depth-cycle.
    current_depth -= SUBTREE_DEPTH;
    let (nodes, root_leaf) = build_subtree(next_leaves, SMT_DEPTH, current_depth);
    assert_eq!(nodes.len(), SUBTREE_DEPTH as usize);
    assert_eq!(root_leaf.col, 0);

    for (index, test_node) in nodes {
        let control_node = control.get_inner_node(index);
        assert_eq!(
            control_node, test_node,
            "subtree-computed node at index {index:?} does not match control",
        );
    }

    let index = NodeIndex::new(current_depth - SUBTREE_DEPTH, root_leaf.col).unwrap();
    let control_root = control.get_inner_node(index).hash();
    assert_eq!(control_root, root_leaf.hash, "Root mismatch");
}
// Drives the full bottom-up subtree construction sequentially across all depth-cycles and
// checks every leaf, every inner node, and the final root against a sequentially-built
// control tree.
#[test]
fn test_singlethreaded_subtrees() {
    const PAIR_COUNT: u64 = COLS_PER_SUBTREE * 64;
    let entries = generate_entries(PAIR_COUNT);
    let control = Smt::with_entries_sequential(entries.clone()).unwrap();

    let mut accumulated_nodes: BTreeMap<NodeIndex, InnerNode> = Default::default();
    let PairComputations {
        leaves: mut leaf_subtrees,
        nodes: test_leaves,
    } = Smt::sorted_pairs_to_leaves(entries).unwrap();

    // Walk from the bottom of the tree to the top, one SUBTREE_DEPTH-sized cycle at a time.
    for current_depth in (SUBTREE_DEPTH..=SMT_DEPTH).step_by(SUBTREE_DEPTH as usize).rev() {
        // There's no flat_map_unzip(), so this is the best we can do.
        let (nodes, mut subtree_roots): (Vec<Map<_, _>>, Vec<SubtreeLeaf>) = leaf_subtrees
            .into_iter()
            .enumerate()
            .map(|(i, subtree)| {
                // Pre-assertions.
                assert!(
                    subtree.is_sorted(),
                    "subtree {i} at bottom-depth {current_depth} is not sorted",
                );
                assert!(
                    !subtree.is_empty(),
                    "subtree {i} at bottom-depth {current_depth} is empty!",
                );
                // Do actual things.
                let (nodes, subtree_root) = build_subtree(subtree, SMT_DEPTH, current_depth);
                // Post-assertions.
                for (&index, test_node) in nodes.iter() {
                    let control_node = control.get_inner_node(index);
                    assert_eq!(
                        test_node, &control_node,
                        "depth {current_depth} subtree {i}: test node does not match control at index {index:?}",
                    );
                }
                (nodes, subtree_root)
            })
            .unzip();
        // Update state between each depth iteration: this cycle's subtree roots become the
        // next cycle's leaves.
        leaf_subtrees = SubtreeLeavesIter::from_leaves(&mut subtree_roots).collect();
        accumulated_nodes.extend(nodes.into_iter().flatten());
        assert!(!leaf_subtrees.is_empty(), "on depth {current_depth}");
    }

    // Make sure the true leaves match, first checking length and then checking each individual
    // leaf.
    let control_leaves: BTreeMap<_, _> = control.leaves().collect();
    let control_leaves_len = control_leaves.len();
    let test_leaves_len = test_leaves.len();
    assert_eq!(test_leaves_len, control_leaves_len);
    for (col, ref test_leaf) in test_leaves {
        let index = LeafIndex::new_max_depth(col);
        let &control_leaf = control_leaves.get(&index).unwrap();
        assert_eq!(test_leaf, control_leaf, "test leaf at column {col} does not match control");
    }

    // Make sure the inner nodes match, checking length first and then each individual leaf.
    let control_nodes_len = control.inner_nodes().count();
    let test_nodes_len = accumulated_nodes.len();
    assert_eq!(test_nodes_len, control_nodes_len);
    for (index, test_node) in accumulated_nodes.clone() {
        let control_node = control.get_inner_node(index);
        assert_eq!(test_node, control_node, "test node does not match control at {index:?}");
    }

    // After the last iteration of the above for loop, we should have the new root node actually
    // in two places: one in `accumulated_nodes`, and the other as the "next leaves" return from
    // `build_subtree()`. So let's check both!
    let control_root = control.get_inner_node(NodeIndex::root());

    // That for loop should have left us with only one leaf subtree...
    let [leaf_subtree]: [Vec<_>; 1] = leaf_subtrees.try_into().unwrap();
    // which itself contains only one 'leaf'...
    let [root_leaf]: [SubtreeLeaf; 1] = leaf_subtree.try_into().unwrap();
    // which matches the expected root.
    assert_eq!(control.root(), root_leaf.hash);

    // Likewise `accumulated_nodes` should contain a node at the root index...
    assert!(accumulated_nodes.contains_key(&NodeIndex::root()));
    // and it should match our actual root.
    let test_root = accumulated_nodes.get(&NodeIndex::root()).unwrap();
    assert_eq!(control_root, *test_root);
    // And of course the root we got from each place should match.
    assert_eq!(control.root(), root_leaf.hash);
}
/// The parallel version of `test_singlethreaded_subtree()`.
///
/// Identical to the sequential test except that each depth-cycle processes its subtrees
/// with rayon's `into_par_iter()`, checking that parallel subtree construction yields the
/// same nodes and root as the sequential control tree.
#[test]
fn test_multithreaded_subtrees() {
    use rayon::prelude::*;
    const PAIR_COUNT: u64 = COLS_PER_SUBTREE * 64;
    let entries = generate_entries(PAIR_COUNT);
    let control = Smt::with_entries_sequential(entries.clone()).unwrap();

    let mut accumulated_nodes: BTreeMap<NodeIndex, InnerNode> = Default::default();
    let PairComputations {
        leaves: mut leaf_subtrees,
        nodes: test_leaves,
    } = Smt::sorted_pairs_to_leaves(entries).unwrap();

    // Walk from the bottom of the tree to the top, one SUBTREE_DEPTH-sized cycle at a time,
    // building the subtrees of each cycle in parallel.
    for current_depth in (SUBTREE_DEPTH..=SMT_DEPTH).step_by(SUBTREE_DEPTH as usize).rev() {
        let (nodes, mut subtree_roots): (Vec<Map<_, _>>, Vec<SubtreeLeaf>) = leaf_subtrees
            .into_par_iter()
            .enumerate()
            .map(|(i, subtree)| {
                // Pre-assertions.
                assert!(
                    subtree.is_sorted(),
                    "subtree {i} at bottom-depth {current_depth} is not sorted",
                );
                assert!(
                    !subtree.is_empty(),
                    "subtree {i} at bottom-depth {current_depth} is empty!",
                );
                let (nodes, subtree_root) = build_subtree(subtree, SMT_DEPTH, current_depth);
                // Post-assertions.
                for (&index, test_node) in nodes.iter() {
                    let control_node = control.get_inner_node(index);
                    assert_eq!(
                        test_node, &control_node,
                        "depth {current_depth} subtree {i}: test node does not match control at index {index:?}",
                    );
                }
                (nodes, subtree_root)
            })
            .unzip();
        // This cycle's subtree roots become the next cycle's leaves.
        leaf_subtrees = SubtreeLeavesIter::from_leaves(&mut subtree_roots).collect();
        accumulated_nodes.extend(nodes.into_iter().flatten());
        assert!(!leaf_subtrees.is_empty(), "on depth {current_depth}");
    }

    // Make sure the true leaves match, checking length first and then each individual leaf.
    let control_leaves: BTreeMap<_, _> = control.leaves().collect();
    let control_leaves_len = control_leaves.len();
    let test_leaves_len = test_leaves.len();
    assert_eq!(test_leaves_len, control_leaves_len);
    for (col, ref test_leaf) in test_leaves {
        let index = LeafIndex::new_max_depth(col);
        let &control_leaf = control_leaves.get(&index).unwrap();
        assert_eq!(test_leaf, control_leaf);
    }

    // Make sure the inner nodes match, checking length first and then each individual leaf.
    let control_nodes_len = control.inner_nodes().count();
    let test_nodes_len = accumulated_nodes.len();
    assert_eq!(test_nodes_len, control_nodes_len);
    for (index, test_node) in accumulated_nodes.clone() {
        let control_node = control.get_inner_node(index);
        assert_eq!(test_node, control_node, "test node does not match control at {index:?}");
    }

    // After the last iteration of the above for loop, we should have the new root node actually
    // in two places: one in `accumulated_nodes`, and the other as the "next leaves" return from
    // `build_subtree()`. So let's check both!
    let control_root = control.get_inner_node(NodeIndex::root());

    // That for loop should have left us with only one leaf subtree...
    let [leaf_subtree]: [_; 1] = leaf_subtrees.try_into().unwrap();
    // which itself contains only one 'leaf'...
    let [root_leaf]: [_; 1] = leaf_subtree.try_into().unwrap();
    // which matches the expected root.
    assert_eq!(control.root(), root_leaf.hash);

    // Likewise `accumulated_nodes` should contain a node at the root index...
    assert!(accumulated_nodes.contains_key(&NodeIndex::root()));
    // and it should match our actual root.
    let test_root = accumulated_nodes.get(&NodeIndex::root()).unwrap();
    assert_eq!(control_root, *test_root);
    // And of course the root we got from each place should match.
    assert_eq!(control.root(), root_leaf.hash);
}
// Verifies that the concurrent `with_entries` constructor (including handling of
// empty-word values, i.e. removals) matches the sequential implementation.
#[test]
fn test_with_entries_concurrent() {
    const PAIR_COUNT: u64 = COLS_PER_SUBTREE * 64;
    let mut entries = generate_entries(PAIR_COUNT);
    let mut rng = rand::rng();

    // Set roughly 10% of the entries to have empty words as their values (random indices
    // may repeat, so the actual fraction can be slightly lower).
    for _ in 0..PAIR_COUNT / 10 {
        let random_index = rng.random_range(0..PAIR_COUNT);
        entries[random_index as usize].1 = EMPTY_WORD;
    }
    let control = Smt::with_entries_sequential(entries.clone()).unwrap();
    let smt = Smt::with_entries(entries.clone()).unwrap();
    assert_eq!(smt.root(), control.root());
    assert_eq!(smt, control);
}
/// Concurrent mutations
///
/// Re-runs the subtree-by-subtree mutation algorithm single-threadedly (plain
/// `into_iter()` instead of rayon), asserting at every depth step that each
/// per-subtree mutation matches what `compute_mutations_sequential()` produced.
#[test]
fn test_singlethreaded_subtree_mutations() {
    const PAIR_COUNT: u64 = COLS_PER_SUBTREE * 64;

    let entries = generate_entries(PAIR_COUNT);
    let updates = generate_updates(entries.clone(), 1000);

    // Control: the sequential mutation computation over the same tree/updates.
    let tree = Smt::with_entries_sequential(entries.clone()).unwrap();
    let control = tree.compute_mutations_sequential(updates.clone()).unwrap();

    let mut node_mutations = NodeMutations::default();

    let (mut subtree_leaves, new_pairs) =
        tree.sorted_pairs_to_mutated_subtree_leaves(updates).unwrap();

    // Walk depths from the bottom of the tree up to the root, 8 levels at a time.
    for current_depth in (SUBTREE_DEPTH..=SMT_DEPTH).step_by(SUBTREE_DEPTH as usize).rev() {
        // There's no flat_map_unzip(), so this is the best we can do.
        let (mutations_per_subtree, mut subtree_roots): (Vec<_>, Vec<_>) = subtree_leaves
            .into_iter()
            .enumerate()
            .map(|(i, subtree)| {
                // Pre-assertions.
                assert!(
                    subtree.is_sorted(),
                    "subtree {i} at bottom-depth {current_depth} is not sorted",
                );
                assert!(
                    !subtree.is_empty(),
                    "subtree {i} at bottom-depth {current_depth} is empty!",
                );
                // Calculate the mutations for this subtree.
                let (mutations_per_subtree, subtree_root) =
                    tree.build_subtree_mutations(subtree, SMT_DEPTH, current_depth);
                // Check that the mutations match the control tree.
                for (&index, mutation) in mutations_per_subtree.iter() {
                    let control_mutation = control.node_mutations().get(&index).unwrap();
                    assert_eq!(
                        control_mutation, mutation,
                        "depth {current_depth} subtree {i}: mutation does not match control at index {index:?}",
                    );
                }
                (mutations_per_subtree, subtree_root)
            })
            .unzip();

        // Subtree roots become the "leaves" of the next (shallower) iteration.
        subtree_leaves = SubtreeLeavesIter::from_leaves(&mut subtree_roots).collect();
        node_mutations.extend(mutations_per_subtree.into_iter().flatten());
        assert!(!subtree_leaves.is_empty(), "on depth {current_depth}");
    }

    // After the loop, exactly one subtree with exactly one leaf should remain:
    // the root of the tree.
    let [subtree]: [Vec<_>; 1] = subtree_leaves.try_into().unwrap();
    let [root_leaf]: [SubtreeLeaf; 1] = subtree.try_into().unwrap();

    // Check that the new root matches the control.
    assert_eq!(control.new_root, root_leaf.hash);

    // Check that the node mutations match the control.
    assert_eq!(control.node_mutations().len(), node_mutations.len());
    for (&index, mutation) in control.node_mutations().iter() {
        let test_mutation = node_mutations.get(&index).unwrap();
        assert_eq!(test_mutation, mutation);
    }

    // Check that the new pairs match the control
    assert_eq!(control.new_pairs.len(), new_pairs.len());
    for (&key, &value) in control.new_pairs.iter() {
        let test_value = new_pairs.get(&key).unwrap();
        assert_eq!(test_value, &value);
    }
}
#[test]
fn test_compute_mutations_parallel() {
    const PAIR_COUNT: u64 = COLS_PER_SUBTREE * 64;

    let entries = generate_entries(PAIR_COUNT);
    let tree = Smt::with_entries(entries.clone()).unwrap();
    let updates = generate_updates(entries, 1000);

    // Sequential mutation computation serves as the reference result.
    let expected = tree.compute_mutations_sequential(updates.clone()).unwrap();
    let actual = tree.compute_mutations(updates).unwrap();

    // Every observable component of the mutation set must agree.
    assert_eq!(actual.root(), expected.root());
    assert_eq!(actual.old_root(), expected.old_root());
    assert_eq!(actual.node_mutations(), expected.node_mutations());
    assert_eq!(actual.new_pairs(), expected.new_pairs());
}
#[test]
fn test_smt_construction_with_entries_unsorted() {
    // Keys are deliberately out of leaf-index order; construction must sort them.
    let entries = [
        ([ONE, ONE, Felt::new(2_u64), ONE].into(), [ONE; 4].into()),
        ([ONE; 4].into(), [ONE; 4].into()),
    ];

    let expected = Smt::with_entries_sequential(entries).unwrap();
    let actual = Smt::with_entries(entries).unwrap();

    assert_eq!(actual.root(), expected.root());
    assert_eq!(actual, expected);
}
#[test]
fn test_smt_construction_with_entries_duplicate_keys() {
    // The first and last entries share a key; construction must reject this.
    let entries = [
        ([ONE, ONE, ONE, Felt::new(16)].into(), [ONE; 4].into()),
        ([ONE; 4].into(), [ONE; 4].into()),
        ([ONE, ONE, ONE, Felt::new(16)].into(), [ONE; 4].into()),
    ];

    // The reported column must be the leaf index of the duplicated key.
    let duplicated_col = Smt::key_to_leaf_index(&entries[0].0).index.value();
    let err = Smt::with_entries(entries).unwrap_err();
    assert_matches!(err, MerkleError::DuplicateValuesForIndex(col) if col == duplicated_col);
}
#[test]
fn test_smt_construction_with_some_empty_values() {
    // One explicitly-empty value next to a regular one: only the regular entry
    // should materialize as a leaf.
    let entries = [
        ([ONE, ONE, ONE, ONE].into(), Smt::EMPTY_VALUE),
        ([ONE, ONE, ONE, Felt::new(2)].into(), [ONE; 4].into()),
    ];

    // Reference tree built sequentially from the same entries.
    let control = Smt::with_entries_sequential(entries).unwrap();

    let result = Smt::with_entries(entries);
    assert!(result.is_ok(), "SMT construction failed with mixed empty values");
    let smt = result.unwrap();

    assert_eq!(smt.num_leaves(), 1);
    assert_eq!(smt.root(), control.root(), "Root hashes do not match");
    assert_eq!(smt, control, "SMTs are not equal");
}
#[test]
fn test_smt_construction_with_all_empty_values() {
    // A single entry whose value is empty must produce a tree indistinguishable
    // from a freshly-constructed default tree.
    let entries = [([ONE, ONE, ONE, ONE].into(), Smt::EMPTY_VALUE)];

    let result = Smt::with_entries(entries);
    assert!(result.is_ok(), "SMT construction failed with all empty values");
    let smt = result.unwrap();

    let default_smt = Smt::default();
    assert_eq!(
        smt.root(),
        default_smt.root(),
        "SMT with all empty values should have the same root as the default SMT"
    );
    assert_eq!(smt, default_smt, "SMT with all empty values should be empty");
}
#[test]
fn test_smt_construction_with_no_entries() {
    // Degenerate case: zero entries must yield the default (empty) tree.
    let entries: [(Word, Word); 0] = [];

    let result = Smt::with_entries(entries);
    assert!(result.is_ok(), "SMT construction failed with no entries");
    assert_eq!(result.unwrap(), Smt::default(), "SMT with no entries should be empty");
}
/// Proptest strategy producing an arbitrary field element: either a random
/// `u64` mapped through `Felt::new`, or one of the boundary constants `ZERO` /
/// `ONE` (given extra weight via their own `prop_oneof!` branches).
fn arb_felt() -> impl Strategy<Value = Felt> {
    prop_oneof![any::<u64>().prop_map(Felt::new), Just(ZERO), Just(ONE),]
}
/// `Smt::with_sorted_entries` debug-asserts that its input is already sorted;
/// feeding it unsorted entries must trip that assertion.
#[test]
#[should_panic = "is_sorted_by_key"]
fn smt_with_sorted_entries_panics_on_unsorted_entries() {
    // Two entries whose keys are in descending order.
    let unsorted: [(Word, Word); 2] = [
        (
            Word::new([Felt::new(105), Felt::new(106), Felt::new(107), Felt::new(108)]),
            [Felt::new(5_u64), Felt::new(6_u64), Felt::new(7_u64), Felt::new(8_u64)].into(),
        ),
        (
            Word::new([Felt::new(101), Felt::new(102), Felt::new(103), Felt::new(104)]),
            [Felt::new(1_u64), Felt::new(2_u64), Felt::new(3_u64), Felt::new(4_u64)].into(),
        ),
    ];

    // This call must hit the `is_sorted_by_key` debug assertion.
    Smt::with_sorted_entries(unsorted).unwrap();
}
#[test]
fn test_with_sorted_entries_large_num_leaves() {
    const PAIR_COUNT: u64 = COLS_PER_SUBTREE * 8;

    // `generate_entries` yields entries already ordered by leaf index, which is
    // exactly the precondition `with_sorted_entries` requires.
    let entries = generate_entries(PAIR_COUNT);

    let expected = Smt::with_entries_sequential(entries.clone()).unwrap();
    let actual = Smt::with_sorted_entries(entries).unwrap();
    assert_eq!(actual, expected);
}
/// Strategy producing entries spread across multiple depth-8 subtrees.
///
/// Each generated column offset becomes the last limb of both the key and the
/// value, so distinct offsets map to distinct leaves across up to four
/// subtrees' worth of columns.
fn generate_cross_subtree_entries() -> impl Strategy<Value = Vec<(Word, Word)>> {
    prop::collection::vec(0..(COLS_PER_SUBTREE * 4), 1..100).prop_map(|offsets| {
        let mut entries = Vec::with_capacity(offsets.len());
        for base_col in offsets {
            // Key and value share the same limbs.
            let word = Word::new([ONE, ONE, ONE, Felt::new(base_col)]);
            entries.push((word, word));
        }
        entries
    })
}
/// Proptest strategy producing key-value entry lists that exercise the
/// concurrent SMT construction: purely random entries, entries guaranteed to
/// span multiple depth-8 subtrees, or a mix of both. The final `prop_map`
/// de-duplicates by leaf index and by key so `Smt::with_entries` cannot fail
/// with a duplicate-key error.
fn arb_entries() -> impl Strategy<Value = Vec<(Word, Word)>> {
    // Combine random entries with guaranteed cross-subtree entries
    prop_oneof![
        // Original random entry generation
        prop::collection::vec(
            prop_oneof![
                // Random values case
                (
                    prop::array::uniform4(arb_felt()).prop_map(Word::new),
                    prop::array::uniform4(arb_felt()).prop_map(Word::new)
                ),
                // Edge case values
                (
                    Just([ONE, ONE, ONE, Felt::new(0)].into()),
                    Just([ONE, ONE, ONE, Felt::new(u64::MAX)].into())
                )
            ],
            1..1000,
        ),
        // Guaranteed cross-subtree entries
        generate_cross_subtree_entries(),
        // Mix of both (combine random and cross-subtree entries)
        (
            generate_cross_subtree_entries(),
            prop::collection::vec(
                (
                    prop::array::uniform4(arb_felt()).prop_map(Word::new),
                    prop::array::uniform4(arb_felt()).prop_map(Word::new)
                ),
                1..1000,
            )
        )
            .prop_map(|(mut cross_subtree, mut random)| {
                cross_subtree.append(&mut random);
                cross_subtree
            })
    ]
    .prop_map(|entries| {
        // Ensure uniqueness of entries as `Smt::with_entries` returns an error if multiple values
        // exist for the same key.
        let mut used_indices = BTreeSet::new();
        let mut used_keys = BTreeSet::new();
        let mut result = Vec::new();
        // First occurrence of each leaf index / key wins; later ones are dropped.
        for (key, value) in entries {
            let leaf_index = LeafIndex::<SMT_DEPTH>::from(key).value();
            if used_indices.insert(leaf_index) && used_keys.insert(key) {
                result.push((key, value));
            }
        }
        result
    })
}
proptest! {
    // Property: concurrent construction must be indistinguishable from the
    // sequential construction for any de-duplicated entry list.
    #[test]
    fn test_with_entries_consistency(entries in arb_entries()) {
        let sequential = Smt::with_entries_sequential(entries.clone()).unwrap();
        let concurrent = Smt::with_entries(entries.clone()).unwrap();
        prop_assert_eq!(concurrent, sequential);
    }

    // Property: concurrent mutation computation must agree with the sequential
    // one on roots, node mutations, and new-pair counts.
    #[test]
    fn test_compute_mutations_consistency(
        initial_entries in arb_entries(),
        update_entries in arb_entries().prop_filter(
            "Update must not be empty and must differ from initial entries",
            |updates| !updates.is_empty()
        )
    ) {
        let tree = Smt::with_entries_sequential(initial_entries.clone()).unwrap();

        // An update is a "real" change if its key is new or its value differs
        // from the initially-inserted one.
        let has_real_changes = update_entries.iter().any(|(key, value)| {
            match initial_entries.iter().find(|(init_key, _)| init_key == key) {
                Some((_, init_value)) => init_value != value,
                None => true,
            }
        });

        let sequential = tree.compute_mutations_sequential(update_entries.clone()).unwrap();
        let concurrent = tree.compute_mutations(update_entries.clone()).unwrap();

        // If there are real changes, the root should change
        if has_real_changes {
            let sequential_changed = sequential.old_root != sequential.new_root;
            let concurrent_changed = concurrent.old_root != concurrent.new_root;
            prop_assert!(
                sequential_changed || concurrent_changed,
                "Root should have changed"
            );
        }

        prop_assert_eq!(sequential.old_root, concurrent.old_root);
        prop_assert_eq!(sequential.new_root, concurrent.new_root);
        prop_assert_eq!(sequential.node_mutations(), concurrent.node_mutations());
        prop_assert_eq!(sequential.new_pairs.len(), concurrent.new_pairs.len());
    }
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/full/concurrent/mod.rs | miden-crypto/src/merkle/smt/full/concurrent/mod.rs | use alloc::vec::Vec;
use core::mem;
use num::Integer;
use rayon::prelude::*;
use super::{
EmptySubtreeRoots, InnerNode, InnerNodes, Leaves, MerkleError, MutationSet, NodeIndex,
SMT_DEPTH, Smt, SmtLeaf, SparseMerkleTree, Word, leaf,
};
use crate::merkle::smt::{Map, NodeMutation, NodeMutations, SmtLeafError};
#[cfg(test)]
mod tests;
/// Leaves of mutated subtrees: one inner `Vec` of column-sorted leaves per
/// depth-8 subtree (as grouped by `SubtreeLeavesIter`).
pub(in crate::merkle::smt) type MutatedSubtreeLeaves = Vec<Vec<SubtreeLeaf>>;

// CONCURRENT IMPLEMENTATIONS
// ================================================================================================
impl Smt {
    /// Parallel implementation of [`Smt::with_entries()`].
    ///
    /// This method constructs a new sparse Merkle tree concurrently by processing subtrees in
    /// parallel, working from the bottom up. The process works as follows:
    ///
    /// 1. First, the input key-value pairs are sorted and grouped into subtrees based on their
    ///    leaf indices. Each subtree covers a range of 256 (2^8) possible leaf positions.
    ///
    /// 2. The subtrees are then processed in parallel:
    ///    - For each subtree, compute the inner nodes from depth D down to depth D-8.
    ///    - Each subtree computation yields a new subtree root and its associated inner nodes.
    ///
    /// 3. These subtree roots are recursively merged to become the "leaves" for the next
    ///    iteration, which processes the next 8 levels up. This continues until the final root of
    ///    the tree is computed at depth 0.
    ///
    /// # Errors
    /// Returns an error if the provided entries contain multiple values for the same key.
    pub(crate) fn with_entries_concurrent(
        entries: impl IntoIterator<Item = (Word, Word)>,
    ) -> Result<Self, MerkleError> {
        let entries: Vec<(Word, Word)> = entries.into_iter().collect();
        if entries.is_empty() {
            return Ok(Self::default());
        }
        let (inner_nodes, leaves) = Self::build_subtrees(entries)?;
        // All the leaves are empty
        if inner_nodes.is_empty() {
            return Ok(Self::default());
        }
        // Non-empty inner-node map always contains the root entry, so `unwrap` is safe here.
        let root = inner_nodes.get(&NodeIndex::root()).unwrap().hash();
        <Self as SparseMerkleTree<SMT_DEPTH>>::from_raw_parts(inner_nodes, leaves, root)
    }

    /// Similar to `with_entries_concurrent` but used for pre-sorted entries to avoid overhead.
    pub(crate) fn with_sorted_entries_concurrent(
        entries: impl IntoIterator<Item = (Word, Word)>,
    ) -> Result<Self, MerkleError> {
        let entries: Vec<(Word, Word)> = entries.into_iter().collect();
        if entries.is_empty() {
            return Ok(Self::default());
        }
        // Skips the parallel sort performed by `build_subtrees()`.
        let (inner_nodes, leaves) = build_subtrees_from_sorted_entries(entries)?;
        // All the leaves are empty
        if inner_nodes.is_empty() {
            return Ok(Self::default());
        }
        let root = inner_nodes.get(&NodeIndex::root()).unwrap().hash();
        <Self as SparseMerkleTree<SMT_DEPTH>>::from_raw_parts(inner_nodes, leaves, root)
    }

    /// Parallel implementation of [`Smt::compute_mutations()`].
    ///
    /// This method computes mutations by recursively processing subtrees in parallel, working
    /// from the bottom up. The process works as follows:
    ///
    /// 1. First, the input key-value pairs are sorted and grouped into subtrees based on their
    ///    leaf indices. Each subtree covers a range of 256 (2^8) possible leaf positions.
    ///
    /// 2. The subtrees containing modifications are then processed in parallel:
    ///    - For each modified subtree, compute node mutations from depth D up to depth D-8
    ///    - Each subtree computation yields a new root at depth D-8 and its associated mutations
    ///
    /// 3. These subtree roots become the "leaves" for the next iteration, which processes the
    ///    next 8 levels up. This continues until reaching the tree's root at depth 0.
    ///
    /// # Errors
    /// Returns an error if mutations would exceed [`MAX_LEAF_ENTRIES`] (1024 entries) in a leaf.
    pub(crate) fn compute_mutations_concurrent(
        &self,
        kv_pairs: impl IntoIterator<Item = (Word, Word)>,
    ) -> Result<MutationSet<SMT_DEPTH, Word, Word>, MerkleError>
    where
        Self: Sized + Sync,
    {
        // Collect and sort key-value pairs by their corresponding leaf index
        let mut sorted_kv_pairs: Vec<_> = kv_pairs.into_iter().collect();
        sorted_kv_pairs.par_sort_unstable_by_key(|(key, _)| Self::key_to_leaf_index(key).value());

        // Convert sorted pairs into mutated leaves and capture any new pairs
        let (mut subtree_leaves, new_pairs) =
            self.sorted_pairs_to_mutated_subtree_leaves(sorted_kv_pairs)?;

        // If no mutations, return an empty mutation set
        if subtree_leaves.is_empty() {
            return Ok(MutationSet {
                old_root: self.root(),
                new_root: self.root(),
                node_mutations: NodeMutations::default(),
                new_pairs,
            });
        }

        let mut node_mutations = NodeMutations::default();

        // Process each depth level in reverse, stepping by the subtree depth
        for depth in (SUBTREE_DEPTH..=SMT_DEPTH).step_by(SUBTREE_DEPTH as usize).rev() {
            // Parallel processing of each subtree to generate mutations and roots
            let (mutations_per_subtree, mut subtree_roots): (Vec<_>, Vec<_>) = subtree_leaves
                .into_par_iter()
                .map(|subtree| {
                    debug_assert!(subtree.is_sorted() && !subtree.is_empty());
                    self.build_subtree_mutations(subtree, SMT_DEPTH, depth)
                })
                .unzip();

            // Prepare leaves for the next depth level
            subtree_leaves = SubtreeLeavesIter::from_leaves(&mut subtree_roots).collect();

            // Aggregate all node mutations
            node_mutations.extend(mutations_per_subtree.into_iter().flatten());

            debug_assert!(!subtree_leaves.is_empty());
        }

        // After the last iteration only the root "leaf" remains.
        let new_root = subtree_leaves[0][0].hash;

        // Create mutation set
        let mutation_set = MutationSet {
            old_root: self.root(),
            new_root,
            node_mutations,
            new_pairs,
        };

        // There should be mutations and new pairs at this point
        debug_assert!(
            !mutation_set.node_mutations().is_empty() && !mutation_set.new_pairs().is_empty()
        );

        Ok(mutation_set)
    }

    // SUBTREE MUTATION
    // --------------------------------------------------------------------------------------------

    /// Computes the node mutations and the root of a subtree
    ///
    /// Walks `SUBTREE_DEPTH` levels upward from `bottom_depth`, pairing sibling leaves at each
    /// level and recording an `Addition` or `Removal` for every parent node touched. Returns the
    /// accumulated mutations together with the subtree's new root leaf (at depth
    /// `bottom_depth - SUBTREE_DEPTH`).
    fn build_subtree_mutations(
        &self,
        mut leaves: Vec<SubtreeLeaf>,
        tree_depth: u8,
        bottom_depth: u8,
    ) -> (NodeMutations, SubtreeLeaf)
    where
        Self: Sized,
    {
        debug_assert!(bottom_depth <= tree_depth);
        debug_assert!(Integer::is_multiple_of(&bottom_depth, &SUBTREE_DEPTH));
        debug_assert!(leaves.len() <= usize::pow(2, SUBTREE_DEPTH as u32));

        let subtree_root_depth = bottom_depth - SUBTREE_DEPTH;
        let mut node_mutations: NodeMutations = Default::default();
        let mut next_leaves: Vec<SubtreeLeaf> = Vec::with_capacity(leaves.len() / 2);

        for current_depth in (subtree_root_depth..bottom_depth).rev() {
            debug_assert!(current_depth <= bottom_depth);

            let next_depth = current_depth + 1;
            let mut iter = leaves.drain(..).peekable();

            while let Some(first_leaf) = iter.next() {
                // This constructs a valid index because next_depth will never exceed the depth of
                // the tree.
                let parent_index = NodeIndex::new_unchecked(next_depth, first_leaf.col).parent();
                let parent_node = self.get_inner_node(parent_index);
                let combined_node = fetch_sibling_pair(&mut iter, first_leaf, parent_node);
                let combined_hash = combined_node.hash();

                let &empty_hash = EmptySubtreeRoots::entry(tree_depth, current_depth);

                // Add the parent node even if it is empty for proper upward updates
                next_leaves.push(SubtreeLeaf {
                    col: parent_index.value(),
                    hash: combined_hash,
                });

                // An empty combined hash means the subtree under this parent became empty, so the
                // node is removed rather than added.
                node_mutations.insert(
                    parent_index,
                    if combined_hash != empty_hash {
                        NodeMutation::Addition(combined_node)
                    } else {
                        NodeMutation::Removal
                    },
                );
            }

            // Stop borrowing `leaves` so we can replace it with the next level's leaves.
            drop(iter);
            leaves = mem::take(&mut next_leaves);
        }

        debug_assert_eq!(leaves.len(), 1);
        let root_leaf = leaves.pop().unwrap();
        (node_mutations, root_leaf)
    }

    // SUBTREE CONSTRUCTION
    // --------------------------------------------------------------------------------------------

    /// Computes the raw parts for a new sparse Merkle tree from a set of key-value pairs.
    ///
    /// `entries` need not be sorted. This function will sort them using parallel sorting.
    ///
    /// # Errors
    /// Returns an error if the provided entries contain multiple values for the same key.
    fn build_subtrees(mut entries: Vec<(Word, Word)>) -> Result<(InnerNodes, Leaves), MerkleError> {
        entries.par_sort_unstable_by_key(|item| {
            let index = Self::key_to_leaf_index(&item.0);
            index.value()
        });
        build_subtrees_from_sorted_entries(entries)
    }

    // LEAF NODE CONSTRUCTION
    // --------------------------------------------------------------------------------------------

    /// Performs the initial transforms for constructing a [`SparseMerkleTree`] by composing
    /// subtrees. In other words, this function takes the key-value inputs to the tree, and
    /// produces the inputs to feed into [`build_subtree()`].
    ///
    /// `pairs` *must* already be sorted **by leaf index column**, not simply sorted by key. If
    /// `pairs` is not correctly sorted, the returned computations will be incorrect.
    ///
    /// # Errors
    /// Returns an error if the provided pairs contain multiple values for the same key.
    ///
    /// # Panics
    /// With debug assertions on, this function panics if it detects that `pairs` is not correctly
    /// sorted. Without debug assertions, the returned computations will be incorrect.
    pub(in crate::merkle::smt) fn sorted_pairs_to_leaves(
        pairs: Vec<(Word, Word)>,
    ) -> Result<PairComputations<u64, SmtLeaf>, MerkleError> {
        process_sorted_pairs_to_leaves(pairs, Self::pairs_to_leaf)
    }

    /// Constructs a single leaf from an arbitrary amount of key-value pairs.
    /// Those pairs must all have the same leaf index.
    ///
    /// # Errors
    /// Returns a `MerkleError::DuplicateValuesForIndex` if the provided pairs contain multiple
    /// values for the same key.
    ///
    /// # Returns
    /// - `Ok(Some(SmtLeaf))` if a valid leaf is constructed.
    /// - `Ok(None)` if the only provided value is `Self::EMPTY_VALUE`.
    fn pairs_to_leaf(mut pairs: Vec<(Word, Word)>) -> Result<Option<SmtLeaf>, MerkleError> {
        assert!(!pairs.is_empty());

        if pairs.len() > 1 {
            // Sort by key so duplicates become adjacent.
            pairs.sort_by(|(key_1, _), (key_2, _)| leaf::cmp_keys(*key_1, *key_2));
            // Check for duplicates in a sorted list by comparing adjacent pairs
            if let Some(window) = pairs.windows(2).find(|window| window[0].0 == window[1].0) {
                // If we find a duplicate, return an error
                let col = Self::key_to_leaf_index(&window[0].0).index.value();
                return Err(MerkleError::DuplicateValuesForIndex(col));
            }
            Ok(Some(SmtLeaf::new_multiple(pairs).unwrap()))
        } else {
            let (key, value) = pairs.pop().unwrap();
            if value == Self::EMPTY_VALUE {
                Ok(None)
            } else {
                Ok(Some(SmtLeaf::new_single(key, value)))
            }
        }
    }

    /// Computes leaves from a set of key-value pairs and current leaf values.
    /// Derived from `sorted_pairs_to_leaves`
    ///
    /// Returns the mutated leaves grouped per subtree alongside a map of the key-value pairs
    /// whose values actually changed.
    fn sorted_pairs_to_mutated_subtree_leaves(
        &self,
        pairs: Vec<(Word, Word)>,
    ) -> Result<(MutatedSubtreeLeaves, Map<Word, Word>), MerkleError> {
        // Map to track new key-value pairs for mutated leaves
        let mut new_pairs = Map::new();

        let accumulator = process_sorted_pairs_to_leaves(pairs, |leaf_pairs| {
            let mut leaf = self.get_leaf(&leaf_pairs[0].0);

            let mut leaf_changed = false;
            for (key, value) in leaf_pairs {
                // Check if the value has changed
                let old_value = new_pairs.get(&key).cloned().unwrap_or_else(|| {
                    // Safe to unwrap: `leaf_pairs` contains keys all belonging to this leaf.
                    // `SmtLeaf::get_value()` only returns `None` if the key does not belong to the
                    // leaf, which cannot happen due to the sorting/grouping
                    // logic in `process_sorted_pairs_to_leaves()`.
                    leaf.get_value(&key).unwrap()
                });

                if value != old_value {
                    // Update the leaf and track the new key-value pair
                    leaf = self.construct_prospective_leaf(leaf, &key, &value).map_err(
                        |e| match e {
                            SmtLeafError::TooManyLeafEntries { actual } => {
                                MerkleError::TooManyLeafEntries { actual }
                            },
                            other => panic!("unexpected SmtLeaf::insert error: {:?}", other),
                        },
                    )?;
                    new_pairs.insert(key, value);
                    leaf_changed = true;
                }
            }

            if leaf_changed {
                // Only return the leaf if it actually changed
                Ok(Some(leaf))
            } else {
                // Return None if leaf hasn't changed
                Ok(None)
            }
        });
        // The closure can only fail with `MerkleError::TooManyLeafEntries` (any other
        // `SmtLeafError` panics above); such an error is propagated here via `?`.
        Ok((accumulator?.leaves, new_pairs))
    }
}
// SUBTREES
// ================================================================================================

/// A subtree is of depth 8.
pub(in crate::merkle::smt) const SUBTREE_DEPTH: u8 = 8;

/// A depth-8 subtree contains 256 (2^8) "columns" that can possibly be occupied.
pub(in crate::merkle::smt) const COLS_PER_SUBTREE: u64 = u64::pow(2, SUBTREE_DEPTH as u32);
/// Helper struct for organizing the data we care about when computing Merkle subtrees.
///
/// Note that these represent "conceptual" leaves of some subtree, not necessarily
/// the leaf type for the sparse Merkle tree.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Default)]
pub struct SubtreeLeaf {
    /// The 'value' field of [`NodeIndex`]. When computing a subtree, the depth is already known.
    pub col: u64,
    /// The hash of the node this `SubtreeLeaf` represents.
    pub hash: Word,
}
/// Helper struct to organize the return value of [`Smt::sorted_pairs_to_leaves()`].
#[derive(Debug, Clone)]
pub(in crate::merkle::smt) struct PairComputations<K, L> {
    /// Literal leaves to be added to the sparse Merkle tree's internal mapping.
    pub nodes: Map<K, L>,
    /// "Conceptual" leaves that will be used for computations, grouped per depth-8 subtree.
    pub leaves: Vec<Vec<SubtreeLeaf>>,
}
// Derive requires `L` to impl Default, even though we don't actually need that.
impl<K, L> Default for PairComputations<K, L> {
fn default() -> Self {
Self {
nodes: Default::default(),
leaves: Default::default(),
}
}
}
/// Iterator adapter that groups a flat, column-sorted list of [`SubtreeLeaf`]s
/// into batches, one `Vec` per depth-8 subtree (see the `Iterator` impl below).
#[derive(Debug)]
pub(in crate::merkle::smt) struct SubtreeLeavesIter<'s> {
    // Draining iterator over the source vector; `Peekable` so a subtree boundary
    // can be detected without consuming the first leaf of the next batch.
    leaves: core::iter::Peekable<alloc::vec::Drain<'s, SubtreeLeaf>>,
}
impl<'s> SubtreeLeavesIter<'s> {
    /// Creates an iterator that drains `leaves`, yielding them grouped by subtree.
    // TODO: determine if there is any notable performance difference between taking a Vec,
    // which may need flattening first, vs storing a `Box<dyn Iterator<Item = SubtreeLeaf>>`.
    // The latter may have self-referential properties that are impossible to express in purely
    // safe Rust.
    pub(crate) fn from_leaves(leaves: &'s mut Vec<SubtreeLeaf>) -> Self {
        let leaves = leaves.drain(..).peekable();
        Self { leaves }
    }
}
impl Iterator for SubtreeLeavesIter<'_> {
    type Item = Vec<SubtreeLeaf>;

    /// Each `next()` collects an entire subtree.
    fn next(&mut self) -> Option<Vec<SubtreeLeaf>> {
        let mut subtree: Vec<SubtreeLeaf> = Default::default();

        // Column of the most recently accepted leaf; used to derive the exclusive
        // upper bound (first column of the *next* subtree).
        let mut last_subtree_col = 0;

        while let Some(leaf) = self.leaves.peek() {
            // Clamp to 1 so the first iteration (col 0) still rounds up to the end
            // of the first subtree rather than to 0.
            last_subtree_col = u64::max(1, last_subtree_col);
            let is_exact_multiple = Integer::is_multiple_of(&last_subtree_col, &COLS_PER_SUBTREE);
            // If the last accepted column sits exactly on a subtree boundary, the
            // boundary belongs to the *previous* subtree, so advance past it.
            let next_subtree_col = if is_exact_multiple {
                u64::next_multiple_of(last_subtree_col + 1, COLS_PER_SUBTREE)
            } else {
                last_subtree_col.next_multiple_of(COLS_PER_SUBTREE)
            };
            last_subtree_col = leaf.col;
            if leaf.col < next_subtree_col {
                // Leaf belongs to the current subtree: consume it.
                subtree.push(self.leaves.next().unwrap());
            } else if subtree.is_empty() {
                // Nothing collected yet and the leaf is past the boundary: skip the
                // empty subtree(s) by recomputing the boundary from this leaf's column.
                continue;
            } else {
                // Boundary crossed with a non-empty batch: the subtree is complete.
                break;
            }
        }

        if subtree.is_empty() {
            // Only possible when the source iterator is exhausted.
            debug_assert!(self.leaves.peek().is_none());
            return None;
        }

        Some(subtree)
    }
}
// HELPER FUNCTIONS
// ================================================================================================
/// Processes sorted key-value pairs to compute leaves for a subtree.
///
/// This function groups key-value pairs by their corresponding column index and processes each
/// group to construct leaves. The actual construction of the leaf is delegated to the
/// `process_leaf` callback, allowing flexibility for different use cases (e.g., creating
/// new leaves or mutating existing ones).
///
/// # Parameters
/// - `pairs`: A vector of sorted key-value pairs. The pairs *must* be sorted by leaf index column
///   (not simply by key). If the input is not sorted correctly, the function will produce
///   incorrect results and may panic in debug mode.
/// - `process_leaf`: A callback function used to process each group of key-value pairs
///   corresponding to the same column index. The callback takes a vector of key-value pairs for a
///   single column and returns the constructed leaf for that column.
///
/// # Returns
/// A `PairComputations<u64, Self::Leaf>` containing:
/// - `nodes`: A mapping of column indices to the constructed leaves.
/// - `leaves`: A collection of `SubtreeLeaf` structures representing the processed leaves. Each
///   `SubtreeLeaf` includes the column index and the hash of the corresponding leaf.
///
/// # Errors
/// Returns an error if the `process_leaf` callback fails.
///
/// # Panics
/// This function will panic in debug mode if the input `pairs` are not sorted by column index.
pub(crate) fn process_sorted_pairs_to_leaves<F>(
    pairs: Vec<(Word, Word)>,
    mut process_leaf: F,
) -> Result<PairComputations<u64, SmtLeaf>, MerkleError>
where
    F: FnMut(Vec<(Word, Word)>) -> Result<Option<SmtLeaf>, MerkleError>,
{
    debug_assert!(pairs.is_sorted_by_key(|(key, _)| Smt::key_to_leaf_index(key).value()));

    let mut accumulator: PairComputations<u64, SmtLeaf> = Default::default();

    // As we iterate, we'll keep track of the kv-pairs we've seen so far that correspond to a
    // single leaf. When we see a pair that's in a different leaf, we'll swap these pairs
    // out and store them in our accumulated leaves.
    let mut current_leaf_buffer: Vec<(Word, Word)> = Default::default();

    let mut iter = pairs.into_iter().peekable();
    while let Some((key, value)) = iter.next() {
        let col = Smt::key_to_leaf_index(&key).index.value();
        let peeked_col = iter.peek().map(|(key, _v)| {
            let index = Smt::key_to_leaf_index(key);
            let next_col = index.index.value();
            // We panic if `pairs` is not sorted by column.
            debug_assert!(next_col >= col);
            next_col
        });
        current_leaf_buffer.push((key, value));

        // If the next pair is the same column as this one, then we're done after adding this
        // pair to the buffer.
        if peeked_col == Some(col) {
            continue;
        }

        // Otherwise, the next pair is a different column, or there is no next pair. Either way
        // it's time to swap out our buffer.
        let leaf_pairs = mem::take(&mut current_leaf_buffer);

        // Process leaf and propagate any errors
        match process_leaf(leaf_pairs) {
            Ok(Some(leaf)) => {
                accumulator.nodes.insert(col, leaf);
            },
            Ok(None) => {
                // No leaf was constructed for this column. The column will be skipped.
            },
            Err(e) => return Err(e),
        }

        debug_assert!(current_leaf_buffer.is_empty());
    }

    // Compute the leaves from the nodes concurrently
    let mut accumulated_leaves: Vec<SubtreeLeaf> = accumulator
        .nodes
        .clone()
        .into_par_iter()
        .map(|(col, leaf)| SubtreeLeaf { col, hash: Smt::hash_leaf(&leaf) })
        .collect();

    // Sort the leaves by column
    accumulated_leaves.par_sort_by_key(|leaf| leaf.col);

    // TODO: determine if there is any notable performance difference between computing
    // subtree boundaries after the fact as an iterator adapter (like this), versus computing
    // subtree boundaries as we go. Either way this function is only used at the beginning of a
    // parallel construction, so it should not be a critical path.
    accumulator.leaves = SubtreeLeavesIter::from_leaves(&mut accumulated_leaves).collect();
    Ok(accumulator)
}
/// Computes the raw parts for a new sparse Merkle tree from a set of key-value pairs.
///
/// This function is mostly an implementation detail of
/// [`Smt::with_entries_concurrent()`]. `entries` must already be sorted by leaf index column
/// (see [`Smt::sorted_pairs_to_leaves()`]).
///
/// # Errors
/// Returns an error if the provided entries contain multiple values for the same key.
fn build_subtrees_from_sorted_entries(
    entries: Vec<(Word, Word)>,
) -> Result<(InnerNodes, Leaves), MerkleError> {
    let mut accumulated_nodes: InnerNodes = Default::default();

    let PairComputations {
        leaves: mut leaf_subtrees,
        nodes: initial_leaves,
    } = Smt::sorted_pairs_to_leaves(entries)?;

    // If there are no leaves, we can return early
    if initial_leaves.is_empty() {
        return Ok((accumulated_nodes, initial_leaves));
    }

    // Build the tree bottom-up, 8 levels per iteration; each pass collapses the
    // current subtrees into roots that seed the next (shallower) pass.
    for current_depth in (SUBTREE_DEPTH..=SMT_DEPTH).step_by(SUBTREE_DEPTH as usize).rev() {
        let (nodes, mut subtree_roots): (Vec<Map<_, _>>, Vec<SubtreeLeaf>) = leaf_subtrees
            .into_par_iter()
            .map(|subtree| {
                debug_assert!(subtree.is_sorted());
                debug_assert!(!subtree.is_empty());
                let (nodes, subtree_root) = build_subtree(subtree, SMT_DEPTH, current_depth);
                (nodes, subtree_root)
            })
            .unzip();
        leaf_subtrees = SubtreeLeavesIter::from_leaves(&mut subtree_roots).collect();
        accumulated_nodes.extend(nodes.into_iter().flatten());
        debug_assert!(!leaf_subtrees.is_empty());
    }
    Ok((accumulated_nodes, initial_leaves))
}
/// Builds Merkle nodes from a bottom layer of "leaves" -- represented by a horizontal index and
/// the hash of the leaf at that index. `leaves` *must* be sorted by horizontal index, and
/// `leaves` must not contain more than one depth-8 subtree's worth of leaves.
///
/// This function will then calculate the inner nodes above each leaf for 8 layers, as well as
/// the "leaves" for the next 8-deep subtree, so this function can effectively be chained into
/// itself.
///
/// Returns the non-empty inner nodes computed for this subtree, together with the single
/// "leaf" representing this subtree's root in the next-higher subtree.
///
/// # Panics
/// With debug assertions on, this function panics under invalid inputs: if `leaves` contains
/// more entries than can fit in a depth-8 subtree, if `leaves` contains leaves belonging to
/// different depth-8 subtrees, if `bottom_depth` is lower in the tree than the specified
/// maximum depth (`DEPTH`), or if `leaves` is not sorted.
pub(crate) fn build_subtree(
    mut leaves: Vec<SubtreeLeaf>,
    tree_depth: u8,
    bottom_depth: u8,
) -> (Map<NodeIndex, InnerNode>, SubtreeLeaf) {
    #[cfg(debug_assertions)]
    {
        // Ensure that all leaves have unique column indices within this subtree.
        // In normal usage via public APIs, this should never happen because leaf
        // construction enforces uniqueness. However, when testing or benchmarking
        // `build_subtree()` in isolation, duplicate columns can appear if input
        // constraints are not enforced.
        use alloc::collections::BTreeSet;
        let mut seen_cols = BTreeSet::new();
        for leaf in &leaves {
            assert!(seen_cols.insert(leaf.col), "Duplicate column found in subtree: {}", leaf.col);
        }
    }
    debug_assert!(bottom_depth <= tree_depth);
    debug_assert!(Integer::is_multiple_of(&bottom_depth, &SUBTREE_DEPTH));
    debug_assert!(leaves.len() <= usize::pow(2, SUBTREE_DEPTH as u32));
    // Depth of this subtree's root; layers are built from `bottom_depth` up to this depth.
    let subtree_root = bottom_depth - SUBTREE_DEPTH;
    let mut inner_nodes: Map<NodeIndex, InnerNode> = Default::default();
    // Each layer pairs siblings into parents, so the next layer holds at most half as many.
    let mut next_leaves: Vec<SubtreeLeaf> = Vec::with_capacity(leaves.len() / 2);
    for next_depth in (subtree_root..bottom_depth).rev() {
        debug_assert!(next_depth <= bottom_depth);
        // `next_depth` is the stuff we're making.
        // `current_depth` is the stuff we have.
        let current_depth = next_depth + 1;
        let mut iter = leaves.drain(..).peekable();
        while let Some(first) = iter.next() {
            // On non-continuous iterations, including the first iteration, `first_column` may
            // be a left or right node. On subsequent continuous iterations, we will always call
            // `iter.next()` twice.
            // On non-continuous iterations (including the very first iteration), this column
            // could be either on the left or the right. If the next iteration is not
            // discontinuous with our right node, then the next iteration's
            let is_right = first.col.is_odd();
            let (left, right) = if is_right {
                // Discontinuous iteration: we have no left node, so it must be empty.
                let left = SubtreeLeaf {
                    col: first.col - 1,
                    hash: *EmptySubtreeRoots::entry(tree_depth, current_depth),
                };
                let right = first;
                (left, right)
            } else {
                let left = first;
                let right_col = first.col + 1;
                let right = match iter.peek().copied() {
                    Some(SubtreeLeaf { col, .. }) if col == right_col => {
                        // Our inputs must be sorted.
                        debug_assert!(left.col <= col);
                        // The next leaf in the iterator is our sibling. Use it and consume it!
                        iter.next().unwrap()
                    },
                    // Otherwise, the leaves don't contain our sibling, so our sibling must be
                    // empty.
                    _ => SubtreeLeaf {
                        col: right_col,
                        hash: *EmptySubtreeRoots::entry(tree_depth, current_depth),
                    },
                };
                (left, right)
            };
            let index = NodeIndex::new_unchecked(current_depth, left.col).parent();
            let node = InnerNode { left: left.hash, right: right.hash };
            let hash = node.hash();
            let &equivalent_empty_hash = EmptySubtreeRoots::entry(tree_depth, next_depth);
            // If this hash is empty, then it doesn't become a new inner node, nor does it count
            // as a leaf for the next depth.
            if hash != equivalent_empty_hash {
                inner_nodes.insert(index, node);
                next_leaves.push(SubtreeLeaf { col: index.value(), hash });
            }
        }
        // Stop borrowing `leaves`, so we can swap it.
        // The iterator is empty at this point anyway.
        drop(iter);
        // After each depth, consider the stuff we just made the new "leaves", and empty the
        // other collection.
        mem::swap(&mut leaves, &mut next_leaves);
    }
    // After folding all layers, exactly one "leaf" remains: this subtree's root.
    debug_assert_eq!(leaves.len(), 1);
    let root = leaves.pop().unwrap();
    (inner_nodes, root)
}
/// Constructs an `InnerNode` representing the sibling pair of which `first_leaf` is a part:
/// - If `first_leaf` is a right child, the left child is copied from the `parent_node`.
/// - If `first_leaf` is a left child, the right child is taken from `iter` if it was also mutated
///   or copied from the `parent_node`.
///
/// Returns the `InnerNode` containing the hashes of the sibling pair.
pub(crate) fn fetch_sibling_pair(
    iter: &mut core::iter::Peekable<alloc::vec::Drain<SubtreeLeaf>>,
    first_leaf: SubtreeLeaf,
    parent_node: InnerNode,
) -> InnerNode {
    if first_leaf.col.is_odd() {
        // `first_leaf` is a right child; its left sibling was not mutated, so its hash is
        // reused directly from the parent node.
        InnerNode {
            left: parent_node.left,
            right: first_leaf.hash,
        }
    } else {
        // `first_leaf` is a left child; its sibling is the next drained leaf only when that
        // leaf occupies the adjacent column. Otherwise the sibling was not mutated and its
        // hash is reused from the parent node.
        let right_col = first_leaf.col + 1;
        let right_hash = match iter.peek() {
            Some(SubtreeLeaf { col, .. }) if *col == right_col => {
                iter.next().expect("peeked element must exist").hash
            },
            _ => parent_node.right,
        };
        InnerNode {
            left: first_leaf.hash,
            right: right_hash,
        }
    }
}
/// A public wrapper around [`build_subtree`] exposed solely for benchmarking.
///
/// Forwards its arguments unchanged; only available with the `internal` feature enabled.
#[cfg(feature = "internal")]
pub fn build_subtree_for_bench(
    leaves: Vec<SubtreeLeaf>,
    tree_depth: u8,
    bottom_depth: u8,
) -> (Map<NodeIndex, InnerNode>, SubtreeLeaf) {
    build_subtree(leaves, tree_depth, bottom_depth)
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/large_forest/error.rs | miden-crypto/src/merkle/smt/large_forest/error.rs | //! This module contains the error types and helpers for working with errors from the large SMT
//! forest.
use thiserror::Error;
use crate::merkle::{MerkleError, smt::large_forest::history::error::HistoryError};
/// The errors returned by operations on the large SMT forest.
///
/// This type primarily serves to wrap more specific error types from various subsystems into a
/// generic interface type.
#[derive(Debug, Error)]
pub enum LargeSmtForestError {
    /// An error raised by the history subsystem of the forest.
    #[error(transparent)]
    HistoryError(#[from] HistoryError),
    /// An error raised by the underlying Merkle tree operations.
    #[error(transparent)]
    MerkleError(#[from] MerkleError),
}
pub mod history {}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/large_forest/mod.rs | miden-crypto/src/merkle/smt/large_forest/mod.rs | //! A high-performance sparse merkle tree forest backed by pluggable storage.
mod error;
mod history;
pub use error::LargeSmtForestError;
pub use history::{History, HistoryView, error::HistoryError};
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/large_forest/history/tests.rs | miden-crypto/src/merkle/smt/large_forest/history/tests.rs | #![cfg(feature = "std")]
//! The functional tests for the history component.
use p3_field::PrimeCharacteristicRing;
use super::{CompactLeaf, History, LeafChanges, NodeChanges, error::Result};
use crate::{
Felt, Word,
merkle::{NodeIndex, smt::LeafIndex},
rand::test_utils::rand_value,
};
// TESTS
// ================================================================================================
#[test]
fn empty() {
    // A freshly-created history holds no versions but remembers its configured capacity.
    let capacity = 5;
    let history = History::empty(capacity);
    assert_eq!(history.max_versions(), capacity);
    assert_eq!(history.num_versions(), 0);
}
#[test]
fn roots() -> Result<()> {
    // Populate a two-slot history with two distinct random roots.
    let node_changes = NodeChanges::default();
    let leaf_changes = LeafChanges::default();
    let mut history = History::empty(2);
    let first_root: Word = rand_value();
    let second_root: Word = rand_value();
    history.add_version(first_root, 0, node_changes.clone(), leaf_changes.clone())?;
    history.add_version(second_root, 1, node_changes, leaf_changes)?;
    // Both roots must be reported back, and nothing else.
    let known_roots = history.roots();
    assert_eq!(known_roots.len(), 2);
    assert!(known_roots.contains(&first_root));
    assert!(known_roots.contains(&second_root));
    Ok(())
}
#[test]
fn is_known_root() -> Result<()> {
    // Populate a two-slot history with two distinct random roots.
    let node_changes = NodeChanges::default();
    let leaf_changes = LeafChanges::default();
    let mut history = History::empty(2);
    let first_root: Word = rand_value();
    let second_root: Word = rand_value();
    history.add_version(first_root, 0, node_changes.clone(), leaf_changes.clone())?;
    history.add_version(second_root, 1, node_changes, leaf_changes)?;
    // Roots that were added must be reported as known...
    assert!(history.is_known_root(first_root));
    assert!(history.is_known_root(second_root));
    // ...while a fresh random root is (with overwhelming probability) not.
    assert!(!history.is_known_root(rand_value()));
    Ok(())
}
#[test]
fn find_latest_corresponding_version() -> Result<()> {
    // Start by setting up our test data.
    let nodes = NodeChanges::default();
    let leaves = LeafChanges::default();
    let mut history = History::empty(5);
    // Deliberately non-contiguous version ids, so we can exercise in-between queries below.
    let v1 = 10;
    let v2 = 20;
    let v3 = 30;
    let v4 = 31;
    let v5 = 45;
    history.add_version(rand_value(), v1, nodes.clone(), leaves.clone())?;
    history.add_version(rand_value(), v2, nodes.clone(), leaves.clone())?;
    history.add_version(rand_value(), v3, nodes.clone(), leaves.clone())?;
    history.add_version(rand_value(), v4, nodes.clone(), leaves.clone())?;
    history.add_version(rand_value(), v5, nodes.clone(), leaves.clone())?;
    // When we query for a version that is older than the oldest in the history we should get an
    // error.
    assert!(history.find_latest_corresponding_version(0).is_err());
    assert!(history.find_latest_corresponding_version(9).is_err());
    // When we query for the oldest version we should get its index.
    assert_eq!(history.find_latest_corresponding_version(v1), Ok(0));
    // And that goes for any other known version
    assert_eq!(history.find_latest_corresponding_version(v2), Ok(1));
    assert_eq!(history.find_latest_corresponding_version(v3), Ok(2));
    assert_eq!(history.find_latest_corresponding_version(v4), Ok(3));
    assert_eq!(history.find_latest_corresponding_version(v5), Ok(4));
    // But we can also query for versions in between: each query resolves to the newest stored
    // version whose id does not exceed the queried id.
    assert_eq!(history.find_latest_corresponding_version(11), Ok(0));
    assert_eq!(history.find_latest_corresponding_version(19), Ok(0));
    assert_eq!(history.find_latest_corresponding_version(21), Ok(1));
    assert_eq!(history.find_latest_corresponding_version(29), Ok(1));
    assert_eq!(history.find_latest_corresponding_version(32), Ok(3));
    assert_eq!(history.find_latest_corresponding_version(44), Ok(3));
    assert_eq!(history.find_latest_corresponding_version(46), Ok(4));
    Ok(())
}
#[test]
fn add_version() -> Result<()> {
    let nodes = NodeChanges::default();
    let leaves = LeafChanges::default();
    // We start with an empty state, and we should be able to add deltas up until the limit we
    // set.
    let mut history = History::empty(2);
    assert_eq!(history.num_versions(), 0);
    assert_eq!(history.max_versions(), 2);
    let root_1: Word = rand_value();
    let id_1 = 0;
    history.add_version(root_1, id_1, nodes.clone(), leaves.clone())?;
    assert_eq!(history.num_versions(), 1);
    let root_2: Word = rand_value();
    let id_2 = 1;
    history.add_version(root_2, id_2, nodes.clone(), leaves.clone())?;
    assert_eq!(history.num_versions(), 2);
    // At this point, adding any version should remove the oldest.
    let root_3: Word = rand_value();
    let id_3 = 2;
    history.add_version(root_3, id_3, nodes.clone(), leaves.clone())?;
    assert_eq!(history.num_versions(), 2);
    // If we then query for that first version it won't be there anymore, but the other two
    // should.
    assert!(history.get_view_at(id_1).is_err());
    assert!(history.get_view_at(id_2).is_ok());
    assert!(history.get_view_at(id_3).is_ok());
    // If we try and add a version with a non-monotonic version number, we should see an error.
    assert!(history.add_version(root_3, id_1, nodes, leaves).is_err());
    Ok(())
}
#[test]
fn truncate() -> Result<()> {
    // Start by setting up the test data
    let mut history = History::empty(4);
    let nodes = NodeChanges::default();
    let leaves = LeafChanges::default();
    let root_1: Word = rand_value();
    let id_1 = 5;
    history.add_version(root_1, id_1, nodes.clone(), leaves.clone())?;
    let root_2: Word = rand_value();
    let id_2 = 10;
    history.add_version(root_2, id_2, nodes.clone(), leaves.clone())?;
    let root_3: Word = rand_value();
    let id_3 = 15;
    history.add_version(root_3, id_3, nodes.clone(), leaves.clone())?;
    let root_4: Word = rand_value();
    let id_4 = 20;
    history.add_version(root_4, id_4, nodes.clone(), leaves.clone())?;
    assert_eq!(history.num_versions(), 4);
    // Note: `truncate` returns the number of versions removed.
    // If we truncate to the oldest version or before, nothing should be removed.
    assert_eq!(history.truncate(0), 0);
    assert_eq!(history.num_versions(), 4);
    assert_eq!(history.truncate(4), 0);
    assert_eq!(history.num_versions(), 4);
    assert_eq!(history.truncate(id_1), 0);
    assert_eq!(history.num_versions(), 4);
    // If we truncate to a specific known version, it should remove all previous versions.
    assert_eq!(history.truncate(id_2), 1);
    assert_eq!(history.num_versions(), 3);
    // If we truncate to a version that is not known, the newest relevant version should be
    // retained.
    assert_eq!(history.truncate(16), 1);
    assert_eq!(history.num_versions(), 2);
    // If we truncate to a version beyond the newest known, only that should be retained.
    assert_eq!(history.truncate(25), 1);
    assert_eq!(history.num_versions(), 1);
    Ok(())
}
#[test]
fn clear() -> Result<()> {
    // Build a history containing two versions.
    let mut history = History::empty(4);
    let node_changes = NodeChanges::default();
    let leaf_changes = LeafChanges::default();
    let first_root: Word = rand_value();
    history.add_version(first_root, 0, node_changes.clone(), leaf_changes.clone())?;
    let second_root: Word = rand_value();
    history.add_version(second_root, 1, node_changes, leaf_changes)?;
    assert_eq!(history.num_versions(), 2);
    // Clearing removes every stored version in one go.
    history.clear();
    assert_eq!(history.num_versions(), 0);
    Ok(())
}
#[test]
fn view_at() -> Result<()> {
    // Starting in an empty state we should be able to add deltas up until the limit we set.
    let mut history = History::empty(3);
    assert_eq!(history.num_versions(), 0);
    assert_eq!(history.max_versions(), 3);
    // We can add an initial version with some changes in both nodes and leaves.
    let root_1 = rand_value::<Word>();
    let id_1 = 3;
    let mut nodes_1 = NodeChanges::default();
    let n1_value: Word = rand_value();
    let n2_value: Word = rand_value();
    nodes_1.insert(NodeIndex::new(2, 1).unwrap(), n1_value);
    nodes_1.insert(NodeIndex::new(8, 128).unwrap(), n2_value);
    let mut leaf_1 = CompactLeaf::new();
    let l1_e1_key: Word = rand_value();
    let l1_e1_value: Word = rand_value();
    let leaf_1_ix = LeafIndex::from(l1_e1_key);
    leaf_1.insert(l1_e1_key, l1_e1_value);
    let mut leaf_2 = CompactLeaf::new();
    let l2_e1_key: Word = rand_value();
    let l2_e1_value: Word = rand_value();
    let leaf_2_ix = LeafIndex::from(l2_e1_key);
    // Craft a second key that falls into the same leaf by reusing the leaf index in element 3.
    let mut l2_e2_key: Word = rand_value();
    l2_e2_key[3] = Felt::from_u64(leaf_2_ix.value());
    let l2_e2_value: Word = rand_value();
    leaf_2.insert(l2_e1_key, l2_e1_value);
    leaf_2.insert(l2_e2_key, l2_e2_value);
    let mut leaves_1 = LeafChanges::default();
    leaves_1.insert(leaf_1_ix, leaf_1.clone());
    leaves_1.insert(leaf_2_ix, leaf_2.clone());
    history.add_version(root_1, id_1, nodes_1.clone(), leaves_1.clone())?;
    assert_eq!(history.num_versions(), 1);
    // We then add another version that overlaps with the older version.
    let root_2 = rand_value::<Word>();
    let id_2 = 5;
    let mut nodes_2 = NodeChanges::default();
    let n3_value: Word = rand_value();
    let n4_value: Word = rand_value();
    // Note that the node at (2, 1) is also changed by version 1 above.
    nodes_2.insert(NodeIndex::new(2, 1).unwrap(), n3_value);
    nodes_2.insert(NodeIndex::new(10, 256).unwrap(), n4_value);
    let mut leaf_3 = CompactLeaf::new();
    let leaf_3_ix = leaf_2_ix;
    let mut l3_e1_key: Word = rand_value();
    l3_e1_key[3] = Felt::from_u64(leaf_3_ix.value());
    let l3_e1_value: Word = rand_value();
    leaf_3.insert(l3_e1_key, l3_e1_value);
    let mut leaves_2 = LeafChanges::default();
    leaves_2.insert(leaf_3_ix, leaf_3.clone());
    history.add_version(root_2, id_2, nodes_2.clone(), leaves_2.clone())?;
    assert_eq!(history.num_versions(), 2);
    // And another version for the sake of the test.
    let root_3 = rand_value::<Word>();
    let id_3 = 6;
    let mut nodes_3 = NodeChanges::default();
    let n5_value: Word = rand_value();
    nodes_3.insert(NodeIndex::new(30, 1).unwrap(), n5_value);
    let mut leaf_4 = CompactLeaf::new();
    let l4_e1_key: Word = rand_value();
    let l4_e1_value: Word = rand_value();
    let leaf_4_ix = LeafIndex::from(l4_e1_key);
    leaf_4.insert(l4_e1_key, l4_e1_value);
    let mut leaves_3 = LeafChanges::default();
    leaves_3.insert(leaf_4_ix, leaf_4.clone());
    history.add_version(root_3, id_3, nodes_3.clone(), leaves_3.clone())?;
    assert_eq!(history.num_versions(), 3);
    // At this point, we can grab a view into the history. If we grab something older than the
    // history knows about we should get an error.
    assert!(history.get_view_at(2).is_err());
    // If we grab something valid, then we should get the right results. Let's grab the oldest
    // possible version to test the overlay logic.
    let view = history.get_view_at(id_1)?;
    // Getting a node in the targeted version should just return it.
    assert_eq!(view.node_value(&NodeIndex::new(2, 1).unwrap()), Some(&n1_value));
    assert_eq!(view.node_value(&NodeIndex::new(8, 128).unwrap()), Some(&n2_value));
    // Getting a node that is _not_ in the targeted delta directly should search through the
    // versions in between the targeted version at the current tree and return the oldest value
    // it can find for it.
    assert_eq!(view.node_value(&NodeIndex::new(10, 256).unwrap()), Some(&n4_value));
    assert_eq!(view.node_value(&NodeIndex::new(30, 1).unwrap()), Some(&n5_value));
    // Getting a node that doesn't exist in ANY versions should return none.
    assert!(view.node_value(&NodeIndex::new(45, 100).unwrap()).is_none());
    // Similarly, getting a leaf from the targeted version should just return it.
    assert_eq!(view.leaf_value(&leaf_1_ix), Some(&leaf_1));
    assert_eq!(view.leaf_value(&leaf_2_ix), Some(&leaf_2));
    // But getting a leaf that is not in the target delta directly should result in the same
    // traversal.
    assert_eq!(view.leaf_value(&leaf_4_ix), Some(&leaf_4));
    // And getting a leaf that does not exist in any of the versions should return None.
    assert!(view.leaf_value(&LeafIndex::new(1024).unwrap()).is_none());
    // Finally, getting a full value from a compact leaf should yield the value directly from
    // the target version if the target version overlays it AND contains it.
    assert_eq!(view.value(&l1_e1_key), Some(Some(&l1_e1_value)));
    assert_eq!(view.value(&l2_e1_key), Some(Some(&l2_e1_value)));
    assert_eq!(view.value(&l2_e2_key), Some(Some(&l2_e2_value)));
    // However, if the leaf exists but does not contain the provided word, it should return the
    // sentinel `Some(None)`.
    let mut ne_key_in_existing_leaf: Word = rand_value();
    ne_key_in_existing_leaf[3] = Felt::from_u64(leaf_1_ix.value());
    assert_eq!(view.value(&ne_key_in_existing_leaf), Some(None));
    // If the leaf is not overlaid, then the lookup should go up the chain just as in the other
    // cases.
    assert_eq!(view.value(&l4_e1_key), Some(Some(&l4_e1_value)));
    // But if nothing is found, it should just return None.
    let ne_key: Word = rand_value();
    assert!(view.value(&ne_key).is_none());
    // We can also get views for versions that are not directly contained, such as a version
    // newer than the newest. This should just use the newest version to service the query.
    let view = history.get_view_at(7)?;
    assert_eq!(view.node_value(&NodeIndex::new_unchecked(30, 1)), Some(&n5_value));
    assert!(view.node_value(&NodeIndex::new_unchecked(30, 2)).is_none());
    Ok(())
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/large_forest/history/error.rs | miden-crypto/src/merkle/smt/large_forest/history/error.rs | //! The error type and utility types for working with errors from the SMT history construct.
use thiserror::Error;
use crate::merkle::smt::large_forest::history::VersionId;
/// The type of errors returned by the history container.
#[derive(Debug, Error, PartialEq)]
pub enum HistoryError {
    /// Raised when a query expects the history to contain at least one entry, but it is empty.
    #[error("The history was empty")]
    HistoryEmpty,
    /// Raised when a version is added to the history and is not newer than the previous.
    ///
    /// The first field is the rejected version id, while the second is the newest id already
    /// stored in the history.
    #[error("Version {0} is not monotonic with respect to {1}")]
    NonMonotonicVersions(VersionId, VersionId),
    /// Raised when no version exists in the history for an arbitrary query.
    #[error("The specified version is too old to be served by the history")]
    VersionTooOld,
}
/// The result type for use within the history container.
pub type Result<T> = core::result::Result<T, HistoryError>;
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/large_forest/history/mod.rs | miden-crypto/src/merkle/smt/large_forest/history/mod.rs | //! This module contains the definition of [`History`], a simple container for some number of
//! historical versions of a given merkle tree.
//!
//! This history consists of a series of _deltas_ from the current state of the tree, moving
//! backward in history away from that current state. These deltas are then used to form a "merged
//! overlay" that represents the changes to be made on top of the current tree to put it _back_ in
//! that historical state.
//!
//! It provides functionality for adding new states to the history, as well as for querying the
//! history at a given point in time.
//!
//! # Complexity
//!
//! Versions in this structure are _cumulative_. To get the entire picture of an arbitrary node or
//! leaf at version `v` it may be necessary to check for changes in all versions between `v` and the
//! current tree state. This gives worst-case complexity `O(v)` when querying a node or leaf for the
//! version `v`.
//!
//! This is acceptable overhead as we assert that newer versions are far more likely to be queried
//! than older versions. Nevertheless, it may be improved in future using a sharing approach, but
//! that potential improvement is being ignored for now for the sake of simplicity.
//!
//! # Performance
//!
//! This structure operates entirely in memory, and is hence reasonably quick to query. As of the
//! current time, no detailed benchmarking has taken place for the history, but based on some basic
//! profiling the major time taken is in chasing pointers throughout memory due to the use of
//! [`Map`]s, but this is unavoidable in the current structure and may need to be revisited in
//! the future.
pub mod error;
#[cfg(test)]
mod tests;
use alloc::collections::VecDeque;
use core::fmt::Debug;
use error::{HistoryError, Result};
use crate::{
Map, Set, Word,
merkle::{
NodeIndex,
smt::{LeafIndex, SMT_DEPTH},
},
};
// UTILITY TYPE ALIASES
// ================================================================================================
/// A compact leaf is a mapping from full word-length keys to word-length values, intended to be
/// stored in the leaves of an otherwise shallower merkle tree.
pub type CompactLeaf = Map<Word, Word>;
/// A collection of changes to arbitrary non-leaf nodes in a merkle tree.
///
/// All changes to nodes between versions `v` and `v + 1` must be explicitly "undone" in the
/// `NodeChanges` representing version `v`. This includes nodes that were defaulted in version `v`
/// that were given an explicit value in version `v + 1`, where the `NodeChanges` must explicitly
/// set those nodes back to the default.
///
/// Failure to do so will result in incorrect values when those nodes are queried at a point in the
/// history corresponding to version `v`.
pub type NodeChanges = Map<NodeIndex, Word>;
/// A collection of changes to arbitrary leaf nodes in a merkle tree.
///
/// This represents the state of the leaf wholesale, rather than as a delta from the newer version.
/// This massively simplifies querying leaves in the history.
///
/// Note that if in the version of the tree represented by these `LeafChanges` had the default value
/// at the leaf, this default value must be made concrete in the map. Failure to do so will retain a
/// newer, non-default value for that leaf, and thus result in incorrect query results at this point
/// in the history.
pub type LeafChanges = Map<LeafIndex<SMT_DEPTH>, CompactLeaf>;
/// An identifier for a historical tree version overlay, which must be monotonic as new versions are
/// added.
pub type VersionId = u64;
// HISTORY
// ================================================================================================
/// A History contains a sequence of versions atop a given tree.
///
/// The versions are _cumulative_, meaning that querying the history must account for changes from
/// the current tree that take place in versions that are not the queried version or the current
/// tree.
#[derive(Clone, Debug)]
pub struct History {
    /// The maximum number of historical versions to be stored.
    max_count: usize,
    /// The deltas that make up the history for this tree.
    ///
    /// It will never contain more than `max_count` deltas, and is ordered with the oldest data at
    /// the lowest index.
    ///
    /// # Implementation Note
    ///
    /// As we are targeting small numbers of history items (e.g. 30), having a sequence with an
    /// allocated capacity equal to the small maximum number of items is perfectly sane. This will
    /// avoid costly reallocations in the fast path.
    ///
    /// We use a [`VecDeque`] instead of a [`Vec`] or [`alloc::collections::LinkedList`] as we
    /// estimate that the vast majority of removals will be the oldest entries as new ones are
    /// pushed. This means that we can optimize for those removals along with indexing performance,
    /// rather than optimizing for more rare removals from the middle of the sequence.
    deltas: VecDeque<Delta>,
}
impl History {
    /// Constructs a new history container, containing at most `max_count` historical versions for
    /// a tree.
    #[must_use]
    pub fn empty(max_count: usize) -> Self {
        // We allocate one more slot than we will ever retain so that `add_version` can insert
        // first and THEN evict the oldest entry, rather than the other way around. This leads to
        // negligible increases in memory usage while allowing for cleaner code.
        let deltas = VecDeque::with_capacity(max_count + 1);
        Self { max_count, deltas }
    }

    /// Gets the maximum number of versions that this history can store.
    #[must_use]
    pub fn max_versions(&self) -> usize {
        self.max_count
    }

    /// Gets the current number of versions in the history.
    #[must_use]
    pub fn num_versions(&self) -> usize {
        self.deltas.len()
    }

    /// Returns all the roots that the history knows about.
    ///
    /// # Complexity
    ///
    /// Calling this method requires a traversal of all the versions and is hence linear in the
    /// number of history versions.
    #[must_use]
    pub fn roots(&self) -> Set<Word> {
        self.deltas.iter().map(|delta| delta.root).collect()
    }

    /// Returns `true` if `root` is in the history and `false` otherwise.
    ///
    /// # Complexity
    ///
    /// Calling this method requires a traversal of all the versions and is hence linear in the
    /// number of history versions.
    #[must_use]
    pub fn is_known_root(&self, root: Word) -> bool {
        self.deltas.iter().any(|delta| delta.root == root)
    }

    /// Adds a version to the history with the provided `root` and represented by the changes from
    /// the current tree given in `nodes` and `leaves`.
    ///
    /// If adding this version would result in exceeding `self.max_count` historical versions, then
    /// the oldest of the versions is automatically removed.
    ///
    /// # Gotchas
    ///
    /// When constructing the `nodes` and `leaves`, keep in mind that those collections must contain
    /// entries for the **default value of a node or leaf** at any position where the tree was
    /// sparse in the state represented by `root`. If this is not done, incorrect values may be
    /// returned.
    ///
    /// This is necessary because the changes are the _reverse_ from what one might expect. Namely,
    /// the changes in a given version `v` must "_revert_" the changes made in the transition from
    /// version `v` to version `v + 1`.
    ///
    /// # Errors
    ///
    /// - [`HistoryError::NonMonotonicVersions`] if the provided version is not greater than the
    ///   previously added version.
    pub fn add_version(
        &mut self,
        root: Word,
        version_id: VersionId,
        nodes: NodeChanges,
        leaves: LeafChanges,
    ) -> Result<()> {
        // Version ids must be strictly increasing with respect to the newest stored delta.
        // `VecDeque::back` gives us the newest delta in constant time.
        if let Some(newest) = self.deltas.back() {
            if newest.version_id >= version_id {
                return Err(HistoryError::NonMonotonicVersions(version_id, newest.version_id));
            }
        }
        self.deltas.push_back(Delta::new(root, version_id, nodes, leaves));
        // Evict the oldest version if the configured capacity has been exceeded.
        if self.num_versions() > self.max_versions() {
            self.deltas.pop_front();
        }
        Ok(())
    }

    /// Returns the index in the sequence of deltas of the version that corresponds to the provided
    /// `version_id`.
    ///
    /// To "correspond" means that it either has the provided `version_id`, or is the newest version
    /// with a `version_id` less than the provided id. In either case, it is the correct version to
    /// be used to query the tree state in the provided `version_id`.
    ///
    /// # Complexity
    ///
    /// Finding the latest corresponding version in the history requires a linear traversal of the
    /// history entries, and hence has complexity `O(n)` in the number of versions.
    ///
    /// # Errors
    ///
    /// - [`HistoryError::VersionTooOld`] if the history is empty, or if the history does not
    ///   contain the data to provide a coherent overlay for the provided `version_id` due to
    ///   `version_id` being older than the oldest version stored.
    fn find_latest_corresponding_version(&self, version_id: VersionId) -> Result<usize> {
        // An empty history, or one whose oldest delta is newer than the requested version, cannot
        // coherently serve the request.
        match self.deltas.front() {
            Some(oldest) if oldest.version_id <= version_id => (),
            _ => return Err(HistoryError::VersionTooOld),
        }
        // The first delta strictly newer than `version_id` sits one slot past the one we want; if
        // no delta is newer, the newest delta is the one we want.
        let ix = self
            .deltas
            .iter()
            .position(|d| d.version_id > version_id)
            .unwrap_or(self.num_versions())
            .checked_sub(1)
            .expect(
                "Subtraction should not overflow as we have ruled out the no-version \
                case, and in the other cases the left operand will be >= 1",
            );
        Ok(ix)
    }

    /// Returns a view of the history that allows querying as a single unified overlay on the
    /// current state of the merkle tree as if the overlay was reverting the tree to the state
    /// corresponding to the specified `version_id`.
    ///
    /// Note that the history may not contain a version that directly corresponds to `version_id`.
    /// In such a case, the view will instead use the newest version coherent with the provided
    /// `version_id`, as this is the correct version for the provided id. Note that this will be
    /// incorrect if the versions stored in the history do not represent contiguous changes from the
    /// current tree.
    ///
    /// # Complexity
    ///
    /// The computational complexity of this method is linear in the number of versions stored in
    /// the history.
    ///
    /// # Errors
    ///
    /// - [`HistoryError::VersionTooOld`] if the history does not contain the data to provide a
    ///   coherent overlay for the provided `version_id` due to `version_id` being older than the
    ///   oldest version stored.
    pub fn get_view_at(&self, version_id: VersionId) -> Result<HistoryView<'_>> {
        let version_index = self.find_latest_corresponding_version(version_id)?;
        Ok(HistoryView::new_of(version_index, self))
    }

    /// Removes all versions in the history that are older than the version denoted by the provided
    /// `version_id`, returning the number of versions removed.
    ///
    /// If `version_id` is not a version known by the history, it will keep the newest version that
    /// is capable of serving as that version in queries.
    ///
    /// # Complexity
    ///
    /// The computational complexity of this method is linear in the number of versions stored in
    /// the history prior to any removals.
    pub fn truncate(&mut self, version_id: VersionId) -> usize {
        // It is not an error to truncate to a version older than anything we store; in that case
        // there is simply nothing to remove.
        let truncate_ix = self.find_latest_corresponding_version(version_id).unwrap_or(0);
        // Drop every delta strictly older than the retained version in a single pass.
        self.deltas.drain(..truncate_ix);
        truncate_ix
    }

    /// Removes all versions from the history.
    pub fn clear(&mut self) {
        self.deltas.clear();
    }
}
// HISTORY VIEW
// ================================================================================================
/// A read-only view of the history overlay on the tree at a specified place in the history.
#[derive(Debug)]
pub struct HistoryView<'history> {
    /// The index of the target version in the history.
    ///
    /// Lookups start at this index and scan toward newer versions (higher indices).
    version_ix: usize,
    /// The history that actually stores the data that will be queried.
    history: &'history History,
}
impl<'history> HistoryView<'history> {
    /// Constructs a new history view that acts as a single overlay of the state represented by
    /// the delta at index `version_ix` within `history`.
    ///
    /// # Complexity
    ///
    /// This constructor only stores the pre-computed index and borrows the history, and hence
    /// runs in constant time.
    fn new_of(version_ix: usize, history: &'history History) -> Self {
        Self { version_ix, history }
    }

    /// Gets the value of the node in the history at the provided `index`, or returns `None` if the
    /// version does not overlay the current tree at that node.
    ///
    /// # Complexity
    ///
    /// The computational complexity of this method is linear in the number of versions, as the
    /// deltas from the target version toward the newest must be scanned for the first (i.e.
    /// oldest) change to that node.
    #[must_use]
    pub fn node_value(&self, index: &NodeIndex) -> Option<&Word> {
        self.history
            .deltas
            .iter()
            .skip(self.version_ix)
            .find_map(|v| v.nodes.get(index))
    }

    /// Gets the value of the entire leaf in the history at the specified `index`, or returns `None`
    /// if the version does not overlay the current tree at that leaf.
    ///
    /// # Complexity
    ///
    /// The computational complexity of this method is linear in the number of versions, as the
    /// deltas from the target version toward the newest must be scanned for the first (i.e.
    /// oldest) change to that leaf.
    #[must_use]
    pub fn leaf_value(&self, index: &LeafIndex<SMT_DEPTH>) -> Option<&CompactLeaf> {
        self.history
            .deltas
            .iter()
            .skip(self.version_ix)
            .find_map(|v| v.leaves.get(index))
    }

    /// Queries the value of a specific key in a leaf in the overlay, returning:
    ///
    /// - `None` if the version does not overlay that leaf in the current tree,
    /// - `Some(None)` if the version does overlay that leaf but the compact leaf does not contain
    ///   that value,
    /// - and `Some(Some(v))` if the version does overlay the leaf and the key exists in that leaf.
    ///
    /// # Complexity
    ///
    /// The computational complexity of this method is linear in the number of versions due to the
    /// need to traverse to find the correct overlay value.
    #[must_use]
    pub fn value(&self, key: &Word) -> Option<Option<&Word>> {
        self.leaf_value(&LeafIndex::from(*key)).map(|leaf| leaf.get(key))
    }
}
// DELTA
// ================================================================================================
/// A delta for a state `n` represents the changes (to both nodes and leaves) that need to be
/// applied on top of the state `n + 1` to yield the correct tree for state `n`.
///
/// # Cumulative Deltas and Temporal Ordering
///
/// In order to best represent the history of a merkle tree, these deltas are constructed to take
/// advantage of two main properties:
///
/// - They are _cumulative_, which reduces their practical memory usage. This does, however, mean
/// that querying the state of older blocks is more expensive than querying newer ones.
/// - Deltas are applied in **temporally reversed order** from what one might expect. Most
/// conventional applications of deltas bring something from the past into the future through
/// application. In our case, the application of one or more deltas moves the tree into a **past
/// state**.
///
/// # Construction
///
/// While the [`Delta`] type is visible in the interface of the history, it is only intended to be
/// constructed by the history. Users should not be allowed to construct it directly.
#[derive(Clone, Debug, PartialEq)]
struct Delta {
    /// The root of the tree at `version_id`, i.e. after the reversions in this delta have been
    /// applied to the previous (newer) tree state.
    pub root: Word,
    /// The version of the tree represented by the delta.
    pub version_id: VersionId,
    /// Any changes to the non-leaf nodes in the tree for this delta.
    pub nodes: NodeChanges,
    /// Any changes to the leaf nodes in the tree for this delta.
    ///
    /// Note that the leaf state is **not represented compactly**, and describes the entire state
    /// of the leaf in the corresponding version.
    pub leaves: LeafChanges,
}
impl Delta {
    /// Creates a new delta with the provided `root` and `version_id`, representing the provided
    /// reversion changes to `nodes` and `leaves` in the merkle tree.
    #[must_use]
    fn new(root: Word, version_id: VersionId, nodes: NodeChanges, leaves: LeafChanges) -> Self {
        Self { root, version_id, nodes, leaves }
    }
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/large/batch_ops.rs | miden-crypto/src/merkle/smt/large/batch_ops.rs | use alloc::vec::Vec;
use core::mem;
use num::Integer;
use rayon::prelude::*;
use super::{
IN_MEMORY_DEPTH, LargeSmt, LargeSmtError, LoadedLeaves, MutatedLeaves, ROOT_MEMORY_INDEX,
SMT_DEPTH, SmtStorage, StorageUpdates, Subtree, SubtreeUpdate,
};
use crate::{
Word,
merkle::smt::{
EmptySubtreeRoots, LeafIndex, Map, MerkleError, MutationSet, NodeIndex, NodeMutation,
NodeMutations, SmtLeaf, SparseMerkleTree,
full::concurrent::{
SUBTREE_DEPTH, SubtreeLeaf, SubtreeLeavesIter, fetch_sibling_pair,
process_sorted_pairs_to_leaves,
},
},
};
// TYPES
// ================================================================================================
/// Prepared mutations loaded from storage, ready to be applied.
struct PreparedMutations {
    /// Root the mutations were computed against (validated to match the current tree
    /// during preparation).
    old_root: Word,
    /// Root the tree will have once the mutations are applied.
    new_root: Word,
    /// Node mutations, sorted by their containing subtree root index.
    sorted_node_mutations: Vec<(NodeIndex, NodeMutation)>,
    /// Subtrees preloaded from storage, keyed by subtree root index. Subtrees absent
    /// from storage are instantiated empty; a `None` entry marks a subtree that should
    /// be deleted from storage.
    loaded_subtrees: Map<NodeIndex, Option<Subtree>>,
    /// Updated key-value pairs to fold into the leaves.
    new_pairs: Map<Word, Word>,
    /// Leaves preloaded from storage, keyed by leaf index; `None` means the leaf is
    /// absent from storage.
    leaf_map: Map<u64, Option<SmtLeaf>>,
}
// BATCH OPERATIONS
// ================================================================================================
impl<S: SmtStorage> LargeSmt<S> {
/// Processes one set of `subtree_leaves` at a given `subtree_root_depth` and returns:
/// - node mutations to apply to in-memory nodes (empty if handled in subtree),
/// - the computed subtree root leaf, and
/// - an optional storage update instruction for the subtree.
fn process_subtree_for_depth(
    &self,
    subtree_leaves: Vec<SubtreeLeaf>,
    subtree_root_depth: u8,
) -> (NodeMutations, SubtreeLeaf, Option<SubtreeUpdate>) {
    debug_assert!(subtree_leaves.is_sorted() && !subtree_leaves.is_empty());
    // All `subtree_leaves` are expected to fall within one subtree, so the first
    // leaf's column determines the subtree root index.
    let subtree_root_index =
        NodeIndex::new_unchecked(subtree_root_depth, subtree_leaves[0].col >> SUBTREE_DEPTH);
    // Load subtree from storage if below in-memory horizon; otherwise use in-memory nodes
    let mut subtree_opt = if subtree_root_depth < IN_MEMORY_DEPTH {
        None
    } else {
        Some(
            self.storage
                .get_subtree(subtree_root_index)
                .expect("Storage error getting subtree in insert_batch")
                .unwrap_or_else(|| Subtree::new(subtree_root_index)),
        )
    };
    // Build mutations for the subtree
    let (mutations, root) = self.build_subtree_mutations(
        subtree_leaves,
        SMT_DEPTH,
        subtree_root_depth,
        subtree_opt.as_ref(),
    );
    let (in_memory_mutations, subtree_update) = if subtree_root_depth < IN_MEMORY_DEPTH {
        // In-memory nodes: return mutations for direct application
        (mutations, None)
    } else {
        // Storage nodes: apply mutations to loaded subtree and determine storage action
        let modified = !mutations.is_empty();
        if let Some(subtree) = subtree_opt.as_mut() {
            for (index, mutation) in mutations {
                match mutation {
                    NodeMutation::Removal => {
                        subtree.remove_inner_node(index);
                    },
                    NodeMutation::Addition(node) => {
                        subtree.insert_inner_node(index, node);
                    },
                }
            }
        }
        // A modified subtree that ended up empty is deleted from storage rather
        // than stored; an unmodified subtree requires no storage action at all.
        let update = if !modified {
            None
        } else if let Some(subtree) = subtree_opt
            && !subtree.is_empty()
        {
            Some(SubtreeUpdate::Store { index: subtree_root_index, subtree })
        } else {
            Some(SubtreeUpdate::Delete { index: subtree_root_index })
        };
        (NodeMutations::default(), update)
    };
    (in_memory_mutations, root, subtree_update)
}
/// Helper function to load leaves from storage for a set of key-value pairs.
///
/// Returns the sorted, deduplicated leaf indices together with a map from leaf
/// index to the leaf loaded from storage (`None` when the leaf is absent).
///
/// # Errors
/// Returns an error if the batch storage read fails.
fn load_leaves_for_pairs(
    &self,
    sorted_kv_pairs: &[(Word, Word)],
) -> Result<LoadedLeaves, LargeSmtError> {
    // Collect the leaf index of every pair
    let mut leaf_indices: Vec<u64> = sorted_kv_pairs
        .iter()
        .map(|(key, _)| Self::key_to_leaf_index(key).value())
        .collect();
    // Sort *before* deduplicating: `dedup` only removes consecutive duplicates, so
    // the previous dedup-then-sort order silently relied on the caller having
    // pre-sorted the pairs. Sorting first makes the helper robust regardless of
    // input order and matches the sort-then-dedup order used by
    // `prepare_mutations`.
    leaf_indices.par_sort_unstable();
    leaf_indices.dedup();
    // Get leaves from storage in a single batch
    let leaves_from_storage = self.storage.get_leaves(&leaf_indices)?;
    // Map leaf indices to their corresponding leaves
    let leaf_map: Map<u64, Option<SmtLeaf>> = leaf_indices
        .iter()
        .zip(leaves_from_storage)
        .map(|(index, maybe_leaf)| (*index, maybe_leaf))
        .collect();
    Ok((leaf_indices, leaf_map))
}
/// Computes leaves from a set of key-value pairs and current leaf values.
///
/// Returns: (leaves for tree building, map of mutated leaf nodes, changed key-value pairs,
/// leaf_delta, entry_delta)
fn sorted_pairs_to_mutated_leaves_with_preloaded_leaves(
    &self,
    pairs: Vec<(Word, Word)>,
    leaf_map: &Map<u64, Option<SmtLeaf>>,
) -> MutatedLeaves {
    // Map to track new key-value pairs for mutated leaves
    let mut new_pairs = Map::new();
    // Net change to the tree-wide leaf and entry counts caused by this batch.
    let mut leaf_count_delta = 0isize;
    let mut entry_count_delta = 0isize;
    let accumulator = process_sorted_pairs_to_leaves(pairs, |leaf_pairs| {
        let leaf_index = LeafIndex::<SMT_DEPTH>::from(leaf_pairs[0].0);
        // Current leaf state as preloaded from storage (`None` if absent).
        let old_leaf_opt = leaf_map.get(&leaf_index.value()).and_then(|opt| opt.as_ref());
        let old_entry_count = old_leaf_opt.map(|leaf| leaf.entries().len()).unwrap_or(0);
        let mut leaf = old_leaf_opt
            .cloned()
            .unwrap_or_else(|| SmtLeaf::new_empty(leaf_pairs[0].0.into()));
        let mut leaf_changed = false;
        for (key, value) in leaf_pairs {
            // Check if the value has changed, preferring a value already staged
            // earlier in this batch over the stored one.
            let old_value = new_pairs.get(&key).cloned().unwrap_or_else(|| {
                // Safe to unwrap: `leaf_pairs` contains keys all belonging to this leaf.
                // `SmtLeaf::get_value()` only returns `None` if the key does not belong to the
                // leaf, which cannot happen due to the sorting/grouping
                // logic in `process_sorted_pairs_to_leaves()`.
                leaf.get_value(&key).unwrap()
            });
            if value != old_value {
                // Update the leaf and track the new key-value pair
                leaf = self
                    .construct_prospective_leaf(leaf, &key, &value)
                    .expect("Failed to construct prospective leaf");
                new_pairs.insert(key, value);
                leaf_changed = true;
            }
        }
        if leaf_changed {
            // Calculate deltas by comparing the leaf's old and new shapes
            let new_entry_count = leaf.entries().len();
            match (&leaf, old_leaf_opt) {
                (SmtLeaf::Empty(_), Some(_)) => {
                    // Leaf was deleted
                    leaf_count_delta -= 1;
                    entry_count_delta -= old_entry_count as isize;
                },
                (SmtLeaf::Empty(_), None) => {
                    // Was empty, still empty (shouldn't happen with leaf_changed=true)
                    unreachable!("Leaf was empty, but leaf_changed=true");
                },
                (_, None) => {
                    // New leaf created
                    leaf_count_delta += 1;
                    entry_count_delta += new_entry_count as isize;
                },
                (_, Some(_)) => {
                    // Leaf updated (not empty)
                    entry_count_delta += new_entry_count as isize - old_entry_count as isize;
                },
            }
            // Only return the leaf if it actually changed
            Ok(Some(leaf))
        } else {
            // Return None if leaf hasn't changed
            Ok(None)
        }
    });
    // The closure is the only possible source of errors.
    // Since it never returns an error - only `Ok(Some(_))` or `Ok(None)` - we can safely assume
    // `accumulator` is always `Ok(_)`.
    let accumulator = accumulator.expect("process_sorted_pairs_to_leaves never fails");
    (
        accumulator.leaves,
        accumulator.nodes,
        new_pairs,
        leaf_count_delta,
        entry_count_delta,
    )
}
/// Computes the node mutations and the root of a subtree.
///
/// Processes `leaves` bottom-up: starting at `subtree_root_depth + SUBTREE_DEPTH`,
/// siblings at each level are paired and hashed into their parent until a single
/// root leaf remains at `subtree_root_depth`. Every recomputed parent is recorded
/// as an addition, or as a removal if its hash collapses back to the empty-subtree
/// hash. Returns the mutations together with the subtree's root as a
/// [`SubtreeLeaf`] for the next level up.
fn build_subtree_mutations(
    &self,
    mut leaves: Vec<SubtreeLeaf>,
    tree_depth: u8,
    subtree_root_depth: u8,
    subtree: Option<&Subtree>,
) -> (NodeMutations, SubtreeLeaf) {
    let bottom_depth = subtree_root_depth + SUBTREE_DEPTH;
    debug_assert!(bottom_depth <= tree_depth);
    debug_assert!(Integer::is_multiple_of(&bottom_depth, &SUBTREE_DEPTH));
    debug_assert!(leaves.len() <= usize::pow(2, SUBTREE_DEPTH as u32));
    let mut node_mutations: NodeMutations = Default::default();
    let mut next_leaves: Vec<SubtreeLeaf> = Vec::with_capacity(leaves.len() / 2);
    for current_depth in (subtree_root_depth..bottom_depth).rev() {
        debug_assert!(current_depth <= bottom_depth);
        let next_depth = current_depth + 1;
        let mut iter = leaves.drain(..).peekable();
        while let Some(first_leaf) = iter.next() {
            // This constructs a valid index because next_depth will never exceed the depth of
            // the tree.
            let parent_index = NodeIndex::new_unchecked(next_depth, first_leaf.col).parent();
            // Resolve the pre-mutation parent node: prefer the loaded subtree; for
            // storage-resident depths without a loaded subtree fall back to the
            // canonical empty node; otherwise read the in-memory node.
            let parent_node = if let Some(sub) = subtree {
                sub.get_inner_node(parent_index).unwrap_or_else(|| {
                    EmptySubtreeRoots::get_inner_node(SMT_DEPTH, parent_index.depth())
                })
            } else if subtree_root_depth >= IN_MEMORY_DEPTH {
                EmptySubtreeRoots::get_inner_node(SMT_DEPTH, parent_index.depth())
            } else {
                self.get_inner_node(parent_index)
            };
            let combined_node = fetch_sibling_pair(&mut iter, first_leaf, parent_node);
            let combined_hash = combined_node.hash();
            let &empty_hash = EmptySubtreeRoots::entry(tree_depth, current_depth);
            // Add the parent node even if it is empty for proper upward updates
            next_leaves.push(SubtreeLeaf {
                col: parent_index.value(),
                hash: combined_hash,
            });
            // A node whose hash equals the empty-subtree hash is recorded as a
            // removal rather than stored.
            node_mutations.insert(
                parent_index,
                if combined_hash != empty_hash {
                    NodeMutation::Addition(combined_node)
                } else {
                    NodeMutation::Removal
                },
            );
        }
        drop(iter);
        leaves = mem::take(&mut next_leaves);
    }
    debug_assert_eq!(leaves.len(), 1);
    let root_leaf = leaves.pop().unwrap();
    (node_mutations, root_leaf)
}
/// Inserts multiple key-value pairs into the tree in a single batch operation.
///
/// This is the recommended method for bulk insertions, updates, and deletions. It efficiently
/// processes all changes by loading each subtree and leaf only once, applying all mutations
/// in-place, and leveraging parallel hashing throughout.
///
/// To delete entries, pass [`EMPTY_WORD`](crate::EMPTY_WORD) as the value.
///
/// # Returns
/// Returns the new root hash of the tree after applying all changes.
///
/// # Errors
/// Returns an error if:
/// - Any leaf would exceed [`MAX_LEAF_ENTRIES`](crate::merkle::smt::MAX_LEAF_ENTRIES) (1024
/// entries)
/// - Storage operations fail
///
/// # Example
/// ```no_run
/// # #[cfg(feature = "rocksdb")]
/// # {
/// use miden_crypto::{
/// EMPTY_WORD, Felt, Word,
/// merkle::smt::{LargeSmt, RocksDbConfig, RocksDbStorage},
/// };
///
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let storage = RocksDbStorage::open(RocksDbConfig::new("/path/to/db"))?;
/// let mut smt = LargeSmt::load(storage)?;
///
/// let entries = vec![
/// // Insert new entries
/// (
/// Word::new([Felt::new(1), Felt::new(0), Felt::new(0), Felt::new(0)]),
/// Word::new([Felt::new(10), Felt::new(20), Felt::new(30), Felt::new(40)]),
/// ),
/// (
/// Word::new([Felt::new(2), Felt::new(0), Felt::new(0), Felt::new(0)]),
/// Word::new([Felt::new(11), Felt::new(22), Felt::new(33), Felt::new(44)]),
/// ),
/// // Delete an entry
/// (Word::new([Felt::new(3), Felt::new(0), Felt::new(0), Felt::new(0)]), EMPTY_WORD),
/// ];
///
/// let new_root = smt.insert_batch(entries)?;
/// # Ok(())
/// # }
/// # }
/// ```
pub fn insert_batch(
    &mut self,
    kv_pairs: impl IntoIterator<Item = (Word, Word)>,
) -> Result<Word, LargeSmtError>
where
    Self: Sized + Sync,
{
    // Sort key-value pairs by leaf index so pairs for the same leaf are adjacent
    let mut sorted_kv_pairs: Vec<_> = kv_pairs.into_iter().collect();
    sorted_kv_pairs.par_sort_by_key(|(key, _)| Self::key_to_leaf_index(key).value());
    // Load all affected leaves from storage in one batch
    let (_leaf_indices, leaf_map) = self.load_leaves_for_pairs(&sorted_kv_pairs)?;
    // Process leaves in parallel to get mutated leaves for tree building AND deltas
    let (mut leaves, mutated_leaf_nodes, _new_pairs, leaf_count_delta, entry_count_delta) =
        self.sorted_pairs_to_mutated_leaves_with_preloaded_leaves(sorted_kv_pairs, &leaf_map);
    // Early return if no mutations
    if leaves.is_empty() {
        return Ok(self.root());
    }
    // Pre-allocate capacity for subtree updates.
    let mut subtree_updates: Vec<SubtreeUpdate> = Vec::with_capacity(leaves.len());
    // Process each depth level in reverse, stepping by the subtree depth
    for subtree_root_depth in
        (0..=SMT_DEPTH - SUBTREE_DEPTH).step_by(SUBTREE_DEPTH as usize).rev()
    {
        // Build mutations and apply them to loaded subtrees
        let subtree_count = leaves.len();
        let is_in_memory = subtree_root_depth < IN_MEMORY_DEPTH;
        // Size the fold accumulators up front: in-memory levels produce node
        // mutations but no storage updates; storage levels are the reverse.
        let mutations_capacity = if is_in_memory {
            subtree_count * SUBTREE_DEPTH as usize
        } else {
            0
        };
        let updates_capacity = if is_in_memory { 0 } else { subtree_count };
        let (in_memory_mutations, mut subtree_roots, modified_subtrees) = leaves
            .into_par_iter()
            .map(|subtree_leaves| {
                self.process_subtree_for_depth(subtree_leaves, subtree_root_depth)
            })
            .fold(
                || {
                    (
                        Vec::with_capacity(mutations_capacity),
                        Vec::with_capacity(subtree_count),
                        Vec::with_capacity(updates_capacity),
                    )
                },
                |(mut muts, mut roots, mut subtrees), (mem_muts, root, subtree_update)| {
                    muts.extend(mem_muts);
                    roots.push(root);
                    if let Some(update) = subtree_update {
                        subtrees.push(update);
                    }
                    (muts, roots, subtrees)
                },
            )
            .reduce(
                || (Vec::new(), Vec::new(), Vec::new()),
                |(mut m1, mut r1, mut s1), (m2, r2, s2)| {
                    m1.extend(m2);
                    r1.extend(r2);
                    s1.extend(s2);
                    (m1, r1, s1)
                },
            );
        // Apply in-memory mutations
        for (index, mutation) in in_memory_mutations {
            match mutation {
                NodeMutation::Removal => self.remove_inner_node(index),
                NodeMutation::Addition(node) => self.insert_inner_node(index, node),
            };
        }
        // Collect modified subtrees directly into the updates vector.
        subtree_updates.extend(modified_subtrees);
        // Prepare leaves for the next depth level
        leaves = SubtreeLeavesIter::from_leaves(&mut subtree_roots).collect();
        debug_assert!(!leaves.is_empty());
    }
    // After the final (depth 0) iteration a single root leaf remains.
    let new_root = leaves[0][0].hash;
    self.in_memory_nodes[ROOT_MEMORY_INDEX] = new_root;
    // Build leaf updates for storage (convert Empty to None for deletion)
    let mut leaf_update_map = leaf_map;
    for (idx, mutated_leaf) in mutated_leaf_nodes {
        let leaf_opt = match mutated_leaf {
            // Delete from storage
            SmtLeaf::Empty(_) => None,
            _ => Some(mutated_leaf),
        };
        leaf_update_map.insert(idx, leaf_opt);
    }
    // Atomic update to storage
    let updates = StorageUpdates::from_parts(
        leaf_update_map,
        subtree_updates,
        leaf_count_delta,
        entry_count_delta,
    );
    self.storage.apply(updates)?;
    // Update cached counts only after the storage commit succeeded
    self.leaf_count = self.leaf_count.saturating_add_signed(leaf_count_delta);
    self.entry_count = self.entry_count.saturating_add_signed(entry_count_delta);
    Ok(new_root)
}
/// Prepares mutations by loading necessary data from storage.
///
/// Validates that the mutation set was computed against this tree's current root,
/// sorts the node mutations by their containing subtree, and batch-loads all
/// affected subtrees and leaves from storage.
///
/// # Errors
/// Returns [`MerkleError::ConflictingRoots`] if the mutation set's old root does not
/// match the current root, or a storage error if a batch read fails.
fn prepare_mutations(
    &self,
    mutations: MutationSet<SMT_DEPTH, Word, Word>,
) -> Result<PreparedMutations, LargeSmtError> {
    let MutationSet {
        old_root,
        node_mutations,
        new_pairs,
        new_root,
    } = mutations;
    // Guard against accidentally trying to apply mutations that were computed against a
    // different tree, including a stale version of this tree.
    let expected_root = SparseMerkleTree::<SMT_DEPTH>::root(self);
    if old_root != expected_root {
        return Err(LargeSmtError::Merkle(MerkleError::ConflictingRoots {
            expected_root,
            actual_root: old_root,
        }));
    }
    // Sort mutations so entries belonging to the same subtree become adjacent
    let mut sorted_mutations: Vec<_> = Vec::from_iter(node_mutations);
    sorted_mutations.par_sort_unstable_by_key(|(index, _)| Subtree::find_subtree_root(*index));
    // Collect all unique subtree root indexes needed (only storage-resident depths)
    let mut subtree_roots_indices: Vec<NodeIndex> = sorted_mutations
        .iter()
        .filter_map(|(index, _)| {
            if index.depth() < IN_MEMORY_DEPTH {
                None
            } else {
                Some(Subtree::find_subtree_root(*index))
            }
        })
        .collect();
    // `dedup` is sufficient here: the source vector is sorted by subtree root, so
    // duplicates are consecutive.
    subtree_roots_indices.dedup();
    // Read all subtrees at once
    let subtrees_from_storage = self.storage.get_subtrees(&subtree_roots_indices)?;
    // Map the subtrees; subtrees missing from storage are instantiated empty so
    // mutations can be applied uniformly.
    let loaded_subtrees: Map<NodeIndex, Option<Subtree>> = subtree_roots_indices
        .into_iter()
        .zip(subtrees_from_storage)
        .map(|(root_index, subtree_opt)| {
            (root_index, Some(subtree_opt.unwrap_or_else(|| Subtree::new(root_index))))
        })
        .collect();
    // Collect and sort key-value pairs by their corresponding leaf index
    let mut sorted_kv_pairs: Vec<_> = new_pairs.iter().map(|(k, v)| (*k, *v)).collect();
    sorted_kv_pairs.par_sort_by_key(|(key, _)| LargeSmt::<S>::key_to_leaf_index(key).value());
    // Collect the unique leaf indices
    let mut leaf_indices: Vec<u64> = sorted_kv_pairs
        .iter()
        .map(|(key, _)| LargeSmt::<S>::key_to_leaf_index(key).value())
        .collect();
    leaf_indices.par_sort_unstable();
    leaf_indices.dedup();
    // Get leaves from storage
    let leaves = self.storage.get_leaves(&leaf_indices)?;
    // Map leaf indices to their corresponding leaves
    let leaf_map: Map<u64, Option<SmtLeaf>> = leaf_indices.into_iter().zip(leaves).collect();
    Ok(PreparedMutations {
        old_root,
        new_root,
        sorted_node_mutations: sorted_mutations,
        loaded_subtrees,
        new_pairs,
        leaf_map,
    })
}
/// Applies prepared mutations to the tree, updating storage.
///
/// Note: This and [`insert_batch()`](Self::insert_batch) are the only two methods that
/// persist changes to storage.
///
/// # Errors
/// Returns an error if the atomic storage update fails.
fn apply_prepared_mutations(
    &mut self,
    prepared: PreparedMutations,
) -> Result<(), LargeSmtError> {
    use NodeMutation::*;
    let PreparedMutations {
        old_root: _,
        new_root,
        sorted_node_mutations,
        mut loaded_subtrees,
        new_pairs,
        mut leaf_map,
    } = prepared;
    // Update the root in memory
    self.in_memory_nodes[ROOT_MEMORY_INDEX] = new_root;
    // Process node mutations: shallow nodes go to the in-memory layer, deeper nodes
    // to their preloaded subtree.
    for (index, mutation) in sorted_node_mutations {
        if index.depth() < IN_MEMORY_DEPTH {
            match mutation {
                Removal => {
                    SparseMerkleTree::<SMT_DEPTH>::remove_inner_node(self, index);
                },
                Addition(node) => {
                    SparseMerkleTree::<SMT_DEPTH>::insert_inner_node(self, index, node);
                },
            };
        } else {
            let subtree_root_index = Subtree::find_subtree_root(index);
            let subtree = loaded_subtrees
                .get_mut(&subtree_root_index)
                .expect("Subtree map entry must exist")
                .as_mut()
                .expect("Subtree must exist as it was either fetched or created");
            match mutation {
                Removal => {
                    subtree.remove_inner_node(index);
                },
                Addition(node) => {
                    subtree.insert_inner_node(index, node);
                },
            };
        }
    }
    // Go through subtrees, see if any are empty, and if so remove them; a `None`
    // entry is translated into a storage deletion below.
    for (_index, subtree) in loaded_subtrees.iter_mut() {
        if subtree.as_ref().is_some_and(|s| s.is_empty()) {
            *subtree = None;
        }
    }
    // Process leaf mutations, tracking the net change in leaf and entry counts
    let mut leaf_count_delta = 0isize;
    let mut entry_count_delta = 0isize;
    for (key, value) in new_pairs {
        let idx = LargeSmt::<S>::key_to_leaf_index(&key).value();
        let entry = leaf_map.entry(idx).or_insert(None);
        // New value is empty, handle deletion
        if value == LargeSmt::<S>::EMPTY_VALUE {
            if let Some(leaf) = entry {
                // Leaf exists, handle deletion
                let (old_value, is_empty) = leaf.remove(key);
                if old_value.is_some() {
                    // Key had previous value, decrement entry count
                    entry_count_delta -= 1;
                    if is_empty {
                        // Leaf is now empty, remove it and decrement leaf count
                        *entry = None;
                        leaf_count_delta -= 1;
                    }
                }
            }
        } else {
            // New value is not empty, handle update or create
            match entry {
                Some(leaf) => {
                    // Leaf exists, handle update
                    if leaf.insert(key, value).expect("Failed to insert value").is_none() {
                        // Key had no previous value, increment entry count
                        entry_count_delta += 1;
                    }
                },
                None => {
                    // Leaf does not exist, create it
                    *entry = Some(SmtLeaf::Single((key, value)));
                    // Increment both entry and leaf count
                    entry_count_delta += 1;
                    leaf_count_delta += 1;
                },
            }
        }
    }
    let updates = StorageUpdates::from_parts(
        leaf_map,
        loaded_subtrees.into_iter().map(|(index, subtree_opt)| match subtree_opt {
            Some(subtree) => SubtreeUpdate::Store { index, subtree },
            None => SubtreeUpdate::Delete { index },
        }),
        leaf_count_delta,
        entry_count_delta,
    );
    self.storage.apply(updates)?;
    // Update cached counts only after the storage commit succeeded
    self.leaf_count = self.leaf_count.saturating_add_signed(leaf_count_delta);
    self.entry_count = self.entry_count.saturating_add_signed(entry_count_delta);
    Ok(())
}
/// Computes what changes are necessary to insert the specified key-value pairs into this Merkle
/// tree, allowing for validation before applying those changes.
///
/// This method returns a [`MutationSet`], which contains all the information for inserting
/// `kv_pairs` into this Merkle tree already calculated, including the new root hash, which can
/// be queried with [`MutationSet::root()`]. Once a mutation set is returned,
/// [`LargeSmt::apply_mutations()`] can be called in order to commit these changes to the Merkle
/// tree, or [`drop()`] to discard them.
///
/// # Example
/// ```
/// # use miden_crypto::{Felt, Word};
/// # use miden_crypto::merkle::{EmptySubtreeRoots, smt::{LargeSmt, MemoryStorage, SMT_DEPTH}};
/// let mut smt = LargeSmt::new(MemoryStorage::new()).unwrap();
/// let pair = (Word::default(), Word::default());
/// let mutations = smt.compute_mutations(vec![pair]).expect("compute_mutations ok");
/// assert_eq!(mutations.root(), *EmptySubtreeRoots::entry(SMT_DEPTH, 0));
/// smt.apply_mutations(mutations);
/// assert_eq!(smt.root(), *EmptySubtreeRoots::entry(SMT_DEPTH, 0));
/// ```
pub fn compute_mutations(
    &self,
    kv_pairs: impl IntoIterator<Item = (Word, Word)>,
) -> Result<MutationSet<SMT_DEPTH, Word, Word>, LargeSmtError>
where
    Self: Sized + Sync,
{
    // Collect and sort key-value pairs by their corresponding leaf index
    let mut sorted_kv_pairs: Vec<_> = kv_pairs.into_iter().collect();
    sorted_kv_pairs
        .par_sort_unstable_by_key(|(key, _)| LargeSmt::<S>::key_to_leaf_index(key).value());
    // Load leaves from storage using helper
    let (_leaf_indices, leaf_map) = self.load_leaves_for_pairs(&sorted_kv_pairs)?;
    // Convert sorted pairs into mutated leaves and capture any new pairs
    let (mut leaves, _mutated_leaf_nodes, new_pairs, _leaf_count_delta, _entry_count_delta) =
        self.sorted_pairs_to_mutated_leaves_with_preloaded_leaves(sorted_kv_pairs, &leaf_map);
    // If no mutations, return an empty mutation set
    let old_root = SparseMerkleTree::<SMT_DEPTH>::root(self);
    if leaves.is_empty() {
        return Ok(MutationSet {
            old_root,
            new_root: old_root,
            node_mutations: NodeMutations::default(),
            new_pairs,
        });
    }
    let mut node_mutations = NodeMutations::default();
    // Process each depth level in reverse, stepping by the subtree depth
    for subtree_root_depth in
        (0..=SMT_DEPTH - SUBTREE_DEPTH).step_by(SUBTREE_DEPTH as usize).rev()
    {
        // Parallel processing of each subtree to generate mutations and roots
        let (mutations_per_subtree, mut subtree_roots): (Vec<_>, Vec<_>) = leaves
            .into_par_iter()
            .map(|subtree_leaves| {
                // Shallow (in-memory) levels need no stored subtree; deeper levels
                // read theirs from storage.
                let subtree_opt = if subtree_root_depth < IN_MEMORY_DEPTH {
                    None
                } else {
                    // Compute subtree root index
                    let subtree_root_index = NodeIndex::new_unchecked(
                        subtree_root_depth,
                        subtree_leaves[0].col >> SUBTREE_DEPTH,
                    );
                    self.storage
                        .get_subtree(subtree_root_index)
                        .expect("Storage error getting subtree in compute_mutations")
                };
                debug_assert!(subtree_leaves.is_sorted() && !subtree_leaves.is_empty());
                self.build_subtree_mutations(
                    subtree_leaves,
                    SMT_DEPTH,
                    subtree_root_depth,
                    subtree_opt.as_ref(),
                )
            })
            .unzip();
        // Prepare leaves for the next depth level
        leaves = SubtreeLeavesIter::from_leaves(&mut subtree_roots).collect();
        // Aggregate all node mutations
        node_mutations.extend(mutations_per_subtree.into_iter().flatten());
        debug_assert!(!leaves.is_empty());
    }
    // After the final (depth 0) iteration a single root leaf remains.
    let new_root = leaves[0][0].hash;
    // Create mutation set
    let mutation_set = MutationSet {
        old_root: SparseMerkleTree::<SMT_DEPTH>::root(self),
        new_root,
        node_mutations,
        new_pairs,
    };
    // There should be mutations and new pairs at this point
    debug_assert!(
        !mutation_set.node_mutations().is_empty() && !mutation_set.new_pairs().is_empty()
    );
    Ok(mutation_set)
}
/// Applies the prospective mutations computed with [`LargeSmt::compute_mutations()`] to this
/// tree.
///
/// # Errors
/// If `mutations` was computed on a tree with a different root than this one, returns
/// [`MerkleError::ConflictingRoots`] with a two-item [`Vec`]. The first item is the root hash
/// the `mutations` were computed against, and the second item is the actual current root of
/// this tree.
pub fn apply_mutations(
    &mut self,
    mutations: MutationSet<SMT_DEPTH, Word, Word>,
) -> Result<(), LargeSmtError> {
    // Validate against the current root and preload the affected storage, then
    // commit the prepared changes.
    let prepared = self.prepare_mutations(mutations)?;
    self.apply_prepared_mutations(prepared)
}
/// Applies the prospective mutations computed with [`LargeSmt::compute_mutations()`] to this
/// tree and returns the reverse mutation set.
///
/// Applying the reverse mutation sets to the updated tree will revert the changes.
///
/// # Errors
/// If `mutations` was computed on a tree with a different root than this one, returns
/// [`MerkleError::ConflictingRoots`] with a two-item [`Vec`]. The first item is the root hash
/// the `mutations` were computed against, and the second item is the actual current root of
/// this tree.
pub fn apply_mutations_with_reversion(
&mut self,
mutations: MutationSet<SMT_DEPTH, Word, Word>,
) -> Result<MutationSet<SMT_DEPTH, Word, Word>, LargeSmtError>
where
Self: Sized,
{
use NodeMutation::*;
let prepared = self.prepare_mutations(mutations)?;
let (old_root, new_root) = (prepared.old_root, prepared.new_root);
// Collect reverse mutations: for each mutation, capture the old node state
let reverse_mutations: NodeMutations = prepared
.sorted_node_mutations
.iter()
.filter_map(|(index, mutation)| {
let old_node = if index.depth() < IN_MEMORY_DEPTH {
self.get_non_empty_inner_node(*index)
} else {
let subtree_root = Subtree::find_subtree_root(*index);
prepared
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | true |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/large/tests.rs | miden-crypto/src/merkle/smt/large/tests.rs | use alloc::{collections::BTreeSet, vec::Vec};
use p3_field::PrimeCharacteristicRing;
use rand::{Rng, prelude::IteratorRandom, rng};
use super::MemoryStorage;
use crate::{
EMPTY_WORD, Felt, ONE, WORD_SIZE, Word,
field::PrimeField64,
merkle::{
InnerNodeInfo,
smt::{
LargeSmt, LeafIndex, SMT_DEPTH, SmtLeaf,
full::{Smt, concurrent::COLS_PER_SUBTREE},
},
},
};
/// Generates `pair_count` deterministic key-value pairs for testing.
///
/// The most significant felt of each key (which drives leaf placement) is set to
/// `i`, and the felt at position 2 guarantees key uniqueness.
fn generate_entries(pair_count: u64) -> Vec<(Word, Word)> {
    (0..pair_count)
        .map(|i| {
            // Use `i` directly as the leaf index. The previous computation
            // `((i as f64 / pair_count as f64) * pair_count as f64) as u64` was a
            // float round-trip intended to yield `i`, but `i / pair_count` is not
            // exactly representable in f64 and the truncating cast could produce
            // `i - 1` for some inputs.
            let leaf_index = i;
            let key = Word::new([ONE, ONE, Felt::new(i), Felt::new(leaf_index)]);
            let value = Word::new([ONE, ONE, ONE, Felt::new(i)]);
            (key, value)
        })
        .collect()
}
/// Derives `updates` random modifications from `entries`: each chosen key gets
/// either a fresh random value or, with some probability, a removal (empty
/// value). The result is sorted by the most significant key element.
fn generate_updates(entries: Vec<(Word, Word)>, updates: usize) -> Vec<(Word, Word)> {
    const REMOVAL_PROBABILITY: f64 = 0.2;
    let mut rng = rng();
    // Duplicate input keys would make the update semantics ambiguous.
    let unique_keys: BTreeSet<_> = entries.iter().map(|(key, _)| key).collect();
    assert!(
        unique_keys.len() == entries.len(),
        "Input entries contain duplicate keys!"
    );
    let chosen = entries.into_iter().choose_multiple(&mut rng, updates);
    let mut sorted_entries: Vec<(Word, Word)> = chosen
        .into_iter()
        .map(|(key, _)| {
            // Occasionally turn the update into a removal.
            let value = if rng.random_bool(REMOVAL_PROBABILITY) {
                EMPTY_WORD
            } else {
                Word::new([ONE, ONE, ONE, Felt::new(rng.random())])
            };
            (key, value)
        })
        .collect();
    sorted_entries.sort_by_key(|(key, _)| key[3].as_canonical_u64());
    sorted_entries
}
/// Builds a reference in-memory [`Smt`] and a [`LargeSmt`] backed by `storage` from
/// the same `entries`, so tests can compare the two implementations directly.
fn create_equivalent_smts_for_testing<S: super::SmtStorage>(
    storage: S,
    entries: Vec<(Word, Word)>,
) -> (Smt, LargeSmt<S>) {
    let control_smt = Smt::with_entries(entries.clone()).unwrap();
    let large_smt = LargeSmt::<S>::with_entries(storage, entries).unwrap();
    (control_smt, large_smt)
}
#[test]
fn test_smt_get_value() {
    let storage = MemoryStorage::new();
    let key_1: Word = Word::from([ONE, ONE, ONE, ONE]);
    let key_2: Word = Word::from([2_u32, 2_u32, 2_u32, 2_u32]);
    let value_1 = Word::new([ONE; WORD_SIZE]);
    let value_2 = Word::new([Felt::from_u32(2_u32); WORD_SIZE]);
    let smt = LargeSmt::<_>::with_entries(storage, [(key_1, value_1), (key_2, value_2)]).unwrap();
    // Present keys read back their stored values.
    assert_eq!(smt.get_value(&key_1), value_1);
    assert_eq!(smt.get_value(&key_2), value_2);
    // An absent key reads as the empty word.
    let missing_key = Word::from([42_u32, 42_u32, 42_u32, 42_u32]);
    assert_eq!(smt.get_value(&missing_key), EMPTY_WORD);
}
#[test]
fn test_equivalent_roots() {
    // Both implementations built from identical entries must agree on the root.
    let entries = generate_entries(1000);
    let (control_smt, large_smt) =
        create_equivalent_smts_for_testing(MemoryStorage::new(), entries);
    assert_eq!(control_smt.root(), large_smt.root());
}
#[test]
fn test_equivalent_openings() {
    let entries = generate_entries(1000);
    let (control_smt, large_smt) =
        create_equivalent_smts_for_testing(MemoryStorage::new(), entries.clone());
    // Every key must open to an identical proof in both implementations.
    for (key, _) in &entries {
        assert_eq!(control_smt.open(key), large_smt.open(key));
    }
}
#[test]
fn test_equivalent_entry_sets() {
    let entries = generate_entries(1000);
    let (control_smt, large_smt) =
        create_equivalent_smts_for_testing(MemoryStorage::new(), entries);
    // Collect and order both entry sets by key before comparing.
    let mut control_entries: Vec<(Word, Word)> = control_smt.entries().copied().collect();
    control_entries.sort_by_key(|entry| entry.0);
    let mut large_entries: Vec<(Word, Word)> = large_smt.entries().unwrap().collect();
    large_entries.sort_by_key(|entry| entry.0);
    assert_eq!(control_entries, large_entries);
    // Cached counters must agree as well.
    assert_eq!(control_smt.num_leaves(), large_smt.num_leaves());
    assert_eq!(control_smt.num_entries(), large_smt.num_entries());
}
#[test]
fn test_equivalent_leaf_sets() {
    let entries = generate_entries(1000);
    let (control_smt, large_smt) =
        create_equivalent_smts_for_testing(MemoryStorage::new(), entries);
    // Collect and order both leaf sets by index before comparing.
    let mut control_leaves: Vec<(LeafIndex<SMT_DEPTH>, SmtLeaf)> =
        control_smt.leaves().map(|(idx, leaf)| (idx, leaf.clone())).collect();
    control_leaves.sort_by_key(|pair| pair.0);
    let mut large_leaves: Vec<(LeafIndex<SMT_DEPTH>, SmtLeaf)> =
        large_smt.leaves().unwrap().collect();
    large_leaves.sort_by_key(|pair| pair.0);
    assert_eq!(control_leaves.len(), large_leaves.len());
    assert_eq!(control_leaves, large_leaves);
    // Cached counters must agree as well.
    assert_eq!(control_smt.num_leaves(), large_smt.num_leaves());
    assert_eq!(control_smt.num_entries(), large_smt.num_entries());
}
#[test]
fn test_equivalent_inner_nodes() {
let storage = MemoryStorage::new();
let entries = generate_entries(1000);
let (control_smt, large_smt) = create_equivalent_smts_for_testing(storage, entries);
let mut control_smt_inner_nodes: Vec<InnerNodeInfo> = control_smt.inner_nodes().collect();
let mut large_smt_inner_nodes: Vec<InnerNodeInfo> = large_smt.inner_nodes().unwrap().collect();
control_smt_inner_nodes.sort_by_key(|info| info.value);
large_smt_inner_nodes.sort_by_key(|info| info.value);
assert_eq!(control_smt_inner_nodes.len(), large_smt_inner_nodes.len());
assert_eq!(control_smt_inner_nodes, large_smt_inner_nodes);
}
#[test]
fn test_compute_mutations() {
let storage = MemoryStorage::new();
const PAIR_COUNT: u64 = COLS_PER_SUBTREE * 64;
let entries = generate_entries(PAIR_COUNT);
let control_smt = Smt::with_entries(entries.clone()).unwrap();
let large_tree = LargeSmt::<_>::with_entries(storage, entries.clone()).unwrap();
let updates = generate_updates(entries, 1000);
let control_mutations = control_smt.compute_mutations(updates.clone()).unwrap();
let mutations = large_tree.compute_mutations(updates).unwrap();
assert_eq!(mutations.root(), control_mutations.root());
assert_eq!(mutations.old_root(), control_mutations.old_root());
assert_eq!(mutations.node_mutations(), control_mutations.node_mutations());
assert_eq!(mutations.new_pairs(), control_mutations.new_pairs());
}
#[test]
fn test_empty_smt() {
let storage = MemoryStorage::new();
let large_smt = LargeSmt::<_>::new(storage).expect("Failed to create empty SMT");
let empty_control_smt = Smt::new();
assert_eq!(large_smt.root(), empty_control_smt.root(), "Empty SMT root mismatch");
let random_key =
Word::from([ONE, Felt::from_u32(2_u32), Felt::from_u32(3_u32), Felt::from_u32(4_u32)]);
assert_eq!(
large_smt.get_value(&random_key),
EMPTY_WORD,
"get_value on empty SMT should return EMPTY_WORD"
);
assert_eq!(large_smt.entries().unwrap().count(), 0, "Empty SMT should have no entries");
assert_eq!(large_smt.leaves().unwrap().count(), 0, "Empty SMT should have no leaves");
assert_eq!(
large_smt.inner_nodes().unwrap().count(),
0,
"Empty SMT should have no inner nodes"
);
}
#[test]
fn test_single_entry_smt() {
let storage = MemoryStorage::new();
let key = Word::new([ONE, ONE, ONE, ONE]);
let value = Word::new([ONE; WORD_SIZE]);
let mut smt = LargeSmt::<_>::with_entries(storage, [(key, value)]).unwrap();
let control_smt_single = Smt::with_entries([(key, value)]).unwrap();
assert_eq!(smt.root(), control_smt_single.root(), "Single entry SMT root mismatch");
assert_eq!(smt.get_value(&key), value, "get_value for existing key failed");
let other_key = Word::from([2_u32, 2_u32, 2_u32, 2_u32]);
assert_eq!(smt.get_value(&other_key), EMPTY_WORD, "get_value for non-existing key failed");
let entries: Vec<_> = smt.entries().unwrap().collect();
assert_eq!(entries.len(), 1, "Single entry SMT should have one entry");
assert_eq!(entries[0], (key, value), "Single entry SMT entry mismatch");
let new_value = Word::new([Felt::from_u32(2_u32); WORD_SIZE]);
let mutations = smt.compute_mutations(vec![(key, new_value)]).unwrap();
assert_eq!(
smt.open(&key),
control_smt_single.open(&key),
"Opening before mutations mismatch"
);
smt.apply_mutations(mutations).unwrap();
let control_smt_updated = Smt::with_entries([(key, new_value)]).unwrap();
assert_eq!(smt.root(), control_smt_updated.root(), "Updated SMT root mismatch");
assert_eq!(smt.get_value(&key), new_value, "get_value after update failed");
assert_eq!(
smt.open(&key),
control_smt_updated.open(&key),
"Opening after mutations mismatch"
);
let mutations_delete = smt.compute_mutations(vec![(key, EMPTY_WORD)]).unwrap();
smt.apply_mutations(mutations_delete).unwrap();
let empty_control_smt = Smt::new();
assert_eq!(smt.root(), empty_control_smt.root(), "SMT root after deletion mismatch");
assert_eq!(smt.get_value(&key), EMPTY_WORD, "get_value after deletion failed");
assert_eq!(smt.entries().unwrap().count(), 0, "SMT should have no entries after deletion");
}
#[test]
fn test_duplicate_key_insertion() {
let storage = MemoryStorage::new();
let key = Word::from([ONE, ONE, ONE, ONE]);
let value1 = Word::new([ONE; WORD_SIZE]);
let value2 = Word::new([Felt::from_u32(2_u32); WORD_SIZE]);
let entries = vec![(key, value1), (key, value2)];
let result = LargeSmt::<_>::with_entries(storage, entries);
assert!(result.is_err(), "Expected an error when inserting duplicate keys");
}
#[test]
fn test_delete_entry() {
let storage = MemoryStorage::new();
let key1 = Word::new([ONE, ONE, ONE, ONE]);
let value1 = Word::new([ONE; WORD_SIZE]);
let key2 = Word::from([2_u32, 2_u32, 2_u32, 2_u32]);
let value2 = Word::new([Felt::from_u32(2_u32); WORD_SIZE]);
let key3 = Word::from([3_u32, 3_u32, 3_u32, 3_u32]);
let value3 = Word::new([Felt::from_u32(3_u32); WORD_SIZE]);
let initial_entries = vec![(key1, value1), (key2, value2), (key3, value3)];
let mut smt = LargeSmt::<_>::with_entries(storage, initial_entries.clone()).unwrap();
let mutations = smt.compute_mutations(vec![(key2, EMPTY_WORD)]).unwrap();
smt.apply_mutations(mutations).unwrap();
assert_eq!(
smt.get_value(&key2),
EMPTY_WORD,
"get_value for deleted key should be EMPTY_WORD"
);
let current_entries: Vec<_> = smt.entries().unwrap().collect();
assert!(
!current_entries.iter().any(|(k, _v)| k == &key2),
"Deleted key should not be in entries"
);
assert_eq!(current_entries.len(), 2, "SMT should have 2 entries after deletion");
assert_eq!(smt.get_value(&key1), value1, "Value for key1 changed after deleting key2");
assert_eq!(smt.get_value(&key3), value3, "Value for key3 changed after deleting key2");
let remaining_entries = vec![(key1, value1), (key3, value3)];
let control_smt_after_delete = Smt::with_entries(remaining_entries).unwrap();
assert_eq!(smt.root(), control_smt_after_delete.root(), "SMT root mismatch after deletion");
}
#[test]
fn test_insert_entry() {
let storage = MemoryStorage::new();
let initial_entries = generate_entries(100);
let mut large_smt = LargeSmt::<_>::with_entries(storage, initial_entries.clone()).unwrap();
let mut control_smt = Smt::with_entries(initial_entries.clone()).unwrap();
assert_eq!(large_smt.num_entries(), control_smt.num_entries(), "Number of entries mismatch");
assert_eq!(large_smt.num_leaves(), control_smt.num_leaves(), "Number of leaves mismatch");
let new_key = Word::from([100_u32, 100_u32, 100_u32, 100_u32]);
let new_value = Word::new([Felt::from_u32(100_u32); WORD_SIZE]);
let old_value = large_smt.insert(new_key, new_value).unwrap();
let control_old_value = control_smt.insert(new_key, new_value).unwrap();
assert_eq!(old_value, control_old_value, "Old values mismatch");
assert_eq!(old_value, EMPTY_WORD, "Expected empty value");
assert_eq!(large_smt.num_entries(), control_smt.num_entries(), "Number of entries mismatch");
assert_eq!(large_smt.num_leaves(), control_smt.num_leaves(), "Number of leaves mismatch");
assert_eq!(large_smt.get_value(&new_key), new_value, "Value mismatch");
assert_eq!(control_smt.get_value(&new_key), new_value, "Value mismatch");
assert_eq!(large_smt.root(), control_smt.root(), "Roots don't match after insert");
let large_proof = large_smt.open(&new_key);
let control_proof = control_smt.open(&new_key);
assert_eq!(large_proof, control_proof, "Proofs don't match");
for (key, _) in initial_entries {
let large_proof = large_smt.open(&key);
let control_proof = control_smt.open(&key);
assert_eq!(large_proof, control_proof, "Proofs don't match for original key: {key:?}");
}
}
#[test]
fn test_mutations_revert() {
let storage = MemoryStorage::new();
let mut smt = LargeSmt::<_>::new(storage).unwrap();
let key_1: Word = Word::new([ONE, ONE, ONE, Felt::new(1)]);
let key_2: Word = Word::new([Felt::new(2), Felt::new(2), Felt::new(2), Felt::new(2)]);
let key_3: Word = Word::new([Felt::new(0), Felt::new(0), Felt::new(0), Felt::new(3)]);
let value_1 = Word::new([ONE; WORD_SIZE]);
let value_2 = Word::new([Felt::from_u32(2_u32); WORD_SIZE]);
let value_3 = Word::new([Felt::from_u32(3_u32); WORD_SIZE]);
smt.insert(key_1, value_1).unwrap();
smt.insert(key_2, value_2).unwrap();
let mutations = smt
.compute_mutations(vec![(key_1, EMPTY_WORD), (key_2, value_1), (key_3, value_3)])
.unwrap();
let original_root = smt.root();
let revert = smt.apply_mutations_with_reversion(mutations).unwrap();
assert_eq!(revert.old_root, smt.root(), "reverse mutations old root did not match");
assert_eq!(revert.root(), original_root, "reverse mutations new root did not match");
smt.apply_mutations(revert).unwrap();
assert_eq!(
smt.root(),
original_root,
"SMT with applied revert mutations did not match original SMT"
);
}
#[test]
fn test_insert_batch_matches_compute_apply() {
let storage1 = MemoryStorage::new();
let storage2 = MemoryStorage::new();
const PAIR_COUNT: u64 = COLS_PER_SUBTREE * 64;
let entries = generate_entries(PAIR_COUNT);
// Create two identical trees
let mut tree1 = LargeSmt::with_entries(storage1, entries.clone()).unwrap();
let mut tree2 = LargeSmt::with_entries(storage2, entries.clone()).unwrap();
// Generate updates
let updates = generate_updates(entries, 1000);
// Compute_mutations + apply_mutations
let mutations = tree1.compute_mutations(updates.clone()).unwrap();
let root1_before = tree1.root();
tree1.apply_mutations(mutations).unwrap();
let root1_after = tree1.root();
// Insert_batch
let root2_before = tree2.root();
let root2_after = tree2.insert_batch(updates.clone()).unwrap();
// Roots match at each step
assert_eq!(root1_before, root2_before, "Initial roots should match");
assert_eq!(root1_after, root2_after, "Final roots should match");
// All values match
for (key, _) in updates {
let val1 = tree1.get_value(&key);
let val2 = tree2.get_value(&key);
assert_eq!(val1, val2, "Values should match for key {key:?}");
}
// Verify metadata
assert_eq!(tree1.num_leaves(), tree2.num_leaves());
assert_eq!(tree1.num_entries(), tree2.num_entries());
}
#[test]
fn test_insert_batch_empty_tree() {
let storage = MemoryStorage::new();
let mut smt = LargeSmt::new(storage).unwrap();
let entries = vec![
(
crate::Word::new([Felt::new(1), Felt::new(0), Felt::new(0), Felt::new(0)]),
crate::Word::new([Felt::new(10), Felt::new(20), Felt::new(30), Felt::new(40)]),
),
(
crate::Word::new([Felt::new(2), Felt::new(0), Felt::new(0), Felt::new(0)]),
crate::Word::new([Felt::new(11), Felt::new(22), Felt::new(33), Felt::new(44)]),
),
];
let new_root = smt.insert_batch(entries.clone()).unwrap();
use crate::merkle::EmptySubtreeRoots;
assert_ne!(new_root, *EmptySubtreeRoots::entry(SMT_DEPTH, 0));
// Verify values were inserted
for (key, value) in entries {
assert_eq!(smt.get_value(&key), value);
}
}
#[test]
fn test_insert_batch_with_deletions() {
let storage = MemoryStorage::new();
let mut smt = LargeSmt::new(storage).unwrap();
// Initial data
let key_1 = crate::Word::new([ONE, ONE, ONE, Felt::new(1)]);
let key_2 = crate::Word::new([Felt::new(2), Felt::new(2), Felt::new(2), Felt::new(2)]);
let key_3 = crate::Word::new([Felt::new(0), Felt::new(0), Felt::new(0), Felt::new(3)]);
let value_1 = crate::Word::new([ONE; WORD_SIZE]);
let value_2 = crate::Word::new([Felt::new(2); WORD_SIZE]);
let value_3 = crate::Word::new([Felt::new(3); WORD_SIZE]);
smt.insert(key_1, value_1).unwrap();
smt.insert(key_2, value_2).unwrap();
let initial_root = smt.root();
// Batch update with insertions and deletions
let updates = vec![(key_1, EMPTY_WORD), (key_2, value_1), (key_3, value_3)];
let new_root = smt.insert_batch(updates).unwrap();
assert_ne!(new_root, initial_root);
// Verify the state
assert_eq!(smt.get_value(&key_1), EMPTY_WORD);
assert_eq!(smt.get_value(&key_2), value_1);
assert_eq!(smt.get_value(&key_3), value_3);
}
#[test]
fn test_insert_batch_no_mutations() {
let storage = MemoryStorage::new();
let mut smt = LargeSmt::new(storage).unwrap();
let key_1 = crate::Word::new([ONE, ONE, ONE, Felt::new(1)]);
let value_1 = crate::Word::new([ONE; WORD_SIZE]);
smt.insert(key_1, value_1).unwrap();
let root_before = smt.root();
// Insert the same value again (no change)
let root_after = smt.insert_batch(vec![(key_1, value_1)]).unwrap();
assert_eq!(root_before, root_after);
}
#[test]
fn test_insert_batch_large_dataset() {
// Test insert_batch with a large dataset
let storage = MemoryStorage::new();
let mut smt = LargeSmt::new(storage).unwrap();
const LARGE_COUNT: u64 = COLS_PER_SUBTREE * 128;
let entries = generate_entries(LARGE_COUNT);
let new_root = smt.insert_batch(entries.clone()).unwrap();
use crate::merkle::EmptySubtreeRoots;
assert_ne!(new_root, *EmptySubtreeRoots::entry(SMT_DEPTH, 0));
// Spot check some values
for (key, value) in entries.iter().step_by(100) {
assert_eq!(smt.get_value(key), *value);
}
assert_eq!(smt.num_entries(), LARGE_COUNT as usize);
}
// IN-MEMORY LAYOUT TESTS
// ================================================================================================
#[test]
fn test_flat_layout_index_zero_unused_in_instance() {
use crate::merkle::Rpo256;
let storage = MemoryStorage::new();
let mut smt = LargeSmt::<_>::new(storage).unwrap();
let in_memory_nodes = smt.in_memory_nodes();
// Index 0 should always be EMPTY_WORD (unused)
assert_eq!(in_memory_nodes[0], EMPTY_WORD, "Index 0 should be EMPTY_WORD (unused)");
let key = Word::new([ONE, ONE, ONE, Felt::new(1)]);
let value = Word::new([Felt::new(42), Felt::new(43), Felt::new(44), Felt::new(45)]);
smt.insert(key, value).unwrap();
let in_memory_nodes = smt.in_memory_nodes();
assert_eq!(in_memory_nodes[0], EMPTY_WORD, "Index 0 should be EMPTY_WORD (unused)");
// The root hash is computed from children at indices 2 and 3
let computed_root = Rpo256::merge(&[in_memory_nodes[2], in_memory_nodes[3]]);
assert_eq!(computed_root, smt.root(), "Root should equal hash(children[2], children[3])");
}
#[test]
fn test_flat_layout_after_insertion() {
use crate::merkle::{EmptySubtreeRoots, Rpo256};
// Insert a value and verify the flat layout is updated correctly
let storage = MemoryStorage::new();
let mut smt = LargeSmt::<_>::new(storage).unwrap();
let key = Word::new([ONE, ONE, ONE, Felt::new(1)]);
let value = Word::new([Felt::new(42), Felt::new(43), Felt::new(44), Felt::new(45)]);
smt.insert(key, value).unwrap();
let in_memory_nodes = smt.in_memory_nodes();
// Index 0 should still be unused
assert_eq!(in_memory_nodes[0], EMPTY_WORD, "Index 0 should remain EMPTY_WORD");
let depth_1_empty = *EmptySubtreeRoots::entry(SMT_DEPTH, 1);
let changed = in_memory_nodes[2] != depth_1_empty || in_memory_nodes[3] != depth_1_empty;
assert!(changed, "At least one of root's children should have changed after insertion");
// Verify root can be computed from children at indices 2 and 3
let computed_root = Rpo256::merge(&[in_memory_nodes[2], in_memory_nodes[3]]);
assert_eq!(
computed_root,
smt.root(),
"Root should equal hash of children at indices 2 and 3"
);
}
#[test]
fn test_flat_layout_children_relationship() {
use crate::merkle::{EmptySubtreeRoots, NodeIndex, Rpo256};
// Insert multiple values and verify parent-child relationships in the flat layout
let storage = MemoryStorage::new();
let mut smt = LargeSmt::<_>::new(storage).unwrap();
// Generate random leaf indices
let mut rng = rng();
let num_samples = 50;
let leaf_indices: Vec<u64> =
(0..num_samples).map(|_| rng.random::<u64>() % (1 << 20)).collect();
for leaf_value in &leaf_indices {
let key = Word::new([ONE, ONE, ONE, Felt::new(*leaf_value)]);
let value = Word::new([Felt::new(*leaf_value * 10); 4]);
smt.insert(key, value).unwrap();
}
let in_memory_nodes = smt.in_memory_nodes();
// Verify root separately (depth 0, value 0, memory_idx 1)
let root_left = in_memory_nodes[2];
let root_right = in_memory_nodes[3];
let root_hash = Rpo256::merge(&[root_left, root_right]);
assert_eq!(root_hash, smt.root(), "Root hash should match computed hash from children");
for &leaf_value in &leaf_indices {
// Trace path from depth 1 down to in-memory depth
for depth in 1..super::IN_MEMORY_DEPTH {
let node_value = leaf_value >> (SMT_DEPTH - depth);
let node_idx = NodeIndex::new(depth, node_value).unwrap();
let memory_idx = super::to_memory_index(&node_idx);
// Get children from flat layout
let left_child = in_memory_nodes[memory_idx * 2];
let right_child = in_memory_nodes[memory_idx * 2 + 1];
// Calculate expected empty hash for children at this depth
let child_depth = depth + 1;
let empty_hash = *EmptySubtreeRoots::entry(SMT_DEPTH, child_depth);
// Determine which child is on the path to our leaf
let is_right_child = ((leaf_value >> (SMT_DEPTH - depth - 1)) & 1) == 1;
// Select the child on the path and verify it's non-empty
let child_on_path = if is_right_child { right_child } else { left_child };
assert_ne!(
child_on_path, empty_hash,
"Child on path should be non-empty at depth {}, value {} (on path to leaf {})",
depth, node_value, leaf_value
);
// Verify the parent-child hash relationship
let node_hash = Rpo256::merge(&[left_child, right_child]);
assert_eq!(
in_memory_nodes[memory_idx], node_hash,
"Stored hash at memory_idx {} should match computed hash from children at depth {}, value {}",
memory_idx, depth, node_value
);
}
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/large/error.rs | miden-crypto/src/merkle/smt/large/error.rs | use thiserror::Error;
use super::{MerkleError, StorageError};
use crate::Word;
// ERROR TYPES
// ================================================================================================
/// Errors that can occur during LargeSmt operations.
#[derive(Debug, Error)]
pub enum LargeSmtError {
/// A Merkle tree operation failed.
#[error("merkle operation failed")]
Merkle(#[from] MerkleError),
/// A storage operation failed.
#[error("storage operation failed")]
Storage(#[from] StorageError),
/// The reconstructed root does not match the expected root.
#[error("root mismatch: expected {expected:?}, got {actual:?}")]
RootMismatch {
/// The expected root hash.
expected: Word,
/// The actual reconstructed root hash.
actual: Word,
},
/// Storage already contains data when trying to create a new tree.
///
/// Use [`super::LargeSmt::load_with_root()`] or [`super::LargeSmt::load()`] to load
/// existing storage.
#[error("storage is not empty")]
StorageNotEmpty,
}
#[cfg(test)]
// Compile-time assertion that LargeSmtError implements the required traits
const _: fn() = || {
fn assert_impl<T: std::error::Error + Send + Sync + 'static>() {}
assert_impl::<LargeSmtError>();
assert_impl::<MerkleError>();
assert_impl::<StorageError>();
};
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/large/mod.rs | miden-crypto/src/merkle/smt/large/mod.rs | //! Large-scale Sparse Merkle Tree backed by pluggable storage.
//!
//! `LargeSmt` stores the top of the tree (depths 0–23) in memory and persists the lower
//! depths (24–64) in storage as fixed-size subtrees. This hybrid layout scales beyond RAM
//! while keeping common operations fast. With the `rocksdb` feature enabled, the lower
//! subtrees and leaves are stored in RocksDB. On reload, the in-memory top is reconstructed
//! from cached depth-24 subtree roots.
//!
//! Examples below require the `rocksdb` feature.
//!
//! Load an existing RocksDB-backed tree with root validation:
//! ```no_run
//! # #[cfg(feature = "rocksdb")]
//! # {
//! use miden_crypto::{
//! Word,
//! merkle::smt::{LargeSmt, RocksDbConfig, RocksDbStorage},
//! };
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! # let expected_root: Word = miden_crypto::EMPTY_WORD;
//! let storage = RocksDbStorage::open(RocksDbConfig::new("/path/to/db"))?;
//! let smt = LargeSmt::load_with_root(storage, expected_root)?;
//! assert_eq!(smt.root(), expected_root);
//! # Ok(())
//! # }
//! # }
//! ```
//!
//! Load an existing tree without root validation (use with caution):
//! ```no_run
//! # #[cfg(feature = "rocksdb")]
//! # {
//! use miden_crypto::merkle::smt::{LargeSmt, RocksDbConfig, RocksDbStorage};
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let storage = RocksDbStorage::open(RocksDbConfig::new("/path/to/db"))?;
//! let smt = LargeSmt::load(storage)?;
//! let _root = smt.root();
//! # Ok(())
//! # }
//! # }
//! ```
//!
//! Initialize an empty RocksDB-backed tree and bulk-load entries:
//! ```no_run
//! # #[cfg(feature = "rocksdb")]
//! # {
//! use miden_crypto::{
//! Felt, Word,
//! merkle::smt::{LargeSmt, RocksDbConfig, RocksDbStorage},
//! };
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let path = "/path/to/new-db";
//! if std::path::Path::new(path).exists() {
//! std::fs::remove_dir_all(path)?;
//! }
//! std::fs::create_dir_all(path)?;
//!
//! let storage = RocksDbStorage::open(RocksDbConfig::new(path))?;
//! let mut smt = LargeSmt::new(storage)?; // empty tree
//!
//! // Prepare initial entries
//! let entries = vec![
//! (
//! Word::new([Felt::new(1), Felt::new(0), Felt::new(0), Felt::new(0)]),
//! Word::new([Felt::new(10), Felt::new(20), Felt::new(30), Felt::new(40)]),
//! ),
//! (
//! Word::new([Felt::new(2), Felt::new(0), Felt::new(0), Felt::new(0)]),
//! Word::new([Felt::new(11), Felt::new(22), Felt::new(33), Felt::new(44)]),
//! ),
//! ];
//!
//! // Bulk insert entries (faster than compute_mutations + apply_mutations)
//! smt.insert_batch(entries)?;
//! # Ok(())
//! # }
//! # }
//! ```
//!
//! Apply batch updates (insertions and deletions):
//! ```no_run
//! # #[cfg(feature = "rocksdb")]
//! # {
//! use miden_crypto::{
//! EMPTY_WORD, Felt, Word,
//! merkle::smt::{LargeSmt, RocksDbConfig, RocksDbStorage},
//! };
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let storage = RocksDbStorage::open(RocksDbConfig::new("/path/to/db"))?;
//! let mut smt = LargeSmt::load(storage)?;
//!
//! let k1 = Word::new([Felt::new(101), Felt::new(0), Felt::new(0), Felt::new(0)]);
//! let v1 = Word::new([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]);
//! let k2 = Word::new([Felt::new(202), Felt::new(0), Felt::new(0), Felt::new(0)]);
//! let k3 = Word::new([Felt::new(303), Felt::new(0), Felt::new(0), Felt::new(0)]);
//! let v3 = Word::new([Felt::new(7), Felt::new(7), Felt::new(7), Felt::new(7)]);
//!
//! // EMPTY_WORD marks deletions
//! let updates = vec![(k1, v1), (k2, EMPTY_WORD), (k3, v3)];
//! smt.insert_batch(updates)?;
//! # Ok(())
//! # }
//! # }
//! ```
//!
//! Quick initialization with `with_entries` (best for modest datasets/tests):
//! ```no_run
//! # #[cfg(feature = "rocksdb")]
//! # {
//! use miden_crypto::{
//! Felt, Word,
//! merkle::smt::{LargeSmt, RocksDbConfig, RocksDbStorage},
//! };
//!
//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Note: `with_entries` expects an EMPTY storage and performs an all-at-once build.
//! // Prefer `insert_batch` for large bulk loads.
//! let path = "/path/to/new-db";
//! if std::path::Path::new(path).exists() {
//! std::fs::remove_dir_all(path)?;
//! }
//! std::fs::create_dir_all(path)?;
//!
//! let storage = RocksDbStorage::open(RocksDbConfig::new(path))?;
//! let entries = vec![
//! (
//! Word::new([Felt::new(1), Felt::new(0), Felt::new(0), Felt::new(0)]),
//! Word::new([Felt::new(10), Felt::new(20), Felt::new(30), Felt::new(40)]),
//! ),
//! (
//! Word::new([Felt::new(2), Felt::new(0), Felt::new(0), Felt::new(0)]),
//! Word::new([Felt::new(11), Felt::new(22), Felt::new(33), Felt::new(44)]),
//! ),
//! ];
//! let _smt = LargeSmt::with_entries(storage, entries)?;
//! # Ok(())
//! # }
//! # }
//! ```
//!
//! ## Performance and Memory Considerations
//!
//! The `apply_mutations()` and `apply_mutations_with_reversion()` methods use batched
//! operations: they preload all affected subtrees and leaves before applying changes
//! atomically. This approach reduces I/O at the cost of higher temporary memory usage.
//!
//! ### Memory Usage
//!
//! Peak memory is proportional to:
//! - The number of mutated leaves
//! - The number of distinct storage subtrees touched by those mutations
//!
//! This memory is temporary and released immediately after the batch commits.
//!
//! ### Locality Matters
//!
//! Memory usage scales with how dispersed updates are, not just their count:
//! - **Localized updates**: Keys with shared high-order bits fall into the same storage subtrees
//! - **Scattered updates**: Keys spread across many storage subtrees require loading more distinct
//! subtrees
//!
//! ### Guidelines
//!
//! For typical batches (up to ~10,000 updates) with reasonable locality, the working set
//! is modest. Very large or highly scattered batches will use more
//! memory proportionally.
//!
//! To optimize memory and I/O: group updates by key locality so that keys sharing
//! high-order bits are processed together.
use alloc::vec::Vec;
use super::{
EmptySubtreeRoots, InnerNode, InnerNodeInfo, InnerNodes, LeafIndex, MerkleError, NodeIndex,
SMT_DEPTH, SmtLeaf, SmtProof, SparseMerkleTree, Word,
};
use crate::merkle::smt::{Map, full::concurrent::MutatedSubtreeLeaves};
mod error;
pub use error::LargeSmtError;
#[cfg(test)]
mod property_tests;
#[cfg(test)]
mod tests;
mod subtree;
pub use subtree::{Subtree, SubtreeError};
mod storage;
pub use storage::{
MemoryStorage, SmtStorage, StorageError, StorageUpdateParts, StorageUpdates, SubtreeUpdate,
};
#[cfg(feature = "rocksdb")]
pub use storage::{RocksDbConfig, RocksDbStorage};
mod iter;
pub use iter::LargeSmtInnerNodeIterator;
mod batch_ops;
mod construction;
mod smt_trait;
// CONSTANTS
// ================================================================================================
/// Number of levels of the tree that are stored in memory
const IN_MEMORY_DEPTH: u8 = 24;
/// Number of nodes that are stored in memory (including the unused index 0)
const NUM_IN_MEMORY_NODES: usize = 1 << (IN_MEMORY_DEPTH + 1);
/// Index of the root node inside `in_memory_nodes`.
pub(super) const ROOT_MEMORY_INDEX: usize = 1;
/// Number of subtree levels below in-memory depth (24-64 in steps of 8)
const NUM_SUBTREE_LEVELS: usize = 5;
/// How many subtrees we buffer before flushing them to storage **during the
/// SMT construction phase**.
///
/// * This constant is **only** used while building a fresh tree; incremental updates use their own
/// per-batch sizing.
/// * Construction is all-or-nothing: if the write fails we abort and rebuild from scratch, so we
/// allow larger batches that maximise I/O throughput instead of fine-grained rollback safety.
const CONSTRUCTION_SUBTREE_BATCH_SIZE: usize = 10_000;
// TYPES
// ================================================================================================
type Leaves = super::Leaves<SmtLeaf>;
/// Result of loading leaves from storage: (leaf indices, map of leaf index to leaf).
type LoadedLeaves = (Vec<u64>, Map<u64, Option<SmtLeaf>>);
/// Result of processing key-value pairs into mutated leaves for subtree building:
/// - `MutatedSubtreeLeaves`: Leaves organized for parallel subtree building
/// - `Map<u64, SmtLeaf>`: Map of leaf index to mutated leaf node (for storage updates)
/// - `Map<Word, Word>`: Changed key-value pairs
/// - `isize`: Leaf count delta
/// - `isize`: Entry count delta
type MutatedLeaves = (MutatedSubtreeLeaves, Map<u64, SmtLeaf>, Map<Word, Word>, isize, isize);
// LargeSmt
// ================================================================================================
/// A large-scale Sparse Merkle tree mapping 256-bit keys to 256-bit values, backed by pluggable
/// storage. Both keys and values are represented by 4 field elements.
///
/// Unlike the regular `Smt`, this implementation is designed for very large trees by using external
/// storage (such as RocksDB) for the bulk of the tree data, while keeping only the upper levels (up
/// to depth 24) in memory. This hybrid approach allows the tree to scale beyond memory limitations
/// while maintaining good performance for common operations.
///
/// All leaves sit at depth 64. The most significant element of the key is used to identify the leaf
/// to which the key maps.
///
/// A leaf is either empty, or holds one or more key-value pairs. An empty leaf hashes to the empty
/// word. Otherwise, a leaf hashes to the hash of its key-value pairs, ordered by key first, value
/// second.
///
/// The tree structure:
/// - Depths 0-23: Stored in memory as a flat array for fast access
/// - Depths 24-64: Stored in external storage organized as subtrees for efficient batch operations
///
/// `LargeSmt` does **not** implement [`Clone`]. Copying an instance would only duplicate the
/// in-memory nodes while continuing to share the storage backend, which is misleading. If you need
/// to share an instance between threads or components, wrap it in an
/// [`Arc`](alloc::sync::Arc) explicitly so the ownership semantics are clear.
#[derive(Debug)]
pub struct LargeSmt<S: SmtStorage> {
storage: S,
/// Flat vector representation of in-memory nodes.
/// Index 0 is unused; index 1 is root.
/// For node at index i: left child at 2*i, right child at 2*i+1.
in_memory_nodes: Vec<Word>,
/// Cached count of non-empty leaves. Initialized from storage on load,
/// updated after each mutation.
leaf_count: usize,
/// Cached count of key-value entries across all leaves. Initialized from
/// storage on load, updated after each mutation.
entry_count: usize,
}
impl<S: SmtStorage> LargeSmt<S> {
// CONSTANTS
// --------------------------------------------------------------------------------------------
/// The default value used to compute the hash of empty leaves.
pub const EMPTY_VALUE: Word = <Self as SparseMerkleTree<SMT_DEPTH>>::EMPTY_VALUE;
/// Subtree depths for the subtrees stored in storage.
pub const SUBTREE_DEPTHS: [u8; 5] = [56, 48, 40, 32, 24];
// PUBLIC ACCESSORS
// --------------------------------------------------------------------------------------------
/// Returns the depth of the tree
pub const fn depth(&self) -> u8 {
SMT_DEPTH
}
/// Returns the root of the tree
pub fn root(&self) -> Word {
self.in_memory_nodes[ROOT_MEMORY_INDEX]
}
/// Returns the number of non-empty leaves in this tree.
///
/// Note that this may return a different value from [Self::num_entries()] as a single leaf may
/// contain more than one key-value pair.
pub fn num_leaves(&self) -> usize {
self.leaf_count
}
/// Returns the number of key-value pairs with non-default values in this tree.
///
/// Note that this may return a different value from [Self::num_leaves()] as a single leaf may
/// contain more than one key-value pair.
pub fn num_entries(&self) -> usize {
self.entry_count
}
/// Returns the leaf to which `key` maps
pub fn get_leaf(&self, key: &Word) -> SmtLeaf {
<Self as SparseMerkleTree<SMT_DEPTH>>::get_leaf(self, key)
}
/// Returns the value associated with `key`
pub fn get_value(&self, key: &Word) -> Word {
<Self as SparseMerkleTree<SMT_DEPTH>>::get_value(self, key)
}
/// Returns an opening of the leaf associated with `key`. Conceptually, an opening is a Merkle
/// path to the leaf, as well as the leaf itself.
pub fn open(&self, key: &Word) -> SmtProof {
<Self as SparseMerkleTree<SMT_DEPTH>>::open(self, key)
}
/// Returns a boolean value indicating whether the SMT is empty.
pub fn is_empty(&self) -> bool {
let root = self.root();
debug_assert_eq!(self.leaf_count == 0, root == Self::EMPTY_ROOT);
root == Self::EMPTY_ROOT
}
    // ITERATORS
    // --------------------------------------------------------------------------------------------

    /// Returns an iterator over the leaves of this [`LargeSmt`].
    /// Note: This iterator returns owned SmtLeaf values.
    ///
    /// # Errors
    /// Returns an error if the storage backend fails to create the iterator.
    pub fn leaves(
        &self,
    ) -> Result<impl Iterator<Item = (LeafIndex<SMT_DEPTH>, SmtLeaf)>, LargeSmtError> {
        let iter = self.storage.iter_leaves()?;
        // Storage yields raw u64 leaf positions; wrap them in max-depth leaf indices.
        Ok(iter.map(|(idx, leaf)| (LeafIndex::new_max_depth(idx), leaf)))
    }
/// Returns an iterator over the key-value pairs of this [`LargeSmt`].
/// Note: This iterator returns owned (Word, Word) tuples.
///
/// # Errors
/// Returns an error if the storage backend fails to create the iterator.
pub fn entries(&self) -> Result<impl Iterator<Item = (Word, Word)>, LargeSmtError> {
let leaves_iter = self.leaves()?;
Ok(leaves_iter.flat_map(|(_, leaf)| {
// Collect the (Word, Word) tuples into an owned Vec
// This ensures they outlive the 'leaf' from which they are derived.
let owned_entries: Vec<(Word, Word)> = leaf.entries().to_vec();
// Return an iterator over this owned Vec
owned_entries.into_iter()
}))
}
    /// Returns an iterator over the inner nodes of this [`LargeSmt`].
    ///
    /// # Errors
    /// Returns an error if the storage backend fails during iteration setup.
    pub fn inner_nodes(&self) -> Result<impl Iterator<Item = InnerNodeInfo> + '_, LargeSmtError> {
        // Pre-validate that storage is accessible so setup errors surface here;
        // the iterator itself cannot report storage errors through its Item type.
        let _ = self.storage.iter_subtrees()?;
        Ok(LargeSmtInnerNodeIterator::new(self))
    }
    // STATE MUTATORS
    // --------------------------------------------------------------------------------------------

    /// Inserts a value at the specified key, returning the previous value associated with that key.
    /// Recall that by definition, any key that hasn't been updated is associated with
    /// [`Self::EMPTY_VALUE`].
    ///
    /// This also recomputes all hashes between the leaf (associated with the key) and the root,
    /// updating the root itself.
    ///
    /// # Errors
    /// Returns an error if inserting the key-value pair would exceed
    /// [`MAX_LEAF_ENTRIES`](super::MAX_LEAF_ENTRIES) (1024 entries) in the leaf.
    pub fn insert(&mut self, key: Word, value: Word) -> Result<Word, MerkleError> {
        // Fully-qualified call avoids recursing into this inherent method of the same name.
        <Self as SparseMerkleTree<SMT_DEPTH>>::insert(self, key, value)
    }
// HELPERS
// --------------------------------------------------------------------------------------------
/// Helper to get an in-memory node if not empty.
///
/// # Panics
/// With debug assertions on, panics if `index.depth() >= IN_MEMORY_DEPTH`.
fn get_non_empty_inner_node(&self, index: NodeIndex) -> Option<InnerNode> {
debug_assert!(index.depth() < IN_MEMORY_DEPTH, "Only for in-memory nodes");
let memory_index = to_memory_index(&index);
let left = self.in_memory_nodes[memory_index * 2];
let right = self.in_memory_nodes[memory_index * 2 + 1];
// Check if both children are empty
let child_depth = index.depth() + 1;
if is_empty_parent(left, right, child_depth) {
None
} else {
Some(InnerNode { left, right })
}
}
// TEST HELPERS
// --------------------------------------------------------------------------------------------
#[cfg(test)]
pub(crate) fn in_memory_nodes(&self) -> &Vec<Word> {
&self.in_memory_nodes
}
}
// HELPERS
// ================================================================================================

/// Checks if a node with the given children is empty.
/// A node is considered empty if both children equal the empty hash for that depth.
pub(super) fn is_empty_parent(left: Word, right: Word, child_depth: u8) -> bool {
    let expected_empty = *EmptySubtreeRoots::entry(SMT_DEPTH, child_depth);
    // Empty exactly when neither child deviates from the canonical empty hash.
    !(left != expected_empty || right != expected_empty)
}
/// Converts a NodeIndex to a flat vector index using 1-indexed layout.
/// Index 0 is unused, index 1 is root.
/// For a node at index i: left child at 2*i, right child at 2*i+1.
pub(super) fn to_memory_index(index: &NodeIndex) -> usize {
    let depth = index.depth();
    debug_assert!(depth < IN_MEMORY_DEPTH);
    debug_assert!(index.value() < (1 << depth));
    // Each depth d occupies the contiguous range [2^d, 2^(d+1)) of the flat array.
    let row_start = 1usize << depth;
    row_start + index.value() as usize
}
impl<S: SmtStorage> PartialEq for LargeSmt<S> {
    /// Compares two LargeSmt instances based on their root hash and metadata.
    ///
    /// Note: This comparison only checks the root hash and counts, not the underlying
    /// storage contents. Two SMTs with the same root should be cryptographically
    /// equivalent, but this doesn't verify the storage backends are identical.
    fn eq(&self, other: &Self) -> bool {
        if self.root() != other.root() {
            return false;
        }
        self.leaf_count == other.leaf_count && self.entry_count == other.entry_count
    }
}
impl<S: SmtStorage> Eq for LargeSmt<S> {}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/large/smt_trait.rs | miden-crypto/src/merkle/smt/large/smt_trait.rs | use alloc::vec::Vec;
use p3_field::PrimeField64;
use super::{
IN_MEMORY_DEPTH, LargeSmt, NUM_SUBTREE_LEVELS, ROOT_MEMORY_INDEX, SMT_DEPTH, SmtStorage,
StorageError, Subtree,
};
use crate::{
EMPTY_WORD, Word,
merkle::smt::{
EmptySubtreeRoots, InnerNode, LargeSmtError, LeafIndex, Map, MerkleError, NodeIndex,
SmtLeaf, SmtLeafError, SmtProof, SparseMerklePath, SparseMerkleTree,
large::{is_empty_parent, to_memory_index},
},
};
impl<S: SmtStorage> SparseMerkleTree<SMT_DEPTH> for LargeSmt<S> {
    type Key = Word;
    type Value = Word;
    type Leaf = SmtLeaf;
    type Opening = SmtProof;

    const EMPTY_VALUE: Self::Value = EMPTY_WORD;
    const EMPTY_ROOT: Word = *EmptySubtreeRoots::entry(SMT_DEPTH, 0);

    /// Unsupported for [`LargeSmt`]: the tree is backed by external storage and must be
    /// built through its storage-aware constructors.
    ///
    /// # Panics
    /// Always panics.
    fn from_raw_parts(
        _inner_nodes: super::InnerNodes,
        _leaves: super::Leaves,
        _root: Word,
    ) -> Result<Self, MerkleError> {
        // This method is not supported
        panic!("LargeSmt::from_raw_parts is not supported");
    }

    /// Returns the cached root hash from the in-memory node array.
    fn root(&self) -> Word {
        self.in_memory_nodes[ROOT_MEMORY_INDEX]
    }

    /// Overwrites the cached root hash in the in-memory node array.
    fn set_root(&mut self, root: Word) {
        self.in_memory_nodes[ROOT_MEMORY_INDEX] = root;
    }

    /// Returns the inner node at `index`: shallow depths are served from the in-memory
    /// array, deeper depths from storage, with missing nodes resolving to the canonical
    /// empty node for that depth.
    fn get_inner_node(&self, index: NodeIndex) -> InnerNode {
        if index.depth() < IN_MEMORY_DEPTH {
            let memory_index = to_memory_index(&index);
            // Reconstruct InnerNode from flat layout: left at 2*i, right at 2*i+1
            return InnerNode {
                left: self.in_memory_nodes[memory_index * 2],
                right: self.in_memory_nodes[memory_index * 2 + 1],
            };
        }
        self.storage
            .get_inner_node(index)
            .expect("Failed to get inner node")
            .unwrap_or_else(|| EmptySubtreeRoots::get_inner_node(SMT_DEPTH, index.depth()))
    }

    /// Stores `inner_node` at `index`, returning the previously stored node if it was
    /// non-empty.
    fn insert_inner_node(&mut self, index: NodeIndex, inner_node: InnerNode) -> Option<InnerNode> {
        if index.depth() < IN_MEMORY_DEPTH {
            let i = to_memory_index(&index);
            // Get the old node before replacing
            let old_left = self.in_memory_nodes[i * 2];
            let old_right = self.in_memory_nodes[i * 2 + 1];
            // Store new node in flat layout
            self.in_memory_nodes[i * 2] = inner_node.left;
            self.in_memory_nodes[i * 2 + 1] = inner_node.right;
            // Check if the old node was empty
            if is_empty_parent(old_left, old_right, index.depth() + 1) {
                return None;
            }
            return Some(InnerNode { left: old_left, right: old_right });
        }
        self.storage
            .set_inner_node(index, inner_node)
            .expect("Failed to store inner node")
    }

    /// Removes the inner node at `index`, returning the previously stored node if it
    /// was non-empty. In-memory slots are reset to the empty hash rather than deleted.
    fn remove_inner_node(&mut self, index: NodeIndex) -> Option<InnerNode> {
        if index.depth() < IN_MEMORY_DEPTH {
            let memory_index = to_memory_index(&index);
            // Get the old node before replacing with empty hashes
            let old_left = self.in_memory_nodes[memory_index * 2];
            let old_right = self.in_memory_nodes[memory_index * 2 + 1];
            // Replace with empty hashes
            let child_depth = index.depth() + 1;
            let empty_hash = *EmptySubtreeRoots::entry(SMT_DEPTH, child_depth);
            self.in_memory_nodes[memory_index * 2] = empty_hash;
            self.in_memory_nodes[memory_index * 2 + 1] = empty_hash;
            // Return the old node if it wasn't already empty
            if is_empty_parent(old_left, old_right, child_depth) {
                return None;
            }
            return Some(InnerNode { left: old_left, right: old_right });
        }
        self.storage.remove_inner_node(index).expect("Failed to remove inner node")
    }

    /// Inserts `value` at `key` and rehashes the affected path, returning the previous
    /// value. Short-circuits when the value is unchanged.
    fn insert(&mut self, key: Self::Key, value: Self::Value) -> Result<Self::Value, MerkleError> {
        let old_value = self.get_value(&key);
        // if the old value and new value are the same, there is nothing to update
        if value == old_value {
            return Ok(value);
        }
        let mutations = self.compute_mutations([(key, value)])?;
        self.apply_mutations(mutations).expect("Failed to apply mutations in insert");
        Ok(old_value)
    }

    /// Writes `value` under `key` directly into leaf storage without rehashing;
    /// an empty value removes the key instead. Returns the previous value, if any.
    fn insert_value(
        &mut self,
        key: Self::Key,
        value: Self::Value,
    ) -> Result<Option<Self::Value>, MerkleError> {
        // inserting an `EMPTY_VALUE` is equivalent to removing any value associated with `key`
        let index = Self::key_to_leaf_index(&key).value();
        if value != Self::EMPTY_VALUE {
            match self.storage.insert_value(index, key, value) {
                Ok(prev) => Ok(prev),
                // A full leaf is the one storage error callers can act on; surface it
                // as the corresponding Merkle error.
                Err(StorageError::Leaf(SmtLeafError::TooManyLeafEntries { actual })) => {
                    Err(MerkleError::TooManyLeafEntries { actual })
                },
                Err(_) => {
                    panic!("Storage error during insert_value");
                },
            }
        } else {
            Ok(self.storage.remove_value(index, key).map_err(LargeSmtError::from)?)
        }
    }

    /// Returns the value stored under `key`, or the empty word when absent.
    fn get_value(&self, key: &Self::Key) -> Self::Value {
        let leaf_pos = LeafIndex::<SMT_DEPTH>::from(*key);
        match self.storage.get_leaf(leaf_pos.value()) {
            Ok(Some(leaf)) => leaf.get_value(key).unwrap_or_default(),
            Ok(None) => EMPTY_WORD,
            Err(_) => {
                panic!("Storage error during get_leaf in get_value");
            },
        }
    }

    /// Returns the leaf containing `key`, or a fresh empty leaf when none is stored.
    fn get_leaf(&self, key: &Word) -> Self::Leaf {
        let leaf_pos = LeafIndex::<SMT_DEPTH>::from(*key).value();
        match self.storage.get_leaf(leaf_pos) {
            Ok(Some(leaf)) => leaf,
            Ok(None) => SmtLeaf::new_empty((*key).into()),
            Err(_) => {
                panic!("Storage error during get_leaf in get_leaf");
            },
        }
    }

    /// Hashes a leaf into its node commitment.
    fn hash_leaf(leaf: &Self::Leaf) -> Word {
        leaf.hash()
    }

    /// Returns the leaf as it would look after setting `key` to `value`, without
    /// mutating the tree. An empty value removes the key from the leaf.
    fn construct_prospective_leaf(
        &self,
        mut existing_leaf: SmtLeaf,
        key: &Word,
        value: &Word,
    ) -> Result<SmtLeaf, SmtLeafError> {
        debug_assert_eq!(existing_leaf.index(), Self::key_to_leaf_index(key));
        match existing_leaf {
            SmtLeaf::Empty(_) => Ok(SmtLeaf::new_single(*key, *value)),
            _ => {
                if *value != EMPTY_WORD {
                    existing_leaf.insert(*key, *value)?;
                } else {
                    existing_leaf.remove(*key);
                }
                Ok(existing_leaf)
            },
        }
    }

    /// Opens `key`: walks from the leaf to the root collecting sibling hashes, serving
    /// shallow depths from memory and deeper depths from subtrees preloaded from storage.
    fn open(&self, key: &Self::Key) -> Self::Opening {
        let leaf = self.get_leaf(key);
        let mut idx: NodeIndex = LeafIndex::from(*key).into();
        // Determine the chain of subtree roots covering the deep part of the path.
        let subtree_roots: Vec<NodeIndex> = (0..NUM_SUBTREE_LEVELS)
            .scan(idx.parent(), |cursor, _| {
                let subtree_root = Subtree::find_subtree_root(*cursor);
                *cursor = subtree_root.parent();
                Some(subtree_root)
            })
            .collect();
        // cache subtrees in memory
        let mut cache = Map::<NodeIndex, Subtree>::new();
        for &root in &subtree_roots {
            let subtree =
                match self.storage.get_subtree(root).expect("storage error fetching subtree") {
                    Some(st) => st,
                    None => Subtree::new(root),
                };
            cache.insert(root, subtree);
        }
        let mut path = Vec::with_capacity(idx.depth() as usize);
        while idx.depth() > 0 {
            let is_right = idx.is_value_odd();
            idx = idx.parent();
            let sibling_hash = if idx.depth() < IN_MEMORY_DEPTH {
                // top levels in memory
                let InnerNode { left, right } = self.get_inner_node(idx);
                if is_right { left } else { right }
            } else {
                // deep levels come from our preloaded subtrees
                let root = Subtree::find_subtree_root(idx);
                let subtree = &cache[&root];
                let InnerNode { left, right } = subtree
                    .get_inner_node(idx)
                    .unwrap_or_else(|| EmptySubtreeRoots::get_inner_node(SMT_DEPTH, idx.depth()));
                if is_right { left } else { right }
            };
            path.push(sibling_hash);
        }
        let merkle_path =
            SparseMerklePath::from_sized_iter(path).expect("failed to convert to SparseMerklePath");
        Self::path_and_leaf_to_opening(merkle_path, leaf)
    }

    /// Maps a key to its leaf index using the most significant felt of the key.
    fn key_to_leaf_index(key: &Word) -> LeafIndex<SMT_DEPTH> {
        let most_significant_felt = key[3];
        LeafIndex::new_max_depth(most_significant_felt.as_canonical_u64())
    }

    /// Packages a path and a leaf into an [`SmtProof`] without re-validation.
    fn path_and_leaf_to_opening(path: SparseMerklePath, leaf: SmtLeaf) -> SmtProof {
        SmtProof::new_unchecked(path, leaf)
    }
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/large/property_tests.rs | miden-crypto/src/merkle/smt/large/property_tests.rs | use alloc::{
collections::{BTreeMap, BTreeSet},
vec::Vec,
};
use proptest::prelude::*;
use super::MemoryStorage;
use crate::{
EMPTY_WORD, Felt, ONE, Word, ZERO,
merkle::smt::{LargeSmt, LeafIndex, SMT_DEPTH},
};
// GENERATORS
// ================================================================================================

/// Strategy producing arbitrary field elements, biased towards the edge cases `ZERO` and `ONE`.
fn arb_felt() -> impl Strategy<Value = Felt> {
    prop_oneof![any::<u64>().prop_map(Felt::new), Just(ZERO), Just(ONE),]
}
/// Strategy producing arbitrary words built from four independently generated felts.
fn arb_word() -> impl Strategy<Value = Word> {
    prop::array::uniform4(arb_felt()).prop_map(Word::new)
}
/// Generates unique key-value pairs.
///
/// Duplicates are dropped both by key and by leaf index, so the result may contain
/// fewer than `min_size` pairs.
fn arb_entries(min_size: usize, max_size: usize) -> impl Strategy<Value = Vec<(Word, Word)>> {
    prop::collection::vec((arb_word(), arb_word()), min_size..=max_size).prop_map(move |entries| {
        // Ensure uniqueness of entries as `LargeSmt::with_entries` returns an error if multiple
        // values exist for the same key.
        let mut used_indices = BTreeSet::new();
        let mut used_keys = BTreeSet::new();
        let mut result = Vec::new();
        for (key, value) in entries {
            let leaf_index = LeafIndex::<SMT_DEPTH>::from(key).value();
            // Keep a pair only if both its leaf index and its key are new.
            if used_indices.insert(leaf_index) && used_keys.insert(key) {
                result.push((key, value));
            }
        }
        result
    })
}
/// Generates updates based on existing entries.
///
/// Each raw update either targets an existing key or a freshly generated one, and either
/// sets a random value or deletes the key by writing `EMPTY_WORD`. Updates are collected
/// through a `BTreeMap`, so the output contains at most one update per key.
fn arb_updates(
    existing_entries: Vec<(Word, Word)>,
    min_updates: usize,
    max_updates: usize,
) -> impl Strategy<Value = Vec<(Word, Word)>> {
    let existing_keys: Vec<Word> = existing_entries.iter().map(|(k, _)| *k).collect();
    let has_existing = !existing_keys.is_empty();
    // Generate raw update params: (is_new_key, is_deletion, idx_seed, random_key, random_val)
    prop::collection::vec(
        (any::<bool>(), any::<bool>(), any::<usize>(), arb_word(), arb_word()),
        min_updates..=max_updates,
    )
    .prop_map(move |raw_updates| {
        let mut updates = BTreeMap::new();
        for (is_new_key, is_deletion, idx_seed, rand_key, rand_val) in raw_updates {
            let key = if has_existing && !is_new_key {
                // Update existing key
                existing_keys[idx_seed % existing_keys.len()]
            } else {
                // Use random key
                rand_key
            };
            // Determine value
            let value = if is_deletion { EMPTY_WORD } else { rand_val };
            updates.insert(key, value);
        }
        updates.into_iter().collect()
    })
}
// TESTS
// ================================================================================================

proptest! {
    #![proptest_config(ProptestConfig::with_cases(10))]

    // Checks that `insert_batch` produces the same result as `compute_mutations`
    // followed by `apply_mutations`, starting from identical initial entries.
    #[test]
    fn prop_insert_batch_matches_compute_apply(
        (initial_entries, updates) in arb_entries(1, 100)
            .prop_flat_map(|entries| {
                let updates = arb_updates(entries.clone(), 1, 50);
                (Just(entries), updates)
            })
    ) {
        run_insert_batch_matches_compute_apply(initial_entries, updates)?;
    }
}
/// Builds two identical trees from `initial_entries`, applies `updates` to one via
/// `compute_mutations` + `apply_mutations` and to the other via `insert_batch`, and
/// asserts that roots, values, and metadata agree.
fn run_insert_batch_matches_compute_apply(
    initial_entries: Vec<(Word, Word)>,
    updates: Vec<(Word, Word)>,
) -> Result<(), TestCaseError> {
    let storage1 = MemoryStorage::new();
    let storage2 = MemoryStorage::new();
    // Create two identical trees
    let mut tree1 = LargeSmt::with_entries(storage1, initial_entries.clone()).unwrap();
    let mut tree2 = LargeSmt::with_entries(storage2, initial_entries.clone()).unwrap();
    let root1 = tree1.root();
    let root2 = tree2.root();
    // Compute mutations -> apply mutations
    let mutations = tree1.compute_mutations(updates.clone()).unwrap();
    tree1.apply_mutations(mutations).unwrap();
    let new_root1 = tree1.root();
    // Insert_batch
    let new_root2 = tree2.insert_batch(updates.clone()).unwrap();
    // Verification
    // Roots match at each step
    prop_assert_eq!(root1, root2, "Initial roots should match");
    prop_assert_eq!(new_root1, new_root2, "Final roots should match");
    // Verify all touched keys have correct values in both trees
    for (key, _) in updates {
        let val1 = tree1.get_value(&key);
        let val2 = tree2.get_value(&key);
        prop_assert_eq!(val1, val2, "Values should match for key {:?}", key);
    }
    // Verify all initial keys (if not updated) are still consistent
    for (key, _) in initial_entries {
        let val1 = tree1.get_value(&key);
        let val2 = tree2.get_value(&key);
        prop_assert_eq!(val1, val2, "Values for initial keys should match");
    }
    // Verify metadata
    prop_assert_eq!(tree1.num_leaves(), tree2.num_leaves());
    prop_assert_eq!(tree1.num_entries(), tree2.num_entries());
    Ok(())
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/large/iter.rs | miden-crypto/src/merkle/smt/large/iter.rs | use alloc::{boxed::Box, vec::Vec};
use super::{IN_MEMORY_DEPTH, LargeSmt, SmtStorage, is_empty_parent};
use crate::{
Word,
merkle::{InnerNodeInfo, Rpo256, smt::large::subtree::Subtree},
};
// ITERATORS
// ================================================================================================

/// Internal state machine for [`LargeSmtInnerNodeIterator`].
enum InnerNodeIteratorState<'a> {
    /// Walking the flat in-memory node array (depths above `IN_MEMORY_DEPTH`).
    InMemory {
        // Next flat-array node index to visit; 0 means "not started yet".
        current_index: usize,
        large_smt_in_memory_nodes: &'a Vec<Word>,
    },
    /// Walking the storage-backed subtrees, one subtree at a time.
    Subtree {
        subtree_iter: Box<dyn Iterator<Item = Subtree> + 'a>,
        // Iterator over the nodes of the subtree currently being drained, if any.
        current_subtree_node_iter: Option<Box<dyn Iterator<Item = InnerNodeInfo> + 'a>>,
    },
    /// Iteration finished (or aborted after a storage error).
    Done,
}
/// Iterator over all non-empty inner nodes of a [`LargeSmt`], yielding the in-memory
/// top of the tree first and then the storage-backed subtrees.
pub struct LargeSmtInnerNodeIterator<'a, S: SmtStorage> {
    large_smt: &'a LargeSmt<S>,
    state: InnerNodeIteratorState<'a>,
}
impl<'a, S: SmtStorage> LargeSmtInnerNodeIterator<'a, S> {
    /// Creates a new iterator positioned at the start of the in-memory phase.
    pub(super) fn new(large_smt: &'a LargeSmt<S>) -> Self {
        // Start in the in-memory phase; `current_index` of 0 means iteration has not
        // begun yet. In-memory nodes should never be empty.
        let state = InnerNodeIteratorState::InMemory {
            current_index: 0,
            large_smt_in_memory_nodes: &large_smt.in_memory_nodes,
        };
        Self { large_smt, state }
    }
}
impl<S: SmtStorage> Iterator for LargeSmtInnerNodeIterator<'_, S> {
    type Item = InnerNodeInfo;

    /// Returns the next inner node info in the tree.
    ///
    /// The iterator operates in three phases:
    /// 1. InMemory: Iterates through the in-memory nodes (depths 0-IN_MEMORY_DEPTH-1)
    /// 2. Subtree: Iterates through nodes in storage subtrees (depths IN_MEMORY_DEPTH-SMT_DEPTH)
    /// 3. Done: No more nodes to iterate
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            match &mut self.state {
                // Phase 1: Process in-memory nodes (depths 0..IN_MEMORY_DEPTH)
                InnerNodeIteratorState::InMemory { current_index, large_smt_in_memory_nodes } => {
                    // Iterate through nodes at depths 0 to IN_MEMORY_DEPTH-1
                    // Start at index 1 (root), max node index is (1 << IN_MEMORY_DEPTH) - 1
                    if *current_index == 0 {
                        *current_index = 1;
                    }
                    let max_node_idx = (1 << IN_MEMORY_DEPTH) - 1;
                    while *current_index <= max_node_idx {
                        let node_idx = *current_index;
                        *current_index += 1;
                        // Get children from flat layout: left at 2*i, right at 2*i+1
                        let left = large_smt_in_memory_nodes[node_idx * 2];
                        let right = large_smt_in_memory_nodes[node_idx * 2 + 1];
                        // Skip empty nodes. In the 1-indexed flat layout, a node's depth
                        // is the floor of log2 of its index.
                        let depth = node_idx.ilog2() as u8;
                        let child_depth = depth + 1;
                        if !is_empty_parent(left, right, child_depth) {
                            // The node's own hash is recomputed from its children.
                            return Some(InnerNodeInfo {
                                value: Rpo256::merge(&[left, right]),
                                left,
                                right,
                            });
                        }
                    }
                    // All in-memory nodes processed. Transition to Phase 2: Subtree iteration
                    match self.large_smt.storage.iter_subtrees() {
                        Ok(subtree_iter) => {
                            self.state = InnerNodeIteratorState::Subtree {
                                subtree_iter,
                                current_subtree_node_iter: None,
                            };
                            continue; // Start processing subtrees immediately
                        },
                        Err(_e) => {
                            // Storage error occurred - we should propagate this properly
                            // For now, transition to Done state to avoid infinite loops
                            self.state = InnerNodeIteratorState::Done;
                            return None;
                        },
                    }
                },
                // Phase 2: Process storage subtrees (depths IN_MEMORY_DEPTH..=SMT_DEPTH)
                InnerNodeIteratorState::Subtree { subtree_iter, current_subtree_node_iter } => {
                    loop {
                        // First, try to get the next node from current subtree
                        if let Some(node_iter) = current_subtree_node_iter
                            && let Some(info) = node_iter.as_mut().next()
                        {
                            return Some(info);
                        }
                        // Current subtree exhausted, move to next subtree
                        match subtree_iter.next() {
                            Some(next_subtree) => {
                                // Materialize the subtree's node infos so the iterator
                                // does not borrow from the consumed `next_subtree`.
                                let infos: Vec<InnerNodeInfo> =
                                    next_subtree.iter_inner_node_info().collect();
                                *current_subtree_node_iter = Some(Box::new(infos.into_iter()));
                            },
                            None => {
                                self.state = InnerNodeIteratorState::Done;
                                return None; // All subtrees processed
                            },
                        }
                    }
                },
                InnerNodeIteratorState::Done => {
                    return None; // Iteration finished.
                },
            }
        }
    }
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/large/construction.rs | miden-crypto/src/merkle/smt/large/construction.rs | use alloc::vec::Vec;
use core::mem;
use rayon::prelude::*;
use super::{
CONSTRUCTION_SUBTREE_BATCH_SIZE, IN_MEMORY_DEPTH, LargeSmt, LargeSmtError, NUM_IN_MEMORY_NODES,
ROOT_MEMORY_INDEX, SMT_DEPTH, SmtStorage, StorageError, Subtree,
};
use crate::{
EMPTY_WORD, Word,
merkle::smt::{
EmptySubtreeRoots, InnerNode, Map, MerkleError, NodeIndex, Rpo256, Smt, SparseMerkleTree,
full::concurrent::{
PairComputations, SUBTREE_DEPTH, SubtreeLeaf, SubtreeLeavesIter, build_subtree,
},
large::to_memory_index,
},
};
// CONSTRUCTION
// ================================================================================================

impl<S: SmtStorage> LargeSmt<S> {
    /// Creates a new empty [LargeSmt] backed by the provided storage.
    ///
    /// This method is intended for creating a fresh tree with empty storage. If the storage
    /// already contains data, use [`Self::load_with_root()`] or [`Self::load()`]
    /// instead.
    ///
    /// # Errors
    /// - Returns [`LargeSmtError::StorageNotEmpty`] if the storage already contains data.
    /// - Returns a storage error if checking the storage state fails.
    ///
    /// # Example
    /// ```
    /// # use miden_crypto::merkle::smt::{LargeSmt, MemoryStorage};
    /// let storage = MemoryStorage::new();
    /// let smt = LargeSmt::new(storage).expect("Failed to create SMT");
    /// ```
    pub fn new(storage: S) -> Result<Self, LargeSmtError> {
        if storage.has_leaves()? {
            return Err(LargeSmtError::StorageNotEmpty);
        }
        Self::initialize_from_storage(storage)
    }

    /// Loads an existing [LargeSmt] from storage without validating the root.
    ///
    /// If the storage is empty, the SMT is initialized with the root of an empty tree.
    /// Otherwise, the in-memory top of the tree is reconstructed from the cached depth-24
    /// subtree hashes stored in the backend.
    ///
    /// **Note:** This method does not validate the reconstructed root. Use this only when
    /// you explicitly want to skip validation. For normal reloading, prefer
    /// [`Self::load_with_root()`].
    ///
    /// # Errors
    /// Returns an error if fetching data from storage fails.
    ///
    /// # Example
    /// ```no_run
    /// # #[cfg(feature = "rocksdb")]
    /// # {
    /// use miden_crypto::merkle::smt::{LargeSmt, RocksDbConfig, RocksDbStorage};
    /// let storage = RocksDbStorage::open(RocksDbConfig::new("/path/to/db")).unwrap();
    /// let smt = LargeSmt::load(storage).expect("Failed to load SMT");
    /// # }
    /// ```
    pub fn load(storage: S) -> Result<Self, LargeSmtError> {
        Self::initialize_from_storage(storage)
    }

    /// Loads an existing [LargeSmt] from storage and validates it against the expected root.
    ///
    /// This method reconstructs the in-memory top of the tree from the cached depth-24
    /// subtree hashes, computes the root, and validates it against `expected_root`.
    ///
    /// Use this method when reloading a tree to ensure the storage contains the expected
    /// data and hasn't been corrupted or tampered with.
    ///
    /// # Errors
    /// - Returns [`LargeSmtError::RootMismatch`] if the reconstructed root does not match
    ///   `expected_root`.
    /// - Returns a storage error if fetching data from storage fails.
    ///
    /// # Example
    /// ```no_run
    /// # #[cfg(feature = "rocksdb")]
    /// # {
    /// use miden_crypto::{
    ///     Word,
    ///     merkle::smt::{LargeSmt, RocksDbConfig, RocksDbStorage},
    /// };
    /// # let expected_root: Word = miden_crypto::EMPTY_WORD;
    /// let storage = RocksDbStorage::open(RocksDbConfig::new("/path/to/db")).unwrap();
    /// let smt = LargeSmt::load_with_root(storage, expected_root)
    ///     .expect("Failed to load SMT with expected root");
    /// # }
    /// ```
    pub fn load_with_root(storage: S, expected_root: Word) -> Result<Self, LargeSmtError> {
        let smt = Self::load(storage)?;
        let actual_root = smt.root();
        if actual_root != expected_root {
            return Err(LargeSmtError::RootMismatch {
                expected: expected_root,
                actual: actual_root,
            });
        }
        Ok(smt)
    }

    /// Returns a new [LargeSmt] instantiated with leaves set as specified by the provided
    /// entries.
    ///
    /// If the `concurrent` feature is enabled, this function uses a parallel implementation to
    /// process the entries efficiently, otherwise it defaults to the sequential implementation.
    ///
    /// All leaves omitted from the entries list are set to [Self::EMPTY_VALUE].
    ///
    /// # Errors
    /// Returns an error if the provided entries contain multiple values for the same key.
    pub fn with_entries(
        storage: S,
        entries: impl IntoIterator<Item = (Word, Word)>,
    ) -> Result<Self, LargeSmtError> {
        let entries: Vec<(Word, Word)> = entries.into_iter().collect();
        if storage.has_leaves()? {
            return Err(StorageError::Unsupported(
                "Cannot create SMT with non-empty storage".into(),
            )
            .into());
        }
        let mut tree = LargeSmt::new(storage)?;
        if entries.is_empty() {
            return Ok(tree);
        }
        tree.build_subtrees(entries)?;
        Ok(tree)
    }

    /// Internal method that initializes the in-memory tree from storage.
    ///
    /// For empty storage, returns an empty tree. For non-empty storage,
    /// rebuilds the in-memory top from cached depth-24 hashes.
    fn initialize_from_storage(storage: S) -> Result<Self, LargeSmtError> {
        // Initialize in-memory nodes
        let mut in_memory_nodes: Vec<Word> = vec![EMPTY_WORD; NUM_IN_MEMORY_NODES];
        // Root
        in_memory_nodes[ROOT_MEMORY_INDEX] = *EmptySubtreeRoots::entry(SMT_DEPTH, 0);
        // Inner nodes: prefill every depth's slot range with that depth's empty hash.
        for depth in 0..IN_MEMORY_DEPTH {
            let child_empty_hash = *EmptySubtreeRoots::entry(SMT_DEPTH, depth + 1);
            let start = 2 * (1 << depth);
            let end = 2 * (1 << (depth + 1));
            in_memory_nodes[start..end].fill(child_empty_hash);
        }
        let is_empty = !storage.has_leaves()?;
        // If the tree is empty, return it
        if is_empty {
            return Ok(Self {
                storage,
                in_memory_nodes,
                leaf_count: 0,
                entry_count: 0,
            });
        }
        // Initialize counts from storage
        let leaf_count = storage.leaf_count()?;
        let entry_count = storage.entry_count()?;
        // Get the in-memory top of tree leaves from storage
        let in_memory_tree_leaves = storage.get_depth24()?;
        // Convert in-memory top of tree leaves to SubtreeLeaf
        let mut leaf_subtrees: Vec<SubtreeLeaf> = in_memory_tree_leaves
            .into_iter()
            .map(|(index, hash)| SubtreeLeaf { col: index, hash })
            .collect();
        leaf_subtrees.sort_by_key(|leaf| leaf.col);
        let mut subtree_leaves: Vec<Vec<SubtreeLeaf>> =
            SubtreeLeavesIter::from_leaves(&mut leaf_subtrees).collect();
        // build in-memory top of the tree, one SUBTREE_DEPTH band at a time, bottom-up
        for current_depth in (SUBTREE_DEPTH..=IN_MEMORY_DEPTH).step_by(SUBTREE_DEPTH as usize).rev()
        {
            let (nodes, mut subtree_roots): (Vec<Map<_, _>>, Vec<SubtreeLeaf>) = subtree_leaves
                .into_par_iter()
                .map(|subtree| {
                    debug_assert!(subtree.is_sorted());
                    debug_assert!(!subtree.is_empty());
                    let (nodes, subtree_root) = build_subtree(subtree, SMT_DEPTH, current_depth);
                    (nodes, subtree_root)
                })
                .unzip();
            subtree_leaves = SubtreeLeavesIter::from_leaves(&mut subtree_roots).collect();
            debug_assert!(!subtree_leaves.is_empty());
            for subtree_nodes in nodes {
                for (index, node) in subtree_nodes {
                    let memory_index = to_memory_index(&index);
                    // Store left and right children in flat layout
                    in_memory_nodes[memory_index * 2] = node.left;
                    in_memory_nodes[memory_index * 2 + 1] = node.right;
                }
            }
        }
        // Compute the root from children at indices 2 and 3
        let calculated_root = Rpo256::merge(&[in_memory_nodes[2], in_memory_nodes[3]]);
        // Set the root node
        in_memory_nodes[ROOT_MEMORY_INDEX] = calculated_root;
        Ok(Self {
            storage,
            in_memory_nodes,
            leaf_count,
            entry_count,
        })
    }

    /// Sorts `entries` by leaf index (in parallel) and builds the tree from the sorted list.
    fn build_subtrees(&mut self, mut entries: Vec<(Word, Word)>) -> Result<(), MerkleError> {
        entries.par_sort_unstable_by_key(|item| {
            let index = Self::key_to_leaf_index(&item.0);
            index.value()
        });
        self.build_subtrees_from_sorted_entries(entries)?;
        Ok(())
    }

    /// Builds the tree from entries pre-sorted by leaf index.
    ///
    /// Deep (disk-backed) subtrees are built level by level in parallel and streamed to a
    /// dedicated writer thread in batches; the in-memory top is then built from the
    /// resulting subtree roots and the new root is set.
    fn build_subtrees_from_sorted_entries(
        &mut self,
        entries: Vec<(Word, Word)>,
    ) -> Result<(), MerkleError> {
        let PairComputations {
            leaves: mut leaf_subtrees,
            nodes: initial_leaves,
        } = Smt::sorted_pairs_to_leaves(entries)?;
        if initial_leaves.is_empty() {
            return Ok(());
        }
        // Update cached counts before storing leaves
        self.leaf_count = initial_leaves.len();
        self.entry_count = initial_leaves.values().map(|leaf| leaf.num_entries()).sum();
        // Store the initial leaves
        self.storage.set_leaves(initial_leaves).expect("Failed to store initial leaves");
        // build deep (disk-backed) subtrees
        leaf_subtrees = std::thread::scope(|scope| {
            let (sender, receiver) = flume::bounded(CONSTRUCTION_SUBTREE_BATCH_SIZE);
            let storage = &mut self.storage;
            // Writer thread: drains completed subtrees from the channel and persists them
            // in batches of CONSTRUCTION_SUBTREE_BATCH_SIZE (plus a final partial batch).
            scope.spawn(move || -> Result<(), MerkleError> {
                let mut subtrees: Vec<Subtree> =
                    Vec::with_capacity(CONSTRUCTION_SUBTREE_BATCH_SIZE);
                for subtree in receiver.iter() {
                    subtrees.push(subtree);
                    if subtrees.len() == CONSTRUCTION_SUBTREE_BATCH_SIZE {
                        let subtrees_clone = mem::take(&mut subtrees);
                        storage
                            .set_subtrees(subtrees_clone)
                            .expect("Writer thread failed to set subtrees");
                    }
                }
                storage.set_subtrees(subtrees).expect("Writer thread failed to set subtrees");
                Ok(())
            });
            for bottom_depth in (IN_MEMORY_DEPTH + SUBTREE_DEPTH..=SMT_DEPTH)
                .step_by(SUBTREE_DEPTH as usize)
                .rev()
            {
                let mut subtree_roots: Vec<SubtreeLeaf> = leaf_subtrees
                    .into_par_iter()
                    .map(|subtree_leaves| {
                        debug_assert!(subtree_leaves.is_sorted());
                        debug_assert!(!subtree_leaves.is_empty());
                        let (nodes, subtree_root) =
                            build_subtree(subtree_leaves, SMT_DEPTH, bottom_depth);
                        let subtree_root_index =
                            NodeIndex::new(bottom_depth - SUBTREE_DEPTH, subtree_root.col).unwrap();
                        let mut subtree = Subtree::new(subtree_root_index);
                        for (index, node) in nodes {
                            subtree.insert_inner_node(index, node);
                        }
                        sender.send(subtree).expect("Flume channel disconnected unexpectedly");
                        subtree_root
                    })
                    .collect();
                leaf_subtrees = SubtreeLeavesIter::from_leaves(&mut subtree_roots).collect();
                debug_assert!(!leaf_subtrees.is_empty());
            }
            // Dropping the sender closes the channel so the writer thread can flush and exit.
            drop(sender);
            leaf_subtrees
        });
        // build top of the tree (in-memory only, normal insert)
        for bottom_depth in (SUBTREE_DEPTH..=IN_MEMORY_DEPTH).step_by(SUBTREE_DEPTH as usize).rev()
        {
            let (nodes, mut subtree_roots): (Vec<Map<_, _>>, Vec<SubtreeLeaf>) = leaf_subtrees
                .into_par_iter()
                .map(|subtree| {
                    debug_assert!(subtree.is_sorted());
                    debug_assert!(!subtree.is_empty());
                    let (nodes, subtree_root) = build_subtree(subtree, SMT_DEPTH, bottom_depth);
                    (nodes, subtree_root)
                })
                .unzip();
            leaf_subtrees = SubtreeLeavesIter::from_leaves(&mut subtree_roots).collect();
            debug_assert!(!leaf_subtrees.is_empty());
            for subtree_nodes in nodes {
                self.insert_inner_nodes_batch(subtree_nodes.into_iter());
            }
        }
        self.set_root(self.get_inner_node(NodeIndex::root()).hash());
        Ok(())
    }

    /// Inserts a batch of upper inner nodes into the in-memory array; nodes with
    /// `depth >= IN_MEMORY_DEPTH` are silently skipped.
    fn insert_inner_nodes_batch(
        &mut self,
        nodes: impl IntoIterator<Item = (NodeIndex, InnerNode)>,
    ) {
        for (index, node) in nodes {
            if index.depth() < IN_MEMORY_DEPTH {
                let memory_index = to_memory_index(&index);
                // Store in flat layout: left at 2*i, right at 2*i+1
                self.in_memory_nodes[memory_index * 2] = node.left;
                self.in_memory_nodes[memory_index * 2 + 1] = node.right;
            }
        }
    }
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/large/storage/memory.rs | miden-crypto/src/merkle/smt/large/storage/memory.rs | use alloc::{boxed::Box, vec::Vec};
use super::{SmtStorage, StorageError, StorageUpdateParts, StorageUpdates, SubtreeUpdate};
use crate::{
EMPTY_WORD, Map, MapEntry, Word,
merkle::{
NodeIndex,
smt::{
InnerNode, SmtLeaf,
large::{IN_MEMORY_DEPTH, subtree::Subtree},
},
},
};
/// In-memory storage for a Sparse Merkle Tree (SMT), implementing the `SmtStorage` trait.
///
/// This structure stores the SMT's leaf nodes and subtrees directly in memory.
///
/// It is primarily intended for scenarios where data persistence to disk is not a
/// primary concern. Common use cases include:
/// - Testing environments.
/// - Managing SMT instances with a limited operational lifecycle.
/// - Situations where a higher-level application architecture handles its own data persistence
/// strategy.
#[derive(Debug, Clone)]
pub struct MemoryStorage {
    /// Non-empty SMT leaves, keyed by their logical leaf index.
    pub leaves: Map<u64, SmtLeaf>,
    /// Deep subtrees, keyed by their root `NodeIndex` (callers are expected to use
    /// indices at depth >= `IN_MEMORY_DEPTH`; see the inner-node accessors below).
    pub subtrees: Map<NodeIndex, Subtree>,
}
impl MemoryStorage {
/// Creates a new, empty in-memory storage for a Sparse Merkle Tree.
///
/// Initializes empty maps for leaves and subtrees.
pub fn new() -> Self {
Self { leaves: Map::new(), subtrees: Map::new() }
}
}
impl Default for MemoryStorage {
fn default() -> Self {
Self::new()
}
}
impl SmtStorage for MemoryStorage {
/// Gets the total number of non-empty leaves currently stored.
fn leaf_count(&self) -> Result<usize, StorageError> {
Ok(self.leaves.len())
}
/// Gets the total number of key-value entries currently stored.
fn entry_count(&self) -> Result<usize, StorageError> {
Ok(self.leaves.values().map(|leaf| leaf.num_entries()).sum())
}
/// Inserts a key-value pair into the leaf at the given index.
///
/// - If the leaf at `index` does not exist, a new `SmtLeaf::Single` is created.
/// - If the leaf exists, the key-value pair is inserted into it.
/// - Returns the previous value associated with the key, if any.
///
/// # Panics
/// Panics in debug builds if `value` is `EMPTY_WORD`.
fn insert_value(
&mut self,
index: u64,
key: Word,
value: Word,
) -> Result<Option<Word>, StorageError> {
debug_assert_ne!(value, EMPTY_WORD);
match self.leaves.get_mut(&index) {
Some(leaf) => Ok(leaf.insert(key, value)?),
None => {
self.leaves.insert(index, SmtLeaf::Single((key, value)));
Ok(None)
},
}
}
/// Removes a key-value pair from the leaf at the given `index`.
///
/// - If the leaf at `index` exists and the `key` is found within that leaf, the key-value pair
/// is removed, and the old `Word` value is returned in `Ok(Some(Word))`.
/// - If the leaf at `index` exists but the `key` is not found within that leaf, `Ok(None)` is
/// returned (as `leaf.get_value(&key)` would be `None`).
/// - If the leaf at `index` does not exist, `Ok(None)` is returned, as no value could be
/// removed.
fn remove_value(&mut self, index: u64, key: Word) -> Result<Option<Word>, StorageError> {
let old_value = match self.leaves.entry(index) {
MapEntry::Occupied(mut entry) => {
let (old_value, is_empty) = entry.get_mut().remove(key);
if is_empty {
entry.remove();
}
old_value
},
// Leaf at index does not exist, so no value could be removed.
MapEntry::Vacant(_) => None,
};
Ok(old_value)
}
/// Retrieves a single leaf node.
fn get_leaf(&self, index: u64) -> Result<Option<SmtLeaf>, StorageError> {
Ok(self.leaves.get(&index).cloned())
}
/// Sets multiple leaf nodes in storage.
///
/// If a leaf at a given index already exists, it is overwritten.
fn set_leaves(&mut self, leaves_map: Map<u64, SmtLeaf>) -> Result<(), StorageError> {
self.leaves.extend(leaves_map);
Ok(())
}
/// Removes a single leaf node.
fn remove_leaf(&mut self, index: u64) -> Result<Option<SmtLeaf>, StorageError> {
Ok(self.leaves.remove(&index))
}
/// Retrieves multiple leaf nodes. Returns Ok(None) for indices not found.
fn get_leaves(&self, indices: &[u64]) -> Result<Vec<Option<SmtLeaf>>, StorageError> {
let leaves = indices.iter().map(|idx| self.leaves.get(idx).cloned()).collect();
Ok(leaves)
}
/// Returns true if the storage has any leaves.
fn has_leaves(&self) -> Result<bool, StorageError> {
Ok(!self.leaves.is_empty())
}
/// Retrieves a single Subtree (representing deep nodes) by its root NodeIndex.
/// Assumes index.depth() >= IN_MEMORY_DEPTH. Returns Ok(None) if not found.
fn get_subtree(&self, index: NodeIndex) -> Result<Option<Subtree>, StorageError> {
Ok(self.subtrees.get(&index).cloned())
}
/// Retrieves multiple Subtrees.
/// Assumes index.depth() >= IN_MEMORY_DEPTH for all indices. Returns Ok(None) for indices not
/// found.
fn get_subtrees(&self, indices: &[NodeIndex]) -> Result<Vec<Option<Subtree>>, StorageError> {
let subtrees: Vec<_> = indices.iter().map(|idx| self.subtrees.get(idx).cloned()).collect();
Ok(subtrees)
}
/// Sets a single Subtree (representing deep nodes) by its root NodeIndex.
///
/// If a subtree with the same root NodeIndex already exists, it is overwritten.
/// Assumes `subtree.root_index().depth() >= IN_MEMORY_DEPTH`.
fn set_subtree(&mut self, subtree: &Subtree) -> Result<(), StorageError> {
self.subtrees.insert(subtree.root_index(), subtree.clone());
Ok(())
}
/// Sets multiple Subtrees (representing deep nodes) by their root NodeIndex.
///
/// If a subtree with a given root NodeIndex already exists, it is overwritten.
/// Assumes `subtree.root_index().depth() >= IN_MEMORY_DEPTH` for all subtrees in the vector.
fn set_subtrees(&mut self, subtrees_vec: Vec<Subtree>) -> Result<(), StorageError> {
self.subtrees
.extend(subtrees_vec.into_iter().map(|subtree| (subtree.root_index(), subtree)));
Ok(())
}
/// Removes a single Subtree (representing deep nodes) by its root NodeIndex.
fn remove_subtree(&mut self, index: NodeIndex) -> Result<(), StorageError> {
self.subtrees.remove(&index);
Ok(())
}
/// Retrieves a single inner node from a Subtree.
///
/// This function is intended for accessing nodes within a Subtree, meaning
/// `index.depth()` must be greater than or equal to `IN_MEMORY_DEPTH`.
///
/// # Errors
/// - `StorageError::Unsupported`: If `index.depth() < IN_MEMORY_DEPTH`.
///
/// Returns `Ok(None)` if the subtree or the specific inner node within it is not found.
fn get_inner_node(&self, index: NodeIndex) -> Result<Option<InnerNode>, StorageError> {
if index.depth() < IN_MEMORY_DEPTH {
return Err(StorageError::Unsupported(
"Cannot get inner node from upper part of the tree".into(),
));
}
let subtree_root_index = Subtree::find_subtree_root(index);
Ok(self
.subtrees
.get(&subtree_root_index)
.and_then(|subtree| subtree.get_inner_node(index)))
}
/// Sets a single inner node within a Subtree.
///
/// - `index.depth()` must be greater than or equal to `IN_MEMORY_DEPTH`.
/// - If the target Subtree does not exist, it is created.
/// - The `node` is then inserted into the Subtree.
///
/// Returns the `InnerNode` that was previously at this `index`, if any.
///
/// # Errors
/// - `StorageError::Unsupported`: If `index.depth() < IN_MEMORY_DEPTH`.
fn set_inner_node(
&mut self,
index: NodeIndex,
node: InnerNode,
) -> Result<Option<InnerNode>, StorageError> {
if index.depth() < IN_MEMORY_DEPTH {
return Err(StorageError::Unsupported(
"Cannot set inner node in upper part of the tree".into(),
));
}
let subtree_root_index = Subtree::find_subtree_root(index);
let mut subtree = self
.subtrees
.remove(&subtree_root_index)
.unwrap_or_else(|| Subtree::new(subtree_root_index));
let old_node = subtree.insert_inner_node(index, node);
self.subtrees.insert(subtree_root_index, subtree);
Ok(old_node)
}
/// Removes a single inner node from a Subtree.
///
/// - `index.depth()` must be greater than or equal to `IN_MEMORY_DEPTH`.
/// - If the Subtree becomes empty after removing the node, the Subtree itself is removed from
/// storage.
///
/// Returns the `InnerNode` that was removed, if any.
///
/// # Errors
/// - `StorageError::Unsupported`: If `index.depth() < IN_MEMORY_DEPTH`.
fn remove_inner_node(&mut self, index: NodeIndex) -> Result<Option<InnerNode>, StorageError> {
if index.depth() < IN_MEMORY_DEPTH {
return Err(StorageError::Unsupported(
"Cannot remove inner node from upper part of the tree".into(),
));
}
let subtree_root_index = Subtree::find_subtree_root(index);
let inner_node: Option<InnerNode> =
self.subtrees.remove(&subtree_root_index).and_then(|mut subtree| {
let old_node = subtree.remove_inner_node(index);
if !subtree.is_empty() {
self.subtrees.insert(subtree_root_index, subtree);
}
old_node
});
Ok(inner_node)
}
/// Applies a set of updates atomically to the storage.
///
/// This method handles updates to:
/// - Leaves: Inserts new or updated leaves, removes specified leaves.
/// - Subtrees: Inserts new or updated subtrees, removes specified subtrees.
fn apply(&mut self, updates: StorageUpdates) -> Result<(), StorageError> {
let StorageUpdateParts {
leaf_updates,
subtree_updates,
leaf_count_delta: _,
entry_count_delta: _,
} = updates.into_parts();
for (index, leaf_opt) in leaf_updates {
if let Some(leaf) = leaf_opt {
self.leaves.insert(index, leaf);
} else {
self.leaves.remove(&index);
}
}
for update in subtree_updates {
match update {
SubtreeUpdate::Store { index, subtree } => {
self.subtrees.insert(index, subtree);
},
SubtreeUpdate::Delete { index } => {
self.subtrees.remove(&index);
},
}
}
Ok(())
}
/// Returns an iterator over all (index, SmtLeaf) pairs in the storage.
///
/// The iterator provides access to the current state of the leaves.
fn iter_leaves(&self) -> Result<Box<dyn Iterator<Item = (u64, SmtLeaf)> + '_>, StorageError> {
let leaves_vec = self.leaves.iter().map(|(&k, v)| (k, v.clone())).collect::<Vec<_>>();
Ok(Box::new(leaves_vec.into_iter()))
}
/// Returns an iterator over all Subtrees in the storage.
///
/// The iterator provides access to the current subtrees from storage.
fn iter_subtrees(&self) -> Result<Box<dyn Iterator<Item = Subtree> + '_>, StorageError> {
let subtrees_vec = self.subtrees.values().cloned().collect::<Vec<_>>();
Ok(Box::new(subtrees_vec.into_iter()))
}
/// Retrieves all depth 24 roots for fast tree rebuilding.
///
/// For MemoryStorage, this returns an empty vector since all data is already in memory
/// and there's no startup performance benefit to caching depth 24 roots.
fn get_depth24(&self) -> Result<Vec<(u64, Word)>, StorageError> {
Ok(Vec::new())
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/large/storage/error.rs | miden-crypto/src/merkle/smt/large/storage/error.rs | use alloc::{boxed::Box, string::String};
/// Errors returned by any `SmtStorage` implementation.
///
/// Categories:
/// - Backend errors (DB/I/O)
/// - Decode/length mismatches with expected/actual parameters
/// - Unsupported operations
/// - Higher-level value and subtree decode failures
#[derive(Debug, thiserror::Error)]
pub enum StorageError {
    /// Backend I/O or database error (e.g., RocksDB). The boxed source error carries
    /// the backend-specific details.
    #[error("backend error: {0}")]
    Backend(#[from] Box<dyn core::error::Error + Send + Sync + 'static>),
    /// Key bytes had the wrong length (e.g., leaf index key, subtree root key).
    #[error("invalid key length: expected {expected} bytes, found {found}")]
    BadKeyLen { expected: usize, found: usize },
    /// Subtree key bytes had the wrong length for the given depth (key prefix length
    /// varies with subtree depth).
    #[error(
        "invalid subtree key length at depth {depth}: expected {expected} bytes, found {found}"
    )]
    BadSubtreeKeyLen { depth: u8, expected: usize, found: usize },
    /// Value/metadata bytes had the wrong length; `what` names the field being decoded
    /// (e.g., leaf/entry counts).
    #[error("invalid value length for {what}: expected {expected} bytes, found {found}")]
    BadValueLen {
        what: &'static str,
        expected: usize,
        found: usize,
    },
    /// Leaf-level error (e.g., too many entries).
    #[error("leaf operation failed")]
    Leaf(#[from] crate::merkle::smt::SmtLeafError),
    /// Failed to (de)serialize a stored subtree blob.
    #[error("failed to decode subtree")]
    Subtree(#[from] crate::merkle::smt::SubtreeError),
    /// The requested operation is not supported by this backend (the message explains
    /// which operation and why).
    #[error("operation not supported: {0}")]
    Unsupported(String),
    /// Higher-level type (e.g., `Word`) failed to decode from bytes.
    #[error("failed to decode value bytes")]
    Value(#[from] crate::utils::DeserializationError),
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/large/storage/mod.rs | miden-crypto/src/merkle/smt/large/storage/mod.rs | use alloc::{boxed::Box, vec::Vec};
use core::{
fmt,
ops::{Deref, DerefMut},
};
use crate::{
Word,
merkle::{
NodeIndex,
smt::{InnerNode, Map, SmtLeaf, large::subtree::Subtree},
},
};
mod error;
pub use error::StorageError;
#[cfg(feature = "rocksdb")]
mod rocksdb;
#[cfg(feature = "rocksdb")]
pub use rocksdb::{RocksDbConfig, RocksDbStorage};
mod memory;
pub use memory::MemoryStorage;
mod updates;
pub use updates::{StorageUpdateParts, StorageUpdates, SubtreeUpdate};
/// Sparse Merkle Tree storage backend.
///
/// This trait outlines the fundamental operations required to persist and retrieve
/// the components of an SMT: leaves and deeper subtrees.
/// Implementations of this trait can provide various storage solutions, like in-memory
/// maps or persistent databases (e.g., RocksDB).
///
/// All methods are expected to handle potential storage errors by returning a
/// `Result<_, StorageError>`.
pub trait SmtStorage: 'static + fmt::Debug + Send + Sync {
    /// Retrieves the total number of leaf nodes currently stored.
    ///
    /// # Errors
    /// Returns `StorageError` if the storage read operation fails.
    fn leaf_count(&self) -> Result<usize, StorageError>;
    /// Retrieves the total number of unique key-value entries across all leaf nodes.
    ///
    /// # Errors
    /// Returns `StorageError` if the storage read operation fails.
    fn entry_count(&self) -> Result<usize, StorageError>;
    /// Inserts a key-value pair into the SMT leaf at the specified logical `index`.
    ///
    /// - If the leaf at `index` does not exist, it may be created.
    /// - If the `key` already exists in the leaf at `index`, its `value` is updated.
    /// - Returns the previous `Word` value associated with the `key` at `index`, if any.
    ///
    /// Implementations are responsible for updating overall leaf and entry counts if necessary.
    ///
    /// Note: This only updates the leaf. Callers are responsible for recomputing and
    /// persisting the corresponding inner nodes.
    ///
    /// # Errors
    /// Returns `StorageError` if the storage operation fails (e.g., backend database error,
    /// insufficient space, serialization failures).
    fn insert_value(
        &mut self,
        index: u64,
        key: Word,
        value: Word,
    ) -> Result<Option<Word>, StorageError>;
    /// Removes a key-value pair from the SMT leaf at the specified logical `index`.
    ///
    /// - If the `key` is found in the leaf at `index`, it is removed, and the old `Word` value is
    ///   returned.
    /// - If the leaf at `index` does not exist, or if the `key` is not found within it, `Ok(None)`
    ///   is returned.
    /// - If removing the entry causes the leaf to become empty, the behavior regarding the leaf
    ///   node itself (e.g., whether it's removed from storage) is implementation-dependent, but
    ///   counts should be updated.
    ///
    /// Implementations are responsible for updating overall leaf and entry counts if necessary.
    ///
    /// Note: This only updates the leaf. Callers are responsible for recomputing and
    /// persisting the corresponding inner nodes.
    ///
    /// # Errors
    /// Returns `StorageError` if the storage operation fails (e.g., backend database error,
    /// write permission issues, serialization failures).
    fn remove_value(&mut self, index: u64, key: Word) -> Result<Option<Word>, StorageError>;
    /// Retrieves a single SMT leaf node by its logical `index`.
    /// Returns `Ok(None)` if no leaf exists at the given `index`.
    fn get_leaf(&self, index: u64) -> Result<Option<SmtLeaf>, StorageError>;
    /// Sets or updates multiple SMT leaf nodes in storage.
    ///
    /// For each entry in the `leaves` map, if a leaf at the given index already exists,
    /// it should be overwritten with the new `SmtLeaf` data.
    /// If it does not exist, a new leaf is stored.
    ///
    /// Note: This only updates the leaves. Callers are responsible for recomputing and
    /// persisting the corresponding inner nodes.
    ///
    /// # Errors
    /// Returns `StorageError` if any storage operation fails during the batch update.
    fn set_leaves(&mut self, leaves: Map<u64, SmtLeaf>) -> Result<(), StorageError>;
    /// Removes a single SMT leaf node entirely from storage by its logical `index`.
    ///
    /// Note: This only removes the leaf. Callers are responsible for recomputing and
    /// persisting the corresponding inner nodes.
    ///
    /// Returns the `SmtLeaf` that was removed, or `Ok(None)` if no leaf existed at `index`.
    /// Implementations should ensure that removing a leaf also correctly updates
    /// the overall leaf and entry counts.
    fn remove_leaf(&mut self, index: u64) -> Result<Option<SmtLeaf>, StorageError>;
    /// Retrieves multiple SMT leaf nodes by their logical `indices`.
    ///
    /// The returned `Vec` will have the same length as the input `indices` slice.
    /// For each `index` in the input, the corresponding element in the output `Vec`
    /// will be `Some(SmtLeaf)` if found, or `None` if not found.
    fn get_leaves(&self, indices: &[u64]) -> Result<Vec<Option<SmtLeaf>>, StorageError>;
    /// Returns true if the storage has any leaves.
    ///
    /// # Errors
    /// Returns `StorageError` if the storage read operation fails.
    fn has_leaves(&self) -> Result<bool, StorageError>;
    /// Retrieves a single SMT Subtree by its root `NodeIndex`.
    ///
    /// Subtrees typically represent deeper, compacted parts of the SMT.
    /// Returns `Ok(None)` if no subtree is found for the given `index`.
    fn get_subtree(&self, index: NodeIndex) -> Result<Option<Subtree>, StorageError>;
    /// Retrieves multiple Subtrees by their root `NodeIndex` values.
    ///
    /// The returned `Vec` will have the same length as the input `indices` slice.
    /// For each `index` in the input, the corresponding element in the output `Vec`
    /// will be `Some(Subtree)` if found, or `None` if not found.
    fn get_subtrees(&self, indices: &[NodeIndex]) -> Result<Vec<Option<Subtree>>, StorageError>;
    /// Sets or updates a single SMT Subtree in storage, identified by its root `NodeIndex`.
    ///
    /// If a subtree with the same root `NodeIndex` already exists, it is overwritten.
    fn set_subtree(&mut self, subtree: &Subtree) -> Result<(), StorageError>;
    /// Sets or updates multiple SMT Subtrees in storage.
    ///
    /// For each `Subtree` in the `subtrees` vector, if a subtree with the same root `NodeIndex`
    /// already exists, it is overwritten.
    fn set_subtrees(&mut self, subtrees: Vec<Subtree>) -> Result<(), StorageError>;
    /// Removes a single SMT Subtree from storage, identified by its root `NodeIndex`.
    ///
    /// Returns `Ok(())` on successful removal or if the subtree did not exist.
    fn remove_subtree(&mut self, index: NodeIndex) -> Result<(), StorageError>;
    /// Retrieves a single inner node from within a Subtree.
    ///
    /// This method is intended for accessing nodes at depths greater than the in-memory horizon.
    /// Returns `Ok(None)` if the containing Subtree or the specific inner node is not found.
    fn get_inner_node(&self, index: NodeIndex) -> Result<Option<InnerNode>, StorageError>;
    /// Sets or updates a single inner node (non-leaf node) within a Subtree.
    ///
    /// - If the target Subtree does not exist, it might need to be created by the implementation.
    /// - Returns the `InnerNode` that was previously at this `index`, if any.
    fn set_inner_node(
        &mut self,
        index: NodeIndex,
        node: InnerNode,
    ) -> Result<Option<InnerNode>, StorageError>;
    /// Removes a single inner node (non-leaf node) from within a Subtree.
    ///
    /// - If the Subtree becomes empty after removing the node, the Subtree itself might be removed
    ///   by the storage implementation.
    /// - Returns the `InnerNode` that was removed, if any.
    fn remove_inner_node(&mut self, index: NodeIndex) -> Result<Option<InnerNode>, StorageError>;
    /// Applies a batch of `StorageUpdates` atomically to the storage backend.
    ///
    /// This is the primary method for persisting changes to the SMT. Implementations must ensure
    /// that all updates within the `StorageUpdates` struct (leaf changes, subtree changes,
    /// new root hash, and count deltas) are applied as a single, indivisible operation.
    /// If any part of the update fails, the entire transaction should be rolled back, leaving
    /// the storage in its previous state.
    fn apply(&mut self, updates: StorageUpdates) -> Result<(), StorageError>;
    /// Returns an iterator over all (logical_index, SmtLeaf) pairs currently in storage.
    ///
    /// The order of iteration is not guaranteed unless specified by the implementation.
    fn iter_leaves(&self) -> Result<Box<dyn Iterator<Item = (u64, SmtLeaf)> + '_>, StorageError>;
    /// Returns an iterator over all `Subtree` instances currently in storage.
    ///
    /// The order of iteration is not guaranteed unless specified by the implementation.
    fn iter_subtrees(&self) -> Result<Box<dyn Iterator<Item = Subtree> + '_>, StorageError>;
    /// Retrieves all depth 24 hashes from storage for efficient startup reconstruction.
    ///
    /// Returns a vector of `(node_index_value, node_hash)` tuples (i.e. `(u64, Word)`)
    /// representing the cached roots of nodes at depth 24 (the in-memory/storage boundary).
    /// These roots enable fast reconstruction of the upper tree without loading
    /// entire subtrees.
    ///
    /// The hash cache is automatically maintained by subtree operations - no manual
    /// cache management is required.
    fn get_depth24(&self) -> Result<Vec<(u64, Word)>, StorageError>;
}
impl<T: SmtStorage + ?Sized> SmtStorage for Box<T> {
#[inline]
fn leaf_count(&self) -> Result<usize, StorageError> {
self.deref().leaf_count()
}
#[inline]
fn entry_count(&self) -> Result<usize, StorageError> {
self.deref().entry_count()
}
#[inline]
fn insert_value(
&mut self,
index: u64,
key: Word,
value: Word,
) -> Result<Option<Word>, StorageError> {
self.deref_mut().insert_value(index, key, value)
}
#[inline]
fn remove_value(&mut self, index: u64, key: Word) -> Result<Option<Word>, StorageError> {
self.deref_mut().remove_value(index, key)
}
#[inline]
fn get_leaf(&self, index: u64) -> Result<Option<SmtLeaf>, StorageError> {
self.deref().get_leaf(index)
}
#[inline]
fn set_leaves(&mut self, leaves: Map<u64, SmtLeaf>) -> Result<(), StorageError> {
self.deref_mut().set_leaves(leaves)
}
#[inline]
fn remove_leaf(&mut self, index: u64) -> Result<Option<SmtLeaf>, StorageError> {
self.deref_mut().remove_leaf(index)
}
#[inline]
fn get_leaves(&self, indices: &[u64]) -> Result<Vec<Option<SmtLeaf>>, StorageError> {
self.deref().get_leaves(indices)
}
#[inline]
fn has_leaves(&self) -> Result<bool, StorageError> {
self.deref().has_leaves()
}
#[inline]
fn get_subtree(&self, index: NodeIndex) -> Result<Option<Subtree>, StorageError> {
self.deref().get_subtree(index)
}
#[inline]
fn get_subtrees(&self, indices: &[NodeIndex]) -> Result<Vec<Option<Subtree>>, StorageError> {
self.deref().get_subtrees(indices)
}
#[inline]
fn set_subtree(&mut self, subtree: &Subtree) -> Result<(), StorageError> {
self.deref_mut().set_subtree(subtree)
}
#[inline]
fn set_subtrees(&mut self, subtrees: Vec<Subtree>) -> Result<(), StorageError> {
self.deref_mut().set_subtrees(subtrees)
}
#[inline]
fn remove_subtree(&mut self, index: NodeIndex) -> Result<(), StorageError> {
self.deref_mut().remove_subtree(index)
}
#[inline]
fn get_inner_node(&self, index: NodeIndex) -> Result<Option<InnerNode>, StorageError> {
self.deref().get_inner_node(index)
}
#[inline]
fn set_inner_node(
&mut self,
index: NodeIndex,
node: InnerNode,
) -> Result<Option<InnerNode>, StorageError> {
self.deref_mut().set_inner_node(index, node)
}
#[inline]
fn remove_inner_node(&mut self, index: NodeIndex) -> Result<Option<InnerNode>, StorageError> {
self.deref_mut().remove_inner_node(index)
}
#[inline]
fn apply(&mut self, updates: StorageUpdates) -> Result<(), StorageError> {
self.deref_mut().apply(updates)
}
#[inline]
fn iter_leaves(&self) -> Result<Box<dyn Iterator<Item = (u64, SmtLeaf)> + '_>, StorageError> {
self.deref().iter_leaves()
}
#[inline]
fn iter_subtrees(&self) -> Result<Box<dyn Iterator<Item = Subtree> + '_>, StorageError> {
self.deref().iter_subtrees()
}
#[inline]
fn get_depth24(&self) -> Result<Vec<(u64, Word)>, StorageError> {
self.deref().get_depth24()
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/large/storage/rocksdb.rs | miden-crypto/src/merkle/smt/large/storage/rocksdb.rs | use alloc::{boxed::Box, vec::Vec};
use std::{path::PathBuf, sync::Arc};
use rocksdb::{
BlockBasedOptions, Cache, ColumnFamilyDescriptor, DB, DBCompactionStyle, DBCompressionType,
DBIteratorWithThreadMode, FlushOptions, IteratorMode, Options, ReadOptions, WriteBatch,
WriteOptions,
};
use super::{SmtStorage, StorageError, StorageUpdateParts, StorageUpdates, SubtreeUpdate};
use crate::{
EMPTY_WORD, Word,
merkle::{
NodeIndex,
smt::{
InnerNode, Map, SmtLeaf,
large::{IN_MEMORY_DEPTH, LargeSmt, subtree::Subtree},
},
},
utils::{Deserializable, Serializable},
};
/// The name of the RocksDB column family used for storing SMT leaves.
const LEAVES_CF: &str = "leaves";
/// The names of the RocksDB column families used for storing SMT subtrees (deep nodes).
const SUBTREE_24_CF: &str = "st24";
const SUBTREE_32_CF: &str = "st32";
const SUBTREE_40_CF: &str = "st40";
const SUBTREE_48_CF: &str = "st48";
const SUBTREE_56_CF: &str = "st56";
/// The name of the RocksDB column family used for storing metadata (e.g., counts).
const METADATA_CF: &str = "metadata";
/// The name of the RocksDB column family used for storing level 24 hashes for fast tree rebuilding.
const DEPTH_24_CF: &str = "depth24";
/// The key used in the `METADATA_CF` column family to store the total count of non-empty leaves.
const LEAF_COUNT_KEY: &[u8] = b"leaf_count";
/// The key used in the `METADATA_CF` column family to store the total count of key-value entries.
const ENTRY_COUNT_KEY: &[u8] = b"entry_count";
/// A RocksDB-backed persistent storage implementation for a Sparse Merkle Tree (SMT).
///
/// Implements the `SmtStorage` trait, providing durable storage for SMT components
/// including leaves, subtrees (for deeper parts of the tree), and metadata like the SMT root
/// and counts. It leverages RocksDB column families to organize data:
/// - `LEAVES_CF` ("leaves"): Stores `SmtLeaf` data, keyed by their logical u64 index.
/// - `SUBTREE_24_CF` ("st24"): Stores serialized `Subtree` data at depth 24, keyed by their root
/// `NodeIndex`.
/// - `SUBTREE_32_CF` ("st32"): Stores serialized `Subtree` data at depth 32, keyed by their root
/// `NodeIndex`.
/// - `SUBTREE_40_CF` ("st40"): Stores serialized `Subtree` data at depth 40, keyed by their root
/// `NodeIndex`.
/// - `SUBTREE_48_CF` ("st48"): Stores serialized `Subtree` data at depth 48, keyed by their root
/// `NodeIndex`.
/// - `SUBTREE_56_CF` ("st56"): Stores serialized `Subtree` data at depth 56, keyed by their root
/// `NodeIndex`.
/// - `METADATA_CF` ("metadata"): Stores overall SMT metadata such as the current root hash, total
/// leaf count, and total entry count.
#[derive(Debug, Clone)]
pub struct RocksDbStorage {
    /// Shared handle to the underlying RocksDB instance. The `Arc` makes `Clone`
    /// cheap and lets multiple storage handles refer to the same database.
    db: Arc<DB>,
}
impl RocksDbStorage {
/// Opens or creates a RocksDB database at the specified `path` and configures it for SMT
/// storage.
///
/// This method sets up the necessary column families (`leaves`, `subtrees`, `metadata`)
/// and applies various RocksDB options for performance, such as caching, bloom filters,
/// and compaction strategies tailored for SMT workloads.
///
/// # Errors
/// Returns `StorageError::Backend` if the database cannot be opened or configured,
/// for example, due to path issues, permissions, or RocksDB internal errors.
pub fn open(config: RocksDbConfig) -> Result<Self, StorageError> {
// Base DB options
let mut db_opts = Options::default();
// Create DB if it doesn't exist
db_opts.create_if_missing(true);
// Auto-create missing column families
db_opts.create_missing_column_families(true);
// Tune compaction threads to match CPU cores
db_opts.increase_parallelism(rayon::current_num_threads() as i32);
// Limit the number of open file handles
db_opts.set_max_open_files(config.max_open_files);
// Parallelize flush/compaction up to CPU count
db_opts.set_max_background_jobs(rayon::current_num_threads() as i32);
// Maximum WAL size
db_opts.set_max_total_wal_size(512 * 1024 * 1024);
// Shared block cache across all column families
let cache = Cache::new_lru_cache(config.cache_size);
// Common table options for bloom filtering and cache
let mut table_opts = BlockBasedOptions::default();
// Use shared LRU cache for block data
table_opts.set_block_cache(&cache);
table_opts.set_bloom_filter(10.0, false);
// Enable whole-key bloom filtering (better with point lookups)
table_opts.set_whole_key_filtering(true);
// Pin L0 filter and index blocks in cache (improves performance)
table_opts.set_pin_l0_filter_and_index_blocks_in_cache(true);
// Column family for leaves
let mut leaves_opts = Options::default();
leaves_opts.set_block_based_table_factory(&table_opts);
// 128 MB memtable
leaves_opts.set_write_buffer_size(128 << 20);
// Allow up to 3 memtables
leaves_opts.set_max_write_buffer_number(3);
leaves_opts.set_min_write_buffer_number_to_merge(1);
// Do not retain flushed memtables in memory
leaves_opts.set_max_write_buffer_size_to_maintain(0);
// Use level-based compaction
leaves_opts.set_compaction_style(DBCompactionStyle::Level);
// 512 MB target file size
leaves_opts.set_target_file_size_base(512 << 20);
leaves_opts.set_target_file_size_multiplier(2);
// LZ4 compression
leaves_opts.set_compression_type(DBCompressionType::Lz4);
// Set level-based compaction parameters
leaves_opts.set_level_zero_file_num_compaction_trigger(8);
// Helper to build subtree CF options with correct prefix length
fn subtree_cf(cache: &Cache, bloom_filter_bits: f64) -> Options {
let mut tbl = BlockBasedOptions::default();
// Use shared LRU cache for block data
tbl.set_block_cache(cache);
// Set bloom filter for subtree lookups
tbl.set_bloom_filter(bloom_filter_bits, false);
// Enable whole-key bloom filtering
tbl.set_whole_key_filtering(true);
// Pin L0 filter and index blocks in cache
tbl.set_pin_l0_filter_and_index_blocks_in_cache(true);
let mut opts = Options::default();
opts.set_block_based_table_factory(&tbl);
// 128 MB memtable
opts.set_write_buffer_size(128 << 20);
opts.set_max_write_buffer_number(3);
opts.set_min_write_buffer_number_to_merge(1);
// Do not retain flushed memtables in memory
opts.set_max_write_buffer_size_to_maintain(0);
// Use level-based compaction
opts.set_compaction_style(DBCompactionStyle::Level);
// Trigger compaction at 4 L0 files
opts.set_level_zero_file_num_compaction_trigger(4);
// 512 MB target file size
opts.set_target_file_size_base(512 << 20);
opts.set_target_file_size_multiplier(2);
// LZ4 compression
opts.set_compression_type(DBCompressionType::Lz4);
// Set level-based compaction parameters
opts.set_level_zero_file_num_compaction_trigger(8);
opts
}
let mut depth24_opts = Options::default();
depth24_opts.set_compression_type(DBCompressionType::Lz4);
depth24_opts.set_block_based_table_factory(&table_opts);
// Metadata CF with no compression
let mut metadata_opts = Options::default();
metadata_opts.set_compression_type(DBCompressionType::None);
// Define column families with tailored options
let cfs = vec![
ColumnFamilyDescriptor::new(LEAVES_CF, leaves_opts),
ColumnFamilyDescriptor::new(SUBTREE_24_CF, subtree_cf(&cache, 8.0)),
ColumnFamilyDescriptor::new(SUBTREE_32_CF, subtree_cf(&cache, 10.0)),
ColumnFamilyDescriptor::new(SUBTREE_40_CF, subtree_cf(&cache, 10.0)),
ColumnFamilyDescriptor::new(SUBTREE_48_CF, subtree_cf(&cache, 12.0)),
ColumnFamilyDescriptor::new(SUBTREE_56_CF, subtree_cf(&cache, 12.0)),
ColumnFamilyDescriptor::new(METADATA_CF, metadata_opts),
ColumnFamilyDescriptor::new(DEPTH_24_CF, depth24_opts),
];
// Open the database with our tuned CFs
let db = DB::open_cf_descriptors(&db_opts, config.path, cfs)?;
Ok(Self { db: Arc::new(db) })
}
/// Syncs the RocksDB database to disk.
///
/// This ensures that all data is persisted to disk.
///
/// # Errors
/// - Returns `StorageError::Backend` if the flush operation fails.
fn sync(&self) -> Result<(), StorageError> {
let mut fopts = FlushOptions::default();
fopts.set_wait(true);
for name in [
LEAVES_CF,
SUBTREE_24_CF,
SUBTREE_32_CF,
SUBTREE_40_CF,
SUBTREE_48_CF,
SUBTREE_56_CF,
METADATA_CF,
DEPTH_24_CF,
] {
let cf = self.cf_handle(name)?;
self.db.flush_cf_opt(cf, &fopts)?;
}
self.db.flush_wal(true)?;
Ok(())
}
/// Converts an index (u64) into a fixed-size byte array for use as a RocksDB key.
///
/// Big-endian encoding keeps keys ordered numerically under RocksDB's default
/// bytewise comparator.
#[inline(always)]
fn index_db_key(index: u64) -> [u8; 8] {
    u64::to_be_bytes(index)
}
/// Converts a `NodeIndex` (for a subtree root) into a `KeyBytes` for use as a RocksDB key.
/// The `KeyBytes` is a wrapper around a 8-byte value with a variable-length prefix.
///
/// # Panics
/// Panics if `index.depth()` is not one of the supported subtree root depths
/// (24, 32, 40, 48 or 56).
#[inline(always)]
fn subtree_db_key(index: NodeIndex) -> KeyBytes {
// Number of significant bytes in the key: depth / 8 for every supported root depth.
// NOTE(review): assumes `KeyBytes::new` keeps the leading `keep` bytes of the
// big-endian node value — confirm against the `KeyBytes` implementation.
let keep = match index.depth() {
24 => 3,
32 => 4,
40 => 5,
48 => 6,
56 => 7,
d => panic!("unsupported depth {d}"),
};
KeyBytes::new(index.value(), keep)
}
/// Retrieves a handle to a RocksDB column family by its name.
///
/// # Errors
/// Returns `StorageError::Unsupported` if the column family with the given `name` does
/// not exist (i.e. it was not registered when the database was opened).
fn cf_handle(&self, name: &str) -> Result<&rocksdb::ColumnFamily, StorageError> {
self.db
.cf_handle(name)
.ok_or_else(|| StorageError::Unsupported(format!("unknown column family `{name}`")))
}
/// Returns the column family that stores subtrees rooted at `index`'s depth.
///
/// # Panics
/// Panics (via `expect`) if the column family is missing, which indicates a
/// misconfigured database.
#[inline(always)]
fn subtree_cf(&self, index: NodeIndex) -> &rocksdb::ColumnFamily {
    self.cf_handle(cf_for_depth(index.depth())).expect("CF handle missing")
}
}
impl SmtStorage for RocksDbStorage {
/// Retrieves the total count of non-empty leaves from the `METADATA_CF` column family.
/// Returns 0 if the count has never been stored.
///
/// # Errors
/// - `StorageError::Backend`: If the metadata column family is missing or a RocksDB error
///   occurs.
/// - `StorageError::BadValueLen`: If the retrieved count bytes are invalid.
fn leaf_count(&self) -> Result<usize, StorageError> {
    let cf = self.cf_handle(METADATA_CF)?;
    match self.db.get_cf(cf, LEAF_COUNT_KEY)? {
        // No stored counter means no leaves have been recorded yet.
        None => Ok(0),
        Some(bytes) => {
            let raw: [u8; 8] =
                bytes.as_slice().try_into().map_err(|_| StorageError::BadValueLen {
                    what: "leaf count",
                    expected: 8,
                    found: bytes.len(),
                })?;
            Ok(usize::from_be_bytes(raw))
        },
    }
}
/// Retrieves the total count of key-value entries from the `METADATA_CF` column family.
/// Returns 0 if the count has never been stored.
///
/// # Errors
/// - `StorageError::Backend`: If the metadata column family is missing or a RocksDB error
///   occurs.
/// - `StorageError::BadValueLen`: If the retrieved count bytes are invalid.
fn entry_count(&self) -> Result<usize, StorageError> {
    let cf = self.cf_handle(METADATA_CF)?;
    match self.db.get_cf(cf, ENTRY_COUNT_KEY)? {
        // No stored counter means no entries have been recorded yet.
        None => Ok(0),
        Some(bytes) => {
            let raw: [u8; 8] =
                bytes.as_slice().try_into().map_err(|_| StorageError::BadValueLen {
                    what: "entry count",
                    expected: 8,
                    found: bytes.len(),
                })?;
            Ok(usize::from_be_bytes(raw))
        },
    }
}
/// Inserts a key-value pair into the SMT leaf at the specified logical `index`.
///
/// This operation involves:
/// 1. Retrieving the current leaf (if any) at `index`.
/// 2. Inserting the new key-value pair into the leaf.
/// 3. Updating the leaf and entry counts in the metadata column family.
/// 4. Writing all changes (leaf data, counts) to RocksDB in a single batch.
///
/// Note: This only updates the leaf. Callers are responsible for recomputing and
/// persisting the corresponding inner nodes.
///
/// # Errors
/// - `StorageError::Backend`: If column families are missing or a RocksDB error occurs.
/// - `StorageError::DeserializationError`: If existing leaf data is corrupt.
fn insert_value(
&mut self,
index: u64,
key: Word,
value: Word,
) -> Result<Option<Word>, StorageError> {
// Inserting an empty value would effectively be a removal; use `remove_value` for that.
debug_assert_ne!(value, EMPTY_WORD);
let mut batch = WriteBatch::default();
// Fetch initial counts. The read-modify-write of these counters is safe only because
// `&mut self` serializes mutations on this storage instance.
let mut current_leaf_count = self.leaf_count()?;
let mut current_entry_count = self.entry_count()?;
let leaves_cf = self.cf_handle(LEAVES_CF)?;
let db_key = Self::index_db_key(index);
let maybe_leaf = self.get_leaf(index)?;
let value_to_return: Option<Word> = match maybe_leaf {
Some(mut existing_leaf) => {
// NOTE(review): assumes `SmtLeaf::insert` cannot fail for a leaf fetched at this
// index — confirm its failure modes (e.g. for `Multiple` leaves).
let old_value = existing_leaf.insert(key, value).expect("Failed to insert value");
// Determine if the overall SMT entry_count needs to change.
// entry_count increases if:
// 1. The key was not present in this leaf before (`old_value` is `None`).
// 2. The key was present but held `EMPTY_WORD` (`old_value` is
// `Some(EMPTY_WORD)`).
if old_value.is_none_or(|old_v| old_v == EMPTY_WORD) {
current_entry_count += 1;
}
// current_leaf_count does not change because the leaf itself already existed.
batch.put_cf(leaves_cf, db_key, existing_leaf.to_bytes());
old_value
},
None => {
// Leaf at `index` does not exist, so create a new one.
let new_leaf = SmtLeaf::Single((key, value));
// A new leaf is created.
current_leaf_count += 1;
// This new leaf contains one new SMT entry.
current_entry_count += 1;
batch.put_cf(leaves_cf, db_key, new_leaf.to_bytes());
// No previous value, as the leaf (and thus the key in it) was new.
None
},
};
// Add updated metadata counts to the batch.
let metadata_cf = self.cf_handle(METADATA_CF)?;
batch.put_cf(metadata_cf, LEAF_COUNT_KEY, current_leaf_count.to_be_bytes());
batch.put_cf(metadata_cf, ENTRY_COUNT_KEY, current_entry_count.to_be_bytes());
// Atomically write all changes (leaf data and metadata counts).
self.db.write(batch)?;
Ok(value_to_return)
}
/// Removes a key-value pair from the SMT leaf at the specified logical `index`.
///
/// This operation involves:
/// 1. Retrieving the leaf at `index`.
/// 2. Removing the `key` from the leaf. If the leaf becomes empty, it's deleted from RocksDB.
/// 3. Updating the leaf and entry counts in the metadata column family.
/// 4. Writing all changes (leaf data/deletion, counts) to RocksDB in a single batch.
///
/// Returns `Ok(None)` if the leaf at `index` does not exist or the `key` is not found.
///
/// Note: This only updates the leaf. Callers are responsible for recomputing and
/// persisting the corresponding inner nodes.
///
/// # Errors
/// - `StorageError::Backend`: If column families are missing or a RocksDB error occurs.
/// - `StorageError::DeserializationError`: If existing leaf data is corrupt.
fn remove_value(&mut self, index: u64, key: Word) -> Result<Option<Word>, StorageError> {
let Some(mut leaf) = self.get_leaf(index)? else {
return Ok(None);
};
let mut batch = WriteBatch::default();
let cf = self.cf_handle(LEAVES_CF)?;
let metadata_cf = self.cf_handle(METADATA_CF)?;
let db_key = Self::index_db_key(index);
let mut entry_count = self.entry_count()?;
let mut leaf_count = self.leaf_count()?;
// `remove` yields the prior value (if any) and whether the leaf is now empty.
let (current_value, is_empty) = leaf.remove(key);
// Only real (non-EMPTY_WORD) stored values are counted as entries.
// NOTE(review): these decrements would underflow (panicking in debug builds) if the
// stored counts ever drifted out of sync with the leaf contents — confirm invariants.
if let Some(current_value) = current_value
&& current_value != EMPTY_WORD
{
entry_count -= 1;
}
if is_empty {
leaf_count -= 1;
batch.delete_cf(cf, db_key);
} else {
batch.put_cf(cf, db_key, leaf.to_bytes());
}
batch.put_cf(metadata_cf, LEAF_COUNT_KEY, leaf_count.to_be_bytes());
batch.put_cf(metadata_cf, ENTRY_COUNT_KEY, entry_count.to_be_bytes());
self.db.write(batch)?;
Ok(current_value)
}
/// Retrieves a single SMT leaf node by its logical `index` from the `LEAVES_CF` column family.
///
/// # Errors
/// - `StorageError::Backend`: If the leaves column family is missing or a RocksDB error occurs.
/// - `StorageError::DeserializationError`: If the retrieved leaf data is corrupt.
fn get_leaf(&self, index: u64) -> Result<Option<SmtLeaf>, StorageError> {
let cf = self.cf_handle(LEAVES_CF)?;
let key = Self::index_db_key(index);
match self.db.get_cf(cf, key)? {
Some(bytes) => {
let leaf = SmtLeaf::read_from_bytes(&bytes)?;
Ok(Some(leaf))
},
None => Ok(None),
}
}
/// Sets or updates multiple SMT leaf nodes in the `LEAVES_CF` column family.
///
/// This method performs a batch write to RocksDB. It also updates the global
/// leaf and entry counts in the `METADATA_CF` based on the provided `leaves` map,
/// overwriting any previous counts.
///
/// Note: This method assumes the provided `leaves` map represents the entirety
/// of leaves to be stored or that counts are being explicitly reset.
/// Note: This only updates the leaves. Callers are responsible for recomputing and
/// persisting the corresponding inner nodes.
///
/// # Errors
/// - `StorageError::Backend`: If column families are missing or a RocksDB error occurs.
fn set_leaves(&mut self, leaves: Map<u64, SmtLeaf>) -> Result<(), StorageError> {
let cf = self.cf_handle(LEAVES_CF)?;
// Counts are derived solely from `leaves`; any pre-existing leaves not present in the
// map are neither deleted nor included in these totals.
let leaf_count: usize = leaves.len();
let entry_count: usize = leaves.values().map(|leaf| leaf.entries().len()).sum();
let mut batch = WriteBatch::default();
for (idx, leaf) in leaves {
let key = Self::index_db_key(idx);
let value = leaf.to_bytes();
batch.put_cf(cf, key, &value);
}
// Overwrite the stored counters in the same atomic batch as the leaf writes.
let metadata_cf = self.cf_handle(METADATA_CF)?;
batch.put_cf(metadata_cf, LEAF_COUNT_KEY, leaf_count.to_be_bytes());
batch.put_cf(metadata_cf, ENTRY_COUNT_KEY, entry_count.to_be_bytes());
self.db.write(batch)?;
Ok(())
}
/// Removes a single SMT leaf node by its logical `index` from the `LEAVES_CF` column family,
/// returning the previously stored leaf, if any.
///
/// Important: This method currently *does not* update the global leaf and entry counts
/// in the metadata. Callers are responsible for managing these counts separately
/// if using this method directly, or preferably use `apply` or `remove_value` which handle
/// counts.
///
/// Note: This only removes the leaf. Callers are responsible for recomputing and
/// persisting the corresponding inner nodes.
///
/// # Errors
/// - `StorageError::Backend`: If the leaves column family is missing or a RocksDB error occurs.
/// - `StorageError::DeserializationError`: If the retrieved (to be returned) leaf data is
///   corrupt.
fn remove_leaf(&mut self, index: u64) -> Result<Option<SmtLeaf>, StorageError> {
    let key = Self::index_db_key(index);
    let cf = self.cf_handle(LEAVES_CF)?;
    // Read the current value first so it can be returned to the caller.
    let old_bytes = self.db.get_cf(cf, key)?;
    self.db.delete_cf(cf, key)?;
    // Propagate deserialization failures instead of panicking: the documented contract is
    // to surface corrupt leaf data as `StorageError::DeserializationError` (previously this
    // used `expect`, which aborted the process on corrupt data).
    match old_bytes {
        Some(bytes) => Ok(Some(SmtLeaf::read_from_bytes(&bytes)?)),
        None => Ok(None),
    }
}
/// Retrieves multiple SMT leaf nodes by their logical `indices` using RocksDB's `multi_get_cf`.
///
/// The returned vector has the same length and ordering as `indices`, with `None` for
/// indices that have no stored leaf.
///
/// # Errors
/// - `StorageError::Backend`: If the leaves column family is missing or a RocksDB error occurs.
/// - `StorageError::DeserializationError`: If any retrieved leaf data is corrupt.
fn get_leaves(&self, indices: &[u64]) -> Result<Vec<Option<SmtLeaf>>, StorageError> {
    let cf = self.cf_handle(LEAVES_CF)?;
    let db_keys: Vec<[u8; 8]> = indices.iter().map(|&idx| Self::index_db_key(idx)).collect();
    let raw_results = self.db.multi_get_cf(db_keys.iter().map(|k| (cf, k.as_ref())));
    // Decode each fetched value in order, short-circuiting on the first failure.
    let mut leaves = Vec::with_capacity(raw_results.len());
    for raw in raw_results {
        let leaf = match raw? {
            Some(bytes) => Some(SmtLeaf::read_from_bytes(&bytes)?),
            None => None,
        };
        leaves.push(leaf);
    }
    Ok(leaves)
}
/// Returns true if the storage has any leaves.
///
/// # Errors
/// Returns `StorageError` if the storage read operation fails.
fn has_leaves(&self) -> Result<bool, StorageError> {
    // A non-zero metadata leaf counter means at least one leaf is stored.
    self.leaf_count().map(|count| count > 0)
}
/// Retrieves a single subtree from RocksDB by its root node index.
///
/// The column family is selected from the depth of `index`, and the stored bytes are
/// deserialized into a `Subtree` rooted at `index`.
///
/// # Parameters
/// - `index`: The root index of the subtree to retrieve.
///
/// # Returns
/// - `Ok(Some(subtree))` if a subtree is stored at `index`.
/// - `Ok(None)` if no subtree is stored at `index`.
/// - `Err(StorageError)` if the RocksDB read or deserialization fails.
fn get_subtree(&self, index: NodeIndex) -> Result<Option<Subtree>, StorageError> {
let cf = self.subtree_cf(index);
let key = Self::subtree_db_key(index);
match self.db.get_cf(cf, key)? {
Some(bytes) => {
let subtree = Subtree::from_vec(index, &bytes)?;
Ok(Some(subtree))
},
None => Ok(None),
}
}
/// Batch-retrieves multiple subtrees from RocksDB by their node indices.
///
/// This method groups requests by subtree depth into column family buckets,
/// then performs parallel `multi_get` operations to efficiently retrieve
/// all subtrees. Results are deserialized and placed in the same order as
/// the input indices.
///
/// Note: Buckets are processed in parallel. If multiple errors occur (e.g.,
/// deserialization or backend errors), only the first one collected is returned;
/// other errors are discarded.
///
/// # Parameters
/// - `indices`: A slice of subtree root indices to retrieve.
///
/// # Returns
/// - A `Vec<Option<Subtree>>` where each index corresponds to the original input.
/// - `Ok(...)` if all fetches succeed.
/// - `Err(StorageError)` if any RocksDB access or deserialization fails.
fn get_subtrees(&self, indices: &[NodeIndex]) -> Result<Vec<Option<Subtree>>, StorageError> {
use rayon::prelude::*;
// One bucket per supported depth; each entry remembers its position in `indices`
// so results can be written back in input order.
let mut depth_buckets: [Vec<(usize, NodeIndex)>; 5] = Default::default();
for (original_index, &node_index) in indices.iter().enumerate() {
let depth = node_index.depth();
let bucket_index = match depth {
56 => 0,
48 => 1,
40 => 2,
32 => 3,
24 => 4,
_ => {
return Err(StorageError::Unsupported(format!(
"unsupported subtree depth {depth}"
)));
},
};
depth_buckets[bucket_index].push((original_index, node_index));
}
let mut results = vec![None; indices.len()];
// Process depth buckets in parallel
let bucket_results: Result<Vec<_>, StorageError> = depth_buckets
.into_par_iter()
.enumerate()
.filter(|(_, bucket)| !bucket.is_empty())
.map(
|(bucket_index, bucket)| -> Result<Vec<(usize, Option<Subtree>)>, StorageError> {
// NOTE(review): assumes `SUBTREE_DEPTHS == [56, 48, 40, 32, 24]` so that
// `bucket_index` maps back to the same depth used when bucketing — confirm.
let depth = LargeSmt::<RocksDbStorage>::SUBTREE_DEPTHS[bucket_index];
let cf = self.cf_handle(cf_for_depth(depth))?;
let keys: Vec<_> =
bucket.iter().map(|(_, idx)| Self::subtree_db_key(*idx)).collect();
let db_results = self.db.multi_get_cf(keys.iter().map(|k| (cf, k.as_ref())));
// Process results for this bucket
bucket
.into_iter()
.zip(db_results)
.map(|((original_index, node_index), db_result)| {
let subtree = match db_result {
Ok(Some(bytes)) => Some(Subtree::from_vec(node_index, &bytes)?),
Ok(None) => None,
Err(e) => return Err(e.into()),
};
Ok((original_index, subtree))
})
.collect()
},
)
.collect();
// Flatten results and place them in correct positions
for bucket_result in bucket_results? {
for (original_index, subtree) in bucket_result {
results[original_index] = subtree;
}
}
Ok(results)
}
/// Stores a single subtree in RocksDB and optionally updates the depth-24 root cache.
///
/// The subtree is serialized and written to its corresponding column family.
/// If it's a depth-24 subtree, the root node's hash is also stored in the
/// dedicated `DEPTH_24_CF` cache to support top-level reconstruction. Both writes
/// go through a single `WriteBatch`, so they are applied atomically.
///
/// # Parameters
/// - `subtree`: A reference to the subtree to be stored.
///
/// # Errors
/// - Returns `StorageError` if column family lookup, serialization, or the write operation
///   fails, or if a depth-24 subtree is missing its own root node.
fn set_subtree(&mut self, subtree: &Subtree) -> Result<(), StorageError> {
let subtrees_cf = self.subtree_cf(subtree.root_index());
let mut batch = WriteBatch::default();
let key = Self::subtree_db_key(subtree.root_index());
let value = subtree.to_vec();
batch.put_cf(subtrees_cf, key, value);
// Also update level 24 hash cache if this is a level 24 subtree
if subtree.root_index().depth() == IN_MEMORY_DEPTH {
// A depth-24 subtree without its own root node cannot be cached; treat as an error.
let root_hash = subtree
.get_inner_node(subtree.root_index())
.ok_or_else(|| StorageError::Unsupported("Subtree root node not found".into()))?
.hash();
let depth24_cf = self.cf_handle(DEPTH_24_CF)?;
let hash_key = Self::index_db_key(subtree.root_index().value());
batch.put_cf(depth24_cf, hash_key, root_hash.to_bytes());
}
self.db.write(batch)?;
Ok(())
}
/// Bulk-writes subtrees to storage (bypassing WAL).
///
/// This method writes a vector of serialized `Subtree` objects directly to their
/// corresponding RocksDB column families based on their root index.
///
/// ⚠️ **Warning:** This function should only be used during **initial SMT construction**.
/// It disables the WAL, meaning writes are **not crash-safe** and can result in data loss
/// if the process terminates unexpectedly.
///
/// # Parameters
/// - `subtrees`: A vector of `Subtree` objects to be serialized and persisted.
///
/// # Errors
/// - Returns `StorageError::Backend` if any column family lookup or RocksDB write fails.
fn set_subtrees(&mut self, subtrees: Vec<Subtree>) -> Result<(), StorageError> {
let depth24_cf = self.cf_handle(DEPTH_24_CF)?;
// WAL is intentionally disabled: bulk-load speed over crash safety (see doc warning).
let mut write_opts = WriteOptions::default();
write_opts.disable_wal(true);
let mut batch = WriteBatch::default();
for subtree in subtrees {
let subtrees_cf = self.subtree_cf(subtree.root_index());
let key = Self::subtree_db_key(subtree.root_index());
let value = subtree.to_vec();
batch.put_cf(subtrees_cf, key, value);
// Mirror depth-24 root hashes into the cache CF; unlike `set_subtree`, a missing
// root node is silently skipped here rather than treated as an error.
if subtree.root_index().depth() == IN_MEMORY_DEPTH
&& let Some(root_node) = subtree.get_inner_node(subtree.root_index())
{
let hash_key = Self::index_db_key(subtree.root_index().value());
batch.put_cf(depth24_cf, hash_key, root_node.hash().to_bytes());
}
}
self.db.write_opt(batch, &write_opts)?;
Ok(())
}
/// Removes a single SMT Subtree from storage, identified by its root `NodeIndex`.
///
/// For depth-24 subtrees, the cached root hash in `DEPTH_24_CF` is deleted in the
/// same atomic batch.
///
/// # Errors
/// - `StorageError::Backend`: If the subtrees column family is missing or a RocksDB error
///   occurs.
fn remove_subtree(&mut self, index: NodeIndex) -> Result<(), StorageError> {
    let mut batch = WriteBatch::default();
    batch.delete_cf(self.subtree_cf(index), Self::subtree_db_key(index));
    // Drop the cached depth-24 root hash together with the subtree itself.
    if index.depth() == IN_MEMORY_DEPTH {
        batch.delete_cf(self.cf_handle(DEPTH_24_CF)?, Self::index_db_key(index.value()));
    }
    self.db.write(batch)?;
    Ok(())
}
/// Retrieves a single inner node (non-leaf node) from within a Subtree.
///
/// This method is intended for accessing nodes at depths greater than or equal to
/// `IN_MEMORY_DEPTH`; shallower nodes live in the in-memory upper tree, not in storage.
/// It locates the Subtree containing `index` and delegates to
/// `Subtree::get_inner_node()`.
///
/// # Errors
/// - `StorageError::Unsupported`: If `index.depth() < IN_MEMORY_DEPTH`.
/// - `StorageError`: If reading or deserializing the containing Subtree fails.
fn get_inner_node(&self, index: NodeIndex) -> Result<Option<InnerNode>, StorageError> {
    if index.depth() < IN_MEMORY_DEPTH {
        return Err(StorageError::Unsupported(
            "Cannot get inner node from upper part of the tree".into(),
        ));
    }
    // Resolve the stored subtree that owns this node, then search inside it.
    let subtree_root = Subtree::find_subtree_root(index);
    match self.get_subtree(subtree_root)? {
        Some(subtree) => Ok(subtree.get_inner_node(index)),
        None => Ok(None),
    }
}
/// Sets or updates a single inner node (non-leaf node) within a Subtree.
///
/// This method is intended for `index.depth() >= IN_MEMORY_DEPTH`.
/// If the target Subtree does not exist, it is created. The `node` is then
/// inserted into the Subtree, and the modified Subtree is written back to storage.
///
/// # Errors
/// - `StorageError::Backend`: If `index.depth() < IN_MEMORY_DEPTH`, or if RocksDB errors occur.
/// - `StorageError::Value`: If existing Subtree data is corrupt.
fn set_inner_node(
&mut self,
index: NodeIndex,
node: InnerNode,
) -> Result<Option<InnerNode>, StorageError> {
if index.depth() < IN_MEMORY_DEPTH {
return Err(StorageError::Unsupported(
"Cannot set inner node in upper part of the tree".into(),
));
}
let subtree_root_index = Subtree::find_subtree_root(index);
let mut subtree = self
.get_subtree(subtree_root_index)?
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | true |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/large/storage/updates.rs | miden-crypto/src/merkle/smt/large/storage/updates.rs | use alloc::vec::Vec;
use crate::merkle::{
NodeIndex,
smt::{Map, SmtLeaf, large::subtree::Subtree},
};
/// Represents a storage update operation for a subtree.
///
/// Each variant explicitly indicates whether to store or delete a subtree at a given index.
/// Storage backends apply updates in the order they were recorded, so a later update for
/// the same index supersedes an earlier one.
#[derive(Debug, Clone)]
pub enum SubtreeUpdate {
/// Store or update a subtree at the specified index.
Store {
/// The index where the subtree should be stored.
index: NodeIndex,
/// The subtree data to store.
subtree: Subtree,
},
/// Delete the subtree at the specified index.
Delete {
/// The index of the subtree to delete.
index: NodeIndex,
},
}
/// Owned decomposition of a [`StorageUpdates`] batch into its constituent parts.
///
/// This struct provides direct access to the individual components of a storage update
/// batch after transferring ownership from [`StorageUpdates::into_parts`]. Unlike
/// [`StorageUpdates`], all fields are public, so consumers can move each component out
/// independently.
#[derive(Debug)]
pub struct StorageUpdateParts {
/// Leaf updates indexed by their position in the tree.
///
/// `Some(leaf)` indicates an insertion or update, while `None` indicates deletion.
pub leaf_updates: Map<u64, Option<SmtLeaf>>,
/// Vector of subtree storage operations (Store or Delete) to be applied atomically.
pub subtree_updates: Vec<SubtreeUpdate>,
/// Net change in the count of non-empty leaves.
///
/// Positive values indicate more leaves were added than removed,
/// negative values indicate more leaves were removed than added.
pub leaf_count_delta: isize,
/// Net change in the total number of key-value entries across all leaves.
///
/// Positive values indicate more entries were added than removed,
/// negative values indicate more entries were removed than added.
pub entry_count_delta: isize,
}
/// Represents a collection of changes to be applied atomically to an SMT storage backend.
///
/// This struct is used to batch multiple updates (to leaves and subtrees)
/// ensuring that they are persisted together as a single, consistent transaction.
/// It also tracks deltas for leaf and entry counts, allowing storage implementations
/// to maintain these counts accurately.
///
/// The derived `Default` produces an empty batch with zero deltas.
#[derive(Default, Debug, Clone)]
pub struct StorageUpdates {
/// A map of updates to individual SMT leaves.
/// The key is the logical leaf index (u64).
/// - `Some(SmtLeaf)` indicates an insertion or update of the leaf at that index.
/// - `None` indicates a deletion of the leaf at that index.
leaf_updates: Map<u64, Option<SmtLeaf>>,
/// Vector of subtree storage operations (Store or Delete) to be applied atomically.
subtree_updates: Vec<SubtreeUpdate>,
/// The net change in the total count of non-empty leaves resulting from this batch of updates.
/// For example, if one leaf is added and one is removed, this would be 0.
/// If two new leaves are added, this would be +2.
leaf_count_delta: isize,
/// The net change in the total count of key-value entries across all leaves
/// resulting from this batch of updates.
entry_count_delta: isize,
}
impl StorageUpdates {
    /// Creates an empty update batch.
    ///
    /// Intended for incremental construction: add changes one at a time via
    /// `insert_leaf`, `remove_leaf`, `insert_subtree`, and `remove_subtree`.
    pub fn new() -> Self {
        Self::default()
    }

    /// Builds a batch from pre-computed components.
    ///
    /// Useful for bulk operations where the complete set of leaf/subtree updates and
    /// the corresponding count deltas have already been calculated, such as when
    /// applying a batch of mutations.
    pub fn from_parts(
        leaf_updates: Map<u64, Option<SmtLeaf>>,
        subtree_updates: impl IntoIterator<Item = SubtreeUpdate>,
        leaf_count_delta: isize,
        entry_count_delta: isize,
    ) -> Self {
        let subtree_updates: Vec<SubtreeUpdate> = subtree_updates.into_iter().collect();
        Self {
            leaf_updates,
            subtree_updates,
            leaf_count_delta,
            entry_count_delta,
        }
    }

    /// Records an insertion/update of the leaf at `index`.
    ///
    /// Overwrites any update previously recorded for the same index in this batch.
    pub fn insert_leaf(&mut self, index: u64, leaf: SmtLeaf) {
        self.leaf_updates.insert(index, Some(leaf));
    }

    /// Records a removal of the leaf at `index`.
    ///
    /// Overwrites any update previously recorded for the same index in this batch.
    pub fn remove_leaf(&mut self, index: u64) {
        self.leaf_updates.insert(index, None);
    }

    /// Records an insertion/update of `subtree`.
    ///
    /// **Note:** no deduplication is performed; repeated calls for the same subtree
    /// index append multiple operations, which the storage implementation applies in
    /// order (the last one takes effect).
    pub fn insert_subtree(&mut self, subtree: Subtree) {
        self.subtree_updates.push(SubtreeUpdate::Store { index: subtree.root_index(), subtree });
    }

    /// Records a removal of the subtree rooted at `index`.
    ///
    /// **Note:** no deduplication is performed; repeated calls for the same subtree
    /// index append multiple operations, which the storage implementation applies in
    /// order (the last one takes effect).
    pub fn remove_subtree(&mut self, index: NodeIndex) {
        self.subtree_updates.push(SubtreeUpdate::Delete { index });
    }

    /// Returns true if this batch records no leaf or subtree changes.
    pub fn is_empty(&self) -> bool {
        self.subtree_updates.is_empty() && self.leaf_updates.is_empty()
    }

    /// Returns the number of recorded leaf updates.
    pub fn leaf_update_count(&self) -> usize {
        self.leaf_updates.len()
    }

    /// Returns the number of recorded subtree updates.
    pub fn subtree_update_count(&self) -> usize {
        self.subtree_updates.len()
    }

    /// Returns a reference to the recorded leaf updates.
    pub fn leaf_updates(&self) -> &Map<u64, Option<SmtLeaf>> {
        &self.leaf_updates
    }

    /// Returns the recorded subtree updates as a slice.
    pub fn subtree_updates(&self) -> &[SubtreeUpdate] {
        &self.subtree_updates
    }

    /// Returns the net change in the non-empty leaf count.
    pub fn leaf_count_delta(&self) -> isize {
        self.leaf_count_delta
    }

    /// Returns the net change in the total key-value entry count.
    pub fn entry_count_delta(&self) -> isize {
        self.entry_count_delta
    }

    /// Overwrites the leaf count delta.
    pub fn set_leaf_count_delta(&mut self, delta: isize) {
        self.leaf_count_delta = delta;
    }

    /// Overwrites the entry count delta.
    pub fn set_entry_count_delta(&mut self, delta: isize) {
        self.entry_count_delta = delta;
    }

    /// Shifts the leaf count delta by `adjustment`.
    pub fn adjust_leaf_count_delta(&mut self, adjustment: isize) {
        self.leaf_count_delta += adjustment;
    }

    /// Shifts the entry count delta by `adjustment`.
    pub fn adjust_entry_count_delta(&mut self, adjustment: isize) {
        self.entry_count_delta += adjustment;
    }

    /// Consumes this batch and returns only the leaf updates.
    pub fn into_leaf_updates(self) -> Map<u64, Option<SmtLeaf>> {
        self.leaf_updates
    }

    /// Consumes this batch and returns only the subtree updates.
    pub fn into_subtree_updates(self) -> Vec<SubtreeUpdate> {
        self.subtree_updates
    }

    /// Consumes this `StorageUpdates` and returns its owned parts as a
    /// [`StorageUpdateParts`].
    pub fn into_parts(self) -> StorageUpdateParts {
        let Self {
            leaf_updates,
            subtree_updates,
            leaf_count_delta,
            entry_count_delta,
        } = self;
        StorageUpdateParts {
            leaf_updates,
            subtree_updates,
            leaf_count_delta,
            entry_count_delta,
        }
    }
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/large/subtree/tests.rs | miden-crypto/src/merkle/smt/large/subtree/tests.rs | use super::{InnerNode, NodeIndex, SUBTREE_DEPTH, Subtree};
use crate::Word;
#[test]
fn test_initial_state() {
    let root_index = NodeIndex::new(SUBTREE_DEPTH, 0).unwrap();
    let subtree = Subtree::new(root_index);

    // A freshly constructed subtree remembers its root and contains no nodes.
    assert_eq!(subtree.root_index(), root_index, "Root index should match the provided index");
    assert!(subtree.is_empty(), "New subtree should report as empty");
    assert_eq!(subtree.len(), 0, "New subtree should be empty");
}
// Exercises the full insert / get / overwrite / remove lifecycle of inner nodes in a
// subtree. Node positions are addressed by global (depth, value) indices relative to a
// subtree rooted at depth SUBTREE_DEPTH.
#[test]
fn test_node_operations() {
let subtree_root_idx = NodeIndex::new(SUBTREE_DEPTH, 0).unwrap();
let mut subtree = Subtree::new(subtree_root_idx);
// Create test nodes and indices
let node1_idx = NodeIndex::new(SUBTREE_DEPTH + 1, 0).unwrap();
let node1 = InnerNode {
left: Word::default(),
right: Word::default(),
};
let node2_idx = NodeIndex::new(SUBTREE_DEPTH + 2, 3).unwrap();
let node2 = InnerNode {
left: Word::from([1u32; 4]),
right: Word::from([2u32; 4]),
};
// Test insertion into empty subtree
assert_eq!(subtree.len(), 0, "Subtree should be empty");
let old_node = subtree.insert_inner_node(node1_idx, node1.clone());
assert!(old_node.is_none(), "Old node should be empty");
assert_eq!(subtree.len(), 1, "Subtree should have one node");
let old_node = subtree.insert_inner_node(node2_idx, node2.clone());
assert!(old_node.is_none(), "Old node should be empty");
assert_eq!(subtree.len(), 2, "Subtree should have two nodes");
// Test node retrieval
assert_eq!(
subtree.get_inner_node(node1_idx),
Some(node1.clone()),
"Should match the first node"
);
assert_eq!(
subtree.get_inner_node(node2_idx),
Some(node2.clone()),
"Should match the second node"
);
let non_existent_idx = NodeIndex::new(SUBTREE_DEPTH + 3, 0).unwrap();
assert!(
subtree.get_inner_node(non_existent_idx).is_none(),
"Should return None for non-existent node"
);
// Test node overwriting: inserting at an occupied index returns the displaced node.
let node1_updated = InnerNode {
left: Word::from([3u32; 4]),
right: Word::from([4u32; 4]),
};
let previous_node = subtree.insert_inner_node(node1_idx, node1_updated.clone());
assert_eq!(previous_node, Some(node1), "Overwriting should return the previous node");
assert_eq!(subtree.len(), 2, "Length should not change on overwrite");
assert_eq!(
subtree.get_inner_node(node1_idx),
Some(node1_updated.clone()),
"Should retrieve the updated node"
);
// Test node removal
let removed_node = subtree.remove_inner_node(node1_idx);
assert_eq!(removed_node, Some(node1_updated), "Removing should return the removed node");
assert_eq!(subtree.len(), 1, "Length should decrease after removal");
assert!(
subtree.get_inner_node(node1_idx).is_none(),
"Removed node should no longer be retrievable"
);
// Test removing non-existent node (double removal is a no-op returning None)
let remove_result = subtree.remove_inner_node(node1_idx);
assert!(remove_result.is_none(), "Removing non-existent node should return None");
assert_eq!(subtree.len(), 1, "Length should not change when removing non-existent node");
// Remove final node to test empty state
let removed_node = subtree.remove_inner_node(node2_idx);
assert_eq!(removed_node, Some(node2), "Should remove the final node");
assert_eq!(subtree.len(), 0, "Subtree should be empty after removing all nodes");
assert!(subtree.is_empty(), "Subtree should report as empty");
// Test removing from empty subtree
let remove_result = subtree.remove_inner_node(node1_idx);
assert!(remove_result.is_none(), "Removing from empty subtree should return None");
assert_eq!(subtree.len(), 0, "Length should remain zero");
}
#[test]
fn test_serialize_deserialize_empty_subtree() {
    let root_index = NodeIndex::new(SUBTREE_DEPTH, 1).unwrap();
    let serialized = Subtree::new(root_index).to_vec();

    // An empty subtree serializes to just the presence bitmask, with every bit clear.
    assert_eq!(
        serialized.len(),
        Subtree::BITMASK_SIZE,
        "Empty subtree serialization should only contain bitmask"
    );
    assert!(
        serialized.iter().all(|&byte| byte == 0),
        "All bytes in empty subtree serialization should be zero"
    );

    // Round-trip: deserializing must reproduce an empty subtree at the same root.
    let deserialized = Subtree::from_vec(root_index, &serialized)
        .expect("Deserialization of empty subtree should succeed");
    assert_eq!(deserialized.root_index(), root_index, "Deserialized root index should match");
    assert_eq!(deserialized.len(), 0, "Deserialized subtree should have length 0");
    assert!(deserialized.is_empty(), "Deserialized subtree should be empty");
}
// Round-trips a subtree containing three nodes (first, second, and last local positions)
// and checks both the serialized layout (bitmask + hashes) and the recovered contents.
#[test]
fn test_serialize_deserialize_subtree_with_nodes() {
let subtree_root_idx = NodeIndex::new(SUBTREE_DEPTH, 0).unwrap();
let mut subtree = Subtree::new(subtree_root_idx);
// Add nodes at positions: root (local index 0), first child (local index 1),
// and last possible position (local index 254)
let node0_idx_global = NodeIndex::new(SUBTREE_DEPTH, 0).unwrap();
let node1_idx_global = NodeIndex::new(SUBTREE_DEPTH + 1, 0).unwrap();
let node254_idx_global = NodeIndex::new(SUBTREE_DEPTH + 7, 127).unwrap();
let node0 = InnerNode {
left: Word::from([1u32; 4]),
right: Word::from([2u32; 4]),
};
let node1 = InnerNode {
left: Word::from([3u32; 4]),
right: Word::from([4u32; 4]),
};
let node254 = InnerNode {
left: Word::from([5u32; 4]),
right: Word::from([6u32; 4]),
};
subtree.insert_inner_node(node0_idx_global, node0.clone());
subtree.insert_inner_node(node1_idx_global, node1.clone());
subtree.insert_inner_node(node254_idx_global, node254.clone());
assert_eq!(subtree.len(), 3, "Subtree should contain 3 nodes");
// Test serialization: 3 nodes x 2 child hashes each = 6 hashes after the bitmask.
let serialized = subtree.to_vec();
let expected_size = Subtree::BITMASK_SIZE + 6 * Subtree::HASH_SIZE;
assert_eq!(serialized.len(), expected_size, "Serialized size should be bitmask + 3 nodes");
// Test deserialization
let deserialized =
Subtree::from_vec(subtree_root_idx, &serialized).expect("Deserialization should succeed");
assert_eq!(deserialized.root_index(), subtree_root_idx, "Root index should match");
assert_eq!(deserialized.len(), 3, "Deserialized subtree should have 3 nodes");
assert!(!deserialized.is_empty(), "Deserialized subtree should not be empty");
// Verify all nodes are correctly deserialized
assert_eq!(
deserialized.get_inner_node(node0_idx_global),
Some(node0),
"First node should be correctly deserialized"
);
assert_eq!(
deserialized.get_inner_node(node1_idx_global),
Some(node1),
"Second node should be correctly deserialized"
);
assert_eq!(
deserialized.get_inner_node(node254_idx_global),
Some(node254),
"Third node should be correctly deserialized"
);
// Verify bitmask correctness. Each stored node occupies two consecutive bits (one per
// child hash): local nodes 0 and 1 -> bits 0-3 of byte 0; local node 254 -> bits
// 508-509, i.e. bits 4-5 of byte 63.
let (bitmask_bytes, _node_data) = serialized.split_at(Subtree::BITMASK_SIZE);
// byte 0: bits 0-3 must be set
assert_eq!(bitmask_bytes[0], 0x0f, "byte 0 must have bits 0-3 set");
// bytes 1‥=62 must be zero
assert!(bitmask_bytes[1..63].iter().all(|&b| b == 0), "bytes 1‥62 must be zero");
// byte 63: bits 4 & 5 must be set
assert_eq!(bitmask_bytes[63], 0x30, "byte 63 must have bits 4 & 5 set");
}
/// Tests global to local index conversion with zero-based subtree root
#[test]
fn global_to_local_index_conversion_zero_base() {
    let base_idx = NodeIndex::new(SUBTREE_DEPTH, 0).unwrap();

    // Each case is (depth, value, expected_local_index, description); local
    // indices follow binary-heap (level-order) layout with the root at 0.
    let cases = [
        (SUBTREE_DEPTH, 0, 0, "root node"),
        (SUBTREE_DEPTH + 1, 0, 1, "left child"),
        (SUBTREE_DEPTH + 1, 1, 2, "right child"),
        (SUBTREE_DEPTH + 2, 0, 3, "left grandchild"),
        (SUBTREE_DEPTH + 2, 3, 6, "right grandchild at position 3"),
        (SUBTREE_DEPTH + 7, 0, 127, "deepest left node"),
        (SUBTREE_DEPTH + 7, 127, 254, "deepest right node"),
    ];

    for &(depth, value, expected, description) in cases.iter() {
        let global_idx = NodeIndex::new(depth, value).unwrap();
        assert_eq!(
            Subtree::global_to_local(global_idx, base_idx),
            expected,
            "Failed for {description}: depth={depth}, value={value}"
        );
    }
}
/// Tests global to local index conversion with non-zero subtree root
#[test]
fn global_to_local_index_conversion_nonzero_base() {
    // Subtree rooted at value 1: descendants carry the root's value in their
    // high bits, which the conversion must mask away.
    let base_idx = NodeIndex::new(SUBTREE_DEPTH * 2, 1).unwrap();

    // (depth, value, expected_local_index, description)
    let cases = [
        (SUBTREE_DEPTH * 2, 1, 0, "subtree root itself"),
        (SUBTREE_DEPTH * 2 + 1, 2, 1, "left child (2 = 1<<1 | 0)"),
        (SUBTREE_DEPTH * 2 + 1, 3, 2, "right child (3 = 1<<1 | 1)"),
    ];

    for &(depth, value, expected, description) in cases.iter() {
        let global_idx = NodeIndex::new(depth, value).unwrap();
        assert_eq!(
            Subtree::global_to_local(global_idx, base_idx),
            expected,
            "Failed for {description}: depth={depth}, value={value}"
        );
    }
}
/// Tests that global_to_local panics when global depth is less than base depth
#[test]
#[should_panic(expected = "Global depth is less than base depth")]
fn global_to_local_panics_on_invalid_depth() {
    let base_idx = NodeIndex::new(SUBTREE_DEPTH, 0).unwrap();
    // A node shallower than the subtree root cannot lie inside the subtree,
    // so the conversion must panic.
    let shallow_idx = NodeIndex::new(SUBTREE_DEPTH - 1, 0).unwrap();
    let _ = Subtree::global_to_local(shallow_idx, base_idx);
}
/// Tests finding subtree roots for nodes at various positions in the tree
#[test]
fn find_subtree_root_for_various_nodes() {
    // Test nodes within the first possible subtree (rooted at depth 0)
    let shallow_nodes =
        [NodeIndex::new(0, 0).unwrap(), NodeIndex::new(SUBTREE_DEPTH - 1, 0).unwrap()];
    for node_idx in shallow_nodes {
        assert_eq!(
            Subtree::find_subtree_root(node_idx),
            NodeIndex::root(),
            "Node at depth {} should belong to root subtree",
            node_idx.depth()
        );
    }
    // Test nodes in subtree rooted at (depth=SUBTREE_DEPTH, value=0)
    let subtree_0_root = NodeIndex::new(SUBTREE_DEPTH, 0).unwrap();
    let subtree_0_nodes = [
        NodeIndex::new(SUBTREE_DEPTH, 0).unwrap(),
        NodeIndex::new(SUBTREE_DEPTH + 1, 0).unwrap(),
        NodeIndex::new(SUBTREE_DEPTH + 1, 1).unwrap(),
        // Deepest layer of this subtree: right-most value whose high bits
        // (the subtree selector) are all zero.
        NodeIndex::new(SUBTREE_DEPTH * 2 - 1, (1 << (SUBTREE_DEPTH - 1)) - 1).unwrap(),
    ];
    for node_idx in subtree_0_nodes {
        assert_eq!(
            Subtree::find_subtree_root(node_idx),
            subtree_0_root,
            "Node at depth {}, value {} should belong to subtree rooted at depth {}, value 0",
            node_idx.depth(),
            node_idx.value(),
            SUBTREE_DEPTH
        );
    }
    // Test nodes in subtree rooted at (depth=SUBTREE_DEPTH, value=1)
    let subtree_1_root = NodeIndex::new(SUBTREE_DEPTH, 1).unwrap();
    let subtree_1_nodes = [
        NodeIndex::new(SUBTREE_DEPTH, 1).unwrap(),
        NodeIndex::new(SUBTREE_DEPTH + 1, 2).unwrap(),
        NodeIndex::new(SUBTREE_DEPTH + 1, 3).unwrap(),
    ];
    for node_idx in subtree_1_nodes {
        assert_eq!(
            Subtree::find_subtree_root(node_idx),
            subtree_1_root,
            "Node at depth {}, value {} should belong to subtree rooted at depth {}, value 1",
            node_idx.depth(),
            node_idx.value(),
            SUBTREE_DEPTH
        );
    }
    // Test nodes in subtree rooted at (depth=SUBTREE_DEPTH*2, value=3)
    let deep_subtree_root = NodeIndex::new(SUBTREE_DEPTH * 2, 3).unwrap();
    let deep_subtree_nodes = [
        NodeIndex::new(SUBTREE_DEPTH * 2, 3).unwrap(),
        // 5 levels below the subtree root: value = (root value << 5) | offset.
        NodeIndex::new(SUBTREE_DEPTH * 2 + 5, (3 << 5) | 17).unwrap(),
    ];
    for node_idx in deep_subtree_nodes {
        assert_eq!(
            Subtree::find_subtree_root(node_idx),
            deep_subtree_root,
            "Node at depth {}, value {} should belong to deep subtree",
            node_idx.depth(),
            node_idx.value()
        );
    }
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/large/subtree/error.rs | miden-crypto/src/merkle/smt/large/subtree/error.rs | use thiserror::Error;
/// Errors that can occur while deserializing a [`Subtree`](super::Subtree)
/// from its compact byte representation.
#[derive(Debug, Error)]
pub enum SubtreeError {
    /// The hash-data section length does not match the number of set bits in
    /// the presence bitmask.
    #[error("invalid hash data length: expected {expected} bytes, found {found} bytes")]
    BadHashLen { expected: usize, found: usize },
    /// A stored left-child hash could not be parsed into a `Word`.
    #[error("invalid left hash format at local index {index}")]
    BadLeft { index: u8 },
    /// A stored right-child hash could not be parsed into a `Word`.
    #[error("invalid right hash format at local index {index}")]
    BadRight { index: u8 },
    /// Hash data remained after all bitmask-indicated entries were consumed.
    #[error("extra hash data after bitmask-indicated entries")]
    ExtraData,
    /// The bitmask marks a left child as present but its hash data is missing.
    #[error("missing left hash data at local index {index}")]
    MissingLeft { index: u8 },
    /// The bitmask marks a right child as present but its hash data is missing.
    #[error("missing right hash data at local index {index}")]
    MissingRight { index: u8 },
    /// The input is shorter than the mandatory bitmask prefix.
    #[error("subtree data too short: found {found} bytes, need at least {min} bytes")]
    TooShort { found: usize, min: usize },
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/large/subtree/mod.rs | miden-crypto/src/merkle/smt/large/subtree/mod.rs | use alloc::vec::Vec;
use super::{EmptySubtreeRoots, InnerNode, InnerNodeInfo, NodeIndex, SMT_DEPTH};
use crate::{
Word,
merkle::smt::{Map, full::concurrent::SUBTREE_DEPTH},
};
mod error;
pub use error::SubtreeError;
#[cfg(test)]
mod tests;
/// Represents a complete 8-depth subtree that is serialized into a single RocksDB entry.
///
/// ### What is stored
/// - `nodes` tracks only **non-empty inner nodes** of this subtree (i.e., nodes for which at least
/// one child differs from the canonical empty hash). Each entry stores an `InnerNode` (hash
/// pair).
///
/// ### Local index layout (how indices are computed)
/// - Indices are **subtree-local** and follow binary-heap (level-order) layout: `root = 0`;
/// children of `i` are at `2i+1` and `2i+2`.
/// - Equivalently, given a `(depth, value)` from the parent tree, the local index is obtained by
/// taking the node’s depth **relative to the subtree root** and its left-to-right position within
/// that level (offset by the total number of nodes in all previous levels).
///
/// ### Serialization (`to_vec` / `from_vec`)
/// - Uses a **512-bit bitmask** (2 bits per node) to mark non-empty left/right children, followed
/// by a packed stream of `Word` hashes for each set bit.
/// - Children equal to the canonical empty hash are omitted in the byte representation and
/// reconstructed on load using `EmptySubtreeRoots` and the child’s depth in the parent tree.
#[derive(Debug, Clone)]
pub struct Subtree {
    /// Index of this subtree's root in the parent SMT.
    root_index: NodeIndex,
    /// Inner nodes keyed by subtree-local index (binary-heap order).
    /// Only non-empty nodes are stored; an absent key means the node is empty.
    nodes: Map<u8, InnerNode>,
}
impl Subtree {
    /// Size in bytes of one serialized child hash (a `Word`).
    const HASH_SIZE: usize = 32;
    /// Size in bytes of the presence bitmask (512 bits, 2 bits per node).
    const BITMASK_SIZE: usize = 64;
    /// Number of inner nodes in a complete depth-8 subtree (2^8 - 1).
    const MAX_NODES: u8 = 255;
    /// Bits reserved per node in the bitmask: one per child (left, right).
    const BITS_PER_NODE: usize = 2;

    /// Creates an empty subtree rooted at `root_index` in the parent tree.
    pub fn new(root_index: NodeIndex) -> Self {
        Self { root_index, nodes: Map::new() }
    }

    /// Returns the index of this subtree's root in the parent SMT.
    pub fn root_index(&self) -> NodeIndex {
        self.root_index
    }

    /// Returns the number of non-empty inner nodes stored in this subtree.
    pub fn len(&self) -> usize {
        self.nodes.len()
    }

    /// Inserts `inner_node` at the given parent-tree index, returning the
    /// previously stored node at that position, if any.
    ///
    /// Panics (via `global_to_local`) if `index` lies above this subtree's root.
    pub fn insert_inner_node(
        &mut self,
        index: NodeIndex,
        inner_node: InnerNode,
    ) -> Option<InnerNode> {
        let local_index = Self::global_to_local(index, self.root_index);
        self.nodes.insert(local_index, inner_node)
    }

    /// Removes and returns the node at the given parent-tree index, if present.
    pub fn remove_inner_node(&mut self, index: NodeIndex) -> Option<InnerNode> {
        let local_index = Self::global_to_local(index, self.root_index);
        self.nodes.remove(&local_index)
    }

    /// Returns a copy of the node at the given parent-tree index, if present.
    pub fn get_inner_node(&self, index: NodeIndex) -> Option<InnerNode> {
        let local_index = Self::global_to_local(index, self.root_index);
        self.nodes.get(&local_index).cloned()
    }

    /// Serializes this subtree into a compact byte representation.
    ///
    /// The encoding has two components:
    ///
    /// **Bitmask (512 bits)** — Each internal node (up to 255 total) is assigned 2 bits:
    /// one for the left child and one for the right child. A bit is set if the corresponding
    /// child differs from the canonical empty hash at its depth. This avoids storing empty
    /// children.
    ///
    /// **Hash data** — For every set bit in the mask, the corresponding 32-byte `Word` hash
    /// is appended to the data section. Hashes are written in breadth-first (local index)
    /// order, scanning children left-then-right.
    ///
    /// On deserialization, omitted children are reconstructed using `EmptySubtreeRoots`.
    pub fn to_vec(&self) -> Vec<u8> {
        let mut data = Vec::with_capacity(self.len() * Self::HASH_SIZE);
        let mut bitmask = [0u8; Self::BITMASK_SIZE];
        for local_index in 0..Self::MAX_NODES {
            if let Some(node) = self.nodes.get(&local_index) {
                let bit_offset = (local_index as usize) * Self::BITS_PER_NODE;
                // A node's children sit one level below it in the parent tree;
                // their depth determines which canonical empty hash applies.
                let node_depth_in_subtree = Self::local_index_to_depth(local_index);
                let child_depth = self.root_index.depth() + node_depth_in_subtree + 1;
                let empty_hash = *EmptySubtreeRoots::entry(SMT_DEPTH, child_depth);
                // Only children differing from the empty hash are flagged and written.
                if node.left != empty_hash {
                    Self::set_bit(&mut bitmask, bit_offset);
                    data.extend_from_slice(&node.left.as_bytes());
                }
                if node.right != empty_hash {
                    Self::set_bit(&mut bitmask, bit_offset + 1);
                    data.extend_from_slice(&node.right.as_bytes());
                }
            }
        }
        let mut result = Vec::with_capacity(Self::BITMASK_SIZE + data.len());
        result.extend_from_slice(&bitmask);
        result.extend_from_slice(&data);
        result
    }

    /// Sets the bit at `bit_offset` (bit 0 is the LSB of byte 0).
    #[inline]
    fn set_bit(bitmask: &mut [u8], bit_offset: usize) {
        bitmask[bit_offset / 8] |= 1 << (bit_offset % 8);
    }

    /// Returns `true` if the bit at `bit_offset` is set (same layout as `set_bit`).
    #[inline]
    fn get_bit(bitmask: &[u8], bit_offset: usize) -> bool {
        (bitmask[bit_offset / 8] >> (bit_offset % 8)) & 1 != 0
    }

    /// Deserializes a subtree from its compact byte representation.
    ///
    /// The first 512 bits form the bitmask, which indicates which child hashes
    /// are present for each internal node (2 bits per node). For every set bit,
    /// a `Word` hash is read sequentially from the data section.
    ///
    /// When a child bit is unset, the corresponding hash is reconstructed from
    /// `EmptySubtreeRoots` based on the child’s depth in the full tree.
    ///
    /// Errors are returned if the byte slice is too short, contains an unexpected
    /// number of hashes, or leaves unconsumed data at the end.
    pub fn from_vec(root_index: NodeIndex, data: &[u8]) -> Result<Self, SubtreeError> {
        if data.len() < Self::BITMASK_SIZE {
            return Err(SubtreeError::TooShort {
                found: data.len(),
                min: Self::BITMASK_SIZE,
            });
        }
        let (bitmask, hash_data) = data.split_at(Self::BITMASK_SIZE);
        // Total set bits == number of hashes that must follow the bitmask.
        let present_hashes: usize = bitmask.iter().map(|&byte| byte.count_ones() as usize).sum();
        if hash_data.len() != present_hashes * Self::HASH_SIZE {
            return Err(SubtreeError::BadHashLen {
                expected: present_hashes * Self::HASH_SIZE,
                found: hash_data.len(),
            });
        }
        let mut nodes = Map::new();
        let mut hash_chunks = hash_data.chunks_exact(Self::HASH_SIZE);
        // Process each potential node position
        for local_index in 0..Self::MAX_NODES {
            let bit_offset = (local_index as usize) * Self::BITS_PER_NODE;
            let has_left = Self::get_bit(bitmask, bit_offset);
            let has_right = Self::get_bit(bitmask, bit_offset + 1);
            if has_left || has_right {
                // Calculate depth for empty hash lookup
                let node_depth_in_subtree = Self::local_index_to_depth(local_index);
                let child_depth = root_index.depth() + node_depth_in_subtree + 1;
                let empty_hash = *EmptySubtreeRoots::entry(SMT_DEPTH, child_depth);
                // Get left child hash
                let left_hash = if has_left {
                    let hash_bytes = hash_chunks
                        .next()
                        .ok_or(SubtreeError::MissingLeft { index: local_index })?;
                    Word::try_from(hash_bytes)
                        .map_err(|_| SubtreeError::BadLeft { index: local_index })?
                } else {
                    empty_hash
                };
                // Get right child hash
                let right_hash = if has_right {
                    let hash_bytes = hash_chunks
                        .next()
                        .ok_or(SubtreeError::MissingRight { index: local_index })?;
                    Word::try_from(hash_bytes)
                        .map_err(|_| SubtreeError::BadRight { index: local_index })?
                } else {
                    empty_hash
                };
                let inner_node = InnerNode { left: left_hash, right: right_hash };
                nodes.insert(local_index, inner_node);
            }
        }
        // Ensure all hash data was consumed
        // (defensive: the length check above should already guarantee this)
        if hash_chunks.next().is_some() {
            return Err(SubtreeError::ExtraData);
        }
        Ok(Self { root_index, nodes })
    }

    /// Converts a parent-tree (global) index into this subtree's local
    /// binary-heap index (`root = 0`; children of `i` at `2i+1` / `2i+2`).
    ///
    /// # Panics
    /// Panics if `global` is shallower than `base`. Callers must keep nodes
    /// within the subtree (relative depth <= 7); larger relative depths would
    /// overflow the `u8` shift below.
    fn global_to_local(global: NodeIndex, base: NodeIndex) -> u8 {
        assert!(
            global.depth() >= base.depth(),
            "Global depth is less than base depth = {}, global depth = {}",
            base.depth(),
            global.depth()
        );
        // Calculate the relative depth within the subtree
        let relative_depth = global.depth() - base.depth();
        // Calculate the base offset in a binary tree of given relative depth
        let base_offset = (1 << relative_depth) - 1;
        // Mask out the lower `relative_depth` bits to find the local position in the subtree
        let mask = (1 << relative_depth) - 1;
        let local_position = (global.value() & mask) as u8;
        base_offset + local_position
    }

    /// Builds the 9-byte storage key for the subtree rooted at `root_index`:
    /// one depth byte followed by the root's value in big-endian byte order.
    pub fn subtree_key(root_index: NodeIndex) -> [u8; 9] {
        let mut key = [0u8; 9];
        key[0] = root_index.depth();
        key[1..].copy_from_slice(&root_index.value().to_be_bytes());
        key
    }

    /// Returns the root index of the `SUBTREE_DEPTH`-aligned subtree that
    /// contains `node_index`; nodes above the first boundary map to the tree root.
    pub fn find_subtree_root(node_index: NodeIndex) -> NodeIndex {
        let depth = node_index.depth();
        if depth < SUBTREE_DEPTH {
            NodeIndex::root()
        } else {
            // Round the depth down to the nearest multiple of SUBTREE_DEPTH and
            // shift the value right to drop the within-subtree position bits.
            let subtree_root_depth = depth - (depth % SUBTREE_DEPTH);
            let relative_depth = depth - subtree_root_depth;
            let base_value = node_index.value() >> relative_depth;
            NodeIndex::new(subtree_root_depth, base_value).unwrap()
        }
    }

    /// Returns `true` if no inner nodes are stored in this subtree.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Convert local index to depth within subtree
    ///
    /// Computes `floor(log2(local_index + 1))`, i.e. the level-order depth of
    /// the node (root is depth 0). Widening to `u16` keeps index 255 valid.
    #[inline]
    const fn local_index_to_depth(local_index: u8) -> u8 {
        let n = local_index as u16 + 1;
        (u16::BITS as u8 - 1) - n.leading_zeros() as u8
    }

    /// Iterates over the stored nodes as [`InnerNodeInfo`]; each node's `value`
    /// is obtained from `InnerNode::hash()`.
    pub fn iter_inner_node_info(&self) -> impl Iterator<Item = InnerNodeInfo> + '_ {
        self.nodes.values().map(|inner_node_ref| InnerNodeInfo {
            value: inner_node_ref.hash(),
            left: inner_node_ref.left,
            right: inner_node_ref.right,
        })
    }
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/simple/proof.rs | miden-crypto/src/merkle/smt/simple/proof.rs | use crate::{
Word,
merkle::{MerkleError, MerkleProof, SparseMerklePath},
};
/// A container for a [crate::Word] value and its [SparseMerklePath] opening.
///
/// Both fields are public so callers may destructure the proof directly.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct SimpleSmtProof {
    /// The node value opening for `path`.
    pub value: Word,
    /// The path from `value` to `root` (exclusive), using an efficient memory representation for
    /// empty nodes.
    pub path: SparseMerklePath,
}
impl SimpleSmtProof {
    /// Convenience function to construct a [SimpleSmtProof].
    ///
    /// `value` is the value `path` leads to, in the tree.
    ///
    /// No validation is performed; the caller is responsible for providing a
    /// path that actually opens `value`.
    pub fn new(value: Word, path: SparseMerklePath) -> Self {
        Self { value, path }
    }
}
impl From<(SparseMerklePath, Word)> for SimpleSmtProof {
fn from((path, value): (SparseMerklePath, Word)) -> Self {
SimpleSmtProof::new(value, path)
}
}
impl TryFrom<MerkleProof> for SimpleSmtProof {
    type Error = MerkleError;

    /// # Errors
    ///
    /// This conversion returns [MerkleError::DepthTooBig] if the path length is greater than
    /// [`super::SMT_MAX_DEPTH`].
    fn try_from(proof: MerkleProof) -> Result<Self, MerkleError> {
        // Converting the dense path to a sparse one is the only fallible step.
        let value = proof.value;
        let path = SparseMerklePath::try_from(proof.path)?;
        Ok(Self { value, path })
    }
}
impl From<SimpleSmtProof> for MerkleProof {
    /// Converts the proof into a [MerkleProof], expanding the path representation.
    fn from(proof: SimpleSmtProof) -> Self {
        MerkleProof {
            value: proof.value,
            path: proof.path.into(),
        }
    }
}
impl PartialEq<MerkleProof> for SimpleSmtProof {
    /// Field-wise comparison; cross-type path equality is provided by the crate.
    fn eq(&self, other: &MerkleProof) -> bool {
        let values_match = self.value == other.value;
        let paths_match = self.path == other.path;
        values_match && paths_match
    }
}
impl PartialEq<SimpleSmtProof> for MerkleProof {
    fn eq(&self, rhs: &SimpleSmtProof) -> bool {
        // Delegate to the symmetric impl so the two stay consistent.
        rhs == self
    }
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/simple/tests.rs | miden-crypto/src/merkle/smt/simple/tests.rs | use alloc::vec::Vec;
use assert_matches::assert_matches;
use super::{
super::{MerkleError, SimpleSmt, Word},
NodeIndex,
};
use crate::{
EMPTY_WORD,
hash::rpo::Rpo256,
merkle::{
EmptySubtreeRoots, InnerNodeInfo, MerklePath, MerkleTree, int_to_leaf, int_to_node,
smt::{LeafIndex, SparseMerkleTree},
},
};
// TEST DATA
// ================================================================================================
const KEYS4: [u64; 4] = [0, 1, 2, 3];
const KEYS8: [u64; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
const VALUES4: [Word; 4] = [int_to_node(1), int_to_node(2), int_to_node(3), int_to_node(4)];
const VALUES8: [Word; 8] = [
int_to_node(1),
int_to_node(2),
int_to_node(3),
int_to_node(4),
int_to_node(5),
int_to_node(6),
int_to_node(7),
int_to_node(8),
];
const ZERO_VALUES8: [Word; 8] = [int_to_leaf(0); 8];
// TESTS
// ================================================================================================
/// A freshly created depth-3 SMT must have the same root as a dense Merkle
/// tree built over all-zero leaves.
#[test]
fn build_empty_tree() {
    let smt = SimpleSmt::<3>::new().unwrap();
    let reference = MerkleTree::new(ZERO_VALUES8).unwrap();
    assert_eq!(smt.root(), reference.root());
}
/// Incrementally inserts leaves into a sparse SMT and checks root, opening
/// path, returned previous value, and leaf count against a dense reference
/// [MerkleTree] after each step.
#[test]
fn build_sparse_tree() {
    const DEPTH: u8 = 3;
    let mut smt = SimpleSmt::<DEPTH>::new().unwrap();
    // Dense mirror of the expected leaf layout.
    let mut values = ZERO_VALUES8.to_vec();
    assert_eq!(smt.num_leaves(), 0);
    // insert single value
    let key = 6;
    let new_node = int_to_leaf(7);
    values[key as usize] = new_node;
    let old_value = smt.insert(LeafIndex::<DEPTH>::new(key).unwrap(), new_node);
    let mt2 = MerkleTree::new(values.clone()).unwrap();
    assert_eq!(mt2.root(), smt.root());
    assert_eq!(
        mt2.get_path(NodeIndex::make(3, 6)).unwrap(),
        smt.open(&LeafIndex::<3>::new(6).unwrap()).path
    );
    // The slot was previously empty, so the insert must report EMPTY_WORD.
    assert_eq!(old_value, EMPTY_WORD);
    assert_eq!(smt.num_leaves(), 1);
    // insert second value at distinct leaf branch
    let key = 2;
    let new_node = int_to_leaf(3);
    values[key as usize] = new_node;
    let old_value = smt.insert(LeafIndex::<DEPTH>::new(key).unwrap(), new_node);
    let mt3 = MerkleTree::new(values).unwrap();
    assert_eq!(mt3.root(), smt.root());
    assert_eq!(
        mt3.get_path(NodeIndex::make(3, 2)).unwrap(),
        smt.open(&LeafIndex::<3>::new(2).unwrap()).path
    );
    assert_eq!(old_value, EMPTY_WORD);
    assert_eq!(smt.num_leaves(), 2);
}
/// Tests that [`SimpleSmt::with_contiguous_leaves`] works as expected
#[test]
fn build_contiguous_tree() {
    // Building from explicit (index, value) pairs and from a contiguous value
    // list must yield identical trees.
    let expected = SimpleSmt::<2>::with_leaves(
        VALUES4.iter().copied().enumerate().map(|(i, v)| (i as u64, v)),
    )
    .unwrap();
    let actual = SimpleSmt::<2>::with_contiguous_leaves(VALUES4.to_vec()).unwrap();
    assert_eq!(expected, actual);
}
#[test]
fn test_depth2_tree() {
let tree = SimpleSmt::<2>::with_leaves(KEYS4.into_iter().zip(VALUES4.to_vec())).unwrap();
// check internal structure
let (root, node2, node3) = compute_internal_nodes();
assert_eq!(root, tree.root());
assert_eq!(node2, tree.get_node(NodeIndex::make(1, 0)).unwrap());
assert_eq!(node3, tree.get_node(NodeIndex::make(1, 1)).unwrap());
// check get_node()
assert_eq!(VALUES4[0], tree.get_node(NodeIndex::make(2, 0)).unwrap());
assert_eq!(VALUES4[1], tree.get_node(NodeIndex::make(2, 1)).unwrap());
assert_eq!(VALUES4[2], tree.get_node(NodeIndex::make(2, 2)).unwrap());
assert_eq!(VALUES4[3], tree.get_node(NodeIndex::make(2, 3)).unwrap());
// check get_path(): depth 2
assert_eq!(
MerklePath::from(vec![VALUES4[1], node3]),
tree.open(&LeafIndex::<2>::new(0).unwrap()).path,
);
assert_eq!(
MerklePath::from(vec![VALUES4[0], node3]),
tree.open(&LeafIndex::<2>::new(1).unwrap()).path,
);
assert_eq!(
MerklePath::from(vec![VALUES4[3], node2]),
tree.open(&LeafIndex::<2>::new(2).unwrap()).path,
);
assert_eq!(
MerklePath::from(vec![VALUES4[2], node2]),
tree.open(&LeafIndex::<2>::new(3).unwrap()).path,
);
}
#[test]
fn test_inner_node_iterator() -> Result<(), MerkleError> {
let tree = SimpleSmt::<2>::with_leaves(KEYS4.into_iter().zip(VALUES4.to_vec())).unwrap();
// check depth 2
assert_eq!(VALUES4[0], tree.get_node(NodeIndex::make(2, 0)).unwrap());
assert_eq!(VALUES4[1], tree.get_node(NodeIndex::make(2, 1)).unwrap());
assert_eq!(VALUES4[2], tree.get_node(NodeIndex::make(2, 2)).unwrap());
assert_eq!(VALUES4[3], tree.get_node(NodeIndex::make(2, 3)).unwrap());
// get parent nodes
let root = tree.root();
let l1n0 = tree.get_node(NodeIndex::make(1, 0))?;
let l1n1 = tree.get_node(NodeIndex::make(1, 1))?;
let l2n0 = tree.get_node(NodeIndex::make(2, 0))?;
let l2n1 = tree.get_node(NodeIndex::make(2, 1))?;
let l2n2 = tree.get_node(NodeIndex::make(2, 2))?;
let l2n3 = tree.get_node(NodeIndex::make(2, 3))?;
let mut nodes: Vec<InnerNodeInfo> = tree.inner_nodes().collect();
let mut expected = [
InnerNodeInfo { value: root, left: l1n0, right: l1n1 },
InnerNodeInfo { value: l1n0, left: l2n0, right: l2n1 },
InnerNodeInfo { value: l1n1, left: l2n2, right: l2n3 },
];
nodes.sort();
expected.sort();
assert_eq!(nodes, expected);
Ok(())
}
#[test]
fn test_insert() {
const DEPTH: u8 = 3;
let mut tree =
SimpleSmt::<DEPTH>::with_leaves(KEYS8.into_iter().zip(VALUES8.to_vec())).unwrap();
assert_eq!(tree.num_leaves(), 8);
// update one value
let key = 3;
let new_node = int_to_leaf(9);
let mut expected_values = VALUES8.to_vec();
expected_values[key] = new_node;
let expected_tree = MerkleTree::new(expected_values.clone()).unwrap();
let old_leaf = tree.insert(LeafIndex::<DEPTH>::new(key as u64).unwrap(), new_node);
assert_eq!(expected_tree.root(), tree.root);
assert_eq!(old_leaf, VALUES8[key]);
assert_eq!(tree.num_leaves(), 8);
// update another value
let key = 6;
let new_node = int_to_leaf(10);
expected_values[key] = new_node;
let expected_tree = MerkleTree::new(expected_values.clone()).unwrap();
let old_leaf = tree.insert(LeafIndex::<DEPTH>::new(key as u64).unwrap(), new_node);
assert_eq!(expected_tree.root(), tree.root);
assert_eq!(old_leaf, VALUES8[key]);
assert_eq!(tree.num_leaves(), 8);
// set a leaf to empty value
let key = 5;
let new_node = EMPTY_WORD;
expected_values[key] = new_node;
let expected_tree = MerkleTree::new(expected_values.clone()).unwrap();
let old_leaf = tree.insert(LeafIndex::<DEPTH>::new(key as u64).unwrap(), new_node);
assert_eq!(expected_tree.root(), tree.root);
assert_eq!(old_leaf, VALUES8[key]);
assert_eq!(tree.num_leaves(), 7);
}
#[test]
fn small_tree_opening_is_consistent() {
// ____k____
// / \
// _i_ _j_
// / \ / \
// e f g h
// / \ / \ / \ / \
// a b 0 0 c 0 0 d
let z = EMPTY_WORD;
let a = Rpo256::merge(&[z; 2]);
let b = Rpo256::merge(&[a; 2]);
let c = Rpo256::merge(&[b; 2]);
let d = Rpo256::merge(&[c; 2]);
let e = Rpo256::merge(&[a, b]);
let f = Rpo256::merge(&[z, z]);
let g = Rpo256::merge(&[c, z]);
let h = Rpo256::merge(&[z, d]);
let i = Rpo256::merge(&[e, f]);
let j = Rpo256::merge(&[g, h]);
let k = Rpo256::merge(&[i, j]);
let entries = vec![(0, a), (1, b), (4, c), (7, d)];
let tree = SimpleSmt::<3>::with_leaves(entries).unwrap();
assert_eq!(tree.root(), k);
let cases: Vec<(u64, Vec<Word>)> =
vec![(0, vec![b, f, j]), (1, vec![a, f, j]), (4, vec![z, h, i]), (7, vec![z, g, i])];
for (key, path) in cases {
let opening = tree.open(&LeafIndex::<3>::new(key).unwrap());
assert_eq!(MerklePath::from(path), opening.path);
}
}
/// Construction must fail with `DuplicateValuesForIndex` whenever two entries
/// target the same key, whatever the two values are.
#[test]
fn test_simplesmt_fail_on_duplicates() {
    let pairs = [
        (int_to_leaf(1), int_to_leaf(1)), // same key, same value
        (int_to_leaf(1), int_to_leaf(2)), // same key, different values
        (EMPTY_WORD, int_to_leaf(1)),     // same key, set to zero
        (int_to_leaf(1), EMPTY_WORD),     // same key, re-set to zero
        (EMPTY_WORD, EMPTY_WORD),         // same key, set to zero twice
    ];
    for (first, second) in pairs {
        // duplicates adjacent in the entry list
        let err = SimpleSmt::<64>::with_leaves([(1, first), (1, second)]).unwrap_err();
        assert_matches!(err, MerkleError::DuplicateValuesForIndex(1));
        // duplicates separated by an unrelated entry
        let err = SimpleSmt::<64>::with_leaves([(1, first), (5, int_to_leaf(5)), (1, second)])
            .unwrap_err();
        assert_matches!(err, MerkleError::DuplicateValuesForIndex(1));
    }
}
/// A zero-valued leaf and a non-zero leaf under distinct keys are not
/// duplicates, so construction must succeed.
#[test]
fn with_no_duplicates_empty_node() {
    let result = SimpleSmt::<64>::with_leaves([(1_u64, int_to_leaf(0)), (5, int_to_leaf(2))]);
    assert!(result.is_ok());
}
/// A tree of depth D has leaf positions 0..2^D; supplying position 2^D must be
/// rejected, both for the empty word and for a real value.
#[test]
fn test_simplesmt_with_leaves_nonexisting_leaf() {
    let value = int_to_node(1);
    for leaf_value in [EMPTY_WORD, value] {
        // Depth 1 has 2 leaves. Position is 0-indexed, position 2 doesn't exist.
        assert!(SimpleSmt::<1>::with_leaves([(2, leaf_value)]).is_err());
        // Depth 2 has 4 leaves. Position is 0-indexed, position 4 doesn't exist.
        assert!(SimpleSmt::<2>::with_leaves([(4, leaf_value)]).is_err());
        // Depth 3 has 8 leaves. Position is 0-indexed, position 8 doesn't exist.
        assert!(SimpleSmt::<3>::with_leaves([(8, leaf_value)]).is_err());
    }
}
#[test]
fn test_simplesmt_set_subtree() {
// Final Tree:
//
// ____k____
// / \
// _i_ _j_
// / \ / \
// e f g h
// / \ / \ / \ / \
// a b 0 0 c 0 0 d
let z = EMPTY_WORD;
let a = Rpo256::merge(&[z; 2]);
let b = Rpo256::merge(&[a; 2]);
let c = Rpo256::merge(&[b; 2]);
let d = Rpo256::merge(&[c; 2]);
let e = Rpo256::merge(&[a, b]);
let f = Rpo256::merge(&[z, z]);
let g = Rpo256::merge(&[c, z]);
let h = Rpo256::merge(&[z, d]);
let i = Rpo256::merge(&[e, f]);
let j = Rpo256::merge(&[g, h]);
let k = Rpo256::merge(&[i, j]);
// subtree:
// g
// / \
// c 0
let subtree = {
let entries = vec![(0, c)];
SimpleSmt::<1>::with_leaves(entries).unwrap()
};
// insert subtree
const TREE_DEPTH: u8 = 3;
let tree = {
let entries = vec![(0, a), (1, b), (7, d)];
let mut tree = SimpleSmt::<TREE_DEPTH>::with_leaves(entries).unwrap();
tree.set_subtree(2, subtree).unwrap();
tree
};
assert_eq!(tree.root(), k);
assert_eq!(tree.get_leaf(&LeafIndex::<TREE_DEPTH>::new(4).unwrap()), c);
assert_eq!(tree.get_inner_node(NodeIndex::new_unchecked(2, 2)).hash(), g);
}
/// Ensures that an invalid input node index into `set_subtree()` incurs no mutation of the tree
#[test]
fn test_simplesmt_set_subtree_unchanged_for_wrong_index() {
// Final Tree:
//
// ____k____
// / \
// _i_ _j_
// / \ / \
// e f g h
// / \ / \ / \ / \
// a b 0 0 c 0 0 d
let z = EMPTY_WORD;
let a = Rpo256::merge(&[z; 2]);
let b = Rpo256::merge(&[a; 2]);
let c = Rpo256::merge(&[b; 2]);
let d = Rpo256::merge(&[c; 2]);
// subtree:
// g
// / \
// c 0
let subtree = {
let entries = vec![(0, c)];
SimpleSmt::<1>::with_leaves(entries).unwrap()
};
let mut tree = {
let entries = vec![(0, a), (1, b), (7, d)];
SimpleSmt::<3>::with_leaves(entries).unwrap()
};
let tree_root_before_insertion = tree.root();
// insert subtree
assert!(tree.set_subtree(500, subtree).is_err());
assert_eq!(tree.root(), tree_root_before_insertion);
}
/// We insert an empty subtree that has the same depth as the original tree
#[test]
fn test_simplesmt_set_subtree_entire_tree() {
// Initial Tree:
//
// ____k____
// / \
// _i_ _j_
// / \ / \
// e f g h
// / \ / \ / \ / \
// a b 0 0 c 0 0 d
let z = EMPTY_WORD;
let a = Rpo256::merge(&[z; 2]);
let b = Rpo256::merge(&[a; 2]);
let c = Rpo256::merge(&[b; 2]);
let d = Rpo256::merge(&[c; 2]);
// subtree: E3
const DEPTH: u8 = 3;
let subtree = { SimpleSmt::<DEPTH>::with_leaves(Vec::new()).unwrap() };
assert_eq!(subtree.root(), *EmptySubtreeRoots::entry(DEPTH, 0));
// insert subtree
let mut tree = {
let entries = vec![(0, a), (1, b), (4, c), (7, d)];
SimpleSmt::<3>::with_leaves(entries).unwrap()
};
tree.set_subtree(0, subtree).unwrap();
assert_eq!(tree.root(), *EmptySubtreeRoots::entry(DEPTH, 0));
}
/// Tests that `EMPTY_ROOT` constant generated in the `SimpleSmt` equals to the root of the empty
/// tree of depth 64
#[test]
fn test_simplesmt_check_empty_root_constant() {
    // The generated EMPTY_ROOT must match the canonical empty-tree root at
    // each of these depths.
    for (depth, empty_root) in [
        (64_u8, SimpleSmt::<64>::EMPTY_ROOT),
        (32, SimpleSmt::<32>::EMPTY_ROOT),
        (1, SimpleSmt::<1>::EMPTY_ROOT),
    ] {
        assert_eq!(EmptySubtreeRoots::empty_hashes(depth)[0], empty_root);
    }
}
// HELPER FUNCTIONS
// --------------------------------------------------------------------------------------------
/// Hashes the four `VALUES4` leaves pairwise, then merges the two resulting
/// nodes into the root; returns `(root, left_node, right_node)`.
fn compute_internal_nodes() -> (Word, Word, Word) {
    let left = Rpo256::merge(&[VALUES4[0], VALUES4[1]]);
    let right = Rpo256::merge(&[VALUES4[2], VALUES4[3]]);
    let root = Rpo256::merge(&[left, right]);
    (root, left, right)
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/merkle/smt/simple/mod.rs | miden-crypto/src/merkle/smt/simple/mod.rs | use alloc::collections::BTreeSet;
use super::{
EMPTY_WORD, EmptySubtreeRoots, InnerNode, InnerNodeInfo, InnerNodes, LeafIndex, MerkleError,
MutationSet, NodeIndex, SMT_MAX_DEPTH, SMT_MIN_DEPTH, SparseMerkleTree, Word,
};
use crate::merkle::{SparseMerklePath, smt::SmtLeafError};
mod proof;
pub use proof::SimpleSmtProof;
#[cfg(test)]
mod tests;
// SPARSE MERKLE TREE
// ================================================================================================
type Leaves = super::Leaves<Word>;
/// A sparse Merkle tree with 64-bit keys and 4-element leaf values, without compaction.
///
/// The root of the tree is recomputed on each new leaf update.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct SimpleSmt<const DEPTH: u8> {
    /// Root hash of the tree, kept current across leaf updates.
    root: Word,
    /// Storage for non-empty inner nodes.
    inner_nodes: InnerNodes,
    /// Storage for leaf values; empty (zero) leaves are not stored.
    leaves: Leaves,
}
impl<const DEPTH: u8> SimpleSmt<DEPTH> {
// CONSTANTS
// --------------------------------------------------------------------------------------------
/// The default value used to compute the hash of empty leaves
pub const EMPTY_VALUE: Word = <Self as SparseMerkleTree<DEPTH>>::EMPTY_VALUE;
// CONSTRUCTORS
// --------------------------------------------------------------------------------------------
/// Returns a new [SimpleSmt].
///
/// All leaves in the returned tree are set to [ZERO; 4].
///
/// # Errors
/// Returns an error if DEPTH is 0 or is greater than 64.
pub fn new() -> Result<Self, MerkleError> {
// validate the range of the depth.
if DEPTH < SMT_MIN_DEPTH {
return Err(MerkleError::DepthTooSmall(DEPTH));
} else if SMT_MAX_DEPTH < DEPTH {
return Err(MerkleError::DepthTooBig(DEPTH as u64));
}
let root = *EmptySubtreeRoots::entry(DEPTH, 0);
Ok(Self {
root,
inner_nodes: Default::default(),
leaves: Default::default(),
})
}
/// Returns a new [SimpleSmt] instantiated with leaves set as specified by the provided entries.
///
/// All leaves omitted from the entries list are set to [ZERO; 4].
///
/// # Errors
/// Returns an error if:
/// - If the depth is 0 or is greater than 64.
/// - The number of entries exceeds the maximum tree capacity, that is 2^{depth}.
/// - The provided entries contain multiple values for the same key.
pub fn with_leaves(
entries: impl IntoIterator<Item = (u64, Word)>,
) -> Result<Self, MerkleError> {
// create an empty tree
let mut tree = Self::new()?;
// compute the max number of entries. We use an upper bound of depth 63 because we consider
// passing in a vector of size 2^64 infeasible.
let max_num_entries = 2_u64.pow(DEPTH.min(63).into());
// This being a sparse data structure, the EMPTY_WORD is not assigned to the `BTreeMap`, so
// entries with the empty value need additional tracking.
let mut key_set_to_zero = BTreeSet::new();
for (idx, (key, value)) in entries.into_iter().enumerate() {
if idx as u64 >= max_num_entries {
return Err(MerkleError::TooManyEntries(DEPTH));
}
let old_value = tree.insert(LeafIndex::<DEPTH>::new(key)?, value);
if old_value != Self::EMPTY_VALUE || key_set_to_zero.contains(&key) {
return Err(MerkleError::DuplicateValuesForIndex(key));
}
if value == Self::EMPTY_VALUE {
key_set_to_zero.insert(key);
};
}
Ok(tree)
}
/// Returns a new [`SimpleSmt`] instantiated from already computed leaves and nodes.
///
/// This function performs minimal consistency checking. It is the caller's responsibility to
/// ensure the passed arguments are correct and consistent with each other.
///
/// # Panics
/// With debug assertions on, this function panics if `root` does not match the root node in
/// `inner_nodes`.
pub fn from_raw_parts(inner_nodes: InnerNodes, leaves: Leaves, root: Word) -> Self {
// Our particular implementation of `from_raw_parts()` never returns `Err`.
<Self as SparseMerkleTree<DEPTH>>::from_raw_parts(inner_nodes, leaves, root).unwrap()
}
/// Wrapper around [`SimpleSmt::with_leaves`] which inserts leaves at contiguous indices
/// starting at index 0.
pub fn with_contiguous_leaves(
entries: impl IntoIterator<Item = Word>,
) -> Result<Self, MerkleError> {
Self::with_leaves(
entries
.into_iter()
.enumerate()
.map(|(idx, word)| (idx.try_into().expect("tree max depth is 2^8"), word)),
)
}
// PUBLIC ACCESSORS
// --------------------------------------------------------------------------------------------
/// Returns the depth of the tree
pub const fn depth(&self) -> u8 {
DEPTH
}
/// Returns the root of the tree
pub fn root(&self) -> Word {
<Self as SparseMerkleTree<DEPTH>>::root(self)
}
/// Returns the number of non-empty leaves in this tree.
pub fn num_leaves(&self) -> usize {
self.leaves.len()
}
/// Returns the leaf at the specified index.
pub fn get_leaf(&self, key: &LeafIndex<DEPTH>) -> Word {
<Self as SparseMerkleTree<DEPTH>>::get_leaf(self, key)
}
/// Returns a node at the specified index.
///
/// # Errors
/// Returns an error if the specified index has depth set to 0 or the depth is greater than
/// the depth of this Merkle tree.
pub fn get_node(&self, index: NodeIndex) -> Result<Word, MerkleError> {
if index.is_root() {
Err(MerkleError::DepthTooSmall(index.depth()))
} else if index.depth() > DEPTH {
Err(MerkleError::DepthTooBig(index.depth() as u64))
} else if index.depth() == DEPTH {
let leaf = self.get_leaf(&LeafIndex::<DEPTH>::try_from(index)?);
Ok(leaf)
} else {
Ok(self.get_inner_node(index).hash())
}
}
/// Returns an opening of the leaf associated with `key`. Conceptually, an opening is a Merkle
/// path to the leaf, as well as the leaf itself.
pub fn open(&self, key: &LeafIndex<DEPTH>) -> SimpleSmtProof {
let value = self.get_value(key);
let nodes = key.index.proof_indices().map(|index| self.get_node_hash(index));
// `from_sized_iter()` returns an error if there are more nodes than `SMT_MAX_DEPTH`, but
// this could only happen if we have more levels than `SMT_MAX_DEPTH` ourselves, which is
// guarded against in `SimpleSmt::new()`.
let path = SparseMerklePath::from_sized_iter(nodes).unwrap();
SimpleSmtProof { value, path }
}
/// Returns a boolean value indicating whether the SMT is empty.
pub fn is_empty(&self) -> bool {
debug_assert_eq!(self.leaves.is_empty(), self.root == Self::EMPTY_ROOT);
self.root == Self::EMPTY_ROOT
}
// ITERATORS
// --------------------------------------------------------------------------------------------
/// Returns an iterator over the leaves of this [SimpleSmt].
pub fn leaves(&self) -> impl Iterator<Item = (u64, &Word)> {
self.leaves.iter().map(|(i, w)| (*i, w))
}
/// Returns an iterator over the inner nodes of this [SimpleSmt].
pub fn inner_nodes(&self) -> impl Iterator<Item = InnerNodeInfo> + '_ {
self.inner_nodes.values().map(|e| InnerNodeInfo {
value: e.hash(),
left: e.left,
right: e.right,
})
}
// STATE MUTATORS
// --------------------------------------------------------------------------------------------
/// Inserts a value at the specified key, returning the previous value associated with that key.
/// Recall that by definition, any key that hasn't been updated is associated with
/// [`EMPTY_WORD`].
///
/// This also recomputes all hashes between the leaf (associated with the key) and the root,
/// updating the root itself.
pub fn insert(&mut self, key: LeafIndex<DEPTH>, value: Word) -> Word {
// SAFETY: a SimpleSmt does not contain multi-value leaves. The underlying
// SimpleSmt::insert_value does not return any errors so it's safe to unwrap here.
<Self as SparseMerkleTree<DEPTH>>::insert(self, key, value)
.expect("inserting a value into a simple smt never returns an error")
}
/// Computes what changes are necessary to insert the specified key-value pairs into this
/// Merkle tree, allowing for validation before applying those changes.
///
/// This method returns a [`MutationSet`], which contains all the information for inserting
/// `kv_pairs` into this Merkle tree already calculated, including the new root hash, which can
/// be queried with [`MutationSet::root()`]. Once a mutation set is returned,
/// [`SimpleSmt::apply_mutations()`] can be called in order to commit these changes to the
/// Merkle tree, or [`drop()`] to discard them.
///
/// # Example
/// ```
/// # use miden_crypto::{Felt, Word};
/// # use miden_crypto::merkle::{smt::{LeafIndex, SimpleSmt, SMT_DEPTH}, EmptySubtreeRoots};
/// let mut smt: SimpleSmt<3> = SimpleSmt::new().unwrap();
/// let pair = (LeafIndex::default(), Word::default());
/// let mutations = smt.compute_mutations(vec![pair]);
/// assert_eq!(mutations.root(), *EmptySubtreeRoots::entry(3, 0));
/// smt.apply_mutations(mutations).unwrap();
/// assert_eq!(smt.root(), *EmptySubtreeRoots::entry(3, 0));
/// ```
pub fn compute_mutations(
&self,
kv_pairs: impl IntoIterator<Item = (LeafIndex<DEPTH>, Word)>,
) -> MutationSet<DEPTH, LeafIndex<DEPTH>, Word> {
// SAFETY: a SimpleSmt does not contain multi-value leaves. The underlying
// SimpleSmt::construct_prospective_leaf does not return any errors so it's safe to unwrap
// here.
<Self as SparseMerkleTree<DEPTH>>::compute_mutations(self, kv_pairs)
.expect("computing mutations on a simple smt never returns an error")
}
/// Applies the prospective mutations computed with [`SimpleSmt::compute_mutations()`] to this
/// tree.
///
/// # Errors
/// If `mutations` was computed on a tree with a different root than this one, returns
/// [`MerkleError::ConflictingRoots`] with a two-item [`alloc::vec::Vec`]. The first item is the
/// root hash the `mutations` were computed against, and the second item is the actual
/// current root of this tree.
pub fn apply_mutations(
&mut self,
mutations: MutationSet<DEPTH, LeafIndex<DEPTH>, Word>,
) -> Result<(), MerkleError> {
<Self as SparseMerkleTree<DEPTH>>::apply_mutations(self, mutations)
}
/// Applies the prospective mutations computed with [`SimpleSmt::compute_mutations()`] to
/// this tree and returns the reverse mutation set.
///
/// Applying the reverse mutation sets to the updated tree will revert the changes.
///
/// # Errors
/// If `mutations` was computed on a tree with a different root than this one, returns
/// [`MerkleError::ConflictingRoots`] with a two-item [`alloc::vec::Vec`]. The first item is the
/// root hash the `mutations` were computed against, and the second item is the actual
/// current root of this tree.
pub fn apply_mutations_with_reversion(
&mut self,
mutations: MutationSet<DEPTH, LeafIndex<DEPTH>, Word>,
) -> Result<MutationSet<DEPTH, LeafIndex<DEPTH>, Word>, MerkleError> {
<Self as SparseMerkleTree<DEPTH>>::apply_mutations_with_reversion(self, mutations)
}
/// Inserts a subtree at the specified index. The depth at which the subtree is inserted is
/// computed as `DEPTH - SUBTREE_DEPTH`.
///
/// Returns the new root.
pub fn set_subtree<const SUBTREE_DEPTH: u8>(
&mut self,
subtree_insertion_index: u64,
subtree: SimpleSmt<SUBTREE_DEPTH>,
) -> Result<Word, MerkleError> {
if SUBTREE_DEPTH > DEPTH {
return Err(MerkleError::SubtreeDepthExceedsDepth {
subtree_depth: SUBTREE_DEPTH,
tree_depth: DEPTH,
});
}
// Verify that `subtree_insertion_index` is valid.
let subtree_root_insertion_depth = DEPTH - SUBTREE_DEPTH;
let subtree_root_index =
NodeIndex::new(subtree_root_insertion_depth, subtree_insertion_index)?;
// add leaves
// --------------
// The subtree's leaf indices live in their own context - i.e. a subtree of depth `d`. If we
// insert the subtree at `subtree_insertion_index = 0`, then the subtree leaf indices are
// valid as they are. However, consider what happens when we insert at
// `subtree_insertion_index = 1`. The first leaf of our subtree now will have index `2^d`;
// you can see it as there's a full subtree sitting on its left. In general, for
// `subtree_insertion_index = i`, there are `i` subtrees sitting before the subtree we want
// to insert, so we need to adjust all its leaves by `i * 2^d`.
let leaf_index_shift: u64 = subtree_insertion_index * 2_u64.pow(SUBTREE_DEPTH.into());
for (subtree_leaf_idx, leaf_value) in subtree.leaves() {
let new_leaf_idx = leaf_index_shift + subtree_leaf_idx;
debug_assert!(new_leaf_idx < 2_u64.pow(DEPTH.into()));
self.leaves.insert(new_leaf_idx, *leaf_value);
}
// add subtree's branch nodes (which includes the root)
// --------------
for (branch_idx, branch_node) in subtree.inner_nodes {
let new_branch_idx = {
let new_depth = subtree_root_insertion_depth + branch_idx.depth();
let new_value = subtree_insertion_index * 2_u64.pow(branch_idx.depth().into())
+ branch_idx.value();
NodeIndex::new(new_depth, new_value).expect("index guaranteed to be valid")
};
self.inner_nodes.insert(new_branch_idx, branch_node);
}
// recompute nodes starting from subtree root
// --------------
self.recompute_nodes_from_index_to_root(subtree_root_index, subtree.root);
Ok(self.root)
}
}
impl<const DEPTH: u8> SparseMerkleTree<DEPTH> for SimpleSmt<DEPTH> {
type Key = LeafIndex<DEPTH>;
type Value = Word;
type Leaf = Word;
type Opening = SimpleSmtProof;
const EMPTY_VALUE: Self::Value = EMPTY_WORD;
const EMPTY_ROOT: Word = *EmptySubtreeRoots::entry(DEPTH, 0);
fn from_raw_parts(
inner_nodes: InnerNodes,
leaves: Leaves,
root: Word,
) -> Result<Self, MerkleError> {
if cfg!(debug_assertions) {
let root_node_hash = inner_nodes
.get(&NodeIndex::root())
.map(InnerNode::hash)
.unwrap_or(Self::EMPTY_ROOT);
assert_eq!(root_node_hash, root);
}
Ok(Self { root, inner_nodes, leaves })
}
fn root(&self) -> Word {
self.root
}
fn set_root(&mut self, root: Word) {
self.root = root;
}
fn get_inner_node(&self, index: NodeIndex) -> InnerNode {
self.inner_nodes
.get(&index)
.cloned()
.unwrap_or_else(|| EmptySubtreeRoots::get_inner_node(DEPTH, index.depth()))
}
fn insert_inner_node(&mut self, index: NodeIndex, inner_node: InnerNode) -> Option<InnerNode> {
self.inner_nodes.insert(index, inner_node)
}
fn remove_inner_node(&mut self, index: NodeIndex) -> Option<InnerNode> {
self.inner_nodes.remove(&index)
}
fn insert_value(
&mut self,
key: LeafIndex<DEPTH>,
value: Word,
) -> Result<Option<Word>, MerkleError> {
let result = if value == Self::EMPTY_VALUE {
self.leaves.remove(&key.value())
} else {
self.leaves.insert(key.value(), value)
};
Ok(result)
}
fn get_value(&self, key: &LeafIndex<DEPTH>) -> Word {
self.get_leaf(key)
}
fn get_leaf(&self, key: &LeafIndex<DEPTH>) -> Word {
let leaf_pos = key.value();
match self.leaves.get(&leaf_pos) {
Some(word) => *word,
None => Self::EMPTY_VALUE,
}
}
fn hash_leaf(leaf: &Word) -> Word {
// `SimpleSmt` takes the leaf value itself as the hash
*leaf
}
fn construct_prospective_leaf(
&self,
_existing_leaf: Word,
_key: &LeafIndex<DEPTH>,
value: &Word,
) -> Result<Word, SmtLeafError> {
Ok(*value)
}
fn key_to_leaf_index(key: &LeafIndex<DEPTH>) -> LeafIndex<DEPTH> {
*key
}
fn path_and_leaf_to_opening(path: SparseMerklePath, leaf: Word) -> SimpleSmtProof {
(path, leaf).into()
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/utils/iterators.rs | miden-crypto/src/utils/iterators.rs | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
//! Components needed for parallel iterators.
//!
//! When `concurrent` feature is enabled, this module re-exports `rayon::prelude`. Otherwise,
//! this is an empty module.
#[cfg(feature = "concurrent")]
pub use rayon::prelude::*;
/// Returns either a regular or a parallel iterator depending on whether `concurrent` feature
/// is enabled.
///
/// When `concurrent` feature is enabled, creates a parallel iterator; otherwise, creates a
/// regular iterator. Optionally, `min_length` can be used to specify the minimum length of
/// iterator to be processed in each thread.
///
/// Adapted from: <https://github.com/arkworks-rs/utils/blob/master/src/lib.rs>
#[macro_export]
macro_rules! iter {
($e:expr) => {{
#[cfg(feature = "concurrent")]
let result = $e.par_iter();
#[cfg(not(feature = "concurrent"))]
let result = $e.iter();
result
}};
($e:expr, $min_len:expr) => {{
#[cfg(feature = "concurrent")]
let result = $e.par_iter().with_min_len($min_len);
#[cfg(not(feature = "concurrent"))]
let result = $e.iter();
result
}};
}
/// Returns either a regular or a parallel mutable iterator depending on whether `concurrent`
/// feature is enabled.
///
/// When `concurrent` feature is enabled, creates a mutable parallel iterator; otherwise,
/// creates a regular mutable iterator. Optionally, `min_length` can be used to specify the
/// minimum length of iterator to be processed in each thread.
///
/// Adapted from: <https://github.com/arkworks-rs/utils/blob/master/src/lib.rs>
#[macro_export]
macro_rules! iter_mut {
($e:expr) => {{
#[cfg(feature = "concurrent")]
let result = $e.par_iter_mut();
#[cfg(not(feature = "concurrent"))]
let result = $e.iter_mut();
result
}};
($e:expr, $min_len:expr) => {{
#[cfg(feature = "concurrent")]
let result = $e.par_iter_mut().with_min_len($min_len);
#[cfg(not(feature = "concurrent"))]
let result = $e.iter_mut();
result
}};
}
/// Applies a procedure to the provided slice either in a single thread or multiple threads
/// based on whether `concurrent` feature is enabled.
///
/// When `concurrent` feature is enabled, breaks the slice into batches and processes each
/// batch in a separate thread; otherwise, the entire slice is processed as a single batch
/// in one thread. Optionally, `min_batch_size` can be used to specify the minimum size of
/// the resulting batches.
#[macro_export]
macro_rules! batch_iter_mut {
($e: expr, $c: expr) => {
#[cfg(feature = "concurrent")]
{
let batch_size = $e.len() / rayon::current_num_threads().next_power_of_two();
if batch_size < 1 {
$c($e, 0);
}
else {
$e.par_chunks_mut(batch_size).enumerate().for_each(|(i, batch)| {
$c(batch, i * batch_size);
});
}
}
#[cfg(not(feature = "concurrent"))]
$c($e, 0);
};
($e: expr, $min_batch_size: expr, $c: expr) => {
#[cfg(feature = "concurrent")]
{
let batch_size = $e.len() / rayon::current_num_threads().next_power_of_two();
if batch_size < $min_batch_size {
$c($e, 0);
}
else {
$e.par_chunks_mut(batch_size).enumerate().for_each(|(i, batch)| {
$c(batch, i * batch_size);
});
}
}
#[cfg(not(feature = "concurrent"))]
$c($e, 0);
};
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/utils/mod.rs | miden-crypto/src/utils/mod.rs | //! Utilities used in this crate which can also be generally useful downstream.
use alloc::{string::String, vec::Vec};
use core::fmt::{self, Write};
// Re-export serialization traits from miden-serde-utils
#[cfg(feature = "std")]
pub use miden_serde_utils::ReadAdapter;
pub use miden_serde_utils::{
ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable, SliceReader,
};
use p3_field::{PrimeCharacteristicRing, RawDataSerializable, integers::QuotientMap};
use thiserror::Error;
mod iterators;
#[cfg(feature = "concurrent")]
use iterators::{IndexedParallelIterator, IntoParallelRefMutIterator, ParallelIterator};
use crate::{Felt, Word, field::PrimeField64};
// CONSTANTS
// ================================================================================================
/// The number of byte chunks that can be safely embedded in a field element
const BINARY_CHUNK_SIZE: usize = 7;
// RE-EXPORTS
// ================================================================================================
pub use k256::elliptic_curve::zeroize;
// UTILITY FUNCTIONS
// ================================================================================================
/// Converts a [Word] into hex.
pub fn word_to_hex(w: &Word) -> Result<String, fmt::Error> {
let mut s = String::new();
for byte in w.iter().flat_map(|&e| e.to_bytes()) {
write!(s, "{byte:02x}")?;
}
Ok(s)
}
/// Renders an array of bytes as hex into a String.
pub fn bytes_to_hex_string<const N: usize>(data: [u8; N]) -> String {
let mut s = String::with_capacity(N + 2);
s.push_str("0x");
for byte in data.iter() {
write!(s, "{byte:02x}").expect("formatting hex failed");
}
s
}
/// Defines errors which can occur during parsing of hexadecimal strings.
#[derive(Debug, Error)]
pub enum HexParseError {
#[error("expected hex data to have length {expected}, including the 0x prefix, found {actual}")]
InvalidLength { expected: usize, actual: usize },
#[error("hex encoded data must start with 0x prefix")]
MissingPrefix,
#[error("hex encoded data must contain only characters [0-9a-fA-F]")]
InvalidChar,
#[error("hex encoded values of a Digest must be inside the field modulus")]
OutOfRange,
}
/// Parses a hex string into an array of bytes of known size.
pub fn hex_to_bytes<const N: usize>(value: &str) -> Result<[u8; N], HexParseError> {
let expected: usize = (N * 2) + 2;
if value.len() != expected {
return Err(HexParseError::InvalidLength { expected, actual: value.len() });
}
if !value.starts_with("0x") {
return Err(HexParseError::MissingPrefix);
}
let mut data = value.bytes().skip(2).map(|v| match v {
b'0'..=b'9' => Ok(v - b'0'),
b'a'..=b'f' => Ok(v - b'a' + 10),
b'A'..=b'F' => Ok(v - b'A' + 10),
_ => Err(HexParseError::InvalidChar),
});
let mut decoded = [0u8; N];
for byte in decoded.iter_mut() {
// These `unwrap` calls are okay because the length was checked above
let high: u8 = data.next().unwrap()?;
let low: u8 = data.next().unwrap()?;
*byte = (high << 4) + low;
}
Ok(decoded)
}
// CONVERSIONS BETWEEN BYTES AND ELEMENTS
// ================================================================================================
/// Converts a sequence of bytes into vector field elements with padding. This guarantees that no
/// two sequences or bytes map to the same sequence of field elements.
///
/// Packs bytes into chunks of `BINARY_CHUNK_SIZE` and adds padding to the final chunk using a `1`
/// bit followed by zeros. This ensures the original bytes can be recovered during decoding without
/// any ambiguity.
///
/// Note that by the endianness of the conversion as well as the fact that we are packing at most
/// `56 = 7 * 8` bits in each field element, the padding above with `1` should never overflow the
/// field size.
///
/// # Arguments
/// * `bytes` - Byte slice to encode
///
/// # Returns
/// Vector of `Felt` elements with the last element containing padding
pub fn bytes_to_elements_with_padding(bytes: &[u8]) -> Vec<Felt> {
if bytes.is_empty() {
return vec![];
}
// determine the number of field elements needed to encode `bytes` when each field element
// represents at most 7 bytes.
let num_field_elem = bytes.len().div_ceil(BINARY_CHUNK_SIZE);
// initialize a buffer to receive the little-endian elements.
let mut buf = [0_u8; 8];
// iterate the chunks of bytes, creating a field element from each chunk
let last_chunk_idx = num_field_elem - 1;
bytes
.chunks(BINARY_CHUNK_SIZE)
.enumerate()
.map(|(current_chunk_idx, chunk)| {
// copy the chunk into the buffer
if current_chunk_idx != last_chunk_idx {
buf[..BINARY_CHUNK_SIZE].copy_from_slice(chunk);
} else {
// on the last iteration, we pad `buf` with a 1 followed by as many 0's as are
// needed to fill it
buf.fill(0);
buf[..chunk.len()].copy_from_slice(chunk);
buf[chunk.len()] = 1;
}
Felt::new(u64::from_le_bytes(buf))
})
.collect()
}
/// Converts a sequence of padded field elements back to the original bytes.
///
/// Reconstructs the original byte sequence by removing the padding added by `bytes_to_felts`.
/// The padding consists of a `1` bit followed by zeros in the final field element.
///
/// Note that by the endianness of the conversion as well as the fact that we are packing at most
/// `56 = 7 * 8` bits in each field element, the padding above with `1` should never overflow the
/// field size.
///
/// # Arguments
/// * `felts` - Slice of field elements with padding in the last element
///
/// # Returns
/// * `Some(Vec<u8>)` - The original byte sequence with padding removed
/// * `None` - If no padding marker (`1` bit) is found
pub fn padded_elements_to_bytes(felts: &[Felt]) -> Option<Vec<u8>> {
let number_felts = felts.len();
if number_felts == 0 {
return Some(vec![]);
}
let mut result = Vec::with_capacity(number_felts * BINARY_CHUNK_SIZE);
for felt in felts.iter().take(number_felts - 1) {
let felt_bytes = felt.as_canonical_u64().to_le_bytes();
result.extend_from_slice(&felt_bytes[..BINARY_CHUNK_SIZE]);
}
// handle the last field element
let felt_bytes = felts[number_felts - 1].as_canonical_u64().to_le_bytes();
let pos = felt_bytes.iter().rposition(|entry| *entry == 1_u8)?;
result.extend_from_slice(&felt_bytes[..pos]);
Some(result)
}
/// Converts field elements to raw byte representation.
///
/// Each `Felt` is converted to its full `NUM_BYTES` representation, in little-endian form
/// and canonical form, without any padding removal or validation. This is the inverse
/// of `bytes_to_elements_exact`.
///
/// # Arguments
/// * `felts` - Slice of field elements to convert
///
/// # Returns
/// Vector containing the raw bytes from all field elements
pub fn elements_to_bytes(felts: &[Felt]) -> Vec<u8> {
let number_felts = felts.len();
let mut result = Vec::with_capacity(number_felts * Felt::NUM_BYTES);
for felt in felts.iter().take(number_felts) {
let felt_bytes = felt.as_canonical_u64().to_le_bytes();
result.extend_from_slice(&felt_bytes);
}
result
}
/// Converts bytes to field elements with validation.
///
/// This function validates that:
/// - The input bytes length is divisible by `Felt::NUM_BYTES`
/// - All `Felt::NUM_BYTES`-byte sequences represent valid field elements
///
/// # Arguments
/// * `bytes` - Byte slice that must be a multiple of `Felt::NUM_BYTES` in length
///
/// # Returns
/// `Option<Vec<Felt>>` - Vector of `Felt` elements if all validations pass, or None otherwise
pub fn bytes_to_elements_exact(bytes: &[u8]) -> Option<Vec<Felt>> {
// Check that the length is divisible by NUM_BYTES
if !bytes.len().is_multiple_of(Felt::NUM_BYTES) {
return None;
}
let mut result = Vec::with_capacity(bytes.len() / Felt::NUM_BYTES);
for chunk in bytes.chunks_exact(Felt::NUM_BYTES) {
let chunk_array: [u8; Felt::NUM_BYTES] =
chunk.try_into().expect("should succeed given the length check above");
let value = u64::from_le_bytes(chunk_array);
// Validate that the value represents a valid field element
let felt = Felt::from_canonical_checked(value)?;
result.push(felt);
}
Some(result)
}
/// Converts bytes to field elements using u32 packing in little-endian format.
///
/// Each field element contains a u32 value representing up to 4 bytes. If the byte length
/// is not a multiple of 4, the final field element is zero-padded.
///
/// # Arguments
/// - `bytes`: The byte slice to convert
///
/// # Returns
/// A vector of field elements, each containing 4 bytes packed in little-endian order.
///
/// # Examples
/// ```rust
/// # use miden_crypto::{Felt, utils::bytes_to_packed_u32_elements};
///
/// let bytes = vec![0x01, 0x02, 0x03, 0x04, 0x05];
/// let felts = bytes_to_packed_u32_elements(&bytes);
/// assert_eq!(felts, vec![Felt::new(0x04030201), Felt::new(0x00000005)]);
/// ```
pub fn bytes_to_packed_u32_elements(bytes: &[u8]) -> Vec<Felt> {
const BYTES_PER_U32: usize = core::mem::size_of::<u32>();
bytes
.chunks(BYTES_PER_U32)
.map(|chunk| {
// Pack up to 4 bytes into a u32 in little-endian format
let mut packed = [0u8; BYTES_PER_U32];
packed[..chunk.len()].copy_from_slice(chunk);
Felt::from_u32(u32::from_le_bytes(packed))
})
.collect()
}
// VECTOR FUNCTIONS (ported from Winterfell's winter-utils)
// ================================================================================================
/// Returns a vector of the specified length with un-initialized memory.
///
/// This is usually faster than requesting a vector with initialized memory and is useful when we
/// overwrite all contents of the vector immediately after memory allocation.
///
/// # Safety
/// Using values from the returned vector before initializing them will lead to undefined behavior.
#[expect(clippy::uninit_vec)]
pub unsafe fn uninit_vector<T>(length: usize) -> Vec<T> {
let mut vector = Vec::with_capacity(length);
unsafe {
vector.set_len(length);
}
vector
}
// GROUPING / UN-GROUPING FUNCTIONS (ported from Winterfell's winter-utils)
// ================================================================================================
/// Transmutes a slice of `n` elements into a slice of `n` / `N` elements, each of which is
/// an array of `N` elements.
///
/// This function just re-interprets the underlying memory and is thus zero-copy.
/// # Panics
/// Panics if `n` is not divisible by `N`.
pub fn group_slice_elements<T, const N: usize>(source: &[T]) -> &[[T; N]] {
let (chunks, remainder) = source.as_chunks::<N>();
assert!(remainder.is_empty(), "source length must be divisible by {N}");
chunks
}
/// Transmutes a slice of `n` arrays each of length `N`, into a slice of `N` * `n` elements.
///
/// This function just re-interprets the underlying memory and is thus zero-copy.
pub fn flatten_slice_elements<T, const N: usize>(source: &[[T; N]]) -> &[T] {
// SAFETY: [T; N] has the same alignment and memory layout as an array of T.
// p3-util's as_base_slice handles the conversion safely.
unsafe { p3_util::as_base_slice(source) }
}
/// Transmutes a vector of `n` arrays each of length `N`, into a vector of `N` * `n` elements.
///
/// This function just re-interprets the underlying memory and is thus zero-copy.
pub fn flatten_vector_elements<T, const N: usize>(source: Vec<[T; N]>) -> Vec<T> {
// SAFETY: [T; N] has the same alignment and memory layout as an array of T.
// p3-util's flatten_to_base handles the conversion without reallocations.
unsafe { p3_util::flatten_to_base(source) }
}
// TRANSPOSING (ported from Winterfell's winter-utils)
// ================================================================================================
/// Transposes a slice of `n` elements into a matrix with `N` columns and `n`/`N` rows.
///
/// When `concurrent` feature is enabled, the slice will be transposed using multiple threads.
///
/// # Panics
/// Panics if `n` is not divisible by `N`.
pub fn transpose_slice<T: Copy + Send + Sync, const N: usize>(source: &[T]) -> Vec<[T; N]> {
let row_count = source.len() / N;
assert_eq!(
row_count * N,
source.len(),
"source length must be divisible by {}, but was {}",
N,
source.len()
);
let mut result: Vec<[T; N]> = unsafe { uninit_vector(row_count) };
crate::iter_mut!(result, 1024).enumerate().for_each(|(i, element)| {
for j in 0..N {
element[j] = source[i + j * row_count]
}
});
result
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/ecdh/mod.rs | miden-crypto/src/ecdh/mod.rs | //! ECDH (Elliptic Curve Diffie-Hellman) key agreement implementations.
use alloc::vec::Vec;
use rand::{CryptoRng, RngCore};
use thiserror::Error;
use crate::utils::{
Deserializable, Serializable,
zeroize::{Zeroize, ZeroizeOnDrop},
};
pub mod k256;
pub mod x25519;
// KEY AGREEMENT TRAIT
// ================================================================================================
pub(crate) trait KeyAgreementScheme {
type EphemeralSecretKey: ZeroizeOnDrop;
type EphemeralPublicKey: Serializable + Deserializable;
type SecretKey;
type PublicKey: Clone;
type SharedSecret: AsRef<[u8]> + Zeroize + ZeroizeOnDrop;
/// Returns an ephemeral key pair generated from the provided RNG.
fn generate_ephemeral_keypair<R: CryptoRng + RngCore>(
rng: &mut R,
) -> (Self::EphemeralSecretKey, Self::EphemeralPublicKey);
/// Performs key exchange between ephemeral secret and static public key.
fn exchange_ephemeral_static(
ephemeral_sk: Self::EphemeralSecretKey,
static_pk: &Self::PublicKey,
) -> Result<Self::SharedSecret, KeyAgreementError>;
/// Performs key exchange between static secret and ephemeral public key.
fn exchange_static_ephemeral(
static_sk: &Self::SecretKey,
ephemeral_pk: &Self::EphemeralPublicKey,
) -> Result<Self::SharedSecret, KeyAgreementError>;
/// Extracts key material from shared secret.
fn extract_key_material(
shared_secret: &Self::SharedSecret,
length: usize,
) -> Result<Vec<u8>, KeyAgreementError>;
}
// ERROR TYPES
// ================================================================================================
/// Errors that can occur during encryption/decryption operations
#[derive(Debug, Error)]
pub(crate) enum KeyAgreementError {
#[error("hkdf expansion failed")]
HkdfExpansionFailed,
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/ecdh/k256.rs | miden-crypto/src/ecdh/k256.rs | //! ECDH (Elliptic Curve Diffie-Hellman) key agreement implementation over k256
//! i.e., secp256k1 curve.
//!
//! Note that the intended use is in the context of a one-way, sender initiated key agreement
//! scenario. Namely, when the sender knows the (static) public key of the receiver and it
//! uses that, together with an ephemeral secret key that it generates, to derive a shared
//! secret.
//!
//! This shared secret will then be used to encrypt some message (using for example a key
//! derivation function).
//!
//! The public key associated with the ephemeral secret key will be sent alongside the encrypted
//! message.
use alloc::{string::ToString, vec::Vec};
use hkdf::{Hkdf, hmac::SimpleHmac};
use k256::{AffinePoint, elliptic_curve::sec1::ToEncodedPoint, sha2::Sha256};
use rand::{CryptoRng, RngCore};
use crate::{
dsa::ecdsa_k256_keccak::{PUBLIC_KEY_BYTES, PublicKey, SecretKey},
ecdh::KeyAgreementScheme,
utils::{
ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable,
zeroize::{Zeroize, ZeroizeOnDrop},
},
};
// SHARED SECRET
// ================================================================================================
/// A shared secret computed using the ECDH (Elliptic Curve Diffie-Hellman) key agreement.
///
/// This type implements `ZeroizeOnDrop` because the inner `k256::ecdh::SharedSecret`
/// implements it, ensuring the shared secret is securely wiped from memory when dropped.
pub struct SharedSecret {
pub(crate) inner: k256::ecdh::SharedSecret,
}
impl SharedSecret {
pub(crate) fn new(inner: k256::ecdh::SharedSecret) -> SharedSecret {
Self { inner }
}
/// Returns a HKDF (HMAC-based Extract-and-Expand Key Derivation Function) that can be used
/// to extract entropy from the shared secret.
///
/// This basically converts a shared secret into uniformly random values that are appropriate
/// for use as key material.
pub fn extract(&self, salt: Option<&[u8]>) -> Hkdf<Sha256, SimpleHmac<Sha256>> {
self.inner.extract(salt)
}
}
impl AsRef<[u8]> for SharedSecret {
fn as_ref(&self) -> &[u8] {
self.inner.raw_secret_bytes()
}
}
impl Zeroize for SharedSecret {
/// Securely clears the shared secret from memory.
///
/// # Security
///
/// This implementation follows the same security methodology as the `zeroize` crate to ensure
/// that sensitive cryptographic material is reliably cleared from memory:
///
/// - **Volatile writes**: Uses `ptr::write_volatile` to prevent dead store elimination and
/// other compiler optimizations that might remove the zeroing operation.
/// - **Memory ordering**: Includes a sequentially consistent compiler fence (`SeqCst`) to
/// prevent instruction reordering that could expose the secret data after this function
/// returns.
fn zeroize(&mut self) {
let bytes = self.inner.raw_secret_bytes();
for byte in
unsafe { core::slice::from_raw_parts_mut(bytes.as_ptr() as *mut u8, bytes.len()) }
{
unsafe {
core::ptr::write_volatile(byte, 0u8);
}
}
core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
}
// Safe to derive ZeroizeOnDrop because we implement Zeroize above
impl ZeroizeOnDrop for SharedSecret {}
// EPHEMERAL SECRET KEY
// ================================================================================================
/// Ephemeral secret key for ECDH key agreement over secp256k1 curve.
///
/// This type implements `ZeroizeOnDrop` because the inner `k256::ecdh::EphemeralSecret`
/// implements it, ensuring the secret key material is securely wiped from memory when dropped.
pub struct EphemeralSecretKey {
inner: k256::ecdh::EphemeralSecret,
}
impl EphemeralSecretKey {
/// Generates a new random ephemeral secret key using the OS random number generator.
#[cfg(feature = "std")]
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
let mut rng = rand::rng();
Self::with_rng(&mut rng)
}
/// Generates a new ephemeral secret key using the provided random number generator.
pub fn with_rng<R: CryptoRng + RngCore>(rng: &mut R) -> Self {
// we use a seedable CSPRNG and seed it with `rng`
// this is a work around the fact that the version of the `rand` dependency in our crate
// is different than the one used in the `k256` one. This solution will no longer be needed
// once `k256` gets a new release with a version of the `rand` dependency matching ours
use k256::elliptic_curve::rand_core::SeedableRng;
let mut seed = [0_u8; 32];
rand::RngCore::fill_bytes(rng, &mut seed);
let mut rng = rand_hc::Hc128Rng::from_seed(seed);
let sk_e = k256::ecdh::EphemeralSecret::random(&mut rng);
Self { inner: sk_e }
}
/// Gets the corresponding ephemeral public key for this ephemeral secret key.
pub fn public_key(&self) -> EphemeralPublicKey {
let pk = self.inner.public_key();
EphemeralPublicKey { inner: pk }
}
/// Computes a Diffie-Hellman shared secret from an ephemeral secret key and the (static) public
/// key of the other party.
pub fn diffie_hellman(&self, pk_other: PublicKey) -> SharedSecret {
let shared_secret_inner = self.inner.diffie_hellman(&pk_other.inner.into());
SharedSecret { inner: shared_secret_inner }
}
}
impl ZeroizeOnDrop for EphemeralSecretKey {}
// EPHEMERAL PUBLIC KEY
// ================================================================================================
/// Ephemeral public key for ECDH key agreement over secp256k1 curve.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct EphemeralPublicKey {
pub(crate) inner: k256::PublicKey,
}
impl EphemeralPublicKey {
/// Returns a reference to this ephemeral public key as an elliptic curve point in affine
/// coordinates.
pub fn as_affine(&self) -> &AffinePoint {
self.inner.as_affine()
}
}
impl Serializable for EphemeralPublicKey {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
// Compressed format
let encoded = self.inner.to_encoded_point(true);
target.write_bytes(encoded.as_bytes());
}
}
impl Deserializable for EphemeralPublicKey {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
let bytes: [u8; PUBLIC_KEY_BYTES] = source.read_array()?;
let inner = k256::PublicKey::from_sec1_bytes(&bytes)
.map_err(|_| DeserializationError::InvalidValue("Invalid public key".to_string()))?;
Ok(Self { inner })
}
}
// KEY AGREEMENT TRAIT IMPLEMENTATION
// ================================================================================================
pub struct K256;
impl KeyAgreementScheme for K256 {
type EphemeralSecretKey = EphemeralSecretKey;
type EphemeralPublicKey = EphemeralPublicKey;
type SecretKey = SecretKey;
type PublicKey = PublicKey;
type SharedSecret = SharedSecret;
fn generate_ephemeral_keypair<R: CryptoRng + RngCore>(
rng: &mut R,
) -> (Self::EphemeralSecretKey, Self::EphemeralPublicKey) {
let sk = EphemeralSecretKey::with_rng(rng);
let pk = sk.public_key();
(sk, pk)
}
fn exchange_ephemeral_static(
ephemeral_sk: Self::EphemeralSecretKey,
static_pk: &Self::PublicKey,
) -> Result<Self::SharedSecret, super::KeyAgreementError> {
Ok(ephemeral_sk.diffie_hellman(static_pk.clone()))
}
fn exchange_static_ephemeral(
static_sk: &Self::SecretKey,
ephemeral_pk: &Self::EphemeralPublicKey,
) -> Result<Self::SharedSecret, super::KeyAgreementError> {
Ok(static_sk.get_shared_secret(ephemeral_pk.clone()))
}
fn extract_key_material(
shared_secret: &Self::SharedSecret,
length: usize,
) -> Result<Vec<u8>, super::KeyAgreementError> {
let hkdf = shared_secret.extract(None);
let mut buf = vec![0_u8; length];
hkdf.expand(&[], &mut buf)
.map_err(|_| super::KeyAgreementError::HkdfExpansionFailed)?;
Ok(buf)
}
}
// TESTS
// ================================================================================================
#[cfg(all(test, feature = "std"))]
mod test {
use rand::rng;
use super::{EphemeralPublicKey, EphemeralSecretKey};
use crate::{
dsa::ecdsa_k256_keccak::SecretKey,
utils::{Deserializable, Serializable},
};
#[test]
fn key_agreement() {
let mut rng = rng();
// 1. Generate the static key-pair for Alice
let sk = SecretKey::with_rng(&mut rng);
let pk = sk.public_key();
// 2. Generate the ephemeral key-pair for Bob
let sk_e = EphemeralSecretKey::with_rng(&mut rng);
let pk_e = sk_e.public_key();
// 3. Bob computes the shared secret key (Bob will send pk_e with the encrypted note to
// Alice)
let shared_secret_key_1 = sk_e.diffie_hellman(pk);
// 4. Alice uses its secret key and the ephemeral public key sent with the encrypted note by
// Bob in order to create the shared secret key. This shared secret key will be used to
// decrypt the encrypted note
let shared_secret_key_2 = sk.get_shared_secret(pk_e);
// Check that the computed shared secret keys are equal
assert_eq!(
shared_secret_key_1.inner.raw_secret_bytes(),
shared_secret_key_2.inner.raw_secret_bytes()
);
}
#[test]
fn test_serialization_round_trip() {
let mut rng = rng();
let sk_e = EphemeralSecretKey::with_rng(&mut rng);
let pk_e = sk_e.public_key();
let pk_e_bytes = pk_e.to_bytes();
let pk_e_serialized = EphemeralPublicKey::read_from_bytes(&pk_e_bytes)
.expect("failed to desrialize ephemeral public key");
assert_eq!(pk_e_serialized, pk_e);
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
0xMiden/crypto | https://github.com/0xMiden/crypto/blob/b30552ecceb5f70565cc0267fca227f30c5af7ab/miden-crypto/src/ecdh/x25519.rs | miden-crypto/src/ecdh/x25519.rs | //! X25519 (Elliptic Curve Diffie-Hellman) key agreement implementation using
//! Curve25519.
//!
//! Note that the intended use is in the context of a one-way, sender initiated key agreement
//! scenario. Namely, when the sender knows the (static) public key of the receiver and it
//! uses that, together with an ephemeral secret key that it generates, to derive a shared
//! secret.
//!
//! This shared secret will then be used to encrypt some message (using for example a key
//! derivation function).
//!
//! The public key associated with the ephemeral secret key will be sent alongside the encrypted
//! message.
use alloc::vec::Vec;
use hkdf::{Hkdf, hmac::SimpleHmac};
use k256::sha2::Sha256;
use rand::{CryptoRng, RngCore};
use crate::{
dsa::eddsa_25519_sha512::{PublicKey, SecretKey},
ecdh::KeyAgreementScheme,
utils::{
ByteReader, ByteWriter, Deserializable, DeserializationError, Serializable,
zeroize::{Zeroize, ZeroizeOnDrop},
},
};
// SHARED SECRETE
// ================================================================================================
/// A shared secret computed using the X25519 (Elliptic Curve Diffie-Hellman) key agreement.
///
/// This type implements `ZeroizeOnDrop` because the inner `x25519_dalek::SharedSecret`
/// implements it, ensuring the shared secret is securely wiped from memory when dropped.
pub struct SharedSecret {
pub(crate) inner: x25519_dalek::SharedSecret,
}
impl SharedSecret {
pub(crate) fn new(inner: x25519_dalek::SharedSecret) -> SharedSecret {
Self { inner }
}
/// Returns a HKDF that can be used to derive uniform keys from the shared secret.
pub fn extract(&self, salt: Option<&[u8]>) -> Hkdf<Sha256, SimpleHmac<Sha256>> {
Hkdf::new(salt, self.inner.as_bytes())
}
}
impl Zeroize for SharedSecret {
/// Securely clears the shared secret from memory.
///
/// # Security
///
/// This implementation follows the same security methodology as the `zeroize` crate to ensure
/// that sensitive cryptographic material is reliably cleared from memory:
///
/// - **Volatile writes**: Uses `ptr::write_volatile` to prevent dead store elimination and
/// other compiler optimizations that might remove the zeroing operation.
/// - **Memory ordering**: Includes a sequentially consistent compiler fence (`SeqCst`) to
/// prevent instruction reordering that could expose the secret data after this function
/// returns.
fn zeroize(&mut self) {
let bytes = self.inner.as_bytes();
for byte in
unsafe { core::slice::from_raw_parts_mut(bytes.as_ptr() as *mut u8, bytes.len()) }
{
unsafe {
core::ptr::write_volatile(byte, 0u8);
}
}
core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
}
}
// Safe to derive ZeroizeOnDrop because we implement Zeroize above
impl ZeroizeOnDrop for SharedSecret {}
impl AsRef<[u8]> for SharedSecret {
fn as_ref(&self) -> &[u8] {
self.inner.as_bytes()
}
}
// EPHEMERAL SECRET KEY
// ================================================================================================
/// Ephemeral secret key for X25519 key agreement.
///
/// This type implements `ZeroizeOnDrop` because the inner `x25519_dalek::EphemeralSecret`
/// implements it, ensuring the secret key material is securely wiped from memory when dropped.
pub struct EphemeralSecretKey {
inner: x25519_dalek::EphemeralSecret,
}
impl ZeroizeOnDrop for EphemeralSecretKey {}
impl EphemeralSecretKey {
/// Generates a new random ephemeral secret key using the OS random number generator.
#[cfg(feature = "std")]
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
let mut rng = rand::rng();
Self::with_rng(&mut rng)
}
/// Generates a new random ephemeral secret key using the provided RNG.
pub fn with_rng<R: CryptoRng + RngCore>(rng: &mut R) -> Self {
// we use a seedable CSPRNG and seed it with `rng`
// this is a work around the fact that the version of the `rand` dependency in our crate
// is different than the one used in the `x25519_dalek` one. This solution will no longer be
// needed once `x25519_dalek` gets a new release with a version of the `rand`
// dependency matching ours
use k256::elliptic_curve::rand_core::SeedableRng;
let mut seed = [0_u8; 32];
rand::RngCore::fill_bytes(rng, &mut seed);
let rng = rand_hc::Hc128Rng::from_seed(seed);
let sk = x25519_dalek::EphemeralSecret::random_from_rng(rng);
Self { inner: sk }
}
/// Returns the corresponding ephemeral public key.
pub fn public_key(&self) -> EphemeralPublicKey {
EphemeralPublicKey {
inner: x25519_dalek::PublicKey::from(&self.inner),
}
}
/// Computes a Diffie-Hellman shared secret from this ephemeral secret key and the other party's
/// static public key.
pub fn diffie_hellman(self, pk_other: &PublicKey) -> SharedSecret {
let shared = self.inner.diffie_hellman(&pk_other.to_x25519());
SharedSecret::new(shared)
}
}
// EPHEMERAL PUBLIC KEY
// ================================================================================================
/// Ephemeral public key for X25519 agreement.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct EphemeralPublicKey {
pub(crate) inner: x25519_dalek::PublicKey,
}
impl Serializable for EphemeralPublicKey {
fn write_into<W: ByteWriter>(&self, target: &mut W) {
target.write_bytes(self.inner.as_bytes());
}
}
impl Deserializable for EphemeralPublicKey {
fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
let bytes: [u8; 32] = source.read_array()?;
Ok(Self {
inner: x25519_dalek::PublicKey::from(bytes),
})
}
}
// KEY AGREEMENT TRAIT IMPLEMENTATION
// ================================================================================================
pub struct X25519;
impl KeyAgreementScheme for X25519 {
type EphemeralSecretKey = EphemeralSecretKey;
type EphemeralPublicKey = EphemeralPublicKey;
type SecretKey = SecretKey;
type PublicKey = PublicKey;
type SharedSecret = SharedSecret;
fn generate_ephemeral_keypair<R: CryptoRng + RngCore>(
rng: &mut R,
) -> (Self::EphemeralSecretKey, Self::EphemeralPublicKey) {
let sk = EphemeralSecretKey::with_rng(rng);
let pk = sk.public_key();
(sk, pk)
}
fn exchange_ephemeral_static(
ephemeral_sk: Self::EphemeralSecretKey,
static_pk: &Self::PublicKey,
) -> Result<Self::SharedSecret, super::KeyAgreementError> {
Ok(ephemeral_sk.diffie_hellman(static_pk))
}
fn exchange_static_ephemeral(
static_sk: &Self::SecretKey,
ephemeral_pk: &Self::EphemeralPublicKey,
) -> Result<Self::SharedSecret, super::KeyAgreementError> {
Ok(static_sk.get_shared_secret(ephemeral_pk.clone()))
}
fn extract_key_material(
shared_secret: &Self::SharedSecret,
length: usize,
) -> Result<Vec<u8>, super::KeyAgreementError> {
let hkdf = shared_secret.extract(None);
let mut buf = vec![0_u8; length];
hkdf.expand(&[], &mut buf)
.map_err(|_| super::KeyAgreementError::HkdfExpansionFailed)?;
Ok(buf)
}
}
// TESTS
// ================================================================================================
#[cfg(all(test, feature = "std"))]
mod tests {
use rand::rng;
use super::*;
use crate::dsa::eddsa_25519_sha512::SecretKey;
#[test]
fn key_agreement() {
let mut rng = rng();
// 1. Generate the static key-pair for Alice
let sk = SecretKey::with_rng(&mut rng);
let pk = sk.public_key();
// 2. Generate the ephemeral key-pair for Bob
let sk_e = EphemeralSecretKey::with_rng(&mut rng);
let pk_e = sk_e.public_key();
// 3. Bob computes the shared secret key (Bob will send pk_e with the encrypted note to
// Alice)
let shared_secret_key_1 = sk_e.diffie_hellman(&pk);
// 4. Alice uses its secret key and the ephemeral public key sent with the encrypted note by
// Bob in order to create the shared secret key. This shared secret key will be used to
// decrypt the encrypted note
let shared_secret_key_2 = sk.get_shared_secret(pk_e);
// Check that the computed shared secret keys are equal
assert_eq!(shared_secret_key_1.inner.to_bytes(), shared_secret_key_2.inner.to_bytes());
}
}
| rust | Apache-2.0 | b30552ecceb5f70565cc0267fca227f30c5af7ab | 2026-01-04T20:24:48.363198Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.