repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch05/mod_ex2/src/b.rs | ch05/mod_ex2/src/b.rs | mod b_1;
mod b_2;
struct TypeB;
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch05/mod_ex2/src/main.rs | ch05/mod_ex2/src/main.rs | mod a;
mod b;
fn main() {}
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch05/mod_ex2/src/a.rs | ch05/mod_ex2/src/a.rs | mod a_1;
mod a_2;
struct TypeA;
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch05/mod_ex2/src/a/a_2.rs | ch05/mod_ex2/src/a/a_2.rs | struct TypeA2;
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch05/mod_ex2/src/a/a_1.rs | ch05/mod_ex2/src/a/a_1.rs | struct TypeA1;
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch05/mod_ex2/src/b/b_1.rs | ch05/mod_ex2/src/b/b_1.rs | struct TypeB1;
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch05/mod_ex2/src/b/b_2.rs | ch05/mod_ex2/src/b/b_2.rs | struct TypeB2;
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch05/visibility/src/main.rs | ch05/visibility/src/main.rs | mod a {
struct TypeA {
// a1: a_1::TypeA1, // エラー。子のプライベートな要素は見えない
a2: Box<a_2::TypeA2>, // 子のパブリックな要素は見える
}
mod a_1 {
struct TypeA1 {
// 親が見えるものは見える
a: Box<super::TypeA>,
a2: Box<super::a_2::TypeA2>,
}
}
mod a_2 {
pub struct TypeA2 {
// 親が見えるものは見える
a: Box<super::TypeA>,
// a1: super::a_1::TypeA1, // エラー。親の見えないものは見えない
}
}
}
mod b {
pub struct TypeB;
mod b_1 {
pub struct TypeB1 {
pub n: usize,
m: usize,
}
impl TypeB1 {
fn g(&self) {}
pub fn h(&self) {}
}
fn f1(p: &super::b_1::TypeB1) {
println!("{}", p.n);
println!("{}", p.m);
p.g();
p.h();
}
}
pub mod b_2 {
pub struct TypeB2;
fn f2(p: &super::b_1::TypeB1) {
println!("{}", p.n);
// println!("{}", p.m); // エラー。mはプライベート
// p.g(); // エラー。gはプライベート
p.h();
}
}
}
mod c {
mod c_1_outer {
pub mod c_1_inner {
pub(crate) struct TypeC1; // 同じクレート内からのみ見える
pub(super) struct TypeC2; // 親モジュールからのみ見える
pub(in crate::c::c_1_outer) struct TypeC3; // 親モジュールからのみ見える
pub(self) struct TypeC4; // プライベートと同義
}
fn f() {
let p1 = c_1_inner::TypeC1;
let p2 = c_1_inner::TypeC2;
let p3 = c_1_inner::TypeC3;
// let p4 = c_1_inner::TypeC4; // エラー。プライベートなので見えない
}
}
fn g() {
let p1 = c_1_outer::c_1_inner::TypeC1;
// let p2 = c_1_outer::c_1_inner::TypeC2; // エラー
// let p3 = c_1_outer::c_1_inner::TypeC3; // エラー
// let p4 = c_1_outer::c_1_inner::TypeC4; // エラー
}
}
mod d {
pub struct TypeD;
}
mod e {
pub use crate::d::TypeD;
}
fn main() {
// let a = a::TypeA; // エラー。子のプライベートな要素は見えない
let b = b::TypeB; // 子のパブリックな要素は見える
//let b1 = b::b_1::TypeB1; // 子のプライベートな要素なモジュールb_1は見えない
let b2 = b::b_2::TypeB2; // パブリックな孫b_2のパブリックな要素TypeB2は見える
let e = e::TypeD; // 再エクスポートされた型を利用
}
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch05/markdown/src/lib.rs | ch05/markdown/src/lib.rs | //! # 第一見出し
//!
//! テキストを書く。
//!
//! ## 第二見出し
//!
//! ### 第三見出し
//!
//! - 箇条書き1
//! - 箇条書き2
//!
//! 1. 番号付きリスト1
//! 2. 番号付きリスト2
//!
//! > 引用
//! > 文字列
//!
//! [KSPUB](https://www.kspub.co.jp/)
//!
//! `println!("Hello, world!");`
//!
//! ```
//! println!("Hello, world!");
//! ```
//!
mod my_module {
//! これはモジュールのドキュメントです。
//!
//! # 利用例
}
/// my_funcは私独自の関数です。
///
/// # 利用例
///
/// ```
/// use markdown::my_func;
/// let n = my_func().unwrap();
/// ```
pub fn my_func() -> Option<u32> {
Some(100)
}
/// nの一つ前の数字を返す
/// nが0の場合はNoneを返す
pub fn pred(n: u32) -> Option<u32> {
if n == 0 {
None
} else {
Some(n - 1)
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_my_func() {
assert_eq!(my_func(), Some(100));
}
#[test]
#[should_panic]
fn test_pred() {
pred(0).unwrap();
}
}
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch04/serialize/src/main.rs | ch04/serialize/src/main.rs | use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
enum List<T> {
Node { data: T, next: Box<List<T>> },
Nil,
}
impl<T> List<T> {
fn new() -> List<T> {
List::Nil
}
/// リストを消費して、そのリストの先頭にdataを追加したリストを返す
fn cons(self, data: T) -> List<T> {
List::Node {
data,
next: Box::new(self),
}
}
}
fn main() {
// リストを生成
let list = List::new().cons(1).cons(2).cons(3);
// JSONにシリアライズ
let js = serde_json::to_string(&list).unwrap();
println!("JSON: {} bytes", js.len());
println!("{js}");
// YAMLにシリアライズ
let yml = serde_yaml::to_string(&list).unwrap();
println!("YAML: {} bytes", yml.len());
println!("{yml}");
// MessagePackにシリアライズ
let msgpack = rmp_serde::to_vec(&list).unwrap();
println!("MessagePack: {} bytes", msgpack.len());
// JSONからデシリアライズ
let list = serde_json::from_str::<List<i32>>(&js).unwrap();
println!("{:?}", list);
// YAMLからデシリアライズ
let list = serde_yaml::from_str::<List<i32>>(&yml).unwrap();
println!("{:?}", list);
// MessagePackからデシリアライズ
let list = rmp_serde::from_slice::<List<i32>>(&msgpack).unwrap();
println!("{:?}", list);
write_to_file();
read_from_file();
}
fn write_to_file() {
use std::{fs::File, io::prelude::*, path::Path};
// リストを生成し、YAMLにシリアライズ
let list = List::new().cons(1).cons(2).cons(3);
let yml = serde_yaml::to_string(&list).unwrap();
// ファイルに書き込み
let path = Path::new("test.yml");
let mut f = File::create(path).unwrap(); // 新規ファイルを生成
f.write_all(yml.as_bytes()).unwrap();
}
fn read_from_file() {
use std::{fs::File, io::prelude::*, path::Path};
// ファイルからYAML読み込み
let path = Path::new("test.yml");
let mut f = File::open(path).unwrap(); // 既存のファイルをオープン
let mut yml = String::new();
f.read_to_string(&mut yml).unwrap();
// YAMLからデシリアライズ
let list = serde_yaml::from_str::<List<i32>>(&yml).unwrap();
println!("{:?}", list);
}
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch04/display/src/main.rs | ch04/display/src/main.rs | fn main() {
use std::fmt::{Display, Formatter};
/// 虚数を表す型
struct ImaginaryNumber {
real: f64,
img: f64,
}
/// 虚数を表示するため、Displayトレイトを実装
impl Display for ImaginaryNumber {
fn fmt(&self, f: &mut Formatter) -> Result<(), std::fmt::Error> {
write!(f, "{} + {}i", self.real, self.img)
}
}
let n = ImaginaryNumber {
real: 3.0,
img: 4.0,
};
println!("{n}");
}
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch04/iter/src/main.rs | ch04/iter/src/main.rs | use std::iter::Iterator;
/// リストを表す型
#[derive(Debug, Clone)]
enum List<T> {
Node { data: T, next: Box<List<T>> },
Nil,
}
impl<T> List<T> {
fn new() -> List<T> {
List::Nil
}
/// リストを消費して、そのリストの先頭にdataを追加したリストを返す
fn cons(self, data: T) -> List<T> {
List::Node {
data,
next: Box::new(self),
}
}
/// 不変イテレータを返す
fn iter<'a>(&'a self) -> ListIter<'a, T> {
ListIter { elm: self }
}
}
/// 不変イテレータを表す型
struct ListIter<'a, T> {
elm: &'a List<T>,
}
impl<'a, T> Iterator for ListIter<'a, T> {
type Item = &'a T;
/// 次の要素を指す
fn next(&mut self) -> Option<Self::Item> {
match self.elm {
List::Node { data, next } => {
self.elm = next;
Some(data)
}
List::Nil => None,
}
}
}
fn main() {
// [2, 1, 0]というリストを生成
let list = List::new().cons(0).cons(1).cons(2);
// forで表示
for x in list.iter() {
println!("{x}");
}
println!();
// イテレータで表示
let mut it = list.iter();
println!("{:?}", it.next().unwrap());
println!("{:?}", it.next().unwrap());
println!("{:?}", it.next().unwrap());
}
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch06/regex/src/engine.rs | ch06/regex/src/engine.rs | //! 正規表現エンジン
mod codegen;
mod evaluator;
mod parser;
use crate::helper::DynError;
use std::fmt::{self, Display};
/// 命令列
#[derive(Debug)]
pub enum Instruction {
Char(char),
Match,
Jump(usize),
Split(usize, usize),
}
impl Display for Instruction {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Instruction::Char(c) => write!(f, "char {}", c),
Instruction::Match => write!(f, "match"),
Instruction::Jump(addr) => write!(f, "jump {:>04}", addr),
Instruction::Split(addr1, addr2) => write!(f, "split {:>04}, {:>04}", addr1, addr2),
}
}
}
/// 正規表現をパースしてコード生成し、
/// ASTと命令列を標準出力に表示。
///
/// # 利用例
///
/// ```
/// use regex;
/// regex::print("abc|(de|cd)+");
/// ```
///
/// # 返り値
///
/// 入力された正規表現にエラーがあったり、内部的な実装エラーがある場合はErrを返す。
pub fn print(expr: &str) -> Result<(), DynError> {
println!("expr: {expr}");
let ast = parser::parse(expr)?;
println!("AST: {:?}", ast);
println!();
println!("code:");
let code = codegen::get_code(&ast)?;
for (n, c) in code.iter().enumerate() {
println!("{:>04}: {c}", n);
}
Ok(())
}
/// 正規表現と文字列をマッチング。
///
/// # 利用例
///
/// ```
/// use regex;
/// regex::do_matching("abc|(de|cd)+", "decddede", true);
/// ```
///
/// # 引数
///
/// exprに正規表現、lineにマッチ対象とする文字列を与える。
/// is_depthがtrueの場合は深さ優先探索を、falseの場合は幅優先探索を利用。
///
/// # 返り値
///
/// エラーなく実行でき、かつマッチングに**成功**した場合はOk(true)を返し、
/// エラーなく実行でき、かつマッチングに**失敗**した場合はOk(false)を返す。
///
/// 入力された正規表現にエラーがあったり、内部的な実装エラーがある場合はErrを返す。
pub fn do_matching(expr: &str, line: &str, is_depth: bool) -> Result<bool, DynError> {
let ast = parser::parse(expr)?;
let code = codegen::get_code(&ast)?;
let line = line.chars().collect::<Vec<char>>();
Ok(evaluator::eval(&code, &line, is_depth)?)
}
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch06/regex/src/lib.rs | ch06/regex/src/lib.rs | //! # 正規表現エンジン用クレート。
//!
//! ## 利用例
//!
//! ```
//! use regex;
//! let expr = "a(bc)+|c(def)*"; // 正規表現
//! let line = "cdefdefdef"; // マッチ対象文字列
//! regex::do_matching(expr, line, true); // 幅優先探索でマッチング
//! regex::print(expr); // 正規表現のASTと命令列を表示
//! ```
mod engine;
mod helper;
pub use engine::{do_matching, print};
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch06/regex/src/helper.rs | ch06/regex/src/helper.rs | pub trait SafeAdd: Sized {
fn safe_add(&self, n: &Self) -> Option<Self>;
}
impl SafeAdd for usize {
fn safe_add(&self, n: &Self) -> Option<Self> {
self.checked_add(*n)
}
}
pub fn safe_add<T, F, E>(dst: &mut T, src: &T, f: F) -> Result<(), E>
where
T: SafeAdd,
F: Fn() -> E,
{
if let Some(n) = dst.safe_add(src) {
*dst = n;
Ok(())
} else {
Err(f())
}
}
pub type DynError = Box<dyn std::error::Error + Send + Sync + 'static>;
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch06/regex/src/main.rs | ch06/regex/src/main.rs | mod engine;
mod helper;
use helper::DynError;
use std::{
env,
fs::File,
io::{BufRead, BufReader},
};
fn main() -> Result<(), DynError> {
let args: Vec<String> = env::args().collect();
if args.len() <= 2 {
eprintln!("usage: {} regex file", args[0]);
return Err("invalid arguments".into());
} else {
match_file(&args[1], &args[2])?;
}
Ok(())
}
/// ファイルをオープンし、行ごとにマッチングを行う。
///
/// マッチングはそれぞれの行頭から1文字ずつずらして行い、
/// いずれかにマッチした場合に、その行がマッチしたものとみなす。
///
/// たとえば、abcdという文字列があった場合、以下の順にマッチが行われ、
/// このいずれかにマッチした場合、与えられた正規表現にマッチする行と判定する。
///
/// - abcd
/// - bcd
/// - cd
/// - d
fn match_file(expr: &str, file: &str) -> Result<(), DynError> {
let f = File::open(file)?;
let reader = BufReader::new(f);
engine::print(expr)?;
println!();
for line in reader.lines() {
let line = line?;
for (i, _) in line.char_indices() {
if engine::do_matching(expr, &line[i..], true)? {
println!("{line}");
break;
}
}
}
Ok(())
}
// 単体テスト。プライベート関数もテスト可能
#[cfg(test)]
mod tests {
use crate::{
engine::do_matching,
helper::{safe_add, SafeAdd},
};
#[test]
fn test_safe_add() {
let n: usize = 10;
assert_eq!(Some(30), n.safe_add(&20));
let n: usize = !0; // 2^64 - 1 (64 bits CPU)
assert_eq!(None, n.safe_add(&1));
let mut n: usize = 10;
assert!(safe_add(&mut n, &20, || ()).is_ok());
let mut n: usize = !0;
assert!(safe_add(&mut n, &1, || ()).is_err());
}
#[test]
fn test_matching() {
// パースエラー
assert!(do_matching("+b", "bbb", true).is_err());
assert!(do_matching("*b", "bbb", true).is_err());
assert!(do_matching("|b", "bbb", true).is_err());
assert!(do_matching("?b", "bbb", true).is_err());
// パース成功、マッチ成功
assert!(do_matching("abc|def", "def", true).unwrap());
assert!(do_matching("(abc)*", "abcabc", true).unwrap());
assert!(do_matching("(ab|cd)+", "abcdcd", true).unwrap());
assert!(do_matching("abc?", "ab", true).unwrap());
assert!(do_matching("((((a*)*)*)*)", "aaaaaaaaa", true).unwrap());
assert!(do_matching("(a*)*b", "aaaaaaaaab", true).unwrap());
assert!(do_matching("(a*)*b", "b", true).unwrap());
assert!(do_matching("a**b", "aaaaaaaaab", true).unwrap());
assert!(do_matching("a**b", "b", true).unwrap());
// パース成功、マッチ失敗
assert!(!do_matching("abc|def", "efa", true).unwrap());
assert!(!do_matching("(ab|cd)+", "", true).unwrap());
assert!(!do_matching("abc?", "acb", true).unwrap());
}
}
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch06/regex/src/engine/parser.rs | ch06/regex/src/engine/parser.rs | //! 正規表現の式をパースし、抽象構文木に変換
use std::{
error::Error,
fmt::{self, Display},
mem::take,
};
/// パースエラーを表すための型
#[derive(Debug)]
pub enum ParseError {
InvalidEscape(usize, char), // 誤ったエスケープシーケンス
InvalidRightParen(usize), // 左開き括弧無し
NoPrev(usize), // +、|、*、?の前に式がない
NoRightParen, // 右閉じ括弧無し
Empty, // 空のパターン
}
/// パースエラーを表示するために、Displayトレイトを実装
impl Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ParseError::InvalidEscape(pos, c) => {
write!(f, "ParseError: invalid escape: pos = {pos}, char = '{c}'")
}
ParseError::InvalidRightParen(pos) => {
write!(f, "ParseError: invalid right parenthesis: pos = {pos}")
}
ParseError::NoPrev(pos) => {
write!(f, "ParseError: no previous expression: pos = {pos}")
}
ParseError::NoRightParen => {
write!(f, "ParseError: no right parenthesis")
}
ParseError::Empty => write!(f, "ParseError: empty expression"),
}
}
}
impl Error for ParseError {} // エラー用に、Errorトレイトを実装
/// 抽象構文木を表現するための型
#[derive(Debug)]
pub enum AST {
Char(char),
Plus(Box<AST>),
Star(Box<AST>),
Question(Box<AST>),
Or(Box<AST>, Box<AST>),
Seq(Vec<AST>),
}
/// parse_plus_star_question関数で利用するための列挙型
enum PSQ {
Plus,
Star,
Question,
}
/// 正規表現を抽象構文木に変換
pub fn parse(expr: &str) -> Result<AST, ParseError> {
// 内部状態を表現するための型
// Char状態 : 文字列処理中
// Escape状態 : エスケープシーケンス処理中
enum ParseState {
Char,
Escape,
}
let mut seq = Vec::new(); // 現在のSeqのコンテキスト
let mut seq_or = Vec::new(); // 現在のOrのコンテキスト
let mut stack = Vec::new(); // コンテキストのスタック
let mut state = ParseState::Char; // 現在の状態
for (i, c) in expr.chars().enumerate() {
match &state {
ParseState::Char => {
match c {
'+' => parse_plus_star_question(&mut seq, PSQ::Plus, i)?,
'*' => parse_plus_star_question(&mut seq, PSQ::Star, i)?,
'?' => parse_plus_star_question(&mut seq, PSQ::Question, i)?,
'(' => {
// 現在のコンテキストをスタックに追加し、
// 現在のコンテキストを空の状態にする
let prev = take(&mut seq);
let prev_or = take(&mut seq_or);
stack.push((prev, prev_or));
}
')' => {
// 現在のコンテキストをスタックからポップ
if let Some((mut prev, prev_or)) = stack.pop() {
// "()"のように式が空の場合はpushしない
if !seq.is_empty() {
seq_or.push(AST::Seq(seq));
}
// Orを生成
if let Some(ast) = fold_or(seq_or) {
prev.push(ast);
}
// 以前のコンテキストを、現在のコンテキストにする
seq = prev;
seq_or = prev_or;
} else {
// "abc)"のように、開き括弧がないのに閉じ括弧がある場合はエラー
return Err(ParseError::InvalidRightParen(i));
}
}
'|' => {
if seq.is_empty() {
// "||", "(|abc)"などと、式が空の場合はエラー
return Err(ParseError::NoPrev(i));
} else {
let prev = take(&mut seq);
seq_or.push(AST::Seq(prev));
}
}
'\\' => state = ParseState::Escape,
_ => seq.push(AST::Char(c)),
};
}
ParseState::Escape => {
// エスケープシーケンス処理
let ast = parse_escape(i, c)?;
seq.push(ast);
state = ParseState::Char;
}
}
}
// 閉じ括弧が足りない場合はエラー
if !stack.is_empty() {
return Err(ParseError::NoRightParen);
}
// "()"のように式が空の場合はpushしない
if !seq.is_empty() {
seq_or.push(AST::Seq(seq));
}
// Orを生成し、成功した場合はそれを返す
if let Some(ast) = fold_or(seq_or) {
Ok(ast)
} else {
Err(ParseError::Empty)
}
}
/// +、*、?をASTに変換
///
/// 後置記法で、+、*、?の前にパターンがない場合はエラー
///
/// 例 : *ab、abc|+などはエラー
fn parse_plus_star_question(
seq: &mut Vec<AST>,
ast_type: PSQ,
pos: usize,
) -> Result<(), ParseError> {
if let Some(prev) = seq.pop() {
let ast = match ast_type {
PSQ::Plus => AST::Plus(Box::new(prev)),
PSQ::Star => AST::Star(Box::new(prev)),
PSQ::Question => AST::Question(Box::new(prev)),
};
seq.push(ast);
Ok(())
} else {
Err(ParseError::NoPrev(pos))
}
}
/// 特殊文字のエスケープ
fn parse_escape(pos: usize, c: char) -> Result<AST, ParseError> {
match c {
'\\' | '(' | ')' | '|' | '+' | '*' | '?' => Ok(AST::Char(c)),
_ => {
let err = ParseError::InvalidEscape(pos, c);
Err(err)
}
}
}
/// orで結合された複数の式をASTに変換
///
/// たとえば、abc|def|ghi は、AST::Or("abc", AST::Or("def", "ghi"))というASTとなる
fn fold_or(mut seq_or: Vec<AST>) -> Option<AST> {
if seq_or.len() > 1 {
// seq_orの要素が複数ある場合は、Orで式を結合
let mut ast = seq_or.pop().unwrap();
seq_or.reverse();
for s in seq_or {
ast = AST::Or(Box::new(s), Box::new(ast));
}
Some(ast)
} else {
// seq_orの要素が一つのみの場合は、Orではなく、最初の値を返す
seq_or.pop()
}
}
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch06/regex/src/engine/codegen.rs | ch06/regex/src/engine/codegen.rs | //! ASTからコード生成を行う
use super::{parser::AST, Instruction};
use crate::helper::safe_add;
use std::{
error::Error,
fmt::{self, Display},
};
/// コード生成エラーを表す型
#[derive(Debug)]
pub enum CodeGenError {
PCOverFlow,
FailStar,
FailOr,
FailQuestion,
}
impl Display for CodeGenError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "CodeGenError: {:?}", self)
}
}
impl Error for CodeGenError {}
/// コード生成器
#[derive(Default, Debug)]
struct Generator {
pc: usize,
insts: Vec<Instruction>,
}
/// コード生成を行う関数
pub fn get_code(ast: &AST) -> Result<Vec<Instruction>, CodeGenError> {
let mut generator = Generator::default();
generator.gen_code(ast)?;
Ok(generator.insts)
}
/// コード生成器のメソッド定義
impl Generator {
/// コード生成を行う関数の入り口
fn gen_code(&mut self, ast: &AST) -> Result<(), CodeGenError> {
self.gen_expr(ast)?;
self.inc_pc()?;
self.insts.push(Instruction::Match);
Ok(())
}
/// ASTをパターン分けしコード生成を行う関数
fn gen_expr(&mut self, ast: &AST) -> Result<(), CodeGenError> {
match ast {
AST::Char(c) => self.gen_char(*c)?,
AST::Or(e1, e2) => self.gen_or(e1, e2)?,
AST::Plus(e) => self.gen_plus(e)?,
AST::Star(e1) => {
match &**e1 {
// `(a*)*`のように`Star`が二重となっている場合にスタックオーバーフローする問題を回避するため、
// このような`(((r*)*)*...*)*`を再帰的に処理して1つの`r*`へと変換する。
AST::Star(_) => self.gen_expr(&e1)?,
AST::Seq(e2) if e2.len() == 1 => {
if let Some(e3 @ AST::Star(_)) = e2.get(0) {
self.gen_expr(e3)?
} else {
self.gen_star(e1)?
}
}
e => self.gen_star(&e)?,
}
}
AST::Question(e) => self.gen_question(e)?,
AST::Seq(v) => self.gen_seq(v)?,
}
Ok(())
}
/// char命令生成関数
fn gen_char(&mut self, c: char) -> Result<(), CodeGenError> {
let inst = Instruction::Char(c);
self.insts.push(inst);
self.inc_pc()?;
Ok(())
}
/// OR演算子のコード生成器。
///
/// 以下のようなコードを生成。
///
/// ```text
/// split L1, L2
/// L1: e1のコード
/// jmp L3
/// L2: e2のコード
/// L3:
/// ```
fn gen_or(&mut self, e1: &AST, e2: &AST) -> Result<(), CodeGenError> {
// split L1, L2
let split_addr = self.pc;
self.inc_pc()?;
let split = Instruction::Split(self.pc, 0); // L1 = self.pc。L2は仮に0と設定
self.insts.push(split);
// L1: e1のコード
self.gen_expr(e1)?;
// jmp L3
let jmp_addr = self.pc;
self.insts.push(Instruction::Jump(0)); // L3を仮に0と設定
// L2の値を設定
self.inc_pc()?;
if let Some(Instruction::Split(_, l2)) = self.insts.get_mut(split_addr) {
*l2 = self.pc;
} else {
return Err(CodeGenError::FailOr);
}
// L2: e2のコード
self.gen_expr(e2)?;
// L3の値を設定
if let Some(Instruction::Jump(l3)) = self.insts.get_mut(jmp_addr) {
*l3 = self.pc;
} else {
return Err(CodeGenError::FailOr);
}
Ok(())
}
/// ?限量子のコード生成器。
///
/// 以下のようなコードを生成
///
/// ```text
/// split L1, L2
/// L1: eのコード
/// L2:
/// ```
fn gen_question(&mut self, e: &AST) -> Result<(), CodeGenError> {
// split L1, L2
let split_addr = self.pc;
self.inc_pc()?;
let split = Instruction::Split(self.pc, 0); // self.pcがL1。L2を仮に0と設定
self.insts.push(split);
// L1: eのコード
self.gen_expr(e)?;
// L2の値を設定
if let Some(Instruction::Split(_, l2)) = self.insts.get_mut(split_addr) {
*l2 = self.pc;
Ok(())
} else {
Err(CodeGenError::FailQuestion)
}
}
/// 以下のようなコードを生成
///
/// ```text
/// L1: eのコード
/// split L1, L2
/// L2:
/// ```
fn gen_plus(&mut self, e: &AST) -> Result<(), CodeGenError> {
// L1: eのコード
let l1 = self.pc;
self.gen_expr(e)?;
// split L1, L2
self.inc_pc()?;
let split = Instruction::Split(l1, self.pc); // self.pcがL2
self.insts.push(split);
Ok(())
}
/// *限量子のコード生成器。
///
/// 以下のようなコードを生成
///
/// ```text
/// L1: split L2, L3
/// L2: eのコード
/// jump L1
/// L3:
/// ```
fn gen_star(&mut self, e: &AST) -> Result<(), CodeGenError> {
// L1: split L2, L3
let l1 = self.pc;
self.inc_pc()?;
let split = Instruction::Split(self.pc, 0); // self.pcがL2。L3を仮に0と設定
self.insts.push(split);
// L2: eのコード
self.gen_expr(e)?;
// jump L1
self.inc_pc()?;
self.insts.push(Instruction::Jump(l1));
// L3の値を設定
if let Some(Instruction::Split(_, l3)) = self.insts.get_mut(l1) {
*l3 = self.pc;
Ok(())
} else {
Err(CodeGenError::FailStar)
}
}
/// 連続する正規表現のコード生成
fn gen_seq(&mut self, exprs: &[AST]) -> Result<(), CodeGenError> {
for e in exprs {
self.gen_expr(e)?;
}
Ok(())
}
/// プログラムカウンタをインクリメント
fn inc_pc(&mut self) -> Result<(), CodeGenError> {
safe_add(&mut self.pc, &1, || CodeGenError::PCOverFlow)
}
}
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch06/regex/src/engine/evaluator.rs | ch06/regex/src/engine/evaluator.rs | //! 命令列と入力文字列を受け取り、マッチングを行う
use super::Instruction;
use crate::helper::safe_add;
use std::{
collections::VecDeque,
error::Error,
fmt::{self, Display},
};
#[derive(Debug)]
pub enum EvalError {
PCOverFlow,
SPOverFlow,
InvalidPC,
InvalidContext,
}
impl Display for EvalError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "CodeGenError: {:?}", self)
}
}
impl Error for EvalError {}
/// 命令列の評価を行う関数。
///
/// instが命令列となり、その命令列を用いて入力文字列lineにマッチさせる。
/// is_depthがtrueの場合に深さ優先探索を、falseの場合に幅優先探索を行う。
///
/// 実行時エラーが起きた場合はErrを返す。
/// マッチ成功時はOk(true)を、失敗時はOk(false)を返す。
pub fn eval(inst: &[Instruction], line: &[char], is_depth: bool) -> Result<bool, EvalError> {
if is_depth {
eval_depth(inst, line, 0, 0)
} else {
eval_width(inst, line)
}
}
/// 深さ優先探索で再帰的にマッチングを行う評価器
fn eval_depth(
inst: &[Instruction],
line: &[char],
mut pc: usize,
mut sp: usize,
) -> Result<bool, EvalError> {
loop {
let next = if let Some(i) = inst.get(pc) {
i
} else {
return Err(EvalError::InvalidPC);
};
match next {
Instruction::Char(c) => {
if let Some(sp_c) = line.get(sp) {
if c == sp_c {
safe_add(&mut pc, &1, || EvalError::PCOverFlow)?;
safe_add(&mut sp, &1, || EvalError::SPOverFlow)?;
} else {
return Ok(false);
}
} else {
return Ok(false);
}
}
Instruction::Match => {
return Ok(true);
}
Instruction::Jump(addr) => {
pc = *addr;
}
Instruction::Split(addr1, addr2) => {
if eval_depth(inst, line, *addr1, sp)? || eval_depth(inst, line, *addr2, sp)? {
return Ok(true);
} else {
return Ok(false);
}
}
}
}
}
fn pop_ctx(
pc: &mut usize,
sp: &mut usize,
ctx: &mut VecDeque<(usize, usize)>,
) -> Result<(), EvalError> {
if let Some((p, s)) = ctx.pop_back() {
*pc = p;
*sp = s;
Ok(())
} else {
Err(EvalError::InvalidContext)
}
}
/// 幅優先探索で再帰的にマッチングを行う評価器
fn eval_width(inst: &[Instruction], line: &[char]) -> Result<bool, EvalError> {
let mut ctx = VecDeque::new();
let mut pc = 0;
let mut sp = 0;
loop {
let next = if let Some(i) = inst.get(pc) {
i
} else {
return Err(EvalError::InvalidPC);
};
match next {
Instruction::Char(c) => {
if let Some(sp_c) = line.get(sp) {
if c == sp_c {
safe_add(&mut pc, &1, || EvalError::PCOverFlow)?;
safe_add(&mut sp, &1, || EvalError::SPOverFlow)?;
} else {
if ctx.is_empty() {
return Ok(false);
} else {
pop_ctx(&mut pc, &mut sp, &mut ctx)?;
}
}
} else {
if ctx.is_empty() {
return Ok(false);
} else {
pop_ctx(&mut pc, &mut sp, &mut ctx)?;
}
}
}
Instruction::Match => {
return Ok(true);
}
Instruction::Jump(addr) => {
pc = *addr;
}
Instruction::Split(addr1, addr2) => {
pc = *addr1;
ctx.push_back((*addr2, sp));
continue;
}
}
if !ctx.is_empty() {
ctx.push_back((pc, sp));
pop_ctx(&mut pc, &mut sp, &mut ctx)?;
}
}
}
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch06/regex/benches/benchmark.rs | ch06/regex/benches/benchmark.rs | //! # パフォーマンス計測
//!
//! ## 計測方法
//! a?^n a^nという正規表現を、a^nという文字列にマッチさせる。
//! ただし、a?^nとa^nは、a?とaのn回の繰り返し。
//! 計測は幅優先と深さ優先で行う。
//!
//! ## n = 3の場合の例
//!
//! - 正規表現: a?a?a?aaa
//! - str: aaa
//!
//! ## 実行方法
//!
//! cargo-criterionをインストール後、cargo criterionと実行。
//!
//! ```text
//! $ cargo install cargo-criterion
//! $ cargo criterion
//! ```
//!
//! 実行後は、target/criterion/reports/index.htmlというファイルが生成されるため、
//! それをWebブラウザで閲覧する。
use criterion::{criterion_group, criterion_main, Criterion};
use regex::do_matching;
use std::time::Duration;
/// (計測のid、a?^n a^nという正規表現、文字列)というタプル
const INPUTS: &[(&str, &str, &str)] = &[
("n = 2", "a?a?aa", "aa"),
("n = 4", "a?a?a?a?aaaa", "aaaa"),
("n = 6", "a?a?a?a?a?a?aaaaaa", "aaaaaa"),
("n = 8", "a?a?a?a?a?a?a?a?aaaaaaaa", "aaaaaaaa"),
("n = 10", "a?a?a?a?a?a?a?a?a?a?aaaaaaaaaa", "aaaaaaaaaa"),
(
"n = 12",
"a?a?a?a?a?a?a?a?a?a?a?a?aaaaaaaaaaaa",
"aaaaaaaaaaaa",
),
(
"n = 14",
"a?a?a?a?a?a?a?a?a?a?a?a?a?a?aaaaaaaaaaaaaa",
"aaaaaaaaaaaaaa",
),
(
"n = 16",
"a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?aaaaaaaaaaaaaaaa",
"aaaaaaaaaaaaaaaa",
),
(
"n = 18",
"a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?aaaaaaaaaaaaaaaaaa",
"aaaaaaaaaaaaaaaaaa",
),
(
"n = 20",
"a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?a?aaaaaaaaaaaaaaaaaaaa",
"aaaaaaaaaaaaaaaaaaaa",
),
];
fn depth_first(c: &mut Criterion) {
let mut g = c.benchmark_group("Depth First");
g.measurement_time(Duration::from_secs(12));
for i in INPUTS {
g.bench_with_input(i.0, &(i.1, i.2), |b, args| {
b.iter(|| do_matching(args.0, args.1, true))
});
}
}
fn width_first(c: &mut Criterion) {
let mut g = c.benchmark_group("Width First");
g.measurement_time(Duration::from_secs(12));
for i in INPUTS {
g.bench_with_input(i.0, &(i.1, i.2), |b, args| {
b.iter(|| do_matching(args.0, args.1, false))
});
}
}
criterion_group!(benches, width_first, depth_first);
criterion_main!(benches);
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch02/sort/src/main.rs | ch02/sort/src/main.rs | //! 実行すると5GB以上のメモリを消費するため注意すること。
//! メモリが足りない場合は、NUMの値を減らしてください。
const NUM: usize = 200000000; // 生成する乱数の合計数
/// xorshift
struct XOR64 {
x: u64,
}
impl XOR64 {
fn new(seed: u64) -> XOR64 {
XOR64 {
x: seed ^ 88172645463325252,
}
}
/// 乱数生成関数
fn next(&mut self) -> u64 {
let x = self.x;
let x = x ^ (x << 13);
let x = x ^ (x >> 7);
let x = x ^ (x << 17);
self.x = x;
return x;
}
}
fn main() {
single_threaded();
multi_threaded();
}
/// 乱数値を要素に持つVecを生成
fn randomized_vec() -> (Vec<u64>, Vec<u64>) {
let mut v1 = Vec::new();
let mut v2 = Vec::new();
let mut generator = XOR64::new(1234);
// 疑似乱数生成
for _ in 0..NUM {
let x1 = generator.next();
let x2 = generator.next();
v1.push(x1);
v2.push(x2);
}
(v1, v2)
}
fn single_threaded() {
let (mut v1, mut v2) = randomized_vec();
let start = std::time::Instant::now(); // 開始時間
v1.sort(); // 順番にソート
v2.sort();
let end = start.elapsed(); // 経過時間
println!(
"single_threaded: {}.{:03}秒",
end.as_secs(),
end.subsec_nanos() / 1_000_000
);
}
fn multi_threaded() {
let (mut v1, mut v2) = randomized_vec();
let start = std::time::Instant::now(); // 開始時間
// スレッドを生成してソート
let handler1 = std::thread::spawn(move || {
v1.sort();
v1
});
let handler2 = std::thread::spawn(move || {
v2.sort();
v2
});
let _v1 = handler1.join().unwrap();
let _v2 = handler2.join().unwrap();
let end = start.elapsed(); // 経過時間
println!(
"multi_threaded: {}.{:03}秒",
end.as_secs(),
end.subsec_nanos() / 1_000_000
);
}
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch03/rwlock/src/main.rs | ch03/rwlock/src/main.rs | use std::{
collections::BTreeMap,
sync::{Arc, RwLock},
thread::sleep,
time::Duration,
};
fn main() {
// 美術館を初期化
let mut gallery = BTreeMap::new();
gallery.insert("葛飾北斎", "富嶽三十六景 神奈川沖浪裏");
gallery.insert("ミュシャ", "黄道十二宮");
// RwLockとArcを利用して共有可能に
let gallery = Arc::new(RwLock::new(gallery));
let mut hdls = Vec::new(); // joinハンドラ
for n in 0..3 {
// 客を表すスレッドを生成
let gallery = gallery.clone(); // 参照カウンタをインクリメント
let hdl = std::thread::spawn(move || {
for _ in 0..8 {
{
let guard = gallery.read().unwrap(); // リードロック
if n == 0 {
// 美術館の内容を表示
for (key, value) in guard.iter() {
print!("{key}:{value}, ");
}
println!();
}
}
sleep(Duration::from_secs(1));
}
});
hdls.push(hdl);
}
// 美術館スタッフ
let staff = std::thread::spawn(move || {
for n in 0..4 {
// 展示内容入れ替え
if n % 2 == 0 {
let mut guard = gallery.write().unwrap(); // ライトロック
guard.clear();
guard.insert("ゴッホ", "星月夜");
guard.insert("エッシャー", "滝");
} else {
let mut guard = gallery.write().unwrap(); // ライトロック
guard.clear();
guard.insert("葛飾北斎", "富嶽三十六景 神奈川沖浪裏");
guard.insert("ミュシャ", "黄道十二宮");
}
sleep(Duration::from_secs(2));
}
});
for hdl in hdls {
hdl.join().unwrap();
}
staff.join().unwrap();
}
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch08/ackerman/src/main.rs | ch08/ackerman/src/main.rs | //! アッカーマン関数
use num::{BigUint, FromPrimitive, One, Zero};
const M: usize = 4;
const N: usize = 4;
fn main() {
let m = M;
let n = BigUint::from_usize(N).unwrap();
let a = ackerman(m, n.clone());
println!("ackerman({M}, {N}) = {a}");
}
fn ackerman(m: usize, n: BigUint) -> BigUint {
let one: BigUint = One::one();
let zero: BigUint = Zero::zero();
if m == 0 {
n + one
} else if n == zero {
ackerman(m - 1, one)
} else {
ackerman(m - 1, ackerman(m, n - one))
}
}
#[cfg(test)]
mod tests {
    use num::{BigUint, FromPrimitive, ToPrimitive};

    /// ackerman(3, 3) is a well-known reference value (61); sanity check.
    #[test]
    fn test_ackerman() {
        let a = crate::ackerman(3, BigUint::from_usize(3).unwrap());
        assert_eq!(a.to_usize().unwrap(), 61);
    }
}
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch08/ackerman/src/ackerman_tailcall.rs | ch08/ackerman/src/ackerman_tailcall.rs | //! 末尾呼び出し版のアッカーマン関数実装
use num::{BigUint, One, Zero};
/// Lazily evaluated second argument of the tail-call Ackermann function:
/// either an already-computed value or a deferred `ackerman(m, n)` call.
#[derive(Debug, Clone)]
pub enum N {
    VAL(BigUint),      // a concrete, already-computed value
    A(usize, BigUint), // a pending ackerman(m, n) computation
}

impl N {
    /// Forces the value: returns it directly, or evaluates the deferred call.
    fn get(self) -> BigUint {
        match self {
            N::VAL(n) => n,
            N::A(m, n) => ackerman_tail(m, N::VAL(n)),
        }
    }
}
/// Public entry point: computes the Ackermann function via `ackerman_tail`.
pub fn ackerman(m: usize, n: BigUint) -> BigUint {
    ackerman_tail(m, N::VAL(n))
}
/// Tail-call-style Ackermann: the second argument arrives as a lazily
/// evaluated `N` so the outer recursive call becomes a tail call.
///
/// Fix: the original forced `n` twice — `n.clone().get()` for the zero test
/// and `n.get()` again to compute `n - 1`. When `n` was a deferred
/// `N::A(m, n)`, each force re-ran the inner `ackerman` computation, doubling
/// the work at every level (exponential recomputation overall). Forcing the
/// argument exactly once yields the same result with a single evaluation.
fn ackerman_tail(m: usize, n: N) -> BigUint {
    let one: BigUint = One::one();
    let zero: BigUint = Zero::zero();
    // Force the (possibly deferred) argument exactly once.
    let n = n.get();
    if m == 0 {
        n + one // n + 1
    } else if n == zero {
        ackerman_tail(m - 1, N::VAL(one))
    } else {
        // Defer the inner ackerman(m, n - 1) call via N::A.
        ackerman_tail(m - 1, N::A(m, n - one))
    }
}
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch08/dbg_target/src/main.rs | ch08/dbg_target/src/main.rs | use std::arch::asm;
use nix::{
sys::signal::{kill, Signal},
unistd::getpid,
};
/// Debugger target program: stops twice with SIGTRAP (once via the x86
/// `int 3` instruction, once via `kill`) so an attached debugger can observe
/// the traps, then runs a short nop loop that is convenient to single-step.
fn main() {
    println!("int 3");
    // Trigger a breakpoint trap directly with the `int 3` instruction.
    unsafe { asm!("int 3") };

    println!("kill -SIGTRAP");
    // Send SIGTRAP to ourselves; looks like another breakpoint stop to a debugger.
    let pid = getpid();
    kill(pid, Signal::SIGTRAP).unwrap();

    for i in 0..3 {
        // `nop` gives the debugger a harmless instruction to step over.
        unsafe { asm!("nop") };
        println!("i = {i}");
    }
}
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch08/zdbg/src/dbg.rs | ch08/zdbg/src/dbg.rs | use crate::helper::DynError;
use nix::{
libc::user_regs_struct,
sys::{
personality::{self, Persona},
ptrace,
wait::{waitpid, WaitStatus},
},
unistd::{execvp, fork, ForkResult, Pid},
};
use std::ffi::{c_void, CString};
/// State shared by the debugger across its typestates.
pub struct DbgInfo {
    pid: Pid,                      // PID of the child (0 while not running)
    brk_addr: Option<*mut c_void>, // breakpoint address, if one is set
    brk_val: i64,                  // original word at the breakpoint address
    filename: String,              // path of the executable being debugged
}

/// Debugger, parameterized by a typestate marker:
/// `ZDbg<Running>` has a live child process, `ZDbg<NotRunning>` does not.
pub struct ZDbg<T> {
    info: Box<DbgInfo>,
    _state: T,
}

/// Typestate markers for `ZDbg`.
pub struct Running;    // a child process is being debugged
pub struct NotRunning; // no child process

/// Enum form of the debugger states; `Exit` terminates the session.
pub enum State {
    Running(ZDbg<Running>),
    NotRunning(ZDbg<NotRunning>),
    Exit,
}
/// Methods shared by the Running and NotRunning states.
impl<T> ZDbg<T> {
    /// Records a breakpoint address without touching the child's memory.
    /// Returns true when the address was accepted.
    /// Only a single breakpoint is supported; a second request is rejected.
    fn set_break_addr(&mut self, cmd: &[&str]) -> bool {
        if self.info.brk_addr.is_some() {
            eprintln!(
                "<<ブレークポイントは設定済みです : Addr = {:p}>>",
                self.info.brk_addr.unwrap()
            );
            false
        } else if let Some(addr) = get_break_addr(cmd) {
            self.info.brk_addr = Some(addr); // remember the breakpoint address
            true
        } else {
            false
        }
    }

    /// Executes the commands that are valid in every state.
    fn do_cmd_common(&self, cmd: &[&str]) {
        match cmd[0] {
            "help" | "h" => do_help(),
            _ => (),
        }
    }
}
/// Methods available while no child process is running.
impl ZDbg<NotRunning> {
    /// Creates a debugger for `filename` with no breakpoint and no child.
    pub fn new(filename: String) -> Self {
        ZDbg {
            info: Box::new(DbgInfo {
                pid: Pid::from_raw(0),
                brk_addr: None,
                brk_val: 0,
                filename,
            }),
            _state: NotRunning,
        }
    }

    /// Records a breakpoint address (patched into memory once running).
    fn do_break(&mut self, cmd: &[&str]) -> bool {
        self.set_break_addr(cmd)
    }

    /// Forks and execs the target; on success transitions to Running.
    fn do_run(mut self, cmd: &[&str]) -> Result<State, DynError> {
        // Command-line arguments handed to the child process.
        let args: Vec<CString> = cmd.iter().map(|s| CString::new(*s).unwrap()).collect();

        match unsafe { fork()? } {
            ForkResult::Child => {
                // Disable ASLR so breakpoint addresses stay stable across runs.
                let p = personality::get().unwrap();
                personality::set(p | Persona::ADDR_NO_RANDOMIZE).unwrap();
                // Let the parent trace us, then exec the target binary.
                ptrace::traceme().unwrap();

                execvp(&CString::new(self.info.filename.as_str()).unwrap(), &args).unwrap();
                unreachable!();
            }
            ForkResult::Parent { child, .. } => match waitpid(child, None)? {
                // The traced child stops before running any target code.
                WaitStatus::Stopped(..) => {
                    println!("<<子プロセスの実行に成功しました : PID = {child}>>");
                    self.info.pid = child;
                    let mut dbg = ZDbg::<Running> {
                        info: self.info,
                        _state: Running,
                    };
                    dbg.set_break()?; // apply the pending breakpoint, if any
                    dbg.do_continue()
                }
                WaitStatus::Exited(..) | WaitStatus::Signaled(..) => {
                    Err("子プロセスの実行に失敗しました".into())
                }
                _ => Err("子プロセスが不正な状態です".into()),
            },
        }
    }

    /// Dispatches one command while not running.
    pub fn do_cmd(mut self, cmd: &[&str]) -> Result<State, DynError> {
        if cmd.is_empty() {
            return Ok(State::NotRunning(self));
        }

        match cmd[0] {
            "run" | "r" => return self.do_run(cmd),
            "break" | "b" => {
                self.do_break(cmd);
            }
            "exit" => return Ok(State::Exit),
            "continue" | "c" | "stepi" | "s" | "registers" | "regs" => {
                eprintln!("<<ターゲットを実行していません。runで実行してください>>")
            }
            _ => self.do_cmd_common(cmd),
        }

        Ok(State::NotRunning(self))
    }
}
/// Methods available while a child process is being debugged.
impl ZDbg<Running> {
    /// Dispatches one command while running.
    pub fn do_cmd(mut self, cmd: &[&str]) -> Result<State, DynError> {
        if cmd.is_empty() {
            return Ok(State::Running(self));
        }

        match cmd[0] {
            "break" | "b" => self.do_break(cmd)?,
            "continue" | "c" => return self.do_continue(),
            "registers" | "regs" => {
                let regs = ptrace::getregs(self.info.pid)?;
                print_regs(&regs);
            }
            "stepi" | "s" => return self.do_stepi(),
            "run" | "r" => eprintln!("<<既に実行中です>>"),
            "exit" => {
                self.do_exit()?;
                return Ok(State::Exit);
            }
            _ => self.do_cmd_common(cmd),
        }

        Ok(State::Running(self))
    }

    /// Handles `exit`: kills the running child and reaps it.
    fn do_exit(self) -> Result<(), DynError> {
        loop {
            ptrace::kill(self.info.pid)?;
            // Loop until we actually observe the child's termination.
            match waitpid(self.info.pid, None)? {
                WaitStatus::Exited(..) | WaitStatus::Signaled(..) => return Ok(()),
                _ => (),
            }
        }
    }

    /// Arms the breakpoint in the child's memory: patches the byte at the
    /// breakpoint address to "int 3" (0xcc), saving the original word.
    fn set_break(&mut self) -> Result<(), DynError> {
        let addr = if let Some(addr) = self.info.brk_addr {
            addr
        } else {
            return Ok(());
        };

        // Read the original word at the breakpoint address.
        let val = match ptrace::read(self.info.pid, addr) {
            Ok(val) => val,
            Err(e) => {
                eprintln!("<<ptrace::readに失敗 : {e}, addr = {:p}>>", addr);
                return Ok(());
            }
        };

        // Helper: print a word as its 8 bytes, least-significant byte first.
        fn print_val(addr: usize, val: i64) {
            print!("{:x}:", addr);
            for n in (0..8).map(|n| ((val >> (n * 8)) & 0xff) as u8) {
                print!(" {:x}", n);
            }
        }

        println!("<<以下のようにメモリを書き換えます>>");
        print!("<<before: "); // show the original value
        print_val(addr as usize, val);
        println!(">>");

        let val_int3 = (val & !0xff) | 0xcc; // replace low byte with "int 3"
        print!("<<after : "); // show the patched value
        print_val(addr as usize, val_int3);
        println!(">>");

        // Write the patched word back into the child's memory.
        match unsafe { ptrace::write(self.info.pid, addr, val_int3 as *mut c_void) } {
            Ok(_) => {
                self.info.brk_addr = Some(addr);
                self.info.brk_val = val; // keep the original word for restoration
            }
            Err(e) => {
                eprintln!("<<ptrace::writeに失敗 : {e}, addr = {:p}>>", addr);
            }
        }

        Ok(())
    }

    /// Handles `break`: records the address and patches the child's memory.
    fn do_break(&mut self, cmd: &[&str]) -> Result<(), DynError> {
        if self.set_break_addr(cmd) {
            self.set_break()?;
        }
        Ok(())
    }

    /// Handles `stepi`: executes a single machine instruction.
    fn do_stepi(self) -> Result<State, DynError> {
        let regs = ptrace::getregs(self.info.pid)?;
        if Some((regs.rip) as *mut c_void) == self.info.brk_addr {
            // The next instruction is the breakpoint: restore the original
            // word (we patched in 0xcc) before stepping over it.
            unsafe {
                ptrace::write(
                    self.info.pid,
                    self.info.brk_addr.unwrap(),
                    self.info.brk_val as *mut c_void,
                )?
            };
            self.step_and_break()
        } else {
            ptrace::step(self.info.pid, None)?;
            self.wait_child()
        }
    }

    /// If stopped exactly on the breakpoint, single-steps past it and
    /// re-arms the breakpoint afterwards.
    fn step_and_break(mut self) -> Result<State, DynError> {
        let regs = ptrace::getregs(self.info.pid)?;
        if Some((regs.rip) as *mut c_void) == self.info.brk_addr {
            ptrace::step(self.info.pid, None)?; // execute one instruction
            match waitpid(self.info.pid, None)? {
                WaitStatus::Exited(..) | WaitStatus::Signaled(..) => {
                    println!("<<子プロセスが終了しました>>");
                    return Ok(State::NotRunning(ZDbg::<NotRunning> {
                        info: self.info,
                        _state: NotRunning,
                    }));
                }
                _ => (),
            }
            self.set_break()?; // re-arm the breakpoint
        }

        Ok(State::Running(self))
    }

    /// Handles `continue`: resumes the child until the next stop.
    fn do_continue(self) -> Result<State, DynError> {
        // If stopped on the breakpoint, step over it and re-arm first.
        match self.step_and_break()? {
            State::Running(r) => {
                // Resume execution.
                ptrace::cont(r.info.pid, None)?;
                r.wait_child()
            }
            n => Ok(n),
        }
    }

    /// Waits for the child; transitions to NotRunning if it terminated.
    fn wait_child(self) -> Result<State, DynError> {
        match waitpid(self.info.pid, None)? {
            WaitStatus::Exited(..) | WaitStatus::Signaled(..) => {
                println!("<<子プロセスが終了しました>>");
                let not_run = ZDbg::<NotRunning> {
                    info: self.info,
                    _state: NotRunning,
                };
                Ok(State::NotRunning(not_run))
            }
            WaitStatus::Stopped(..) => {
                let mut regs = ptrace::getregs(self.info.pid)?;
                // rip - 1 == breakpoint address means we stopped because the
                // injected "int 3" byte just executed.
                if Some((regs.rip - 1) as *mut c_void) == self.info.brk_addr {
                    // Restore the original word over the 0xcc patch.
                    unsafe {
                        ptrace::write(
                            self.info.pid,
                            self.info.brk_addr.unwrap(),
                            self.info.brk_val as *mut c_void,
                        )?
                    };

                    // Rewind the PC so the original instruction re-executes.
                    regs.rip -= 1;
                    ptrace::setregs(self.info.pid, regs)?;
                }
                println!("<<子プロセスが停止しました : PC = {:#x}>>", regs.rip);
                Ok(State::Running(self))
            }
            _ => Err("waitpidの返り値が不正です".into()),
        }
    }
}
/// Prints the command list (abbreviations in parentheses).
fn do_help() {
    println!(
        r#"コマンド一覧 (括弧内は省略記法)
break 0x8000 : ブレークポイントを0x8000番地に設定 (b 0x8000)
run : プログラムを実行 (r)
continue : プログラムを再開 (c)
stepi : 機械語レベルで1ステップ実行 (s)
registers : レジスタを表示 (regs)
exit : 終了
help : このヘルプを表示 (h)"#
    );
}
/// Prints the general-purpose registers of the stopped child process.
fn print_regs(regs: &user_regs_struct) {
    println!(
        r#"RIP: {:#016x}, RSP: {:#016x}, RBP: {:#016x}
RAX: {:#016x}, RBX: {:#016x}, RCX: {:#016x}
RDX: {:#016x}, RSI: {:#016x}, RDI: {:#016x}
R8: {:#016x}, R9: {:#016x}, R10: {:#016x}
R11: {:#016x}, R12: {:#016x}, R13: {:#016x}
R14: {:#016x}, R15: {:#016x}"#,
        regs.rip,
        regs.rsp,
        regs.rbp,
        regs.rax,
        regs.rbx,
        regs.rcx,
        regs.rdx,
        regs.rsi,
        regs.rdi,
        regs.r8,
        regs.r9,
        regs.r10,
        regs.r11,
        regs.r12,
        regs.r13,
        regs.r14,
        regs.r15,
    );
}
/// Parses the breakpoint address from a command line like `break 0x8000`.
///
/// Returns `None` (with a message on stderr) when no address is supplied,
/// when it lacks the `0x` prefix, or when the hex digits fail to parse.
///
/// Fix: the original checked the prefix with `&addr_str[0..2]`, which panics
/// for arguments shorter than two bytes (e.g. `break x`) and for arguments
/// whose byte 2 is not a UTF-8 character boundary. `str::strip_prefix`
/// performs the same check without any panicking slice and also yields the
/// digits directly.
fn get_break_addr(cmd: &[&str]) -> Option<*mut c_void> {
    if cmd.len() < 2 {
        eprintln!("<<アドレスを指定してください\n例 : break 0x8000>>");
        return None;
    }

    let addr_str = cmd[1];
    // Require the "0x" prefix; strip_prefix never panics on short input.
    let hex = match addr_str.strip_prefix("0x") {
        Some(hex) => hex,
        None => {
            eprintln!("<<アドレスは16進数でのみ指定可能です\n例 : break 0x8000>>");
            return None;
        }
    };

    let addr = match usize::from_str_radix(hex, 16) {
        Ok(addr) => addr,
        Err(e) => {
            eprintln!("<<アドレス変換エラー : {}>>", e);
            return None;
        }
    } as *mut c_void;

    Some(addr)
}
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch08/zdbg/src/helper.rs | ch08/zdbg/src/helper.rs | pub type DynError = Box<dyn std::error::Error + Send + Sync + 'static>;
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
ytakano/rust_zero | https://github.com/ytakano/rust_zero/blob/843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c/ch08/zdbg/src/main.rs | ch08/zdbg/src/main.rs | mod dbg;
mod helper;
use dbg::{State, ZDbg};
use helper::DynError;
use rustyline::{error::ReadlineError, Editor};
use std::env;
/// Entry point: expects the target executable as the first CLI argument
/// and hands it to the debugger REPL.
fn main() -> Result<(), DynError> {
    let args: Vec<String> = env::args().collect();
    match args.get(1) {
        Some(target) => run_dbg(target)?,
        None => {
            let msg = format!("引数が必要です\n例 : {} 実行ファイル [引数*]", args[0]);
            return Err(msg.into());
        }
    }
    Ok(())
}
/// REPL loop: reads commands with rustyline and drives the debugger's
/// state machine until `exit`, EOF (Ctrl+D), or a transition to `State::Exit`.
fn run_dbg(filename: &str) -> Result<(), DynError> {
    let debugger = ZDbg::new(filename.to_string());
    let mut state = State::NotRunning(debugger);
    let mut rl = Editor::<()>::new()?;

    loop {
        match rl.readline("zdbg > ") {
            Ok(line) => {
                let trimed = line.trim(); // strip leading/trailing whitespace
                let cmd: Vec<&str> = trimed.split(' ').filter(|c| !c.is_empty()).collect(); // drop empty tokens
                // Dispatch to the current typestate; each do_cmd consumes the
                // state and returns the next one.
                state = match state {
                    State::Running(r) => r.do_cmd(&cmd)?,
                    State::NotRunning(n) => n.do_cmd(&cmd)?,
                    _ => break,
                };

                if let State::Exit = state {
                    break;
                }
                rl.add_history_entry(line);
            }
            Err(ReadlineError::Interrupted) => eprintln!("<<終了はCtrl+D>>"),
            _ => {
                // EOF or read error: kill the child if one is still running.
                if let State::Running(r) = state {
                    r.do_cmd(&["exit"])?;
                };
                break;
            }
        }
    }

    Ok(())
}
| rust | MIT | 843eb4cf9b2dff3b24defc4ada1f04a58a8efe6c | 2026-01-04T20:20:39.285647Z | false |
gz/rust-elfloader | https://github.com/gz/rust-elfloader/blob/3363aba9a9fefdb1c4edd9f3d992020b532ed890/src/lib.rs | src/lib.rs | #![no_std]
#![crate_name = "elfloader"]
#![crate_type = "lib"]
#[cfg(test)]
#[macro_use]
extern crate std;
#[cfg(test)]
extern crate env_logger;
mod binary;
pub use binary::ElfBinary;
pub mod arch;
pub use arch::RelocationType;
use core::fmt;
use core::iter::Filter;
use bitflags::bitflags;
use xmas_elf::dynamic::*;
use xmas_elf::program::ProgramIter;
pub use xmas_elf::header::{Header, Machine};
pub use xmas_elf::program::{Flags, ProgramHeader, ProgramHeader64};
pub use xmas_elf::sections::{Rel, Rela};
pub use xmas_elf::symbol_table::{Entry, Entry64};
pub use xmas_elf::{P32, P64};
/// Required alignment for zero-copy reads provided to xmas_elf by the
/// zero crate.
pub(crate) const ALIGNMENT: usize = core::mem::align_of::<Header>();

/// An iterator over [`ProgramHeader`] whose type is `LOAD`.
pub type LoadableHeaders<'a, 'b> = Filter<ProgramIter<'a, 'b>, fn(&ProgramHeader) -> bool>;
/// A physical address.
pub type PAddr = u64;
/// A virtual address.
pub type VAddr = u64;
// Abstract relocation entries to be passed to the
// trait's relocate method. Library user can decide
// how to handle each relocation
#[allow(dead_code)]
pub struct RelocationEntry {
    pub rtype: RelocationType, // architecture-specific relocation type
    pub offset: u64,           // location in the image affected by the relocation
    pub index: u32,            // symbol table index
    pub addend: Option<u64>,   // present for Rela entries, absent for Rel
}
/// Errors that can occur while parsing or loading an ELF binary.
#[derive(PartialEq, Clone, Debug)]
pub enum ElfLoaderErr {
    // Wraps an error message produced by the xmas_elf parser.
    ElfParser { source: &'static str },
    OutOfMemory,
    // Input slice was not aligned to `ALIGNMENT`.
    UnalignedMemory,
    SymbolTableNotFound,
    UnsupportedElfFormat,
    UnsupportedElfVersion,
    UnsupportedEndianness,
    UnsupportedAbi,
    UnsupportedElfType,
    UnsupportedSectionData,
    UnsupportedArchitecture,
    UnsupportedRelocationEntry,
}
impl From<&'static str> for ElfLoaderErr {
fn from(source: &'static str) -> Self {
ElfLoaderErr::ElfParser { source }
}
}
/// Human-readable descriptions for each error variant.
impl fmt::Display for ElfLoaderErr {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Variants carrying runtime data are formatted directly; every other
        // variant maps to a static message written once at the end.
        let msg = match self {
            ElfLoaderErr::ElfParser { source } => {
                return write!(f, "Error in ELF parser: {}", source)
            }
            ElfLoaderErr::UnalignedMemory => {
                return write!(f, "Data must be aligned to {:?}", ALIGNMENT)
            }
            ElfLoaderErr::OutOfMemory => "Out of memory",
            ElfLoaderErr::SymbolTableNotFound => "No symbol table in the ELF file",
            ElfLoaderErr::UnsupportedElfFormat => "ELF format not supported",
            ElfLoaderErr::UnsupportedElfVersion => "ELF version not supported",
            ElfLoaderErr::UnsupportedEndianness => "ELF endianness not supported",
            ElfLoaderErr::UnsupportedAbi => "ELF ABI not supported",
            ElfLoaderErr::UnsupportedElfType => "ELF type not supported",
            ElfLoaderErr::UnsupportedSectionData => "Can't handle this section data",
            ElfLoaderErr::UnsupportedArchitecture => "Unsupported Architecture",
            ElfLoaderErr::UnsupportedRelocationEntry => "Can't handle relocation entry",
        };
        f.write_str(msg)
    }
}
// Bit flags of the DT_FLAGS_1 entry of the .dynamic section; the FLAG_1_*
// constants are re-exported from xmas_elf::dynamic.
bitflags! {
    #[derive(Default)]
    pub struct DynamicFlags1: u64 {
        const NOW = FLAG_1_NOW;
        const GLOBAL = FLAG_1_GLOBAL;
        const GROUP = FLAG_1_GROUP;
        const NODELETE = FLAG_1_NODELETE;
        const LOADFLTR = FLAG_1_LOADFLTR;
        const INITFIRST = FLAG_1_INITFIRST;
        const NOOPEN = FLAG_1_NOOPEN;
        const ORIGIN = FLAG_1_ORIGIN;
        const DIRECT = FLAG_1_DIRECT;
        const TRANS = FLAG_1_TRANS;
        const INTERPOSE = FLAG_1_INTERPOSE;
        const NODEFLIB = FLAG_1_NODEFLIB;
        const NODUMP = FLAG_1_NODUMP;
        const CONFALT = FLAG_1_CONFALT;
        const ENDFILTEE = FLAG_1_ENDFILTEE;
        const DISPRELDNE = FLAG_1_DISPRELDNE;
        const DISPRELPND = FLAG_1_DISPRELPND;
        const NODIRECT = FLAG_1_NODIRECT;
        const IGNMULDEF = FLAG_1_IGNMULDEF;
        const NOKSYMS = FLAG_1_NOKSYMS;
        const NOHDR = FLAG_1_NOHDR;
        const EDITED = FLAG_1_EDITED;
        const NORELOC = FLAG_1_NORELOC;
        const SYMINTPOSE = FLAG_1_SYMINTPOSE;
        const GLOBAUDIT = FLAG_1_GLOBAUDIT;
        const SINGLETON = FLAG_1_SINGLETON;
        const STUB = FLAG_1_STUB;
        const PIE = FLAG_1_PIE;
    }
}
/// Information parsed from the .dynamic section
pub struct DynamicInfo {
    pub flags1: DynamicFlags1, // DT_FLAGS_1 bits (e.g. PIE)
    pub rela: u64,             // address of the relocation table (DT_RELA / DT_REL)
    pub rela_size: u64,        // size in bytes of that table
}
/// Implement this trait for customized ELF loading.
///
/// The flow of ElfBinary is that it first calls `allocate` for all regions
/// that need to be allocated (i.e., the LOAD program headers of the ELF binary),
/// then `load` will be called to fill the allocated regions, and finally
/// `relocate` is called for every entry in the RELA table.
pub trait ElfLoader {
    /// Allocates a virtual region specified by `load_headers`.
    fn allocate(&mut self, load_headers: LoadableHeaders) -> Result<(), ElfLoaderErr>;

    /// Copies `region` into memory starting at `base`.
    /// The caller makes sure that there was an `allocate` call previously
    /// to initialize the region.
    fn load(&mut self, flags: Flags, base: VAddr, region: &[u8]) -> Result<(), ElfLoaderErr>;

    /// Request for the client to relocate the given `entry`
    /// within the loaded ELF file.
    fn relocate(&mut self, entry: RelocationEntry) -> Result<(), ElfLoaderErr>;

    /// Inform client about where the initial TLS data is located.
    ///
    /// The default implementation is a no-op for loaders without TLS support.
    fn tls(
        &mut self,
        _tdata_start: VAddr,
        _tdata_length: u64,
        _total_size: u64,
        _align: u64,
    ) -> Result<(), ElfLoaderErr> {
        Ok(())
    }

    /// In case there is a `.data.rel.ro` section we instruct the loader
    /// to change the passed offset to read-only (this is called after
    /// the relocate calls are completed).
    ///
    /// Note: The default implementation is a no-op since this is
    /// not strictly necessary to implement.
    fn make_readonly(&mut self, _base: VAddr, _size: usize) -> Result<(), ElfLoaderErr> {
        Ok(())
    }
}
/// Utility function to verify alignment.
///
/// `align` must be a non-zero power of two (true for any Rust alignment,
/// including `ALIGNMENT`): the bitmask trick below is only correct in that
/// case, and `align == 0` would underflow. A debug assertion documents and
/// enforces this precondition without adding release-mode cost.
///
/// Note: this may be stabilized in the future as:
///
/// [core::ptr::is_aligned_to](https://doc.rust-lang.org/core/primitive.pointer.html#method.is_aligned_to)
pub(crate) fn is_aligned_to(ptr: usize, align: usize) -> bool {
    debug_assert!(align.is_power_of_two(), "alignment must be a power of two");
    ptr & (align - 1) == 0
}
/// Compiles the code samples in README.md as doctests (doctest builds only).
#[cfg(doctest)]
mod test_readme {
    // Injects arbitrary markdown as a doc attribute so rustdoc runs its
    // fenced code blocks as tests.
    macro_rules! external_doc_test {
        ($x:expr) => {
            #[doc = $x]
            extern "C" {}
        };
    }

    external_doc_test!(include_str!("../README.md"));
}
| rust | Apache-2.0 | 3363aba9a9fefdb1c4edd9f3d992020b532ed890 | 2026-01-04T20:20:38.811615Z | false |
gz/rust-elfloader | https://github.com/gz/rust-elfloader/blob/3363aba9a9fefdb1c4edd9f3d992020b532ed890/src/binary.rs | src/binary.rs | use crate::{
DynamicFlags1, DynamicInfo, ElfLoader, ElfLoaderErr, LoadableHeaders, RelocationEntry,
RelocationType,
};
use core::fmt;
#[cfg(log)]
use log::*;
use xmas_elf::dynamic::Tag;
use xmas_elf::program::ProgramHeader::{self, Ph32, Ph64};
use xmas_elf::program::{ProgramIter, SegmentData, Type};
use xmas_elf::sections::SectionData;
pub use xmas_elf::symbol_table::{Entry, Entry64};
use xmas_elf::ElfFile;
use xmas_elf::*;
/// Abstract representation of a loadable ELF binary.
///
/// Constructed via [`ElfBinary::new`] from a properly aligned byte slice.
pub struct ElfBinary<'s> {
    /// The ELF file in question.
    pub file: ElfFile<'s>,
    /// Parsed information from the .dynamic section (if the binary has it).
    pub dynamic: Option<DynamicInfo>,
}
impl<'s> fmt::Debug for ElfBinary<'s> {
    /// Debug-prints the binary as the list of its program headers.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "ElfBinary{{ [")?;
        self.program_headers()
            .try_for_each(|p| write!(f, " pheader = {}", p))?;
        write!(f, "] }}")
    }
}
impl<'s> ElfBinary<'s> {
/// Create a new ElfBinary.
///
/// Verifies slice alignment, parses the ELF header, and pre-parses the
/// PT_DYNAMIC program header (if present) into [`DynamicInfo`].
pub fn new(region: &'s [u8]) -> Result<ElfBinary<'s>, ElfLoaderErr> {
    // Verify that the slice is aligned properly
    if !crate::is_aligned_to(region.as_ptr() as usize, crate::ALIGNMENT) {
        return Err(ElfLoaderErr::UnalignedMemory);
    }
    let file = ElfFile::new(region)?;

    // Parse relevant parts out of the .dynamic section
    let mut dynamic = None;
    for p in file.program_iter() {
        let typ = match p {
            Ph64(header) => header.get_type()?,
            Ph32(header) => header.get_type()?,
        };
        if typ == Type::Dynamic {
            // Only one PT_DYNAMIC segment is expected; stop at the first.
            dynamic = ElfBinary::parse_dynamic(&file, &p)?;
            break;
        }
    }

    Ok(ElfBinary { file, dynamic })
}
/// Returns true if the binary is compiled as position independent code or false otherwise.
///
/// For the binary to be PIE it needs to have a .dynamic section with PIE set in the flags1
/// field.
pub fn is_pie(&self) -> bool {
self.dynamic.as_ref().map_or(false, |d: &DynamicInfo| {
d.flags1.contains(DynamicFlags1::PIE)
})
}
/// Returns the dynamic loader if present.
///
/// readelf -x .interp <binary>
///
/// For a statically compiled binary this will return None
pub fn interpreter(&'s self) -> Option<&'s str> {
    let section = self.file.find_section_by_name(".interp");
    section.and_then(|interp_section| {
        let data = interp_section.get_data(&self.file).ok()?;

        let cstr = match data {
            SectionData::Undefined(val) => val,
            _ => return None,
        };

        // Validate there is room for a null terminator
        if cstr.len() < 2 {
            return None;
        }

        // Drop the trailing NUL byte and ensure the path is valid UTF-8.
        core::str::from_utf8(&cstr[..cstr.len() - 1]).ok()
    })
}
/// Returns the target architecture
pub fn get_arch(&self) -> header::Machine {
    self.file.header.pt2.machine().as_machine()
}

/// Return the entry point of the ELF file.
///
/// Note this may be zero in case of position independent executables.
pub fn entry_point(&self) -> u64 {
    self.file.header.pt2.entry_point()
}

/// Create a slice of the program headers.
pub fn program_headers(&self) -> ProgramIter {
    self.file.program_iter()
}

/// Get the name of the given symbol, or "unknown" if it has no name.
pub fn symbol_name(&self, symbol: &'s dyn Entry) -> &'s str {
    symbol.get_name(&self.file).unwrap_or("unknown")
}
/// Invokes `func` on every entry of the `.symtab` section.
///
/// Returns `SymbolTableNotFound` when the section is absent or its data
/// is not a 32/64-bit symbol table.
pub fn for_each_symbol<F: FnMut(&'s dyn Entry)>(
    &self,
    mut func: F,
) -> Result<(), ElfLoaderErr> {
    let symbol_section = self
        .file
        .find_section_by_name(".symtab")
        .ok_or(ElfLoaderErr::SymbolTableNotFound)?;
    match symbol_section.get_data(&self.file)? {
        SectionData::SymbolTable32(entries) => {
            entries.iter().for_each(|entry| func(entry));
            Ok(())
        }
        SectionData::SymbolTable64(entries) => {
            entries.iter().for_each(|entry| func(entry));
            Ok(())
        }
        _ => Err(ElfLoaderErr::SymbolTableNotFound),
    }
}
/// Can we load this binary on our platform?
///
/// Checks ELF version, endianness (little-endian only), OS ABI (System V
/// or Linux), and file type (executable or shared object).
fn is_loadable(&self) -> Result<(), ElfLoaderErr> {
    let header = self.file.header;
    let typ = header.pt2.type_().as_type();

    if header.pt1.version() != header::Version::Current {
        Err(ElfLoaderErr::UnsupportedElfVersion)
    } else if header.pt1.data() != header::Data::LittleEndian {
        Err(ElfLoaderErr::UnsupportedEndianness)
    } else if !(header.pt1.os_abi() == header::OsAbi::SystemV
        || header.pt1.os_abi() == header::OsAbi::Linux)
    {
        Err(ElfLoaderErr::UnsupportedAbi)
    } else if !(typ == header::Type::Executable || typ == header::Type::SharedObject) {
        #[cfg(log)]
        error!("Invalid ELF type {:?}", typ);
        Err(ElfLoaderErr::UnsupportedElfType)
    } else {
        Ok(())
    }
}
/// Process the relocation entries for the ELF file.
///
/// Issues call to `loader.relocate` and passes the relocation entry.
/// Absence of a `.rela.dyn`/`.rel.dyn` section is not an error — the
/// binary simply has no dynamic relocations.
fn maybe_relocate(&self, loader: &mut dyn ElfLoader) -> Result<(), ElfLoaderErr> {
    // Relocation types are architecture specific
    let arch = self.get_arch();

    // It's easier to just locate the section by name, either:
    // - .rela.dyn
    // - .rel.dyn
    let relocation_section = self
        .file
        .find_section_by_name(".rela.dyn")
        .or_else(|| self.file.find_section_by_name(".rel.dyn"));

    // Helper macro to call loader.relocate() on all entries
    macro_rules! iter_entries_and_relocate {
        ($rela_entries:expr, $create_addend:ident) => {
            for entry in $rela_entries {
                loader.relocate(RelocationEntry {
                    rtype: RelocationType::from(arch, entry.get_type() as u32)?,
                    offset: entry.get_offset() as u64,
                    index: entry.get_symbol_table_index(),
                    addend: $create_addend!(entry),
                })?;
            }
        };
    }

    // Construct from Rel<T> entries. Does not contain an addend.
    macro_rules! rel_entry {
        ($entry:ident) => {
            None
        };
    }

    // Construct from Rela<T> entries. Contains an addend.
    macro_rules! rela_entry {
        ($entry:ident) => {
            Some($entry.get_addend() as u64)
        };
    }

    // If either section exists apply the relocations
    relocation_section.map_or(Ok(()), |rela_section_dyn| {
        let data = rela_section_dyn.get_data(&self.file)?;
        match data {
            SectionData::Rel32(rel_entries) => {
                iter_entries_and_relocate!(rel_entries, rel_entry);
            }
            SectionData::Rela32(rela_entries) => {
                iter_entries_and_relocate!(rela_entries, rela_entry);
            }
            SectionData::Rel64(rel_entries) => {
                iter_entries_and_relocate!(rel_entries, rel_entry);
            }
            SectionData::Rela64(rela_entries) => {
                iter_entries_and_relocate!(rela_entries, rela_entry);
            }
            _ => return Err(ElfLoaderErr::UnsupportedSectionData),
        }
        Ok(())
    })
}
/// Processes a dynamic header section.
///
/// This section contains mostly entry points to other section headers (like relocation).
/// At the moment this just does sanity checking for relocation later.
///
/// A human readable version of the dynamic section is best obtained with `readelf -d <binary>`.
fn parse_dynamic<'a>(
    file: &ElfFile,
    dynamic_header: &'a ProgramHeader<'a>,
) -> Result<Option<DynamicInfo>, ElfLoaderErr> {
    #[cfg(log)]
    trace!("load dynamic segement {:?}", dynamic_header);

    // Walk through the dynamic program header and find the rela and sym_tab section offsets:
    let segment = dynamic_header.get_data(file)?;

    // Init result
    let mut info = DynamicInfo {
        flags1: Default::default(),
        rela: 0,
        rela_size: 0,
    };

    // Each entry/section is parsed for the same information currently
    macro_rules! parse_entry_tags {
        ($info:ident, $entry:ident, $tag:ident) => {
            match $tag {
                // Trace required libs
                Tag::Needed => {
                    #[cfg(log)]
                    trace!(
                        "Required library {:?}",
                        file.get_dyn_string($entry.get_val()? as _)
                    )
                }
                // Rel<T>
                Tag::Rel => $info.rela = $entry.get_ptr()?.into(),
                Tag::RelSize => $info.rela_size = $entry.get_val()?.into(),
                // Rela<T>
                Tag::Rela => $info.rela = $entry.get_ptr()?.into(),
                Tag::RelaSize => $info.rela_size = $entry.get_val()?.into(),
                Tag::Flags1 => {
                    // DT_FLAGS_1 may carry bits we don't model; keep them verbatim.
                    $info.flags1 =
                        unsafe { DynamicFlags1::from_bits_unchecked($entry.get_val()? as _) };
                }
                _ => {
                    #[cfg(log)]
                    trace!("unsupported {:?}", $entry)
                }
            }
        };
    }

    // Helper macro to iterate all entries
    macro_rules! iter_entries_and_parse {
        ($info:ident, $dyn_entries:expr) => {
            for dyn_entry in $dyn_entries {
                let tag = dyn_entry.get_tag()?;
                parse_entry_tags!($info, dyn_entry, tag);
            }
        };
    }

    match segment {
        SegmentData::Dynamic32(dyn_entries) => {
            iter_entries_and_parse!(info, dyn_entries);
        }
        SegmentData::Dynamic64(dyn_entries) => {
            iter_entries_and_parse!(info, dyn_entries);
        }
        _ => {
            return Err(ElfLoaderErr::UnsupportedSectionData);
        }
    };

    #[cfg(log)]
    trace!(
        "rela size {:?} rela off {:?} flags1 {:?}",
        info.rela_size,
        info.rela,
        info.flags1
    );

    Ok(Some(info))
}
/// Processing the program headers and issue commands to loader.
///
/// Will tell loader to create space in the address space / region where the
/// header is supposed to go, then copy it there, and finally relocate it.
pub fn load(&self, loader: &mut dyn ElfLoader) -> Result<(), ElfLoaderErr> {
    self.is_loadable()?;

    // Phase 1: reserve space for every LOAD segment.
    loader.allocate(self.iter_loadable_headers())?;

    // Phase 2: copy segment contents / report TLS parameters.
    for header in self.file.program_iter() {
        if header.get_type() == Ok(Type::Null) {
            continue;
        }
        let raw = match header {
            Ph32(inner) => inner.raw_data(&self.file),
            Ph64(inner) => inner.raw_data(&self.file),
        };
        let typ = header.get_type()?;
        match typ {
            Type::Load => {
                loader.load(header.flags(), header.virtual_addr(), raw)?;
            }
            Type::Tls => {
                loader.tls(
                    header.virtual_addr(),
                    header.file_size(),
                    header.mem_size(),
                    header.align(),
                )?;
            }
            _ => {} // skip for now
        }
    }

    // Phase 3: apply dynamic relocations, if any.
    self.maybe_relocate(loader)?;

    // Phase 4: process .data.rel.ro (GNU_RELRO) regions after relocation.
    for header in self.file.program_iter() {
        if header.get_type()? == Type::GnuRelro {
            loader.make_readonly(header.virtual_addr(), header.mem_size() as usize)?
        }
    }

    Ok(())
}
/// Builds the filtered iterator over PT_LOAD program headers that is
/// handed to [`ElfLoader::allocate`].
fn iter_loadable_headers(&self) -> LoadableHeaders {
    // Predicate: keep only headers whose type parses as `Load`.
    // Must be a plain `fn` (not a closure) so it matches the fn-pointer
    // filter in the `LoadableHeaders` type alias.
    fn select_load(pheader: &ProgramHeader) -> bool {
        let typ = match pheader {
            Ph32(header) => header.get_type(),
            Ph64(header) => header.get_type(),
        };
        matches!(typ, Ok(Type::Load))
    }

    self.file.program_iter().filter(select_load)
}
}
/// Verifies that `ElfBinary::new` accepts a properly aligned buffer and
/// rejects the same bytes at an unaligned offset with `UnalignedMemory`.
#[test]
fn test_load_unaligned() {
    use core::ops::Deref;

    // 8-byte aligned stack buffer so we control alignment precisely.
    #[repr(C, align(8))]
    struct AlignedStackBuffer([u8; 8096]);

    impl Deref for AlignedStackBuffer {
        type Target = [u8; 8096];
        fn deref(&self) -> &Self::Target {
            &self.0
        }
    }

    impl AlignedStackBuffer {
        // Slice starting at `index`; any index that is not a multiple of
        // the alignment yields an unaligned slice.
        fn slice_at_index(&self, index: usize) -> &[u8] {
            &self.0[index..]
        }

        // Fills the buffer with (a prefix of) the file's bytes.
        fn buffer_from_file(&mut self, path: &str) {
            let data = std::fs::read(path).unwrap();
            let max = core::cmp::min(data.len(), self.0.len());
            self.0[..max].copy_from_slice(&data[..max]);
        }
    }

    // Read the file into an aligned buffer
    let mut aligned = AlignedStackBuffer([0u8; 8096]);
    aligned.buffer_from_file("test/test.riscv64");

    // Verify aligned version works
    let result = ElfBinary::new(aligned.deref());
    assert!(result.is_ok());

    // Verify unaligned version fails with appropriate error
    let unaligned = aligned.slice_at_index(1);
    let result = ElfBinary::new(unaligned);
    assert_eq!(result.err().unwrap(), ElfLoaderErr::UnalignedMemory);
}
| rust | Apache-2.0 | 3363aba9a9fefdb1c4edd9f3d992020b532ed890 | 2026-01-04T20:20:38.811615Z | false |
gz/rust-elfloader | https://github.com/gz/rust-elfloader/blob/3363aba9a9fefdb1c4edd9f3d992020b532ed890/src/arch/test.rs | src/arch/test.rs | use crate::*;
use log::{info, trace};
use std::vec::Vec;
/// Action recorded by [`TestLoader`] so tests can assert the call sequence.
#[derive(Eq, Clone, PartialEq, Copy, Debug)]
pub(crate) enum LoaderAction {
    Allocate(VAddr, usize, Flags), // allocate(addr, mem_size, flags)
    Load(VAddr, usize),            // load(base, region length)
    Relocate(VAddr, u64),          // relocation target and new value
    Tls(VAddr, u64, u64, u64),     // tls(start, tdata len, total size, align)
}

/// Test double for [`ElfLoader`]: records every call instead of mapping memory.
pub(crate) struct TestLoader {
    pub(crate) vbase: VAddr,               // pretend base address of the image
    pub(crate) actions: Vec<LoaderAction>, // chronological log of loader calls
}

impl TestLoader {
    /// Creates a loader that pretends the binary is placed at `offset`.
    pub(crate) fn new(offset: VAddr) -> TestLoader {
        TestLoader {
            vbase: offset,
            actions: Vec::with_capacity(12),
        }
    }
}
impl ElfLoader for TestLoader {
/// Records one `Allocate` action per LOAD header (no real allocation).
fn allocate(&mut self, load_headers: LoadableHeaders) -> Result<(), ElfLoaderErr> {
    for header in load_headers {
        info!(
            "allocate base = {:#x} size = {:#x} flags = {}",
            header.virtual_addr(),
            header.mem_size(),
            header.flags()
        );

        self.actions.push(LoaderAction::Allocate(
            header.virtual_addr(),
            header.mem_size() as usize,
            header.flags(),
        ));
    }
    Ok(())
}
fn relocate(&mut self, entry: RelocationEntry) -> Result<(), ElfLoaderErr> {
use crate::arch::aarch64::RelocationTypes::*;
use crate::arch::riscv::RelocationTypes::*;
use crate::arch::x86::RelocationTypes::*;
use crate::arch::x86_64::RelocationTypes::*;
use RelocationType::{x86, x86_64, AArch64, RiscV};
// Get the pointer to where the relocation happens in the
// memory where we loaded the headers
//
// vbase is the new base where we locate the binary
//
// get_offset(): For an executable or shared object, the value indicates
// the virtual address of the storage unit affected by the relocation.
// This information makes the relocation entries more useful for the runtime linker.
let addr: *mut u64 = (self.vbase + entry.offset) as *mut u64;
match entry.rtype {
// x86
x86(R_386_32) => Ok(()),
x86(R_386_RELATIVE) => {
info!("R_RELATIVE {:p} ", addr);
self.actions
.push(LoaderAction::Relocate(addr as u64, self.vbase));
Ok(())
}
x86(R_386_GLOB_DAT) => {
trace!("R_386_GLOB_DAT: Can't handle that.");
Ok(())
}
x86(R_386_NONE) => Ok(()),
// RISCV
RiscV(R_RISCV_64) => Ok(()),
RiscV(R_RISCV_NONE) => Ok(()),
RiscV(R_RISCV_RELATIVE) => {
// This type requires addend to be present
let addend = entry
.addend
.ok_or(ElfLoaderErr::UnsupportedRelocationEntry)?;
// This is a relative relocation, add the offset (where we put our
// binary in the vspace) to the addend and we're done.
self.actions
.push(LoaderAction::Relocate(addr as u64, self.vbase + addend));
trace!("R_RELATIVE *{:p} = {:#x}", addr, self.vbase + addend);
Ok(())
}
// x86_64
x86_64(R_AMD64_64) => {
trace!("R_64");
Ok(())
}
x86_64(R_AMD64_RELATIVE) => {
// This type requires addend to be present
let addend = entry
.addend
.ok_or(ElfLoaderErr::UnsupportedRelocationEntry)?;
// This is a relative relocation, add the offset (where we put our
// binary in the vspace) to the addend and we're done.
self.actions
.push(LoaderAction::Relocate(addr as u64, self.vbase + addend));
trace!("R_RELATIVE *{:p} = {:#x}", addr, self.vbase + addend);
Ok(())
}
AArch64(R_AARCH64_RELATIVE) => {
// This type requires addend to be present
let addend = entry
.addend
.ok_or(ElfLoaderErr::UnsupportedRelocationEntry)?;
// This is a relative relocation, add the offset (where we put our
// binary in the vspace) to the addend and we're done.
self.actions
.push(LoaderAction::Relocate(addr as u64, self.vbase + addend));
trace!("R_RELATIVE *{:p} = {:#x}", addr, self.vbase + addend);
Ok(())
}
AArch64(R_AARCH64_GLOB_DAT) => {
trace!("R_AARCH64_GLOB_DAT: Can't handle that.");
Ok(())
}
x86_64(R_AMD64_GLOB_DAT) => {
trace!("R_AMD64_GLOB_DAT: Can't handle that.");
Ok(())
}
x86_64(R_AMD64_NONE) => Ok(()),
e => {
log::error!("Unsupported relocation type: {:?}", e);
Err(ElfLoaderErr::UnsupportedRelocationEntry)
}
}
}
fn load(&mut self, _flags: Flags, base: VAddr, region: &[u8]) -> Result<(), ElfLoaderErr> {
    // Record the load request so the test can verify base address and size.
    let size = region.len();
    info!("load base = {:#x} size = {:#x} region", base, size);
    self.actions.push(LoaderAction::Load(base, size));
    Ok(())
}
fn tls(
    &mut self,
    tdata_start: VAddr,
    tdata_length: u64,
    total_size: u64,
    alignment: u64,
) -> Result<(), ElfLoaderErr> {
    // Record the TLS template parameters so the test can inspect them later.
    self.actions.push(LoaderAction::Tls(
        tdata_start,
        tdata_length,
        total_size,
        alignment,
    ));
    info!(
        "tdata_start = {:#x} tdata_length = {:#x} total_size = {:#x} alignment = {:#}",
        tdata_start, tdata_length, total_size, alignment
    );
    Ok(())
}
}
pub(crate) fn init() {
    // Install the test logger; ignore the error raised when a logger is
    // already registered (init() runs once per #[test] in the same process).
    env_logger::builder().is_test(true).try_init().ok();
}
| rust | Apache-2.0 | 3363aba9a9fefdb1c4edd9f3d992020b532ed890 | 2026-01-04T20:20:38.811615Z | false |
gz/rust-elfloader | https://github.com/gz/rust-elfloader/blob/3363aba9a9fefdb1c4edd9f3d992020b532ed890/src/arch/mod.rs | src/arch/mod.rs | use crate::{ElfLoaderErr, Machine};
pub mod aarch64;
pub mod arm;
pub mod riscv;
pub mod x86;
pub mod x86_64;
#[cfg(test)]
mod test;
/// A relocation type, tagged by the machine architecture it belongs to.
///
/// Wraps the per-architecture relocation-type enums so callers can carry a
/// relocation around generically and match on the architecture when needed.
#[derive(Debug)]
#[allow(non_camel_case_types)]
pub enum RelocationType {
    /// An x86 (32-bit) relocation.
    x86(x86::RelocationTypes),
    /// An x86-64 (AMD64) relocation.
    x86_64(x86_64::RelocationTypes),
    /// A 32-bit ARM relocation.
    Arm(arm::RelocationTypes),
    /// A 64-bit ARM (AArch64) relocation.
    AArch64(aarch64::RelocationTypes),
    /// A RISC-V relocation.
    RiscV(riscv::RelocationTypes),
}
impl RelocationType {
    /// Map a machine architecture plus a raw relocation number to the
    /// corresponding typed `RelocationType`.
    ///
    /// Returns `ElfLoaderErr::UnsupportedArchitecture` for machines this
    /// crate has no relocation table for.
    pub fn from(machine: Machine, type_num: u32) -> Result<RelocationType, ElfLoaderErr> {
        match machine {
            Machine::X86 => Ok(RelocationType::x86(x86::RelocationTypes::from(type_num))),
            Machine::X86_64 => Ok(RelocationType::x86_64(x86_64::RelocationTypes::from(type_num))),
            Machine::Arm => Ok(RelocationType::Arm(arm::RelocationTypes::from(type_num))),
            Machine::AArch64 => Ok(RelocationType::AArch64(aarch64::RelocationTypes::from(type_num))),
            Machine::RISC_V => Ok(RelocationType::RiscV(riscv::RelocationTypes::from(type_num))),
            _ => Err(ElfLoaderErr::UnsupportedArchitecture),
        }
    }
}
| rust | Apache-2.0 | 3363aba9a9fefdb1c4edd9f3d992020b532ed890 | 2026-01-04T20:20:38.811615Z | false |
gz/rust-elfloader | https://github.com/gz/rust-elfloader/blob/3363aba9a9fefdb1c4edd9f3d992020b532ed890/src/arch/riscv/test.rs | src/arch/riscv/test.rs | use std::fs;
use crate::arch::test::*;
use crate::*;
#[test]
fn load_pie_elf() {
    init();
    let blob = fs::read("test/test.riscv64").expect("Can't read binary");
    let binary = ElfBinary::new(blob.as_slice()).expect("Got proper ELF file");
    assert!(binary.is_pie());
    let mut loader = TestLoader::new(0x1000_0000);
    binary.load(&mut loader).expect("Can't load?");
    for action in &loader.actions {
        println!("{:?}", action);
    }
    // Allocate/load expectations come from `readelf -l test/test.riscv64`:
    // Program Headers:
    //  Type    Offset   VirtAddr           PhysAddr           FileSiz  MemSiz   Flg Align
    //  PHDR    0x000040 0x0000000000000040 0x0000000000000040 0x000188 0x000188 R   0x8
    //  INTERP  0x0001c8 0x00000000000001c8 0x00000000000001c8 0x00001a 0x00001a R   0x1
    //  LOAD    0x000000 0x0000000000000000 0x0000000000000000 0x000780 0x000780 R E 0x10000
    //  LOAD    0x000e20 0x0000000000001e20 0x0000000000001e20 0x000250 0x000288 RW  0x10000
    //  DYNAMIC 0x000e30 0x0000000000001e30 0x0000000000001e30 0x0001d0 0x0001d0 RW  0x8
    //
    // Relocation expectations come from `readelf -r test/test.riscv64`,
    // section '.rela.dyn' (the test loader records only R_RISCV_RELATIVE):
    //  000000001e20 R_RISCV_RELATIVE  6ac
    //  000000001e28 R_RISCV_RELATIVE  644
    //  000000002000 R_RISCV_RELATIVE  2000
    //  000000002058 R_RISCV_RELATIVE  6e0
    // The R_RISCV_64 and R_RISCV_JUMP_SLOT entries produce no actions, so
    // exactly these eight actions must be recorded, in this order.
    let vbase = 0x1000_0000u64;
    let expected = [
        LoaderAction::Allocate(VAddr::from(0x0u64), 0x780, Flags(1 | 4)),
        LoaderAction::Allocate(VAddr::from(0x1e20u64), 0x288, Flags(0b110)),
        LoaderAction::Load(VAddr::from(0x0u64), 0x780),
        LoaderAction::Load(VAddr::from(0x1e20u64), 0x250),
        LoaderAction::Relocate(vbase + 0x1e20, vbase + 0x6ac),
        LoaderAction::Relocate(vbase + 0x1e28, vbase + 0x644),
        LoaderAction::Relocate(vbase + 0x2000, vbase + 0x2000),
        LoaderAction::Relocate(vbase + 0x2058, vbase + 0x6e0),
    ];
    assert_eq!(loader.actions, expected);
}
#[test]
fn check_nopie() {
    init();
    // A non-position-independent executable must not report itself as PIE.
    let blob = fs::read("test/test_nopie.riscv64").expect("Can't read binary");
    let binary = ElfBinary::new(blob.as_slice()).expect("Got proper ELF file");
    assert!(!binary.is_pie());
}
/// Verifies that loading `test/tls.riscv64` records the TLS template
/// described by the binary's TLS program header.
#[test]
fn check_tls() {
    init();
    let binary_blob = fs::read("test/tls.riscv64").expect("Can't read binary");
    let binary = ElfBinary::new(binary_blob.as_slice()).expect("Got proper ELF file");
    let mut loader = TestLoader::new(0x1000_0000);
    binary.load(&mut loader).expect("Can't load?");
    /*
    readelf -l test/tls.riscv64
    TLS produces entries of this form:
    pheader = Program header:
    type:             Ok(Tls)
    flags:            R
    offset:           0xe20
    virtual address:  0x1e0c
    physical address: 0x1e0c
    file size:        0x4
    memory size:      0x8
    align:            0x4
    File size is 0x4 because we have one tdata entry; memory size
    is 8 because we also have one bss entry that needs to be written with zeroes.
    So to initialize TLS: we allocate zeroed memory of size `memory size`, then copy
    file size starting at virtual address in the beginning.
    */
    // `contains` replaces the former `iter().find(..).is_some()` chain
    // (clippy::search_is_some) — same semantics, clearer intent.
    assert!(loader
        .actions
        .contains(&LoaderAction::Tls(VAddr::from(0x1e0cu64), 0x4, 0x8, 0x4)));
}
| rust | Apache-2.0 | 3363aba9a9fefdb1c4edd9f3d992020b532ed890 | 2026-01-04T20:20:38.811615Z | false |
gz/rust-elfloader | https://github.com/gz/rust-elfloader/blob/3363aba9a9fefdb1c4edd9f3d992020b532ed890/src/arch/riscv/mod.rs | src/arch/riscv/mod.rs | //! RISCV relocation types
//!
#[cfg(test)]
mod test;
/// RISC-V relocation types, as defined in the RISC-V ELF psABI specification.
///
/// Variants follow psABI numbering as decoded by [`RelocationTypes::from`];
/// relocation numbers 12-15 are reserved and, like any other unassigned
/// value, decode to `Unknown`.
#[derive(Eq, PartialEq, Debug, Clone, Copy)]
#[allow(non_camel_case_types)]
#[repr(u32)]
pub enum RelocationTypes {
    /// No relocation.
    R_RISCV_NONE,
    /// Add 32 bit zero extended symbol value
    R_RISCV_32,
    /// Add 64 bit symbol value.
    R_RISCV_64,
    /// Add load address of shared object.
    R_RISCV_RELATIVE,
    /// Copy data from shared object.
    R_RISCV_COPY,
    /// Set GOT entry to code address.
    R_RISCV_JUMP_SLOT,
    /// 32 bit ID of module containing symbol
    R_RISCV_TLS_DTPMOD32,
    /// ID of module containing symbol
    R_RISCV_TLS_DTPMOD64,
    /// 32 bit relative offset in TLS block
    R_RISCV_TLS_DTPREL32,
    /// Relative offset in TLS block
    R_RISCV_TLS_DTPREL64,
    /// 32 bit relative offset in static TLS block
    R_RISCV_TLS_TPREL32,
    /// Relative offset in static TLS block
    R_RISCV_TLS_TPREL64,
    /// PC-relative branch
    R_RISCV_BRANCH,
    /// PC-relative jump
    R_RISCV_JAL,
    /// PC-relative call
    R_RISCV_CALL,
    /// PC-relative call (PLT)
    R_RISCV_CALL_PLT,
    /// PC-relative GOT reference
    R_RISCV_GOT_HI20,
    /// PC-relative TLS IE GOT offset
    R_RISCV_TLS_GOT_HI20,
    /// PC-relative TLS GD reference
    R_RISCV_TLS_GD_HI20,
    /// PC-relative reference
    R_RISCV_PCREL_HI20,
    /// PC-relative reference
    R_RISCV_PCREL_LO12_I,
    /// PC-relative reference
    R_RISCV_PCREL_LO12_S,
    /// Absolute address
    R_RISCV_HI20,
    /// Absolute address
    R_RISCV_LO12_I,
    /// Absolute address
    R_RISCV_LO12_S,
    /// TLS LE thread offset
    R_RISCV_TPREL_HI20,
    /// TLS LE thread offset
    R_RISCV_TPREL_LO12_I,
    /// TLS LE thread offset
    R_RISCV_TPREL_LO12_S,
    /// TLS LE thread usage
    R_RISCV_TPREL_ADD,
    /// 8-bit label addition
    R_RISCV_ADD8,
    /// 16-bit label addition
    R_RISCV_ADD16,
    /// 32-bit label addition
    R_RISCV_ADD32,
    /// 64-bit label addition
    R_RISCV_ADD64,
    /// 8-bit label subtraction
    R_RISCV_SUB8,
    /// 16-bit label subtraction
    R_RISCV_SUB16,
    /// 32-bit label subtraction
    R_RISCV_SUB32,
    /// 64-bit label subtraction
    R_RISCV_SUB64,
    /// GNU C++ vtable hierarchy
    R_RISCV_GNU_VTINHERIT,
    /// GNU C++ vtable member usage
    R_RISCV_GNU_VTENTRY,
    /// Alignment statement
    R_RISCV_ALIGN,
    /// PC-relative branch offset
    R_RISCV_RVC_BRANCH,
    /// PC-relative jump offset
    R_RISCV_RVC_JUMP,
    /// Absolute address
    R_RISCV_RVC_LUI,
    /// GP-relative reference
    R_RISCV_GPREL_I,
    /// GP-relative reference
    R_RISCV_GPREL_S,
    /// TP-relative TLS LE load
    R_RISCV_TPREL_I,
    /// TP-relative TLS LE store
    R_RISCV_TPREL_S,
    /// Instruction pair can be relaxed
    R_RISCV_RELAX,
    /// Local label subtraction
    R_RISCV_SUB6,
    /// Local label assignment (psABI; the previous "subtraction" wording was
    /// a copy-paste from the SUB* variants)
    R_RISCV_SET6,
    /// Local label assignment
    R_RISCV_SET8,
    /// Local label assignment
    R_RISCV_SET16,
    /// Local label assignment
    R_RISCV_SET32,
    /// Any relocation number without an assigned psABI name
    Unknown(u32),
}
impl RelocationTypes {
    /// Construct a riscv::RelocationTypes from a raw ELF `r_type` value.
    ///
    /// Values 12-15 are reserved by the psABI and — like every other
    /// unassigned value — are preserved as `Unknown(value)`.
    pub fn from(typ: u32) -> RelocationTypes {
        use RelocationTypes::*;
        match typ {
            0 => R_RISCV_NONE,
            1 => R_RISCV_32,
            2 => R_RISCV_64,
            3 => R_RISCV_RELATIVE,
            4 => R_RISCV_COPY,
            5 => R_RISCV_JUMP_SLOT,
            6 => R_RISCV_TLS_DTPMOD32,
            7 => R_RISCV_TLS_DTPMOD64,
            8 => R_RISCV_TLS_DTPREL32,
            9 => R_RISCV_TLS_DTPREL64,
            10 => R_RISCV_TLS_TPREL32,
            11 => R_RISCV_TLS_TPREL64,
            // 12-15 are reserved: they fall through to Unknown below.
            16 => R_RISCV_BRANCH,
            17 => R_RISCV_JAL,
            18 => R_RISCV_CALL,
            19 => R_RISCV_CALL_PLT,
            20 => R_RISCV_GOT_HI20,
            21 => R_RISCV_TLS_GOT_HI20,
            22 => R_RISCV_TLS_GD_HI20,
            23 => R_RISCV_PCREL_HI20,
            24 => R_RISCV_PCREL_LO12_I,
            25 => R_RISCV_PCREL_LO12_S,
            26 => R_RISCV_HI20,
            27 => R_RISCV_LO12_I,
            28 => R_RISCV_LO12_S,
            29 => R_RISCV_TPREL_HI20,
            30 => R_RISCV_TPREL_LO12_I,
            31 => R_RISCV_TPREL_LO12_S,
            32 => R_RISCV_TPREL_ADD,
            33 => R_RISCV_ADD8,
            34 => R_RISCV_ADD16,
            35 => R_RISCV_ADD32,
            36 => R_RISCV_ADD64,
            37 => R_RISCV_SUB8,
            38 => R_RISCV_SUB16,
            39 => R_RISCV_SUB32,
            40 => R_RISCV_SUB64,
            41 => R_RISCV_GNU_VTINHERIT,
            42 => R_RISCV_GNU_VTENTRY,
            43 => R_RISCV_ALIGN,
            44 => R_RISCV_RVC_BRANCH,
            45 => R_RISCV_RVC_JUMP,
            46 => R_RISCV_RVC_LUI,
            47 => R_RISCV_GPREL_I,
            48 => R_RISCV_GPREL_S,
            49 => R_RISCV_TPREL_I,
            50 => R_RISCV_TPREL_S,
            51 => R_RISCV_RELAX,
            52 => R_RISCV_SUB6,
            53 => R_RISCV_SET6,
            54 => R_RISCV_SET8,
            55 => R_RISCV_SET16,
            56 => R_RISCV_SET32,
            x => Unknown(x),
        }
    }
}
| rust | Apache-2.0 | 3363aba9a9fefdb1c4edd9f3d992020b532ed890 | 2026-01-04T20:20:38.811615Z | false |
gz/rust-elfloader | https://github.com/gz/rust-elfloader/blob/3363aba9a9fefdb1c4edd9f3d992020b532ed890/src/arch/arm/mod.rs | src/arch/arm/mod.rs | // Should be in xmas-elf see: https://github.com/nrc/xmas-elf/issues/54
/// Relocation types for ARM 32-bit.
///
/// Based on "ELF for the ARM® Architecture" pdf.
/// Document number: ARM IHI 0044F, current through ABI release 2.10.
/// Date of issue: 24th November 2015.
///
/// The following nomenclature is used for the operation:
/// - S (when used on its own) is the address of the symbol.
/// - A is the addend for the relocation.
/// - P is the address of the place being relocated (derived from r_offset).
/// - Pa is the adjusted address of the place being relocated, defined as (P & 0xFFFFFFFC).
/// - T is 1 if the target symbol S has type STT_FUNC and the symbol addresses a Thumb instruction;
///   it is 0 otherwise.
/// - B(S) is the addressing origin of the output segment defining the symbol S. The origin is
///   not required to be the base address of the segment. This value must always be word-aligned.
/// - GOT_ORG is the addressing origin of the Global Offset Table (the indirection table for imported
///   data addresses). This value must always be word-aligned. See §4.6.1.8, Proxy generating
///   relocations.
/// - GOT(S) is the address of the GOT entry for the symbol S.
///
/// Variants follow the numbering decoded by [`RelocationTypes::from`];
/// unassigned relocation numbers decode to `Unknown`.
#[derive(Eq, PartialEq, Debug, Clone, Copy)]
#[allow(non_camel_case_types)]
#[repr(u32)]
pub enum RelocationTypes {
    /// Static, Miscellaneous.
    R_ARM_NONE,
    /// Deprecated, ARM, ((S + A) | T) – P.
    R_ARM_PC24,
    /// Static, Data, (S + A) | T.
    R_ARM_ABS32,
    /// Static, Data, ((S + A) | T) – P.
    R_ARM_REL32,
    /// Static, ARM, S + A – P.
    R_ARM_LDR_PC_G0,
    /// Static, Data, S + A.
    R_ARM_ABS16,
    /// Static, ARM, S + A.
    R_ARM_ABS12,
    /// Static, Thumb16, S + A.
    R_ARM_THM_ABS5,
    /// Static, Data, S + A.
    R_ARM_ABS8,
    /// Static, Data, ((S + A) | T) – B(S).
    R_ARM_SBREL32,
    /// Static, Thumb32, ((S + A) | T) – P.
    R_ARM_THM_CALL,
    /// Static, Thumb16, S + A – Pa.
    R_ARM_THM_PC8,
    /// Dynamic, Data, ΔB(S) + A.
    R_ARM_BREL_ADJ,
    /// Dynamic, Data.
    R_ARM_TLS_DESC,
    /// Obsolete, Encoding reserved for future Dynamic relocations.
    R_ARM_THM_SWI8,
    /// Obsolete, Encoding reserved for future Dynamic relocations.
    R_ARM_XPC25,
    /// Obsolete, Encoding reserved for future Dynamic relocations.
    R_ARM_THM_XPC22,
    /// Dynamic, Data, Module[S].
    R_ARM_TLS_DTPMOD32,
    /// Dynamic, Data, S + A – TLS.
    R_ARM_TLS_DTPOFF32,
    /// Dynamic, Data, S + A – tp.
    R_ARM_TLS_TPOFF32,
    /// Dynamic, Miscellaneous.
    R_ARM_COPY,
    /// Dynamic, Data, (S + A) | T.
    R_ARM_GLOB_DAT,
    /// Dynamic, Data, (S + A) | T.
    R_ARM_JUMP_SLOT,
    /// Dynamic, Data, B(S) + A [Note: see Table 4-18].
    R_ARM_RELATIVE,
    /// Static, Data, ((S + A) | T) – GOT_ORG.
    R_ARM_GOTOFF32,
    /// Static, Data, B(S) + A – P.
    R_ARM_BASE_PREL,
    /// Static, Data, GOT(S) + A – GOT_ORG.
    R_ARM_GOT_BREL,
    /// Deprecated, ARM, ((S + A) | T) – P.
    R_ARM_PLT32,
    /// Static, ARM, ((S + A) | T) – P.
    R_ARM_CALL,
    /// Static, ARM, ((S + A) | T) – P.
    R_ARM_JUMP24,
    /// Static, Thumb32, ((S + A) | T) – P.
    R_ARM_THM_JUMP24,
    /// Static, Data, B(S) + A.
    R_ARM_BASE_ABS,
    /// Obsolete, Note, – Legacy (ARM ELF B02) names have been retained for these obsolete relocations.
    R_ARM_ALU_PCREL_7_0,
    /// Obsolete, Note, – Legacy (ARM ELF B02) names have been retained for these obsolete relocations.
    R_ARM_ALU_PCREL_15_8,
    /// Obsolete, Note, – Legacy (ARM ELF B02) names have been retained for these obsolete relocations.
    R_ARM_ALU_PCREL_23_15,
    /// Deprecated, ARM, S + A – B(S).
    R_ARM_LDR_SBREL_11_0_NC,
    /// Deprecated, ARM, S + A – B(S).
    R_ARM_ALU_SBREL_19_12_NC,
    /// Deprecated, ARM, S + A – B(S).
    R_ARM_ALU_SBREL_27_20_CK,
    /// Static, Miscellaneous, (S + A) | T or ((S + A) | T) – P.
    R_ARM_TARGET1,
    /// Deprecated, Data, ((S + A) | T) – B(S).
    R_ARM_SBREL31,
    /// Static, Miscellaneous.
    R_ARM_V4BX,
    /// Static, Miscellaneous.
    R_ARM_TARGET2,
    /// Static, Data, ((S + A) | T) – P.
    R_ARM_PREL31,
    /// Static, ARM, (S + A) | T.
    R_ARM_MOVW_ABS_NC,
    /// Static, ARM, S + A.
    R_ARM_MOVT_ABS,
    /// Static, ARM, ((S + A) | T) – P.
    R_ARM_MOVW_PREL_NC,
    /// Static, ARM, S + A – P.
    R_ARM_MOVT_PREL,
    /// Static, Thumb32, (S + A) | T.
    R_ARM_THM_MOVW_ABS_NC,
    /// Static, Thumb32, S + A.
    R_ARM_THM_MOVT_ABS,
    /// Static, Thumb32, ((S + A) | T) – P.
    R_ARM_THM_MOVW_PREL_NC,
    /// Static, Thumb32, S + A – P.
    R_ARM_THM_MOVT_PREL,
    /// Static, Thumb32, ((S + A) | T) – P.
    R_ARM_THM_JUMP19,
    /// Static, Thumb16, S + A – P.
    R_ARM_THM_JUMP6,
    /// Static, Thumb32, ((S + A) | T) – Pa.
    R_ARM_THM_ALU_PREL_11_0,
    /// Static, Thumb32, S + A – Pa.
    R_ARM_THM_PC12,
    /// Static, Data, S + A.
    R_ARM_ABS32_NOI,
    /// Static, Data, S + A – P.
    R_ARM_REL32_NOI,
    /// Static, ARM, ((S + A) | T) – P.
    R_ARM_ALU_PC_G0_NC,
    /// Static, ARM, ((S + A) | T) – P.
    R_ARM_ALU_PC_G0,
    /// Static, ARM, ((S + A) | T) – P.
    R_ARM_ALU_PC_G1_NC,
    /// Static, ARM, ((S + A) | T) – P.
    R_ARM_ALU_PC_G1,
    /// Static, ARM, ((S + A) | T) – P.
    R_ARM_ALU_PC_G2,
    /// Static, ARM, S + A – P.
    R_ARM_LDR_PC_G1,
    /// Static, ARM, S + A – P.
    R_ARM_LDR_PC_G2,
    /// Static, ARM, S + A – P.
    R_ARM_LDRS_PC_G0,
    /// Static, ARM, S + A – P.
    R_ARM_LDRS_PC_G1,
    /// Static, ARM, S + A – P.
    R_ARM_LDRS_PC_G2,
    /// Static, ARM, S + A – P.
    R_ARM_LDC_PC_G0,
    /// Static, ARM, S + A – P.
    R_ARM_LDC_PC_G1,
    /// Static, ARM, S + A – P.
    R_ARM_LDC_PC_G2,
    /// Static, ARM, ((S + A) | T) – B(S).
    R_ARM_ALU_SB_G0_NC,
    /// Static, ARM, ((S + A) | T) – B(S).
    R_ARM_ALU_SB_G0,
    /// Static, ARM, ((S + A) | T) – B(S).
    R_ARM_ALU_SB_G1_NC,
    /// Static, ARM, ((S + A) | T) – B(S).
    R_ARM_ALU_SB_G1,
    /// Static, ARM, ((S + A) | T) – B(S).
    R_ARM_ALU_SB_G2,
    /// Static, ARM, S + A – B(S).
    R_ARM_LDR_SB_G0,
    /// Static, ARM, S + A – B(S).
    R_ARM_LDR_SB_G1,
    /// Static, ARM, S + A – B(S).
    R_ARM_LDR_SB_G2,
    /// Static, ARM, S + A – B(S).
    R_ARM_LDRS_SB_G0,
    /// Static, ARM, S + A – B(S).
    R_ARM_LDRS_SB_G1,
    /// Static, ARM, S + A – B(S).
    R_ARM_LDRS_SB_G2,
    /// Static, ARM, S + A – B(S).
    R_ARM_LDC_SB_G0,
    /// Static, ARM, S + A – B(S).
    R_ARM_LDC_SB_G1,
    /// Static, ARM, S + A – B(S).
    R_ARM_LDC_SB_G2,
    /// Static, ARM, ((S + A) | T) – B(S).
    R_ARM_MOVW_BREL_NC,
    /// Static, ARM, S + A – B(S).
    R_ARM_MOVT_BREL,
    /// Static, ARM, ((S + A) | T) – B(S).
    R_ARM_MOVW_BREL,
    /// Static, Thumb32, ((S + A) | T) – B(S).
    R_ARM_THM_MOVW_BREL_NC,
    /// Static, Thumb32, S + A – B(S).
    R_ARM_THM_MOVT_BREL,
    /// Static, Thumb32, ((S + A) | T) – B(S).
    R_ARM_THM_MOVW_BREL,
    /// Static, Data.
    R_ARM_TLS_GOTDESC,
    /// Static, ARM,
    R_ARM_TLS_CALL,
    /// Static, ARM, TLS relaxation.
    R_ARM_TLS_DESCSEQ,
    /// Static, Thumb32.
    R_ARM_THM_TLS_CALL,
    /// Static, Data, PLT(S) + A.
    R_ARM_PLT32_ABS,
    /// Static, Data, GOT(S) + A.
    R_ARM_GOT_ABS,
    /// Static, Data, GOT(S) + A – P.
    R_ARM_GOT_PREL,
    /// Static, ARM, GOT(S) + A – GOT_ORG.
    R_ARM_GOT_BREL12,
    /// Static, ARM, S + A – GOT_ORG.
    R_ARM_GOTOFF12,
    /// Static, Miscellaneous.
    R_ARM_GOTRELAX,
    /// Deprecated, Data; GNU C++ vtable member usage (GNU extension).
    R_ARM_GNU_VTENTRY,
    /// Deprecated, Data; GNU C++ vtable hierarchy (GNU extension).
    R_ARM_GNU_VTINHERIT,
    /// Static, Thumb16, S + A – P.
    R_ARM_THM_JUMP11,
    /// Static, Thumb16, S + A – P.
    R_ARM_THM_JUMP8,
    /// Static, Data, GOT(S) + A – P.
    R_ARM_TLS_GD32,
    /// Static, Data, GOT(S) + A – P.
    R_ARM_TLS_LDM32,
    /// Static, Data, S + A – TLS.
    R_ARM_TLS_LDO32,
    /// Static, Data, GOT(S) + A – P.
    R_ARM_TLS_IE32,
    /// Static, Data, S + A – tp.
    R_ARM_TLS_LE32,
    /// Static, ARM, S + A – TLS.
    R_ARM_TLS_LDO12,
    /// Static, ARM, S + A – tp.
    R_ARM_TLS_LE12,
    /// Static, ARM, GOT(S) + A – GOT_ORG.
    R_ARM_TLS_IE12GP,
    /// Private 0.
    R_ARM_PRIVATE_0,
    /// Private 1.
    R_ARM_PRIVATE_1,
    /// Private 2.
    R_ARM_PRIVATE_2,
    /// Private 3.
    R_ARM_PRIVATE_3,
    /// Private 4.
    R_ARM_PRIVATE_4,
    /// Private 5.
    R_ARM_PRIVATE_5,
    /// Private 6.
    R_ARM_PRIVATE_6,
    /// Private 7.
    R_ARM_PRIVATE_7,
    /// Private 8.
    R_ARM_PRIVATE_8,
    /// Private 9.
    R_ARM_PRIVATE_9,
    /// Private 10.
    R_ARM_PRIVATE_10,
    /// Private 11.
    R_ARM_PRIVATE_11,
    /// Private 12.
    R_ARM_PRIVATE_12,
    /// Private 13.
    R_ARM_PRIVATE_13,
    /// Private 14.
    R_ARM_PRIVATE_14,
    /// Private 15.
    R_ARM_PRIVATE_15,
    /// Obsolete.
    R_ARM_ME_TOO,
    /// Static, Thumb16.
    R_ARM_THM_TLS_DESCSEQ16,
    /// Static, Thumb32.
    R_ARM_THM_TLS_DESCSEQ32,
    /// Static, Thumb32, GOT(S) + A – GOT_ORG.
    R_ARM_THM_GOT_BREL12,
    /// Static, Thumb16, (S + A) | T.
    R_ARM_THM_ALU_ABS_G0_NC,
    /// Static, Thumb16, S + A.
    R_ARM_THM_ALU_ABS_G1_NC,
    /// Static, Thumb16, S + A.
    R_ARM_THM_ALU_ABS_G2_NC,
    /// Static, Thumb16, S + A.
    R_ARM_THM_ALU_ABS_G3,
    /// Any relocation number without an assigned name
    Unknown(u32),
}
impl RelocationTypes {
    /// Construct an arm::RelocationTypes from a raw ELF `r_type` value.
    ///
    /// Numbers without an assigned name (anything above 135) are preserved
    /// as `Unknown(value)`.
    pub fn from(typ: u32) -> RelocationTypes {
        use RelocationTypes::*;
        match typ {
            0 => R_ARM_NONE,
            1 => R_ARM_PC24,
            2 => R_ARM_ABS32,
            3 => R_ARM_REL32,
            4 => R_ARM_LDR_PC_G0,
            5 => R_ARM_ABS16,
            6 => R_ARM_ABS12,
            7 => R_ARM_THM_ABS5,
            8 => R_ARM_ABS8,
            9 => R_ARM_SBREL32,
            10 => R_ARM_THM_CALL,
            11 => R_ARM_THM_PC8,
            12 => R_ARM_BREL_ADJ,
            13 => R_ARM_TLS_DESC,
            14 => R_ARM_THM_SWI8,
            15 => R_ARM_XPC25,
            16 => R_ARM_THM_XPC22,
            17 => R_ARM_TLS_DTPMOD32,
            18 => R_ARM_TLS_DTPOFF32,
            19 => R_ARM_TLS_TPOFF32,
            20 => R_ARM_COPY,
            21 => R_ARM_GLOB_DAT,
            22 => R_ARM_JUMP_SLOT,
            23 => R_ARM_RELATIVE,
            24 => R_ARM_GOTOFF32,
            25 => R_ARM_BASE_PREL,
            26 => R_ARM_GOT_BREL,
            27 => R_ARM_PLT32,
            28 => R_ARM_CALL,
            29 => R_ARM_JUMP24,
            30 => R_ARM_THM_JUMP24,
            31 => R_ARM_BASE_ABS,
            32 => R_ARM_ALU_PCREL_7_0,
            33 => R_ARM_ALU_PCREL_15_8,
            34 => R_ARM_ALU_PCREL_23_15,
            35 => R_ARM_LDR_SBREL_11_0_NC,
            36 => R_ARM_ALU_SBREL_19_12_NC,
            37 => R_ARM_ALU_SBREL_27_20_CK,
            38 => R_ARM_TARGET1,
            39 => R_ARM_SBREL31,
            40 => R_ARM_V4BX,
            41 => R_ARM_TARGET2,
            42 => R_ARM_PREL31,
            43 => R_ARM_MOVW_ABS_NC,
            44 => R_ARM_MOVT_ABS,
            45 => R_ARM_MOVW_PREL_NC,
            46 => R_ARM_MOVT_PREL,
            47 => R_ARM_THM_MOVW_ABS_NC,
            48 => R_ARM_THM_MOVT_ABS,
            49 => R_ARM_THM_MOVW_PREL_NC,
            50 => R_ARM_THM_MOVT_PREL,
            51 => R_ARM_THM_JUMP19,
            52 => R_ARM_THM_JUMP6,
            53 => R_ARM_THM_ALU_PREL_11_0,
            54 => R_ARM_THM_PC12,
            55 => R_ARM_ABS32_NOI,
            56 => R_ARM_REL32_NOI,
            57 => R_ARM_ALU_PC_G0_NC,
            58 => R_ARM_ALU_PC_G0,
            59 => R_ARM_ALU_PC_G1_NC,
            60 => R_ARM_ALU_PC_G1,
            61 => R_ARM_ALU_PC_G2,
            62 => R_ARM_LDR_PC_G1,
            63 => R_ARM_LDR_PC_G2,
            64 => R_ARM_LDRS_PC_G0,
            65 => R_ARM_LDRS_PC_G1,
            66 => R_ARM_LDRS_PC_G2,
            67 => R_ARM_LDC_PC_G0,
            68 => R_ARM_LDC_PC_G1,
            69 => R_ARM_LDC_PC_G2,
            70 => R_ARM_ALU_SB_G0_NC,
            71 => R_ARM_ALU_SB_G0,
            72 => R_ARM_ALU_SB_G1_NC,
            73 => R_ARM_ALU_SB_G1,
            74 => R_ARM_ALU_SB_G2,
            75 => R_ARM_LDR_SB_G0,
            76 => R_ARM_LDR_SB_G1,
            77 => R_ARM_LDR_SB_G2,
            78 => R_ARM_LDRS_SB_G0,
            79 => R_ARM_LDRS_SB_G1,
            80 => R_ARM_LDRS_SB_G2,
            81 => R_ARM_LDC_SB_G0,
            82 => R_ARM_LDC_SB_G1,
            83 => R_ARM_LDC_SB_G2,
            84 => R_ARM_MOVW_BREL_NC,
            85 => R_ARM_MOVT_BREL,
            86 => R_ARM_MOVW_BREL,
            87 => R_ARM_THM_MOVW_BREL_NC,
            88 => R_ARM_THM_MOVT_BREL,
            89 => R_ARM_THM_MOVW_BREL,
            90 => R_ARM_TLS_GOTDESC,
            91 => R_ARM_TLS_CALL,
            92 => R_ARM_TLS_DESCSEQ,
            93 => R_ARM_THM_TLS_CALL,
            94 => R_ARM_PLT32_ABS,
            95 => R_ARM_GOT_ABS,
            96 => R_ARM_GOT_PREL,
            97 => R_ARM_GOT_BREL12,
            98 => R_ARM_GOTOFF12,
            99 => R_ARM_GOTRELAX,
            100 => R_ARM_GNU_VTENTRY,
            101 => R_ARM_GNU_VTINHERIT,
            102 => R_ARM_THM_JUMP11,
            103 => R_ARM_THM_JUMP8,
            104 => R_ARM_TLS_GD32,
            105 => R_ARM_TLS_LDM32,
            106 => R_ARM_TLS_LDO32,
            107 => R_ARM_TLS_IE32,
            108 => R_ARM_TLS_LE32,
            109 => R_ARM_TLS_LDO12,
            110 => R_ARM_TLS_LE12,
            111 => R_ARM_TLS_IE12GP,
            112 => R_ARM_PRIVATE_0,
            113 => R_ARM_PRIVATE_1,
            114 => R_ARM_PRIVATE_2,
            115 => R_ARM_PRIVATE_3,
            116 => R_ARM_PRIVATE_4,
            117 => R_ARM_PRIVATE_5,
            118 => R_ARM_PRIVATE_6,
            119 => R_ARM_PRIVATE_7,
            120 => R_ARM_PRIVATE_8,
            121 => R_ARM_PRIVATE_9,
            122 => R_ARM_PRIVATE_10,
            123 => R_ARM_PRIVATE_11,
            124 => R_ARM_PRIVATE_12,
            125 => R_ARM_PRIVATE_13,
            126 => R_ARM_PRIVATE_14,
            127 => R_ARM_PRIVATE_15,
            128 => R_ARM_ME_TOO,
            129 => R_ARM_THM_TLS_DESCSEQ16,
            130 => R_ARM_THM_TLS_DESCSEQ32,
            131 => R_ARM_THM_GOT_BREL12,
            132 => R_ARM_THM_ALU_ABS_G0_NC,
            133 => R_ARM_THM_ALU_ABS_G1_NC,
            134 => R_ARM_THM_ALU_ABS_G2_NC,
            135 => R_ARM_THM_ALU_ABS_G3,
            x => Unknown(x),
        }
    }
}
| rust | Apache-2.0 | 3363aba9a9fefdb1c4edd9f3d992020b532ed890 | 2026-01-04T20:20:38.811615Z | false |
gz/rust-elfloader | https://github.com/gz/rust-elfloader/blob/3363aba9a9fefdb1c4edd9f3d992020b532ed890/src/arch/x86_64/test.rs | src/arch/x86_64/test.rs | use std::fs;
use crate::arch::test::*;
use crate::*;
/// Verifies that loading the PIE binary `test/test.x86_64` produces the
/// allocate/load/relocate actions described by its program headers and
/// R_AMD64_RELATIVE relocation entries.
#[test]
fn load_pie_elf() {
    init();
    let binary_blob = fs::read("test/test.x86_64").expect("Can't read binary");
    let binary = ElfBinary::new(binary_blob.as_slice()).expect("Got proper ELF file");
    assert!(binary.is_pie());
    let mut loader = TestLoader::new(0x1000_0000);
    binary.load(&mut loader).expect("Can't load?");
    for action in loader.actions.iter() {
        println!("{:?}", action);
    }
    // `contains` replaces the former `iter().find(..).is_some()` chains
    // (clippy::search_is_some) — same semantics, clearer intent.
    assert!(loader
        .actions
        .contains(&LoaderAction::Allocate(VAddr::from(0x0u64), 0x888, Flags(1 | 4))));
    assert!(loader
        .actions
        .contains(&LoaderAction::Allocate(VAddr::from(0x200db8u64), 0x260, Flags(2 | 4))));
    assert!(loader
        .actions
        .contains(&LoaderAction::Load(VAddr::from(0x0u64), 0x888)));
    assert!(loader
        .actions
        .contains(&LoaderAction::Load(VAddr::from(0x200db8u64), 0x258)));
    assert!(loader
        .actions
        .contains(&LoaderAction::Relocate(0x1000_0000 + 0x200db8, 0x1000_0000 + 0x000640)));
    assert!(loader
        .actions
        .contains(&LoaderAction::Relocate(0x1000_0000 + 0x200dc0, 0x1000_0000 + 0x000600)));
}
#[test]
fn check_nopie() {
    init();
    // A non-position-independent executable must not report itself as PIE.
    let blob = fs::read("test/test_nopie.x86_64").expect("Can't read binary");
    let binary = ElfBinary::new(blob.as_slice()).expect("Got proper ELF file");
    assert!(!binary.is_pie());
}
/// Verifies that loading `test/tls.x86_64` records the TLS template
/// described by the binary's TLS program header.
#[test]
fn check_tls() {
    init();
    let binary_blob = fs::read("test/tls.x86_64").expect("Can't read binary");
    let binary = ElfBinary::new(binary_blob.as_slice()).expect("Got proper ELF file");
    let mut loader = TestLoader::new(0x1000_0000);
    binary.load(&mut loader).expect("Can't load?");
    /*
    TLS produces entries of this form:
    pheader = Program header:
    type:             Ok(Tls)
    flags:            R
    offset:           0xdb4
    virtual address:  0x200db4
    physical address: 0x200db4
    file size:        0x4
    memory size:      0x8
    align:            0x4
    File size is 0x4 because we have one tdata entry; memory size
    is 8 because we also have one bss entry that needs to be written with zeroes.
    So to initialize TLS: we allocate zeroed memory of size `memory size`, then copy
    file size starting at virtual address in the beginning.
    */
    // `contains` replaces the former `iter().find(..).is_some()` chain
    // (clippy::search_is_some) — same semantics, clearer intent.
    assert!(loader
        .actions
        .contains(&LoaderAction::Tls(VAddr::from(0x200db4u64), 0x4, 0x8, 0x4)));
}
| rust | Apache-2.0 | 3363aba9a9fefdb1c4edd9f3d992020b532ed890 | 2026-01-04T20:20:38.811615Z | false |
gz/rust-elfloader | https://github.com/gz/rust-elfloader/blob/3363aba9a9fefdb1c4edd9f3d992020b532ed890/src/arch/x86_64/mod.rs | src/arch/x86_64/mod.rs | #[cfg(test)]
mod test;
// Should be in xmas-elf see: https://github.com/nrc/xmas-elf/issues/54
/// x86-64 (AMD64) relocation types, as defined in the System V AMD64 psABI.
///
/// Variants follow the psABI numbering decoded by [`RelocationTypes::from`];
/// unassigned relocation numbers decode to `Unknown`.
#[derive(Eq, PartialEq, Debug, Clone, Copy)]
#[allow(non_camel_case_types)]
#[repr(u32)]
pub enum RelocationTypes {
    /// No relocation.
    R_AMD64_NONE,
    /// Add 64 bit symbol value.
    R_AMD64_64,
    /// PC-relative 32 bit signed sym value.
    R_AMD64_PC32,
    /// PC-relative 32 bit GOT offset.
    R_AMD64_GOT32,
    /// PC-relative 32 bit PLT offset.
    R_AMD64_PLT32,
    /// Copy data from shared object.
    R_AMD64_COPY,
    /// Set GOT entry to data address.
    R_AMD64_GLOB_DAT,
    /// Set GOT entry to code address.
    R_AMD64_JMP_SLOT,
    /// Add load address of shared object.
    R_AMD64_RELATIVE,
    /// Add 32 bit signed pcrel offset to GOT.
    R_AMD64_GOTPCREL,
    /// Add 32 bit zero extended symbol value
    R_AMD64_32,
    /// Add 32 bit sign extended symbol value
    R_AMD64_32S,
    /// Add 16 bit zero extended symbol value
    R_AMD64_16,
    /// Add 16 bit signed extended pc relative symbol value
    R_AMD64_PC16,
    /// Add 8 bit zero extended symbol value
    R_AMD64_8,
    /// Add 8 bit signed extended pc relative symbol value
    R_AMD64_PC8,
    /// ID of module containing symbol
    R_AMD64_DTPMOD64,
    /// Offset in TLS block
    R_AMD64_DTPOFF64,
    /// Offset in static TLS block
    R_AMD64_TPOFF64,
    /// PC relative offset to GD GOT entry
    R_AMD64_TLSGD,
    /// PC relative offset to LD GOT entry
    R_AMD64_TLSLD,
    /// Offset in TLS block
    R_AMD64_DTPOFF32,
    /// PC relative offset to IE GOT entry
    R_AMD64_GOTTPOFF,
    /// Offset in static TLS block
    R_AMD64_TPOFF32,
    /// Any relocation number without an assigned psABI name
    Unknown(u32),
}
impl RelocationTypes {
    /// Construct a new x86_64::RelocationTypes from a raw ELF `r_type` value.
    ///
    /// (Rustdoc `///` instead of the previous plain `//`, matching the
    /// sibling architecture modules.) Unassigned values are preserved as
    /// `Unknown(value)`.
    pub fn from(typ: u32) -> RelocationTypes {
        use RelocationTypes::*;
        match typ {
            0 => R_AMD64_NONE,
            1 => R_AMD64_64,
            2 => R_AMD64_PC32,
            3 => R_AMD64_GOT32,
            4 => R_AMD64_PLT32,
            5 => R_AMD64_COPY,
            6 => R_AMD64_GLOB_DAT,
            7 => R_AMD64_JMP_SLOT,
            8 => R_AMD64_RELATIVE,
            9 => R_AMD64_GOTPCREL,
            10 => R_AMD64_32,
            11 => R_AMD64_32S,
            12 => R_AMD64_16,
            13 => R_AMD64_PC16,
            14 => R_AMD64_8,
            15 => R_AMD64_PC8,
            16 => R_AMD64_DTPMOD64,
            17 => R_AMD64_DTPOFF64,
            18 => R_AMD64_TPOFF64,
            19 => R_AMD64_TLSGD,
            20 => R_AMD64_TLSLD,
            21 => R_AMD64_DTPOFF32,
            22 => R_AMD64_GOTTPOFF,
            23 => R_AMD64_TPOFF32,
            x => Unknown(x),
        }
    }
}
| rust | Apache-2.0 | 3363aba9a9fefdb1c4edd9f3d992020b532ed890 | 2026-01-04T20:20:38.811615Z | false |
gz/rust-elfloader | https://github.com/gz/rust-elfloader/blob/3363aba9a9fefdb1c4edd9f3d992020b532ed890/src/arch/aarch64/test.rs | src/arch/aarch64/test.rs | use std::fs;
use crate::arch::test::*;
use crate::*;
#[test]
fn load_pie_elf() {
    init();
    let blob = fs::read("test/test.aarch64").expect("Can't read binary");
    let binary = ElfBinary::new(blob.as_slice()).expect("Got proper ELF file");
    assert!(binary.is_pie());
    let mut loader = TestLoader::new(0x1000_0000);
    binary.load(&mut loader).expect("Can't load?");
    for action in &loader.actions {
        println!("{:?}", action);
    }
    // Allocate/load expectations come from `readelf -l test/test.aarch64`:
    //  Type  Offset   VirtAddr           PhysAddr           FileSiz  MemSiz   Flg Align
    //  LOAD  0x000000 0x0000000000000000 0x0000000000000000 0x0008cc 0x0008cc R E 0x10000
    //  LOAD  0x000d90 0x0000000000010d90 0x0000000000010d90 0x000280 0x000288 RW  0x10000
    //
    // Relocation expectations come from `readelf -r test/test.aarch64`,
    // section '.rela.dyn' (R_AARCH64_RELATIVE entries):
    //  0000000000010d90 R_AARCH64_RELATIVE  750
    //  0000000000010d98 R_AARCH64_RELATIVE  700
    //  0000000000010ff0 R_AARCH64_RELATIVE  754
    //  0000000000011008 R_AARCH64_RELATIVE  11008
    // The R_AARCH64_GLOB_DAT and R_AARCH64_JUMP_SLOT entries are ignored by
    // the test loader, so exactly these eight actions must be recorded,
    // in this order.
    let vbase = 0x1000_0000u64;
    let expected = [
        LoaderAction::Allocate(VAddr::from(0x0u64), 0x8cc, Flags(1 | 4)),
        LoaderAction::Allocate(VAddr::from(0x10d90u64), 0x288, Flags(0b110)),
        LoaderAction::Load(VAddr::from(0x0u64), 0x8cc),
        LoaderAction::Load(VAddr::from(0x10d90u64), 0x280),
        LoaderAction::Relocate(vbase + 0x10d90, vbase + 0x750),
        LoaderAction::Relocate(vbase + 0x10d98, vbase + 0x700),
        LoaderAction::Relocate(vbase + 0x10ff0, vbase + 0x754),
        LoaderAction::Relocate(vbase + 0x11008, vbase + 0x11008),
    ];
    assert_eq!(loader.actions, expected);
}
#[test]
fn check_nopie() {
    init();
    // A non-PIE binary must be reported as position-dependent.
    let raw = fs::read("test/test_nopie.aarch64").expect("Can't read binary");
    let elf = ElfBinary::new(raw.as_slice()).expect("Got proper ELF file");
    assert!(!elf.is_pie());
}
#[test]
fn check_tls() {
    init();
    let binary_blob = fs::read("test/tls.aarch64").expect("Can't read binary");
    let binary = ElfBinary::new(binary_blob.as_slice()).expect("Got proper ELF file");
    let mut loader = TestLoader::new(0x1000_0000);
    binary.load(&mut loader).expect("Can't load?");
    /*
    TLS produces entries of this form:
    pheader = Program header:
    type:             Ok(Tls)
    flags:            R
    offset:           0xdb4
    virtual address:  0x200db4
    physical address: 0x200db4
    file size:        0x4
    memory size:      0x8
    align:            0x4
    File size is 0x4 because we have one tdata entry; memory size
    is 8 because we also have one bss entry that needs to be written with zeroes.
    So to initialize TLS: we allocate zeroed memory of size `memory size`, then copy
    file size starting at virtual address in the beginning.
    */
    // `Iterator::any` expresses the membership check directly
    // (clippy::search_is_some flags `find(..).is_some()`).
    assert!(loader
        .actions
        .iter()
        .any(|&x| x == LoaderAction::Tls(VAddr::from(0x10d8cu64), 0x4, 0x8, 0x4)));
}
| rust | Apache-2.0 | 3363aba9a9fefdb1c4edd9f3d992020b532ed890 | 2026-01-04T20:20:38.811615Z | false |
gz/rust-elfloader | https://github.com/gz/rust-elfloader/blob/3363aba9a9fefdb1c4edd9f3d992020b532ed890/src/arch/aarch64/mod.rs | src/arch/aarch64/mod.rs | //! AArch64 relocation types
//!
//! As defined in the "ELF for the ARM® 64-bit Architecture (AArch64)" doc.
//! Dcoument number: ARM IHI 0056B, current through AArch64 ABI release 1.0
#[cfg(test)]
mod test;
// Should be in xmas-elf see: https://github.com/nrc/xmas-elf/issues/54
/// AArch64 (ELF64) relocation types, identified by the ELF `r_type` field.
///
/// Numeric values are assigned in [`RelocationTypes::from`], following
/// "ELF for the ARM 64-bit Architecture (AArch64)" (ARM IHI 0056).
#[derive(Eq, PartialEq, Debug, Clone, Copy)]
#[allow(non_camel_case_types)]
#[repr(u32)]
pub enum RelocationTypes {
    R_ARM_NONE,
    R_AARCH64_NONE,
    // Static relocations (data and instruction)
    R_AARCH64_ABS64,
    R_AARCH64_ABS32,
    R_AARCH64_ABS16,
    R_AARCH64_PREL64,
    R_AARCH64_PREL32,
    R_AARCH64_PREL16,
    R_AARCH64_MOVW_UABS_G0,
    R_AARCH64_MOVW_UABS_G0_NC,
    R_AARCH64_MOVW_UABS_G1,
    R_AARCH64_MOVW_UABS_G1_NC,
    R_AARCH64_MOVW_UABS_G2,
    R_AARCH64_MOVW_UABS_G2_NC,
    R_AARCH64_MOVW_UABS_G3,
    R_AARCH64_MOVW_SABS_G0,
    R_AARCH64_MOVW_SABS_G1,
    R_AARCH64_MOVW_SABS_G2,
    R_AARCH64_LD_PREL_LO19,
    R_AARCH64_ADR_PREL_LO21,
    R_AARCH64_ADR_PREL_PG_HI21,
    R_AARCH64_ADR_PREL_PG_HI21_NC,
    R_AARCH64_ADD_ABS_LO12_NC,
    R_AARCH64_LDST8_ABS_LO12_NC,
    R_AARCH64_TSTBR14,
    R_AARCH64_CONDBR19,
    R_AARCH64_JUMP26,
    R_AARCH64_CALL26,
    R_AARCH64_LDST16_ABS_LO12_NC,
    R_AARCH64_LDST32_ABS_LO12_NC,
    R_AARCH64_LDST64_ABS_LO12_NC,
    R_AARCH64_LDST128_ABS_LO12_NC,
    R_AARCH64_MOVW_PREL_G0,
    R_AARCH64_MOVW_PREL_G0_NC,
    R_AARCH64_MOVW_PREL_G1,
    R_AARCH64_MOVW_PREL_G1_NC,
    R_AARCH64_MOVW_PREL_G2,
    R_AARCH64_MOVW_PREL_G2_NC,
    R_AARCH64_MOVW_PREL_G3,
    // GOT-relative relocations
    R_AARCH64_MOVW_GOTOFF_G0,
    R_AARCH64_MOVW_GOTOFF_G0_NC,
    R_AARCH64_MOVW_GOTOFF_G1,
    R_AARCH64_MOVW_GOTOFF_G1_NC,
    R_AARCH64_MOVW_GOTOFF_G2,
    R_AARCH64_MOVW_GOTOFF_G2_NC,
    R_AARCH64_MOVW_GOTOFF_G3,
    R_AARCH64_GOTREL64,
    R_AARCH64_GOTREL32,
    R_AARCH64_GOT_LD_PREL19,
    R_AARCH64_LD64_GOTOFF_LO15,
    R_AARCH64_ADR_GOT_PAGE,
    R_AARCH64_LD64_GOT_LO12_NC,
    R_AARCH64_LD64_GOTPAGE_LO15,
    // TLS relocations (general-dynamic, local-dynamic, initial-exec,
    // local-exec and TLS descriptor models)
    R_AARCH64_TLSGD_ADR_PREL21,
    R_AARCH64_TLSGD_ADR_PAGE21,
    R_AARCH64_TLSGD_ADD_LO12_NC,
    R_AARCH64_TLSGD_MOVW_G1,
    R_AARCH64_TLSGD_MOVW_G0_NC,
    R_AARCH64_TLSLD_ADR_PREL21,
    R_AARCH64_TLSLD_ADR_PAGE21,
    R_AARCH64_TLSLD_ADD_LO12_NC,
    R_AARCH64_TLSLD_MOVW_G1,
    R_AARCH64_TLSLD_MOVW_G0_NC,
    R_AARCH64_TLSLD_LD_PREL19,
    R_AARCH64_TLSLD_MOVW_DTPREL_G2,
    R_AARCH64_TLSLD_MOVW_DTPREL_G1,
    R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
    R_AARCH64_TLSLD_MOVW_DTPREL_G0,
    R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
    R_AARCH64_TLSLD_ADD_DTPREL_HI12,
    R_AARCH64_TLSLD_ADD_DTPREL_LO12,
    R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
    R_AARCH64_TLSLD_LDST8_DTPREL_LO12,
    R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
    R_AARCH64_TLSLD_LDST16_DTPREL_LO12,
    R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
    R_AARCH64_TLSLD_LDST32_DTPREL_LO12,
    R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
    R_AARCH64_TLSLD_LDST64_DTPREL_LO12,
    R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
    R_AARCH64_TLSLD_LDST128_DTPREL_LO12,
    R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC,
    R_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
    R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
    R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
    R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC,
    R_AARCH64_TLSIE_LD_GOTTPREL_PREL19,
    R_AARCH64_TLSLE_MOVW_TPREL_G2,
    R_AARCH64_TLSLE_MOVW_TPREL_G1,
    R_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
    R_AARCH64_TLSLE_MOVW_TPREL_G0,
    R_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
    R_AARCH64_TLSLE_ADD_TPREL_HI12,
    R_AARCH64_TLSLE_ADD_TPREL_LO12,
    R_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
    R_AARCH64_TLSLE_LDST8_TPREL_LO12,
    R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
    R_AARCH64_TLSLE_LDST16_TPREL_LO12,
    R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
    R_AARCH64_TLSLE_LDST32_TPREL_LO12,
    R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
    R_AARCH64_TLSLE_LDST64_TPREL_LO12,
    R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
    R_AARCH64_TLSLE_LDST128_TPREL_LO12,
    R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC,
    R_AARCH64_TLSDESC_LD_PREL19,
    R_AARCH64_TLSDESC_ADR_PREL21,
    R_AARCH64_TLSDESC_ADR_PAGE21,
    R_AARCH64_TLSDESC_LD64_LO12,
    R_AARCH64_TLSDESC_ADD_LO12,
    R_AARCH64_TLSDESC_OFF_G1,
    R_AARCH64_TLSDESC_OFF_G0_NC,
    R_AARCH64_TLSDESC_LDR,
    R_AARCH64_TLSDESC_ADD,
    R_AARCH64_TLSDESC_CALL,
    // Dynamic relocations (emitted into .rela.dyn / .rela.plt)
    R_AARCH64_COPY,
    R_AARCH64_GLOB_DAT,
    R_AARCH64_JUMP_SLOT,
    R_AARCH64_RELATIVE,
    R_AARCH64_TLS_DTPREL64,
    R_AARCH64_TLS_DTPMOD64,
    R_AARCH64_TLS_TPREL64,
    R_AARCH64_TLSDESC,
    R_AARCH64_IRELATIVE,
    /// Unknown
    Unknown(u32),
}
impl RelocationTypes {
    /// Construct new aarch64::RelocationTypes from a raw ELF `r_type` value.
    ///
    /// Any value without a match arm below (e.g. codes the ABI leaves
    /// unassigned, such as 281) is preserved as [`RelocationTypes::Unknown`].
    pub fn from(typ: u32) -> RelocationTypes {
        use RelocationTypes::*;
        // The weird ordering comes by copying directly from the manual which is
        // not consecutive either...
        match typ {
            0 => R_ARM_NONE,
            256 => R_AARCH64_NONE,
            257 => R_AARCH64_ABS64,
            258 => R_AARCH64_ABS32,
            259 => R_AARCH64_ABS16,
            260 => R_AARCH64_PREL64,
            261 => R_AARCH64_PREL32,
            262 => R_AARCH64_PREL16,
            263 => R_AARCH64_MOVW_UABS_G0,
            264 => R_AARCH64_MOVW_UABS_G0_NC,
            265 => R_AARCH64_MOVW_UABS_G1,
            266 => R_AARCH64_MOVW_UABS_G1_NC,
            267 => R_AARCH64_MOVW_UABS_G2,
            268 => R_AARCH64_MOVW_UABS_G2_NC,
            269 => R_AARCH64_MOVW_UABS_G3,
            270 => R_AARCH64_MOVW_SABS_G0,
            271 => R_AARCH64_MOVW_SABS_G1,
            272 => R_AARCH64_MOVW_SABS_G2,
            273 => R_AARCH64_LD_PREL_LO19,
            274 => R_AARCH64_ADR_PREL_LO21,
            275 => R_AARCH64_ADR_PREL_PG_HI21,
            276 => R_AARCH64_ADR_PREL_PG_HI21_NC,
            277 => R_AARCH64_ADD_ABS_LO12_NC,
            278 => R_AARCH64_LDST8_ABS_LO12_NC,
            279 => R_AARCH64_TSTBR14,
            280 => R_AARCH64_CONDBR19,
            // 281 has no assignment in the manual's table
            282 => R_AARCH64_JUMP26,
            283 => R_AARCH64_CALL26,
            284 => R_AARCH64_LDST16_ABS_LO12_NC,
            285 => R_AARCH64_LDST32_ABS_LO12_NC,
            286 => R_AARCH64_LDST64_ABS_LO12_NC,
            299 => R_AARCH64_LDST128_ABS_LO12_NC,
            287 => R_AARCH64_MOVW_PREL_G0,
            288 => R_AARCH64_MOVW_PREL_G0_NC,
            289 => R_AARCH64_MOVW_PREL_G1,
            290 => R_AARCH64_MOVW_PREL_G1_NC,
            291 => R_AARCH64_MOVW_PREL_G2,
            292 => R_AARCH64_MOVW_PREL_G2_NC,
            293 => R_AARCH64_MOVW_PREL_G3,
            300 => R_AARCH64_MOVW_GOTOFF_G0,
            301 => R_AARCH64_MOVW_GOTOFF_G0_NC,
            302 => R_AARCH64_MOVW_GOTOFF_G1,
            303 => R_AARCH64_MOVW_GOTOFF_G1_NC,
            304 => R_AARCH64_MOVW_GOTOFF_G2,
            305 => R_AARCH64_MOVW_GOTOFF_G2_NC,
            306 => R_AARCH64_MOVW_GOTOFF_G3,
            307 => R_AARCH64_GOTREL64,
            308 => R_AARCH64_GOTREL32,
            309 => R_AARCH64_GOT_LD_PREL19,
            310 => R_AARCH64_LD64_GOTOFF_LO15,
            311 => R_AARCH64_ADR_GOT_PAGE,
            312 => R_AARCH64_LD64_GOT_LO12_NC,
            313 => R_AARCH64_LD64_GOTPAGE_LO15,
            // TLS relocations start at 512
            512 => R_AARCH64_TLSGD_ADR_PREL21,
            513 => R_AARCH64_TLSGD_ADR_PAGE21,
            514 => R_AARCH64_TLSGD_ADD_LO12_NC,
            515 => R_AARCH64_TLSGD_MOVW_G1,
            516 => R_AARCH64_TLSGD_MOVW_G0_NC,
            517 => R_AARCH64_TLSLD_ADR_PREL21,
            518 => R_AARCH64_TLSLD_ADR_PAGE21,
            519 => R_AARCH64_TLSLD_ADD_LO12_NC,
            520 => R_AARCH64_TLSLD_MOVW_G1,
            521 => R_AARCH64_TLSLD_MOVW_G0_NC,
            522 => R_AARCH64_TLSLD_LD_PREL19,
            523 => R_AARCH64_TLSLD_MOVW_DTPREL_G2,
            524 => R_AARCH64_TLSLD_MOVW_DTPREL_G1,
            525 => R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
            526 => R_AARCH64_TLSLD_MOVW_DTPREL_G0,
            527 => R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
            528 => R_AARCH64_TLSLD_ADD_DTPREL_HI12,
            529 => R_AARCH64_TLSLD_ADD_DTPREL_LO12,
            530 => R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
            531 => R_AARCH64_TLSLD_LDST8_DTPREL_LO12,
            532 => R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
            533 => R_AARCH64_TLSLD_LDST16_DTPREL_LO12,
            534 => R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
            535 => R_AARCH64_TLSLD_LDST32_DTPREL_LO12,
            536 => R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
            537 => R_AARCH64_TLSLD_LDST64_DTPREL_LO12,
            538 => R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
            572 => R_AARCH64_TLSLD_LDST128_DTPREL_LO12,
            573 => R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC,
            539 => R_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
            540 => R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
            541 => R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
            542 => R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC,
            543 => R_AARCH64_TLSIE_LD_GOTTPREL_PREL19,
            544 => R_AARCH64_TLSLE_MOVW_TPREL_G2,
            545 => R_AARCH64_TLSLE_MOVW_TPREL_G1,
            546 => R_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
            547 => R_AARCH64_TLSLE_MOVW_TPREL_G0,
            548 => R_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
            549 => R_AARCH64_TLSLE_ADD_TPREL_HI12,
            550 => R_AARCH64_TLSLE_ADD_TPREL_LO12,
            551 => R_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
            552 => R_AARCH64_TLSLE_LDST8_TPREL_LO12,
            553 => R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
            554 => R_AARCH64_TLSLE_LDST16_TPREL_LO12,
            555 => R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
            556 => R_AARCH64_TLSLE_LDST32_TPREL_LO12,
            557 => R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
            558 => R_AARCH64_TLSLE_LDST64_TPREL_LO12,
            559 => R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
            570 => R_AARCH64_TLSLE_LDST128_TPREL_LO12,
            571 => R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC,
            560 => R_AARCH64_TLSDESC_LD_PREL19,
            561 => R_AARCH64_TLSDESC_ADR_PREL21,
            562 => R_AARCH64_TLSDESC_ADR_PAGE21,
            563 => R_AARCH64_TLSDESC_LD64_LO12,
            564 => R_AARCH64_TLSDESC_ADD_LO12,
            565 => R_AARCH64_TLSDESC_OFF_G1,
            566 => R_AARCH64_TLSDESC_OFF_G0_NC,
            567 => R_AARCH64_TLSDESC_LDR,
            568 => R_AARCH64_TLSDESC_ADD,
            569 => R_AARCH64_TLSDESC_CALL,
            // Dynamic relocations start at 1024
            1024 => R_AARCH64_COPY,
            1025 => R_AARCH64_GLOB_DAT,
            1026 => R_AARCH64_JUMP_SLOT,
            1027 => R_AARCH64_RELATIVE,
            1028 => R_AARCH64_TLS_DTPREL64,
            1029 => R_AARCH64_TLS_DTPMOD64,
            1030 => R_AARCH64_TLS_TPREL64,
            1031 => R_AARCH64_TLSDESC,
            1032 => R_AARCH64_IRELATIVE,
            x => Unknown(x),
        }
    }
}
| rust | Apache-2.0 | 3363aba9a9fefdb1c4edd9f3d992020b532ed890 | 2026-01-04T20:20:38.811615Z | false |
gz/rust-elfloader | https://github.com/gz/rust-elfloader/blob/3363aba9a9fefdb1c4edd9f3d992020b532ed890/src/arch/x86/test.rs | src/arch/x86/test.rs | use std::fs;
use crate::arch::test::*;
use crate::*;
#[test]
fn load_pie_elf() {
    init();
    let binary_blob = fs::read("test/test.x86").expect("Can't read binary");
    let binary = ElfBinary::new(binary_blob.as_slice()).expect("Got proper ELF file");
    assert!(binary.is_pie());

    let mut loader = TestLoader::new(0x1000_0000);
    binary.load(&mut loader).expect("Can't load?");
    for action in loader.actions.iter() {
        println!("{:?}", action);
    }

    // View allocate/load actions with readelf -l [binary]
    // Type Offset VirtAddr PhysAddr FileSiz MemSiz Flg Align
    // LOAD 0x000000 0x00000000 0x00000000 0x003bc 0x003bc R 0x1000
    // LOAD 0x001000 0x00001000 0x00001000 0x00288 0x00288 R E 0x1000
    // LOAD 0x002000 0x00002000 0x00002000 0x0016c 0x0016c R 0x1000
    // LOAD 0x002ef4 0x00003ef4 0x00003ef4 0x00128 0x0012c RW 0x1000
    //
    // `Iterator::any` replaces the noisier `find(..).is_some()` pattern
    // (clippy::search_is_some).
    assert!(loader
        .actions
        .iter()
        .any(|&x| x == LoaderAction::Allocate(VAddr::from(0x0u64), 0x003bc, Flags(4))));
    assert!(loader
        .actions
        .iter()
        .any(|&x| x == LoaderAction::Allocate(VAddr::from(0x1000u64), 0x288, Flags(1 | 4))));
    assert!(loader
        .actions
        .iter()
        .any(|&x| x == LoaderAction::Allocate(VAddr::from(0x002000u64), 0x0016c, Flags(4))));
    assert!(loader
        .actions
        .iter()
        .any(|&x| x == LoaderAction::Allocate(VAddr::from(0x3ef4u64), 0x12c, Flags(2 | 4))));
    assert!(loader
        .actions
        .iter()
        .any(|&x| x == LoaderAction::Load(VAddr::from(0x0u64), 0x003bc)));
    assert!(loader
        .actions
        .iter()
        .any(|&x| x == LoaderAction::Load(VAddr::from(0x001000u64), 0x00288)));
    assert!(loader
        .actions
        .iter()
        .any(|&x| x == LoaderAction::Load(VAddr::from(0x002000u64), 0x0016c)));
    assert!(loader
        .actions
        .iter()
        .any(|&x| x == LoaderAction::Load(VAddr::from(0x00003ef4u64), 0x00128)));

    // View relocation actions with readelf -r [binary]
    // Offset Info Type Sym.Value Sym. Name
    // 00003ef4 00000008 R_386_RELATIVE
    // 00003ef8 00000008 R_386_RELATIVE
    // 00003ff8 00000008 R_386_RELATIVE
    // 00004018 00000008 R_386_RELATIVE
    assert!(loader
        .actions
        .iter()
        .any(|&x| x == LoaderAction::Relocate(0x1000_0000 + 0x00003ef4, 0x1000_0000)));
    assert!(loader
        .actions
        .iter()
        .any(|&x| x == LoaderAction::Relocate(0x1000_0000 + 0x00003ef8, 0x1000_0000)));
}
#[test]
fn check_nopie() {
    init();
    // A non-PIE binary must be reported as position-dependent.
    let raw = fs::read("test/test_nopie.x86").expect("Can't read binary");
    let elf = ElfBinary::new(raw.as_slice()).expect("Got proper ELF file");
    assert!(!elf.is_pie());
}
#[test]
fn check_tls() {
    init();
    let binary_blob = fs::read("test/tls.x86").expect("Can't read binary");
    let binary = ElfBinary::new(binary_blob.as_slice()).expect("Got proper ELF file");
    let mut loader = TestLoader::new(0x1000_0000);
    binary.load(&mut loader).expect("Can't load?");
    /*
    TLS produces entries of this form:
    pheader = Program header:
    type:             Ok(Tls)
    flags:            R
    offset:           0x2ef0
    virtual address:  0x3ef0
    physical address: 0x3ef0
    file size:        0x4
    memory size:      0x8
    align:            0x4
    File size is 0x4 because we have one tdata entry; memory size
    is 8 because we also have one bss entry that needs to be written with zeroes.
    So to initialize TLS: we allocate zeroed memory of size `memory size`, then copy
    file size starting at virtual address in the beginning.
    */
    // `Iterator::any` expresses the membership check directly
    // (clippy::search_is_some flags `find(..).is_some()`).
    assert!(loader
        .actions
        .iter()
        .any(|&x| x == LoaderAction::Tls(VAddr::from(0x3ef0u64), 0x4, 0x8, 0x4)));
}
| rust | Apache-2.0 | 3363aba9a9fefdb1c4edd9f3d992020b532ed890 | 2026-01-04T20:20:38.811615Z | false |
gz/rust-elfloader | https://github.com/gz/rust-elfloader/blob/3363aba9a9fefdb1c4edd9f3d992020b532ed890/src/arch/x86/mod.rs | src/arch/x86/mod.rs | #[cfg(test)]
mod test;
// Should be in xmas-elf see: https://github.com/nrc/xmas-elf/issues/54
/// x86 (Intel386 ELF) relocation types, identified by the ELF `r_type` field.
///
/// Numeric values are assigned in [`RelocationTypes::from`].
#[derive(Eq, PartialEq, Debug, Clone, Copy)]
#[allow(non_camel_case_types)]
#[repr(u32)]
pub enum RelocationTypes {
    /// No relocation.
    R_386_NONE,
    /// Add 32 bit dword symbol value.
    R_386_32,
    /// PC-relative 32 bit signed sym value.
    R_386_PC32,
    /// 32 bit GOT offset.
    R_386_GOT32,
    /// 32 bit PLT offset.
    R_386_PLT32,
    /// Copy data from shared object.
    R_386_COPY,
    /// Set GOT entry to data address.
    R_386_GLOB_DAT,
    /// Set PLT entry to code address.
    R_386_JMP_SLOT,
    /// Add load address of shared object.
    R_386_RELATIVE,
    /// 32-bit GOT offset
    R_386_GOTOFF,
    /// 32-bit PC relative offset to GOT
    R_386_GOTPC,
    /// Direct 32 bit PLT address
    R_386_32PLT,
    /// Direct 16 bit zero extended
    R_386_16,
    /// 16 bit sign extended pc relative
    R_386_PC16,
    /// Direct 8 bit sign extended
    R_386_8,
    /// 8 bit sign extended pc relative
    R_386_PC8,
    /// 32-bit symbol size
    R_386_SIZE32,
    /// Unknown
    Unknown(u32),
}
impl RelocationTypes {
    /// Construct a [`RelocationTypes`] from a raw ELF `r_type` value.
    ///
    /// Values follow the System V ABI Intel386 processor supplement.
    /// Unrecognized values are preserved as [`RelocationTypes::Unknown`].
    pub fn from(typ: u32) -> RelocationTypes {
        use RelocationTypes::*;
        match typ {
            0 => R_386_NONE,
            // BUGFIX: the i386 psABI defines R_386_32 = 1 and R_386_PC32 = 2;
            // these two mappings were previously swapped.
            1 => R_386_32,
            2 => R_386_PC32,
            3 => R_386_GOT32,
            4 => R_386_PLT32,
            5 => R_386_COPY,
            6 => R_386_GLOB_DAT,
            7 => R_386_JMP_SLOT,
            8 => R_386_RELATIVE,
            9 => R_386_GOTOFF,
            10 => R_386_GOTPC,
            11 => R_386_32PLT,
            20 => R_386_16,
            21 => R_386_PC16,
            22 => R_386_8,
            23 => R_386_PC8,
            38 => R_386_SIZE32,
            x => Unknown(x),
        }
    }
}
| rust | Apache-2.0 | 3363aba9a9fefdb1c4edd9f3d992020b532ed890 | 2026-01-04T20:20:38.811615Z | false |
bananaofhappiness/soundscope | https://github.com/bananaofhappiness/soundscope/blob/763740575c4654d9fafd28e77f68771ad3d276b5/src/analyzer.rs | src/analyzer.rs | //! This module is responsible for analyzing audio files.
//! Taking samples it returns the loudness and spectrum.
use ebur128::{EbuR128, Mode};
use eyre::Result;
use spectrum_analyzer::{
FrequencyLimit, samples_fft_to_spectrum, scaling::scale_20_times_log10, windows::hann_window,
};
/// Audio analyzer: wraps an EBU R128 loudness meter and provides
/// FFT-spectrum and waveform helpers for a single stream.
pub struct Analyzer {
    // EBU R128 meter state (short-term/integrated loudness, range, true peak)
    loudness_meter: EbuR128,
    // Sample rate of the currently analyzed stream, in Hz
    sample_rate: u32,
}
impl Default for Analyzer {
    /// Builds a stereo analyzer at 44.1 kHz with all EBU R128 modes enabled.
    /// Panics if the meter cannot be created.
    fn default() -> Self {
        let loudness_meter = EbuR128::new(2, 44100, Mode::all())
            .unwrap_or_else(|err| panic!("Failed to create loudness meter: {}", err));
        Self {
            loudness_meter,
            sample_rate: 44100,
        }
    }
}
impl Analyzer {
    /// Recreates the loudness meter for a new channel count and sample rate.
    /// Used when a new file or capture device is selected.
    pub fn create_loudness_meter(&mut self, channels: u32, rate: u32) -> Result<()> {
        self.sample_rate = rate;
        self.loudness_meter = EbuR128::new(channels, rate, Mode::all())?;
        Ok(())
    }

    /// Computes the dB-scaled spectrum of `samples` between 20 Hz and 20 kHz
    /// and maps it onto a logarithmic frequency axis in chart coordinates.
    pub fn get_fft(&self, samples: &[f32]) -> Vec<(f64, f64)> {
        // apply hann window for smoothing
        let hann_window = hann_window(samples);
        // calc spectrum
        let spectrum = samples_fft_to_spectrum(
            &hann_window,
            self.sample_rate,
            FrequencyLimit::Range(20.0, 20000.0),
            Some(&scale_20_times_log10),
        )
        // The library rejects invalid windows; callers are expected to pass
        // valid (power-of-two length) windows, so failure here is a bug.
        .expect("failed to compute FFT spectrum from sample window");
        // convert OrderableF32 to f64
        let fft_vec = spectrum
            .data()
            .iter()
            .map(|(x, y)| (x.val() as f64, y.val() as f64))
            .collect::<Vec<(f64, f64)>>();
        // transform to log scale
        Self::transform_to_log_scale(&fft_vec)
    }

    /// Maps (frequency, value) pairs onto x in [0, 100] using a log10
    /// frequency axis (20 Hz -> 0, 20 kHz -> 100). Values pass through.
    fn transform_to_log_scale(fft_data: &[(f64, f64)]) -> Vec<(f64, f64)> {
        // set frequency range
        let min_freq_log = 20_f64.log10();
        let max_freq_log = 20000_f64.log10();
        let log_range = max_freq_log - min_freq_log;
        // set chart width to 100 (from 0 to 100)
        let chart_width = 100.;
        fft_data
            .iter()
            .map(|(freq, val)| {
                let log_freq = freq.log10();
                // normalize frequency to range [0.0, 1.0]
                let normalized_pos = (log_freq - min_freq_log) / log_range;
                // Scale normalized position to chart width
                let chart_x = normalized_pos * chart_width;
                (chart_x, *val)
            })
            .collect()
    }

    /// Downsamples `samples` to roughly one point per millisecond for the
    /// waveform chart; x is milliseconds (up to 15 s), y is the raw sample.
    pub fn get_waveform(&self, samples: &[f32]) -> Vec<(f64, f64)> {
        // `.max(1)` guards against sample rates below 1000 Hz, where the
        // integer division would yield 0 and `step_by(0)` would panic.
        let samples_in_one_ms = (self.sample_rate as usize / 1000).max(1);
        let iter = samples.iter().step_by(samples_in_one_ms).map(|x| *x as f64);
        // zip truncates at whichever runs out first: 15 000 points or samples
        (0..15 * 1000)
            .map(|x| x as f64)
            .zip(iter)
            .collect::<Vec<(f64, f64)>>()
    }

    /// Feeds interleaved frames into the loudness meter.
    pub fn add_samples(&mut self, samples: &[f32]) -> Result<(), ebur128::Error> {
        self.loudness_meter.add_frames_f32(samples)
    }

    /// Resets all accumulated loudness state.
    pub fn reset(&mut self) {
        self.loudness_meter.reset();
    }

    /// Short-term loudness (LUFS) over the meter's sliding window.
    pub fn get_shortterm_lufs(&mut self) -> Result<f64, ebur128::Error> {
        self.loudness_meter.loudness_shortterm()
    }

    /// Integrated (whole-program) loudness in LUFS.
    pub fn get_integrated_lufs(&mut self) -> Result<f64, ebur128::Error> {
        self.loudness_meter.loudness_global()
    }

    /// Loudness range (LRA).
    pub fn get_loudness_range(&mut self) -> Result<f64, ebur128::Error> {
        self.loudness_meter.loudness_range()
    }

    /// True peak of (left, right) channels 0 and 1.
    pub fn get_true_peak(&mut self) -> Result<(f64, f64), ebur128::Error> {
        let tp_left = self.loudness_meter.true_peak(0)?;
        let tp_right = self.loudness_meter.true_peak(1)?;
        Ok((tp_left, tp_right))
    }

    /// Sample rate of the currently analyzed stream, in Hz.
    pub fn sample_rate(&self) -> u32 {
        self.sample_rate
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    /// Checks if the transformation to log scale works correctly and frequencies are in a given range
    fn test_transform_to_log_scale() {
        let input = vec![(20.0, -10.0), (100.0, -5.0), (1000.0, 0.0), (20000.0, 5.0)];
        let result = Analyzer::transform_to_log_scale(&input);
        assert!((result[0].0 - 0.0).abs() < 1e-6); // 20Hz → 0
        assert!((result[3].0 - 100.0).abs() < 1e-6); // 20kHz → 100
    }
    #[test]
    /// Tests the FFT functionality with a simple sine wave
    fn test_get_fft() {
        let analyzer = Analyzer::default();
        // Generate a simple sine wave at 440Hz
        let sample_rate = 44100;
        let frequency = 440.0;
        // 16384 samples (~0.37 s at 44.1 kHz; 2^14 so the FFT input length
        // is a power of two)
        let samples: Vec<f32> = (0..16384 as usize)
            .map(|i| {
                let t = i as f32 / sample_rate as f32;
                (2.0 * std::f32::consts::PI * frequency * t).sin()
            })
            .collect();
        let fft_result = analyzer.get_fft(&samples);
        // Should have some data points
        assert!(!fft_result.is_empty());
    }
    #[test]
    /// Tests the waveform generation
    fn test_get_waveform() {
        let analyzer = Analyzer::default();
        let samples: Vec<f32> = (0..44100).map(|i| (i as f32 / 44100.0).sin()).collect();
        let waveform = analyzer.get_waveform(&samples);
        // Should have data points
        assert!(!waveform.is_empty());
        // Check that x values are sequential
        for i in 1..waveform.len().min(100) {
            assert!(waveform[i].0 > waveform[i - 1].0);
        }
    }
    #[test]
    /// Tests loudness measurement functionality
    fn test_loudness_measurements() {
        let mut analyzer = Analyzer::default();
        // Generate some test audio (1 second of sine wave)
        let samples: Vec<f32> = (0..88200) // 1 second of interleaved stereo at 44.1kHz (88200 samples = 44100 frames)
            .map(|i| 0.1 * (440.0 * 2.0 * std::f32::consts::PI * (i as f32 / 44100.0)).sin())
            .collect();
        let _ = analyzer.add_samples(&samples);
        // Test integrated loudness (should be valid after enough samples)
        if let Ok(lufs) = analyzer.get_integrated_lufs() {
            assert!(lufs < 0.0); // LUFS values are typically negative
            assert!(lufs > -100.0); // Reasonable range
        }
        // Test true peak
        if let Ok((left, right)) = analyzer.get_true_peak() {
            assert!(left >= 0.0);
            assert!(right >= 0.0);
            assert!(left <= 1.0);
            assert!(right <= 1.0);
        }
    }
    #[test]
    /// Tests analyzer reinitialization with different parameters
    fn test_analyzer_reinit() {
        let mut analyzer = Analyzer::default();
        // Test reinitializing with different parameters
        let result = analyzer.create_loudness_meter(1, 48000); // mono, 48kHz
        assert!(result.is_ok());
        let result = analyzer.create_loudness_meter(6, 96000); // 5.1 surround, 96kHz
        assert!(result.is_ok());
    }
    #[test]
    /// Tests edge cases for transform_to_log_scale
    fn test_transform_to_log_scale_edge_cases() {
        // Empty input
        let empty_input: Vec<(f64, f64)> = vec![];
        let result = Analyzer::transform_to_log_scale(&empty_input);
        assert!(result.is_empty());
        // Single frequency
        let single_input = vec![(1000.0, -3.0)];
        let result = Analyzer::transform_to_log_scale(&single_input);
        assert_eq!(result.len(), 1);
        assert!(result[0].0 >= 0.0 && result[0].0 <= 100.0);
        assert_eq!(result[0].1, -3.0);
    }
}
| rust | MIT | 763740575c4654d9fafd28e77f68771ad3d276b5 | 2026-01-04T20:17:12.581714Z | false |
bananaofhappiness/soundscope | https://github.com/bananaofhappiness/soundscope/blob/763740575c4654d9fafd28e77f68771ad3d276b5/src/audio_player.rs | src/audio_player.rs | //! This module contains the implementation of the audio player used to play audio files in user's terminal.
//! under the hood it uses `rodio` for playback and `symphonia` for decoding.
use crossbeam::channel::{Receiver, Sender};
use eyre::{Result, eyre};
use rodio::{ChannelCount, OutputStream, OutputStreamBuilder, Sink, Source, source};
use std::{path::PathBuf, time::Duration};
use symphonia::core::{
audio::{Channels, SampleBuffer},
codecs::{CODEC_TYPE_NULL, DecoderOptions},
errors::Error,
formats::FormatOptions,
io::MediaSourceStream,
meta::MetadataOptions,
probe::Hint,
};
/// Interleaved samples of the whole file.
pub type Samples = Vec<f32>;
/// Sample rate in Hz.
pub type SampleRate = u32;
/// Index into [`Samples`] of the sample currently being played.
pub type PlaybackPosition = usize;
/// Commands sent from the TUI to the audio player thread.
pub enum PlayerCommand {
    /// Load the file at the given path and make it the current track.
    SelectFile(PathBuf),
    /// Toggle between playing and paused.
    ChangeState,
    // Had to add Quit because on MacOS tui can't be on the main thread (smth does not implement Send), player must be there.
    // So when tui quits, player must know tui has quit and quits too.
    /// Stop playback and shut the player thread down.
    Quit,
    /// Move the playhead right
    MoveRight,
    /// Move the playhead left
    MoveLeft,
    /// Shows an error (only in debug mode)
    #[cfg(debug_assertions)]
    ShowTestError,
}
/// `AudioFile` represents a loaded audio file with its samples, sample rate, and channels.
/// It implements [`Source`] and [`Iterator`] for playback.
#[derive(Clone)]
pub struct AudioFile {
    // File name shown in the UI
    title: String,
    // Interleaved samples of the whole file
    samples: Samples,
    // Mid channel, (L + R) / 2, one sample per frame
    mid_samples: Samples,
    // Side channel, (L - R) / 2, one sample per frame
    side_samples: Samples,
    sample_rate: SampleRate,
    duration: Duration,
    // channels of the file (mono, stereo, etc.)
    channels: Channels,
    // Global state and the sender of it
    playback_position: usize, // Index of the Samples vec
    playback_position_tx: Sender<usize>,
}
impl AudioFile {
    /// File name of the loaded track.
    pub fn title(&self) -> &str {
        &self.title
    }
    /// Interleaved samples of the whole file.
    pub fn samples(&self) -> &Samples {
        &self.samples
    }
    /// Mid channel, (L + R) / 2.
    pub fn mid_samples(&self) -> &Samples {
        &self.mid_samples
    }
    /// Side channel, (L - R) / 2.
    pub fn side_samples(&self) -> &Samples {
        &self.side_samples
    }
    /// Total duration of the track.
    pub fn duration(&self) -> &Duration {
        &self.duration
    }
}
impl Iterator for AudioFile {
type Item = f32;
fn next(&mut self) -> Option<Self::Item> {
let pos = self.playback_position;
let res = if pos < self.samples.len() {
Some(self.samples[pos])
} else {
None
};
if pos.is_multiple_of(4096)
&& let Err(_err) = self.playback_position_tx.send(pos)
{
// TODO: log sending error
}
self.playback_position += 1;
res
}
}
impl Source for AudioFile {
    fn current_span_len(&self) -> Option<usize> {
        None
    }
    fn channels(&self) -> ChannelCount {
        self.channels.count() as u16
    }
    fn sample_rate(&self) -> SampleRate {
        self.sample_rate
    }
    fn total_duration(&self) -> Option<Duration> {
        Some(self.duration)
    }
    /// Seeks to `pos` by recomputing `playback_position` in sample units,
    /// clamped to the end of the track.
    fn try_seek(&mut self, pos: Duration) -> Result<(), source::SeekError> {
        // TODO: other channels, see https://docs.rs/rodio/latest/src/rodio/buffer.rs.html#88-105
        // Channel index of the sample the sink expects next.
        let curr_channel = self.playback_position % self.channels() as usize;
        let new_pos = pos.as_secs_f32() * self.sample_rate() as f32 * self.channels() as f32;
        // saturate pos at the end of the source
        let new_pos = new_pos as usize;
        let new_pos = new_pos.min(self.samples.len());
        // make sure the next sample is for the right channel
        let new_pos = new_pos.next_multiple_of(self.channels() as usize);
        // NOTE(review): this subtraction can underflow (panic in debug builds)
        // when the aligned position is 0 and `curr_channel` > 0. It yields
        // channel `channels - curr_channel`, which coincides with
        // `curr_channel` for stereo but not for >2 channels — verify intent.
        let new_pos = new_pos - curr_channel;
        self.playback_position = new_pos;
        // send position again so the charts update even when the audio is paused.
        if let Err(_err) = self.playback_position_tx.send(new_pos) {
            // TODO: log sending error
        }
        Ok(())
    }
}
impl AudioFile {
    /// Creates an empty placeholder `AudioFile` (no samples, 44.1 kHz)
    /// used before the user selects a file.
    pub fn new(playback_position_tx: Sender<usize>) -> Self {
        AudioFile {
            title: "".to_string(),
            samples: Vec::new(),
            mid_samples: Vec::new(),
            side_samples: Vec::new(),
            sample_rate: 44100,
            duration: Duration::from_secs(15),
            channels: Channels::all(),
            playback_position: 0,
            playback_position_tx,
        }
    }
    /// creates a new `AudioFile` from file
    fn from_file(path: &PathBuf, playback_position_tx: Sender<usize>) -> Result<Self> {
        // get file name; fall back to an empty title for paths without one
        // (e.g. paths ending in "..") instead of panicking
        let title = path
            .file_name()
            .map(|name| name.to_string_lossy().into_owned())
            .unwrap_or_default();
        let (samples, sample_rate, channels) = Self::decode_file(path)?;
        // TODO: other channels, not only stereo sound.
        let (mid_samples, side_samples) = get_mid_and_side_samples(&samples);
        // mid_samples has one entry per frame, so this is the length in ms
        let duration = mid_samples.len() as f64 / sample_rate as f64 * 1000.;
        Ok(AudioFile {
            title,
            samples,
            mid_samples,
            side_samples,
            sample_rate,
            duration: Duration::from_millis(duration as u64),
            channels,
            playback_position: 0,
            playback_position_tx,
        })
    }
    /// Decodes file and returns its [`Samples`], [`SampleRate`] and [`Channels`]
    fn decode_file(path: &PathBuf) -> Result<(Samples, SampleRate, Channels)> {
        // open the media source and create a stream
        let src = std::fs::File::open(path)?;
        let mss = MediaSourceStream::new(Box::new(src), Default::default());
        // Create a probe hint from the file's actual extension (previously
        // hard-coded to "mp3"); symphonia still content-probes either way.
        let mut hint = Hint::new();
        if let Some(ext) = path.extension().and_then(|ext| ext.to_str()) {
            hint.with_extension(ext);
        }
        // Use the default options for metadata and format readers.
        let meta_opts: MetadataOptions = Default::default();
        let fmt_opts: FormatOptions = Default::default();
        // Probe the media source.
        let probed = symphonia::default::get_probe().format(&hint, mss, &fmt_opts, &meta_opts)?;
        // Get the instantiated format reader.
        let mut format = probed.format;
        // Find the first audio track with a known (decodeable) codec.
        let track = match format
            .tracks()
            .iter()
            .find(|t| t.codec_params.codec != CODEC_TYPE_NULL)
        {
            Some(track) => track,
            None => {
                return Err(eyre!("No audio track found with a decodeable codec"));
            }
        };
        // Use the default options for the decoder.
        let dec_opts: DecoderOptions = Default::default();
        // Create a decoder for the track.
        let mut decoder = symphonia::default::get_codecs().make(&track.codec_params, &dec_opts)?;
        // Store the track identifier, it will be used to filter packets.
        let track_id = track.id;
        // Make a sample buffer to hold the decoded audio samples.
        let mut all_samples = Vec::<f32>::new();
        let mut sample_buf = None;
        // Defaults for sample rate and channels
        let mut sample_rate = 44100;
        let mut channels = Channels::empty();
        // The decode loop.
        loop {
            // Get the next packet from the format reader.
            let packet = match format.next_packet() {
                Ok(packet) => packet,
                Err(Error::IoError(_)) => {
                    // End of stream - return Ok to indicate successful completion.
                    // NOTE(review): this treats *any* I/O error as end-of-stream,
                    // so a genuine read failure silently truncates the file — confirm.
                    return Ok((all_samples, sample_rate, channels));
                }
                Err(err) => {
                    return Err(err.into());
                }
            };
            // If the packet does not belong to the selected track, skip it.
            if packet.track_id() != track_id {
                continue;
            }
            // Decode the packet into audio samples, ignoring any decode errors.
            match decoder.decode(&packet) {
                Ok(audio_buf) => {
                    // If this is the *first* decoded packet, create a sample buffer matching the
                    // decoded audio buffer format.
                    if sample_buf.is_none() {
                        // Get the audio buffer specification.
                        let spec = *audio_buf.spec();
                        sample_rate = spec.rate;
                        channels = spec.channels;
                        // Get the capacity of the decoded buffer. Note: This is capacity, not length!
                        let duration = audio_buf.capacity() as u64;
                        // Create the f32 sample buffer.
                        sample_buf = Some(SampleBuffer::<f32>::new(duration, spec));
                    }
                    // Copy the decoded audio buffer into the sample buffer in an interleaved format.
                    if let Some(buf) = &mut sample_buf {
                        buf.copy_interleaved_ref(audio_buf);
                        // Append the samples to our complete buffer
                        let samples = buf.samples();
                        all_samples.extend_from_slice(samples);
                    }
                }
                // Skip recoverable decode errors so one bad packet doesn't
                // abort the whole file.
                Err(symphonia::core::errors::Error::DecodeError(_)) => (),
                Err(err) => {
                    return Err(err.into());
                }
            }
        }
    }
}
/// Terminal audio player: owns the rodio output stream/sink and the
/// currently loaded [`AudioFile`].
pub struct AudioPlayer {
    // sends playback position
    playback_position_tx: Sender<usize>,
    // Currently loaded track (placeholder until a file is selected)
    audio_file: AudioFile,
    // Kept alive for the lifetime of the player — presumably dropping it
    // would tear down the output stream; TODO confirm against rodio docs.
    _stream_handle: OutputStream,
    sink: Sink,
}
impl AudioPlayer {
    /// Opens the default output stream and creates an idle sink plus a
    /// placeholder [`AudioFile`].
    pub fn new(playback_position_tx: Sender<usize>) -> Result<Self> {
        let _stream_handle = OutputStreamBuilder::open_default_stream()?;
        let sink = Sink::connect_new(_stream_handle.mixer());
        let audio_file = AudioFile::new(playback_position_tx.clone());
        Ok(Self {
            playback_position_tx,
            audio_file,
            _stream_handle,
            sink,
        })
    }
    /// Runs the player's command loop: polls `player_command_rx` every
    /// ~10 ms, reacting to file selection, play/pause, seeking and quit.
    /// Loaded files are sent back to the UI via `audio_file_tx`; load
    /// failures via `error_tx`. Returns when [`PlayerCommand::Quit`] arrives.
    pub fn run(
        &mut self,
        player_command_rx: Receiver<PlayerCommand>,
        audio_file_tx: Sender<AudioFile>,
        error_tx: Sender<String>,
    ) -> Result<()> {
        loop {
            // receive a `PlayerCommand` from the UI (non-blocking)
            if let Ok(cmd) = player_command_rx.try_recv() {
                match cmd {
                    PlayerCommand::SelectFile(path) => {
                        match AudioFile::from_file(&path, self.playback_position_tx.clone()) {
                            Err(err) => {
                                if let Err(_err) =
                                    error_tx.send(format!("Error loading file: {}", err))
                                {
                                    //TODO: log a sending error
                                }
                                continue;
                            }
                            Ok(af) => {
                                self.audio_file = af.clone();
                                if let Err(_err) = audio_file_tx.send(af) {
                                    //TODO: log a sending error
                                }
                            }
                        };
                        // clear the sink and append new file
                        self.sink.stop();
                        self.sink.clear();
                        self.audio_file.playback_position = 0;
                        self.sink.append(self.audio_file.clone());
                        if let Err(_err) = self.playback_position_tx.send(0) {
                            // TODO: log a sending error
                        }
                    }
                    PlayerCommand::ChangeState => {
                        if self.sink.is_paused() {
                            self.sink.play();
                        } else {
                            self.sink.pause();
                        }
                        // if we hit the end of the track, then load it again
                        if self.sink.empty() {
                            self.audio_file.playback_position = 0;
                            self.sink.append(self.audio_file.clone());
                        }
                    }
                    PlayerCommand::Quit => {
                        self.sink.stop();
                        self.sink.clear();
                        self.audio_file.playback_position = 0;
                        // Restore the terminal before the player thread exits.
                        ratatui::crossterm::execute!(
                            std::io::stdout(),
                            ratatui::crossterm::event::DisableMouseCapture
                        )?;
                        return Ok(());
                    }
                    // move the playhead right
                    PlayerCommand::MoveRight => {
                        let pos = self.sink.get_pos();
                        if self.sink.empty() {
                            continue;
                        }
                        // Clamp the +5 s jump at the end of the track.
                        let seek = (pos + Duration::from_secs(5)).min(self.audio_file.duration);
                        if let Err(err) = self.sink.try_seek(seek) {
                            println!("Error seeking: {:?}", err);
                            // TODO: error handling
                        }
                    }
                    // move the playhead left
                    PlayerCommand::MoveLeft => {
                        if self.sink.empty() {
                            // Track finished: reload it and land 5 s before the end.
                            // NOTE(review): this subtraction panics if the track is
                            // shorter than 5 s — consider `saturating_sub`.
                            let pos = self.audio_file.duration - Duration::from_secs(5);
                            self.sink.append(self.audio_file.clone());
                            if let Err(err) = self.sink.try_seek(pos) {
                                println!("Error seeking: {:?}", err);
                                // TODO: error handling
                            }
                            continue;
                        }
                        let pos = self.sink.get_pos();
                        if let Err(_err) = self
                            .sink
                            .try_seek(pos.saturating_sub(Duration::from_secs(5)))
                        {
                            // TODO: error handling
                        }
                    }
                    #[cfg(debug_assertions)]
                    PlayerCommand::ShowTestError => {
                        error_tx.send("This is a test message".to_string()).unwrap()
                    }
                }
            }
            // Poll interval: avoid busy-waiting on the command channel.
            std::thread::sleep(Duration::from_millis(10));
        }
    }
}
/// Splits interleaved stereo samples (L, R, L, R, …) into mid and side channels.
///
/// Mid is the per-frame average `(l + r) / 2`; side is the per-frame
/// difference `(l - r) / 2`. A trailing unpaired sample in odd-length input
/// is ignored, matching the original left/right pairing.
///
/// Returns `(mid_samples, side_samples)`, each `samples.len() / 2` long.
pub fn get_mid_and_side_samples(samples: &[f32]) -> (Vec<f32>, Vec<f32>) {
    let frames = samples.len() / 2;
    let mut mid_samples = Vec::with_capacity(frames);
    let mut side_samples = Vec::with_capacity(frames);
    // Single pass over L/R frames instead of materializing separate
    // left-channel and right-channel vectors first (four allocations -> two).
    for frame in samples.chunks_exact(2) {
        let (l, r) = (frame[0], frame[1]);
        mid_samples.push((l + r) / 2.);
        side_samples.push((l - r) / 2.);
    }
    (mid_samples, side_samples)
}
| rust | MIT | 763740575c4654d9fafd28e77f68771ad3d276b5 | 2026-01-04T20:17:12.581714Z | false |
bananaofhappiness/soundscope | https://github.com/bananaofhappiness/soundscope/blob/763740575c4654d9fafd28e77f68771ad3d276b5/src/main.rs | src/main.rs | mod analyzer;
mod audio_capture;
mod audio_player;
mod tui;
use crate::audio_player::{AudioFile, AudioPlayer, PlaybackPosition, PlayerCommand};
use crossbeam::channel::{bounded, unbounded};
use eyre::Result;
use ringbuffer::{AllocRingBuffer, RingBuffer};
use std::{
sync::{Arc, Mutex},
thread,
};
/// Entry point: wires the UI, the audio player and the analyzer together,
/// then runs the player loop on the main thread.
fn main() -> Result<()> {
    #[cfg(target_os = "linux")]
    suppress_alsa_messages();

    // Channels connecting the threads:
    // UI -> player commands (play/pause, file selection, quit, ...).
    let (cmd_tx, cmd_rx) = bounded::<PlayerCommand>(1);
    // Player -> analyzer playback position, so it knows which samples to use.
    let (pos_tx, pos_rx) = unbounded::<PlaybackPosition>();
    // Player -> UI decoded audio files.
    let (file_tx, file_rx) = bounded::<AudioFile>(1);
    // Player -> UI error strings.
    let (err_tx, err_rx) = bounded::<String>(1);

    let mut player = AudioPlayer::new(pos_tx.clone())?;
    // Placeholder file so the UI can start before a real file is picked.
    let placeholder = AudioFile::new(pos_tx);

    // Ring buffer holding the most recent captured samples (44.1 kHz x 30 s),
    // pre-filled with silence.
    let mut ring = AllocRingBuffer::new(44100usize * 30);
    ring.fill(0.0);
    let latest_captured_samples = Arc::new(Mutex::new(ring));

    // The TUI runs on its own thread; the player loop owns the main thread.
    thread::spawn(move || {
        tui::run(
            placeholder,
            cmd_tx,
            file_rx,
            pos_rx,
            err_rx,
            latest_captured_samples,
        )
    });

    player.run(cmd_rx, file_tx, err_tx)
}
// The code below suppresses ALSA error messages, which would otherwise be
// printed straight to the terminal and corrupt the TUI on Linux.
#[cfg(target_os = "linux")]
#[link(name = "asound")]
unsafe extern "C" {
    // libasound entry point for installing a custom error-message handler.
    fn snd_lib_error_set_handler(
        handler: Option<extern "C" fn(*const i8, i32, *const i8, i32, *const i8)>,
    );
}

/// No-op ALSA error handler: swallows every message.
#[cfg(target_os = "linux")]
extern "C" fn no_errors(_: *const i8, _: i32, _: *const i8, _: i32, _: *const i8) {}

/// Installs the no-op handler so ALSA stays silent.
#[cfg(target_os = "linux")]
fn suppress_alsa_messages() {
    // SAFETY: `no_errors` matches the handler signature declared above and,
    // being a plain fn item, lives for the whole program.
    unsafe {
        snd_lib_error_set_handler(Some(no_errors));
    }
}
| rust | MIT | 763740575c4654d9fafd28e77f68771ad3d276b5 | 2026-01-04T20:17:12.581714Z | false |
bananaofhappiness/soundscope | https://github.com/bananaofhappiness/soundscope/blob/763740575c4654d9fafd28e77f68771ad3d276b5/src/tui.rs | src/tui.rs | //! This module contains the implementation of the terminal user interface (TUI) used to display audio analysis results.
//! It uses `ratatui` under the hood.
use crate::{
analyzer::Analyzer,
audio_capture::{self, AudioDevice, list_input_devs},
audio_player::{self, AudioFile, PlayerCommand},
};
use cpal::{Stream, traits::StreamTrait as _};
use crossbeam::channel::{Receiver, Sender};
use dirs::config_dir;
use eyre::{Result, eyre};
use ratatui::{
DefaultTerminal,
crossterm::event::{Event, KeyCode, KeyEvent, MouseEvent, MouseEventKind, poll, read},
layout::Flex,
prelude::*,
style::{Color, Style, Stylize},
text::{Line, Span, ToLine, ToSpan},
widgets::{Axis, Block, Chart, Clear, Dataset, GraphType, List, ListItem, Paragraph, Wrap},
};
use ratatui_explorer::FileExplorer;
use ringbuffer::{AllocRingBuffer, RingBuffer};
use rodio::Source;
use serde::Deserialize;
use std::{
fmt::Display,
fs::{self, File},
io::Read,
path::PathBuf,
sync::{Arc, Mutex},
time::{Duration, Instant},
};
/// Uses [fill] to conveniently fill all fields of a struct.
///
/// `fill_fields!(self.section. a => x, b => y,)` expands into one
/// `fill(&mut self.section.<field>, <value>)` call per listed field.
macro_rules! fill_fields {
    ($self:ident.$section:ident.$($field:ident => $value:expr),* $(,)?) => {
        $( fill(&mut $self.$section.$field, $value); )*
    };
}
/// Shared ring buffer of captured audio samples, written by the capture
/// stream and read by the UI/analyzer.
pub type RBuffer = Arc<Mutex<AllocRingBuffer<f32>>>;
/// Settings like showing/hiding UI elements.
struct UI {
    theme: Theme,
    // Visibility toggles for the individual UI elements/popups.
    show_explorer: bool,
    show_fft_chart: bool,
    show_mid_fft: bool,
    show_side_fft: bool,
    show_devices_list: bool,
    show_lufs: bool,
    show_themes_list: bool,
    // Last error message and the moment it was shown (drives the popup).
    error_text: String,
    error_timer: Option<Instant>,
    // Name of the currently selected input device.
    device_name: String,
    // Width of the visible waveform window, in seconds.
    waveform_window: f64,
    // Used to flash control elements when the button is pressed
    left_arrow_timer: Option<Instant>,
    right_arrow_timer: Option<Instant>,
    plus_sign_timer: Option<Instant>,
    minus_sign_timer: Option<Instant>,
    // Used to be able to hover fft chart to get more precise frequencies
    chart_rect: Option<Rect>,
}
impl Default for UI {
    /// Starts with the FFT chart visible (mid channel only), every popup
    /// hidden and a 15-second waveform window.
    fn default() -> Self {
        Self {
            theme: Theme::default(),
            show_explorer: false,
            show_fft_chart: true,
            show_mid_fft: true,
            show_side_fft: false,
            show_devices_list: false,
            show_lufs: false,
            show_themes_list: false,
            error_text: String::new(),
            error_timer: None,
            device_name: String::new(),
            waveform_window: 15.,
            left_arrow_timer: None,
            right_arrow_timer: None,
            plus_sign_timer: None,
            minus_sign_timer: None,
            chart_rect: None,
        }
    }
}
/// Mode of the [App]. Currently, only Player and Microphone are supported.
#[derive(Default)]
enum Mode {
    /// Analyze a loaded audio file.
    #[default]
    Player,
    /// Analyze live microphone input.
    Microphone,
    /// Analyze system audio output (leading underscore suggests it is not
    /// selectable yet — NOTE(review): confirm).
    _System,
}
impl Display for Mode {
    /// Human-readable mode name shown in the UI.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            Mode::Player => "Player",
            Mode::Microphone => "Microphone",
            Mode::_System => "System",
        };
        f.write_str(name)
    }
}
/// Defines theme using .theme file
/// Otherwise, uses default values.
///
/// Each section themes one UI element; [Theme::apply_global_as_default]
/// back-fills any optional color a .theme file leaves unset.
#[derive(Deserialize, Default)]
struct Theme {
    global: GlobalTheme,
    waveform: WaveformTheme,
    fft: FftTheme,
    lufs: LufsTheme,
    devices: DevicesTheme,
    explorer: ExplorerTheme,
    error: ErrorTheme,
}
/// Writes `default` into `field` only when no value is present yet;
/// an already-set field is left untouched.
/// Used by the [fill_fields] macro.
fn fill<T>(field: &mut Option<T>, default: T) {
    // `get_or_insert` stores `default` only when the option is `None`.
    field.get_or_insert(default);
}
impl Theme {
    /// Sets `self.global.foreground` and `self.global.background` for every field that was not defined in a .theme file.
    ///
    /// The global highlight itself falls back to the global foreground; each
    /// section is then filled from the global foreground (`fg`), background
    /// (`bg`) and highlight (`hl`) via [fill_fields].
    fn apply_global_as_default(&mut self) {
        let fg = self.global.foreground;
        let bg = self.global.background;
        // Highlight is optional in the config: default it to the foreground.
        self.global.highlight = self.global.highlight.or(Some(fg));
        let hl = self.global.highlight.unwrap();
        fill_fields!(self.waveform.
            borders => fg,
            controls => fg,
            controls_highlight => hl,
            labels => fg,
            playhead => hl,
            current_time => fg,
            total_duration => fg,
            waveform => fg,
            background => bg,
            highlight => hl,
        );
        fill_fields!(self.lufs.
            axis => fg,
            chart => fg,
            foreground => fg,
            labels => fg,
            numbers => fg,
            borders => fg,
            background => bg,
            highlight => hl,
        );
        fill_fields!(self.fft.
            axes => fg,
            axes_labels => fg,
            borders => fg,
            labels => fg,
            mid_fft => fg,
            side_fft => hl,
            background => bg,
            highlight => hl,
        );
        fill_fields!(self.explorer.
            background => bg,
            borders => fg,
            dir_foreground => fg,
            item_foreground => fg,
            highlight_dir_foreground => hl,
            highlight_item_foreground => hl,
        );
        fill_fields!(self.devices.
            background => bg,
            foreground => fg,
            borders => fg,
            highlight => hl,
        );
        fill_fields!(self.error.
            background => bg,
            foreground => fg,
            borders => fg,
        );
    }
}
/// Used to set default values of every UI element if they are not specified in the config file.
#[derive(Deserialize)]
struct GlobalTheme {
    background: Color,
    /// It is default value for everything that is not a background,
    /// Except for SideFFT, which is LightGreen, and playhead position, which is LightRed
    foreground: Color,
    /// Color used to highlight corresponding characters
    /// Like highlighting L in LUFS to let the user know
    /// that pressing L will open the LUFS meter
    highlight: Option<Color>,
}
/// Used to define the theme for the waveform display.
#[derive(Deserialize)]
struct WaveformTheme {
    borders: Option<Color>,
    waveform: Option<Color>,
    playhead: Option<Color>,
    /// Current playing time and total duration
    current_time: Option<Color>,
    total_duration: Option<Color>,
    /// Buttons like <-, +, -, ->
    controls: Option<Color>,
    controls_highlight: Option<Color>,
    labels: Option<Color>,
    /// Background of the chart
    background: Option<Color>,
    highlight: Option<Color>,
}
/// Used to define the theme for the FFT display.
#[derive(Deserialize)]
struct FftTheme {
    borders: Option<Color>,
    /// Frequencies and LUFS tabs text
    labels: Option<Color>,
    axes: Option<Color>,
    axes_labels: Option<Color>,
    mid_fft: Option<Color>,
    side_fft: Option<Color>,
    /// Background of the chart
    background: Option<Color>,
    highlight: Option<Color>,
}
/// Used to define the theme for the LUFS display.
#[derive(Deserialize)]
struct LufsTheme {
    axis: Option<Color>,
    chart: Option<Color>,
    /// Frequencies and LUFS tabs text
    labels: Option<Color>,
    /// Text color on the left
    foreground: Option<Color>,
    /// Color of the numbers on the left
    numbers: Option<Color>,
    borders: Option<Color>,
    /// Background of the chart
    background: Option<Color>,
    highlight: Option<Color>,
}
/// Used to define the theme for the devices list.
#[derive(Deserialize)]
struct DevicesTheme {
    background: Option<Color>,
    foreground: Option<Color>,
    borders: Option<Color>,
    highlight: Option<Color>,
}
/// Used to define the theme for the explorer.
#[derive(Deserialize)]
struct ExplorerTheme {
    background: Option<Color>,
    borders: Option<Color>,
    item_foreground: Option<Color>,
    highlight_item_foreground: Option<Color>,
    dir_foreground: Option<Color>,
    highlight_dir_foreground: Option<Color>,
}
/// Used to define the theme for the error popup.
#[derive(Deserialize)]
struct ErrorTheme {
    background: Option<Color>,
    foreground: Option<Color>,
    borders: Option<Color>,
}
// Built-in palette: yellow on black, light-red highlights, light-green side
// FFT. Each section's defaults mirror the global defaults below.
impl Default for GlobalTheme {
    fn default() -> Self {
        Self {
            background: Color::Black,
            foreground: Color::Yellow,
            highlight: Some(Color::LightRed),
        }
    }
}
impl Default for WaveformTheme {
    fn default() -> Self {
        Self {
            borders: Some(Color::Yellow),
            waveform: Some(Color::Yellow),
            playhead: Some(Color::LightRed),
            current_time: Some(Color::Yellow),
            total_duration: Some(Color::Yellow),
            controls: Some(Color::Yellow),
            controls_highlight: Some(Color::LightRed),
            labels: Some(Color::Yellow),
            background: Some(Color::Black),
            highlight: Some(Color::LightRed),
        }
    }
}
impl Default for FftTheme {
    fn default() -> Self {
        Self {
            axes: Some(Color::Yellow),
            axes_labels: Some(Color::Yellow),
            borders: Some(Color::Yellow),
            labels: Some(Color::Yellow),
            mid_fft: Some(Color::Yellow),
            side_fft: Some(Color::LightGreen),
            background: Some(Color::Black),
            highlight: Some(Color::LightRed),
        }
    }
}
impl Default for LufsTheme {
    fn default() -> Self {
        Self {
            axis: Some(Color::Yellow),
            chart: Some(Color::Yellow),
            labels: Some(Color::Yellow),
            foreground: Some(Color::Yellow),
            numbers: Some(Color::Yellow),
            borders: Some(Color::Yellow),
            background: Some(Color::Black),
            highlight: Some(Color::LightRed),
        }
    }
}
impl Default for DevicesTheme {
    fn default() -> Self {
        Self {
            background: Some(Color::Black),
            foreground: Some(Color::Yellow),
            borders: Some(Color::Yellow),
            highlight: Some(Color::LightRed),
        }
    }
}
impl Default for ExplorerTheme {
    fn default() -> Self {
        Self {
            background: Some(Color::Black),
            borders: Some(Color::Yellow),
            item_foreground: Some(Color::Yellow),
            highlight_item_foreground: Some(Color::LightRed),
            dir_foreground: Some(Color::Yellow),
            highlight_dir_foreground: Some(Color::LightRed),
        }
    }
}
impl Default for ErrorTheme {
    fn default() -> Self {
        Self {
            background: Some(Color::Black),
            foreground: Some(Color::LightRed),
            borders: Some(Color::LightRed),
        }
    }
}
/// Settings for the [App]. Currently only the [Mode] is supported.
#[derive(Default)]
struct Settings {
    mode: Mode,
}
/// FFT data for the UI.
///
/// Each vector holds `(x, magnitude)` points ready to be plotted on the
/// frequency chart.
#[derive(Default)]
struct FFTData {
    mid_fft: Vec<(f64, f64)>,
    side_fft: Vec<(f64, f64)>,
}
/// Waveform data for the UI.
struct WaveForm {
    /// `(x, amplitude)` points of the waveform chart.
    chart: Vec<(f64, f64)>,
    /// Current playback position, in samples.
    playhead: usize,
    // Playhead phase flags consumed by `render_waveform`:
    // `at_zero` — playhead still drawn at its raw position (start of file);
    // `at_end` — playhead scrolls from chart middle to the right edge.
    // NOTE(review): inferred from `render_waveform`'s branching — confirm.
    at_zero: bool,
    at_end: bool,
}
impl Default for WaveForm {
    fn default() -> Self {
        Self {
            chart: vec![(0., 0.)],
            playhead: 0,
            at_zero: true,
            at_end: false,
        }
    }
}
/// `App` contains the necessary components for the application like senders, receivers, [AudioFile] data and UI settings.
struct App {
    /// Audio file which is loaded into the player.
    audio_file: AudioFile,
    /// If file is not selected, the app crashes when you try to play it.
    /// It is easier to use this bool instead of Option<AudioFile> because
    /// we would always have to check if it is not None. But it can be None only before
    /// the first file is selected.
    is_file_selected: bool,
    is_playing_audio: bool,
    /// Receives decoded files from the player thread.
    audio_file_rx: Receiver<AudioFile>,
    /// RingBuffer used to store the latest captured samples when the `Mode` is not `Mode::Player`.
    latest_captured_samples: RBuffer,
    /// The stream that captures the audio through input device
    audio_capture_stream: Option<Stream>,
    /// Sends commands like pause and play to the player.
    player_command_tx: Sender<PlayerCommand>,
    /// Gets playback position of an audio file when the mode is player
    /// for an analyzer to know what samples to analyze.
    playback_position_rx: Receiver<usize>,
    /// Gets errors to display them afterwards.
    error_rx: Receiver<String>,
    /// Used to get LUFS of an audio file.
    file_analyzer: Analyzer,
    /// Used to get LUFS of microphone input.
    device_analyzer: Analyzer,
    // Charts data
    /// Data used to render FFT chart.
    fft_data: FFTData,
    /// Data used to render waveform.
    waveform: WaveForm,
    /// LUFS chart (rolling history of 300 values).
    lufs: [f64; 300],
    settings: Settings,
    //UI
    explorer: FileExplorer,
    ui: UI,
    // Used to conveniently return to current directory when opening an explorer
    current_directory: PathBuf,
    // Used to print info about fft chart when it's hovered
    mouse_position: Option<(u16, u16)>,
}
impl App {
/// Builds the [App] with default UI/analyzer state around the supplied
/// channel endpoints and shared capture buffer.
///
/// # Errors
/// Returns an error when the file explorer cannot be constructed.
fn new(
    audio_file: AudioFile,
    player_command_tx: Sender<PlayerCommand>,
    audio_file_rx: Receiver<AudioFile>,
    playback_position_rx: Receiver<usize>,
    error_rx: Receiver<String>,
    latest_captured_samples: RBuffer,
) -> Result<Self> {
    Ok(Self {
        audio_file,
        is_file_selected: false,
        is_playing_audio: false,
        audio_file_rx,
        latest_captured_samples,
        audio_capture_stream: None,
        player_command_tx,
        playback_position_rx,
        error_rx,
        file_analyzer: Analyzer::default(),
        device_analyzer: Analyzer::default(),
        fft_data: FFTData::default(),
        waveform: WaveForm::default(),
        // LUFS history pre-filled with the chart's -50 floor value.
        lufs: [-50.; 300],
        settings: Settings::default(),
        explorer: FileExplorer::with_theme(ratatui_explorer::Theme::default())?,
        ui: UI::default(),
        current_directory: PathBuf::from(""),
        mouse_position: None,
    })
}
/// Applies `theme` to the whole app, restyling the file explorer to match.
///
/// NOTE(review): unwraps every explorer color, so `theme` is expected to
/// have all optional colors already filled (e.g. via
/// [Theme::apply_global_as_default]) — confirm callers guarantee this.
fn set_theme(&mut self, theme: Theme) {
    // define styles
    let s = Style::default()
        .bg(theme.explorer.background.unwrap())
        .fg(theme.explorer.borders.unwrap());
    let is = s.fg(theme.explorer.item_foreground.unwrap());
    let ihl = s.fg(theme.explorer.highlight_item_foreground.unwrap());
    let ds = s.fg(theme.explorer.dir_foreground.unwrap()).bold();
    let dhl = s
        .fg(theme.explorer.highlight_dir_foreground.unwrap())
        .bold();
    let explorer_theme = ratatui_explorer::Theme::default()
        .with_style(s)
        .with_item_style(is)
        .with_highlight_item_style(ihl)
        .with_dir_style(ds)
        .with_highlight_dir_style(dhl)
        .add_default_title();
    self.explorer.set_theme(explorer_theme);
    self.ui.theme = theme;
}
/// The function used to draw the UI.
///
/// Splits the screen into the waveform (top 30%) and the chart area
/// (bottom 70%), then layers popups (error, explorer/theme list, device
/// list) on top depending on the current UI state.
fn draw(&mut self, f: &mut Frame) {
    // split the area into waveform part and charts parts
    let area = f.area();
    let layout = Layout::default()
        .direction(Direction::Vertical)
        .constraints([Constraint::Percentage(30), Constraint::Percentage(70)].as_ref())
        .split(area);
    // paint the themed background color over the whole frame
    let background = Paragraph::new("").style(self.ui.theme.global.background);
    f.render_widget(background, area);
    self.render_waveform(f, layout[0]);
    // remember the chart area so mouse hover can be hit-tested later
    self.ui.chart_rect = Some(layout[1]);
    // show charts based on user settings
    if self.ui.show_lufs {
        self.render_lufs(f, layout[1]);
    } else if self.ui.show_fft_chart {
        self.render_fft_chart(f, layout[1]);
        if let Some((x, y)) = self.mouse_position {
            self.render_fft_info(f, x, y)
        }
    }
    // render error: a freshly received error (re)starts the popup timer
    if let Ok(err) = self.error_rx.try_recv() {
        self.ui.error_text = err;
        self.ui.error_timer = Some(std::time::Instant::now())
    }
    self.render_error_message(f);
    // render explorer
    if self.ui.show_explorer || self.ui.show_themes_list {
        let area = Self::get_explorer_popup_area(area, 50, 70);
        f.render_widget(Clear, area);
        f.render_widget(&self.explorer.widget(), area);
    }
    if self.ui.show_devices_list {
        self.render_devices_list(f);
    }
}
/// Renders the top waveform panel: the waveform itself, a playhead line,
/// current/total playback time and the seek/zoom control labels.
fn render_waveform(&mut self, frame: &mut Frame, area: Rect) {
    // Resolve the theme's waveform colors into ready-to-use styles.
    let s = Style::default().bg(self.ui.theme.waveform.background.unwrap());
    let lb = s.fg(self.ui.theme.waveform.labels.unwrap());
    let bd = s.fg(self.ui.theme.waveform.borders.unwrap());
    let _ct = s.fg(self.ui.theme.waveform.controls.unwrap());
    let hl = s.fg(self.ui.theme.waveform.highlight.unwrap());
    let pl = s.fg(self.ui.theme.waveform.playhead.unwrap());
    let ct = s.fg(self.ui.theme.waveform.current_time.unwrap());
    let td = s.fg(self.ui.theme.waveform.total_duration.unwrap());
    let wv = s.fg(self.ui.theme.waveform.waveform.unwrap());
    // playhead is just a function that looks like a vertical line
    let samples_in_one_ms = self.audio_file.sample_rate() / 1000;
    let mut playhead_chart = [
        (self.waveform.playhead as f64 / samples_in_one_ms as f64, 1.),
        (
            self.waveform.playhead as f64 / samples_in_one_ms as f64 + 0.01,
            -1.,
        ),
    ];
    if self.waveform.at_end {
        // near the end of the file the playhead scrolls from the middle of
        // the chart towards its right edge
        let chart_x_position = self.get_relative_playhead_pos(samples_in_one_ms);
        playhead_chart = [(chart_x_position, 1.), (chart_x_position + 0.01, -1.)];
    } else if !self.waveform.at_zero {
        // if not at zero then place the playhead right at the middle of a chart
        playhead_chart = self.get_middle_playhead_pos(samples_in_one_ms);
    }
    // live-capture modes have no playhead: park it off-screen
    if !matches!(self.settings.mode, Mode::Player) {
        playhead_chart = [(-1., -1.), (-1., -1.)];
    }
    // get current playback time in seconds
    let playhead_position_in_milis = Duration::from_millis(
        (self.waveform.playhead as f64 / self.audio_file.sample_rate() as f64 * 1000.) as u64,
    );
    let current_sec = playhead_position_in_milis.as_secs_f64();
    let current_min = (current_sec / 60.) as u32;
    let current_sec = current_sec % 60.;
    // get total audio file duration
    let total_sec = self.audio_file.duration().as_secs_f64();
    let total_min = (total_sec / 60.) as u32;
    let total_sec = total_sec % 60.;
    // make datasets
    // first one to render a waveform
    // the other one to render the playhead
    let datasets = vec![
        Dataset::default()
            .marker(symbols::Marker::Braille)
            .graph_type(GraphType::Line)
            .style(wv)
            .data(&self.waveform.chart),
        Dataset::default()
            .marker(symbols::Marker::Braille)
            .graph_type(GraphType::Line)
            .style(pl)
            .data(&playhead_chart),
    ];
    // render chart
    // upper-right title shows the mode (plus the input device outside of
    // Player mode) with hotkey letters highlighted
    let upper_right_title = match self.settings.mode {
        Mode::Player => Line::from(vec![
            "C".bold().style(hl),
            "hange Mode: ".to_span().style(lb),
            self.settings.mode.to_span().style(lb),
            " T".bold().style(hl),
            "heme".to_span().style(lb),
        ])
        .right_aligned(),
        _ => Line::from(vec![
            "D".bold().style(hl),
            "evice: ".to_span().style(lb),
            self.ui.device_name.to_span().style(lb),
            " ".to_span(),
            "C".bold().style(hl),
            "hange Mode: ".to_span().style(lb),
            self.settings.mode.to_span().style(lb),
            " ".to_span(),
            "T".bold().style(hl),
            "heme".to_span().style(lb),
        ])
        .right_aligned(),
    };
    let title = self.audio_file.title();
    // Player mode anchors the window at 0 ms; capture modes pin the window's
    // right edge at the 15000 ms mark.
    let x_window = match self.settings.mode {
        Mode::Microphone | Mode::_System => [
            15000. - (self.ui.waveform_window as usize * 1000) as f64,
            15000.,
        ],
        _ => [0., (self.ui.waveform_window as usize * 1000) as f64],
    };
    let chart = Chart::new(datasets)
        .block(
            Block::bordered()
                .title(title.to_span().style(lb))
                .title_bottom(self.get_flashing_controls_text().left_aligned())
                // current position and total duration
                .title_bottom(
                    Line::styled(format!("{:0>2}:{:0>5.2}", current_min, current_sec), ct)
                        .centered(),
                )
                .title_bottom(
                    Line::styled(format!("{:0>2}:{:0>5.2}", total_min, total_sec), td)
                        .right_aligned(),
                )
                .title(upper_right_title)
                .style(bd),
        )
        .style(wv)
        .x_axis(Axis::default().bounds(x_window))
        .y_axis(Axis::default().bounds([-1., 1.]));
    frame.render_widget(chart, area);
}
/// X position of the playhead (in chart units, i.e. milliseconds) while
/// playback is inside the final half-window of the file, where the playhead
/// scrolls from the middle of the chart to its right edge.
fn get_relative_playhead_pos(&self, samples_in_one_ms: u32) -> f64 {
    // in the last part of the audio the playhead should move from the middle to the end of the chart
    let total_samples = self.audio_file.mid_samples().len();
    let chart_duration_seconds = self.ui.waveform_window; // make a var not to hard code and be able to add resizing waveform window if needed
    let chart_middle_seconds = chart_duration_seconds / 2.0;
    // calculate the absolute sample position where the playhead starts scrolling from the middle of the chart to the end
    // this is when playback enters the last `chart_middle_seconds` (default is 7.5s) of the total audio duration.
    let scroll_start_absolute_samples = total_samples
        .saturating_sub((chart_middle_seconds * self.audio_file.sample_rate() as f64) as usize);
    // calculate playhead's position relative to the start of this scroll phase
    // since `self.waveform.playhead` is the absolute current playback position.
    let relative_samples_in_scroll_phase = self
        .waveform
        .playhead
        .saturating_sub(scroll_start_absolute_samples);
    // map this relative sample position to the chart's X-axis range for the playhead.
    // the conversion from samples to chart units (milliseconds) uses the same 1/samples_in_one_ms scale
    // as other playhead positions in this function.
    let mut chart_x_position = (chart_middle_seconds * 1000.)
        + (relative_samples_in_scroll_phase as f64 / samples_in_one_ms as f64);
    // Ensure the playhead does not exceed the chart's upper bound.
    chart_x_position = f64::min(chart_x_position, chart_duration_seconds * 1000.);
    chart_x_position
}
/// Playhead line pinned to the middle of the visible window (or at the raw
/// playback position while it is still left of the middle).
///
/// Returns the two `(x, y)` endpoints of the near-vertical playhead line,
/// in chart units (milliseconds).
fn get_middle_playhead_pos(&self, samples_in_one_ms: u32) -> [(f64, f64); 2] {
    // Compute the clamped x position once instead of duplicating the whole
    // `f64::min(...)` expression for both endpoints.
    let x = f64::min(
        self.waveform.playhead as f64 / samples_in_one_ms as f64,
        1000. * self.ui.waveform_window / 2.,
    );
    [(x, 1.), (x + 0.01, -1.)]
}
fn get_flashing_controls_text(&self) -> Line<'_> {
let t = 100;
let s = Style::default()
.bg(self.ui.theme.waveform.background.unwrap())
.fg(self.ui.theme.waveform.controls.unwrap());
let hl = s.fg(self.ui.theme.waveform.controls_highlight.unwrap());
let left_arrow = match self.ui.left_arrow_timer {
Some(timer) if timer.elapsed().as_millis() < t => "<-".to_span().style(hl),
_ => "<-".to_span().style(s),
};
let right_arrow = match self.ui.right_arrow_timer {
Some(timer) if timer.elapsed().as_millis() < t => "->".to_span().style(hl),
_ => "->".to_span().style(s),
};
let minus = match self.ui.minus_sign_timer {
Some(timer) if timer.elapsed().as_millis() < t => "-".to_span().style(hl),
_ => "-".to_span().style(s),
};
let plus = match self.ui.plus_sign_timer {
Some(timer) if timer.elapsed().as_millis() < t => "+".to_span().style(hl),
_ => "+".to_span().style(s),
};
// Line::from(format!(
// "{} {} {:0>2}s {} {}",
// left_arrow, minus, self.ui_settings.waveform_window, plus, right_arrow
// ))
Line::from(vec![
left_arrow,
" ".to_span(),
minus,
" ".to_span(),
format!("{:0>2}s", self.ui.waveform_window.to_span().style(s)).into(),
" ".to_span(),
plus,
" ".to_span(),
right_arrow,
])
}
/// Renders the bottom FFT panel with the mid and side frequency curves.
fn render_fft_chart(&mut self, frame: &mut Frame, area: Rect) {
    // Resolve the theme's FFT colors into ready-to-use styles.
    let s = Style::default().bg(self.ui.theme.fft.background.unwrap());
    let fg = s.fg(self.ui.theme.fft.axes_labels.unwrap());
    let ax = s.fg(self.ui.theme.fft.axes.unwrap());
    let lb = s.fg(self.ui.theme.fft.labels.unwrap());
    let bd = s.fg(self.ui.theme.fft.borders.unwrap());
    let mf = s.fg(self.ui.theme.fft.mid_fft.unwrap());
    let sf = s.fg(self.ui.theme.fft.side_fft.unwrap());
    let hl = s.fg(self.ui.theme.fft.highlight.unwrap());
    let x_labels = vec![
        // frequencies are commented because their positions are off.
        // they are not rendered where the corresponding frequencies are.
        Span::styled("20Hz", fg.bold()),
        // Span::raw("20Hz"),
        // Span::raw(""),
        // Span::raw(""),
        // Span::raw("112.47"),
        // Span::raw(""),
        // Span::raw(""),
        // Span::raw(""),
        Span::styled("632.46Hz", fg),
        // Span::raw(""),
        // Span::raw(""),
        // Span::raw(" "),
        // Span::raw("3556.57"),
        // Span::raw(""),
        // Span::raw(""),
        // Span::raw("20000Hz"),
        Span::styled("20kHz", fg.bold()),
    ];
    // if no data about frequencies then default to some low value
    // (a single off-chart point keeps a hidden curve invisible)
    let mid_fft: &[(f64, f64)] = if self.ui.show_mid_fft {
        &self.fft_data.mid_fft
    } else {
        &[(-1000.0, -1000.0)]
    };
    let side_fft: &[(f64, f64)] = if self.ui.show_side_fft {
        &self.fft_data.side_fft
    } else {
        &[(-1000.0, -1000.0)]
    };
    let datasets = vec![
        Dataset::default()
            // highlight the letter M so the user knows they must press M to toggle it
            // same with Side fft
            .name(vec!["M".bold().style(hl), "id Frequency".into()])
            .marker(symbols::Marker::Braille)
            .graph_type(GraphType::Line)
            .style(mf)
            .data(mid_fft),
        Dataset::default()
            .name(vec!["S".bold().style(hl), "ide Frequency".into()])
            .marker(symbols::Marker::Braille)
            .graph_type(GraphType::Line)
            .style(sf)
            .data(side_fft),
    ];
    let chart = Chart::new(datasets)
        // the title uses the same highlighting technique
        .block(Block::bordered().style(bd).title(vec![
            "F".to_span().style(hl).bold(),
            "requencies ".to_span().style(lb).bold(),
            "L".to_span().style(hl),
            "UFS".to_span().style(lb),
        ]))
        .x_axis(
            Axis::default()
                .title("Hz")
                .labels(x_labels)
                .style(ax)
                .bounds([0., 100.]),
        )
        .y_axis(
            Axis::default()
                .title("Db")
                .labels(vec![
                    Span::raw("-78 Db").style(fg),
                    Span::raw("-18 Db").style(fg),
                ])
                .style(ax)
                .bounds([-150., 100.]),
        )
        .style(s);
    frame.render_widget(chart, area);
}
fn render_lufs(&mut self, f: &mut Frame, area: Rect) {
let s = Style::default().bg(self.ui.theme.lufs.background.unwrap());
let fg = s.fg(self.ui.theme.lufs.foreground.unwrap());
let ax = s.fg(self.ui.theme.lufs.axis.unwrap());
let hl = s.fg(self.ui.theme.lufs.highlight.unwrap());
let bd = s.fg(self.ui.theme.lufs.borders.unwrap());
let ch = s.fg(self.ui.theme.lufs.chart.unwrap());
let lb = s.fg(self.ui.theme.lufs.labels.unwrap());
let nb = s.fg(self.ui.theme.lufs.numbers.unwrap());
let layout = Layout::default()
.direction(Direction::Horizontal)
.constraints([Constraint::Percentage(20), Constraint::Percentage(80)].as_ref())
.split(area);
let data = self
.lufs
.iter()
.enumerate()
.map(|(x, &y)| (x as f64, y))
.collect::<Vec<(f64, f64)>>();
let integrated_lufs = match self.file_analyzer.get_integrated_lufs() {
Ok(lufs) => lufs,
Err(err) => {
self.handle_error(format!("Error getting integrated LUFS: {}", err));
0.0
}
};
// it should not display `-inf`
let integrated_lufs = if integrated_lufs.is_infinite() {
-50.0
} else {
integrated_lufs
};
// text layout
let paragraph_layout = Layout::default()
.direction(Direction::Vertical)
.constraints([
Constraint::Ratio(1, 3),
Constraint::Ratio(1, 3),
Constraint::Ratio(1, 3),
])
.split(layout[0]);
// get lufs text
let integrated = format!("{:06.2}", integrated_lufs);
let short_term = format!("{:06.2}", self.lufs[299]);
let integrated = integrated.to_span().style(nb);
let short_term = short_term.to_span().style(nb);
let lufs_text = vec![
"Short term LUFS:".bold().style(fg) + short_term,
"Intergrated LUFS:".bold().style(fg) + integrated,
];
// get true peak
let (tp_left, tp_right) = match self.file_analyzer.get_true_peak() {
Ok((tp_left, tp_right)) => (tp_left, tp_right),
Err(err) => {
self.handle_error(format!("Error getting true peak: {}", err));
(0.0, 0.0)
}
};
// get true peak text
let left = format!("{:.2}", tp_left);
let right = format!("{:.2}", tp_right);
let left = left.to_span().style(nb);
let right = right.to_span().style(nb);
let true_peak_text = vec![
"True Peak".to_line().style(fg).bold(),
"L: ".bold().style(fg) + left + " Db".bold().style(fg),
"R: ".bold().style(fg) + right + " Db".bold().style(fg),
];
//get range text
let range = match self.file_analyzer.get_loudness_range() {
Ok(range) => range,
Err(err) => {
self.handle_error(format!("Error getting loudness range: {}", err));
0.0
}
};
let range_text = vec![("Range: ".bold() + format!("{:.2} LU", range).into()).style(fg)];
// paragraphs
let lufs_paragraph = Paragraph::new(lufs_text)
.block(Block::bordered().style(bd).title(vec![
"F".to_span().style(hl),
"requencies ".to_span().style(lb),
"L".to_span().style(hl).bold(),
"UFS".to_span().style(lb).bold(),
]))
.alignment(Alignment::Center);
let true_peak_paragraph = Paragraph::new(true_peak_text)
.block(Block::bordered().style(bd))
.alignment(Alignment::Center)
.style(bd);
| rust | MIT | 763740575c4654d9fafd28e77f68771ad3d276b5 | 2026-01-04T20:17:12.581714Z | true |
bananaofhappiness/soundscope | https://github.com/bananaofhappiness/soundscope/blob/763740575c4654d9fafd28e77f68771ad3d276b5/src/audio_capture.rs | src/audio_capture.rs | //! This module is responsible for capturing audio from the PC and microphone.
use crate::tui::RBuffer;
use cpal::{
Device, Stream, StreamConfig, default_host,
traits::{DeviceTrait, HostTrait},
};
use eyre::Result;
/// An input device paired with its default stream configuration.
pub struct AudioDevice {
    device: Device,
    config: StreamConfig,
}
impl AudioDevice {
    /// Wraps `preferred_dev`, or the host's default input device when `None`.
    ///
    /// # Panics
    /// Panics when no input device is available or when the device reports
    /// no default input configuration.
    pub fn new(preferred_dev: Option<cpal::Device>) -> Self {
        let host = default_host();
        // `unwrap_or_else` avoids querying the default device at all when a
        // preferred one was supplied (the previous `unwrap_or` evaluated its
        // argument eagerly).
        let device = preferred_dev
            .unwrap_or_else(|| host.default_input_device().expect("no default input device"));
        let config = device
            .default_input_config()
            .expect("input device has no default input config")
            .config();
        Self { device, config }
    }

    /// The wrapped `cpal` device.
    pub fn device(&self) -> &Device {
        &self.device
    }

    /// The device's default stream configuration.
    pub fn config(&self) -> &StreamConfig {
        &self.config
    }
}
/// Builds (but does not start) a `cpal` input stream that appends every
/// captured sample to `latest_captured_samples`.
///
/// Mono input is expanded before being stored: the first sample is kept
/// as-is and every following sample is preceded by a `0.0`
/// (NOTE(review): confirm this zero-interleaving is the intended mono
/// mapping — it leaves one interleaved channel mostly silent).
///
/// # Errors
/// Returns an error when the stream cannot be built.
pub fn build_input_stream(
    latest_captured_samples: RBuffer,
    audio_device: AudioDevice,
) -> Result<Stream> {
    let dev = audio_device.device();
    let cfg = audio_device.config();
    let is_mono = cfg.channels == 1;
    let stream = dev.build_input_stream(
        cfg,
        move |data: &[f32], _info| {
            let mut audio_buf = latest_captured_samples.lock().unwrap();
            if is_mono {
                // Lazily interleave a 0.0 before every sample after the
                // first — same output as before, but without allocating a
                // Vec per sample inside the audio callback.
                let interleaved = data.iter().enumerate().flat_map(|(i, &x)| {
                    let pad = if i == 0 { None } else { Some(0.0) };
                    pad.into_iter().chain(std::iter::once(x))
                });
                audio_buf.extend(interleaved);
            } else {
                audio_buf.extend(data.iter().copied());
            }
        },
        |err| {
            eprintln!("got stream error: {}", err);
        },
        None,
    )?;
    Ok(stream)
}
/// Lists the host's audio input devices, sorted by name.
///
/// A device whose name cannot be read is listed as `"<unknown>"`.
///
/// # Panics
/// Panics when the host cannot enumerate input devices.
pub fn list_input_devs() -> Vec<(String, Device)> {
    let host = default_host();
    let mut devs: Vec<(String, Device)> = host
        .input_devices()
        // `expect` instead of a bare `unwrap` so a failure explains itself.
        .expect("failed to enumerate audio input devices")
        .map(|dev| {
            (
                dev.name().unwrap_or_else(|_| String::from("<unknown>")),
                dev,
            )
        })
        .collect();
    devs.sort_by(|(n1, _), (n2, _)| n1.cmp(n2));
    devs
}
| rust | MIT | 763740575c4654d9fafd28e77f68771ad3d276b5 | 2026-01-04T20:17:12.581714Z | false |
beeb/coffee-scale-app | https://github.com/beeb/coffee-scale-app/blob/1486b8bac6ea97718546fc82ddba372ae85b21af/rs/build.rs | rs/build.rs | fn main() {
embuild::espidf::sysenv::output();
}
| rust | MIT | 1486b8bac6ea97718546fc82ddba372ae85b21af | 2026-01-04T20:20:37.981145Z | false |
beeb/coffee-scale-app | https://github.com/beeb/coffee-scale-app/blob/1486b8bac6ea97718546fc82ddba372ae85b21af/rs/src/critical_section.rs | rs/src/critical_section.rs | //! Critical section implementation for ESP-IDF
//!
//! This module provides a critical section implementation that uses `IsrCriticalSection`, which prevents any interrupt
//! from happening while the critical section is acquired.
//!
//! This is necessary, because the hx711 reading must happen with precise timing, and any interrupt could disrupt it.
use std::sync::Mutex;
use esp_idf_svc::hal::interrupt::{IsrCriticalSection, IsrCriticalSectionGuard};
// Global critical section plus the guard currently held (if any); holding
// the guard keeps interrupts disabled until `release` drops it.
static CS: IsrCriticalSection = IsrCriticalSection::new();
static CS_GUARD: Mutex<Option<IsrCriticalSectionGuard>> = Mutex::new(None);

/// `critical_section` implementation backed by ESP-IDF's ISR critical section.
pub struct EspCriticalSection {}
unsafe impl critical_section::Impl for EspCriticalSection {
    /// Enters the ISR critical section (disabling interrupts) and stashes
    /// the guard so `release` can drop it later.
    unsafe fn acquire() {
        let mut guard = CS_GUARD.lock().unwrap();
        *guard = Some(CS.enter());
    }

    /// Drops the stored guard, leaving the ISR critical section.
    unsafe fn release(_token: ()) {
        let mut guard = CS_GUARD.lock().unwrap();
        *guard = None;
    }
}
// Register this implementation with the `critical_section` crate.
critical_section::set_impl!(EspCriticalSection);
| rust | MIT | 1486b8bac6ea97718546fc82ddba372ae85b21af | 2026-01-04T20:20:37.981145Z | false |
beeb/coffee-scale-app | https://github.com/beeb/coffee-scale-app/blob/1486b8bac6ea97718546fc82ddba372ae85b21af/rs/src/ble.rs | rs/src/ble.rs | //! Bluetooth Low Energy (BLE) server implementation
//!
//! This module provides the BLE server implementation for the coffee scale. It exposes a weight characteristic and a
//! battery characteristic.
use std::sync::{Arc, OnceLock};
use anyhow::{anyhow, Result};
use esp32_nimble::{
utilities::{mutex::Mutex, BleUuid},
BLEAdvertisementData, BLECharacteristic, BLEDevice, NimbleProperties,
};
// Standard Bluetooth SIG 16-bit UUIDs for the exposed GATT services/characteristics.
const WEIGHT_SCALE_SERVICE: BleUuid = BleUuid::from_uuid16(0x181D);
const WEIGHT_MEASUREMENT_CHARACTERISTIC: BleUuid = BleUuid::from_uuid16(0x2A9D);
const BATTERY_SERVICE: BleUuid = BleUuid::from_uuid16(0x180F);
const BATTERY_LEVEL_CHARACTERISTIC: BleUuid = BleUuid::from_uuid16(0x2A19);

/// Make weight characteristic available globally
pub static WEIGHT: OnceLock<Arc<Mutex<BLECharacteristic>>> = OnceLock::new();

/// Make battery characteristic available globally
pub static BATTERY: OnceLock<Arc<Mutex<BLECharacteristic>>> = OnceLock::new();
/// Set up the BLE peripheral: GATT services, characteristics and advertising.
///
/// Creates the battery service (read-only level characteristic) and the weight
/// scale service (read + notify measurement characteristic), stores the two
/// characteristic handles in the global `BATTERY`/`WEIGHT` slots, then starts
/// advertising. Errors if either global slot was already set or if advertising
/// cannot be configured/started.
pub fn init() -> Result<()> {
    let device = BLEDevice::take();
    let advertising = device.get_advertising();
    let server = device.get_server();

    // Keep advertising after a client connects so additional clients can join.
    server.on_connect(|server, desc| {
        log::info!("Client connected");
        server
            .update_conn_params(desc.conn_handle(), 24, 48, 0, 60)
            .expect("ble update conn params");
        log::info!("Multi-connect support: start advertising");
        advertising
            .lock()
            .start()
            .expect("ble start advertising");
    });
    // Resume advertising whenever a client drops off.
    server.on_disconnect(|_desc, reason| {
        log::info!("Client disconnected ({:?})", reason);
        advertising
            .lock()
            .start()
            .expect("ble start advertising after disconnect");
    });

    // Battery service: a single read-only level characteristic (placeholder 50%
    // until the real level is measured at startup).
    let battery_service = server.create_service(BATTERY_SERVICE);
    let battery_level = battery_service
        .lock()
        .create_characteristic(BATTERY_LEVEL_CHARACTERISTIC, NimbleProperties::READ);
    battery_level.lock().set_value(&50u8.to_be_bytes());
    BATTERY
        .set(battery_level)
        .map_err(|_| anyhow!("Battery characteristic already initialized"))?;

    // Weight scale service: readable + notifying measurement characteristic.
    let weight_service = server.create_service(WEIGHT_SCALE_SERVICE);
    let weight_measurement = weight_service.lock().create_characteristic(
        WEIGHT_MEASUREMENT_CHARACTERISTIC,
        NimbleProperties::READ | NimbleProperties::NOTIFY,
    );
    weight_measurement.lock().set_value(&0i32.to_be_bytes());
    WEIGHT
        .set(weight_measurement)
        .map_err(|_| anyhow!("Weight characteristic already initialized"))?;

    // Advertise the device name and both service UUIDs, then go live.
    advertising
        .lock()
        .set_data(
            BLEAdvertisementData::new()
                .name("coffee-scale")
                .add_service_uuid(WEIGHT_SCALE_SERVICE)
                .add_service_uuid(BATTERY_SERVICE),
        )
        .map_err(|_| anyhow!("Set advertisement data error"))?;
    advertising
        .lock()
        .start()
        .map_err(|_| anyhow!("Advertising start error"))?;
    Ok(())
}
| rust | MIT | 1486b8bac6ea97718546fc82ddba372ae85b21af | 2026-01-04T20:20:37.981145Z | false |
beeb/coffee-scale-app | https://github.com/beeb/coffee-scale-app/blob/1486b8bac6ea97718546fc82ddba372ae85b21af/rs/src/weight.rs | rs/src/weight.rs | //! Weight sensor module
//!
//! This module contains the code for the weight sensor. It uses the HX711 library to interface with the loadcell and
//! a Kalman filter to smooth out the readings.
//!
//! Ideally, this module would also use interrupts to detect when the HX711 is ready to read, but the current version
//! does polling instead.
use std::{
collections::VecDeque,
sync::atomic::{AtomicI32, Ordering},
};
use anyhow::Result;
use esp_idf_svc::hal::{
delay::Ets,
gpio::{self, Input, InputPin, Output, OutputPin, Pin, PinDriver},
peripheral::Peripheral,
};
use loadcell::{
hx711::{self, HX711},
LoadCell,
};
use signalo_filters::{
observe::kalman::{Config, Kalman},
signalo_traits::{Filter, Reset, WithConfig},
};
/// How long to wait until retry if the hx711 is not ready (microseconds)
const LOADCELL_READY_DELAY_US: u32 = 1000;

/// How long to wait between readings (microseconds)
const LOADCELL_LOOP_DELAY_US: u32 = 10000;

/// How many consecutive readings must agree (within 0.1 g) for the weight to count as stable
const LOADCELL_STABLE_READINGS: usize = 10;

/// How many readings to average when taring the loadcell (default for `tare`)
const LOADCELL_TARE_READINGS: usize = 5;

/// Type alias for the HX711 load sensor (clock output pin, data input pin, busy-wait delay)
pub type LoadSensor<'a, SckPin, DtPin> =
    HX711<PinDriver<'a, SckPin, Output>, PinDriver<'a, DtPin, Input>, Ets>;
/// Loadcell struct: owns the HX711 driver plus a Kalman filter that smooths scaled readings.
pub struct Loadcell<'a, SckPin, DtPin>
where
    DtPin: Peripheral<P = DtPin> + Pin + InputPin,
    SckPin: Peripheral<P = SckPin> + Pin + OutputPin,
{
    // HX711 amplifier driver, bit-banged over the two GPIO pins.
    sensor: LoadSensor<'a, SckPin, DtPin>,
    // Kalman filter applied to scaled readings in `read_weight`.
    filter: Kalman<f32>,
}
impl<'a, SckPin, DtPin> Loadcell<'a, SckPin, DtPin>
where
    DtPin: Peripheral<P = DtPin> + Pin + InputPin,
    SckPin: Peripheral<P = SckPin> + Pin + OutputPin,
{
    /// Create a new Loadcell instance, taking ownership of the pins
    ///
    /// `scale` is the factor converting raw HX711 counts to grams.
    /// Blocks (polling) until the HX711 reports its first conversion is ready.
    pub fn new(clock_pin: SckPin, data_pin: DtPin, scale: f32) -> Result<Self> {
        let filter = Kalman::with_config(Config {
            r: 0.5, // process noise covariance
            q: 0.1, // measurement noise covariance
            // parameters below are not tunable
            a: 1.0,
            b: 0.0,
            c: 1.0,
        });
        let hx711_sck = gpio::PinDriver::output(clock_pin)?;
        let hx711_dt = gpio::PinDriver::input(data_pin)?;
        let mut sensor = hx711::HX711::new(hx711_sck, hx711_dt, Ets);
        sensor.set_scale(scale);
        // Busy-wait until the chip signals data-ready before handing it out.
        while !sensor.is_ready() {
            Ets::delay_us(LOADCELL_READY_DELAY_US);
        }
        Ok(Loadcell { sensor, filter })
    }

    /// Wait until the HX711 is ready to read (polls every `LOADCELL_READY_DELAY_US`)
    pub fn wait_ready(&self) {
        while !self.sensor.is_ready() {
            Ets::delay_us(LOADCELL_READY_DELAY_US);
        }
    }

    /// Wait until the weight is stable
    ///
    /// This function takes readings of the loadcell and keeps iterating until the weight is stable (all readings are
    /// within 0.1g of each other)
    pub fn wait_stable(&mut self) {
        // take readings of the loadcell and keep iterating until the weight is stable
        // (sliding window over the last LOADCELL_STABLE_READINGS samples)
        let mut readings: VecDeque<f32> = VecDeque::with_capacity(LOADCELL_STABLE_READINGS);
        loop {
            self.wait_ready();
            let reading = self.sensor.read_scaled().expect("read scaled");
            log::info!("Waiting for stable weight: {:.4}", reading);
            if readings.len() == LOADCELL_STABLE_READINGS {
                readings.pop_front();
            }
            readings.push_back(reading);
            // Stable once the window is full and every sample lies within
            // 0.1 g of the most recent reading.
            if readings.len() == LOADCELL_STABLE_READINGS
                && readings.iter().all(|&x| (x - reading).abs() < 0.1)
            {
                break;
            }
            Ets::delay_us(LOADCELL_LOOP_DELAY_US);
        }
    }

    /// Tare the loadcell
    ///
    /// Resets the Kalman filter and re-zeroes the sensor using `num_samples`
    /// readings (defaults to `LOADCELL_TARE_READINGS`).
    pub fn tare(&mut self, num_samples: Option<usize>) {
        self.filter = self.filter.clone().reset();
        self.sensor
            .tare(num_samples.unwrap_or(LOADCELL_TARE_READINGS))
    }

    /// Read the loadcell and return the average of `count` readings, in raw units
    ///
    /// Returns 0 when `count` is 0 (the loop body never runs).
    pub fn read_average(&mut self, count: usize) -> i32 {
        let mut current;
        let mut average: f32 = 0.0;
        // Incremental (running) mean: average_n = average_{n-1} + (x_n - average_{n-1}) / n
        for n in 1..=count {
            self.wait_ready();
            current = self.sensor.read().expect("read with offset") as f32;
            Ets::delay_us(LOADCELL_LOOP_DELAY_US);
            average += (current - average) / (n as f32);
        }
        average as i32
    }

    /// Read the loadcell and store the weight in grams into the `weight` atomic integer
    ///
    /// This function reads the loadcell and returns the weight in grams, after filtering.
    pub fn read_weight(&mut self, weight: &AtomicI32) {
        self.wait_ready();
        let reading = self.sensor.read_scaled().expect("read scaled");
        log::info!("Raw reading: {reading:.2}");
        let filtered = self.filter.filter(reading);
        log::info!("Filtered reading: {filtered:.2}");
        // round to 0.10g, multiply by 100 to cast as integer with 2 decimal places
        let val = (filtered / 0.1).round() * 10.;
        weight.store(val as i32, Ordering::Relaxed);
    }
}
| rust | MIT | 1486b8bac6ea97718546fc82ddba372ae85b21af | 2026-01-04T20:20:37.981145Z | false |
beeb/coffee-scale-app | https://github.com/beeb/coffee-scale-app/blob/1486b8bac6ea97718546fc82ddba372ae85b21af/rs/src/main.rs | rs/src/main.rs | //! Firmware for the ESP32 based smart scale.
//!
//! The scale uses a HX711 loadcell amplifier to read the weight and a SSD1306 OLED display to show the weight and
//! battery level.
//!
//! The scale is also a Bluetooth Low Energy (BLE) peripheral that exposes a weight characteristic and a battery
//! characteristic. It also notifies subscribers of the weight characteristic approx. every 200ms.
//!
//! The scale can be calibrated by pressing the button for 2 seconds. The calibration mode shows the raw loadcell
//! readings and the ADC value of the battery voltage. The calibration mode is exited by pressing the button again.
//! The values can be then used to calculate the scaling factor (`LOADCELL_SCALING`) as well as adjust the battery level
//! conversion function (`battery::adc_to_percent`).
//!
//! At the moment, there is no interactive way to set the scaling factor, so it has to be hardcoded in the source code.
use std::{
num::NonZeroU32,
sync::{
atomic::{AtomicBool, AtomicI32, Ordering},
Arc, Mutex,
},
thread,
};
use anyhow::{anyhow, Result};
use esp_idf_svc::{
hal::{
delay::{Ets, BLOCK},
gpio::{self, InterruptType, Pull},
i2c,
peripherals::Peripherals,
prelude::*,
task::notification::Notification,
timer::{config, TimerDriver},
},
systime::EspSystemTime,
};
use ssd1306::I2CDisplayInterface;
use crate::{battery::BatteryReader, screen::Screen, weight::Loadcell};
mod battery;
mod ble;
mod critical_section;
mod screen;
mod weight;
/// Scaling factor for the loadcell.
///
/// The hx711 raw value is multiplied by this to get the weight in grams.
/// Determined via the calibration mode described in the module docs; currently hardcoded.
const LOADCELL_SCALING: f32 = 6.49304e-4;
fn main() -> Result<()> {
    // Initialize the IDF stuff and logger
    esp_idf_svc::sys::link_patches();
    esp_idf_svc::log::EspLogger::initialize_default();

    // Setup screen communication over I2C
    let peripherals = Peripherals::take()?;
    let pins = peripherals.pins;
    log::info!("Starting up...");
    let i2c = i2c::I2cDriver::new(
        peripherals.i2c0,
        pins.gpio21,
        pins.gpio22,
        &i2c::I2cConfig::new().baudrate(400.kHz().into()),
    )?;
    let interface = I2CDisplayInterface::new(i2c);
    let mut screen = Screen::new(interface);

    // Initialize BLE
    ble::init()?;
    log::info!("BLE initialized");

    // Read battery level once at startup and publish it on screen + BLE
    let mut battery_reader = BatteryReader::new(pins.gpio34, peripherals.adc1)?;
    let (battery_percent, _) = battery_reader.read_battery_percent()?;
    log::info!("Battery level: {}%", battery_percent);
    screen.set_battery(battery_percent);
    ble::BATTERY
        .get()
        .ok_or_else(|| anyhow!("Battery characteristic not initialized"))?
        .lock()
        .set_value(&battery_percent.to_be_bytes());

    // Initialize the loadcell (shared with the button thread via Arc<Mutex<_>>)
    let scales = Arc::new(Mutex::new(Loadcell::new(
        pins.gpio13,
        pins.gpio14,
        LOADCELL_SCALING,
    )?));

    // Tare the scales after it's become stable
    {
        let mut scales = scales.lock().expect("mutex lock");
        scales.wait_stable();
        scales.tare(None);
        // unlock mutex
    }

    // Weight value to be shared between threads
    let weight: Arc<AtomicI32> = Arc::new(AtomicI32::new(0));

    // Bluetooth reporting thread: notifies subscribers of the weight
    // characteristic roughly every 200ms, driven by a hardware timer.
    thread::spawn({
        let weight = Arc::clone(&weight);
        move || {
            // Timer to notify subscribers of the weight characteristic value
            let notification = Notification::new();
            let timer_conf = config::Config::new().auto_reload(true);
            let mut timer = TimerDriver::new(peripherals.timer00, &timer_conf).expect("timer");
            timer
                .set_alarm(timer.tick_hz() / 5) // every 200ms = 5 times per second
                .expect("set timer alarm");
            let notifier = notification.notifier();
            // `subscribe` registers an interrupt callback; the closure only
            // posts a notification to wake the loop below.
            unsafe {
                timer
                    .subscribe(move || {
                        notifier.notify(NonZeroU32::new(0b00000000001).expect("new bitset"));
                    })
                    .expect("subscribe to timer");
            }
            // Enable timer interrupt
            timer.enable_interrupt().expect("enable timer interrupt");
            timer.enable_alarm(true).expect("enable timer alarm");
            timer.enable(true).expect("enable timer");
            loop {
                notification.wait(BLOCK);
                log::info!("Timer fired");
                let weight = weight.load(Ordering::Relaxed);
                ble::WEIGHT
                    .get()
                    .expect("weight characteristic not initialized")
                    .lock()
                    .set_value(&weight.to_be_bytes())
                    .notify();
            }
        }
    });

    // Calibration mode flag to be shared between threads
    let calibration_mode = Arc::new(AtomicBool::new(false));

    // Tare/Calibration button thread: a short press tares the scales, a press
    // held for more than 2 seconds enters calibration mode (one-way switch).
    thread::spawn({
        let calibration_mode = Arc::clone(&calibration_mode); // moved inside thread
        let scales = Arc::clone(&scales); // moved inside thread
        move || {
            let mut button_pin = gpio::PinDriver::input(pins.gpio0).expect("button pin");
            button_pin
                .set_pull(Pull::Up)
                .expect("set button pin to pull up");
            button_pin
                .set_interrupt_type(InterruptType::NegEdge)
                .expect("set interrupt type");
            let notification = Notification::new();
            let notifier = notification.notifier();
            // Interrupt callback only posts a notification; the button press is
            // handled in the loop below.
            unsafe {
                button_pin
                    .subscribe(move || {
                        notifier.notify(NonZeroU32::new(0b00000000001).expect("new bitset"));
                    })
                    .expect("subscribe to button press");
            }
            button_pin
                .enable_interrupt()
                .expect("enable button interrupt");
            loop {
                notification.wait(BLOCK);
                log::info!("button pressed, wait for letting go");
                let before = EspSystemTime {}.now();
                let mut calib = false;
                // Poll the (active-low, pulled-up) button until it is released
                // or the 2 second long-press threshold is crossed.
                while button_pin.is_low() {
                    Ets::delay_ms(10);
                    let after = EspSystemTime {}.now();
                    if (after - before).as_millis() > 2000 {
                        calib = true;
                        break;
                    }
                }
                if calib {
                    log::info!("long press, enter calibration mode");
                    let mut scales = scales.lock().expect("mutex lock");
                    scales.tare(None);
                    calibration_mode.store(true, Ordering::Relaxed);
                    // Thread exits here: the button is unused in calibration mode.
                    break;
                }
                log::info!("button released");
                log::info!("short press, tare scales");
                let mut scales = scales.lock().expect("mutex lock");
                scales.tare(Some(5));
                // Re-arm the interrupt for the next press.
                button_pin
                    .enable_interrupt()
                    .expect("enable button interrupt");
            }
        }
    });

    // Main loop
    loop {
        // Check if we are in calibration mode
        if calibration_mode.load(Ordering::Relaxed) {
            // Calibration mode, display the raw readings
            let average = {
                let mut scales = scales.lock().expect("mutex lock");
                scales.read_average(10)
            };
            log::info!("Weight reading: {average}");
            let (_, adc_value) = battery_reader.read_battery_percent()?;
            screen.print_calibration(average, adc_value);
            continue;
        }
        // Normal operation
        // Read weight from loadcell and display
        {
            let mut scales = scales.lock().expect("mutex lock");
            scales.read_weight(&weight);
            // unlock mutex
        }
        let weight = weight.load(Ordering::Relaxed);
        screen.print(weight);
    }
}
| rust | MIT | 1486b8bac6ea97718546fc82ddba372ae85b21af | 2026-01-04T20:20:37.981145Z | false |
beeb/coffee-scale-app | https://github.com/beeb/coffee-scale-app/blob/1486b8bac6ea97718546fc82ddba372ae85b21af/rs/src/battery.rs | rs/src/battery.rs | //! Battery voltage reader
//!
//! This module provides a way to read the battery voltage using the ADC peripheral.
use anyhow::Result;
use esp_idf_svc::hal::{
adc::{self, Adc},
gpio::ADCPin,
peripheral::Peripheral,
};
/// Battery voltage reader
pub struct BatteryReader<'a, ADC: Adc + 'a, PIN: Peripheral<P = PIN> + ADCPin<Adc = ADC>> {
    // Calibrated ADC peripheral driver.
    adc: adc::AdcDriver<'a, ADC>,
    // ADC channel attached to the battery voltage-sense pin (11 dB attenuation).
    analog: adc::AdcChannelDriver<'a, { adc::attenuation::DB_11 }, PIN>,
}
impl<'a, ADC: Peripheral<P = ADC> + Adc, PIN: Peripheral<P = PIN> + ADCPin<Adc = ADC>>
    BatteryReader<'a, ADC, PIN>
{
    /// Create a new battery reader
    ///
    /// Configures the sense pin as an ADC channel with 11 dB attenuation and
    /// the ADC driver with calibration enabled.
    pub fn new(vsense_pin: PIN, adc: ADC) -> Result<Self> {
        let analog =
            adc::AdcChannelDriver::<{ adc::attenuation::DB_11 }, _>::new(vsense_pin).expect("adc");
        Ok(BatteryReader {
            adc: adc::AdcDriver::new(adc, &adc::config::Config::new().calibration(true))?,
            analog,
        })
    }

    /// Read the battery voltage and return the percentage and the raw ADC value
    ///
    /// The ADC value is the average of 10 readings.
    pub fn read_battery_percent(&mut self) -> Result<(u8, u16)> {
        const NUM_READINGS: u32 = 10;
        // Accumulate in u32: ten u16 readings could exceed u16::MAX, which
        // would silently wrap in release builds.
        let mut sum: u32 = 0;
        for _ in 0..NUM_READINGS {
            sum += u32::from(self.adc.read(&mut self.analog)?);
        }
        // The average of u16 readings always fits back into a u16.
        let value = (sum / NUM_READINGS) as u16;
        Ok((adc_to_percent(value), value))
    }
}
/// Convert ADC reading into voltage and then percentage
///
/// Calibration values:
///
/// 2080: 4.15V
/// 2055: 4.1V
/// 2000: 4.0V
/// 1949: 3.9V
/// 1897: 3.8V
/// 1848: 3.7V
/// 1795: 3.6V
/// 1746: 3.5V
/// 1692: 3.4V
/// 1642: 3.3V
///
/// Least-squares fit: V = 0.112202 + 0.00194226 ADC
///
/// Conversion to percentage (extracted from a chart a long time ago, can't remember the source):
///
/// 4.2V: 100%
/// 4.1V: 94%
/// 4.0V: 83%
/// 3.9V: 72%
/// 3.8V: 59%
/// 3.7V: 50%
/// 3.6V: 33%
/// 3.5V: 15%
/// 3.4V: 6%
/// 3.3V: 0%
///
/// Cubic fit: y = -141.608 x^3 + 1574.53 x^2 - 5694.03 x + 6731.1
fn adc_to_percent(adc: u16) -> u8 {
    // ADC count -> voltage, via the least-squares linear fit documented above.
    let voltage = 0.112202 + 0.00194226 * (adc as f32);
    // Voltage -> percent, via the cubic fit; clamp to a valid percentage.
    let percent =
        -141.608 * voltage.powi(3) + 1574.53 * voltage.powi(2) - 5694.03 * voltage + 6731.1;
    percent.clamp(0., 100.) as u8
}
| rust | MIT | 1486b8bac6ea97718546fc82ddba372ae85b21af | 2026-01-04T20:20:37.981145Z | false |
beeb/coffee-scale-app | https://github.com/beeb/coffee-scale-app/blob/1486b8bac6ea97718546fc82ddba372ae85b21af/rs/src/screen.rs | rs/src/screen.rs | //! Display driver for the SSD1306 OLED display.
//!
//! The display is used to show the weight and battery level. It also shows the raw loadcell readings and the ADC value
//! of the battery voltage when in calibration mode.
//!
//! The display is driven by the `ssd1306` crate, which provides a high-level API for the SSD1306 display driver. The
//! `embedded-graphics` crate is used to draw text and images on the display.
//!
//! The display is a 128x32 monochrome OLED display, which is connected to the ESP32 over I2C.
use embedded_graphics::{
geometry::{Point, Size},
image::{Image, ImageRaw},
mono_font::{
ascii::FONT_7X13, mapping::StrGlyphMapping, DecorationDimensions, MonoFont, MonoTextStyle,
},
pixelcolor::BinaryColor,
text::{Alignment, Baseline, Text, TextStyleBuilder},
Drawable,
};
use esp_idf_svc::hal::i2c::I2cDriver;
use ssd1306::{
mode::{BufferedGraphicsMode, DisplayConfig},
prelude::I2CInterface,
rotation::DisplayRotation,
size::DisplaySize128x32,
Ssd1306,
};
type Display<'a> = Ssd1306<
I2CInterface<I2cDriver<'a>>,
DisplaySize128x32,
BufferedGraphicsMode<DisplaySize128x32>,
>;
/// Custom font for the display (digits and symbols in normal operation mode)
const CUSTOM_FONT: MonoFont = MonoFont {
image: ImageRaw::new(include_bytes!("../assets/font.raw"), 266),
glyph_mapping: &StrGlyphMapping::new("0123456789.-gb", 10),
character_size: Size::new(19, 30),
character_spacing: 2,
baseline: 30,
underline: DecorationDimensions::default_underline(30),
strikethrough: DecorationDimensions::default_strikethrough(30),
};
/// Screen struct
pub struct Screen<'a> {
    // Buffered SSD1306 driver; drawing mutates the buffer, `flush` pushes it out.
    pub display: Display<'a>,
    // Last known battery level in percent; `print` shows a low-battery glyph below 20.
    pub battery: u8,
}
impl<'a> Screen<'a> {
    /// Create a new Screen instance, initializing the battery to 100% for now
    ///
    /// Initializes the SSD1306 in buffered-graphics mode and shows the splash
    /// image from `assets/hex.raw` until the first print call replaces it.
    pub fn new(interface: I2CInterface<I2cDriver<'a>>) -> Self {
        let mut display = Ssd1306::new(interface, DisplaySize128x32, DisplayRotation::Rotate0)
            .into_buffered_graphics_mode();
        display.init().expect("display init");
        let raw: ImageRaw<BinaryColor> = ImageRaw::new(include_bytes!("../assets/hex.raw"), 26);
        let im = Image::new(&raw, Point::new(51, 1));
        im.draw(&mut display).expect("draw splash image");
        display.flush().expect("flush display buffer");
        Screen {
            display,
            battery: 100,
        }
    }

    /// Set the battery level (displays a battery icon if the battery level is below 20%)
    pub fn set_battery(&mut self, battery: u8) {
        self.battery = battery;
    }

    /// Print the weight and symbols on the display
    ///
    /// `number` is the weight with two implied decimal places (hundredths of a
    /// gram); only the first decimal place is drawn.
    pub fn print(&mut self, number: i32) {
        // use custom font
        let character_style = MonoTextStyle::new(&CUSTOM_FONT, BinaryColor::On);
        let text_style = TextStyleBuilder::new()
            .baseline(Baseline::Bottom)
            .alignment(Alignment::Right)
            .build();
        // clear the display buffer
        self.display.clear_buffer();
        // draw the first decimal place value
        // to draw two decimal places: &format!("{:02}", number.abs() % 100)
        Text::with_text_style(
            &format!("{}", number.abs() % 100 / 10),
            Point::new(116, 31),
            character_style,
            text_style,
        )
        .draw(&mut self.display)
        .expect("draw decimals");
        // Draw the digits left of the decimal separator
        if number > -100 && number < 0 {
            // -0.xx would not show the minus sign due to the divison by 100 being 0.
            Text::with_text_style(
                &format!("-{}.", number / 100),
                Point::new(110, 31), // x coordinate if drawing two decimal places: 89
                character_style,
                text_style,
            )
            .draw(&mut self.display)
            .expect("draw digits");
        } else {
            Text::with_text_style(
                &format!("{}.", number / 100),
                Point::new(110, 31), // x coordinate if drawing two decimal places: 89
                character_style,
                text_style,
            )
            .draw(&mut self.display)
            .expect("draw digits");
        }
        // draw the gram symbol
        // NOTE(review): x=136 exceeds the 128 px panel width, so the glyph is
        // partially clipped — confirm this is intentional.
        Text::with_text_style("g", Point::new(136, 31), character_style, text_style)
            .draw(&mut self.display)
            .expect("draw gram symbol");
        // draw the battery symbol if the battery level is below 20%
        if self.battery < 20 {
            Text::with_text_style("b", Point::new(136, 8), character_style, text_style)
                .draw(&mut self.display)
                .expect("draw battery symbol");
        }
        self.display.flush().expect("flush display buffer");
    }

    /// Print the raw loadcell reading and the ADC value of the battery voltage (calibration mode)
    pub fn print_calibration(&mut self, number: i32, adc: u16) {
        let character_style = MonoTextStyle::new(&FONT_7X13, BinaryColor::On);
        let text_style = TextStyleBuilder::new()
            .baseline(Baseline::Bottom)
            .alignment(Alignment::Left)
            .build();
        self.display.clear_buffer();
        Text::with_text_style(
            &format!("calib: {number:>7}"),
            Point::new(2, 15),
            character_style,
            text_style,
        )
        .draw(&mut self.display)
        .expect("draw loadcell");
        Text::with_text_style(
            &format!("adc: {adc}"),
            Point::new(2, 30),
            character_style,
            text_style,
        )
        .draw(&mut self.display)
        .expect("draw adc");
        self.display.flush().expect("flush display buffer");
    }
}
| rust | MIT | 1486b8bac6ea97718546fc82ddba372ae85b21af | 2026-01-04T20:20:37.981145Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/ffi_interface/src/lib.rs | ffi_interface/src/lib.rs | pub mod serialization;
// TODO: These are re-exported to not break the java code
// TODO: we ideally don't want to export these.
// - deserialize_update_commitment_sparse should not be exported and is an abstraction leak
pub use serialization::{
deserialize_commitment, deserialize_update_commitment_sparse, serialize_commitment,
};
use banderwagon::Element;
use banderwagon::Fr;
use ipa_multipoint::committer::{Committer, DefaultCommitter};
use ipa_multipoint::crs::CRS;
use ipa_multipoint::lagrange_basis::PrecomputedWeights;
use ipa_multipoint::multiproof::{MultiPoint, MultiPointProof, ProverQuery, VerifierQuery};
use ipa_multipoint::transcript::Transcript;
pub use serialization::{fr_from_le_bytes, fr_to_le_bytes};
use verkle_trie::proof::golang_proof_format::{bytes32_to_element, hex_to_bytes32, VerkleProofGo};
pub use crate::serialization::{
deserialize_proof_query, deserialize_proof_query_uncompressed, deserialize_verifier_query,
deserialize_verifier_query_uncompressed,
};
/// Context holds all of the necessary components needed for cryptographic operations
/// in the Verkle Trie. This includes:
/// - Updating the verkle trie
/// - Generating proofs
///
/// This is useful for caching purposes, since the context can be reused for multiple
/// function calls. More so because the Context is relatively expensive to create
/// compared to making a function call.
pub struct Context {
    /// Common reference string: the fixed group elements committed against.
    pub crs: CRS,
    /// Committer built from the CRS points; used for all commitment operations.
    pub committer: DefaultCommitter,
    /// Precomputed weights for the 256-element evaluation domain (see `Context::new`).
    pub precomputed_weights: PrecomputedWeights,
}
impl Default for Context {
    /// Equivalent to [`Context::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl Context {
    /// Build a context from the default CRS, with a committer over its points
    /// and precomputed weights for the 256-element domain.
    pub fn new() -> Self {
        let crs = CRS::default();
        Self {
            committer: DefaultCommitter::new(&crs.G),
            precomputed_weights: PrecomputedWeights::new(256),
            crs,
        }
    }
}
/// A serialized uncompressed group element
pub type CommitmentBytes = [u8; 64];

/// A serialized scalar field element
pub type ScalarBytes = [u8; 32];

/// This is the identity element of the group
/// (uncompressed encoding: 32 zero bytes, then a 1 followed by 31 zero bytes)
pub const ZERO_POINT: CommitmentBytes = [
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
];
#[derive(Debug, Clone)]
pub enum Error {
    /// The serialized-scalar input's byte length was not a multiple of 32.
    LengthOfScalarsNotMultipleOf32 {
        len: usize,
    },
    /// More scalars were supplied than the CRS can commit to.
    TooManyScalars {
        expected: usize,
        got: usize,
    },
    /// A 32-byte chunk could not be deserialized into a scalar field element.
    FailedToDeserializeScalar {
        bytes: Vec<u8>,
    },
    /// A variable-length input did not match the expected per-item size.
    LengthIsNotAnExpectedMultiple {
        item_descriptor: &'static str,
        expected_multiple: u64,
        actual_size: u64,
    },
    /// The bytes could not be deserialized into a group element.
    CouldNotDeserializeCommitment {
        bytes: Vec<u8>,
    },
    /// The multipoint proof did not verify against the supplied queries.
    ProofVerificationFailed,
}
#[allow(deprecated)]
#[deprecated(note = "moving forward one should implement this method on the caller side")]
/// Compute the key prefix used in the `get_tree_key` method
///
/// Returns a 32 byte slice representing the first 31 bytes of the `key` to be used in `get_tree_key`
///
// TODO: We could probably make this use `map_to_field` instead of `.to_bytes`
pub fn get_tree_key_hash(
    context: &Context,
    address: [u8; 32],
    tree_index_le: [u8; 32],
) -> [u8; 32] {
    // Concatenate address || tree_index (LE) into one 64-byte buffer and hash it.
    let mut input = [0u8; 64];
    input[..32].copy_from_slice(&address);
    input[32..].copy_from_slice(&tree_index_le);

    get_tree_key_hash_flat_input(context, input)
}
#[deprecated(note = "moving forward one should implement this method on the caller side")]
/// Same method as `get_tree_key_hash` but takes a 64 byte input instead of two 32 byte inputs
///
/// This is kept for backwards compatibility and because we have not yet checked if its better
/// for Java to pass in two 32 bytes or one 64 byte input.
///
/// The former probably requires two allocations, while the latter is less type safe.
pub fn get_tree_key_hash_flat_input(context: &Context, input: [u8; 64]) -> [u8; 32] {
    // Hash the 64-byte (address || tree_index) buffer with the context's committer.
    verkle_spec::hash64(&context.committer, input).to_fixed_bytes()
}
#[allow(deprecated)]
#[deprecated(note = "moving forward one should implement this method on the caller side")]
/// Compute a tree key: the hashed (address, tree index) prefix with the final
/// byte replaced by `sub_index`.
pub fn get_tree_key(
    context: &Context,
    address: [u8; 32],
    tree_index_le: [u8; 32],
    sub_index: u8,
) -> [u8; 32] {
    let mut key = get_tree_key_hash(context, address, tree_index_le);
    key[31] = sub_index;
    key
}
#[allow(deprecated)]
#[deprecated(note = "moving forward one should implement this method on the caller side")]
/// This is exactly the same as `get_tree_key_hash` method.
/// Use get_tree_key_hash instead.
///
/// Moving to rename this as it causes confusion. For now, I'll call this `get_tree_key_hash`
///
/// (Deprecated alias kept for callers that still use the `pedersen_hash` name.)
pub fn pedersen_hash(context: &Context, address: [u8; 32], tree_index_le: [u8; 32]) -> [u8; 32] {
    get_tree_key_hash(context, address, tree_index_le)
}
fn _commit_to_scalars(context: &Context, scalars: &[u8]) -> Result<Element, Error> {
let scalars_len = scalars.len();
// scalars when serialized are 32 bytes
// check that the length of scalars is a multiple of 32
if scalars_len % 32 != 0 {
return Err(Error::LengthOfScalarsNotMultipleOf32 { len: scalars_len });
}
// A verkle branch can only hold 256 elements, so we never expect to commit
// to more than 256 scalars.
let num_scalars = scalars_len / 32;
if num_scalars > 256 {
return Err(Error::TooManyScalars {
got: num_scalars,
expected: context.crs.max_number_of_elements(),
});
}
let mut inputs = Vec::with_capacity(num_scalars);
for chunk in scalars.chunks_exact(32) {
inputs.push(fr_from_le_bytes(chunk)?);
}
Ok(context.committer.commit_lagrange(&inputs))
}
/// Commits to at most 256 scalars
///
/// Returns the commitment to those scalars
pub fn commit_to_scalars(context: &Context, scalars: &[u8]) -> Result<CommitmentBytes, Error> {
    _commit_to_scalars(context, scalars).map(|commitment| commitment.to_bytes_uncompressed())
}
/// Adds two commitments together
///
/// Both inputs are deserialized from their uncompressed 64-byte encodings and
/// the group sum is re-serialized the same way.
pub fn add_commitment(lhs: CommitmentBytes, rhs: CommitmentBytes) -> CommitmentBytes {
    let sum = Element::from_bytes_unchecked_uncompressed(lhs)
        + Element::from_bytes_unchecked_uncompressed(rhs);
    sum.to_bytes_uncompressed()
}
/// Updates a commitment from vG to wG
///
/// Since the commitment is homomorphic, wG = vG - vG + wG = vG + (w-v)G
/// - `vG` is the old commitment
/// - `v` is the old scalar
/// - `w` is the new scalar
///
/// Returns the updated commitment
pub fn update_commitment(
    context: &Context,
    old_commitment_bytes: CommitmentBytes,
    // There can only be at most 256 elements in a verkle branch
    commitment_index: u8,
    old_scalar_bytes: ScalarBytes,
    new_scalar_bytes: ScalarBytes,
) -> Result<CommitmentBytes, Error> {
    let old_commitment = Element::from_bytes_unchecked_uncompressed(old_commitment_bytes);
    let old_scalar = fr_from_le_bytes(&old_scalar_bytes)?;
    let new_scalar = fr_from_le_bytes(&new_scalar_bytes)?;

    // (w - v), applied to the basis point at `commitment_index`: (w - v)G
    let difference = new_scalar - old_scalar;
    let difference_commitment = context
        .committer
        .scalar_mul(difference, commitment_index as usize);

    // vG + (w - v)G == wG
    Ok((difference_commitment + old_commitment).to_bytes_uncompressed())
}
/// Update commitment for sparse vector.
///
/// Applies `new - old` scalar deltas at the given branch indices to the old
/// commitment, using a single sparse commitment for all deltas.
///
/// Errors if any old/new scalar fails to deserialize. Panics if the scalar
/// vectors are shorter than `commitment_index_vec` (indexing is driven by the
/// index vector, preserving the original semantics).
pub fn update_commitment_sparse(
    context: &Context,
    old_commitment_bytes: CommitmentBytes,
    // There can only be at most 256 elements in a verkle branch
    commitment_index_vec: Vec<usize>,
    old_scalar_bytes_vec: Vec<ScalarBytes>,
    new_scalar_bytes_vec: Vec<ScalarBytes>,
) -> Result<CommitmentBytes, Error> {
    let old_commitment = Element::from_bytes_unchecked_uncompressed(old_commitment_bytes);

    // For each index in commitment_index, we compute the delta value.
    // Deserialization failures are propagated instead of panicking (was `unwrap`).
    let mut delta_values: Vec<(Fr, usize)> = Vec::with_capacity(commitment_index_vec.len());
    for index in 0..commitment_index_vec.len() {
        let old_scalar = fr_from_le_bytes(&old_scalar_bytes_vec[index])?;
        let new_scalar = fr_from_le_bytes(&new_scalar_bytes_vec[index])?;
        delta_values.push((new_scalar - old_scalar, commitment_index_vec[index]));
    }

    // Commit to the sparse delta vector and fold it into the old commitment.
    let delta_commitment = context.committer.commit_sparse(delta_values);
    Ok((delta_commitment + old_commitment).to_bytes_uncompressed())
}
/// Hashes a commitment
///
/// Note: This commitment can be used as the `commitment root`
///
/// Returns a `Scalar` representing the hash of the commitment
pub fn hash_commitment(commitment: CommitmentBytes) -> ScalarBytes {
    // TODO: We could introduce a method named `hash_commit_to_scalars`
    // TODO: which would save this serialization roundtrip. We should profile/check that
    // TODO: this is actually a bottleneck for the average workflow before doing this.
    let element = Element::from_bytes_unchecked_uncompressed(commitment);
    fr_to_le_bytes(element.map_to_scalar_field())
}
/// Hashes a vector of commitments.
///
/// This is more efficient than repeatedly calling `hash_commitment`
///
/// Returns a vector of `Scalar`s representing the hash of each commitment
pub fn hash_commitments(commitments: &[CommitmentBytes]) -> Vec<ScalarBytes> {
let elements = commitments
.iter()
.map(|commitment| Element::from_bytes_unchecked_uncompressed(*commitment))
.collect::<Vec<_>>();
Element::batch_map_to_scalar_field(&elements)
.into_iter()
.map(fr_to_le_bytes)
.collect()
}
/// Receives a tuple (C_i, f_i(X), z_i, y_i)
///
/// Where C_i is a commitment to f_i(X) serialized as 32 bytes
/// f_i(X) is the polynomial serialized as 8192 bytes since we have 256 Fr elements each serialized as 32 bytes
/// z_i is index of the point in the polynomial: 1 byte (number from 1 to 256)
/// y_i is the evaluation of the polynomial at z_i i.e value we are opening: 32 bytes
/// Returns a proof serialized as bytes
///
/// This function assumes that the domain is always 256 values and commitment is 32bytes.
pub fn create_proof(context: &Context, input: Vec<u8>) -> Result<Vec<u8>, Error> {
    // - Checks for the serialized proof queries
    //
    // Define the chunk size (8257 bytes)
    // C_i, f_i(X), z_i, y_i
    // 32, 8192, 1, 32
    // = 8257
    const CHUNK_SIZE: usize = 8257; // TODO: get this from ipa-multipoint

    if input.len() % CHUNK_SIZE != 0 {
        return Err(Error::LengthIsNotAnExpectedMultiple {
            item_descriptor: "Input length for proof",
            expected_multiple: CHUNK_SIZE as u64,
            actual_size: input.len() as u64,
        });
    }
    let num_openings = input.len() / CHUNK_SIZE;

    let proofs_bytes = input.chunks_exact(CHUNK_SIZE);
    // The remainder is always empty here: the modulo check above already
    // guaranteed an exact multiple of CHUNK_SIZE.
    assert!(
        proofs_bytes.remainder().is_empty(),
        "There should be no left over bytes when chunking the proof"
    );

    // - Deserialize proof queries
    //
    let mut prover_queries: Vec<ProverQuery> = Vec::with_capacity(num_openings);
    for proof_bytes in proofs_bytes {
        let prover_query = deserialize_proof_query(proof_bytes);
        prover_queries.push(prover_query);
    }

    // - Create proofs
    //
    let mut transcript = Transcript::new(b"verkle");

    let proof = MultiPoint::open(
        // TODO: This should not need to clone the CRS, but instead take a reference
        context.crs.clone(),
        &context.precomputed_weights,
        &mut transcript,
        prover_queries,
    );
    Ok(proof.to_bytes().expect("cannot serialize proof"))
}
/// Receives a proof and a tuple (C_i, z_i, y_i)
///
/// Where C_i is a commitment to f_i(X) serialized as 64 bytes (uncompressed commitment)
/// z_i is index of the point in the polynomial: 1 byte (number from 1 to 256)
/// y_i is the evaluation of the polynomial at z_i i.e value we are opening: 32 bytes or Fr (scalar field element)
/// Returns true of false.
/// Proof is verified or not.
/// TODO: Add more tests.
pub fn verify_proof(context: &Context, input: Vec<u8>) -> Result<(), Error> {
// Proof bytes are 576 bytes
// First 32 bytes is the g_x_comm_bytes
// Next 544 bytes are part of IPA proof. Domain size is always 256. Explanation is in IPAProof::from_bytes().
let proof_bytes = &input[0..576];
let proof = MultiPointProof::from_bytes(proof_bytes, 256).unwrap();
let verifier_queries_bytes = &input[576..];
// Define the chunk size 32+1+32 = 65 bytes for C_i, z_i, y_i
const CHUNK_SIZE: usize = 65;
if verifier_queries_bytes.len() % CHUNK_SIZE != 0 {
return Err(Error::LengthIsNotAnExpectedMultiple {
item_descriptor: "Verifier queries",
expected_multiple: CHUNK_SIZE as u64,
actual_size: verifier_queries_bytes.len() as u64,
});
}
let num_openings = verifier_queries_bytes.len() / CHUNK_SIZE;
// Create an iterator over the input Vec<u8>
let chunked_verifier_queries = verifier_queries_bytes.chunks(CHUNK_SIZE);
// - Deserialize verifier queries
let mut verifier_queries: Vec<VerifierQuery> = Vec::with_capacity(num_openings);
for verifier_query_bytes in chunked_verifier_queries {
let verifier_query = deserialize_verifier_query(verifier_query_bytes);
verifier_queries.push(verifier_query);
}
let mut transcript = Transcript::new(b"verkle");
if proof.check(
&context.crs,
&context.precomputed_weights,
&verifier_queries,
&mut transcript,
) {
Ok(())
} else {
Err(Error::ProofVerificationFailed)
}
}
#[deprecated(
    note = "Parsing of the execution witness and preprocessing its input should be done by clients in the future"
)]
/// Verifies an execution witness as specified in the EIP and on Kaustinen.
///
/// For an example of the format, see: https://github.com/ethereumjs/ethereumjs-monorepo/blob/master/packages/statemanager/test/testdata/verkleKaustinenBlock.json#L1-L2626
pub fn verify_execution_witness(root: &str, execution_witness_json_str: &str) -> bool {
    // Parse the execution witness first; any malformed input means "not verified".
    let parsed = VerkleProofGo::from_json_str(execution_witness_json_str)
        .from_verkle_proof_go_to_verkle_proof();
    let (verkle_proof, keys_values) = match parsed {
        Some(pair) => pair,
        None => return false,
    };
    // Then decode the claimed state root commitment from its hex string.
    let root_element = match bytes32_to_element(hex_to_bytes32(root)) {
        Some(element) => element,
        None => return false,
    };
    // Only the boolean verdict matters; the proof's secondary output is discarded.
    verkle_proof
        .check(keys_values.keys, keys_values.current_values, root_element)
        .0
}
#[cfg(test)]
mod tests {
    use crate::{verify_execution_witness, Context};
    use banderwagon::Fr;
    use ipa_multipoint::committer::Committer;
    use verkle_trie::proof::golang_proof_format::{EXECUTION_WITNESS_JSON, PREVIOUS_STATE_ROOT};
    use crate::{fr_from_le_bytes, fr_to_le_bytes};
    // Smoke test: the bundled Kaustinen execution witness must verify against
    // the bundled previous state root.
    #[test]
    fn exec_witness_works() {
        let result = verify_execution_witness(PREVIOUS_STATE_ROOT, EXECUTION_WITNESS_JSON);
        assert!(result);
    }
    // Checks that updating one slot of a commitment via `update_commitment`
    // agrees with recomputing the commitment from scratch.
    #[test]
    fn commitment_update() {
        let context = Context::default();
        let committer = &context.committer;
        let a_0 = banderwagon::Fr::from(123u128);
        let a_1 = banderwagon::Fr::from(123u128);
        let a_2 = banderwagon::Fr::from(456u128);
        // Compute C = a_0 * G_0 + a_1 * G_1
        let commitment = committer.commit_lagrange(&[a_0, a_1]);
        // Now we want to compute C = a_2 * G_0 + a_1 * G_1
        let naive_update = committer.commit_lagrange(&[a_2, a_1]);
        // We can do this by computing C = (a_2 - a_0) * G_0 + a_1 * G_1
        let delta = a_2 - a_0;
        let delta_commitment = committer.scalar_mul(delta, 0);
        let delta_update = delta_commitment + commitment;
        assert_eq!(naive_update, delta_update);
        // Now lets do it using the update_commitment method
        let updated_commitment = super::update_commitment(
            &context,
            commitment.to_bytes_uncompressed(),
            0,
            fr_to_le_bytes(a_0),
            fr_to_le_bytes(a_2),
        )
        .unwrap();
        assert_eq!(updated_commitment, naive_update.to_bytes_uncompressed())
    }
    // Checks that `update_commitment_sparse` (several indices at once, old
    // values implicitly zero) agrees with both a naive recomputation and with
    // `commit_sparse`.
    #[test]
    fn commitment_exists_sparse_update() {
        let context = Context::default();
        let committer = &context.committer;
        let a_0 = banderwagon::Fr::from(123u128);
        let a_1 = banderwagon::Fr::from(123u128);
        let a_2 = banderwagon::Fr::from(246u128);
        let a_zero = banderwagon::Fr::from(0u128);
        // Compute C = a_0 * G_0
        let commitment = committer.scalar_mul(a_0, 0);
        let naive_update = commitment + committer.scalar_mul(a_1, 1) + committer.scalar_mul(a_2, 2);
        let val_indices: Vec<(Fr, usize)> = vec![(a_1, 1), (a_2, 2)];
        let new_commitment = commitment + committer.commit_sparse(val_indices);
        assert_eq!(naive_update, new_commitment);
        let commitment_index_vec = vec![1, 2];
        let old_scalar_bytes_vec = vec![fr_to_le_bytes(a_zero), fr_to_le_bytes(a_zero)];
        let new_scalar_bytes_vec = vec![fr_to_le_bytes(a_1), fr_to_le_bytes(a_2)];
        // Now lets do it using the update_commitment_sparse method
        let updated_commitment = super::update_commitment_sparse(
            &context,
            commitment.to_bytes_uncompressed(),
            commitment_index_vec,
            old_scalar_bytes_vec,
            new_scalar_bytes_vec,
        )
        .unwrap();
        assert_eq!(updated_commitment, naive_update.to_bytes_uncompressed());
    }
    // Round-trip for the scalar byte helpers.
    // NOTE(review): the name says "be" (big-endian) but the helpers under test
    // are the little-endian `fr_to_le_bytes`/`fr_from_le_bytes`.
    #[test]
    fn from_be_to_be_bytes() {
        let value = banderwagon::Fr::from(123456u128);
        let bytes = fr_to_le_bytes(value);
        let got_value = fr_from_le_bytes(&bytes).unwrap();
        assert_eq!(got_value, value)
    }
}
#[test]
fn check_identity_constant() {
    // ZERO_POINT must stay in sync with the uncompressed serialization of the
    // identity element; this guards against the constant going stale.
    assert_eq!(Element::zero().to_bytes_uncompressed(), ZERO_POINT);
}
#[allow(deprecated)]
#[cfg(test)]
mod pedersen_hash_tests {
    use banderwagon::Fr;
    use ipa_multipoint::committer::Committer;
    use crate::{
        add_commitment, commit_to_scalars, get_tree_key, get_tree_key_hash, hash_commitment,
        Context,
    };
    // Pins the tree-key hash for the all-zero address and tree index.
    #[test]
    fn smoke_test_address_zero() {
        let context = Context::default();
        let address = [0u8; 32];
        let tree_index = [0u8; 32];
        let expected = "1a100684fd68185060405f3f160e4bb6e034194336b547bdae323f888d533207";
        let got_hash_bytes = get_tree_key_hash(&context, address, tree_index);
        let got_hash_hex = hex::encode(got_hash_bytes);
        assert_eq!(expected, got_hash_hex)
    }
    // Recomputes `get_tree_key` out of the lower-level FFI primitives
    // (chunking, committing, adding, hashing) and checks both paths agree.
    #[test]
    fn get_tree_key_add_commitment_equivalence() {
        let context = Context::default();
        let input = [
            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
            25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
            47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
        ];
        // First 32 bytes is the address
        let mut address = [0u8; 32];
        address.copy_from_slice(&input[..32]);
        // Next 32 bytes is the tree index -- But interpreted as a little endian number
        let mut tree_index = [0u8; 32];
        tree_index.copy_from_slice(&input[32..64]);
        tree_index.reverse();
        let got_hash_bytes = get_tree_key(&context, address, tree_index, 0);
        let expected_hash = "ff7e3916badeb510dfcdad458726273319280742e553d8d229bd676428147300";
        let got_hash_hex = hex::encode(got_hash_bytes);
        assert_eq!(expected_hash, got_hash_hex);
        // Now compute the get_tree_key_hash using the ffi_interface
        //
        let input: Vec<_> = address.into_iter().chain(tree_index).collect();
        // This method is expected to be in the caller.
        // Note: this method adds the verkle specific marker as the first u128 integer
        let chunked_input = verkle_spec::chunk64(input.try_into().unwrap());
        let marker = u128_to_32bytes(chunked_input[0]);
        let address_low = u128_to_32bytes(chunked_input[1]);
        let address_high = u128_to_32bytes(chunked_input[2]);
        let tree_index_low = u128_to_32bytes(chunked_input[3]);
        let tree_index_high = u128_to_32bytes(chunked_input[4]);
        // Commit to (marker, address_low, address_high) -- this part can be cached per address.
        let address_point_scalars: Vec<_> = vec![marker, address_low, address_high]
            .into_iter()
            .flatten()
            .collect();
        let address_point_cache = commit_to_scalars(&context, &address_point_scalars).unwrap();
        // Commit to the tree index alone, padded into slots 3 and 4.
        let tree_index_scalars: Vec<_> = vec![
            [0u8; 32],
            [0u8; 32],
            [0u8; 32],
            tree_index_low,
            tree_index_high,
        ]
        .into_iter()
        .flatten()
        .collect();
        let tree_index_commit = commit_to_scalars(&context, &tree_index_scalars).unwrap();
        let committed_point = add_commitment(address_point_cache, tree_index_commit);
        let mut key = hash_commitment(committed_point);
        key[31] = 0; // modify the last byte since get_tree_key above uses sub_index=0
        assert_eq!(hex::encode(&key), expected_hash)
    }
    // Pins the output of `add_commitment` on two fixed uncompressed points.
    #[test]
    fn smoke_test_add_commitment_fixed() {
        // A test to check that add commitment does not change
        let lhs = "0ff070a99e9f38e4f1ec1db91795ef4942fcd188152562c2773d9125236a50444687ab68507977d6276428d7d570a3c95efa406427f6641ba1e247133d17e030";
        let rhs = "333e05d05e6533e993f519c23dbce6205fb9e0b78f38b3336d9c4296f144cb0204c389bb5e6925157ce16eda2ebf45640956be98e2be2df77a86f0bca135da21";
        let output = "8b5feb2eb0cc73a8ca2f24ae7b2c61e88ff0b019dea9b881d1b5f7815280b6393834cb80ab2c09984c5b9f70be680206a6e12c8bbb169fe5ab672f45c5d51e20";
        let lhs_bytes = hex::decode(lhs).unwrap();
        let rhs_bytes = hex::decode(rhs).unwrap();
        let output_bytes = hex::decode(output).unwrap();
        let got = add_commitment(lhs_bytes.try_into().unwrap(), rhs_bytes.try_into().unwrap());
        assert_eq!(&got, &output_bytes[..])
    }
    // Widens a u128 into a 32-byte little-endian scalar (upper 16 bytes zero).
    fn u128_to_32bytes(integer: u128) -> [u8; 32] {
        let mut bytes = integer.to_le_bytes().to_vec();
        bytes.extend(vec![0u8; 16]);
        bytes.try_into().unwrap()
    }
}
#[cfg(test)]
mod prover_verifier_test {
    use super::Context;
    use crate::fr_to_le_bytes;
    use crate::verify_proof;
    use ipa_multipoint::committer::Committer;

    /// Builds one serialized opening for both sides of the protocol.
    ///
    /// The polynomial is (a_0, a_1, a_2, a_3) repeated 64 times to fill the
    /// 256-wide domain, opened at z = 2 where it evaluates to a_2.
    ///
    /// Returns `(prover_bytes, verifier_bytes)`:
    ///   prover_bytes   = C || f(X) || z || y  (input to `create_proof`)
    ///   verifier_bytes = C || z || y          (suffix input to `verify_proof`)
    fn build_opening(context: &Context) -> (Vec<u8>, Vec<u8>) {
        let a_0 = banderwagon::Fr::from(123u128);
        let a_1 = banderwagon::Fr::from(123u128);
        let a_2 = banderwagon::Fr::from(456u128);
        let a_3 = banderwagon::Fr::from(789u128);
        let mut all_vals = Vec::new();
        for _ in 0..64 {
            all_vals.push(a_0);
            all_vals.push(a_1);
            all_vals.push(a_2);
            all_vals.push(a_3);
        }
        let commitment = context.committer.commit_lagrange(all_vals.as_slice());
        let commitment_bytes = commitment.to_bytes();
        // Serialize the 256 evaluations, 32 bytes each.
        let mut poly_bytes: Vec<u8> = Vec::new();
        for val in &all_vals {
            poly_bytes.extend_from_slice(&fr_to_le_bytes(*val));
        }
        let point_bytes = [2u8; 1];
        let result_bytes = fr_to_le_bytes(a_2);
        let mut prover_bytes: Vec<u8> = Vec::new();
        prover_bytes.extend_from_slice(&commitment_bytes);
        prover_bytes.extend_from_slice(&poly_bytes);
        prover_bytes.extend_from_slice(&point_bytes);
        prover_bytes.extend_from_slice(&result_bytes);
        let mut verifier_bytes: Vec<u8> = Vec::new();
        verifier_bytes.extend_from_slice(&commitment_bytes);
        verifier_bytes.extend_from_slice(&point_bytes);
        verifier_bytes.extend_from_slice(&result_bytes);
        (prover_bytes, verifier_bytes)
    }

    /// Round-trips a single opening through `create_proof` and `verify_proof`.
    #[test]
    fn test_one_opening_create_proof_verify_proof() {
        let context = Context::new();
        let (create_prover_bytes, create_verifier_bytes) = build_opening(&context);
        let proof_bytes = super::create_proof(&context, create_prover_bytes).unwrap();
        // The verifier consumes: proof || (C_i, z_i, y_i) per opening.
        let mut verifier_call_bytes: Vec<u8> = Vec::new();
        verifier_call_bytes.extend_from_slice(&proof_bytes);
        verifier_call_bytes.extend_from_slice(&create_verifier_bytes);
        let verified = verify_proof(&context, verifier_call_bytes).is_ok();
        assert!(verified);
    }

    /// Same round-trip, but batches 100 openings into a single multiproof.
    #[test]
    fn test_multiple_openings_create_proof_verify_proof() {
        let context = Context::new();
        let mut create_prover_bytes: Vec<u8> = Vec::new();
        let mut create_verifier_bytes: Vec<u8> = Vec::new();
        for _ in 0..100 {
            let (prover_bytes, verifier_bytes) = build_opening(&context);
            create_prover_bytes.extend_from_slice(&prover_bytes);
            create_verifier_bytes.extend_from_slice(&verifier_bytes);
        }
        let proof_bytes = super::create_proof(&context, create_prover_bytes).unwrap();
        let mut verifier_call_bytes: Vec<u8> = Vec::new();
        verifier_call_bytes.extend_from_slice(&proof_bytes);
        verifier_call_bytes.extend_from_slice(&create_verifier_bytes);
        let verified = verify_proof(&context, verifier_call_bytes).is_ok();
        assert!(verified);
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/ffi_interface/src/serialization.rs | ffi_interface/src/serialization.rs | use banderwagon::{CanonicalDeserialize, CanonicalSerialize};
use banderwagon::{Element, Fr};
use ipa_multipoint::{
lagrange_basis::LagrangeBasis,
multiproof::{ProverQuery, VerifierQuery},
};
use crate::{CommitmentBytes, Error, ScalarBytes};
// TODO: Find a better name for this
/// The decoded form of an `update_commitment_sparse` input:
/// (old commitment bytes, indices, old scalar bytes, new scalar bytes),
/// where the three vectors are index-aligned.
pub type DeserializedSparseCommitmentItem = (
    CommitmentBytes,
    Vec<usize>,
    Vec<ScalarBytes>,
    Vec<ScalarBytes>,
);
/// TODO: This method should not be exported. Leave it exported for now, so that its not
/// a breaking change.
///
/// This is used for deserializing the input for `update_commitment_sparse`.
///
/// Expected layout: a 64-byte uncompressed commitment followed by any number
/// of 65-byte entries, each `old_scalar (32) || new_scalar (32) || index (1)`.
pub fn deserialize_update_commitment_sparse(
    input: Vec<u8>,
) -> Result<DeserializedSparseCommitmentItem, Error> {
    // Size of the leading uncompressed commitment.
    const COMMITMENT_SIZE: usize = 64;
    // Size of each (old_scalar, new_scalar, index) entry: 32 + 32 + 1.
    const CHUNK_SIZE: usize = 65;

    // Guard against a too-short input instead of panicking on the slice below
    // (previously `&input[0..64]` would panic for inputs under 64 bytes).
    if input.len() < COMMITMENT_SIZE {
        return Err(Error::LengthIsNotAnExpectedMultiple {
            item_descriptor: "input for update commitment",
            expected_multiple: CHUNK_SIZE as u64,
            actual_size: input.len() as u64,
        });
    }
    // First 64 bytes is the commitment
    let commitment_bytes = CommitmentBytes::try_from(&input[0..COMMITMENT_SIZE])
        .expect("slice is exactly 64 bytes");
    let input_without_commitment_bytes = &input[COMMITMENT_SIZE..];
    if input_without_commitment_bytes.len() % CHUNK_SIZE != 0 {
        return Err(Error::LengthIsNotAnExpectedMultiple {
            item_descriptor: "input for update commitment",
            expected_multiple: CHUNK_SIZE as u64,
            actual_size: input_without_commitment_bytes.len() as u64,
        });
    }
    let mut indexes: Vec<usize> = Vec::new();
    let mut old_scalars: Vec<ScalarBytes> = Vec::new();
    let mut new_scalars: Vec<ScalarBytes> = Vec::new();
    // After the modulo check, `chunks_exact` cannot leave a remainder.
    for entry in input_without_commitment_bytes.chunks_exact(CHUNK_SIZE) {
        // First 32 bytes is the old scalar
        old_scalars.push(ScalarBytes::try_from(&entry[0..32]).expect("slice is 32 bytes"));
        // Next 32 bytes is the new scalar
        new_scalars.push(ScalarBytes::try_from(&entry[32..64]).expect("slice is 32 bytes"));
        // Last byte is the index into the committed vector.
        indexes.push(usize::from(entry[64]));
    }
    Ok((commitment_bytes, indexes, old_scalars, new_scalars))
}
/// Serializes a commitment to a byte array
///
/// Note: This is used so that we can serialize the root node.
pub fn serialize_commitment(commitment: CommitmentBytes) -> [u8; 32] {
    // Re-interpret the trusted uncompressed bytes as a group element, then
    // emit its 32-byte compressed encoding.
    let element = Element::from_bytes_unchecked_uncompressed(commitment);
    element.to_bytes()
}
/// Deserialize a serialized commitment
///
/// Note: This is used so that we can deserialize the root node.
pub fn deserialize_commitment(serialized_commitment: [u8; 32]) -> Result<CommitmentBytes, Error> {
let element = Element::from_bytes(&serialized_commitment).ok_or_else(|| {
Error::CouldNotDeserializeCommitment {
bytes: serialized_commitment.to_vec(),
}
})?;
Ok(element.to_bytes_uncompressed())
}
#[must_use]
pub fn deserialize_proof_query(bytes: &[u8]) -> ProverQuery {
// Commitment
let (commitment, mut bytes) = take_group_element(bytes);
// f_x is a polynomial of degree 255, so we have 256 Fr elements
const NUMBER_OF_EVALUATIONS: usize = 256;
let mut collect_lagrange_basis: Vec<Fr> = Vec::with_capacity(NUMBER_OF_EVALUATIONS);
for _ in 0..NUMBER_OF_EVALUATIONS {
let (scalar, offsetted_bytes) = take_scalar(bytes);
collect_lagrange_basis.push(scalar);
bytes = offsetted_bytes;
}
// The input point is a single byte
let (z_i, bytes) = take_byte(bytes);
// The evaluation is a single scalar
let (y_i, bytes) = take_scalar(bytes);
assert!(bytes.is_empty(), "we should have consumed all the bytes");
ProverQuery {
commitment,
poly: LagrangeBasis::new(collect_lagrange_basis),
point: z_i,
result: y_i,
}
}
#[must_use]
pub fn deserialize_proof_query_uncompressed(bytes: &[u8]) -> ProverQuery {
// Commitment
let (commitment, mut bytes) = take_uncompressed_group_element(bytes);
// f_x is a polynomial of degree 255, so we have 256 Fr elements
const NUMBER_OF_EVALUATIONS: usize = 256;
let mut collect_lagrange_basis: Vec<Fr> = Vec::with_capacity(NUMBER_OF_EVALUATIONS);
for _ in 0..NUMBER_OF_EVALUATIONS {
let (scalar, offsetted_bytes) = take_scalar(bytes);
collect_lagrange_basis.push(scalar);
bytes = offsetted_bytes;
}
// The input point is a single byte
let (z_i, bytes) = take_byte(bytes);
// The evaluation is a single scalar
let (y_i, bytes) = take_scalar(bytes);
assert!(bytes.is_empty(), "we should have consumed all the bytes");
ProverQuery {
commitment,
poly: LagrangeBasis::new(collect_lagrange_basis),
point: z_i,
result: y_i,
}
}
#[must_use]
pub fn deserialize_verifier_query(bytes: &[u8]) -> VerifierQuery {
// Commitment
let (commitment, bytes) = take_group_element(bytes);
// The input point is a single byte
let (z_i, bytes) = take_byte(bytes);
// The evaluation is a single scalar
let (y_i, bytes) = take_scalar(bytes);
assert!(bytes.is_empty(), "we should have consumed all the bytes");
VerifierQuery {
commitment,
point: Fr::from(z_i as u128),
result: y_i,
}
}
#[must_use]
pub fn deserialize_verifier_query_uncompressed(bytes: &[u8]) -> VerifierQuery {
// Commitment
let (commitment, bytes) = take_uncompressed_group_element(bytes);
// The input point is a single byte
let (z_i, bytes) = take_byte(bytes);
// The evaluation is a single scalar
let (y_i, bytes) = take_scalar(bytes);
assert!(bytes.is_empty(), "we should have consumed all the bytes");
VerifierQuery {
commitment,
point: Fr::from(z_i as u128),
result: y_i,
}
}
/// Splits a 64-byte uncompressed group element off the front of `bytes`,
/// returning the element and the remaining suffix.
///
/// No group/subgroup validation is performed; the bytes are trusted.
/// Panics if `bytes` is shorter than 64 bytes.
#[must_use]
pub(crate) fn take_uncompressed_group_element(bytes: &[u8]) -> (Element, &[u8]) {
    let commitment: CommitmentBytes = bytes[..64]
        .try_into()
        .expect("Expected a slice of exactly 64 bytes");
    let element = Element::from_bytes_unchecked_uncompressed(commitment);
    // Increment the slice by 64 bytes
    (element, &bytes[64..])
}
/// Splits a 32-byte compressed group element off the front of `bytes`,
/// returning the element and the remaining suffix.
///
/// Panics if `bytes` is too short or does not decode to a valid element.
#[must_use]
pub(crate) fn take_group_element(bytes: &[u8]) -> (Element, &[u8]) {
    let element = Element::from_bytes(&bytes[0..32]).expect("could not deserialize element");
    // Increment the slice by 32 bytes
    (element, &bytes[32..])
}
/// Splits a single byte off the front of `bytes`, widened to `usize`.
///
/// Panics if `bytes` is empty.
#[must_use]
pub(crate) fn take_byte(bytes: &[u8]) -> (usize, &[u8]) {
    let z_i = bytes[0] as usize;
    // Increment the slice by 1 byte
    (z_i, &bytes[1..])
}
/// Splits a 32-byte scalar field element off the front of `bytes`.
///
/// Panics if `bytes` is too short or does not decode to a scalar.
#[must_use]
pub(crate) fn take_scalar(bytes: &[u8]) -> (Fr, &[u8]) {
    let y_i = fr_from_le_bytes(&bytes[0..32]).expect("could not deserialize y_i");
    // Increment the slice by 32 bytes
    (y_i, &bytes[32..])
}
/// Serializes a scalar field element into 32 little-endian bytes.
pub fn fr_to_le_bytes(fr: banderwagon::Fr) -> [u8; 32] {
    let mut bytes = [0u8; 32];
    fr.serialize_compressed(&mut bytes[..])
        .expect("Failed to serialize scalar to bytes");
    bytes
}
/// Deserializes 32 little-endian bytes into a scalar field element.
///
/// NOTE(review): serialization above uses `serialize_compressed` while this
/// uses `deserialize_uncompressed`. The round-trip test in this file passes,
/// which suggests the two encodings coincide for `Fr` — confirm this
/// asymmetry is intentional rather than tightening it to the matching call.
pub fn fr_from_le_bytes(bytes: &[u8]) -> Result<banderwagon::Fr, Error> {
    banderwagon::Fr::deserialize_uncompressed(bytes).map_err(|_| Error::FailedToDeserializeScalar {
        bytes: bytes.to_vec(),
    })
}
#[cfg(test)]
mod tests {
    use banderwagon::{Element, Fr};
    use ipa_multipoint::committer::Committer;
    use crate::{
        serialization::deserialize_update_commitment_sparse, serialize_commitment,
        update_commitment_sparse, Context, ZERO_POINT,
    };
    use super::deserialize_commitment;
    // Round-trips a hand-built wire payload through
    // `deserialize_update_commitment_sparse` + `update_commitment_sparse`,
    // and checks the result against `commit_sparse` applied to the deltas.
    #[test]
    fn test_byte_array_input_update_commitment_sparse() {
        let old_commitment_bytes = ZERO_POINT;
        let index = 7u8;
        let old_scalar = [
            2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ];
        let new_scalar = [
            19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ];
        let index2 = 8u8;
        let old_scalar2 = [
            2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ];
        let new_scalar2 = [
            17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0,
        ];
        // Wire layout: commitment || (old || new || index) per entry.
        let mut concatenated: Vec<u8> = Vec::from(ZERO_POINT);
        concatenated.extend_from_slice(&old_scalar);
        concatenated.extend_from_slice(&new_scalar);
        concatenated.push(index);
        concatenated.extend_from_slice(&old_scalar2);
        concatenated.extend_from_slice(&new_scalar2);
        concatenated.push(index2);
        let (_old_commitment, commitment_index_vec, old_scalar_bytes_vec, new_scalar_bytes_vec) =
            deserialize_update_commitment_sparse(concatenated).unwrap();
        let context = Context::default();
        let committer = &context.committer;
        let new_commitment = update_commitment_sparse(
            &context,
            old_commitment_bytes,
            commitment_index_vec,
            old_scalar_bytes_vec,
            new_scalar_bytes_vec,
        )
        .unwrap();
        // Expected deltas: 19 - 2 = 17 at index 7 and 17 - 2 = 15 at index 8.
        let val_indices: Vec<(Fr, usize)> = vec![(Fr::from(17u8), 7), (Fr::from(15u8), 8)];
        let test_comm = committer.commit_sparse(val_indices);
        assert_eq!(test_comm.to_bytes_uncompressed(), new_commitment);
    }
    // Round-trips the identity element through the 32-byte root-node encoding.
    #[test]
    fn serialize_commitment_roundtrip() {
        let gen = Element::zero();
        // Serialize the commitment
        let gen_uncompressed_bytes = gen.to_bytes_uncompressed();
        let serialized_commitment = serialize_commitment(gen_uncompressed_bytes);
        let got_commitment_bytes = deserialize_commitment(serialized_commitment).unwrap();
        let got_commitment = Element::from_bytes_unchecked_uncompressed(got_commitment_bytes);
        // Note that we do not compare the raw uncompressed_bytes.
        //
        // See the note on `to_bytes_uncompressed` -- that method does not guarantee uniqueness
        // of the decoding with respects to the quotient group.
        assert_eq!(gen, got_commitment);
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-db/src/lib.rs | verkle-db/src/lib.rs | ///The traits in this file do not need to be implemented
/// If these methods are implemented, then it allows one to
/// use the default higher level trait implementations
/// that the verkle trie needs. (See database.rs)
#[cfg(feature = "sled_db")]
mod sled_impl;
#[cfg(feature = "sled_db")]
pub use sled_impl::DB as SledDb;
#[cfg(feature = "rocks_db")]
mod rocksdb_impl;
#[cfg(feature = "rocks_db")]
pub use rocksdb_impl::DB as RocksDb;
// Bare metal database assumes the most basic functionality for a key value database
pub trait BareMetalKVDb {
// Get the value stored at this key
fn fetch(&self, key: &[u8]) -> Option<Vec<u8>>;
// Create a database given the default path
// This cannot be implemented here since Self is not sized.
fn new() -> Self;
}
pub trait BareMetalDiskDb {
fn from_path<P: AsRef<std::path::Path>>(path: P) -> Self;
const DEFAULT_PATH: &'static str;
}
pub trait BatchWriter {
fn new() -> Self;
fn batch_put(&mut self, key: &[u8], val: &[u8]);
}
pub trait BatchDB {
type BatchWrite: BatchWriter;
fn flush(&mut self, batch: Self::BatchWrite);
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-db/src/rocksdb_impl.rs | verkle-db/src/rocksdb_impl.rs | use crate::{BareMetalDiskDb, BareMetalKVDb};
pub use rocksdb::DB;
impl BareMetalDiskDb for DB {
    /// Opens (or creates) a RocksDB instance at `path` with default options.
    ///
    /// Panics if the database cannot be opened.
    fn from_path<P: AsRef<std::path::Path>>(path: P) -> Self {
        DB::open_default(path).unwrap()
    }
    const DEFAULT_PATH: &'static str = "./db/verkle_db";
}
impl BareMetalKVDb for DB {
    /// Returns the value stored under `key`, or `None` if the key is absent.
    ///
    /// Panics if the underlying RocksDB read fails.
    fn fetch(&self, key: &[u8]) -> Option<Vec<u8>> {
        self.get(key).unwrap()
    }
    // Create a database given the default path
    fn new() -> Self {
        Self::from_path(Self::DEFAULT_PATH)
    }
}
use crate::{BatchDB, BatchWriter};
use rocksdb::WriteBatch;
impl BatchWriter for WriteBatch {
    /// Creates an empty RocksDB write batch.
    fn new() -> Self {
        WriteBatch::default()
    }
    /// Queues a key/value write; nothing reaches the database until
    /// `BatchDB::flush` is called.
    fn batch_put(&mut self, key: &[u8], val: &[u8]) {
        self.put(key, val)
    }
}
impl BatchDB for DB {
    type BatchWrite = WriteBatch;
    /// Applies all writes queued in `batch`.
    ///
    /// Panics if the underlying RocksDB write fails.
    fn flush(&mut self, batch: Self::BatchWrite) {
        self.write(batch).unwrap();
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-db/src/sled_impl.rs | verkle-db/src/sled_impl.rs | use crate::{BareMetalDiskDb, BareMetalKVDb};
pub use sled::Db as DB;
impl BareMetalDiskDb for sled::Db {
    /// Opens (or creates) a sled database at `path` with the default config.
    ///
    /// Panics if the database cannot be opened.
    fn from_path<P: AsRef<std::path::Path>>(path: P) -> Self {
        let config = sled::Config::default().path(path);
        config.open().unwrap()
    }
    const DEFAULT_PATH: &'static str = "./db/verkle_db";
}
impl BareMetalKVDb for sled::Db {
    /// Returns the value stored under `key`, or `None` if the key is absent.
    ///
    /// Panics if the underlying sled read fails. The sled `IVec` is copied
    /// into an owned `Vec<u8>` to satisfy the trait signature.
    fn fetch(&self, key: &[u8]) -> Option<Vec<u8>> {
        self.get(key).unwrap().map(|i_vec| i_vec.to_vec())
    }
    // Create a database given the default path
    fn new() -> Self {
        Self::from_path(Self::DEFAULT_PATH)
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/banderwagon/src/element.rs | banderwagon/src/element.rs | use ark_ec::{twisted_edwards::TECurveConfig, Group, ScalarMul, VariableBaseMSM};
use ark_ed_on_bls12_381_bandersnatch::{BandersnatchConfig, EdwardsAffine, EdwardsProjective, Fq};
use ark_ff::{batch_inversion, Field, One, Zero};
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
pub use ark_ed_on_bls12_381_bandersnatch::Fr;
/// A point of the "banderwagon" group built on the Bandersnatch curve.
/// Equality is defined on the x/y ratio, so two raw points that differ only
/// by the identified 2-torsion image compare equal (see the quotient-group
/// notes on `to_bytes_uncompressed`).
#[derive(Debug, Clone, Copy, Eq)]
pub struct Element(pub(crate) EdwardsProjective);
impl PartialEq for Element {
    fn eq(&self, other: &Self) -> bool {
        let x1 = self.0.x;
        let y1 = self.0.y;
        let x2 = other.0.x;
        let y2 = other.0.y;
        // One should not be able to generate this point, unless they have assigned `x` and `y`
        // to be 0 directly and have bypassed the API.
        //
        // This is possible in languages such as C, we will leave this check here
        // for those who are using this as a reference, or in the case that there is some way to
        // create an Element and bypass the checks.
        if x1.is_zero() & y1.is_zero() {
            return false;
        }
        if x2.is_zero() & y2.is_zero() {
            return false;
        }
        // Compare x1/y1 == x2/y2 without field divisions, by cross-multiplying.
        (x1 * y2) == (x2 * y1)
    }
}
impl Element {
    /// Serializes the element to its canonical 32-byte encoding: the affine x
    /// coordinate (negated when y is not "positive", see `is_positive`),
    /// byte-reversed into big-endian for interoperability.
    pub fn to_bytes(&self) -> [u8; 32] {
        // We assume that internally this point is "correct"
        //
        // We serialize a correct point by serializing the x co-ordinate times sign(y)
        let affine = EdwardsAffine::from(self.0);
        let x = if is_positive(affine.y) {
            affine.x
        } else {
            -affine.x
        };
        let mut bytes = [0u8; 32];
        x.serialize_compressed(&mut bytes[..])
            .expect("serialization failed");
        // reverse bytes to big endian, for interoperability
        bytes.reverse();
        bytes
    }
    // Do not compare the results of this function.
    //
    // This is because if (x, -y) is on the curve, then (x,y) is also on the curve.
    // This method will return two different byte arrays for each of these.
    //
    // TODO: perhaps change this so that it chooses a representative, ie respecting the equivalence class
    pub fn to_bytes_uncompressed(&self) -> [u8; 64] {
        let mut bytes = [0u8; 64];
        self.0
            .serialize_uncompressed(&mut bytes[..])
            .expect("cannot serialize point as an uncompressed byte array");
        bytes
    }
    /// Deserializes 64 uncompressed bytes into an element with no
    /// group/subgroup validation. Only use on trusted bytes that were
    /// produced by `to_bytes_uncompressed`.
    pub fn from_bytes_unchecked_uncompressed(bytes: [u8; 64]) -> Self {
        let point = EdwardsProjective::deserialize_uncompressed_unchecked(&bytes[..])
            .expect("could not deserialize byte array into a point");
        Self(point)
    }
    /// Deserializes a canonical 32-byte (big-endian x coordinate) encoding.
    /// Returns `None` if the bytes do not decode to a field element, the x
    /// has no corresponding y on the curve, or the subgroup check fails.
    pub fn from_bytes(bytes: &[u8]) -> Option<Element> {
        // Switch from big endian to little endian, as arkworks library uses little endian
        let mut bytes = bytes.to_vec();
        bytes.reverse();
        let x: Fq = Fq::deserialize_compressed(&bytes[..]).ok()?;
        let return_positive_y = true;
        // Construct a point that is in the group -- this point may or may not be in the prime subgroup
        let point = Self::get_point_from_x(x, return_positive_y)?;
        let element = Element(EdwardsProjective::new_unchecked(
            point.x,
            point.y,
            point.x * point.y,
            Fq::one(),
        ));
        // Check if the point is in the correct subgroup
        //
        // Check legendre - checks whether 1 - ax^2 is a QR
        if !element.subgroup_check() {
            return None;
        }
        Some(element)
    }
    /// Size in bytes of the canonical compressed encoding.
    pub const fn compressed_serialized_size() -> usize {
        32
    }
    /// Returns the generator of the prime-order subgroup.
    pub fn prime_subgroup_generator() -> Element {
        Element(EdwardsProjective::generator())
    }
    // Solves the twisted-Edwards curve equation for y given x:
    // y^2 = (a*x^2 - 1) / (d*x^2 - 1). Returns None when no square root exists.
    fn get_point_from_x(x: Fq, choose_largest: bool) -> Option<EdwardsAffine> {
        let dx_squared_minus_one = BandersnatchConfig::COEFF_D * x.square() - Fq::one();
        let ax_squared_minus_one = BandersnatchConfig::COEFF_A * x.square() - Fq::one();
        let y_squared = ax_squared_minus_one / dx_squared_minus_one;
        let y = y_squared.sqrt()?;
        let is_largest = is_positive(y);
        // NOTE(review): when `choose_largest` is false and `y` is already the
        // smaller root, this branch still negates `y` and yields the larger
        // root; `is_largest == choose_largest` looks like the intended
        // condition. Latent today: the only caller in this file passes
        // `choose_largest = true`. Confirm before relying on the false path.
        let y = if is_largest && choose_largest { y } else { -y };
        Some(EdwardsAffine::new_unchecked(x, y))
    }
    // Maps the element to the base field via x/y.
    fn map_to_field(&self) -> Fq {
        self.0.x / self.0.y
    }
    // Note: This is a 2 to 1 map, but the two preimages are identified to be the same
    pub fn map_to_scalar_field(&self) -> Fr {
        use ark_ff::PrimeField;
        let base_field = self.map_to_field();
        let mut bytes = [0u8; 32];
        base_field
            .serialize_compressed(&mut bytes[..])
            .expect("could not serialize point into a 32 byte array");
        Fr::from_le_bytes_mod_order(&bytes)
    }
    /// Batched version of `map_to_scalar_field`: a single batch inversion of
    /// all the y coordinates replaces one field inversion per element.
    pub fn batch_map_to_scalar_field(elements: &[Element]) -> Vec<Fr> {
        use ark_ff::PrimeField;
        // Collect the y coordinates, invert them all at once, then multiply
        // each inverse by the matching x to obtain x/y per element.
        let mut x_div_y = Vec::with_capacity(elements.len());
        for element in elements {
            let y = element.0.y;
            x_div_y.push(y);
        }
        batch_inversion(&mut x_div_y);
        for i in 0..elements.len() {
            x_div_y[i] *= elements[i].0.x;
        }
        let mut scalars = Vec::with_capacity(elements.len());
        for element in x_div_y {
            let mut bytes = [0u8; 32];
            element
                .serialize_compressed(&mut bytes[..])
                .expect("could not serialize point into a 32 byte array");
            scalars.push(Fr::from_le_bytes_mod_order(&bytes));
        }
        scalars
    }
    /// Returns the identity element.
    pub fn zero() -> Element {
        Element(EdwardsProjective::zero())
    }
    /// True if this element is the identity.
    pub fn is_zero(&self) -> bool {
        *self == Element::zero()
    }
    // Membership check used by `from_bytes`: 1 - a*x^2 must be a quadratic residue.
    pub(crate) fn subgroup_check(&self) -> bool {
        legendre_check_point(&self.0.x)
    }
}
// The lexographically largest value is defined to be the positive value
fn is_positive(coordinate: Fq) -> bool {
    coordinate > -coordinate
}
// Returns true when 1 - a*x^2 is a quadratic residue in the base field.
// This is the subgroup-membership criterion used by `Element::from_bytes`.
fn legendre_check_point(x: &Fq) -> bool {
    let res = Fq::one() - (BandersnatchConfig::COEFF_A * x.square());
    res.legendre().is_qr()
}
/// Computes sum_i scalars[i] * bases[i] via a multi-scalar multiplication.
///
/// Panics if `bases` and `scalars` have different lengths.
pub fn multi_scalar_mul(bases: &[Element], scalars: &[Fr]) -> Element {
    // Unwrap the Element newtype to the underlying projective points.
    let inner: Vec<_> = bases.iter().map(|base| base.0).collect();
    // XXX: Converting all of these to affine hurts performance
    let affine_bases = EdwardsProjective::batch_convert_to_mul_base(&inner);
    let sum = EdwardsProjective::msm(&affine_bases, scalars)
        .expect("number of bases should equal number of scalars");
    Element(sum)
}
#[cfg(test)]
mod tests {
    use super::*;
    use ark_serialize::CanonicalSerialize;
    // Pins the generator's scalar-field mapping against the python reference.
    #[test]
    fn consistent_group_to_field() {
        // In python this is called commitment_to_field
        // print(commitment_to_field(Point(generator=True)).to_bytes(32, "little").hex())
        let expected = "d1e7de2aaea9603d5bc6c208d319596376556ecd8336671ba7670c2139772d14";
        let generator = Element::prime_subgroup_generator();
        let mut bytes = [0u8; 32];
        generator
            .map_to_scalar_field()
            .serialize_compressed(&mut bytes[..])
            .unwrap();
        assert_eq!(hex::encode(bytes), expected);
    }
    // Uncompressed serialization must round-trip through the unchecked decoder.
    #[test]
    fn from_bytes_unchecked_uncompressed_roundtrip() {
        let generator = Element::prime_subgroup_generator();
        let bytes = generator.to_bytes_uncompressed();
        let element = Element::from_bytes_unchecked_uncompressed(bytes);
        assert_eq!(element, generator)
    }
    // The batched mapping must agree with the one-at-a-time mapping.
    // (A duplicated copy of the assertion loop was removed.)
    #[test]
    fn from_batch_map_to_scalar_field() {
        let mut points = Vec::new();
        for i in 0..10 {
            points.push(Element::prime_subgroup_generator() * Fr::from(i));
        }
        let got = Element::batch_map_to_scalar_field(&points);
        for i in 0..10 {
            let expected_i = points[i].map_to_scalar_field();
            assert_eq!(expected_i, got[i]);
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;

    // Two torsion point, *not* point at infinity {0,-1,0,1}
    fn two_torsion() -> EdwardsProjective {
        EdwardsProjective::new_unchecked(Fq::zero(), -Fq::one(), Fq::zero(), Fq::one())
    }

    // The two exceptional points with Z = 0 (constructed unchecked, since they
    // are not valid affine points).
    fn points_at_infinity() -> [EdwardsProjective; 2] {
        let d = BandersnatchConfig::COEFF_D;
        let a = BandersnatchConfig::COEFF_A;
        let sqrt_da = (d / a).sqrt().unwrap();
        let p1 = EdwardsProjective::new_unchecked(sqrt_da, Fq::zero(), Fq::one(), Fq::zero());
        let p2 = EdwardsProjective::new_unchecked(-sqrt_da, Fq::zero(), Fq::one(), Fq::zero());
        [p1, p2]
    }

    #[test]
    fn fixed_test_vectors() {
        // Expected compressed serializations of G, 2G, 4G, ... (doublings of
        // the prime-subgroup generator).
        let expected_bit_string = [
            "4a2c7486fd924882bf02c6908de395122843e3e05264d7991e18e7985dad51e9",
            "43aa74ef706605705989e8fd38df46873b7eae5921fbed115ac9d937399ce4d5",
            "5e5f550494159f38aa54d2ed7f11a7e93e4968617990445cc93ac8e59808c126",
            "0e7e3748db7c5c999a7bcd93d71d671f1f40090423792266f94cb27ca43fce5c",
            "14ddaa48820cb6523b9ae5fe9fe257cbbd1f3d598a28e670a40da5d1159d864a",
            "6989d1c82b2d05c74b62fb0fbdf8843adae62ff720d370e209a7b84e14548a7d",
            "26b8df6fa414bf348a3dc780ea53b70303ce49f3369212dec6fbe4b349b832bf",
            "37e46072db18f038f2cc7d3d5b5d1374c0eb86ca46f869d6a95fc2fb092c0d35",
            "2c1ce64f26e1c772282a6633fac7ca73067ae820637ce348bb2c8477d228dc7d",
            "297ab0f5a8336a7a4e2657ad7a33a66e360fb6e50812d4be3326fab73d6cee07",
            "5b285811efa7a965bd6ef5632151ebf399115fcc8f5b9b8083415ce533cc39ce",
            "1f939fa2fd457b3effb82b25d3fe8ab965f54015f108f8c09d67e696294ab626",
            "3088dcb4d3f4bacd706487648b239e0be3072ed2059d981fe04ce6525af6f1b8",
            "35fbc386a16d0227ff8673bc3760ad6b11009f749bb82d4facaea67f58fc60ed",
            "00f29b4f3255e318438f0a31e058e4c081085426adb0479f14c64985d0b956e0",
            "3fa4384b2fa0ecc3c0582223602921daaa893a97b64bdf94dcaa504e8b7b9e5f",
        ];
        // NOTE: the original collected every point into an unused `points` vec
        // and re-indexed the array it was already iterating; both removed.
        let mut point = Element::prime_subgroup_generator();
        for (i, expected) in expected_bit_string.into_iter().enumerate() {
            let byts = hex::encode(point.to_bytes());
            assert_eq!(byts, expected, "index {} does not match", i);
            point = Element(point.0.double())
        }
    }

    #[test]
    fn ser_der_roundtrip() {
        // A point and that point plus the two-torsion point serialize to the
        // same bytes and deserialize back to an element equal to both.
        let point = EdwardsProjective::generator();
        let two_torsion_point = two_torsion();
        let element1 = Element(point);
        let bytes1 = element1.to_bytes();
        let element2 = Element(point + two_torsion_point);
        let bytes2 = element2.to_bytes();
        assert_eq!(bytes1, bytes2);
        let got = Element::from_bytes(&bytes1).expect("points are in the valid subgroup");
        assert!(got == element1);
        assert!(got == element2);
    }

    #[test]
    fn check_infinity_does_not_pass_legendre() {
        // We cannot use the points at infinity themselves
        // as they have Z=0, which will panic when converting to
        // affine co-ordinates. So we create a point which is
        // the sum of the point at infinity and another point
        let point = points_at_infinity()[0];
        let gen = EdwardsProjective::generator();
        let gen2 = gen + gen + gen + gen;
        let res = point + gen + gen2;
        let element1 = Element(res);
        let bytes1 = element1.to_bytes();
        if Element::from_bytes(&bytes1).is_some() {
            panic!("point contains a point at infinity and should not have passed deserialization")
        }
    }

    #[test]
    fn two_torsion_correct() {
        // The two-torsion point and both points at infinity are non-identity
        // elements of order two.
        let two_torsion_point = two_torsion();
        assert!(!two_torsion_point.is_zero());
        let result = two_torsion_point.double();
        assert!(result.is_zero());
        let [inf1, inf2] = points_at_infinity();
        assert!(!inf1.is_zero());
        assert!(!inf2.is_zero());
        assert!(inf1.double().is_zero());
        assert!(inf2.double().is_zero());
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/banderwagon/src/lib.rs | banderwagon/src/lib.rs | pub mod msm;
pub mod msm_windowed_sign;
pub mod trait_impls;
mod element;
use ark_ed_on_bls12_381_bandersnatch::Fq;
use ark_ff::BigInteger256;
pub use element::{multi_scalar_mul, Element, Fr};
// Re-export arkworks traits that one may need to use in order to use
// specific methods on field elements and for serialization.
//
// For example, if we expose Fr directly, then for consumers to call methods like Fr::one()
// they will need to import ark_ff::One, which means they will need to import
// ark_ff as a dependency.
//
// This reexport allows us to avoid that.
pub use trait_defs::*;
pub mod trait_defs {
    //! Re-exports of the arkworks traits needed to operate on field elements
    //! and to (de)serialize them, so downstream crates do not have to depend
    //! on `ark_ff` / `ark_serialize` directly.
    pub use ark_ff::{batch_inversion, batch_inversion_and_mul, Field, One, PrimeField, Zero};
    pub use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError};
}
/// Constructs an `Fr` scalar from four 64-bit limbs (least-significant first).
///
/// NOTE(review): whether `Fr::new` expects the raw integer or Montgomery-form
/// limbs depends on the arkworks version in use — TODO confirm against callers.
pub const fn fr_from_u64_limbs(limbs: [u64; 4]) -> Fr {
    Fr::new(BigInteger256::new(limbs))
}
// Takes as input a random byte array and attempts to map it
// to a point in the subgroup.
//
// This is useful in try-and-increment algorithms.
/// Reduces an arbitrary big-endian byte string modulo the base field and
/// attempts to interpret the result as the x-coordinate of a banderwagon
/// element. Returns `None` when the reduced coordinate is not a valid element.
pub fn try_reduce_to_element(bytes: &[u8]) -> Option<Element> {
    // The Element::from_bytes method does not reduce the bytes, it expects the
    // input to be in a canonical format, so we must do the reduction ourselves
    let x_coord = Fq::from_be_bytes_mod_order(bytes);
    let mut bytes = [0u8; 32];
    // Fq always fits in 32 compressed bytes, so this cannot fail.
    x_coord.serialize_compressed(&mut bytes[..]).unwrap();
    // TODO: this reverse is hacky, and it's because there is no way to specify the endianness in arkworks
    // TODO So we reverse it here, to be interoperable with the banderwagon spec which needs big endian bytes
    bytes.reverse();
    // Deserialize the x-coordinate to get a valid banderwagon element
    Element::from_bytes(&bytes)
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/banderwagon/src/msm.rs | banderwagon/src/msm.rs | use ark_ec::scalar_mul::wnaf::WnafContext;
use ark_ed_on_bls12_381_bandersnatch::{EdwardsProjective, Fr};
use ark_ff::Zero;
use rayon::prelude::*;
use crate::Element;
/// Multi-scalar-multiplication engine with per-base wNAF precomputation.
#[derive(Clone, Debug)]
pub struct MSMPrecompWnaf {
    // wNAF window size used both to build `tables` and to multiply with them.
    window_size: usize,
    // One precomputed wNAF table per base point, in the order the bases were given.
    tables: Vec<Vec<EdwardsProjective>>,
}
impl MSMPrecompWnaf {
    /// Precomputes one wNAF table per base point for the given window size.
    pub fn new(bases: &[Element], window_size: usize) -> MSMPrecompWnaf {
        let wnaf_context = WnafContext::new(window_size);
        let tables: Vec<_> = bases
            .iter()
            .map(|base| wnaf_context.table(base.0))
            .collect();
        MSMPrecompWnaf {
            tables,
            window_size,
        }
    }

    /// Multiplies `scalar` by the base at `index`, using its precomputed table.
    pub fn mul_index(&self, scalar: Fr, index: usize) -> Element {
        let wnaf_context = WnafContext::new(self.window_size);
        let product = wnaf_context
            .mul_with_table(&self.tables[index], &scalar)
            .unwrap();
        Element(product)
    }

    /// Computes `sum_i scalars[i] * bases[i]`, skipping zero scalars.
    pub fn mul(&self, scalars: &[Fr]) -> Element {
        let wnaf_context = WnafContext::new(self.window_size);
        let sum: EdwardsProjective = scalars
            .iter()
            .zip(self.tables.iter())
            .filter(|(scalar, _)| !scalar.is_zero())
            .map(|(scalar, table)| wnaf_context.mul_with_table(table, scalar).unwrap())
            .sum();
        Element(sum)
    }

    // TODO: This requires more benchmarking and feedback to see if we should
    // TODO put this behind a config flag
    /// Parallel (rayon) variant of [`MSMPrecompWnaf::mul`].
    pub fn mul_par(&self, scalars: &[Fr]) -> Element {
        let wnaf_context = WnafContext::new(self.window_size);
        let sum: EdwardsProjective = scalars
            .par_iter()
            .zip(self.tables.par_iter())
            .filter(|(scalar, _)| !scalar.is_zero())
            .map(|(scalar, table)| wnaf_context.mul_with_table(table, scalar).unwrap())
            .sum();
        Element(sum)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{multi_scalar_mul, Element};

    #[test]
    fn correctness_smoke_test() {
        // Bases: (i+1) * G for i in 0..256; scalars: -(i+1).
        let crs: Vec<_> = (0..256)
            .map(|i| Element::prime_subgroup_generator() * Fr::from((i + 1) as u64))
            .collect();
        let scalars: Vec<_> = (0..256).map(|i| -Fr::from(i + 1)).collect();

        // Precomputed results must match the reference MSM.
        let result = multi_scalar_mul(&crs, &scalars);
        let precomp = MSMPrecompWnaf::new(&crs, 12);
        assert_eq!(result, precomp.mul(&scalars));
        assert_eq!(result, precomp.mul_par(&scalars));
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/banderwagon/src/trait_impls.rs | banderwagon/src/trait_impls.rs | pub mod from_to_bytes;
pub mod ops;
pub mod serialize;
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/banderwagon/src/msm_windowed_sign.rs | banderwagon/src/msm_windowed_sign.rs | use crate::Element;
use ark_ec::CurveGroup;
use ark_ed_on_bls12_381_bandersnatch::{EdwardsAffine, EdwardsProjective, Fr};
use ark_ff::Zero;
use ark_ff::{BigInteger, BigInteger256};
use std::ops::Neg;
/// MSM engine using signed (booth-encoded) fixed windows with precomputed tables.
#[derive(Debug, Clone)]
pub struct MSMPrecompWindowSigned {
    // Per-base lookup tables: for base i, `tables[i]` holds, for every window w,
    // the multiples k * (2^(w*window_size) * base_i) for k in 1..=2^(window_size-1),
    // flattened window-major.
    tables: Vec<Vec<EdwardsAffine>>,
    // Number of booth windows needed to cover an Fr scalar.
    num_windows: usize,
    // Bit width of each signed window.
    window_size: usize,
}
impl MSMPrecompWindowSigned {
    /// Builds signed-window lookup tables for each base point.
    pub fn new(bases: &[Element], window_size: usize) -> MSMPrecompWindowSigned {
        use ark_ff::PrimeField;
        // Enough windows to cover every bit of an Fr scalar (+1 for the
        // booth-encoding overlap).
        let number_of_windows = Fr::MODULUS_BIT_SIZE as usize / window_size + 1;
        let precomputed_points: Vec<_> = bases
            .iter()
            .map(|point| {
                Self::precompute_points(
                    window_size,
                    number_of_windows,
                    EdwardsAffine::from(point.0),
                )
            })
            .collect();
        MSMPrecompWindowSigned {
            window_size,
            tables: precomputed_points,
            num_windows: number_of_windows,
        }
    }

    /// For each window w, stores the multiples k * (2^(w*window_size) * P)
    /// for k in 1..=2^(window_size-1), flattened window-major into one vector.
    fn precompute_points(
        window_size: usize,
        number_of_windows: usize,
        point: EdwardsAffine,
    ) -> Vec<EdwardsAffine> {
        let window_size_scalar = Fr::from(1 << window_size);
        use ark_ff::Field;
        use rayon::prelude::*;
        let all_tables: Vec<_> = (0..number_of_windows)
            .into_par_iter()
            .flat_map(|window_index| {
                // Scale factor for this window: (2^window_size)^window_index.
                let window_scalar = window_size_scalar.pow([window_index as u64]);
                let mut lookup_table = Vec::with_capacity(1 << (window_size - 1));
                let point = EdwardsProjective::from(point) * window_scalar;
                let mut current = point;
                // Compute and store multiples
                for _ in 0..(1 << (window_size - 1)) {
                    lookup_table.push(current);
                    current += point;
                }
                // One batch inversion per window to get affine coordinates.
                EdwardsProjective::normalize_batch(&lookup_table)
            })
            .collect();
        all_tables
    }

    /// Computes `sum_i scalars[i] * bases[i]` by selecting (and possibly
    /// negating) precomputed table entries according to each scalar's
    /// booth-encoded window digits.
    pub fn mul(&self, scalars: &[Fr]) -> Element {
        // Little-endian byte representation of each scalar, as consumed by
        // `get_booth_index`.
        let scalars_bytes: Vec<_> = scalars
            .iter()
            .map(|a| {
                let bigint: BigInteger256 = (*a).into();
                bigint.to_bytes_le()
            })
            .collect();
        let mut points_to_add = Vec::with_capacity(self.num_windows);
        for window_idx in 0..self.num_windows {
            for (scalar_idx, scalar_bytes) in scalars_bytes.iter().enumerate() {
                let sub_table = &self.tables[scalar_idx];
                // Signed digit for this scalar in this window; 0 contributes nothing.
                let point_idx =
                    get_booth_index(window_idx, self.window_size, scalar_bytes.as_ref());
                if point_idx == 0 {
                    continue;
                }
                let sign = point_idx.is_positive();
                // Table entries start at multiple 1, hence the -1.
                let point_idx = point_idx.unsigned_abs() as usize - 1;
                // Scale the point index by the window index to figure out whether
                // we need P, 2^wP, 2^{2w}P, etc
                let scaled_point_index = window_idx * (1 << (self.window_size - 1)) + point_idx;
                let mut point = sub_table[scaled_point_index];
                if !sign {
                    point = -point;
                }
                points_to_add.push(point);
            }
        }
        // Accumulate all selected entries into a single projective sum.
        let mut result = EdwardsProjective::zero();
        for point in points_to_add {
            result += point;
        }
        Element(result)
    }
}
// TODO: Link to halo2 file + docs + comments
/// Extracts the signed booth digit for window `window_index` from the
/// little-endian scalar bytes `el`. The result is in
/// `[-2^(window_size-1), 2^(window_size-1)]`, with 0 meaning "skip".
pub fn get_booth_index(window_index: usize, window_size: usize, el: &[u8]) -> i32 {
    // Booth encoding:
    // * step by `window` size
    // * slice by size of `window + 1`
    // * each window overlaps the previous window by 1 bit
    // * append a zero bit to the least significant end
    // Indexing rule for example window size 3 where we slice by 4 bits:
    // `[0, +1, +1, +2, +2, +3, +3, +4, -4, -3, -3, -2, -2, -1, -1, 0]`
    // So we can reduce the bucket size without preprocessing scalars
    // and remembering them as in classic signed digit encoding
    let skip_bits = (window_index * window_size).saturating_sub(1);
    let skip_bytes = skip_bits / 8;
    // fill into a u32
    // (assumes window_size + 1 bits fit within the 32-bit slice — TODO confirm
    // the caller never exceeds this)
    let mut v: [u8; 4] = [0; 4];
    for (dst, src) in v.iter_mut().zip(el.iter().skip(skip_bytes)) {
        *dst = *src
    }
    let mut tmp = u32::from_le_bytes(v);
    // pad with one 0 if slicing the least significant window
    if window_index == 0 {
        tmp <<= 1;
    }
    // remove further bits
    tmp >>= skip_bits - (skip_bytes * 8);
    // apply the booth window
    tmp &= (1 << (window_size + 1)) - 1;
    // top (overlap) bit clear means a non-negative digit
    let sign = tmp & (1 << window_size) == 0;
    // div ceil by 2
    tmp = (tmp + 1) >> 1;
    // find the booth action index
    if sign {
        tmp as i32
    } else {
        ((!(tmp - 1) & ((1 << window_size) - 1)) as i32).neg()
    }
}
#[test]
fn smoke_test_interop_strauss() {
    use ark_ff::UniformRand;
    // A random MSM of size 5 computed via the precomputed signed windows must
    // match the naive sum of individual scalar multiplications.
    let length = 5;
    let scalars: Vec<_> = (0..length)
        .map(|_| Fr::rand(&mut rand::thread_rng()))
        .collect();
    let points: Vec<_> = (0..length)
        .map(|_| Element::prime_subgroup_generator() * Fr::rand(&mut rand::thread_rng()))
        .collect();

    let precomp = MSMPrecompWindowSigned::new(&points, 2);
    let result = precomp.mul(&scalars);

    let expected = scalars
        .into_iter()
        .zip(points)
        .fold(Element::zero(), |acc, (scalar, point)| acc + point * scalar);
    assert_eq!(expected, result)
}
#[cfg(test)]
mod booth_tests {
    use std::ops::Neg;
    use ark_ed_on_bls12_381_bandersnatch::Fr;
    use ark_ff::{BigInteger, BigInteger256, Field, PrimeField};
    use super::get_booth_index;
    use crate::Element;

    #[test]
    fn smoke_scalar_mul() {
        // Booth-windowed double-and-add must agree with the group's own
        // scalar multiplication (exercised here with -1, a worst-case scalar).
        let gen = Element::prime_subgroup_generator();
        let s = -Fr::ONE;
        let res = gen * s;
        let got = mul(&s, &gen, 4);
        assert_eq!(Element::from(res), got)
    }

    // Reference double-and-add driven by `get_booth_index`, maintaining a
    // parallel scalar accumulator as a self-check of the digit decomposition.
    fn mul(scalar: &Fr, point: &Element, window: usize) -> Element {
        let u_bigint: BigInteger256 = (*scalar).into();
        use ark_ff::Field;
        let u = u_bigint.to_bytes_le();
        let n = Fr::MODULUS_BIT_SIZE as usize / window + 1;
        // Lookup tables of the multiples 0..=2^(window-1) of the point and of 1.
        let table = (0..=1 << (window - 1))
            .map(|i| point * &Fr::from(i as u64))
            .collect::<Vec<_>>();
        let table_scalars = (0..=1 << (window - 1))
            .map(|i| Fr::from(i as u64))
            .collect::<Vec<_>>();
        let mut acc: Element = Element::zero();
        let mut acc_scalar = Fr::ZERO;
        for i in (0..n).rev() {
            // Shift both accumulators left by one window.
            for _ in 0..window {
                acc = acc + acc;
                acc_scalar = acc_scalar + acc_scalar;
            }
            let idx = get_booth_index(i as usize, window, u.as_ref());
            if idx.is_negative() {
                acc += table[idx.unsigned_abs() as usize].neg();
                acc_scalar -= table_scalars[idx.unsigned_abs() as usize];
            }
            if idx.is_positive() {
                acc += table[idx.unsigned_abs() as usize];
                acc_scalar += table_scalars[idx.unsigned_abs() as usize];
            }
        }
        // The scalar accumulator must reconstruct the input scalar exactly.
        assert_eq!(acc_scalar, *scalar);
        acc.into()
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/banderwagon/src/trait_impls/serialize.rs | banderwagon/src/trait_impls/serialize.rs | use crate::Element;
use ark_ec::CurveGroup;
use ark_ed_on_bls12_381_bandersnatch::EdwardsProjective;
use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError, Valid};
impl CanonicalSerialize for Element {
    /// Serializes the element: compressed mode writes the 32-byte banderwagon
    /// encoding from `to_bytes`; uncompressed mode defers to the arkworks
    /// affine uncompressed encoding.
    fn serialize_with_mode<W: std::io::prelude::Write>(
        &self,
        mut writer: W,
        compress: ark_serialize::Compress,
    ) -> Result<(), SerializationError> {
        match compress {
            ark_serialize::Compress::Yes => {
                writer.write_all(&self.to_bytes())?;
                Ok(())
            }
            ark_serialize::Compress::No => self.0.into_affine().serialize_uncompressed(writer),
        }
    }

    /// Size in bytes of the encoding selected by `compress`.
    fn serialized_size(&self, compress: ark_serialize::Compress) -> usize {
        match compress {
            ark_serialize::Compress::Yes => Element::compressed_serialized_size(),
            ark_serialize::Compress::No => self.0.uncompressed_size(),
        }
    }
}
impl Valid for Element {
    // TODO: Arkworks has split up validation from serialization
    // TODO Element doesnt currently work like this though
    /// Always succeeds: validity is enforced during deserialization
    /// (`Element::from_bytes`) rather than by this hook.
    fn check(&self) -> Result<(), SerializationError> {
        Ok(())
    }
}
impl CanonicalDeserialize for Element {
    /// Deserializes an `Element`, either from the 32-byte compressed
    /// banderwagon encoding or from the arkworks uncompressed encoding.
    ///
    /// NOTE: the `validate` flag is ignored. For compressed input,
    /// `Element::from_bytes` always performs the validity check, so the data
    /// is validated regardless of the flag; the original code matched on
    /// `validate` with two identical arms, which has been removed.
    fn deserialize_with_mode<R: std::io::prelude::Read>(
        mut reader: R,
        compress: ark_serialize::Compress,
        _validate: ark_serialize::Validate,
    ) -> Result<Self, SerializationError> {
        match compress {
            ark_serialize::Compress::Yes => {
                let mut bytes = [0u8; Element::compressed_serialized_size()];
                reader
                    .read_exact(&mut bytes)
                    .map_err(SerializationError::IoError)?;
                Element::from_bytes(&bytes).ok_or(SerializationError::InvalidData)
            }
            ark_serialize::Compress::No => {
                let point = EdwardsProjective::deserialize_uncompressed(reader)?;
                Ok(Element(point))
            }
        }
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/banderwagon/src/trait_impls/from_to_bytes.rs | banderwagon/src/trait_impls/from_to_bytes.rs | use ark_serialize::SerializationError;
/// Conversion of a value into its byte representation `T`.
pub trait ToBytes<T> {
    fn to_bytes(&self) -> Result<T, SerializationError>;
}
/// Fallible construction of a value from its byte representation `T`.
pub trait FromBytes<T> {
    fn from_bytes(bytes: T) -> Result<Self, SerializationError>
    where
        Self: Sized;
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/banderwagon/src/trait_impls/ops.rs | banderwagon/src/trait_impls/ops.rs | use crate::Element;
use ark_ed_on_bls12_381_bandersnatch::Fr;
use std::{
hash::Hash,
iter::Sum,
ops::{Add, AddAssign, Mul, Neg, Sub},
};
impl Mul<Fr> for Element {
    type Output = Element;

    /// Scalar multiplication: `element * scalar`.
    fn mul(self, rhs: Fr) -> Self::Output {
        let product = self.0.mul(rhs);
        Element(product)
    }
}
impl Mul<&Fr> for &Element {
    type Output = Element;

    /// Scalar multiplication through references: `&element * &scalar`.
    fn mul(self, rhs: &Fr) -> Self::Output {
        let product = self.0.mul(rhs);
        Element(product)
    }
}
impl Add<Element> for Element {
    type Output = Element;

    /// Group addition of two elements.
    fn add(self, rhs: Element) -> Self::Output {
        let sum = self.0 + rhs.0;
        Element(sum)
    }
}
impl AddAssign<Element> for Element {
    /// In-place group addition: `lhs += rhs`.
    fn add_assign(&mut self, rhs: Element) {
        self.0 += rhs.0
    }
}
impl Sub<Element> for Element {
    type Output = Element;

    /// Group subtraction of two elements.
    fn sub(self, rhs: Element) -> Self::Output {
        let difference = self.0 - rhs.0;
        Element(difference)
    }
}
impl Neg for Element {
    type Output = Element;

    /// Group negation (additive inverse).
    fn neg(self) -> Self::Output {
        let negated = -self.0;
        Element(negated)
    }
}
impl Sum for Element {
fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
Element(iter.map(|element| element.0).sum())
}
}
impl Hash for Element {
    /// Hashes the byte encoding produced by `to_bytes`, so elements with the
    /// same encoding hash identically.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.to_bytes().hash(state)
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/banderwagon/benches/benchmark.rs | banderwagon/benches/benchmark.rs | use banderwagon::{msm::MSMPrecompWnaf, msm_windowed_sign::MSMPrecompWindowSigned, Element, Fr};
use criterion::{criterion_group, criterion_main, Criterion};
use rand::RngCore;
/// Benchmarks both MSM precomputation strategies on the same small instance:
/// fixed-seed bases and short (16-byte) random scalars.
pub fn msm_wnaf(c: &mut Criterion) {
    const NUM_ELEMENTS: usize = 5;
    let bases = random_point(120, NUM_ELEMENTS);
    let scalars = random_scalars(NUM_ELEMENTS, 16);

    let wnaf_precomp = MSMPrecompWnaf::new(&bases, 12);
    c.bench_function(&format!("msm wnaf: {}", NUM_ELEMENTS), |b| {
        b.iter(|| wnaf_precomp.mul(&scalars))
    });

    let signed_precomp = MSMPrecompWindowSigned::new(&bases, 16);
    c.bench_function(&format!("msm precomp 16: {}", NUM_ELEMENTS), |b| {
        b.iter(|| signed_precomp.mul(&scalars))
    });
}
// NOTE(review): the function name says 32 bytes but the benchmark id and the
// random buffer are 64 bytes; renaming would also require updating the
// `criterion_group!` registration, so the mismatch is only flagged here.
pub fn keccak_32bytes(c: &mut Criterion) {
    use rand::Rng;
    use sha3::{Digest, Keccak256};
    c.bench_function("keccak 64 bytes", |b| {
        b.iter_with_setup(
            // Setup function: generates new random data for each iteration
            || {
                let keccak = Keccak256::default();
                let mut rand_buffer = [0u8; 64];
                rand::thread_rng().fill(&mut rand_buffer);
                (keccak, rand_buffer)
            },
            // Timed routine: absorb the 64 random bytes and finalize the digest.
            |(mut keccak, rand_buffer)| {
                keccak.update(&rand_buffer);
                keccak.finalize()
            },
        )
    });
}
/// Deterministic "random" points: (seed + i + 1) * G for i in 0..num_points.
fn random_point(seed: u64, num_points: usize) -> Vec<Element> {
    let mut points = Vec::with_capacity(num_points);
    for i in 0..num_points {
        let multiplier = Fr::from(seed + i as u64 + 1);
        points.push(Element::prime_subgroup_generator() * multiplier);
    }
    points
}
/// Scalars built from `num_bytes` random bytes each, reduced into Fr
/// (little-endian, mod the scalar field order).
fn random_scalars(num_points: usize, num_bytes: usize) -> Vec<Fr> {
    use ark_ff::PrimeField;
    let mut scalars = Vec::with_capacity(num_points);
    for _ in 0..num_points {
        let mut bytes = vec![0u8; num_bytes];
        rand::thread_rng().fill_bytes(&mut bytes[..]);
        scalars.push(Fr::from_le_bytes_mod_order(&bytes));
    }
    scalars
}
// Register the benchmark group and generate the criterion entry point.
criterion_group!(benches, msm_wnaf, keccak_32bytes);
criterion_main!(benches);
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/bindings/java/rust_code/build.rs | bindings/java/rust_code/build.rs | use std::{env, path::PathBuf};
/// Path to the java directory that we will use to generate the java bindings from
///
/// Relative to the bindings folder.
const PATH_TO_JAVA_BINDINGS_FILE: &str = "java/java_code/src/main/java/verkle/cryptography";
// These are the files needed to pass to the `javac` command to generate the header file
const INPUT_FILES: [&str; 1] = ["LibIpaMultipoint.java"];
/// Build script: regenerates the JNI header for `LibIpaMultipoint` by running
/// `javac -h .` over the Java binding sources, and tells cargo to re-run when
/// those sources change.
fn main() {
    let path_to_bindings_dir = path_to_bindings_folder();
    let path_to_java_bindings_file = path_to_bindings_dir.join(PATH_TO_JAVA_BINDINGS_FILE);
    // `display()` avoids panicking on non-UTF-8 paths (the original used
    // `to_str().unwrap()`).
    println!(
        "cargo:rerun-if-changed={}",
        path_to_java_bindings_file.display()
    );
    // Generate the header file
    let mut command = std::process::Command::new("javac");
    command.arg("-h").arg(".");
    for file in INPUT_FILES.iter() {
        command.arg(path_to_java_bindings_file.join(file));
    }
    // A bare unwrap here produced an unhelpful error when the JDK is absent.
    let output = command
        .output()
        .expect("failed to run `javac`; is a JDK installed and on PATH?");
    if !output.status.success() {
        // Lossy conversion: javac's stderr is not guaranteed to be valid UTF-8.
        let stderr = String::from_utf8_lossy(&output.stderr);
        panic!("javac failed: {}", stderr)
    }
}
/// Returns the `bindings/` directory: two levels above this crate's manifest dir.
fn path_to_bindings_folder() -> PathBuf {
    let crate_dir = PathBuf::from(
        env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR is set by cargo"),
    );
    // Go up two directories to be at bindings parent directory
    crate_dir
        .parent()
        .and_then(|p| p.parent())
        .expect("crate dir should have at least two ancestors")
        .to_path_buf()
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/bindings/java/rust_code/src/lib.rs | bindings/java/rust_code/src/lib.rs | /* Copyright Besu Contributors
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
mod parsers;
use parsers::{parse_commitment, parse_commitments, parse_indices, parse_scalars};
mod utils;
use utils::{
byte_to_depth_extension_present, bytes32_to_element, bytes32_to_scalar, jobjectarray_to_vec,
};
use utils::{
convert_byte_array_to_fixed_array, convert_to_btree_set, get_array, get_optional_array,
};
use jni::objects::{JByteArray, JClass, JObjectArray};
use jni::JNIEnv;
use once_cell::sync::Lazy;
use ipa_multipoint::ipa::IPAProof;
use ipa_multipoint::multiproof::MultiPointProof;
use std::convert::TryInto;
use verkle_trie::proof::{ExtPresent, VerificationHint, VerkleProof};
// TODO: Use a pointer here instead. This is only being used so that the interface does not get changed.
// TODO: and bindings do not need to be modified.
// Process-wide, lazily-initialized precomputation context shared by all JNI calls.
pub static CONFIG: Lazy<ffi_interface::Context> = Lazy::new(ffi_interface::Context::default);
/// Commit receives a list of 32 byte scalars and returns a 32 byte scalar
///
/// Scalar is actually the map_to_field(commitment) because we want to
/// reuse the commitment in parent node. This is ported from rust-verkle.
// On every error path: throw IllegalArgumentException in the JVM and return an
// empty array. (The trailing `let result = match ...; result` binding was
// removed — clippy::let_and_return.)
#[no_mangle]
pub extern "system" fn Java_verkle_cryptography_LibIpaMultipoint_commit<'local>(
    mut env: JNIEnv<'local>,
    _class: JClass<'_>,
    values: JByteArray,
) -> JByteArray<'local> {
    // Parse the flat byte array into scalar inputs.
    let input = match parse_scalars(&env, values) {
        Ok(v) => v,
        Err(e) => {
            env.throw_new("java/lang/IllegalArgumentException", e)
                .expect("Failed to throw exception for commit inputs.");
            return JByteArray::default();
        }
    };
    let commitment = match ffi_interface::commit_to_scalars(&CONFIG, &input) {
        Ok(v) => v,
        Err(e) => {
            let error_message = format!("Could not commit to scalars: {:?}", e);
            env.throw_new("java/lang/IllegalArgumentException", &error_message)
                .expect("Failed to throw exception for commit inputs.");
            return JByteArray::default();
        }
    };
    // Hand the commitment bytes back to the JVM.
    match env.byte_array_from_slice(&commitment) {
        Ok(v) => v,
        Err(e) => {
            let error_message = format!("Couldn't return commitment.: {:?}", e);
            env.throw_new("java/lang/IllegalArgumentException", &error_message)
                .expect("Couldn't convert to byte array");
            JByteArray::default()
        }
    }
}
/// Like `commit`, but returns the 32-byte compressed serialization of the
/// commitment instead of the uncompressed form.
#[no_mangle]
pub extern "system" fn Java_verkle_cryptography_LibIpaMultipoint_commitAsCompressed<'local>(
    mut env: JNIEnv<'local>,
    _class: JClass<'_>,
    values: JByteArray,
) -> JByteArray<'local> {
    let input = match parse_scalars(&env, values) {
        Ok(v) => v,
        Err(e) => {
            env.throw_new("java/lang/IllegalArgumentException", e)
                .expect("Failed to throw exception for commit inputs.");
            return JByteArray::default();
        }
    };
    let commitment = match ffi_interface::commit_to_scalars(&CONFIG, &input) {
        Ok(v) => v,
        Err(e) => {
            env.throw_new("java/lang/IllegalArgumentException", format!("{e:?}"))
                .expect("Failed to throw exception for commit inputs.");
            return JByteArray::default();
        }
    };
    let compressed = ffi_interface::serialize_commitment(commitment);
    // Direct return of the match removes the redundant `let result = ...; result`.
    match env.byte_array_from_slice(&compressed) {
        Ok(v) => v,
        Err(e) => {
            let error_message = format!("Couldn't return commitment: {:?}", e);
            env.throw_new("java/lang/IllegalArgumentException", &error_message)
                .expect("Couldn't convert to byte array");
            JByteArray::default()
        }
    }
}
#[no_mangle]
pub extern "system" fn Java_verkle_cryptography_LibIpaMultipoint_updateSparse<'local>(
mut env: JNIEnv<'local>,
_class: JClass<'_>,
commitment: JByteArray,
indices: JByteArray,
old_values: JByteArray,
new_values: JByteArray,
) -> JByteArray<'local> {
let commitment = match parse_commitment(&env, commitment) {
Ok(v) => v,
Err(e) => {
env.throw_new("java/lang/IllegalArgumentException", e)
.expect("Failed to throw exception for updateSparse commitment input.");
return JByteArray::default();
}
};
let pos = match parse_indices(&env, indices) {
Ok(v) => v,
Err(e) => {
env.throw_new("java/lang/IllegalArgumentException", e)
.expect("Failed to throw exception for commit inputs.");
return JByteArray::default();
}
};
let old = match parse_scalars(&env, old_values) {
Ok(v) => v,
Err(e) => {
env.throw_new("java/lang/IllegalArgumentException", e)
.expect("Failed to throw exception for commit inputs.");
return JByteArray::default();
}
};
let old: Vec<ffi_interface::ScalarBytes> = old
.chunks_exact(32)
.map(|x| {
let mut array = [0u8; 32];
array.copy_from_slice(x);
array
})
.collect();
let new = match parse_scalars(&env, new_values) {
Ok(v) => v,
Err(e) => {
env.throw_new("java/lang/IllegalArgumentException", e)
.expect("Failed to throw exception for commit inputs.");
return JByteArray::default();
}
};
let new: Vec<ffi_interface::ScalarBytes> = new
.chunks_exact(32)
.map(|x| {
let mut array = [0u8; 32];
array.copy_from_slice(x);
array
})
.collect();
let commitment =
match ffi_interface::update_commitment_sparse(&CONFIG, commitment, pos, old, new) {
Ok(v) => v,
Err(e) => {
env.throw_new("java/lang/IllegalArgumentException", format!("{e:?}"))
.expect("Failed to throw exception for commit inputs.");
return JByteArray::default();
}
};
let result = match env.byte_array_from_slice(&commitment) {
Ok(v) => v,
Err(e) => {
let error_message = format!("Couldn't return commitment: {:?}", e);
env.throw_new("java/lang/IllegalArgumentException", &error_message)
.expect("Couldn't convert to byte array");
return JByteArray::default();
}
};
result
}
/// Serializes a single parsed commitment into its compressed form via
/// `ffi_interface::serialize_commitment`.
#[no_mangle]
pub extern "system" fn Java_verkle_cryptography_LibIpaMultipoint_compress<'local>(
    mut env: JNIEnv<'local>,
    _class: JClass<'_>,
    commitment: JByteArray,
) -> JByteArray<'local> {
    let commitment = match parse_commitment(&env, commitment) {
        Ok(v) => v,
        Err(e) => {
            env.throw_new("java/lang/IllegalArgumentException", e)
                .expect("Failed to throw exception for commit inputs.");
            return JByteArray::default();
        }
    };
    let compressed = ffi_interface::serialize_commitment(commitment);
    // Direct return (removes the clippy::let_and_return binding).
    match env.byte_array_from_slice(&compressed) {
        Ok(s) => s,
        Err(e) => {
            let error_message = format!(
                "Invalid commitment output. Couldn't convert to byte array: {:?}",
                e
            );
            env.throw_new("java/lang/IllegalArgumentException", &error_message)
                .expect("Couldn't convert to byte array");
            JByteArray::default()
        }
    }
}
/// Serializes many commitments (64 bytes each, concatenated) into their
/// compressed forms, returned as one concatenated byte array.
#[no_mangle]
pub extern "system" fn Java_verkle_cryptography_LibIpaMultipoint_compressMany<'local>(
    mut env: JNIEnv<'local>,
    _class: JClass<'_>,
    commitments: JByteArray,
) -> JByteArray<'local> {
    let commitments = match parse_commitments(&env, commitments) {
        Ok(v) => v,
        Err(e) => {
            env.throw_new("java/lang/IllegalArgumentException", e)
                .expect("Failed to throw exception for commit inputs.");
            return JByteArray::default();
        }
    };
    // Each 64-byte chunk is one commitment; `try_into` to [u8; 64] cannot fail
    // for an exact chunk.
    let compressed: Vec<u8> = commitments
        .chunks_exact(64)
        .flat_map(|x| ffi_interface::serialize_commitment(x.try_into().unwrap()))
        .collect();
    match env.byte_array_from_slice(&compressed) {
        Ok(s) => s,
        Err(e) => {
            let error_message = format!(
                "Invalid commitment output. Couldn't convert to byte array: {:?}",
                e
            );
            env.throw_new("java/lang/IllegalArgumentException", &error_message)
                .expect("Couldn't convert to byte array");
            JByteArray::default()
        }
    }
}
/// Maps a single parsed commitment through `ffi_interface::hash_commitment`
/// and returns the resulting bytes.
#[no_mangle]
pub extern "system" fn Java_verkle_cryptography_LibIpaMultipoint_hash<'local>(
    mut env: JNIEnv<'local>,
    _class: JClass<'_>,
    commitment: JByteArray,
) -> JByteArray<'local> {
    let commitment = match parse_commitment(&env, commitment) {
        Ok(v) => v,
        Err(e) => {
            env.throw_new("java/lang/IllegalArgumentException", e)
                .expect("Failed to throw exception for commit inputs.");
            return JByteArray::default();
        }
    };
    let hash = ffi_interface::hash_commitment(commitment);
    // Direct return (removes the clippy::let_and_return binding).
    match env.byte_array_from_slice(&hash) {
        Ok(s) => s,
        Err(_e) => {
            env.throw_new(
                "java/lang/IllegalArgumentException",
                "Invalid commitment output. Couldn't convert to byte array.",
            )
            .expect("Couldn't convert to byte array");
            JByteArray::default()
        }
    }
}
/// Combines two parsed commitments via `ffi_interface::add_commitment` and
/// returns the resulting bytes.
#[no_mangle]
pub extern "system" fn Java_verkle_cryptography_LibIpaMultipoint_addCommitment<'local>(
    mut env: JNIEnv<'local>,
    _class: JClass<'_>,
    lhs: JByteArray,
    rhs: JByteArray,
) -> JByteArray<'local> {
    let lhs = match parse_commitment(&env, lhs) {
        Ok(v) => v,
        Err(e) => {
            env.throw_new("java/lang/IllegalArgumentException", e)
                .expect("Failed to throw exception for add commitment.");
            return JByteArray::default();
        }
    };
    let rhs = match parse_commitment(&env, rhs) {
        Ok(v) => v,
        Err(e) => {
            env.throw_new("java/lang/IllegalArgumentException", e)
                .expect("Failed to throw exception for add commitment.");
            return JByteArray::default();
        }
    };
    let output = ffi_interface::add_commitment(lhs, rhs);
    // Direct return (removes the clippy::let_and_return binding).
    match env.byte_array_from_slice(&output) {
        Ok(s) => s,
        Err(_e) => {
            env.throw_new(
                "java/lang/IllegalArgumentException",
                "Invalid commitment output. Couldn't convert to byte array.",
            )
            .expect("Couldn't convert to byte array");
            JByteArray::default()
        }
    }
}
/// Runs `ffi_interface::hash_commitments` over many commitments (64 bytes
/// each, concatenated) and returns the concatenated hash outputs.
#[no_mangle]
pub extern "system" fn Java_verkle_cryptography_LibIpaMultipoint_hashMany<'local>(
    mut env: JNIEnv<'local>,
    _class: JClass<'_>,
    commitments: JByteArray,
) -> JByteArray<'local> {
    let input = match parse_commitments(&env, commitments) {
        Ok(v) => v,
        Err(e) => {
            env.throw_new("java/lang/IllegalArgumentException", e)
                .expect("Failed to throw exception for commit inputs.");
            return JByteArray::default();
        }
    };
    // Re-chunk the flat buffer into 64-byte commitments.
    let input: Vec<ffi_interface::CommitmentBytes> = input
        .chunks_exact(64)
        .map(|x| {
            let mut array = [0u8; 64];
            array.copy_from_slice(x);
            array
        })
        .collect();
    let hashes = ffi_interface::hash_commitments(&input);
    // Flatten the hash outputs back into one contiguous buffer for the JVM.
    let hashes: Vec<u8> = hashes.iter().flat_map(|x| x.iter().copied()).collect();
    match env.byte_array_from_slice(&hashes) {
        Ok(s) => s,
        Err(e) => {
            let error_message = format!(
                "Invalid scalars output. Couldn't convert to byte array: {:?}",
                e
            );
            env.throw_new("java/lang/IllegalArgumentException", &error_message)
                .expect("Couldn't convert to byte array");
            JByteArray::default()
        }
    }
}
/// JNI entry point: verifies a verkle pre-state proof against `prestate_root`.
///
/// All arguments arrive as flat Java arrays; they are reassembled into a
/// `VerkleProof` plus the sorted commitments, and `VerkleProof::check` is run
/// over the (key, optional current value) pairs. Any malformed argument makes
/// the function return `false` instead of throwing.
#[no_mangle]
pub extern "system" fn Java_verkle_cryptography_LibIpaMultipoint_verifyPreStateRoot(
    mut env: JNIEnv,
    _class: JClass<'_>,
    stems_keys: JObjectArray,
    current_values: JObjectArray,
    commitments_by_path: JObjectArray,
    cl: JObjectArray,
    cr: JObjectArray,
    other_stems: JObjectArray,
    d: JByteArray,
    depths_extension_present_stems: JByteArray,
    final_evaluation: JByteArray,
    prestate_root: JByteArray,
) -> bool {
    let num_keys = match env.get_array_length(&stems_keys) {
        Ok(len) => len,
        Err(_) => return false,
    };
    // Keys and their optional current values are paired by index.
    let mut formatted_keys: Vec<[u8; 32]> = Vec::new();
    let mut formatted_current_values: Vec<Option<[u8; 32]>> = Vec::new();
    for i in 0..num_keys {
        match get_array(&mut env, &stems_keys, i) {
            Some(key) => formatted_keys.push(key),
            None => return false,
        }
        // Fixed: this previously read `¤t_values` (an encoding-mangled
        // `&current_values`), which could not compile.
        match get_optional_array(&mut env, &current_values, i) {
            Some(value) => formatted_current_values.push(value),
            None => return false,
        }
    }
    // Commitments along the proof path, plus the IPA proof's L/R vectors.
    let formatted_commitments =
        match jobjectarray_to_vec(&mut env, &commitments_by_path, bytes32_to_element) {
            Some(vec) => vec,
            None => return false,
        };
    let formatted_cl = match jobjectarray_to_vec(&mut env, &cl, bytes32_to_element) {
        Some(vec) => vec,
        None => return false,
    };
    let formatted_cr = match jobjectarray_to_vec(&mut env, &cr, bytes32_to_element) {
        Some(vec) => vec,
        None => return false,
    };
    let formatted_d = match convert_byte_array_to_fixed_array(&env, d) {
        Some(arr) => arr,
        None => return false,
    };
    let formatted_final_evaluation = match convert_byte_array_to_fixed_array(&env, final_evaluation)
    {
        Some(arr) => arr,
        None => return false,
    };
    let scalar_final_evaluation = match bytes32_to_scalar(formatted_final_evaluation) {
        Some(scalar) => scalar,
        None => return false,
    };
    let g_x_comm = match bytes32_to_element(formatted_d) {
        Some(element) => element,
        None => return false,
    };
    // Rebuild the multipoint proof from its IPA pieces.
    let proof = MultiPointProof {
        open_proof: IPAProof {
            L_vec: formatted_cl,
            R_vec: formatted_cr,
            a: scalar_final_evaluation,
        },
        g_x_comm,
    };
    // Each byte packs an extension-status plus a depth; unzip them.
    let depths_bytes = match env.convert_byte_array(depths_extension_present_stems) {
        Ok(bytes) => bytes,
        Err(_) => return false,
    };
    let (formatted_extension_present, depths): (Vec<ExtPresent>, Vec<u8>) = depths_bytes
        .iter()
        .map(|&byte| byte_to_depth_extension_present(byte))
        .unzip();
    let formatted_other_stems = match convert_to_btree_set(&mut env, &other_stems) {
        Some(set) => set,
        None => return false,
    };
    let verkle_proof = VerkleProof {
        verification_hint: VerificationHint {
            depths,
            extension_present: formatted_extension_present,
            diff_stem_no_proof: formatted_other_stems,
        },
        comms_sorted: formatted_commitments,
        proof,
    };
    let prestate_root_bytes =
        match convert_byte_array_to_fixed_array(&env, prestate_root).and_then(bytes32_to_element) {
            Some(element) => element,
            None => return false,
        };
    // Renamed from `bool` (shadowing the primitive type name) for readability.
    let (is_valid, _update_hint) = verkle_proof.check(
        formatted_keys,
        formatted_current_values,
        prestate_root_bytes,
    );
    is_valid
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/bindings/java/rust_code/src/utils.rs | bindings/java/rust_code/src/utils.rs | use banderwagon::{CanonicalDeserialize, Element, Fr};
use jni::objects::{JByteArray, JObjectArray};
use jni::JNIEnv;
use std::collections::BTreeSet;
use verkle_trie::proof::ExtPresent;
/// Converts a 32-byte array into an `Element` object.
///
/// This function attempts to convert a fixed-size byte array into an `Element`.
/// If the bytes decode to a valid element it returns `Some(Element)`;
/// otherwise it returns `None`.
///
/// # Arguments
///
/// * `bytes` - A 32-byte array representing the binary representation of an `Element`.
///
/// # Returns
///
/// An `Option<Element>` which is `Some` if the conversion is successful, otherwise `None`.
pub fn bytes32_to_element(bytes: [u8; 32]) -> Option<Element> {
    // Validity checking is delegated entirely to banderwagon's
    // Element::from_bytes, which returns None on invalid encodings.
    Element::from_bytes(&bytes)
}
/// Converts a 32-byte array into a scalar value `Fr`.
///
/// The byte order is reversed before deserializing with arkworks' compressed
/// format (the deserializer expects the opposite endianness from the input).
/// Returns `Some(Fr)` on success, `None` if the bytes are not a valid scalar.
///
/// # Arguments
///
/// * `bytes` - A 32-byte array representing the binary representation of a scalar value.
///
/// # Returns
///
/// An `Option<Fr>` which is `Some` if the deserialization is successful, otherwise `None`.
pub fn bytes32_to_scalar(bytes: [u8; 32]) -> Option<Fr> {
    let mut reversed = bytes;
    reversed.reverse();
    CanonicalDeserialize::deserialize_compressed(reversed.as_slice()).ok()
}
/// Extracts extension presence and depth information from a byte.
///
/// The encoded byte carries the extension-status in its low bits and the
/// depth in the bits from bit 3 upward, so `value >> 3` recovers the depth.
///
/// NOTE(review): the status field appears to be 3 bits wide (the depth starts
/// at bit 3), yet the mask below is `value & 3` (two bits). That is equivalent
/// for the status values actually produced (0..=2), but it silently remaps
/// e.g. 5 to 1 instead of rejecting it; confirm against the serializer
/// whether `value & 7` was intended.
///
/// # Arguments
///
/// * `value` - A byte containing encoded extension presence and depth information.
///
/// # Returns
///
/// A tuple `(ExtPresent, u8)` where the first element represents the extension presence
/// status and the second element represents the depth.
pub fn byte_to_depth_extension_present(value: u8) -> (ExtPresent, u8) {
    let ext_status = value & 3;
    let ext_status = match ext_status {
        0 => ExtPresent::None,
        1 => ExtPresent::DifferentStem,
        2 => ExtPresent::Present,
        _ => return (ExtPresent::None, 0), // Handle unexpected value (3) gracefully: None with depth 0
    };
    let depth = value >> 3;
    (ext_status, depth)
}
/// Converts a `jobjectArray` into a `Vec<T>` by applying a conversion function
/// to each element.
///
/// Each inner Java byte[] must be exactly 32 bytes; a wrong-sized element or
/// a failed conversion makes the whole call return `None`.
///
/// # Arguments
///
/// * `env` - The JNI environment.
/// * `array` - The input `jobjectArray` containing the elements to be converted.
/// * `converter` - A function that converts a `[u8; 32]` array into `Option<T>`.
///
/// # Returns
///
/// `Some(Vec<T>)` if every element is 32 bytes and converts successfully,
/// otherwise `None`.
pub fn jobjectarray_to_vec<T, F>(
    env: &mut JNIEnv,
    array: &JObjectArray<'_>,
    mut converter: F,
) -> Option<Vec<T>>
where
    F: FnMut([u8; 32]) -> Option<T>,
{
    // Phase 1: pull every row out of the JVM and require each to be exactly
    // 32 bytes. Any wrong-sized row aborts before the converter ever runs
    // (matching the original two-phase behavior).
    let rows = jobject_array_to_2d_byte_array(env, array);
    let mut fixed_rows: Vec<[u8; 32]> = Vec::with_capacity(rows.len());
    for row in rows {
        fixed_rows.push(row.try_into().ok()?);
    }
    // Phase 2: convert rows in order; the first failed conversion yields None.
    let mut converted = Vec::with_capacity(fixed_rows.len());
    for fixed in fixed_rows {
        converted.push(converter(fixed)?);
    }
    Some(converted)
}
/// Converts a `jbyteArray` into a fixed-size `[u8; 32]` array.
///
/// This function attempts to convert a JNI `jbyteArray` into a Rust fixed-size byte array.
/// If the conversion is successful and the size is exactly 32 bytes, it returns `Some([u8; 32])`.
/// Otherwise, it returns `None`.
///
/// # Arguments
///
/// * `env` - The JNI environment.
/// * `byte_array` - The input `jbyteArray` to be converted.
///
/// # Returns
///
/// An `Option<[u8; 32]>` which is `Some` containing the converted byte array if successful,
/// otherwise `None`.
pub fn convert_byte_array_to_fixed_array(
env: &JNIEnv,
byte_array: JByteArray<'_>,
) -> Option<[u8; 32]> {
let bytes = env.convert_byte_array(byte_array).ok()?;
if bytes.len() != 32 {
return None;
}
let mut arr = [0u8; 32];
arr.copy_from_slice(&bytes);
Some(arr)
}
/// Retrieves a fixed-size `[u8; 32]` array from a `jobjectArray` at a specified index.
///
/// This function attempts to extract a byte array from a given index within a `jobjectArray`,
/// convert it into a `[u8; 32]` array, and return it. If the operation fails or the size
/// does not match, it returns `None`.
///
/// # Arguments
///
/// * `env` - The JNI environment.
/// * `array` - The `jobjectArray` from which to retrieve the byte array.
/// * `index` - The index within the array from which to retrieve the byte array.
///
/// # Returns
///
/// An `Option<[u8; 32]>` which is `Some` containing the byte array if successful, otherwise `None`.
pub fn get_array(env: &mut JNIEnv, array: &JObjectArray<'_>, index: i32) -> Option<[u8; 32]> {
let vec_vec = jobject_array_to_2d_byte_array(env, array);
let bytes = vec_vec.get(index as usize).cloned()?;
if bytes.len() != 32 {
return None;
}
Some(
bytes
.try_into()
.expect("infallible: expected a 32 byte vector"),
)
// let obj = env.get_object_array_element(array, index).ok()?;
// let bytes = env.convert_byte_array(obj.into_inner()).ok()?;
// if bytes.len() != 32 {
// return None;
// }
// let mut arr = [0u8; 32];
// arr.copy_from_slice(&bytes);
// Some(arr)
}
/// Retrieves an optional fixed-size `[u8; 32]` array from a `jobjectArray` at a specified index.
///
/// The outer `Option` is `None` when `index` is out of range; the inner
/// `Option` is `None` when the element at `index` is not exactly 32 bytes.
///
/// NOTE(review): the previous doc promised `Some(None)` for a Java `null`
/// element, but this implementation funnels everything through
/// `jobject_array_to_2d_byte_array`, which `unwrap`s every JNI call -- a null
/// inner element most likely panics there rather than mapping to `Some(None)`.
/// Confirm against the Java caller before relying on null handling.
///
/// # Arguments
///
/// * `env` - The JNI environment.
/// * `array` - The `jobjectArray` from which to retrieve the optional byte array.
/// * `index` - The index within the array from which to retrieve the byte array.
///
/// # Returns
///
/// `None` if `index` is out of bounds; `Some(None)` if the element is not
/// 32 bytes; `Some(Some([u8; 32]))` on success.
pub fn get_optional_array(
    env: &mut JNIEnv,
    array: &JObjectArray<'_>,
    index: i32,
) -> Option<Option<[u8; 32]>> {
    // Materialize the whole outer array, then pick row `index`.
    let vec_of_vec = jobject_array_to_2d_byte_array(env, array);
    vec_of_vec
        .get(index as usize)
        .cloned()
        // A row of the wrong length becomes Some(None) via the failed try_into.
        .map(|inner_vec| inner_vec.try_into().ok())
    // let obj_result = env.get_object_array_element(array, index).ok()?;
    // if obj_result.is_null() {
    //     return Some(None);
    // }
    // let bytes_result = env.convert_byte_array(obj_result.into_inner()).ok()?;
    // if bytes_result.len() == 32 {
    //     let mut arr = [0u8; 32];
    //     arr.copy_from_slice(&bytes_result);
    //     Some(Some(arr))
    // } else {
    //     Some(None)
    // }
}
/// Converts a `jobjectArray` into a `BTreeSet<[u8; 31]>`.
///
/// This function iterates over each element of the input `jobjectArray`, attempts to convert
/// each element into a `[u8; 31]` array, and inserts the result into a `BTreeSet`. If any
/// conversion fails or if the size does not match, it returns `None`.
///
/// # Arguments
///
/// * `env` - The JNI environment.
/// * `array` - The `jobjectArray` containing the elements to be converted.
///
/// # Returns
///
/// An `Option<BTreeSet<[u8; 31]>>` which is `Some` containing the converted elements as a set
/// if all conversions are successful, otherwise `None`.
pub fn convert_to_btree_set(
env: &mut JNIEnv,
array: &JObjectArray<'_>,
) -> Option<BTreeSet<[u8; 31]>> {
// jobject_array_to_2d_byte_array(env, array)
// .into_iter()
// .map(|arr| arr.try_into().ok())
// .collect()
let vec_of_vec = jobject_array_to_2d_byte_array(env, array);
let mut set = BTreeSet::new();
// Check if any of the inner elements are not 31 bytes and return None if so
// or add them to BTreeSet if they are
for arr in vec_of_vec {
if arr.len() != 31 {
return None;
}
set.insert(arr.try_into().expect("infallible: array is 31 bytes"));
}
Some(set)
}
/// Copies a Java `byte[][]` into a `Vec<Vec<u8>>`.
///
/// # Panics
///
/// Panics if any JNI call fails (length query, element fetch, or region
/// copy) -- e.g. when an inner element is not a byte array.
pub(crate) fn jobject_array_to_2d_byte_array(
    env: &mut JNIEnv,
    array: &JObjectArray,
) -> Vec<Vec<u8>> {
    let row_count = env.get_array_length(array).unwrap();
    let mut rows = Vec::with_capacity(row_count as usize);
    for row_index in 0..row_count {
        // Fetch the inner byte[] object and view it as a JByteArray.
        let row_obj = env.get_object_array_element(array, row_index).unwrap();
        let row: JByteArray = JByteArray::from(row_obj);
        let row_len = env.get_array_length(&row).unwrap();
        // JNI hands back jbyte (i8); copy, then reinterpret each byte as u8.
        let mut signed = vec![0i8; row_len as usize];
        env.get_byte_array_region(row, 0, &mut signed).unwrap();
        rows.push(signed.into_iter().map(|b| b as u8).collect());
    }
    rows
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/bindings/java/rust_code/src/parsers.rs | bindings/java/rust_code/src/parsers.rs | use ffi_interface::CommitmentBytes;
use jni::{objects::JByteArray, JNIEnv};
use std::convert::TryFrom;
/// Copies a Java byte[] expected to hold whole 32-byte scalars.
///
/// Returns the raw bytes when the length is a multiple of 32, otherwise a
/// descriptive error message.
pub fn parse_scalars<'a>(env: &'a JNIEnv<'a>, values: JByteArray<'a>) -> Result<Vec<u8>, String> {
    let raw = env
        .convert_byte_array(values)
        .map_err(|_| "cannot convert byte array to vector")?;
    match raw.len() % 32 {
        0 => Ok(raw),
        _ => Err("Wrong input size: should be a multiple of 32 bytes".to_string()),
    }
}
/// Widens each byte of the Java array into a `usize` index.
pub fn parse_indices(env: &JNIEnv, values: JByteArray<'_>) -> Result<Vec<usize>, String> {
    let raw = env
        .convert_byte_array(values)
        .map_err(|_| "could not convert byte array to vector".to_string())?;
    let mut indices = Vec::with_capacity(raw.len());
    for byte in raw {
        indices.push(usize::from(byte));
    }
    Ok(indices)
}
/// Parses a Java byte[] into the 64-byte `CommitmentBytes` encoding.
///
/// Fails with a descriptive message when the JNI copy fails or when the
/// payload is not exactly 64 bytes.
pub fn parse_commitment(
    env: &JNIEnv,
    commitment: JByteArray<'_>,
) -> Result<CommitmentBytes, String> {
    let raw = env
        .convert_byte_array(commitment)
        .map_err(|_| "cannot convert byte vector to vector")?;
    CommitmentBytes::try_from(raw)
        .map_err(|_| "Wrong commitment size: should be 64 bytes".to_string())
}
/// Copies a Java byte[] expected to hold whole 64-byte commitments.
///
/// Returns the raw bytes when the length is a multiple of 64, otherwise a
/// descriptive error message.
pub fn parse_commitments<'a>(
    env: &JNIEnv<'a>,
    commitment: JByteArray<'a>,
) -> Result<Vec<u8>, String> {
    let raw = env
        .convert_byte_array(commitment)
        .map_err(|_| "cannot convert byte vector to vector")?;
    if raw.len() % 64 == 0 {
        Ok(raw)
    } else {
        Err("Wrong input size: should be a multiple of 64 bytes".to_string())
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/bindings/c/build.rs | bindings/c/build.rs | use std::env;
use std::path::PathBuf;
/// The directory where the generated header file will be written.
const DIR_FOR_HEADER: &str = "build";
/// Build script: emits linker flags and generates this crate's C header into
/// `build/<package>.h` via cbindgen.
fn main() {
    // linker flags
    // Link libm on Unix-like systems (needed due to use of num_cpus crate)
    #[cfg(not(target_os = "windows"))]
    println!("cargo:rustc-link-lib=m");
    println!("cargo:rerun-if-changed=src/");
    let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
    let package_name = env::var("CARGO_PKG_NAME").unwrap();
    // Header lands at <crate>/build/<package>.h.
    let header_path = PathBuf::from(&crate_dir)
        .join(DIR_FOR_HEADER)
        .join(format!("{}.h", package_name));
    cbindgen::Builder::new()
        .with_crate(crate_dir)
        .with_language(cbindgen::Language::C)
        .generate()
        .unwrap()
        .write_to_file(header_path.display().to_string());
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/bindings/c/src/lib.rs | bindings/c/src/lib.rs | use ffi_interface::{
deserialize_proof_query, deserialize_proof_query_uncompressed, deserialize_verifier_query,
deserialize_verifier_query_uncompressed, fr_from_le_bytes, Context,
};
use ipa_multipoint::committer::Committer;
use ipa_multipoint::multiproof::{MultiPoint, MultiPointProof, ProverQuery, VerifierQuery};
use ipa_multipoint::transcript::Transcript;
#[allow(deprecated)]
use ffi_interface::get_tree_key_hash;
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
/// C ABI: allocates a default `Context` and transfers ownership to the
/// caller. Must be released with `context_free`.
pub extern "C" fn context_new() -> *mut Context {
    Box::into_raw(Box::new(Context::default()))
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
/// C ABI: releases a `Context` created by `context_new`.
/// Passing null is a no-op, mirroring C `free` semantics.
pub extern "C" fn context_free(ctx: *mut Context) {
    if ctx.is_null() {
        return;
    }
    // SAFETY: a non-null `ctx` must have come from Box::into_raw in
    // context_new; reconstituting the Box drops it exactly once.
    unsafe {
        drop(Box::from_raw(ctx));
    }
}
#[allow(deprecated)]
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
/// C ABI: writes the 32-byte tree-key hash of `(address, tree_index_le)`
/// into `out`.
///
/// Callers must pass pointers to 32 readable bytes for `address` and
/// `tree_index_le`, 32 writable bytes for `out`, and a live `Context`.
/// Null pointers cause an early return with `out` left untouched.
pub extern "C" fn pedersen_hash(
    ctx: *mut Context,
    address: *const u8,
    tree_index_le: *const u8,
    out: *mut u8,
) {
    if ctx.is_null() || address.is_null() || tree_index_le.is_null() || out.is_null() {
        // TODO: We have ommited the error handling for null pointers at the moment.
        // TODO: Likely will panic in this case.
        return;
    }
    // SAFETY: the caller guarantees each non-null pointer addresses at least
    // 32 valid bytes and that `ctx` came from context_new.
    let (tree_index, add, context) = unsafe {
        let add_slice = std::slice::from_raw_parts(address, 32);
        let ctx_ref = &*ctx;
        let tree_index_slice = std::slice::from_raw_parts(tree_index_le, 32);
        (tree_index_slice, add_slice, ctx_ref)
    };
    let hash = get_tree_key_hash(
        context,
        <[u8; 32]>::try_from(add).unwrap(),
        <[u8; 32]>::try_from(tree_index).unwrap(),
    );
    // SAFETY: the caller guarantees `out` has room for 32 bytes.
    unsafe {
        let commitment_data_slice = std::slice::from_raw_parts_mut(out, 32);
        commitment_data_slice.copy_from_slice(&hash);
    }
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
/// C ABI: commits to `len / 32` little-endian 32-byte scalars in the lagrange
/// basis and writes the 32-byte compressed commitment to `out`.
///
/// Callers must pass a pointer to `len` readable bytes, a live `Context`, and
/// 32 writable bytes for `out`. Panics if any 32-byte chunk is not a valid
/// field element.
pub extern "C" fn multi_scalar_mul(
    ctx: *mut Context,
    scalars: *const u8,
    len: usize,
    out: *mut u8,
) {
    // SAFETY: caller guarantees `scalars` covers `len` bytes and `ctx` is a
    // live Context from context_new.
    let (scalar_slice, context) = unsafe {
        let scalar = std::slice::from_raw_parts(scalars, len);
        let ctx_ref = &*ctx;
        (scalar, ctx_ref)
    };
    // Each input scalar is 32 bytes, so there are len / 32 of them; the old
    // code reserved `len` slots, over-allocating 32x.
    let mut inputs = Vec::with_capacity(len / 32);
    for chunk in scalar_slice.chunks_exact(32) {
        inputs.push(fr_from_le_bytes(chunk).unwrap());
    }
    let data = context.committer.commit_lagrange(&inputs);
    let hash = data.to_bytes();
    // SAFETY: caller guarantees `out` has room for 32 bytes.
    unsafe {
        let commitment_data_slice = std::slice::from_raw_parts_mut(out, 32);
        commitment_data_slice.copy_from_slice(&hash);
    }
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
/// C ABI: builds a multipoint proof from serialized prover queries (each
/// CHUNK_SIZE bytes, compressed encoding) and writes the PROOF_SIZE-byte
/// serialized proof to `out`.
///
/// Panics (assert) when `len` is not a whole number of query chunks.
pub extern "C" fn create_proof(ctx: *mut Context, input: *const u8, len: usize, out: *mut u8) {
    const CHUNK_SIZE: usize = 8257; // TODO: get this from ipa-multipoint
    const PROOF_SIZE: usize = 576; // TODO: get this from ipa-multipoint
    // SAFETY: caller guarantees `input` covers `len` readable bytes and `ctx`
    // is a live Context from context_new.
    let (scalar_slice, context) = unsafe {
        let scalar = std::slice::from_raw_parts(input, len);
        let ctx_ref = &*ctx;
        (scalar, ctx_ref)
    };
    let num_openings = len / CHUNK_SIZE;
    let proofs_bytes = scalar_slice.chunks_exact(CHUNK_SIZE);
    assert!(
        proofs_bytes.remainder().is_empty(),
        "There should be no left over bytes when chunking the proof"
    );
    // - Deserialize proof queries
    //
    let mut prover_queries: Vec<ProverQuery> = Vec::with_capacity(num_openings);
    for proof_bytes in proofs_bytes {
        let prover_query = deserialize_proof_query(proof_bytes);
        prover_queries.push(prover_query);
    }
    // - Create proofs
    //
    // The transcript label must match the verifier's ("verkle").
    let mut transcript = Transcript::new(b"verkle");
    let proof = MultiPoint::open(
        // TODO: This should not need to clone the CRS, but instead take a reference
        context.crs.clone(),
        &context.precomputed_weights,
        &mut transcript,
        prover_queries,
    );
    let hash = proof.to_bytes().expect("cannot serialize proof");
    // SAFETY: caller guarantees `out` has room for PROOF_SIZE bytes.
    unsafe {
        let commitment_data_slice = std::slice::from_raw_parts_mut(out, PROOF_SIZE);
        commitment_data_slice.copy_from_slice(&hash);
    }
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
/// C ABI: like `create_proof`, but queries and the resulting proof use the
/// uncompressed (64-byte commitment) encoding.
///
/// Panics (assert) when `len` is not a whole number of query chunks.
pub extern "C" fn create_proof_uncompressed(
    ctx: *mut Context,
    input: *const u8,
    len: usize,
    out: *mut u8,
) {
    // 8257 + 32 because first commitment is uncompressed as 64 bytes
    const CHUNK_SIZE: usize = 8289; // TODO: get this from ipa-multipoint
    const PROOF_SIZE: usize = 1120; // TODO: get this from ipa-multipoint
    // SAFETY: caller guarantees `input` covers `len` readable bytes and `ctx`
    // is a live Context from context_new.
    let (scalar_slice, context) = unsafe {
        let scalar = std::slice::from_raw_parts(input, len);
        let ctx_ref = &*ctx;
        (scalar, ctx_ref)
    };
    let num_openings = len / CHUNK_SIZE;
    let proofs_bytes = scalar_slice.chunks_exact(CHUNK_SIZE);
    assert!(
        proofs_bytes.remainder().is_empty(),
        "There should be no left over bytes when chunking the proof"
    );
    // - Deserialize proof queries
    //
    let mut prover_queries: Vec<ProverQuery> = Vec::with_capacity(num_openings);
    for proof_bytes in proofs_bytes {
        let prover_query = deserialize_proof_query_uncompressed(proof_bytes);
        prover_queries.push(prover_query);
    }
    // - Create proofs
    //
    // The transcript label must match the verifier's ("verkle").
    let mut transcript = Transcript::new(b"verkle");
    let proof = MultiPoint::open(
        // TODO: This should not need to clone the CRS, but instead take a reference
        context.crs.clone(),
        &context.precomputed_weights,
        &mut transcript,
        prover_queries,
    );
    let hash = proof
        .to_bytes_uncompressed()
        .expect("cannot serialize proof");
    // SAFETY: caller guarantees `out` has room for PROOF_SIZE bytes.
    unsafe {
        let commitment_data_slice = std::slice::from_raw_parts_mut(out, PROOF_SIZE);
        commitment_data_slice.copy_from_slice(&hash);
    }
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
/// C ABI: verifies a serialized multipoint proof (compressed encoding).
///
/// `input` is `[PROOF_SIZE proof bytes || n * CHUNK_SIZE verifier-query bytes]`
/// of total length `len`. Returns `true` iff the proof verifies. Panics
/// (assert/unwrap) on malformed lengths or an undecodable proof.
pub extern "C" fn verify_proof(ctx: *mut Context, input: *const u8, len: usize) -> bool {
    const CHUNK_SIZE: usize = 65; // TODO: get this from ipa-multipoint
    const PROOF_SIZE: usize = 576; // TODO: get this from ipa-multipoint
    // SAFETY: caller guarantees `input` covers `len` readable bytes and `ctx`
    // is a live Context from context_new.
    let (proof_slice, verifier_queries_slices, context) = unsafe {
        let input_slice = std::slice::from_raw_parts(input, len);
        let (proof_slice, verifier_queries_slices) = input_slice.split_at(PROOF_SIZE);
        let ctx_ref = &*ctx;
        (proof_slice, verifier_queries_slices, ctx_ref)
    };
    let verifier_queries_bytes = verifier_queries_slices.chunks_exact(CHUNK_SIZE);
    assert!(
        verifier_queries_bytes.remainder().is_empty(),
        "There should be no left over bytes when chunking the verifier queries"
    );
    // `chunks_exact(..).len()` already counts whole CHUNK_SIZE-sized chunks,
    // so it IS the number of openings. (The previous `/ CHUNK_SIZE` divided
    // twice, making the capacity hint wrong -- harmless but under-reserved.)
    let num_openings = verifier_queries_bytes.len();
    // - Deserialize verifier queries
    //
    let mut verifier_queries: Vec<VerifierQuery> = Vec::with_capacity(num_openings);
    for verifier_query_bytes in verifier_queries_bytes {
        let verifier_query = deserialize_verifier_query(verifier_query_bytes);
        verifier_queries.push(verifier_query);
    }
    // - Check proof
    //
    let proof = MultiPointProof::from_bytes(proof_slice, 256).unwrap();
    let mut transcript = Transcript::new(b"verkle");
    MultiPointProof::check(
        &proof,
        // Borrow the CRS directly; the previous `.clone()` copied the whole
        // CRS only to immediately borrow it.
        &context.crs,
        &context.precomputed_weights,
        &verifier_queries,
        &mut transcript,
    )
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
#[no_mangle]
/// C ABI: like `verify_proof`, but proof and queries use the uncompressed
/// (64-byte commitment) encoding.
pub extern "C" fn verify_proof_uncompressed(
    ctx: *mut Context,
    input: *const u8,
    len: usize,
) -> bool {
    // Chunk is now 65 + 32 = 97 because first commitment is uncompressed as 64 bytes
    const CHUNK_SIZE: usize = 97; // TODO: get this from ipa-multipoint
    const PROOF_SIZE: usize = 1120; // TODO: get this from ipa-multipoint
    // SAFETY: caller guarantees `input` covers `len` readable bytes and `ctx`
    // is a live Context from context_new.
    let (proof_slice, verifier_queries_slices, context) = unsafe {
        let input_slice = std::slice::from_raw_parts(input, len);
        let (proof_slice, verifier_queries_slices) = input_slice.split_at(PROOF_SIZE);
        let ctx_ref = &*ctx;
        (proof_slice, verifier_queries_slices, ctx_ref)
    };
    let verifier_queries_bytes = verifier_queries_slices.chunks_exact(CHUNK_SIZE);
    assert!(
        verifier_queries_bytes.remainder().is_empty(),
        "There should be no left over bytes when chunking the verifier queries"
    );
    // `chunks_exact(..).len()` already counts whole CHUNK_SIZE-sized chunks,
    // so it IS the number of openings. (The previous `/ CHUNK_SIZE` divided
    // twice, making the capacity hint wrong -- harmless but under-reserved.)
    let num_openings = verifier_queries_bytes.len();
    // - Deserialize verifier queries
    //
    let mut verifier_queries: Vec<VerifierQuery> = Vec::with_capacity(num_openings);
    for verifier_query_bytes in verifier_queries_bytes {
        let verifier_query = deserialize_verifier_query_uncompressed(verifier_query_bytes);
        verifier_queries.push(verifier_query);
    }
    // - Check proof
    //
    let proof = MultiPointProof::from_bytes_unchecked_uncompressed(proof_slice, 256).unwrap();
    let mut transcript = Transcript::new(b"verkle");
    MultiPointProof::check(
        &proof,
        // Borrow the CRS directly; the previous `.clone()` copied the whole
        // CRS only to immediately borrow it.
        &context.crs,
        &context.precomputed_weights,
        &verifier_queries,
        &mut transcript,
    )
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/bindings/csharp/rust_code/build.rs | bindings/csharp/rust_code/build.rs | use std::{env, fs, path::PathBuf};
use toml::Value;
/// The path where the generated bindings file will be written, relative to the bindings folder.
const PATH_FOR_CSHARP_BINDINGS_FILE: &str =
"csharp/csharp_code/Verkle.Bindings/native_methods.g.cs";
/// Build script: generates the C# P/Invoke bindings file for the sibling
/// C crate via csbindgen.
fn main() {
    let c_crate_name = get_package_name_of_c_crate();
    // Re-run whenever anything under the bindings folder changes.
    println!(
        "cargo:rerun-if-changed={}",
        path_to_bindings_folder().display()
    );
    let bindings_root = path_to_bindings_folder();
    let output_file = bindings_root.join(PATH_FOR_CSHARP_BINDINGS_FILE);
    let c_lib_entry_point = path_to_c_crate().join("src/lib.rs");
    csbindgen::Builder::default()
        .input_extern_file(c_lib_entry_point)
        .csharp_namespace("Verkle.Bindings")
        .csharp_dll_name(c_crate_name)
        .csharp_class_name("NativeMethods")
        .csharp_use_nint_types(false)
        .generate_csharp_file(output_file)
        .expect("csharp bindgen failed to generate bindgen file");
}
/// Returns the repository's `bindings/` directory.
///
/// This crate lives at `bindings/csharp/rust_code`, so the bindings root is
/// two directory levels above the crate manifest dir.
fn path_to_bindings_folder() -> PathBuf {
    let manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap());
    manifest_dir
        .ancestors()
        .nth(2)
        .expect("crate dir should have two parent directories")
        .to_path_buf()
}
/// Returns the path of the sibling C-bindings crate (`bindings/c`).
fn path_to_c_crate() -> PathBuf {
    let bindings_root = path_to_bindings_folder();
    bindings_root.join("c")
}
/// Reads the sibling C crate's Cargo.toml and returns its `package.name`.
///
/// Panics when the manifest cannot be read, parsed, or lacks a package name
/// (a build script should fail loudly here).
fn get_package_name_of_c_crate() -> String {
    let manifest_path = path_to_c_crate().join("Cargo.toml");
    // Read the Cargo.toml of the other crate
    let manifest_text =
        fs::read_to_string(manifest_path).expect("Failed to read Cargo.toml");
    // Parse the Cargo.toml content
    let manifest: Value = manifest_text.parse().expect("Failed to parse Cargo.toml");
    // Access the package name from the parsed Cargo.toml
    manifest["package"]["name"]
        .as_str()
        .expect("Failed to get package name")
        .to_string()
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/bindings/csharp/rust_code/src/lib.rs | bindings/csharp/rust_code/src/lib.rs | // This is a dummy crate being used to generate the csharp bindings file with build.rs
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/ipa-multipoint/src/multiproof.rs | ipa-multipoint/src/multiproof.rs | // We get given multiple polynomials evaluated at different points
#![allow(non_snake_case)]
use crate::crs::CRS;
use crate::ipa::{slow_vartime_multiscalar_mul, IPAProof};
use crate::lagrange_basis::{LagrangeBasis, PrecomputedWeights};
use crate::math_utils::powers_of;
use crate::transcript::Transcript;
use crate::transcript::TranscriptProtocol;
use std::collections::HashMap;
use banderwagon::{trait_defs::*, Element, Fr};
/// Marker type that namespaces the multi-point prover (see `MultiPoint::open`).
pub struct MultiPoint;
/// One opening the prover wants to prove: polynomial `poly` (committed as
/// `commitment`) evaluates to `result` at evaluation-domain index `point`.
#[derive(Clone, Debug)]
pub struct ProverQuery {
    pub commitment: Element,
    pub poly: LagrangeBasis, // TODO: Make this a reference so that upstream libraries do not need to clone
    // Given a function f, we use z_i to denote the input point and y_i to denote the output, ie f(z_i) = y_i
    pub point: usize,
    pub result: Fr,
}
impl From<ProverQuery> for VerifierQuery {
    /// Drops the polynomial (which the verifier never sees) and lifts the
    /// domain index into the scalar field.
    fn from(pq: ProverQuery) -> Self {
        let point = Fr::from(pq.point as u128);
        VerifierQuery {
            commitment: pq.commitment,
            point,
            result: pq.result,
        }
    }
}
/// Verifier-side view of an opening: the point is a field element (the
/// prover's domain index lifted into `Fr` -- see the `From<ProverQuery>` impl).
pub struct VerifierQuery {
    pub commitment: Element,
    pub point: Fr,
    pub result: Fr,
}
//XXX: change to group_prover_queries_by_point
fn group_prover_queries<'a>(
prover_queries: &'a [ProverQuery],
challenges: &'a [Fr],
) -> HashMap<usize, Vec<(&'a ProverQuery, &'a Fr)>> {
// We want to group all of the polynomials which are evaluated at the same point together
use itertools::Itertools;
prover_queries
.iter()
.zip(challenges.iter())
.into_group_map_by(|x| x.0.point)
}
impl MultiPoint {
    /// Creates one aggregated proof for all `queries`.
    ///
    /// Transcript flow (order matters -- the verifier replays it exactly):
    /// absorb every (C, z, y), squeeze `r`; commit the combined quotient
    /// g(X) as D and squeeze `t`; commit g_1(X) as E; finally produce a
    /// single IPA opening of g_1 - g at `t`.
    pub fn open(
        crs: CRS,
        precomp: &PrecomputedWeights,
        transcript: &mut Transcript,
        queries: Vec<ProverQuery>,
    ) -> MultiPointProof {
        transcript.domain_sep(b"multiproof");
        // 1. Compute `r`
        //
        // Add points and evaluations
        for query in queries.iter() {
            transcript.append_point(b"C", &query.commitment);
            transcript.append_scalar(b"z", &Fr::from(query.point as u128));
            // XXX: note that since we are always opening on the domain
            // the prover does not need to pass y_i explicitly
            // It's just an index operation on the lagrange basis
            transcript.append_scalar(b"y", &query.result)
        }
        let r = transcript.challenge_scalar(b"r");
        let powers_of_r = powers_of(r, queries.len());
        // Queries opening at the same domain point can be folded into one
        // polynomial: sum_i r^i * f_i(X), grouped per point z.
        let grouped_queries = group_prover_queries(&queries, &powers_of_r);
        // aggregate all of the queries evaluated at the same point
        let aggregated_queries: Vec<_> = grouped_queries
            .into_iter()
            .map(|(point, queries_challenges)| {
                let mut aggregated_polynomial = vec![Fr::zero(); crs.n];
                let scaled_lagrange_polynomials =
                    queries_challenges.into_iter().map(|(query, challenge)| {
                        // scale the polynomial by the challenge
                        query.poly.values().iter().map(move |x| *x * challenge)
                    });
                for poly_mul_challenge in scaled_lagrange_polynomials {
                    for (result, scaled_poly) in
                        aggregated_polynomial.iter_mut().zip(poly_mul_challenge)
                    {
                        *result += scaled_poly;
                    }
                }
                (point, LagrangeBasis::new(aggregated_polynomial))
            })
            .collect();
        // Compute g(X): the sum over points of agg_f(X)/(X - z), each quotient
        // taken in the lagrange basis via the precomputed weights.
        let g_x: LagrangeBasis = aggregated_queries
            .iter()
            .map(|(point, agg_f_x)| (agg_f_x).divide_by_linear_vanishing(precomp, *point))
            .fold(LagrangeBasis::zero(), |mut res, val| {
                res = res + val;
                res
            });
        let g_x_comm = crs.commit_lagrange_poly(&g_x);
        transcript.append_point(b"D", &g_x_comm);
        // 2. Compute g_1(t)
        //
        //
        let t = transcript.challenge_scalar(b"t");
        //
        // Invert all (t - z_i) denominators with a single batch inversion.
        let mut g1_den: Vec<_> = aggregated_queries
            .iter()
            .map(|(z_i, _)| t - Fr::from(*z_i as u128))
            .collect();
        batch_inversion(&mut g1_den);
        // g_1(X) = sum over points of agg_f(X) / (t - z), still lagrange form.
        let g1_x = aggregated_queries
            .into_iter()
            .zip(g1_den)
            .map(|((_, agg_f_x), den_inv)| {
                let term: Vec<_> = agg_f_x
                    .values()
                    .iter()
                    .map(|coeff| den_inv * coeff)
                    .collect();
                LagrangeBasis::new(term)
            })
            .fold(LagrangeBasis::zero(), |mut res, val| {
                res = res + val;
                res
            });
        let g1_comm = crs.commit_lagrange_poly(&g1_x);
        transcript.append_point(b"E", &g1_comm);
        //3. Compute g_1(X) - g(X)
        // This is the polynomial, we will create an opening for
        let g_3_x = &g1_x - &g_x;
        let g_3_x_comm = g1_comm - g_x_comm;
        // 4. Compute the IPA for g_3 at t (t is outside the domain w.h.p.)
        let g_3_ipa = open_point_outside_of_domain(crs, precomp, transcript, g_3_x, g_3_x_comm, t);
        MultiPointProof {
            open_proof: g_3_ipa,
            g_x_comm,
        }
    }
}
/// Aggregated proof that several polynomial openings hold simultaneously:
/// the commitment `D = [g(X)]` to the combined quotient polynomial, plus a
/// single IPA opening proof for `g_1 - g` at the transcript challenge `t`
/// (see `MultiPoint::open`).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct MultiPointProof {
    // TODO: These are now public because the golang code
    // exposes the proof structure to client devs,
    // and if we don't expose, then we can't deserialize the json
    // proof into a MultiPointProof
    pub open_proof: IPAProof,
    pub g_x_comm: Element,
}
impl MultiPointProof {
    /// Deserializes a proof from its compressed wire format:
    /// `[32-byte compressed g_x commitment || IPA proof bytes]`.
    ///
    /// Returns `InvalidData` when the buffer is shorter than 32 bytes or the
    /// commitment does not decode (previously a short buffer panicked on the
    /// slice index -- this resolves the in-code TODO).
    pub fn from_bytes(bytes: &[u8], poly_degree: usize) -> crate::IOResult<MultiPointProof> {
        use crate::{IOError, IOErrorKind};
        let g_x_comm_bytes = bytes
            .get(0..32)
            .ok_or(IOError::from(IOErrorKind::InvalidData))?;
        let ipa_bytes = &bytes[32..];
        let g_x_comm: Element =
            Element::from_bytes(g_x_comm_bytes).ok_or(IOError::from(IOErrorKind::InvalidData))?;
        let open_proof = IPAProof::from_bytes(ipa_bytes, poly_degree)?;
        Ok(MultiPointProof {
            open_proof,
            g_x_comm,
        })
    }
    /// Deserializes from the uncompressed wire format:
    /// `[64-byte uncompressed g_x commitment || IPA proof bytes]`.
    ///
    /// "Unchecked" refers to skipping the validity check on the commitment
    /// bytes, not to length validation: a buffer shorter than 64 bytes now
    /// yields `InvalidData` instead of panicking on the slice conversion.
    pub fn from_bytes_unchecked_uncompressed(
        bytes: &[u8],
        poly_degree: usize,
    ) -> crate::IOResult<MultiPointProof> {
        use crate::{IOError, IOErrorKind};
        let g_x_comm_bytes: [u8; 64] = bytes
            .get(..64)
            .and_then(|slice| slice.try_into().ok())
            .ok_or(IOError::from(IOErrorKind::InvalidData))?;
        let ipa_bytes = &bytes[64..];
        let g_x_comm = Element::from_bytes_unchecked_uncompressed(g_x_comm_bytes);
        let open_proof = IPAProof::from_bytes_unchecked_uncompressed(ipa_bytes, poly_degree)?;
        Ok(MultiPointProof {
            open_proof,
            g_x_comm,
        })
    }
    /// Serializes to the compressed wire format (commitment first, then proof).
    pub fn to_bytes(&self) -> crate::IOResult<Vec<u8>> {
        let mut bytes = Vec::with_capacity(self.open_proof.serialized_size() + 32);
        bytes.extend(self.g_x_comm.to_bytes());
        bytes.extend(self.open_proof.to_bytes()?);
        Ok(bytes)
    }
    /// Serializes to the uncompressed wire format (64-byte commitment first).
    pub fn to_bytes_uncompressed(&self) -> crate::IOResult<Vec<u8>> {
        let mut bytes = Vec::with_capacity(self.open_proof.uncompressed_size() + 64);
        bytes.extend(self.g_x_comm.to_bytes_uncompressed());
        bytes.extend(self.open_proof.to_bytes_uncompressed()?);
        Ok(bytes)
    }
}
impl MultiPointProof {
    /// Verifies this aggregated opening proof against `queries`.
    ///
    /// Mirrors the prover's transcript in `MultiPoint::open`: both sides must
    /// absorb the same labelled values in the same order, otherwise the
    /// challenges `r` and `t` (and hence the verdict) will differ.
    ///
    /// Returns `true` iff the inner IPA opening of g_3(X) = g_1(X) - g(X)
    /// at `t` verifies.
    pub fn check(
        &self,
        crs: &CRS,
        precomp: &PrecomputedWeights,
        queries: &[VerifierQuery],
        transcript: &mut Transcript,
    ) -> bool {
        transcript.domain_sep(b"multiproof");
        // 1. Compute `r`
        //
        // Add points and evaluations
        for query in queries.iter() {
            transcript.append_point(b"C", &query.commitment);
            transcript.append_scalar(b"z", &query.point);
            transcript.append_scalar(b"y", &query.result);
        }
        let r = transcript.challenge_scalar(b"r");
        let powers_of_r = powers_of(r, queries.len());
        // 2. Compute `t`
        transcript.append_point(b"D", &self.g_x_comm);
        let t = transcript.challenge_scalar(b"t");
        // 3. Compute g_2(t)
        //
        // g_2(t) = sum_i r^i * y_i / (t - z_i). The denominators (t - z_i)
        // are inverted in one batch to avoid a field inversion per query.
        let mut g2_den: Vec<_> = queries.iter().map(|query| t - query.point).collect();
        batch_inversion(&mut g2_den);
        // helper_scalars[i] = r^i / (t - z_i); reused below for both g_2(t)
        // and the linear combination of commitments.
        let helper_scalars: Vec<_> = powers_of_r
            .iter()
            .zip(g2_den)
            .map(|(r_i, den_inv)| den_inv * r_i)
            .collect();
        let g2_t: Fr = helper_scalars
            .iter()
            .zip(queries.iter())
            .map(|(r_i_den_inv, query)| *r_i_den_inv * query.result)
            .sum();
        //4. Compute [g_1(X)] = E
        let comms: Vec<_> = queries.iter().map(|query| query.commitment).collect();
        let g1_comm = slow_vartime_multiscalar_mul(helper_scalars.iter(), comms.iter());
        transcript.append_point(b"E", &g1_comm);
        // E - D
        let g3_comm = g1_comm - self.g_x_comm;
        // Check IPA
        let b = LagrangeBasis::evaluate_lagrange_coefficients(precomp, crs.n, t); // TODO: we could put this as a method on PrecomputedWeights
        self.open_proof
            .verify_multiexp(transcript, crs, b, g3_comm, t, g2_t)
    }
}
// TODO: we could probably get rid of this method altogether and just do this in the multiproof
// TODO method
// TODO: check that the point is actually not in the domain
/// Produces an IPA opening proof for `polynomial` at a point `z_i` that is
/// assumed to lie outside of the evaluation domain.
pub(crate) fn open_point_outside_of_domain(
    crs: CRS,
    precomp: &PrecomputedWeights,
    transcript: &mut Transcript,
    polynomial: LagrangeBasis,
    commitment: Element,
    z_i: Fr,
) -> IPAProof {
    // The Lagrange coefficients at z_i act as the `b` vector of the inner
    // product; the polynomial's evaluations act as the `a` vector.
    let b_vector = LagrangeBasis::evaluate_lagrange_coefficients(precomp, crs.n, z_i);
    let a_vector = polynomial.values().to_vec();
    crate::ipa::create(transcript, crs, a_vector, commitment, b_vector, z_i)
}
#[test]
fn open_multiproof_lagrange() {
    use ark_std::One;

    // A single polynomial in evaluation form, opened at index 1 of the domain.
    let polynomial = LagrangeBasis::new(vec![
        Fr::one(),
        Fr::from(10u128),
        Fr::from(200u128),
        Fr::from(78u128),
    ]);
    let domain_size = polynomial.values().len();
    let eval_index = 1;
    let expected_value = polynomial.evaluate_in_domain(eval_index);

    let crs = CRS::new(domain_size, b"random seed");
    let commitment = crs.commit_lagrange_poly(&polynomial);

    let prover_query = ProverQuery {
        commitment,
        poly: polynomial,
        point: eval_index,
        result: expected_value,
    };

    let precomp = PrecomputedWeights::new(domain_size);

    let mut transcript = Transcript::new(b"foo");
    let multiproof = MultiPoint::open(
        crs.clone(),
        &precomp,
        &mut transcript,
        vec![prover_query.clone()],
    );

    // Verification replays a fresh transcript seeded with the same label.
    let mut transcript = Transcript::new(b"foo");
    let verifier_query: VerifierQuery = prover_query.into();
    assert!(multiproof.check(&crs, &precomp, &[verifier_query], &mut transcript));
}
#[test]
fn open_multiproof_lagrange_2_polys() {
    use ark_std::One;

    // One polynomial opened at two different domain indices.
    let polynomial = LagrangeBasis::new(vec![
        Fr::one(),
        Fr::from(10u128),
        Fr::from(200u128),
        Fr::from(78u128),
    ]);
    let domain_size = polynomial.values().len();
    let first_index = 1;
    let first_value = polynomial.evaluate_in_domain(first_index);
    let second_index = 2;
    let second_value = polynomial.evaluate_in_domain(second_index);

    let crs = CRS::new(domain_size, b"random seed");
    let commitment = crs.commit_lagrange_poly(&polynomial);

    let prover_query_i = ProverQuery {
        commitment,
        poly: polynomial.clone(),
        point: first_index,
        result: first_value,
    };
    let prover_query_j = ProverQuery {
        commitment,
        poly: polynomial,
        point: second_index,
        result: second_value,
    };

    let precomp = PrecomputedWeights::new(domain_size);

    let mut transcript = Transcript::new(b"foo");
    let multiproof = MultiPoint::open(
        crs.clone(),
        &precomp,
        &mut transcript,
        vec![prover_query_i.clone(), prover_query_j.clone()],
    );

    // Verify against a fresh transcript seeded with the same label.
    let mut transcript = Transcript::new(b"foo");
    let verifier_query_i: VerifierQuery = prover_query_i.into();
    let verifier_query_j: VerifierQuery = prover_query_j.into();
    assert!(multiproof.check(
        &crs,
        &precomp,
        &[verifier_query_i, verifier_query_j],
        &mut transcript,
    ));
}
// Cross-implementation consistency test: the hard-coded hex values pin the
// commitment, transcript state, evaluation and proof serialization so they
// must not drift between releases or diverge from other implementations.
#[test]
fn test_ipa_consistency() {
    use crate::math_utils::inner_product;
    use banderwagon::trait_defs::*;
    let n = 256;
    let crs = CRS::new(n, b"eth_verkle_oct_2021");
    let precomp = PrecomputedWeights::new(n);
    let input_point = Fr::from(2101_u128);
    // Polynomial in evaluation form: 1..=32 repeated across the domain.
    let poly: Vec<Fr> = (0..n).map(|i| Fr::from(((i % 32) + 1) as u128)).collect();
    let polynomial = LagrangeBasis::new(poly.clone());
    let commitment = crs.commit_lagrange_poly(&polynomial);
    assert_eq!(
        hex::encode(commitment.to_bytes()),
        "1b9dff8f5ebbac250d291dfe90e36283a227c64b113c37f1bfb9e7a743cdb128"
    );
    let mut prover_transcript = Transcript::new(b"test");
    let proof = open_point_outside_of_domain(
        crs.clone(),
        &precomp,
        &mut prover_transcript,
        polynomial,
        commitment,
        input_point,
    );
    // Drawing a "state" challenge fingerprints the whole prover transcript.
    let p_challenge = prover_transcript.challenge_scalar(b"state");
    let mut bytes = [0u8; 32];
    p_challenge.serialize_compressed(&mut bytes[..]).unwrap();
    assert_eq!(
        hex::encode(bytes),
        "0a81881cbfd7d7197a54ebd67ed6a68b5867f3c783706675b34ece43e85e7306"
    );
    let mut verifier_transcript = Transcript::new(b"test");
    let b = LagrangeBasis::evaluate_lagrange_coefficients(&precomp, crs.n, input_point);
    let output_point = inner_product(&poly, &b);
    let mut bytes = [0u8; 32];
    output_point.serialize_compressed(&mut bytes[..]).unwrap();
    assert_eq!(
        hex::encode(bytes),
        "4a353e70b03c89f161de002e8713beec0d740a5e20722fd5bd68b30540a33208"
    );
    assert!(proof.verify_multiexp(
        &mut verifier_transcript,
        &crs,
        b,
        commitment,
        input_point,
        output_point,
    ));
    // Prover and verifier transcripts must end in the same state.
    let v_challenge = verifier_transcript.challenge_scalar(b"state");
    assert_eq!(p_challenge, v_challenge);
    // Check that serialization and deserialization is consistent
    let bytes = proof.to_bytes().unwrap();
    let deserialized_proof = IPAProof::from_bytes(&bytes, crs.n).unwrap();
    assert_eq!(deserialized_proof, proof);
    // Check that serialization is consistent with other implementations
    let got = hex::encode(&bytes);
    let expected = "273395a8febdaed38e94c3d874e99c911a47dd84616d54c55021d5c4131b507e46a4ec2c7e82b77ec2f533994c91ca7edaef212c666a1169b29c323eabb0cf690e0146638d0e2d543f81da4bd597bf3013e1663f340a8f87b845495598d0a3951590b6417f868edaeb3424ff174901d1185a53a3ee127fb7be0af42dda44bf992885bde279ef821a298087717ef3f2b78b2ede7f5d2ea1b60a4195de86a530eb247fd7e456012ae9a070c61635e55d1b7a340dfab8dae991d6273d099d9552815434cc1ba7bcdae341cf7928c6f25102370bdf4b26aad3af654d9dff4b3735661db3177342de5aad774a59d3e1b12754aee641d5f9cd1ecd2751471b308d2d8410add1c9fcc5a2b7371259f0538270832a98d18151f653efbc60895fab8be9650510449081626b5cd24671d1a3253487d44f589c2ff0da3557e307e520cf4e0054bbf8bdffaa24b7e4cce5092ccae5a08281ee24758374f4e65f126cacce64051905b5e2038060ad399c69ca6cb1d596d7c9cb5e161c7dcddc1a7ad62660dd4a5f69b31229b80e6b3df520714e4ea2b5896ebd48d14c7455e91c1ecf4acc5ffb36937c49413b7d1005dd6efbd526f5af5d61131ca3fcdae1218ce81c75e62b39100ec7f474b48a2bee6cef453fa1bc3db95c7c6575bc2d5927cbf7413181ac905766a4038a7b422a8ef2bf7b5059b5c546c19a33c1049482b9a9093f864913ca82290decf6e9a65bf3f66bc3ba4a8ed17b56d890a83bcbe74435a42499dec115";
    assert_eq!(got, expected)
}
// Cross-implementation consistency test for the multipoint proof: pins the
// prover transcript state and the serialized proof bytes so they cannot
// silently diverge from other implementations.
#[test]
fn multiproof_consistency() {
    use banderwagon::trait_defs::*;
    let n = 256;
    let crs = CRS::new(n, b"eth_verkle_oct_2021");
    let precomp = PrecomputedWeights::new(n);
    // 1 to 32 repeated 8 times
    let poly_a: Vec<Fr> = (0..n).map(|i| Fr::from(((i % 32) + 1) as u128)).collect();
    let polynomial_a = LagrangeBasis::new(poly_a.clone());
    // 32 to 1 repeated 8 times
    let poly_b: Vec<Fr> = (0..n)
        .rev()
        .map(|i| Fr::from(((i % 32) + 1) as u128))
        .collect();
    let polynomial_b = LagrangeBasis::new(poly_b.clone());
    // Both polynomials are opened at domain index 0; their first evaluations
    // are 1 and 32 respectively by construction.
    let point_a = 0;
    let y_a = Fr::one();
    let point_b = 0;
    let y_b = Fr::from(32_u128);
    let poly_comm_a = crs.commit_lagrange_poly(&polynomial_a);
    let poly_comm_b = crs.commit_lagrange_poly(&polynomial_b);
    let prover_query_a = ProverQuery {
        commitment: poly_comm_a,
        poly: polynomial_a,
        point: point_a,
        result: y_a,
    };
    let prover_query_b = ProverQuery {
        commitment: poly_comm_b,
        poly: polynomial_b,
        point: point_b,
        result: y_b,
    };
    let mut prover_transcript = Transcript::new(b"test");
    let multiproof = MultiPoint::open(
        crs.clone(),
        &precomp,
        &mut prover_transcript,
        vec![prover_query_a.clone(), prover_query_b.clone()],
    );
    // Drawing a "state" challenge fingerprints the whole prover transcript.
    let p_challenge = prover_transcript.challenge_scalar(b"state");
    let mut bytes = [0u8; 32];
    p_challenge.serialize_compressed(&mut bytes[..]).unwrap();
    assert_eq!(
        hex::encode(bytes),
        "eee8a80357ff74b766eba39db90797d022e8d6dee426ded71234241be504d519"
    );
    let mut verifier_transcript = Transcript::new(b"test");
    let verifier_query_a: VerifierQuery = prover_query_a.into();
    let verifier_query_b: VerifierQuery = prover_query_b.into();
    assert!(multiproof.check(
        &crs,
        &precomp,
        &[verifier_query_a, verifier_query_b],
        &mut verifier_transcript
    ));
    // Check that serialization and deserialization is consistent
    let bytes = multiproof.to_bytes().unwrap();
    let deserialized_proof = MultiPointProof::from_bytes(&bytes, crs.n).unwrap();
    assert_eq!(deserialized_proof, multiproof);
    // Check that serialization is consistent with other implementations
    let got = hex::encode(bytes);
    let expected = "4f53588244efaf07a370ee3f9c467f933eed360d4fbf7a19dfc8bc49b67df4711bf1d0a720717cd6a8c75f1a668cb7cbdd63b48c676b89a7aee4298e71bd7f4013d7657146aa9736817da47051ed6a45fc7b5a61d00eb23e5df82a7f285cc10e67d444e91618465ca68d8ae4f2c916d1942201b7e2aae491ef0f809867d00e83468fb7f9af9b42ede76c1e90d89dd789ff22eb09e8b1d062d8a58b6f88b3cbe80136fc68331178cd45a1df9496ded092d976911b5244b85bc3de41e844ec194256b39aeee4ea55538a36139211e9910ad6b7a74e75d45b869d0a67aa4bf600930a5f760dfb8e4df9938d1f47b743d71c78ba8585e3b80aba26d24b1f50b36fa1458e79d54c05f58049245392bc3e2b5c5f9a1b99d43ed112ca82b201fb143d401741713188e47f1d6682b0bf496a5d4182836121efff0fd3b030fc6bfb5e21d6314a200963fe75cb856d444a813426b2084dfdc49dca2e649cb9da8bcb47859a4c629e97898e3547c591e39764110a224150d579c33fb74fa5eb96427036899c04154feab5344873d36a53a5baefd78c132be419f3f3a8dd8f60f72eb78dd5f43c53226f5ceb68947da3e19a750d760fb31fa8d4c7f53bfef11c4b89158aa56b1f4395430e16a3128f88e234ce1df7ef865f2d2c4975e8c82225f578310c31fd41d265fd530cbfa2b8895b228a510b806c31dff3b1fa5c08bffad443d567ed0e628febdd22775776e0cc9cebcaea9c6df9279a5d91dd0ee5e7a0434e989a160005321c97026cb559f71db23360105460d959bcdf74bee22c4ad8805a1d497507";
    assert_eq!(got, expected)
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/ipa-multipoint/src/ipa.rs | ipa-multipoint/src/ipa.rs | #![allow(non_snake_case)]
use crate::crs::CRS;
use crate::math_utils::inner_product;
use crate::transcript::{Transcript, TranscriptProtocol};
use banderwagon::{multi_scalar_mul, trait_defs::*, Element, Fr};
use itertools::Itertools;
use crate::{IOError, IOErrorKind, IOResult};
use std::iter;
/// An inner-product-argument opening proof: the left/right cross-commitments
/// produced by each halving round of `create`, plus the final folded scalar.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct IPAProof {
    // TODO: These are now public because the golang code
    // exposes the proof structure to client devs,
    // and if we don't expose, then we can't deserialize the json
    // proof into a IPAProof.
    /// Left cross-commitments, one per reduction round.
    pub L_vec: Vec<Element>,
    /// Right cross-commitments, one per reduction round.
    pub R_vec: Vec<Element>,
    /// The single remaining element of the `a` vector after all rounds.
    pub a: Fr,
}
impl IPAProof {
    /// Size in bytes of the compressed serialization:
    /// `L_vec` and `R_vec` points (32 bytes each) plus the 32-byte scalar `a`.
    pub(crate) fn serialized_size(&self) -> usize {
        (self.L_vec.len() * 2 + 1) * 32
    }
    /// Size in bytes of the uncompressed serialization:
    /// `L_vec` and `R_vec` points (64 bytes each) plus the 32-byte scalar `a`.
    pub(crate) fn uncompressed_size(&self) -> usize {
        (self.L_vec.len() * 2 * 64) + 32
    }
    /// Deserializes an `IPAProof` from its compressed encoding.
    ///
    /// Layout: `log2(poly_degree)` L points, then the same number of R points
    /// (32 bytes each, compressed), then the 32-byte scalar `a`.
    ///
    /// # Errors
    /// Returns `InvalidData` if the slice has the wrong length or any
    /// component fails to deserialize.
    pub fn from_bytes(bytes: &[u8], poly_degree: usize) -> IOResult<IPAProof> {
        // Given the polynomial degree, we will have log2(poly_degree) * 2 points.
        let num_points = log2(poly_degree) as usize;
        // Previously an `assert_eq!`; malformed input should surface as an
        // error rather than a panic, since this parses untrusted bytes.
        // (Comparing lengths as `usize` also avoids the old lossy `as u32` cast.)
        let expected_len = ((num_points * 2) + 1) * 32;
        if bytes.len() != expected_len {
            return Err(IOError::from(IOErrorKind::InvalidData));
        }

        let mut L_vec = Vec::with_capacity(num_points);
        let mut R_vec = Vec::with_capacity(num_points);

        // Chunk the byte slice into 32-byte compressed group elements.
        let mut chunks = bytes.chunks_exact(32);

        for _ in 0..num_points {
            // The length was validated above, so every chunk is present.
            let chunk = chunks.next().unwrap();
            let point: Element =
                Element::from_bytes(chunk).ok_or(IOError::from(IOErrorKind::InvalidData))?;
            L_vec.push(point)
        }

        for _ in 0..num_points {
            let chunk = chunks.next().unwrap();
            let point: Element =
                Element::from_bytes(chunk).ok_or(IOError::from(IOErrorKind::InvalidData))?;
            R_vec.push(point)
        }

        let last_32_bytes = chunks.next().unwrap();

        let a: Fr = CanonicalDeserialize::deserialize_compressed(last_32_bytes)
            .map_err(|_| IOError::from(IOErrorKind::InvalidData))?;

        Ok(IPAProof { L_vec, R_vec, a })
    }
    /// Deserializes an `IPAProof` from its uncompressed encoding; points are
    /// decoded without the validity checks performed by `Element::from_bytes`.
    ///
    /// Layout: `log2(poly_degree)` L points, then the same number of R points
    /// (64 bytes each, uncompressed), then the 32-byte scalar `a`.
    ///
    /// # Errors
    /// Returns `InvalidData` if the slice has the wrong length or the scalar
    /// fails to deserialize.
    pub fn from_bytes_unchecked_uncompressed(
        bytes: &[u8],
        poly_degree: usize,
    ) -> IOResult<IPAProof> {
        // Given the polynomial degree, we will have log2(poly_degree) * 2 points.
        let num_points = log2(poly_degree) as usize;
        // Error instead of the previous `assert_eq!` — see `from_bytes`.
        let expected_len = (num_points * 2 * 64) + 32;
        if bytes.len() != expected_len {
            return Err(IOError::from(IOErrorKind::InvalidData));
        }

        // The trailing 32 bytes hold the scalar `a`.
        let (points_bytes, a_bytes) = bytes.split_at(bytes.len() - 32);

        let mut L_vec = Vec::with_capacity(num_points);
        let mut R_vec = Vec::with_capacity(num_points);

        // Chunk the byte slice into 64-byte uncompressed group elements.
        let mut chunks = points_bytes.chunks_exact(64);

        for _ in 0..num_points {
            // Length validated above, so chunk and conversion cannot fail.
            let L_bytes: [u8; 64] = chunks.next().unwrap().try_into().unwrap();
            L_vec.push(Element::from_bytes_unchecked_uncompressed(L_bytes));
        }
        for _ in 0..num_points {
            let R_bytes: [u8; 64] = chunks.next().unwrap().try_into().unwrap();
            R_vec.push(Element::from_bytes_unchecked_uncompressed(R_bytes));
        }

        let a: Fr = CanonicalDeserialize::deserialize_compressed(a_bytes)
            .map_err(|_| IOError::from(IOErrorKind::InvalidData))?;

        Ok(IPAProof { L_vec, R_vec, a })
    }
    /// Serializes the proof in compressed form.
    pub fn to_bytes(&self) -> IOResult<Vec<u8>> {
        // We do not serialize the length. We assume that the deserializer knows this.
        let mut bytes = Vec::with_capacity(self.serialized_size());
        for L in &self.L_vec {
            bytes.extend(L.to_bytes());
        }
        for R in &self.R_vec {
            bytes.extend(R.to_bytes());
        }
        self.a
            .serialize_compressed(&mut bytes)
            .map_err(|_| IOError::from(IOErrorKind::InvalidData))?;
        Ok(bytes)
    }
    /// Serializes the proof in uncompressed form.
    pub fn to_bytes_uncompressed(&self) -> IOResult<Vec<u8>> {
        let mut bytes = Vec::with_capacity(self.uncompressed_size());
        for L in &self.L_vec {
            bytes.extend(L.to_bytes_uncompressed());
        }
        for R in &self.R_vec {
            bytes.extend(R.to_bytes_uncompressed());
        }
        self.a
            .serialize_uncompressed(&mut bytes)
            .map_err(|_| IOError::from(IOErrorKind::InvalidData))?;
        Ok(bytes)
    }
}
/// Creates an IPA opening proof that the committed vector `a_vec` (over basis
/// `crs.G`, commitment `a_comm`) has inner product `<a_vec, b_vec>` with the
/// caller-supplied weight vector `b_vec` at `input_point`.
///
/// The argument runs for log2(n) rounds; each round commits to the two cross
/// terms (L, R), draws a folding challenge `x`, and halves `a`, `b` and `G`.
///
/// # Panics
/// Panics if the vectors have mismatched lengths or a non-power-of-two length.
pub fn create(
    transcript: &mut Transcript,
    mut crs: CRS,
    mut a_vec: Vec<Fr>,
    a_comm: Element,
    mut b_vec: Vec<Fr>,
    // This is the z in f(z)
    input_point: Fr,
) -> IPAProof {
    transcript.domain_sep(b"ipa");
    let mut a = &mut a_vec[..];
    let mut b = &mut b_vec[..];
    let mut G = &mut crs.G[..];
    let n = G.len();
    // All of the input vectors must have the same length.
    // NOTE(review): the first assert compares G.len() with itself (n was just
    // taken from G.len()) and is therefore a no-op.
    assert_eq!(G.len(), n);
    assert_eq!(a.len(), n);
    assert_eq!(b.len(), n);
    // All of the input vectors must have a length that is a power of two.
    assert!(n.is_power_of_two());
    // transcript.append_u64(b"n", n as u64);
    let output_point = inner_product(a, b);
    transcript.append_point(b"C", &a_comm);
    transcript.append_scalar(b"input point", &input_point);
    transcript.append_scalar(b"output point", &output_point);
    // `w` rescales Q so the claimed inner product is bound into the commitment.
    let w = transcript.challenge_scalar(b"w");
    let Q = crs.Q * w; // XXX: It would not hurt to add this augmented point into the transcript
    let num_rounds = log2(n);
    let mut L_vec: Vec<Element> = Vec::with_capacity(num_rounds as usize);
    let mut R_vec: Vec<Element> = Vec::with_capacity(num_rounds as usize);
    for _k in 0..num_rounds {
        let (a_L, a_R) = halve(a);
        let (b_L, b_R) = halve(b);
        let (G_L, G_R) = halve(G);
        // Cross terms between the two halves, committed alongside Q.
        let z_L = inner_product(a_R, b_L);
        let z_R = inner_product(a_L, b_R);
        let L = slow_vartime_multiscalar_mul(
            a_R.iter().chain(iter::once(&z_L)),
            G_L.iter().chain(iter::once(&Q)),
        );
        let R = slow_vartime_multiscalar_mul(
            a_L.iter().chain(iter::once(&z_R)),
            G_R.iter().chain(iter::once(&Q)),
        );
        L_vec.push(L);
        R_vec.push(R);
        transcript.append_point(b"L", &L);
        transcript.append_point(b"R", &R);
        let x = transcript.challenge_scalar(b"x");
        let x_inv = x.inverse().unwrap();
        // Fold in place: a <- a_L + x*a_R, b <- b_L + x^-1*b_R, G <- G_L + x^-1*G_R.
        for i in 0..a_L.len() {
            a_L[i] += x * a_R[i];
            b_L[i] += x_inv * b_R[i];
            G_L[i] += G_R[i] * x_inv;
        }
        a = a_L;
        b = b_L;
        G = G_L;
    }
    IPAProof {
        L_vec,
        R_vec,
        a: a[0],
    }
}
/// Splits the slice into two mutable halves at its midpoint.
/// Assumes the slice has an even length.
fn halve<T>(scalars: &mut [T]) -> (&mut [T], &mut [T]) {
    let midpoint = scalars.len() / 2;
    scalars.split_at_mut(midpoint)
}
/// Base-2 logarithm after rounding `n` up to the next power of two, so
/// non-powers of two map to the exponent of the next power (e.g. 5 -> 3).
fn log2(n: usize) -> u32 {
    let rounded_up = n.next_power_of_two();
    rounded_up.trailing_zeros()
}
impl IPAProof {
    /// Verifies the proof by naively replaying the reduction: folds `G` and
    /// `b` round by round with the transcript challenges and compares the
    /// resulting single-element commitment against the expected one.
    /// Returns `false` if the proof length does not match `crs.n`.
    pub fn verify(
        &self,
        transcript: &mut Transcript,
        mut crs: CRS,
        mut b: Vec<Fr>,
        a_comm: Element,
        input_point: Fr,
        output_point: Fr,
    ) -> bool {
        transcript.domain_sep(b"ipa");
        let mut G = &mut crs.G[..];
        let mut b = &mut b[..];
        let num_rounds = self.L_vec.len();
        // Check that the prover computed an inner proof
        // over a vector of size n
        if crs.n != 1 << num_rounds {
            return false;
        }
        // transcript.append_u64(b"n", n as u64);
        transcript.append_point(b"C", &a_comm);
        transcript.append_scalar(b"input point", &input_point);
        transcript.append_scalar(b"output point", &output_point);
        let w = transcript.challenge_scalar(b"w");
        let Q = crs.Q * w;
        // Augment the commitment with the claimed inner product bound to Q.
        let mut a_comm = a_comm + (Q * output_point);
        let challenges = generate_challenges(self, transcript);
        let mut challenges_inv = challenges.clone();
        batch_inversion(&mut challenges_inv);
        // Compute the expected commitment
        // TODO use a multizip from itertools
        for i in 0..num_rounds {
            let x = challenges[i];
            let x_inv = challenges_inv[i];
            let L = self.L_vec[i];
            let R = self.R_vec[i];
            a_comm = a_comm + (L * x) + (R * x_inv);
        }
        // Fold `G` and `b` exactly as the prover did in `create`.
        for x_inv in challenges_inv.iter() {
            let (G_L, G_R) = halve(G);
            let (b_L, b_R) = halve(b);
            for i in 0..G_L.len() {
                G_L[i] += G_R[i] * *x_inv;
                b_L[i] += b_R[i] * x_inv;
            }
            G = G_L;
            b = b_L;
        }
        assert_eq!(G.len(), 1);
        assert_eq!(b.len(), 1);
        let exp_P = (G[0] * self.a) + Q * (self.a * b[0]);
        exp_P == a_comm
    }
    /// Verifies the proof with a single multi-scalar multiplication: all
    /// checks are accumulated into one MSM that must equal the identity.
    /// Returns `false` if the proof length does not match `crs.n`.
    pub fn verify_multiexp(
        &self,
        transcript: &mut Transcript,
        crs: &CRS,
        b_vec: Vec<Fr>,
        a_comm: Element,
        input_point: Fr,
        output_point: Fr,
    ) -> bool {
        transcript.domain_sep(b"ipa");
        let logn = self.L_vec.len();
        let n = crs.n;
        // Check that the prover computed an inner proof
        // over a vector of size n
        if n != (1 << logn) {
            return false;
        }
        // transcript.append_u64(b"n", n as u64);
        transcript.append_point(b"C", &a_comm);
        transcript.append_scalar(b"input point", &input_point);
        transcript.append_scalar(b"output point", &output_point);
        // Compute the scalar which will augment the point corresponding
        // to the inner product
        let w = transcript.challenge_scalar(b"w");
        // Generate all of the necessary challenges and their inverses
        let challenges = generate_challenges(self, transcript);
        let mut challenges_inv = challenges.clone();
        batch_inversion(&mut challenges_inv);
        // Generate the coefficients for the `G` vector and the `b` vector
        // {-g_i}{-b_i}
        // Each coefficient is -(product of the challenge inverses selected by
        // the bits of the index), mirroring the folding applied by the prover.
        let mut g_i: Vec<Fr> = Vec::with_capacity(1 << logn);
        let mut b_i: Vec<Fr> = Vec::with_capacity(1 << logn);
        for index in 0..n {
            let mut b = -Fr::one();
            for (bit, x_inv) in to_bits(index, logn).zip_eq(&challenges_inv) {
                if bit == 1 {
                    b *= x_inv;
                }
            }
            b_i.push(b);
            g_i.push(self.a * b);
        }
        let b_0 = inner_product(&b_vec, &b_i);
        let q_i = w * (output_point + self.a * b_0);
        // One combined MSM over L, R, the commitment, Q and the generators;
        // the proof is valid iff the result is the identity element.
        slow_vartime_multiscalar_mul(
            challenges
                .iter()
                .chain(challenges_inv.iter())
                .chain(iter::once(&Fr::one()))
                .chain(iter::once(&q_i))
                .chain(g_i.iter()),
            self.L_vec
                .iter()
                .chain(self.R_vec.iter())
                .chain(iter::once(&a_comm))
                .chain(iter::once(&crs.Q))
                // XXX: note that we can do a Halo style optimization here also
                // but instead of being (m log(d)) it will be O(mn) which is still good
                // because the verifier will be doing m*n field operations instead of m size n multi-exponentiations
                // This is done by interpreting g_i as coefficients in monomial basis
                // TODO: Optimise the majority of the time is spent on this vector, precompute
                .chain(crs.G.iter()),
        )
        .is_zero()
    }
    // It's only semi unrolled.
    // This is being committed incase someone goes through the git history
    // The fully unrolled code is not that intuitive, but maybe this semi
    // unrolled version can help you to figure out the gap
    pub fn verify_semi_multiexp(
        &self,
        transcript: &mut Transcript,
        crs: &CRS,
        b_Vec: Vec<Fr>,
        a_comm: Element,
        input_point: Fr,
        output_point: Fr,
    ) -> bool {
        transcript.domain_sep(b"ipa");
        let logn = self.L_vec.len();
        let n = crs.n;
        // Check that the prover computed an inner proof
        // over a vector of size n
        if n != (1 << logn) {
            return false;
        }
        // transcript.append_u64(b"n", n as u64);
        transcript.append_point(b"C", &a_comm);
        transcript.append_scalar(b"input point", &input_point);
        transcript.append_scalar(b"output point", &output_point);
        let w = transcript.challenge_scalar(b"w");
        let Q = crs.Q * w;
        let a_comm = a_comm + (Q * output_point);
        let challenges = generate_challenges(self, transcript);
        let mut challenges_inv = challenges.clone();
        batch_inversion(&mut challenges_inv);
        // Accumulate L, R and the augmented commitment into a single point P.
        let P = slow_vartime_multiscalar_mul(
            challenges
                .iter()
                .chain(challenges_inv.iter())
                .chain(iter::once(&Fr::one())),
            self.L_vec
                .iter()
                .chain(self.R_vec.iter())
                .chain(iter::once(&a_comm)),
        );
        // {g_i}
        // g_i is the product of the challenge inverses selected by the bits
        // of the index, as in `verify_multiexp` (without the -a factor).
        let mut g_i: Vec<Fr> = Vec::with_capacity(1 << logn);
        for index in 0..n {
            let mut g = Fr::one();
            for (bit, x_inv) in to_bits(index, logn).zip_eq(&challenges_inv) {
                if bit == 1 {
                    g *= x_inv;
                }
            }
            g_i.push(g);
        }
        let b_0 = inner_product(&b_Vec, &g_i);
        let G_0 = slow_vartime_multiscalar_mul(g_i.iter(), crs.G.iter()); // TODO: Optimise; the majority of the time is spent on this vector, precompute
        let exp_P = (G_0 * self.a) + Q * (self.a * b_0);
        exp_P == P
    }
}
/// Yields the lowest `bits_needed` bits of `n`, most-significant bit first.
fn to_bits(n: usize, bits_needed: usize) -> impl Iterator<Item = u8> {
    (0..bits_needed)
        .rev()
        .map(move |shift| ((n >> shift) & 1) as u8)
}
/// Multi-scalar multiplication helper: materializes both iterators into
/// vectors and delegates to `banderwagon::multi_scalar_mul`.
pub fn slow_vartime_multiscalar_mul<'a>(
    scalars: impl Iterator<Item = &'a Fr>,
    points: impl Iterator<Item = &'a Element>,
) -> Element {
    let scalar_values: Vec<Fr> = scalars.copied().collect();
    let point_values: Vec<Element> = points.copied().collect();
    multi_scalar_mul(&point_values, &scalar_values)
}
/// Replays the prover's `L`/`R` commitments into the transcript and draws
/// one folding challenge `x` per reduction round, in round order.
fn generate_challenges(proof: &IPAProof, transcript: &mut Transcript) -> Vec<Fr> {
    proof
        .L_vec
        .iter()
        .zip(proof.R_vec.iter())
        .map(|(L, R)| {
            transcript.append_point(b"L", L);
            transcript.append_point(b"R", R);
            transcript.challenge_scalar(b"x")
        })
        .collect()
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::crs::CRS;
    use crate::math_utils::{inner_product, powers_of};
    use ark_std::{rand::SeedableRng, UniformRand};
    use rand_chacha::ChaCha20Rng;
    // End-to-end prover/verifier round trip over a random polynomial. The RNG
    // seed is fixed so the test is deterministic.
    #[test]
    fn test_create_IPAProof_proof() {
        let n = 8;
        let crs = CRS::new(n, b"random seed");
        let mut rng = ChaCha20Rng::from_seed([0u8; 32]);
        let a: Vec<Fr> = (0..n).map(|_| Fr::rand(&mut rng)).collect();
        // Opening at `input_point` uses b = (1, z, z^2, ...) as the weights.
        let input_point = Fr::rand(&mut rng);
        let b = powers_of(input_point, n);
        let output_point = inner_product(&a, &b);
        let mut prover_transcript = Transcript::new(b"ip_no_zk");
        // Commitment to `a` over the CRS generators.
        let P = slow_vartime_multiscalar_mul(a.iter(), crs.G.iter());
        let proof = create(
            &mut prover_transcript,
            crs.clone(),
            a,
            P,
            b.clone(),
            input_point,
        );
        // Verifier replays a fresh transcript seeded with the same label.
        let mut verifier_transcript = Transcript::new(b"ip_no_zk");
        assert!(proof.verify(
            &mut verifier_transcript,
            crs,
            b,
            P,
            input_point,
            output_point
        ));
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/ipa-multipoint/src/lib.rs | ipa-multipoint/src/lib.rs | pub mod committer;
pub mod crs;
mod default_crs;
pub mod ipa; // follows the BCMS20 scheme
pub mod math_utils;
pub mod multiproof;
pub mod transcript;
pub mod lagrange_basis;
// TODO: We use the IO Result while we do not have a dedicated Error enum
pub(crate) type IOResult<T> = std::io::Result<T>;
pub(crate) type IOError = std::io::Error;
pub(crate) type IOErrorKind = std::io::ErrorKind;
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/ipa-multipoint/src/committer.rs | ipa-multipoint/src/committer.rs | use banderwagon::{msm::MSMPrecompWnaf, msm_windowed_sign::MSMPrecompWindowSigned, Element, Fr};
// This is the functionality that commits to the branch nodes and computes the delta optimization
// For consistency with the Pcs, ensure that this component uses the same CRS as the Pcs
// This is being done in the config file automatically
pub trait Committer {
    // Commit to a lagrange polynomial, evaluations.len() must equal the size of the SRS at the moment
    fn commit_lagrange(&self, evaluations: &[Fr]) -> Element;

    // compute value * G for a specific generator in the SRS
    fn scalar_mul(&self, value: Fr, lagrange_index: usize) -> Element;

    // Commits to a sparse vector given as (value, index) pairs by summing the
    // individual generator multiplications.
    // TODO: For large vectors, we could probably do this in parallel
    fn commit_sparse(&self, val_indices: Vec<(Fr, usize)>) -> Element {
        val_indices
            .into_iter()
            .fold(Element::zero(), |accumulator, (value, lagrange_index)| {
                accumulator + self.scalar_mul(value, lagrange_index)
            })
    }
}
/// Default `Committer` backed by precomputed multi-scalar-multiplication
/// tables over the CRS points.
#[derive(Clone, Debug)]
pub struct DefaultCommitter {
    // More aggressive precomputation (window size 16) covering only the
    // first five generators, which `new` splits off specially.
    precomp_first_five: MSMPrecompWindowSigned,
    // wNAF precomputation (window size 12) over all generators.
    precomp: MSMPrecompWnaf,
}
impl DefaultCommitter {
    /// Builds the precomputation tables for `points`.
    ///
    /// NOTE(review): `split_at(5)` panics if fewer than five points are
    /// supplied — confirm callers always pass a full-size CRS.
    pub fn new(points: &[Element]) -> Self {
        // Take the first five elements and use a more aggressive optimization strategy
        // since they are used for computing storage keys.
        let (points_five, _) = points.split_at(5);
        let precomp_first_five = MSMPrecompWindowSigned::new(points_five, 16);
        let precomp = MSMPrecompWnaf::new(points, 12);
        Self {
            precomp,
            precomp_first_five,
        }
    }
}
impl Committer for DefaultCommitter {
    fn commit_lagrange(&self, evaluations: &[Fr]) -> Element {
        // Vectors of at most five evaluations fit entirely within the
        // specialised first-five table.
        if evaluations.len() <= 5 {
            return self.precomp_first_five.mul(evaluations);
        }
        // Preliminary benchmarks indicate that the parallel version is faster
        // for vectors of length 64 or more
        if evaluations.len() >= 64 {
            self.precomp.mul_par(evaluations)
        } else {
            self.precomp.mul(evaluations)
        }
    }
    fn scalar_mul(&self, value: Fr, lagrange_index: usize) -> Element {
        // The first five generators have a dedicated table: build a sparse
        // 5-element vector with `value` at the requested index.
        if lagrange_index < 5 {
            let mut arr = [Fr::from(0u64); 5];
            arr[lagrange_index] = value;
            self.precomp_first_five.mul(&arr)
        } else {
            self.precomp.mul_index(value, lagrange_index)
        }
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/ipa-multipoint/src/math_utils.rs | ipa-multipoint/src/math_utils.rs | use banderwagon::{trait_defs::*, Fr};
/// Computes the inner product between two scalar vectors
/// Computes the inner product between two scalar vectors.
/// If the slices have different lengths, the extra elements of the longer
/// one are ignored (zip stops at the shorter slice).
pub fn inner_product(a: &[Fr], b: &[Fr]) -> Fr {
    let pairwise_products = a.iter().zip(b.iter()).map(|(lhs, rhs)| *lhs * *rhs);
    pairwise_products.sum()
}
/// Returns [point^0, point^1, ..., point^(n-1)].
/// Note: always returns at least one element (the 0'th power, i.e. one),
/// even when `n == 0`.
pub fn powers_of(point: Fr, n: usize) -> Vec<Fr> {
    let mut current = Fr::one();
    let mut powers = vec![current];
    for _ in 1..n {
        current = current * point;
        powers.push(current);
    }
    powers
}
// Checks that powers_of(x, n) yields [x^0, x^1, ..., x^(n-1)] for a random x
// by comparing every entry against the closed-form `pow`.
#[test]
fn simple_vandemonde() {
    use ark_std::test_rng;
    use ark_std::UniformRand;
    let rand_fr = Fr::rand(&mut test_rng());
    let n = 100;
    let powers = powers_of(rand_fr, n);
    // Spot-check the endpoints first...
    assert_eq!(powers[0], Fr::one());
    assert_eq!(powers[n - 1], rand_fr.pow([(n - 1) as u64]));
    // ...then every entry.
    for (i, power) in powers.into_iter().enumerate() {
        assert_eq!(power, rand_fr.pow([i as u64]))
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/ipa-multipoint/src/default_crs.rs | ipa-multipoint/src/default_crs.rs | pub(crate) const HEX_ENCODED_CRS : [&str; 257] = [
"a098a29045f1482ea82bdd90b424b82389eb282aec502591eb756633d17a580183c43f2f4eea67cfd9ef71de006ece8c0f31fc4891ba0b74f62c20ac82045c4c",
"823b32fd59b50770b96fe0b20a30f33681c3bf14a92f38ffdf3e72f07d606e6ce52ddfe24377616727e8c738e04e93fecac6ee340246d9a8fdb06619eafbc66b",
"8fdb6314ae2c1cb7108148f129b1246a037ec48b1bcad4d0d97ed9bfbee36b32167e9b931eaf98fe8b02c7be3608288b9e815fdc7673f9aa354b0c18aaf7fc55",
"f8ddc3a1d255a5a0eefac73929a9c1495c6016e085fcd60a2c9bdc12cc41d26b3184818d7e65ef46ae42f5ad831a8e8ce7b136a63a3b31d1e3ee383f0dd65b65",
"2cfbbf4e08a0964c95c543667cae575821b3d23325825b8aa4cc780494bbd400df241d3bd41dbe85db77d7d8b0ae0d43581aecc1339064f5d4183aa61cba0346",
"3b0de628a1ec296a6105ee617aae0d81dc04bcb8e59c4fa7c469c8e1767b811c1583b78cfb94fc63f28b433edc60a6a1116b81f11df04b1ebb5497fa9bc27e6b",
"f97accfab8609474472ad9adff16a127725314b1d02b9443b0631aedcb64ef0333f0aaddab636d8a534bb22b4339f45401a47772680e76c3fa4bf824c2109150",
"847ef1de586c659f5f46429a6c6bc417b94d96d6ac84d0c499769562a9bd3614a566eae66bc852a1a95702dd8be98892ff21ccde56f4ed02238f82271af18566",
"9560ac3149930f0207d8cea8d3d858912dca0ab5dd1cbc348d1ab1e9e8cdfc027d4c86798ed0883c3ca598578a6b5cfb831aa1bdd0e6b3037799c658971f8550",
"f03d94100e16ae4ea56a0ba3ddab24ba8af5c5e7c0c211d81284b416027b094522a0dc66fba15b051f239650d144ae4f02f32a3ef5289eff6a31b3320c45f75c",
"e54ca5aa30cd28b9645088bd929e159848fd30b9cbf550b7bc18eff9b41c0d0313e48960881ac683a31f128c886de69cda0fa976944dd9975f7609ba6f1c6a43",
"5191adf4441bba1cfcc41be725bd1ef701cd327c38b7c27ebf68f1d5bf151f6e2957db691deef5b01bf0cb6ad6497f10267a0a33a46cbb893604fb77f19a0059",
"bb21a7923dcb9b766a8cac650658ae3ab04686dd4bab1722afabbb2bc3841e6d7d227a2bcfd06733f2f5d665019f3eb6bac635e6b77898923d7cbe47f8e92657",
"2be83313c6a8bfc5e4910a1963ec23794a3ae3964828da4687fa1baabb72de128cbc9a5e383af05ca56897ce3d5a535946acc41ef64f871ab5c46afd89ad4d3c",
"12d1d530ef50f4bad719925bf7a59e54637fe38773894311fb08def9a379ff585197b9937348246e88b5efc1fccfcda9b9cd9b9e66c11dcdb584e9997c1bce69",
"33daaa3b0d7343f38b12a81b9799694ae8efcc5cac1daf9b93dde13d816fa8103aa63e3cf1ce614bcb42c8dc6c32f0eb4cbec1fb419cd5b2760e48719f3e3f62",
"f6964470cd10ada606e60754826f71b384d2112bfd6d13077951f7b40b95dd2c873fed8972e62010941ecc766174007ee3757fc2d366763063d31895a17ecb52",
"f0dcd9abba9095272d638b903c629873ff40900ab91e47e0caa63664c1256b09b7b6c7565e1dd022f87b0a67d74b74f1da386e959dddac01075e16d0bb5a1e61",
"6e19b989cb59f1895b8ca0c460ed538abc6a45541645cb64e4b64e14d318070ddb253116cf5e4ae3df4f8a323896df15990162d9cd520b0b9def83019edd8845",
"ed171bb156e2af8c1c46275c38f9d41a1525c479e8e7d99f23d8810eced8151b1e246abfeb82be1094e18b0a6c5095105424a6ea013a5827b05cbae910b5b570",
"48064440ee15af320c4dd72bafc9fd2bb5828b0b26c945d638d901d3fd8659316c8cba907a0e2d59440b59459e881e01cd200e20db58c6f577b9a898aef30d6c",
"cc66a9b6a58d0b1c55424faf513ebd5fb6f40851a45a7f4d28f6d166ccd21a604169b6451f4dd0986777e557e4e4db45a8be841fcaa82f32401d7f64ab1e3353",
"10fbad69a1fb26289b80c506526e0f2616174b0d88519d672917e9c71b658772fbee10ab4d4d8ef192426ab3bb4eae97db235d8b620e37608b1647037f1a725e",
"32a0fccd7fcf5e08322fcbe89897169db5ad5daef0a88acfd1238378149b0611387915289785e4b23a41988d8c58b9b0504e5370d510a188fb2b37e558bd0d49",
"d87e33f404379a043aaff416ecc2953018a2d7415c51a6321d47560b42f8dd36eae40f25035e16c1fe70a7c7f0c6c3186390165b29a1b016cf50a26827bde36e",
"3c413a3246cdf826793f46912a49361b59e0cbe6d8bfa289bb36758ef4bd092afbde52f0d3f20430ea50a60264d14020655c89c56a8d7213de196a4fb95aee61",
"58cda63669d39ff4da7aa86a61a83603266f1b24f3be638b1b73b2ee70b90d0870c99c018491ed9b4ed2630e9c6de50488ef4cef0193ea94dc7dc1f48d36c448",
"6925a8539c447bec24b3d9b9b746386d4d37531482884d3c4076060ccf38d871afee69215036330c64695b0061b6413956bd5097d88b1ff97173aa4feb031e43",
"3599d53e8676786d9ae5f3e32998adf5642d6153f4246cecfda9535f3c692b3d544b456881597db69adfeb0b96721dfc61494c6cd3cd76acc596eaa80214b34e",
"68b64fe92c37de6070676e56ee9efed4dd1f649a9fa9d3bfa85df9ef50b9d42cd99910434d6a674f032581edbb658c85186e456cf5403ecd52afc83c698ae556",
"67099133844f933f06b2bab38bfcdfb4aaad72bc509bc6fd899f61848a2e9c15e28b9c1ebcfba99939c36e8e8a416fd8d6660a7adcbdc4c347347a6d7685ee52",
"a0946dc359fa191ef5d279caeeab7354230255dc1a44332ba6b25b6d7e0d011bb1b854df0a861f15027575611c7f1a5f4d0012344fe1285025ba8d656dc21a43",
"aca3a868790953f836d5b8f6b4e235847e5b8502b4be58af1db7989a69500263f2471495cc4243b0997687d3ea9c4c1bca7fb31c5e4b85756e01d039ccc5dc44",
"f6726c945318294d7d88eaf66207473586d743df89e8a7b0119ba71dc37ed614ee7a6960a73e1f58bec16c960d9150d153178b3e76f628b665863c5c073a4440",
"b2cf902938ab413aa5a065df77e5fd71d6599ba8c973e13212fec3fd3e7c9f5a7186b3df9222222887495a9fe88c9cc487a13e19c2882c1b01908559f9160a3f",
"bb652c58a6b1b3bc89dfdb01b13236843b9308bb616fd3cf7c1991e46950de6083f729a22692a6c869905b62cf0ab25df05c42d794bce45bf4eeac9e8f5aa560",
"844ae8df265e56604dd0d900534e02b3504aef546dffdc9cbebe4aed011bfc0f6769cff47b46b9244a59a43f49bfe9d047945aee7f9e6175c0934cefd652515a",
"de229d874dcf92cc86c53e5d0fdde711cfccb0ee3229a98438e3570cf272e436ed3b709d0f6c5c00f75188d605a85db6bc925a4029c97eeb3ba67da9bef0213f",
"21cc75fecf1e901b20010fb3160be4f726863698b93a2eaf9c95e28827e6ec60ce0eca97cd9566b7559ca08d80d1647e980ab745cd739754c0299720f3939a44",
"4c2c7f88573fbc03e4be70fbf71985e8645dab5e1afd2cf88b5ffe9cb3f6851952b1dc4dde6878416efa42e3c3f3e13a3238edba14c292d8076e043e8820b35a",
"6249e1111ad2aafde886a165b5b4c48eb80df82ac49a1163d1bf38f02c7ce42409e0ea487587d3dcfeb2d9b37d1c0428a8c052ed578cff5a09ab4ba7ed94b969",
"0a15a8122e11e5e070d4023d429f8f9ccc1583a9615d5bba1a50949d6463bf4fc9de74e209e1f1fddb612dd60b8e53f20dfb4d7d23288a0f648823501ad76845",
"84263cb808009fbe130b1e8c8b941fce524f4206d26ac4a259fce24e05db8870f0c0c72bcff303d0c18a7a4f322e2b0d9fbd258489fc4147ba349d42e1486940",
"507b317dda453fb6ea86ffba63c3d0ab4a27b9aa393e86b2055899f163335b47261020d8f073fc994aefd51616364ccaf81b7cc63ef41c61ac3f442aa8fc1968",
"7ea2c8658648b79aa6b729a7f93d0cbbb9e9257a20b3b624d8fb2998f2095d3c6bc70c5a8f9570cf81c9cbff4deb535bd8013ac9efea2c4bcd8d52ea5e5f3655",
"18d14cbcc41c8c675817a33ce1651017449749eafcec7ee8e67aa3a9a893f817f487df284e65c2e58ed8f38b077c626bac72b3a5aaea6d2a232b24d2bf96c13e",
"c4a0bffebdc1aa4a2f5332e4c5f0349249b0d91347427bbab232a070fee60820d4c895607d3fa8e46a37d37761e49a5a3f374c18f0bbc374932069cec9814f65",
"b187520a6954c8ae0e7e2356eb3a3142a5c38f113a937bc72601c7b3616e892269d8d581e2d556fe18993d80861ce3346140ce4d30accc816738accd9b025067",
"9a232f851fd9f3d90a86640407cdbfc2d7c08d1d45e4be1d3fb12861caaa3153aca66e725735ea0a8249a80d109cf62fb69797841f8fe73ad3a55122bcfd4a54",
"658d8ac7c01acaf92999c2474c7c02cfe29270ec26bb778c3de28d926cdf0b0d100386f83c857f89206b256724a93d5c6ceff555e02eb9b8f6f169541119196f",
"5f05e817df1d5ba9b953136007d0000151200776fa1befb5bd5c0ff940e06528528fbb0e75ac00b742d25a324e1c1161c61a2fc4860b836d75d9b4c94451a167",
"5fb8b4b5604b3946d150c00ed2ca45f3fc90617ed217285d9e510ab1328ceb0056b317571c80e8b3cbb295d313c2271a574671b0333d483fefe0a32832ff4753",
"1cc4fd7627f5da55ada623ac2ba75f23bac1643a7070f0f29ab63888ac6cb05a9d01b264ca200f1796db9c32ca0d08bdb42feea0c4cb8c6ff7a605cb5efa5b5a",
"fb38d053674d695609098ef930b7722504bfc082431e71f317f9ca30b562ad1876a61e8de46e6d3067e6a3f953ea1ae52c08ba158fce6ed6f9a9475e08f9ed3a",
"353a80ee664ebb642836925be445eac0dda24461e51a8ac6e0bfd1589da07b003a0fddfde3c035cd70b505555361061e023f696b778dfa60d208d5d4647b0142",
"d34d2c45bbbc038f93ba6fb15687b3eaae6019ab25f3f6b8e53eebb92433eb120ed23e12e1ee250f994ac346f7d6986dc160809419d16988adaac886c751cc61",
"e6329b8b94f08581cad3a16fc9bd8f04407fdd27335c600954ce7da4bb67f51edc333c526834d4c363147650e5d921eb367e1ab50f1bd251bc1d2fd55296533c",
"14c97e68301b5860b19aa619cc91250a1fd4a3ced6eeb145e6c791723f4c7b135ad6006252e7155c1662be510e97f6f1d53f0fa8a1f5ed95907987aae8218e51",
"3908568352d80b0b1d33436733944f19632a805e20f81a091cd39425ce4f6a50463bfc47e17e297d07d0c2f0fd9aa185046d91815e74c2414f24f2f59754ce60",
"0797f85fd50a65a716089dab15da3706c8d3b2193789b0898fca1402fe110a2b726b49a3e395b9db4b907366555ab01fc02690d9b029df5c03cafce6eeeb5255",
"c1acc40f2e738da286d2b9f9614fdad453db33290d68f4f7f4a967cfe70e0316d3f8c7e843a4b8dfec173cfe5212074f5df7ed4494facf2ba41f4e86ec19c872",
"55d0029b76ac4372ef654cc35ed543255330a4f6f3f9b7fc8450c262c04c922cfce4c5b2b5adcbbcbdcde6dfc59967694da1e48db158093167efe0ac4438ee72",
"3d37576144c32a876c614ed7ee7ddba519c6e57b336e06444cdb6a2d3b6e243b4116328265ed27a7492826eadc1c9de4091119d83ce18556c97f33fe7856ff44",
"78cbd559f0b2bb104aeeb6762af3fdcaabf84b0cacf86310e42e50633960b815903096053798e2c959e72004d1a066cc923ffc649773340c2b34be79e6d1746d",
"10f2cfcc65e5a661538da9b9bdf36baddbcdd2ff1bc4599b1ad144f80bc6350f8792c3599ffbf890b38bc8159940d0b79960bc072b672ab4d581baebb9aaeb3f",
"9845341dbf04311acc227531623860d2b9d859731c374454ee15031f4bf341504248a98cab6fd25f5e59a7c3d7918e3b051543f9f0484f0dfe9e779ebac6fc6a",
"24950a6f689c8cac2f1663d6e075d15aca1c9da753f67fb5410d866589f9032ad4c5938fec96155ce209e310622819262a7f0d66b7612904baba6fc1e789fd71",
"77fcc3b0948f2a53bf2573eb226ca1020f8d35dc3a5ddef451146ce7877e653db98a6b4e03c09724f32a18894c7166b82eba5243322f7678bbb482436d930058",
"e54fb1ff03309f45ab75c27c58ead73ac14c017b55887aea644bb28da6296a4e5d5e12b93ccf0c9b6e9e110c3b210cc6298e7a92d709f09345e5bc3f62f25d6b",
"25f1e373a6ac129d2db2ea9c4d07700888a8cbc83a42e7bab3de102357c246430dfc5cca315e36756caf20b8fa6f635e4e05cccea1cb646025bbb8b5df747f46",
"e4da3e8f8d52cc5a062d42f27e79053bbbc424aff7798442dde8e4681f5e5c13289663cc9d93468b0ecac3f7d4e83284ad3866ad845442fe4979bd013ad84a46",
"a110683aa5b7ec7957487182808dcb3f4d153684a240573662fbf6b01701a155e04f5f9cd8e6648e3b474569984e7a8c64e1629cdcd6095ad2d719b87a9f0f70",
"21a4562e207cec21dfb5365eba7f8d4cecc08eba4aeffbe3b6f3f2ec7ea1b70bb4d7c4df444ceb3a059719ab654c8af98bcb66cb85eaf5c09dbddb8aa8efb555",
"eceb4568c318b48d376e5df77386e75fa4ba38c2800626a83dd666ae40873c43629fa5e9a78caf11c33b65c7282111598ec135ba5abb0a82a8390762bca40b55",
"827700aa3904796f531ba6be5c1807a407de39e50cd33b463ab6b582f841bb6e9124e87a2dbedd4703a29ba39355968019a88eb420b06897b2712d7c66f3533c",
"981d8f33bb2729cc8e3b8a415501816ab58ff98a4a1fb2a522360cada85f75581d438586ac124770e7d9a55f0e19448487a62b448b8a3278fde06467e7bc385a",
"7ca32dbba01faa2809245f42221a05ba34e2f5a7f0bd619da3707cf681cef449d8ef6c9890734009835a35b7a4aa2ec41a323955ff7290729cc052b911cf0f4a",
"dab0a250e22dc2aa0a01cba444f554f3d584766e2d6a27db8432913ade734070f967c336b34823d6716ca132561cd1a715ffa2952fb6348c0561c7c8fd907e57",
"d2b823db9c42a64607f2b46a9443dff0ec7550ea5246df1bb8e78e0dea132b083441311b009a559a21e02ee6d200e95cb18cb42e684bb1652ef2c883ade8014a",
"f4c0f399dcfafd93d6301f8d01319e6df53f8385a7e42f8a8eb5952ce857f759159a9608ffc1014961a0b0525cc9bedbe93edc16be33548dab653f5c9d2b5d56",
"6457ab94d434166f4e312355863abc878e8ea45c92167cf0214c6c238b61de4254b89f8750bc857c24a333d3f721657ece3ef5157dc8d514d7e51068d3ae0f70",
"40cdc4818935a99e3263ddd09964de6886a310c6a3e3dc674268fd59c6d99b0d3bd131cfed21281ca364e21402a9d310aafef4826a7873fc39ea97ba7f97fc3b",
"0db0117662b0fd80ccaa6679333aa589969fbae3c6c5dc75af71a28a57d12c1aad7fd368fbe55fc45a3e377e88770b8e72a40e70435d872b56486765f8612e4b",
"e8b070c3d7319e034fecad833593888fa00e9b8692cd286045836b8f3edff144a348b8eb9f5155cfd9c698252ace15eeac9a4b2e746d2b75f510ff74accbd243",
"4fa1fe7f1bbce08505061aff3fca473da6f2392fd96fe22d5131b0a3e075c719aec8cfdf963ec61de0d7d5684dc568b821a0e99da114ee504220e794196c1943",
"38a61b53dc6551af03e99b081513373e43bebeed156ae8290f80832ed67a9964dbe7d62901d7f63669afa98adf5f994447c6bb845ca8a8e8de4e9ef1d9d7095f",
"814f3351695a793bc37df8932950ae2a903149e44e5dfa8e02680d5c07d0f911980449f39b6229d2ae5f151e402e6bf6e7dc2506c099f2f1447d6f519c22ba48",
"44dd0f0f870d64c251749aa03beeb6a2a7862481d058bf9e6b8c76c1d595350a0fce4c8ac5eae24fc3d299254db491b5b10710019881050b4f7c66029af3c35c",
"74b97a73b0d20e6ff9f07943e802a2470ceaf0dadc69a58a9a1b2454b1334551a64dbd76f3ab31ccecfbd68eaae6c839c382d11e9fa77bf1b8b93b5768f6dc68",
"58ebf31c655351cda21f4c4fb87741c18267ddac6f57cbff2ff51ccadd1e3140588ab0aa4ae6f2e0f82699e62f91498bce9e45d8b4ddbf3b1e09cf08baa4d366",
"d7ecb2a34c9574e223849a1aa7dfbc160fdfcf0b31f67bd9d95746bd6e7e0411cca59b8384987cb9b0c9893e567a57e47b4aecf54adcf3bf848a966e34f71266",
"adc861b6b547571f155df38a8020df087d211f3aaaa9d6b2ff33eb0880bd540efddba7132ca9769520a0b7e15221358529a72e83add9822e7cf0f97bb1bff966",
"0ae728fb9dca7f2065326765c3440203ba1d200b844079fd3535268cf3f16f18985ff723e165a4b35fba92c10d36bf257e8604ad149e359dd041965e3059b83a",
"434f0e01d428468cf976d6615ce057f16794ff48c913abea958e80ac8c79631c5d76d62c038fadf7f48430d44cd3d638433a6cd4d80dff78f95b17c97cc8fe3f",
"23caa800525fee033d953d8672281d71aaa9ba0d2040d2a9f9209a219e86ef50d767133efa082720b45db9e3295e358f4dd7d878b88330f114bffd66a3a26f4b",
"b874bc61f823b51c4c06476d677b7ef545dca117ef4963918c428cea9b126f2f9c87fb8e5357310fd3596e715771dec1b8528afae02922fdc955c85d6231ef6f",
"a376a1e66ea78bb19208ee02e76ce4c2c9fde7569b2241f041ddd53575e10937e8943830cdf631c1514e13e17cfb2edb51843a3934f83bdb3b1a4199dd14564d",
"4d43ff0e0ece3c48ffb5cfd671584d007d245b0be5b25bd8374022becb0cad2d3a046051cdfbb8e035fdbb4a9ac595e705f0472c2affdc4195fdab3f25e3b053",
"e3f90f871e1fc29df670be839dfb4b1089240f4b2199e827f0a7209e316a8b42f55026f0bd1838f60803997b6707e3643bca2a29053bed2d84512951f37e3c59",
"fa8d9f8bcadf1550b559027b4a84d9f1607e5358ca9e22bbcd9a8e559c8d0c05b5ab886aa6c0577993e6bdfc86e45d273e0dbc9b28b4cba0595eaae50de1166b",
"cef60fdae6278476cfb03265c8f9bcd5148fe3d021573bbaac1ab1e1fe46ee64f9dae27a71ea2d790ecd5f58bb5116527241ff0e0a90162c1216a0c78dcaec65",
"d982d47620d4e2f9145fe5d02ed1147ed8e3d063a7d64d64b7f020db58c1111d7d2ac7b2a797415011eb5f689364372ebf7a94fa7ba048c8488354105d2f6940",
"54a1f65072cfceda1ce23f7bc2c36c28dff250d85ee9001fc91e15ed31dace2373e0503142f8747f214cb3d9c6f92e6a945f8463f7410cb326dd72dc5c6f626e",
"305c686f041e68f8eba2f8d12da501f1bbc82b5e86064ae0d7397c07b349ee48f99981acaace93c7120cf25ac5b8b27ba90987eef68cb0f48b89571c8d163571",
"deff0d7b32876c69bedc3b1f7dc2d5ecb3843de23aadb54807712a1cd3e6a14cf48be88e45c2850f699d2ccfb0480213f48d3479090ee0c6f1b64a73e9a1b36e",
"aa180c1605b6f0879b504a8514e48d08525d9e03653a4619b38f4bc924d8270ab0b7f8e5b2e1f32de5a99781e68e22050932fa4ce60e0dae03b7eacf67e68759",
"299ce6aecf90c44aca64987a98aa2b1486968584cfdfe122ffb87218c308d74f9bf20b38fda7bf67d5633f22592a9baef4030ff8a5014a10f81242d2e9371f52",
"d241c156a0914c8cb2c937cda2090e4898c7b145c4332f4ed59688c60cd1b32332320d6457e61c585d1a2983586afda02f7992d7e51ef51327e749723bb5a262",
"2fa0df0408edc828541b90ca3ca1a86692d41186da27c6a4c6c1c52b7f5d8d50a578090e8d4bbb52fee69371c0f98e81f040c01d542603d6cec96ecc8ad0fe48",
"2ddd049d75f87924dcaad87fc7890bfeeb0dc7396657094591c1e6eeb15ada3b327c231bd32d1cfd99ac60b46e5a0fb083b37ab641c0977a3626eb2220842566",
"8dc254151bc5f205256f955fad48247443af8e20ad725ecec1cd815c7c6d4948f8c488c02f893e10ee17266135563ed6d74249353e1e5cec2a743ea06fa28c47",
"45c7f6858be14893878df5ab208956e4c8155309d17f8ffdd493486d17365f111d3f02545e713572d733fc985e47639ecdb90a268103d6505c78bf2fb85a484d",
"19f8db4876c23b92d9ed0d2f6bbf8b1902a7e3bfed5fda47a828e1a6d63cc000a357c66a229afe4c1b4b9de383d7b7c00cb45500d5876bde7463413763adc859",
"d3d5e5d2fc4107a691445f0f0680ec2d2a0a287d4c008e75af86598344afd7442cd445fab58713387aec9b7551c1f31a945bd316bb85a963ec6a748a84e3cc53",
"a8eb84640b37ed0f391d7392c8baf55426117e426365bb143bc90353649c307350a1367d9489f69ad32b9c45dce6a3f6c30403149274edf465e4f4b4536f156f",
"1dedf887e5c2fb3146a5163b9a45bf6cc6960480424f1d56f0077f4fef88dd0613906710cd0e91619606c5d7b136686fe808c8c4b69e5a533e22ee046da3e56f",
"694da6157c72de377ffdb2ac29d459568569205fdffd691f4e67a2f2bafdfa3cb1ad42322bae0229e48c76741f4181b514722139d63bb30643dca48d0e55b260",
"4330c744f7e4b6565af95d6dc7d5b23acfba760b19a24594272978fd4d81d93454c7d106084f556511424bda4a3efbafa063270ed4d3170123449555a0546d3c",
"8b78281882f8d2e996d27573f20b300a2a66858d9c5de82dcdd5997627ea1b2546b1d5d89681f2ee846cc804f33407b6c8921bc9bd2741a6a7248cb34fcab04b",
"d136fe36e606a9debb4adf8868a23b3c6c276adfc672902c902a6f866b9d5d3b233c957edf9a8a68422869d5b8a0c91f5cc32676bbc772d33222cfe03b3fa56d",
"d71783316b9f9ab90651b2e94d18ffdf7de47f9e65b5b81e49d2852f72a9a026f11ce9a6681ba31eec99f22e630da5a3d0a9dc48dc1c864396c21895eee35b53",
"ad146e5f44b89b499ebec9190f57863d5a2e85626865cdc7d612153d078ad15439db1332863f19c53e731bd4fac4f0d8934ab559a5caea65bccf6d41d5fd7649",
"86a7caad967ec42d30b48efeaa85e63f656d427342610ad3cfa025a1c616fd17b29423203af8f36459773046ec6b7e5f07574561438e07efecfe8674af851c55",
"10ad798898bda69fcc3610dfb1dcd0323b5c9f9cb67aa00f286bc9689d92580ce71184019e288aeb45cbb21025a0080fa9e0a624f115cbf29918b79f7c470867",
"62922260ff1c3ec3bcfac408e81b5e9bac69e2d8a408274b793dd119798b36419ccfc30791e678d6fb8be86a1d9e071a63276c8899df217c63a30836f0e58b48",
"db3999fba6bbf3f13212eab680559382849690198167e2c76d459c94637db7633973cfc82fee3fd04493d30172b32787e029f223728aea2f30393d77c99ad265",
"af71f60f0a548aa673cd39333591c66d111b4ac86a8f30f49559efd9ad703a4e64b5bd5219c4507844ffa6e259f9ae4b5ce766047fddb35b181713fd8a3ea64d",
"0faa106fc49b8fb8e72480d4d3c221c736ed42a057f0622ac5d8384616e7a21be4d13e873917fc82ac1014db883b84a00da90d641040a651661402fbeb09526c",
"bc8ca8715eea7cb24648f05d9c2b41d11ba6ad6fcd89575128983ed7f8d39f3d69dd9cc8c0fa8096a61debc69e14d17e03542bb7aace170ef74cee07196eca3f",
"fde94f4c9aa0dcecd160702921565694c56a10bc2e12cbe2732613c43ecc353022f33a5454baee898b9a365b518b4c54c1a7136d78deb6791257c73b5f14104a",
"ea254a2a2cbeb3affffef46c56928f95f45a9663db72f470cc3276b251c44620705b5cd36779cfe7c273598f5594c8ca81d331cd04126d0c82976150e7a2d16f",
"f8b47f2e6d7cef812acfd5599d56debb6fe79068464f79c24af5dbd11480a8411c8b748692511f45476cfaabfa9d3c191c0714c724c475c93a916c727ddd116f",
"7b3aa17ec684ba36f99bbdf0ea7f32690e9d51c076962cf7a636fc846b519c21159f379003b7c0cccef1886546876f300e27d1c8f95b532a71773e70766b3f4d",
"0b4fbd054d356fcb58794f7ddf7273a7455e46672505220a6d8425b391e64a605f0c691f4524033887542d0402946f3e15e822a62e0301a1df1be64e15fd8853",
"61fd71d97f2960e0cc548545ee64fbdea435b942cd19dfca31298cd24fa4aa46e55065b2ee92d62a53e02fd2022ebdf288bab5978976f4fbd49230e1fb0da14f",
"b270d906d14815f0a33faeb4f4bc519f08e800d8ecbdd0f763fa29403da65e195614bc9d5df95e8539a3cdc9c4098d354dc4c914340dd550709512f22e11bf53",
"ce33c296d8d7b8c70bab4135934366c7a7f3fa8a2681eb1027398df0b86dd447da80e3a363ee2ed5ec93f6a67c66372d5b1f86fae13b5e968489c6662723d76b",
"a962d8cd17966ca98f5be0720d308a4833846601c06c2a3d27e20f8d860b0f2fffed4c975828a52135e7ea243b500e5775822585fc112b08704b73355c7a6451",
"75fa5f3831149b17e14a824fe1e28093479754ac4683a2b59d2b10066dbd5f0733c7bb1721eeb8d5645b63cbcf539e4b238ee528806ab996a061fe888972b746",
"bdf1fab128a4761b38a467588d062104143bb9671ed99e916462e9efd549cd3e18ebd04cb13ce793db38805793a82cc649ffeb57ea2a84f7279028d72dff0754",
"02598c3c9afc32e976a833a3f633d1623b779edf31ef76366fb5840aab5ed736535accbfc81f2faaa8096dd6594fa964ec0c87dcf13b65c19599bdc342a3575c",
"ad3d670be713cd59ab69b3192a58dc7f750b1d63ceccf28c998c94e6fb7cd2179934fdd1c688a64b6dd7a94d26c800344848f8b6a3cca51874047241f9e6de61",
"23e2f2c4ead325dc25eae677ecf19412e9b083aea4f8814eb048f90eaf5f55031713a439a89a6837d6ea452b595924098c3448b767dd8996eb311848df5d0b54",
"0565914882fefbfaed7b5bfd1cd42e9b866d2b5002e82348ec3ab4c6c2269914153fe284274777fdbce9ce0da3af92452ddaac4a8944213446db56a00fbb4a5e",
"3166e35a00da103ad26fd51ca7275a4dd38ce23099f6b4dde848f72b8c5e1e2330bbd1f005cb51d3fe4deafcf15fe182c5a68f0182c96160e816eb8e95fff569",
"ca3529bea94fbe7fb7b9981d74a5e59f8160394e77b4d9e830693b70884e122d88a5a8eeaab7a3ee266e81b45d2261b895d5867554639795d995e2da6cc2d26b",
"db84e21c290d50071ddcf383c6d745e2162320ec2b3f61ee25ae26cbd7e3346a5834952f40050320bf5e6dd9a5c726f39ae61128bd66a1cd5de6ab6724eb1966",
"b196b4be05c3f48d9a747dfdb15d0bdfaa0af5fa297da8bfa5c4746e72f02766f4336582bc5494d9f47b9c50ed43128e39f582edd4bb808ee43763400d364545",
"d1c30828b452b8926975a49e9fa20923776175f9d2172976ba9400ef4e5c6d644cc9e61fd45c4189b9897289e40faaefd5f0dc6948ccfaaba9ea267435f06a67",
"d0152e52eed470308ce3a72543eb84843cc167715a98f1e804aea37f29ca1b603c8bcd74ed9908a56d6e66f07b50dee8244e101d9efab1f20a06c278ac50084f",
"8169fe4df47bd62dcbe7f7b86a6b21d570d95e1962084261819656ba7221af0d1ee7d408d7b5ff787ecea8dad0c553f974bb8916d139d0b606596c5fa974856e",
"b56f727c8e29afece41b616429e7aaf39d299d424635eb8026917fc03a410432a5be97f6466db5618289183be088b92fb3752c13de98d8ca2ae1f4429657dd49",
"eda121b79c1a6a6c827c7ae5b78a10be9614dc9498b14d87b2f08e4dc9075f37312727381710e7aea7f5d5cf62a62a0786058009ede80d3cf3f3a0c17c3ae06e",
"6b1e0fb73294e52c2cabbd5a0acb992d8c7bcd0ce9b59dcf27da52da08b1b36f09515eb358df9898c35c86ed88094a68bb24280e6680961bbd00526a62d8a268",
"f85d88ef732009e533f45d34a3c7c2b75998836e5730d67a9829408e65cc61127272a65acae36a2d14425e4b1884ce873c9a63554197f80f7301f50237556168",
"958abb8ae407313361f1ae317011a138024f776a8ffbe1c555040f778a89da3539c8916e309d583ac8dc22578feeb927bfa854d1fdf9f5d33efc34e72d486867",
"0f8f510d8cb01b274538b75af2f8052b6dcc677461eadc317d3806fd18722e54e3fa971f2fc955492d00c84acb61f9f0f56f91f92ebd78c76f7f70f0e198276d",
"7d82936d4a555b02f85b1c5b907b05a1d20acc0102f5f55e3ad9835d225e1d1309d2df740022962ae3bd567f0958ffd0f67cb19147493b8eb3a9eb516919144a",
"31a6e1aac71c6fc2e9ad1b79e93aefe91e5658e5cde9c40be2e31f3b6f4e883b1fead7dfdffbb1ddec43da44db90f7a90fe8ac677a981ca45b6f05efa890d561",
"c430d4b8601f99a6dfc082ac73ee10c2c15fbaabbf777a074ea0903280d735463d00ff48a62351deced346373e889844f655368a12f57470a00f2528094c4759",
"8c27a0ea68588ec914959dd14030e2222f4fdda90e0d862a4c7f74370474666ed825f48f3659ed3e6e0bc031626aecd4a4c24a091f01fc3927987335369fab3c",
"2dee90ea2ffe2529cba0ce14010abef7fe5ae71b460e108d833865a21e77223d03362e24acb1cccbd2861251aa53689f3bfc820f658d69f62fbdbcb0fb6ef872",
"ef0fd14e01e226244156e38f06fb11b2a4b12b22b55da5455fe4d39ab8efc566333e451937b78fb4779f0d8d7743f6e780f2f9bc858ffe08fdc6fe16569beb53",
"8e5c3a9686d56bf5ab83c335fb5265d47806443886d1cee4a694711a3c68d03b09725bcd7d8cb1210aefbd1fb63029abdfe90337a090adb582837f93291f1055",
"536c2073d152b58979b7da119a75263dc2815d9c3a809fe324d7f31b166cfd5a4fbebfb6ae51a79858f1f4f2124f0a84e52e02ee5dc001e8635ede9498e01e51",
"2b744a5501117d75416c662c8814be0475d5ea8b2e52a1b5b6637ef2e414bb5da98df931666deca0280d73b7d480a23ee518311b4ee16a925897c00bceb95f61",
"dd3cfda427ebc5543dfd27676d1f90ad3bd78e4f714e6fc99e5330b2c41e14152c8f857c3365ff780f8d51dcd14cd48ac8e2cd41f96c2c43b4ec40749a47176a",
"373fc18a3bd2afc6d4eace8495420f331c1e96a461b1b57de1e43551493c62343a4b1594f3a4de864aa4d51972093ef089523f54cc432e08213f991ec4ca2a46",
"94ff6af7c45cafa3278cd3295bc817dc87818e89066dc9d0aefadfec8630fc20fd540bc7e262a27ce977c24ea7ce3f9d0f7c758cdcea3f950f20cbf17059be58",
"76637382bf79b7ff11fabf1fcc4b87b7a4a81e446eb8531cb02dc93ecc6d7b73519793d6ec617f07dd5574f8853bddb4306ffb9851b80241757be36c8111666f",
"b9e957c52f664132f56c2d48afab8f02223295aef211c40e241752c764133c0d61dba48105062486896420ed35818bbe8c759f5d9b7e1327bbb2e4a4f03eaa69",
"53bc713a62603e5eeff257fe64e29a52c5ccf0e02aac6038d3fb34eb5ca2580f273c7cd8f511ddf80e867a1ec330f6aa1cce85bd585057df195fbf1a65346c5c",
"dc1ff5bef2731a3344d356b7230d0467649be273289d50be6ab4332e285b526670fab821dc6c16e4095e48ce7e78d003bd25bfc23f6516221c0a49553800e645",
"85e5c4d2368bacf3595053562a2d0509b841ed0faecb636f7e06052452b55206b955c3cff4f27a6d33f090fb0aa7ebcfb671fe7047f5098783f6e40fb5526249",
"e1a4b57ee8f0bef5e2fbae40cca4ca4dff1e0434284bfc24ef80cbc88c4a7c65f700fdda521e552ba243c9cd90a3022356eb65f0c3b56a0a26d15497f0a9014f",
"50c67856c2a90b2ff201d930e4760d1a5bf2b16ae54bdc896839f5c5dc12ba1195ccd525f06b2023c8b6bdfe2bf0af29ab7273bd8b32123ab8d03c69858d6769",
"ddf2fbad51a5bc8ed77c8e480b43b143868a7ac034a7384f92cf491971dca5573e414d3a57de8cbcce4e95b74957e6cce3a10d6015bc33b691d3dcce576a764a",
"096df4d1540946b6b9d07d0251e3a063c9750b31aa7eaa85dd57d1e8d2cc9f05fb821252afb828e7af490545682326635feec2583c97517ad5c70603ef86145f",
"b8773584c28edc2b4399a98b2b958a675ed1b769bafcdc6e8d969492f5d9c45afd3c4fdcb3846fb9ce14a42b75b1a120e58c95311983917f0124a3fd53be3744",
"130c3f1b69f6c78ce251e7a64b66aab41e99fd8cda5fda34d9c58d867b1bd30d0d69f6b30ff2b83a973818fda629cfb18031a4a259c463da5ca46aa5091b6e5f",
"a7151766811727287e34ee0def964f388dfc12897bc9ff40320f86c65fa7ab03cbff0e4fc33a8eb31c98d892fe63779df9f698f796d1a39e222b1de4b180443f",
"b126432c893db94853edadfbe3ce16a89f9cff9005696b6e01220a3a6b36455b10b81309524c201a6c1aeb9b28a7142459245aa4e9f84715f48d6fd4cf46636b",
"1470493639a2dd3599a3438271c6d1eafc67a3c85ca54c20577fe32dff6da02da3f4e2818a25c044b3034f8c50de55b93d5a64afdc76c720a1406b96e8e67e42",
"85189f914ceb7fdae4bb37532dec3d8d98dc7c05cceee453b11d58884500f956817b5c2153ac9b0bc137ed6305c3b9abca4cf3bd244eb1cf0ad6b88973d09e53",
"fe09078d16db182c8f8d48f2fc23eed29f1d7cc5ae3f4784c7b40cfef0e2df2e4261e7191a68b7467cd83635904467464af181112bf18dd54874a00553fd3043",
"78b6ceef2abab2eb92754226274330e7affbbd63c5c32293ad3d2373048d0f2948a44ada78b8884863ce2eacacb8b451692175410a0bc9d1573535d138dabf51",
"c8087fe3fbe7e1d133c05589a2a37329100250177bcdc34c177859d5689eab02136da04fc3ef18cee26231d6deae358e181b0d1364c080f7a45dc9c55c761b66",
"e3e826caa655ab54b61e4140283d3414d3bc8554eda81734c74a9dbc1dbad44316220ba55ec1a85d5acc9b360718f31a1044c4a17bdfeec6b6f03b579674ea4e",
"fbbe9d11717a14aa4317951078b404aee01b50b4c2e39f50212a9995d47eb831c45990194d74f951ab93a5ffc4ea4c17c5d59d1f02e4c130530b9c3b73243073",
"f59b93605701138ba7c1877b79d2426fee8fc335401b353ebe670c3bbe64b06f8d01001263aa3f1d6b59a43fc3db3810e4b02ad2de46a6db77ead21db1d2133d",
"86f8ce63d892a11c71c5a941e6ccc30a275c41fd24d3c3a3357660ff2df5c832a2d9d2b17a395970ff123a929c8d70b354285c0b9d670d722f6347f8571b7e43",
"68baa492b3b487b205ad78184e354a377230bffa97d0e4f5fefc7e7f51322a713657f16a5692e1a2349a3000964e27c9b1c30ae4d7b64c456196f0659de5fe67",
"7342908b5912da0b964b93f4ae5e6a32acfd518cd1d3d0e8ec9c21f24fea7750297e66f3e9a2537dfbdcbb47d0bdd1c88ee836b173154fc1c9079b71b269d76b",
"23208347466807ef44481b0237b2d6fc0cbec0fa39ac948c94d54991d577ca617cbc2fa1c36338664bc20cd1873bb5d712b9e731489c6642700498c488f48547",
"331251d58e976ed7818aa6dd99091c6ce628bb950b43b280ba1e12be2f9ad3469b866e40b593c964c0799e8ba590c8d7ebe9ae2d8deeb819fd47bb8384fcb84a",
"d13f725492298ab7161742b3974ea02b90597c9acdc720e3aec5a214c61eef0f14148909d6cb756bf18a8aebff36af87d335fc5f24e09874a826afd8454a9772",
"301b9cc517fcb92be39020194ac8b02313fa04f1dd79d8e6e246752f63a2f93b226a201715cd77cbf945f4847d1438c92b4e5b81d781bd629bffd04cc65bf766",
"cb9a0ec8fc5c3362d7d87d1f1809e101d6232e78b29c7968ca461bb1c2cdae70d0d145f0c81c12b111ebc3fb2330b82589a1528efc1ee1f2d700b204f963f867",
"d003e57ef6bfa01788c9c12aa4bb0057754aec75b5f34221a8fe9dedf5041230de4d5bb18f74ee339f68d48f9343b4e7ea0929cf392ab086bc286a8fe6fcd65c",
"c1c60227a37e97eb9e728c71688b3fbb3cabddacf23131a34c35c8b2af90ca5bd945932d5fba71f059fb4a01375cabed28d7706213ad7b185eccc2fc204c5643",
"558ff7eb60c7d9cc41f2ddd393610e149c7c18c7d525d9deb32220164160116b5e13363255fb504385e0178c0f1402ec4a87192a34ae61a4ce71f50445c27d5b",
"50749dc74260fa3af68763b4ff5b00baabad9f639045864dba6b50a429f3db6d0fa680e7e8c4fa2269180ba30e3a399b897a1b91f5ff06c89eb74cbcf13c146b",
"0f4cfdc38ebc99beb86a70fb4f76ac4783d30ff03b02233b3679a395ecb2440b2ac0810a5343beb3e83e8edf589bf1f44e08666fa9c38f50c51a67a45df34b49",
"2f1e1f3c4343edfbb15a0c1a3e9d1444fb8572cc2f1671ad7dd3e762c6c9be6f7fea468bda8c62ec294bfd4af3aea18e148afaa4095071c9f7e1a092ba5dab6f",
"925deffc721454144784d93d4cb1bef3c3bff645245835a962d77f07dc99a431db0f27d1cb2c871b204dd2c7275130243600e2cbeddf398346c57fb8b18bf547",
"ced197f92b1ef24c42675d3d917374bb51711cee94e9165d58f7c9a4a85ec0474a94d1c8ed3d59b89db805fae20252f40ff9e905d63416b44a97500cac11eb4f",
"c8f46c8e2a989759f08ef8364d9def127f840eb445313fec1616afa039c7b2699866c14cd9754b7b8dd420e03b4834bed7107a472e8ccf24a63699f27f441e5b",
"83acfa747b532fa367de37d99e3d8c2d89e455546d9eddd5dd59becfd799550400e07c6fb770eb189351dae4291228df60b7cb8979a3b1cf9e4ca71daf352c3c",
"71e17360fb9d8cc6575517cfc498d0271bc8d1112d36cd79c3a2d7a6b4ec8041731b5f6fd5cf000e1d78603b20910e3b115df4af6b3f5cab62da139e04102c3e",
"25ecaaea3a3f78dc2c4c1857ef3622f0bd3d17a7972b4490fc4cc529762a9e6a29d68026ce68bf960c2565da18d72493f56e904ce2f2d1be65fe8367627f8e42",
"060b40c5e248e14be078dafb8aab3d749d42aa86413a7c54b871b803aeeb1e08348aecb913810f1aa9ecd5f4def09962fa34bb963c78c86af991b0fac109d752",
"745d63b8b43df89338b57f54febb8478c1310ddd47326bda67dfd7571b89cf707c17c216483d7455e460d1f400dae00cddaa0a684999795ac7895e1cdb07016d",
"18373bf345baaf000ee5cf37ad0166f52a3c40f2a48cd90ac31d634feeac834f3d8f3ace5831e322642a2397f63f7776ce4621bb385cd046d5150f6bdf3ae86e",
"2f6235e8332b6b9e08b3ae7ff1375113c162ea7a50e1096b6628da8c5f45213461b071cc8965ae9337f1a73ff8a96af4956e9b25284462be3a55ffc998622c6c",
"04c3d57b1e0c97d0aa1c6bb8eb226e8fd6dc7b9fbd1261ddc8a134fd126d9302d8cab1e2a60cb8dd58d88cd0051052569211bcdff65cad637a72d8aaf0c30c4a",
"2be1ffad6b5d85c813396bb4b52c1411c7cea313a4abb209f5c8318104235d6c50810521059a0554f2d44aab900c1cf789891bbfced192a504c534738e34945b",
"60ec452022194b8300b992b435c06a89b3a63c0879efa93a2e21bca47eeed63915703f439a2b21f8f4f21ca08d784ceeb65e468b1ca4b36aca77190743aee93b",
"b06512ef4beff17fa2905178e9ce7d2921d17a2cad3f4ff58374575a189a03729215959a067f1719d78562d668c65372fe613ed0f77dc0c3b061b335f7ec1042",
"f7e6feb765c0841d71d448ad6792d7634b1bb5ac61f148cf3f349ecf0aa93645c76f14a8f1ea176e25aeb3f6de2dbae801b96942da353e8d3a6549fc9c59a050",
"6c3ede962b417a4a24c38320c9019714185c1247012d9664322b76301bb38d2668fb3f2a795447d2c70b9378291d25229c5c779d988a9a450e3137b6272b575c",
"377dbcbf6ef06c40ce8ce7ceb286acb6320ad669230c18a7a16595003aa39940baaef4eb1bbffe1243e19802949c2fde8c3a32e4032d1a21113c7e3d359a5d6e",
"836487c6b41a355c3cc0bb1d8feb7b4893d22dae512556c4c1620f339326152fbdcf39a7c2efd45cf2e7a86de75391499b3fea41eea2772a324fbbd937070865",
"42e18339e74d3b66b05611bf70d056458856063175f1c2aeadac440c7e637f6a489926b045f9f2fc47b5725a0eadeb8b28d6e5e6e35af710b9fa9d2cb9c06750",
"38a5622b96351b2c7c0342c4e1e901f9cff1e9867ace9582ea3ef6b23c0d9e69e9657b0420d1286119408a7f7d0e746f2950490d45c4af1eba250f81a3e0c155",
"618123bc6705e2bfd83591001839caf0749268fbf740392dd3803a0e79aed6113bb73be06a1cc06e82f874a4529f26dc8f174c4bf579eb715a71f831cf06395e",
"c0d1111f8be7e2264e15287ef2dedcc568e674444ccaff208abce3b66b5f92035e8cdc759f0cc06ab4d17145603e235788d183f85731397b4217c2b00b81306f",
"0c2e121bd75396bb642a1d3d65ea80ccb0187855195762eff1ec01b79350930ad12455d6bf5cf0e764d6dccc841c3a3dcf89c9babde998432d4f77b0cc26f946",
"e5e5a8047760bc16b74b4657b32dc19a10d896828754f19ce3c7faf9f60ebb1b402b4c5fe12d3323af4b6ec20bb34751055ce3040458f3cb3469eb82b2053268",
"136b2c89c1dcab4af2c0efa3839eae755f1261845af1ec4c76c4c9643a419b4804f8bbb2e667087e6f71017c6c2b681c0d96ed03f78ab03ae364b60464fc3868",
"baeeab3f6bf8f95c0a6e3d612fd91bbb71fbfa5f729694ef383bb8d55897d4196ec955292bae8173583a133bc662142a5f69907b67777751f843f590ac32b859",
"4d702f826a67ca55f630adc5f178c3617108df1956968c5af98106659fd2c904525875ce35cdf16518fac959cf3707a7ce3d72aebcdccd0544f895debc976370",
"bf4c76aff5fdb8838995aca98a71bdc8a2a9cb85b6de3adf2fe2876fe4f17b0fe985aed50eeb04d5d5703068c0194d175232a80eb0e0181d16ac8316a46fcd57",
"3aaa8231e729c23c2e88a0ef9a2b3519cee8878e6d42f06423f23043f6516d4b275f7f3e22bb56b0a17bb550707549b61e9cdfea19efdfe16c40afa4e0453766",
"e5382397fe622845cc12bac002949fce24883696a967192b0d7d2ee8a14fe1501e01c36bdb8d5a29625071a8aa294cfd30e015bdf30faad112682138d7fc026f",
"b5c37309d59ebc85e0d65564de29206b897b6aa401a8cb653240113a1cc693677483ab9e717bb5690493b405c3b5ab9585faf749cb22c5a08e6dd422764ae862",
"721d3aad180c0b86df9471926824eb128274ac306c33b55057f4734da78f975daf5c6c66da3dd55393e91295b913c075678ce1d0498708e9c5f5859abf2bf044",
"b7bb1fe3eb11e59cd48e206a41f69a775367a1871ff977ae3ccf3e3ffb087804d0a50ca5aa370ebce9cfd4ad7ed761f0aa49581ff4b3b42be0edbf8bac7dcd47",
"976334197b10081bd382805f5b8dba2fbb63cbfc2f86918aedb065a363747c22e8de8edb57105ecb7cdd8203799cc4eb30dd83b4dd30a650050cf75a3fb87f6e",
"86ce90c5084ac5a80ff8beb45f5e4010dc42dc5502b2dda208a69b999b0b1a36faf3d5eaaba55412e4fbcee2f2d010f809ee6c325bdf2e7ae236d667b8e2f644",
"eec58acb539362faab2c8b41cd271f87a316c7b0850cda1a70868ba61d0b5203b1bc7a7f29469b63e6b3328c6bb035e1ce5c0b73db250efc2a361b7336eabb5b",
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | true |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/ipa-multipoint/src/crs.rs | ipa-multipoint/src/crs.rs | use crate::{default_crs, ipa::slow_vartime_multiscalar_mul, lagrange_basis::LagrangeBasis};
use banderwagon::{try_reduce_to_element, Element};
/// Common Reference String (CRS) for the polynomial commitment scheme.
///
/// Holds `n` basis group elements `G` and one auxiliary element `Q`
/// (set to the prime-subgroup generator by [`CRS::new`]).
#[allow(non_snake_case)]
#[derive(Debug, Clone)]
pub struct CRS {
    /// Number of basis elements; the maximum vector length that can be committed to.
    pub n: usize,
    /// Basis group elements the committed scalars are multiplied against.
    pub G: Vec<Element>,
    /// Auxiliary group element used by the inner-product argument.
    pub Q: Element,
}
impl Default for CRS {
    /// Builds the CRS from the hex-encoded points embedded in the crate.
    fn default() -> Self {
        Self::from_hex(&default_crs::HEX_ENCODED_CRS)
    }
}
impl CRS {
    #[allow(non_snake_case)]
    /// Deterministically derives `n` basis points `G` from `seed`; `Q` is set to
    /// the prime-subgroup generator.
    pub fn new(n: usize, seed: &'static [u8]) -> CRS {
        // TODO generate the Q value from the seed also
        // TODO: this will also make assert_dedup work as expected
        // TODO: since we should take in `Q` too
        let G: Vec<_> = generate_random_elements(n, seed).into_iter().collect();
        let Q = Element::prime_subgroup_generator();
        // Sanity check: the derived basis must not contain duplicate points.
        CRS::assert_dedup(&G);
        CRS { n, G, Q }
    }
    /// Returns the maximum number of elements that can be committed to
    pub fn max_number_of_elements(&self) -> usize {
        self.n
    }
    #[allow(non_snake_case)]
    // The last element is implied to be `Q`
    /// Deserializes a CRS from uncompressed 64-byte point encodings.
    /// Panics if `bytes` is empty.
    pub fn from_bytes(bytes: &[[u8; 64]]) -> CRS {
        let (q_bytes, g_vec_bytes) = bytes
            .split_last()
            .expect("bytes vector should not be empty");
        let Q = Element::from_bytes_unchecked_uncompressed(*q_bytes);
        let G: Vec<_> = g_vec_bytes
            .iter()
            .map(|bytes| Element::from_bytes_unchecked_uncompressed(*bytes))
            .collect();
        let n = G.len();
        CRS { G, Q, n }
    }
    /// Deserializes a CRS from hex-encoded uncompressed points (last entry is `Q`).
    /// Panics on invalid hex or on an entry that is not exactly 64 bytes.
    pub fn from_hex(hex_encoded_crs: &[&str]) -> CRS {
        let bytes: Vec<[u8; 64]> = hex_encoded_crs
            .iter()
            .map(|hex| hex::decode(hex).unwrap())
            .map(|byte_vector| byte_vector.try_into().unwrap())
            .collect();
        CRS::from_bytes(&bytes)
    }
    /// Serializes the CRS as uncompressed 64-byte points: all of `G`, then `Q` last.
    pub fn to_bytes(&self) -> Vec<[u8; 64]> {
        let mut bytes = Vec::with_capacity(self.n + 1);
        for point in &self.G {
            bytes.push(point.to_bytes_uncompressed());
        }
        bytes.push(self.Q.to_bytes_uncompressed());
        bytes
    }
    /// Hex-encodes the serialized CRS, one string per point.
    pub fn to_hex(&self) -> Vec<String> {
        self.to_bytes().iter().map(hex::encode).collect()
    }
    // Asserts that none of the generated points are the same
    fn assert_dedup(points: &[Element]) {
        use std::collections::HashSet;
        let mut map = HashSet::new();
        for point in points {
            let value_is_new = map.insert(point.to_bytes());
            assert!(value_is_new, "crs has duplicated points")
        }
    }
    /// Commits to a polynomial in evaluation form: the multiscalar multiplication
    /// of its evaluations against the basis `G`.
    pub fn commit_lagrange_poly(&self, polynomial: &LagrangeBasis) -> Element {
        slow_vartime_multiscalar_mul(polynomial.values().iter(), self.G.iter())
    }
}
impl std::ops::Index<usize> for CRS {
    type Output = Element;

    /// Returns the `index`-th basis element of `G`; panics when out of range.
    fn index(&self, index: usize) -> &Self::Output {
        let basis: &[Element] = &self.G;
        &basis[index]
    }
}
/// Deterministically derives `num_required_points` group elements from `seed`
/// by rejection sampling: hash `seed || i` (big-endian counter) with SHA-256
/// and try to interpret the digest as a group element, skipping digests that
/// do not decode.
///
/// Removed the dead local `_choose_largest`, which was never read.
fn generate_random_elements(num_required_points: usize, seed: &'static [u8]) -> Vec<Element> {
    use sha2::{Digest, Sha256};

    // Hash the seed together with a counter to get candidate bytes for index i.
    let hash_to_x = |index: u64| -> Vec<u8> {
        let mut hasher = Sha256::new();
        hasher.update(seed);
        hasher.update(index.to_be_bytes());
        hasher.finalize().to_vec()
    };

    (0u64..)
        .map(hash_to_x)
        .filter_map(|hash_bytes| try_reduce_to_element(&hash_bytes))
        .take(num_required_points)
        .collect()
}
#[test]
// Pins the derived points (and a running SHA-256 over all of them) to known
// test vectors so any change to the point-derivation strategy is caught.
fn crs_consistency() {
    // TODO: update hackmd as we are now using banderwagon and the point finding strategy
    // TODO is a bit different
    // See: https://hackmd.io/1RcGSMQgT4uREaq1CCx_cg#Methodology
    use sha2::{Digest, Sha256};
    let points = generate_random_elements(256, b"eth_verkle_oct_2021");
    let bytes = points[0].to_bytes();
    assert_eq!(
        hex::encode(bytes),
        "01587ad1336675eb912550ec2a28eb8923b824b490dd2ba82e48f14590a298a0",
        "the first point is incorrect"
    );
    let bytes = points[255].to_bytes();
    assert_eq!(
        hex::encode(bytes),
        "3de2be346b539395b0c0de56a5ccca54a317f1b5c80107b0802af9a62276a4d8",
        "the 256th (last) point is incorrect"
    );
    // Hash every compressed point encoding, in order, and compare the digest.
    let mut hasher = Sha256::new();
    for point in &points {
        let bytes = point.to_bytes();
        hasher.update(bytes);
    }
    let bytes = hasher.finalize().to_vec();
    assert_eq!(
        hex::encode(bytes),
        "1fcaea10bf24f750200e06fa473c76ff0468007291fa548e2d99f09ba9256fdb",
        "unexpected point encountered"
    );
}
#[test]
// Round-trip test: serializing, deserializing, and re-serializing the CRS must
// yield identical bytes.
//
// Removed a leftover `dbg!` (and the hex encoding it forced) that spammed test
// output and did useless allocation work.
fn load_from_bytes_to_bytes() {
    let crs = CRS::new(256, b"eth_verkle_oct_2021");
    let bytes = crs.to_bytes();
    let crs2 = CRS::from_bytes(&bytes);
    let bytes2 = crs2.to_bytes();
    assert_eq!(bytes, bytes2, "bytes should be the same");
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/ipa-multipoint/src/main.rs | ipa-multipoint/src/main.rs | use banderwagon::{trait_defs::*, Fr};
use ipa_multipoint::committer::{Committer, DefaultCommitter};
use ipa_multipoint::crs::CRS;
use std::time::Instant;
/// Micro-benchmark for Pedersen hashing (`commit_lagrange`) at input lengths
/// 1, 2, 4, ..., 256, printing the mean time per commitment.
///
/// Fixes: dropped the explicit zero-fill of the tail of each vector — the
/// arrays are already initialized to `Fr::from(0u128)` (the field's zero) —
/// and rewrote the index loop with iterators, which also removes the need for
/// the `clippy::needless_range_loop` allow.
fn main() {
    println!("Benchmarking Pedersen hashing...");

    const N: usize = 5000;
    let crs = CRS::new(256, b"eth_verkle_oct_2021");
    let committer = DefaultCommitter::new(&crs.G);

    let mut vec_len = 1;
    while vec_len <= 256 {
        println!("\twith {} elements... ", vec_len);

        // Build N input vectors; entries beyond `vec_len` stay zero.
        let mut vecs = vec![[Fr::from(0u128); 256]; N];
        for (i, vecs_i) in vecs.iter_mut().enumerate() {
            for (j, slot) in vecs_i.iter_mut().take(vec_len).enumerate() {
                *slot = Fr::from((i + j + 0x424242) as u128);
            }
        }

        // Time N commitments over the first `vec_len` entries of each vector.
        let start = Instant::now();
        for input in &vecs {
            std::hint::black_box(committer.commit_lagrange(&input[0..vec_len]));
        }
        let duration = start.elapsed();
        println!("takes {}µs", duration.as_micros() / (N as u128));

        vec_len <<= 1;
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/ipa-multipoint/src/lagrange_basis.rs | ipa-multipoint/src/lagrange_basis.rs | use banderwagon::{trait_defs::*, Fr};
use std::ops::{Add, Mul, Sub};
/// A polynomial in evaluation (Lagrange) form: `values[i]` is the evaluation
/// at domain point `i`.
#[derive(Clone, Debug)]
pub struct LagrangeBasis {
    // We assume that the domain starts at zero,
    // so we only need to supply the upperbound
    domain: usize,
    // Evaluations f(0), f(1), ..., f(domain - 1).
    values: Vec<Fr>,
}
impl Add<LagrangeBasis> for LagrangeBasis {
    type Output = LagrangeBasis;

    /// Pointwise addition of two polynomials in evaluation form.
    ///
    /// A `domain == 0` operand is the additive identity (see
    /// `LagrangeBasis::zero`), so the other operand is returned unchanged.
    fn add(mut self, rhs: LagrangeBasis) -> Self::Output {
        if self.domain == 0 {
            return rhs;
        } else if rhs.domain == 0 {
            return self;
        }
        // NOTE(review): `zip` silently stops at the shorter side, so this
        // assumes both operands share the same domain size — confirm callers
        // never mix sizes.
        self.values
            .iter_mut()
            .zip(rhs.values)
            .for_each(|(lhs, rhs)| *lhs += rhs);
        self
    }
}
impl Mul<Fr> for LagrangeBasis {
    type Output = LagrangeBasis;

    /// Scales every evaluation by the scalar `rhs`, reusing the buffer.
    fn mul(mut self, rhs: Fr) -> Self::Output {
        for value in &mut self.values {
            *value *= rhs;
        }
        self
    }
}
impl Sub<&Fr> for LagrangeBasis {
    type Output = LagrangeBasis;

    /// Subtracts the constant `rhs` from every evaluation, reusing the buffer.
    fn sub(mut self, rhs: &Fr) -> Self::Output {
        for value in &mut self.values {
            *value -= rhs;
        }
        self
    }
}
impl Sub<&Fr> for &LagrangeBasis {
    type Output = LagrangeBasis;

    /// Subtracts the constant `rhs` from every evaluation, allocating a new
    /// polynomial and leaving `self` untouched.
    fn sub(self, rhs: &Fr) -> Self::Output {
        let shifted: Vec<Fr> = self.values.iter().map(|value| *value - rhs).collect();
        LagrangeBasis::new(shifted)
    }
}
impl Sub<&LagrangeBasis> for &LagrangeBasis {
type Output = LagrangeBasis;
fn sub(self, rhs: &LagrangeBasis) -> Self::Output {
LagrangeBasis::new(
self.values
.iter()
.zip(rhs.values.iter())
.map(|(lhs, rhs)| *lhs - rhs)
.collect(),
)
}
}
/// Precomputed barycentric data for the evaluation domain, used to avoid
/// repeated field inversions during polynomial division and evaluation.
pub struct PrecomputedWeights {
    // This stores A'(x_i) and 1/A'(x_i)
    // Layout: first half A'(x_i), second half 1/A'(x_i).
    barycentric_weights: Vec<Fr>,
    // This stores 1/k for k \in [-255, 255]
    // Layout: first half 1/k for k in [1, n-1], second half -1/k.
    inverted_domain: Vec<Fr>,
}
impl PrecomputedWeights {
    // domain_size is 256 in our case
    /// Precomputes A'(x_i) and 1/A'(x_i) for every domain point, plus 1/k and
    /// -1/k for k in [1, domain_size - 1].
    pub fn new(domain_size: usize) -> PrecomputedWeights {
        // Layout: [A'(x_0)..A'(x_{n-1}), 1/A'(x_0)..1/A'(x_{n-1})]
        let mut barycentric_weights = vec![Fr::zero(); domain_size * 2];
        let midpoint = domain_size;
        for x_i in 0..domain_size {
            // computes A'(x_i)
            let a_x_i = PrecomputedWeights::compute_barycentric_weight_for(x_i, domain_size);
            barycentric_weights[x_i] = a_x_i;
            barycentric_weights[x_i + midpoint] = a_x_i.inverse().unwrap()
        }
        // We do not have 1/0 , so the domain_size for these are one less
        // Layout: [1/1..1/(n-1), -1/1..-1/(n-1)]
        let mut inverted_domain = vec![Fr::zero(); (domain_size - 1) * 2];
        let midpoint = domain_size - 1;
        for x_i in 1..domain_size {
            let k = Fr::from(x_i as u128).inverse().unwrap();
            inverted_domain[x_i - 1] = k;
            inverted_domain[x_i - 1 + midpoint] = -k
        }
        PrecomputedWeights {
            barycentric_weights,
            inverted_domain,
        }
    }
    /// Returns 1/domain_element, or -1/domain_element when `is_negative`.
    /// `domain_element` must be in [1, domain_size - 1]; 0 would underflow.
    pub fn get_inverted_element(&self, domain_element: usize, is_negative: bool) -> Fr {
        let mut index = domain_element - 1;
        if is_negative {
            // Negated inverses live in the second half of the table.
            index += self.inverted_domain.len() / 2;
        }
        self.inverted_domain[index]
    }
    // computes A'(x_m) / A'(x_i)
    pub fn get_ratio_of_barycentric_weights(&self, m: usize, i: usize) -> Fr {
        // A'(x_m) * (1/A'(x_i)); the inverse is read from the second half.
        self.barycentric_weights[m]
            * self.barycentric_weights[i + (self.barycentric_weights.len() / 2)]
    }
    // gets A'(x_i)
    pub fn get_barycentric_weight(&self, i: usize) -> Fr {
        self.barycentric_weights[i]
    }
    // gets 1 / A'(x_i)
    pub fn get_inverse_barycentric_weight(&self, i: usize) -> Fr {
        self.barycentric_weights[i + (self.barycentric_weights.len() / 2)]
    }
    // A'(x_j) where x_j = domain_element
    /// The derivative of the vanishing polynomial A(X) = prod_i (X - x_i)
    /// evaluated at x_j, i.e. prod_{i != j} (x_j - x_i).
    pub fn compute_barycentric_weight_for(domain_element: usize, domain_size: usize) -> Fr {
        let domain_element_fr = Fr::from(domain_element as u128);
        // First generate all of the values in the domain [0,domain_size]
        // then remove the element that we are computing the weight for from the range
        let weight: Fr = (0..domain_size)
            .filter(|element| element != &domain_element)
            .map(|element| Fr::from(element as u128))
            .map(|element| domain_element_fr - element)
            .product();
        weight
    }
}
impl LagrangeBasis {
    /// Wraps a vector of evaluations; the domain size is `values.len()`.
    pub fn new(values: Vec<Fr>) -> LagrangeBasis {
        let domain = values.len();
        LagrangeBasis { domain, values }
    }
    // This is used so that we can use fold, it is never called outside of that context
    // (a `domain == 0` value acts as the additive identity in `Add`).
    pub(crate) fn zero() -> LagrangeBasis {
        LagrangeBasis {
            domain: 0,
            values: vec![],
        }
    }
    // A'(x_j) where x_j = domain_element
    #[deprecated(note = "Use PrecomputedWeights::compute_barycentric_weight_for instead")]
    #[allow(unused)]
    pub(crate) fn compute_barycentric_weight_for(&self, domain_element: usize) -> Fr {
        let domain_element_fr = Fr::from(domain_element as u128);
        let domain_size = self.domain;
        // First generate all of the values in the domain [0,domain_size]
        // then remove the element that we are computing the weight for from the range
        let weight: Fr = (0..domain_size)
            .filter(|element| element != &domain_element)
            .map(|element| Fr::from(element as u128))
            .map(|element| domain_element_fr - element)
            .product();
        weight
    }
    // XXX: Maybe rename this to `divide on domain` or `divide on linear domain`
    // computes f(x) - f(x_i) / x - x_i where x_i is an element in the domain
    // The quotient's value at the removed point itself (q[index]) is recovered
    // from the other evaluations via barycentric weight ratios.
    pub(crate) fn divide_by_linear_vanishing(
        &self,
        precomp: &PrecomputedWeights,
        index: usize,
    ) -> LagrangeBasis {
        let mut q = vec![Fr::zero(); self.domain];
        let y = self.values[index];
        for i in 0..self.domain {
            if i != index {
                // den = i - index; its sign selects the negated inverse table entry.
                let den = i32::try_from(i).unwrap() - i32::try_from(index).unwrap();
                let is_negative = den < 0;
                let den = den.abs();
                let den_inv = precomp.get_inverted_element(den as usize, is_negative);
                // q_i = (f(x_i) - f(x_index)) / (x_i - x_index)
                let q_i = (self.values[i] - y) * den_inv;
                q[i] = q_i;
                let weight_ratio = precomp.get_ratio_of_barycentric_weights(index, i);
                q[index] -= weight_ratio * q_i
            }
        }
        LagrangeBasis::new(q)
    }
    /// Evaluates the polynomial at domain point `index` (a direct table lookup).
    pub fn evaluate_in_domain(&self, index: usize) -> Fr {
        self.values[index]
    }
    /// Borrows the underlying evaluations.
    pub(crate) fn values(&self) -> &[Fr] {
        &self.values
    }
    // We use this method to compute L_i(z) where z is not in the domain
    // using the barycentric form: L_i(z) = A(z) / (A'(x_i) * (z - x_i)).
    pub(crate) fn evaluate_lagrange_coefficients(
        precomp: &PrecomputedWeights,
        domain_size: usize,
        point: Fr,
    ) -> Vec<Fr> {
        // Start with A'(x_i) * (z - x_i); the batch inversion below (scaled by
        // A(z)) turns each entry into A(z) / (A'(x_i) * (z - x_i)).
        let mut lagrange_evaluations: Vec<_> = (0..domain_size)
            .map(|i| precomp.get_barycentric_weight(i) * (point - Fr::from(i as u128)))
            .collect();
        // A(z) = prod_i (z - x_i)
        let a_z: Fr = (0..domain_size)
            .map(|i| Fr::from(i as u128))
            .map(|element| point - element)
            .product();
        batch_inversion_and_mul(&mut lagrange_evaluations, &a_z);
        lagrange_evaluations
    }
}
#[cfg(test)]
mod tests {
use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial};
    impl LagrangeBasis {
        #[cfg(test)]
        // TODO: Lets see if we can remove this way of testing
        // This is only for testing purposes
        // Converts from evaluation form to coefficient form via Lagrange
        // interpolation (O(n^2) helper below).
        pub(crate) fn interpolate(&self) -> ark_poly::univariate::DensePolynomial<Fr> {
            let domain: Vec<_> = (0..self.domain).map(|i| Fr::from(i as u128)).collect();
            let points: Vec<_> = domain
                .into_iter()
                .zip(self.values.iter().cloned())
                .collect();
            let polynomial = interpolate(&points).unwrap();
            DensePolynomial::from_coefficients_vec(polynomial)
        }
        // Evaluates at `point` (outside the domain) via the barycentric formula:
        // f(z) = A(z) * sum_i y_i / (A'(x_i) * (z - x_i)).
        pub(crate) fn evaluate_outside_domain(
            &self,
            precomp: &PrecomputedWeights,
            point: Fr,
        ) -> Fr {
            let mut summand = Fr::zero();
            // z - x_i
            let mut point_minus_domain: Vec<_> = (0..self.domain)
                .map(|i| point - Fr::from(i as u128))
                .collect();
            batch_inversion(&mut point_minus_domain);
            for (x_i, (y_i, inv_z_min_xi)) in self.values.iter().zip(point_minus_domain).enumerate()
            {
                let weight = precomp.get_inverse_barycentric_weight(x_i);
                let term = weight * y_i * inv_z_min_xi;
                summand += term;
            }
            // A(z) = prod_i (z - x_i)
            let a_z: Fr = (0..self.domain)
                .map(|i| Fr::from(i as u128))
                .map(|element| point - element)
                .product();
            summand * a_z
        }
    }
use super::*;
#[test]
fn basic_interpolation() {
use ark_poly::Polynomial;
let p1 = Fr::from(8u128);
let p2 = Fr::from(2u128);
let lag_poly = LagrangeBasis::new(vec![p1, p2]);
let coeff_poly = lag_poly.interpolate();
let got_p1 = coeff_poly.evaluate(&Fr::from(0u128));
let got_p2 = coeff_poly.evaluate(&Fr::from(1u128));
assert_eq!(got_p1, p1);
assert_eq!(got_p2, p2);
}
#[test]
fn simple_eval_outside_domain() {
use ark_poly::Polynomial;
let numerator_lag =
LagrangeBasis::new(vec![-Fr::from(2), Fr::from(0), Fr::from(12), Fr::from(40)]);
let numerator_coeff = numerator_lag.interpolate();
let precomp = PrecomputedWeights::new(numerator_lag.domain);
let point = Fr::from(300u128);
let got = numerator_lag.evaluate_outside_domain(&precomp, point);
let expected = numerator_coeff.evaluate(&point);
assert_eq!(got, expected);
// Another way to evaluate a point not in the domain,
// is to compute the lagrange coefficients first and then take the inner product of those and
// the evaluation points
let lag_evals =
LagrangeBasis::evaluate_lagrange_coefficients(&precomp, numerator_lag.domain, point);
let mut got = Fr::zero();
for (l_i, y_i) in lag_evals.into_iter().zip(numerator_lag.values().iter()) {
got += l_i * y_i
}
assert_eq!(got, expected)
}
#[test]
fn simple_division() {
let domain_size = 4;
// (X-1))(X+1)(X+2)
let numerator_lag =
LagrangeBasis::new(vec![-Fr::from(2), Fr::from(0), Fr::from(12), Fr::from(40)]);
let numerator_coeff = numerator_lag.interpolate();
// X - 1
let index = 1;
let denom_coeff = DensePolynomial::from_coefficients_vec(vec![-Fr::one(), Fr::one()]);
let precomp = PrecomputedWeights::new(domain_size);
let quotient_lag = numerator_lag.divide_by_linear_vanishing(&precomp, index);
let quotient_coeff = quotient_lag.interpolate();
let quotient_expected = &numerator_coeff / &denom_coeff;
assert_eq!(quotient_expected, quotient_coeff)
}
    // Taken from sapling-crypto -- O(n^2)
    // Classic Lagrange interpolation in coefficient form: returns the
    // coefficients (lowest degree first) of the unique polynomial through
    // `points`. Requires at least two points.
    fn interpolate(points: &[(Fr, Fr)]) -> Option<Vec<Fr>> {
        let max_degree_plus_one = points.len();
        assert!(
            max_degree_plus_one >= 2,
            "should interpolate for degree >= 1"
        );
        let mut coeffs = vec![Fr::zero(); max_degree_plus_one];
        // external iterator
        for (k, p_k) in points.iter().enumerate() {
            let (x_k, y_k) = p_k;
            // coeffs from 0 to max_degree - 1
            // `contribution` accumulates the coefficients of prod_{j != k} (X - x_j).
            let mut contribution = vec![Fr::zero(); max_degree_plus_one];
            let mut denominator = Fr::one();
            let mut max_contribution_degree = 0;
            // internal iterator
            for (j, p_j) in points.iter().enumerate() {
                let (x_j, _) = p_j;
                if j == k {
                    continue;
                }
                // denominator *= (x_k - x_j)
                let mut diff = *x_k;
                diff -= x_j;
                denominator *= diff;
                if max_contribution_degree == 0 {
                    // First factor: initialize `contribution` to (X - x_j).
                    max_contribution_degree = 1;
                    *contribution
                        .get_mut(0)
                        .expect("must have enough coefficients") -= x_j;
                    *contribution
                        .get_mut(1)
                        .expect("must have enough coefficients") += Fr::one();
                } else {
                    // Multiply the running product by (X - x_j):
                    // shift up one degree, then add the product scaled by -x_j.
                    let mul_by_minus_x_j: Vec<Fr> = contribution
                        .iter()
                        .map(|el| {
                            let mut tmp = *el;
                            tmp *= x_j;
                            -tmp
                        })
                        .collect();
                    contribution.insert(0, Fr::zero());
                    contribution.truncate(max_degree_plus_one);
                    assert_eq!(mul_by_minus_x_j.len(), max_degree_plus_one);
                    for (i, c) in contribution.iter_mut().enumerate() {
                        let other = mul_by_minus_x_j
                            .get(i)
                            .expect("should have enough elements");
                        *c += other;
                    }
                }
            }
            denominator = denominator.inverse().expect("denominator must be non-zero");
            // Accumulate y_k * contribution / denominator into the result.
            for (i, this_contribution) in contribution.into_iter().enumerate() {
                let c = coeffs.get_mut(i).expect("should have enough coefficients");
                let mut tmp = this_contribution;
                tmp *= denominator;
                tmp *= y_k;
                *c += tmp;
            }
        }
        Some(coeffs)
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/ipa-multipoint/src/transcript.rs | ipa-multipoint/src/transcript.rs | use banderwagon::{trait_defs::*, Element, Fr};
/// Fiat–Shamir transcript operations used by the proving and verification
/// protocols.
pub trait TranscriptProtocol {
    /// Compute a `label`ed challenge variable.
    fn challenge_scalar(&mut self, label: &'static [u8]) -> Fr;
    /// Absorb a group element into the transcript under `label`.
    fn append_point(&mut self, label: &'static [u8], point: &Element);
    /// Absorb a scalar into the transcript under `label`.
    fn append_scalar(&mut self, label: &'static [u8], point: &Fr);
    /// Absorb a bare domain-separation label.
    fn domain_sep(&mut self, label: &'static [u8]);
}
use sha2::{Digest, Sha256};
/// A simple byte-accumulator transcript; challenges are derived by hashing the
/// accumulated state with SHA-256 (see the `TranscriptProtocol` impl below).
pub struct Transcript {
    // All bytes absorbed since the last challenge (or since construction).
    state: Vec<u8>,
}
impl Transcript {
    /// Starts a transcript whose initial state is `label`.
    pub fn new(label: &'static [u8]) -> Transcript {
        // TODO: add a with capacity method, so we don't reallocate alot
        let mut state = Vec::new();
        state.extend(label);
        Transcript { state }
    }
    // Note: despite the parameter order, the label is absorbed *before* the
    // message bytes — the transcript test vectors depend on this ordering.
    fn append_message(&mut self, message: &[u8], label: &'static [u8]) {
        self.state.extend(label);
        self.state.extend(message);
    }
    // TODO: Add this to the other implementations! or most likely, we just need to add
    // TODO sub protocol specific domain separators ipa_domain_sep(n) and under the roof
    // TODO it adds the ipa label and the argument size n
    /// Absorbs a u64 (big-endian encoding) under `label`.
    pub fn append_u64(&mut self, label: &'static [u8], number: u64) {
        self.state.extend(label);
        self.state.extend(number.to_be_bytes());
    }
}
impl TranscriptProtocol for Transcript {
    /// Hashes the current state to derive a challenge scalar, clears the state,
    /// and re-seeds the transcript by absorbing the derived scalar under `label`
    /// so that subsequent challenges depend on this one.
    fn challenge_scalar(&mut self, label: &'static [u8]) -> Fr {
        self.domain_sep(label);
        // Hash entire transcript state
        let mut sha256 = Sha256::new();
        sha256.update(&self.state);
        let hash: Vec<u8> = sha256.finalize_reset().to_vec();
        // Clear the state
        self.state.clear();
        // Reduce the 32-byte digest into the scalar field (little-endian, mod order).
        let scalar = Fr::from_le_bytes_mod_order(&hash);
        self.append_scalar(label, &scalar);
        scalar
    }
    /// Absorbs the compressed (32-byte) encoding of a point under `label`.
    fn append_point(&mut self, label: &'static [u8], point: &Element) {
        let mut bytes = [0u8; 32];
        point.serialize_compressed(&mut bytes[..]).unwrap();
        self.append_message(&bytes, label)
    }
    /// Absorbs the compressed (32-byte) encoding of a scalar under `label`.
    fn append_scalar(&mut self, label: &'static [u8], scalar: &Fr) {
        let mut bytes = [0u8; 32];
        scalar.serialize_compressed(&mut bytes[..]).unwrap();
        self.append_message(&bytes, label)
    }
    /// Absorbs a bare label with no payload.
    fn domain_sep(&mut self, label: &'static [u8]) {
        self.state.extend(label)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    // Two consecutive challenges with the same label must differ, since each
    // challenge is absorbed back into the transcript.
    fn test_vector_0() {
        let mut tr = Transcript::new(b"simple_protocol");
        let first_challenge = tr.challenge_scalar(b"simple_challenge");
        let second_challenge = tr.challenge_scalar(b"simple_challenge");
        // We can never even accidentally, generate the same challenge
        assert_ne!(first_challenge, second_challenge)
    }
    #[test]
    // Pins the first challenge of a fresh transcript to a known test vector.
    fn test_vector_1() {
        let mut tr = Transcript::new(b"simple_protocol");
        let first_challenge = tr.challenge_scalar(b"simple_challenge");
        let expected = "c2aa02607cbdf5595f00ee0dd94a2bbff0bed6a2bf8452ada9011eadb538d003";
        let got = scalar_to_hex(&first_challenge);
        assert_eq!(got, expected)
    }
    #[test]
    // Pins the challenge after absorbing two scalars to a known test vector.
    fn test_vector_2() {
        let mut tr = Transcript::new(b"simple_protocol");
        let five = Fr::from(5_u128);
        tr.append_scalar(b"five", &five);
        tr.append_scalar(b"five again", &five);
        let challenge = tr.challenge_scalar(b"simple_challenge");
        let expected = "498732b694a8ae1622d4a9347535be589e4aee6999ffc0181d13fe9e4d037b0b";
        let got = scalar_to_hex(&challenge);
        assert_eq!(got, expected)
    }
    #[test]
    // Pins a transcript mixing scalars and bare domain separators.
    fn test_vector_3() {
        let mut tr = Transcript::new(b"simple_protocol");
        let one = Fr::from(1_u128);
        let minus_one = -one;
        tr.append_scalar(b"-1", &minus_one);
        tr.domain_sep(b"separate me");
        tr.append_scalar(b"-1 again", &minus_one);
        tr.domain_sep(b"separate me again");
        tr.append_scalar(b"now 1", &one);
        let challenge = tr.challenge_scalar(b"simple_challenge");
        let expected = "14f59938e9e9b1389e74311a464f45d3d88d8ac96adf1c1129ac466de088d618";
        let got = scalar_to_hex(&challenge);
        assert_eq!(got, expected)
    }
    #[test]
    // Pins the challenge after absorbing a group element.
    fn test_vector_4() {
        let mut tr = Transcript::new(b"simple_protocol");
        let generator = Element::prime_subgroup_generator();
        tr.append_point(b"generator", &generator);
        let challenge = tr.challenge_scalar(b"simple_challenge");
        let expected = "8c2dafe7c0aabfa9ed542bb2cbf0568399ae794fc44fdfd7dff6cc0e6144921c";
        let got = scalar_to_hex(&challenge);
        assert_eq!(got, expected)
    }
    // Hex-encodes the 32-byte compressed serialization of a scalar.
    fn scalar_to_hex(s: &Fr) -> String {
        let mut bytes = [0u8; 32];
        s.serialize_compressed(&mut bytes[..]).unwrap();
        hex::encode(bytes)
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/ipa-multipoint/benches/benchmark_main.rs | ipa-multipoint/benches/benchmark_main.rs | use criterion::criterion_main;
mod benchmarks;
// Aggregates all benchmark groups into the single Criterion-generated `main`.
criterion_main! {
    benchmarks::ipa_prove::benches,
    benchmarks::ipa_verify::benches,
    benchmarks::multipoint_verify::benches,
    benchmarks::multipoint_prove::benches,
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/ipa-multipoint/benches/benchmarks/multipoint_verify.rs | ipa-multipoint/benches/benchmarks/multipoint_verify.rs | use ark_std::UniformRand;
use banderwagon::Fr;
use criterion::BenchmarkId;
use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion};
use ipa_multipoint::crs::CRS;
use ipa_multipoint::lagrange_basis::*;
use ipa_multipoint::multiproof::*;
use ipa_multipoint::transcript::Transcript;
/// Benchmarks multipoint-proof verification (domain size 256) for a growing
/// number of opened polynomials. The proof is created once per size up front;
/// only `check` is inside the timed region.
pub fn criterion_benchmark(c: &mut Criterion) {
    let mut group = c.benchmark_group("multipoint - verify (256)");
    use ark_std::test_rng;
    // Setup parameters, n is the degree + 1
    // CRs is the G_Vec, H_Vec, Q group elements
    let n = 256;
    let crs = CRS::new(n, b"random seed");
    let mut rng = test_rng();
    let poly = LagrangeBasis::new((0..n).map(|_| Fr::rand(&mut rng)).collect());
    let poly_comm = crs.commit_lagrange_poly(&poly);
    for num_polynomials in [1, 1_000, 2_000, 4_000, 8_000, 16_000, 128_000] {
        // For verification, we simply generate one polynomial and then clone it `num_polynomial`
        // time. whether it is the same polynomial or different polynomial does not affect verification.
        let mut polys: Vec<LagrangeBasis> = Vec::with_capacity(num_polynomials);
        for _ in 0..num_polynomials {
            polys.push(poly.clone())
        }
        // Every query opens the (identical) polynomial at domain point 1.
        let mut prover_queries = Vec::with_capacity(num_polynomials);
        for poly in polys.into_iter() {
            let point = 1;
            let y_i = poly.evaluate_in_domain(point);
            let prover_query = ProverQuery {
                commitment: poly_comm,
                poly,
                point,
                result: y_i,
            };
            prover_queries.push(prover_query);
        }
        let precomp = PrecomputedWeights::new(n);
        // Create the proof once; the timed closure only verifies it.
        let mut transcript = Transcript::new(b"foo");
        let multiproof = MultiPoint::open(
            crs.clone(),
            &precomp,
            &mut transcript,
            prover_queries.clone(),
        );
        let mut verifier_queries: Vec<VerifierQuery> = Vec::with_capacity(num_polynomials);
        for prover_query in prover_queries {
            verifier_queries.push(prover_query.into())
        }
        group.bench_with_input(
            BenchmarkId::from_parameter(num_polynomials),
            &num_polynomials,
            |b, _| {
                b.iter_batched(
                    // Fresh transcript per iteration keeps verification deterministic.
                    || Transcript::new(b"foo"),
                    |mut transcript| {
                        black_box(multiproof.check(
                            &crs,
                            &precomp,
                            &verifier_queries,
                            &mut transcript,
                        ))
                    },
                    BatchSize::SmallInput,
                )
            },
        );
    }
    group.finish();
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/ipa-multipoint/benches/benchmarks/mod.rs | ipa-multipoint/benches/benchmarks/mod.rs | pub mod ipa_prove;
pub mod ipa_verify;
pub mod multipoint_prove;
pub mod multipoint_verify;
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/ipa-multipoint/benches/benchmarks/ipa_verify.rs | ipa-multipoint/benches/benchmarks/ipa_verify.rs | use ark_std::rand::SeedableRng;
use ark_std::UniformRand;
use banderwagon::Fr;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use ipa_multipoint::crs::CRS;
use ipa_multipoint::ipa::create;
use ipa_multipoint::lagrange_basis::LagrangeBasis;
use ipa_multipoint::math_utils::{inner_product, powers_of};
use ipa_multipoint::transcript::Transcript;
use rand_chacha::ChaCha20Rng;
/// Benchmarks IPA verification for a vector of length 256, comparing the
/// multi-exponentiation verifier against the plain one. A single proof is
/// created up front; only verification is timed.
pub fn criterion_benchmark(c: &mut Criterion) {
    let n = 256;
    // Fixed seed so runs are comparable.
    let mut rng = ChaCha20Rng::from_seed([0u8; 32]);
    let a: Vec<Fr> = (0..n).map(|_| Fr::rand(&mut rng)).collect();
    let input_point = Fr::rand(&mut rng);
    // b holds the powers of `input_point` (see `powers_of`).
    let b_vec = powers_of(input_point, n);
    let output_point = inner_product(&a, &b_vec);
    let crs = CRS::new(n, "lol".as_bytes());
    let mut prover_transcript = Transcript::new(b"ip_no_zk");
    let a_lagrange = LagrangeBasis::new(a.clone());
    let a_comm = crs.commit_lagrange_poly(&a_lagrange);
    let proof = create(
        &mut prover_transcript,
        crs.clone(),
        a,
        a_comm,
        b_vec.clone(),
        input_point,
    );
    c.bench_function("ipa - verify (multi exp2 256)", |b| {
        b.iter(|| {
            // Fresh verifier transcript per iteration.
            let mut verifier_transcript = Transcript::new(b"ip_no_zk");
            black_box(proof.verify_multiexp(
                &mut verifier_transcript,
                &crs,
                b_vec.clone(),
                a_comm,
                input_point,
                output_point,
            ))
        })
    });
    c.bench_function("ipa - verify (256)", |b| {
        b.iter(|| {
            let mut verifier_transcript = Transcript::new(b"ip_no_zk");
            black_box(proof.verify(
                &mut verifier_transcript,
                crs.clone(),
                b_vec.clone(),
                a_comm,
                input_point,
                output_point,
            ))
        })
    });
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/ipa-multipoint/benches/benchmarks/ipa_prove.rs | ipa-multipoint/benches/benchmarks/ipa_prove.rs | use ark_std::rand::SeedableRng;
use ark_std::UniformRand;
use banderwagon::Fr;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use ipa_multipoint::crs::CRS;
use ipa_multipoint::ipa::create;
use ipa_multipoint::lagrange_basis::LagrangeBasis;
use ipa_multipoint::math_utils::powers_of;
use ipa_multipoint::transcript::Transcript;
use rand_chacha::ChaCha20Rng;
/// Benchmarks IPA proof creation for a vector of length 256.
pub fn criterion_benchmark(c: &mut Criterion) {
    let n = 256;
    // Fixed seed so runs are comparable.
    let mut rng = ChaCha20Rng::from_seed([0u8; 32]);
    let a: Vec<Fr> = (0..n).map(|_| Fr::rand(&mut rng)).collect();
    let input_point = Fr::rand(&mut rng);
    let b_vec = powers_of(input_point, n);
    let crs = CRS::new(n, "lol".as_bytes());
    // NOTE(review): this transcript is mutated by every `create` call, so each
    // iteration proves against a different transcript state (unlike the
    // multipoint bench, which rebuilds the transcript per iteration via
    // iter_batched) — confirm this is intended.
    let mut prover_transcript = Transcript::new(b"ip_no_zk");
    let a_lagrange = LagrangeBasis::new(a.clone());
    let a_comm = crs.commit_lagrange_poly(&a_lagrange);
    c.bench_function("ipa - prove (256)", |b| {
        b.iter(|| {
            black_box(create(
                &mut prover_transcript,
                crs.clone(),
                a.clone(),
                a_comm,
                b_vec.clone(),
                input_point,
            ))
        })
    });
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/ipa-multipoint/benches/benchmarks/multipoint_prove.rs | ipa-multipoint/benches/benchmarks/multipoint_prove.rs | use ark_std::UniformRand;
use banderwagon::Fr;
use criterion::BenchmarkId;
use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion};
use ipa_multipoint::crs::CRS;
use ipa_multipoint::lagrange_basis::*;
use ipa_multipoint::multiproof::*;
use ipa_multipoint::transcript::Transcript;
/// Benchmarks multipoint-proof creation (domain size 256) for a growing number
/// of opened polynomials.
pub fn criterion_benchmark(c: &mut Criterion) {
    let mut group = c.benchmark_group("multiproof - prove (256)");
    use ark_std::test_rng;
    // Setup parameters, n is the degree + 1
    // CRs is the G_Vec, H_Vec, Q group elements
    let n = 256;
    let crs = CRS::new(n, b"random seed");
    let mut rng = test_rng();
    let poly = LagrangeBasis::new((0..n).map(|_| Fr::rand(&mut rng)).collect());
    let poly_comm = crs.commit_lagrange_poly(&poly);
    for num_polynomials in [1, 1_000, 2_000, 4_000, 8_000, 16_000, 128_000] {
        let mut polys: Vec<LagrangeBasis> = Vec::with_capacity(num_polynomials);
        for _ in 0..num_polynomials {
            polys.push(poly.clone())
        }
        // Spread opening points across the domain so queries differ.
        let mut prover_queries = Vec::with_capacity(num_polynomials);
        for (i, poly) in polys.into_iter().enumerate() {
            let point = i % n;
            let y_i = poly.evaluate_in_domain(point);
            let prover_query = ProverQuery {
                commitment: poly_comm,
                poly,
                point,
                result: y_i,
            };
            prover_queries.push(prover_query);
        }
        let precomp = PrecomputedWeights::new(n);
        group.bench_with_input(
            BenchmarkId::from_parameter(num_polynomials),
            &num_polynomials,
            |b, _| {
                b.iter_batched(
                    // Fresh transcript and query set per iteration; the clone of
                    // the queries is setup work, outside the timed region.
                    || (Transcript::new(b"foo"), prover_queries.clone()),
                    |(mut transcript, prover_queries)| {
                        black_box(MultiPoint::open(
                            crs.clone(),
                            &precomp,
                            &mut transcript,
                            prover_queries,
                        ))
                    },
                    BatchSize::SmallInput,
                )
            },
        );
    }
    group.finish();
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/src/config.rs | verkle-trie/src/config.rs | use crate::constants::new_crs;
use ipa_multipoint::committer::DefaultCommitter;
// TODO: We may not need to have this be generic, now that we have gotten rid of
// TODO the config with precomputed points
/// Generic configuration file to initialize a verkle trie struct
#[derive(Debug, Clone)]
pub struct Config<Storage, PolyCommit> {
    /// Storage backend for the trie.
    pub db: Storage,
    /// Commitment scheme used by the trie.
    pub committer: PolyCommit,
}
pub type DefaultConfig<Storage> = Config<Storage, DefaultCommitter>;
impl<Storage> DefaultConfig<Storage> {
    /// Creates a config backed by `db`, committing with the default CRS basis.
    pub fn new(db: Storage) -> Self {
        let crs = new_crs();
        let committer = DefaultCommitter::new(&crs.G);
        Config { db, committer }
    }
}
pub type VerkleConfig<Storage> = DefaultConfig<Storage>;
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/src/errors.rs | verkle-trie/src/errors.rs | use banderwagon::trait_defs::*;
use thiserror::Error;
// Right now there are lots of unwraps which are immediately switched to Results, but in the future
// We likely be moved back to unwraps with safety comments
/// Errors raised while reading or writing verification hints.
#[derive(Debug, Error)]
pub enum HintError {
    #[error("General IO Error")]
    IoError(#[from] std::io::Error),
}
/// Errors raised while verifying a verkle proof.
#[derive(Debug, Error)]
pub enum VerificationError {
    #[error("Invalid proof supplied")]
    InvalidProof,
    #[error("Invalid Length for Updated Values")]
    UnexpectedUpdatedLength(usize, usize),
    #[error("Mismatched Length of Supplied Keys from expected")]
    MismatchedKeyLength,
    #[error("All Keys must be unique")]
    DuplicateKeys,
    #[error("Since the extension was not present in the trie, the suffix cannot have any previous values")]
    OldValueIsPopulated,
    #[error("Prefix Cannot be Empty")]
    EmptyPrefix,
}
#[derive(Debug, Error)]
pub enum ConfigError {
#[error("Precomputed Points Exist Already")]
PrecomputedPointsFileExists,
#[error("Issue opening PrecomputedPointsFile")]
FileError(std::io::Error),
#[error("Precomputed Lagrange Points File Couldn't not be found")]
PrecomputedPointsNotFound,
#[error("Serialization Either Failed or Data is Invalid")]
SerializationError(#[from] SerializationError),
}
#[derive(Debug, Error)]
pub enum ProofCreationError {
#[error("Empty Key Set")]
EmptyKeySet,
#[error("Expected to have atleast one query, which will be against the root")]
ExpectedOneQueryAgainstRoot,
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/src/proof.rs | verkle-trie/src/proof.rs | use crate::{
constants::{CRS, PRECOMPUTED_WEIGHTS},
errors::HintError,
};
use banderwagon::Element;
use ipa_multipoint::multiproof::MultiPointProof;
use ipa_multipoint::transcript::Transcript;
use std::collections::{BTreeMap, BTreeSet};
use std::io::{Read, Write};
pub mod golang_proof_format;
mod key_path_finder;
mod opening_data;
pub(crate) mod prover;
pub mod stateless_updater;
pub(crate) mod verifier;
// Every stem node has an associated extension node
// This extension node commits to all of the data in a stem
// This is needed because a stem has multiple commitments associated with it,
// ie C1, C2, stem_commitment
// TODO we could probably not use ExtPresent and use KeyState directly?
// TODO Need to check if this is fine with the Verifier algorithm
// TODO Note KeyState holds more information
/// Status of the extension node at the position where a queried key's stem
/// would live in the trie.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ExtPresent {
    /// This means that there is no extensions present at all
    /// this corresponds to the case of when the key is not in the trie
    /// and the place where we would place it is empty.
    None,
    /// This means that there is an extension for a stem in the place where we would insert this key
    /// but it is not the stem for the key in question
    /// This also corresponds to the case where the key is not in the trie
    DifferentStem,
    /// This means there is an extension for the stem for the key in question.
    /// Note: This does not tell us if the key is in the trie.
    Present,
}
// Auxillary data that the verifier needs in order to reconstruct the verifier queries
/// Auxiliary data the verifier needs in order to reconstruct its queries.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct VerificationHint {
    // depths and extension present status sorted by stem
    pub depths: Vec<u8>,
    /// One entry per key, parallel to `depths`.
    pub extension_present: Vec<ExtPresent>,
    // All of the stems which are in the trie,
    // however, we are not directly proving any of their values
    pub diff_stem_no_proof: BTreeSet<[u8; 31]>,
}
impl std::fmt::Display for VerificationHint {
    /// Renders depths, extension statuses and hex-encoded stems,
    /// each followed by a single space.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        self.depths
            .iter()
            .try_for_each(|depth| write!(f, "{} ", depth))?;
        self.extension_present
            .iter()
            .try_for_each(|ext| write!(f, "{:?} ", ext))?;
        self.diff_stem_no_proof
            .iter()
            .try_for_each(|stem| write!(f, "{} ", hex::encode(stem)))
    }
}
impl VerificationHint {
    // We need the number of keys because we do not serialize the length of
    // the ext_status|| depth. This is equal to the number of keys in the proof, which
    // we assume the user knows.
    /// Deserializes a `VerificationHint` from `reader`.
    ///
    /// Wire format (integers little-endian):
    /// - u32 count, then that many 31-byte stems (`diff_stem_no_proof`)
    /// - u32 count, then that many packed bytes: extension status in the low
    ///   two bits, depth in bits 3..=7 (matching [`VerificationHint::write`]).
    ///
    /// # Errors
    /// Returns a `HintError` on IO failure or if a packed byte carries an
    /// extension-status value outside `0..=2`.
    pub fn read<R: Read>(mut reader: R) -> Result<VerificationHint, HintError> {
        // First extract the stems with no values opened for them
        let mut num_stems = [0u8; 4];
        reader.read_exact(&mut num_stems)?;
        let num_stems = u32::from_le_bytes(num_stems);

        let mut diff_stem_no_proof: BTreeSet<[u8; 31]> = BTreeSet::new();
        for _ in 0..num_stems {
            let mut stem = [0u8; 31];
            reader.read_exact(&mut stem)?;
            diff_stem_no_proof.insert(stem);
        }

        // Now extract the depth and ext status
        let mut num_depths = [0u8; 4];
        reader.read_exact(&mut num_depths)?;
        let num_depths: usize = u32::from_le_bytes(num_depths) as usize; // Assuming hardware is 32/64 bit, so usize is at least a u32

        let mut depths = Vec::new();
        let mut extension_present = Vec::new();
        let mut buffer = vec![0u8; num_depths];
        reader.read_exact(&mut buffer)?;
        for byte in buffer {
            // use a mask to get the last two bits
            const MASK: u8 = 3;
            let ext_status = MASK & byte;
            let ext_status = match ext_status {
                0 => ExtPresent::None,
                1 => ExtPresent::DifferentStem,
                2 => ExtPresent::Present,
                // Malformed input from an untrusted reader is a recoverable
                // error, not a program bug, so surface it instead of panicking.
                _ => {
                    return Err(std::io::Error::from(std::io::ErrorKind::InvalidData).into());
                }
            };
            // shift away the last 3 bits in order to get the depth
            let depth = byte >> 3;

            depths.push(depth);
            extension_present.push(ext_status)
        }

        Ok(VerificationHint {
            depths,
            extension_present,
            diff_stem_no_proof,
        })
    }
    /// Serializes the hint in the layout described on [`VerificationHint::read`].
    ///
    /// # Errors
    /// Returns a `HintError` on any IO failure from `writer`.
    pub fn write<W: Write>(&self, writer: &mut W) -> Result<(), HintError> {
        // Encode the number of stems with no value openings
        let num_stems = self.diff_stem_no_proof.len() as u32;
        writer.write_all(&num_stems.to_le_bytes())?;
        for stem in &self.diff_stem_no_proof {
            writer.write_all(stem)?;
        }

        let num_depths = self.depths.len() as u32;
        writer.write_all(&num_depths.to_le_bytes())?;

        // The depths and extension status can be put into a single byte
        // because extension status only needs 3 bits and depth only needs at most 5 bits
        for (depth, ext_status) in self.depths.iter().zip(&self.extension_present) {
            let mut byte = 0;

            // Encode extension status into the byte
            match ext_status {
                ExtPresent::None => {
                    // For None, we set the bit to be zero, so do nothing
                }
                ExtPresent::DifferentStem => {
                    // For different stem, we set the first bit to be 1
                    // This corresponds to the number 1.
                    byte = 1;
                }
                ExtPresent::Present => {
                    // For present, we set the second bit to be 1
                    // and the first bit to be zero
                    // This corresponds to the number 2.
                    byte = 2;
                }
            };
            // Encode depth into the byte, it should only be less
            // than or equal to 32, and so we only need 5 bits.
            debug_assert!(*depth <= 32);
            byte |= depth << 3;
            writer.write_all(&[byte])?;
        }

        Ok(())
    }
}
// Auxiliary information that the verifier needs in order to update the root statelessly
pub struct UpdateHint {
    // Per-stem (extension status, depth) pairs, keyed by the 31-byte stem.
    depths_and_ext_by_stem: BTreeMap<[u8; 31], (ExtPresent, u8)>,
    // This will be used to get the old commitment for a particular node
    // So that we can compute the delta between it and the new commitment
    commitments_by_path: BTreeMap<Vec<u8>, Element>,
    // Stems that live in the trie under a given prefix but are not being proven.
    other_stems_by_prefix: BTreeMap<Vec<u8>, [u8; 31]>,
}
// TODO: We make the fields of VerkleProof public due to these being exposed in
// TODO: the Block/golang code, so for now they need to be public.
/// A complete verkle proof: the hints the verifier needs, the node
/// commitments along the proven paths, and the multipoint opening proof.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct VerkleProof {
    pub verification_hint: VerificationHint,
    // Commitments sorted by their paths and then their indices
    // The root is taken out when we serialize, so the verifier does not receive it
    pub comms_sorted: Vec<Element>,
    //
    pub proof: MultiPointProof,
}
impl VerkleProof {
pub fn read<R: Read>(mut reader: R) -> Result<VerkleProof, HintError> {
let verification_hint = VerificationHint::read(&mut reader)?;
let mut num_comms = [0u8; 4];
reader.read_exact(&mut num_comms)?;
let num_comms = u32::from_le_bytes(num_comms);
let mut comms_sorted = Vec::new();
for _ in 0..num_comms {
let mut comm_serialized = [0u8; 32];
reader.read_exact(&mut comm_serialized)?;
let point = Element::from_bytes(&comm_serialized).ok_or(HintError::from(
std::io::Error::from(std::io::ErrorKind::InvalidData),
))?;
comms_sorted.push(point);
}
let mut bytes = Vec::new();
reader.read_to_end(&mut bytes)?;
let proof = MultiPointProof::from_bytes(&bytes, crate::constants::VERKLE_NODE_WIDTH)?;
Ok(VerkleProof {
verification_hint,
comms_sorted,
proof,
})
}
pub fn write<W: Write>(&self, mut writer: W) -> Result<(), HintError> {
// Errors are handled via anyhow because they are generic IO errors, not Verkle-specific
self.verification_hint.write(&mut writer)?;
let num_comms = self.comms_sorted.len() as u32;
writer.write_all(&num_comms.to_le_bytes())?;
for comm in &self.comms_sorted {
let comm_serialized = comm.to_bytes();
writer.write_all(&comm_serialized)?;
}
// Serialize the Multipoint proof
let proof_bytes = self.proof.to_bytes()?;
writer.write_all(&proof_bytes)?;
Ok(())
}
pub fn check(
self,
keys: Vec<[u8; 32]>,
values: Vec<Option<[u8; 32]>>,
root: Element,
) -> (bool, Option<UpdateHint>) {
// TODO: check the commitments are in the correct subgroup
// TODO: possibly will be done with Decaf
// TODO: remove need for this Clone, by splitting off the IPA proof object
// TODO here and sending the rest of the struct to create_verifier_queries
let proof = self.proof.clone();
let queries_update_hint = verifier::create_verifier_queries(self, keys, values, root);
let (queries, update_hint) = match queries_update_hint {
Some((queries, update_hint)) => (queries, update_hint),
None => return (false, None),
};
let mut transcript = Transcript::new(b"vt");
let ok = proof.check(&CRS, &PRECOMPUTED_WEIGHTS, &queries, &mut transcript);
(ok, Some(update_hint))
}
}
impl std::fmt::Display for VerkleProof {
    /// Human-readable rendering: a header line, the verification hints,
    /// then each commitment hex-encoded and followed by a space.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        writeln!(f, "Verkle proof:")?;
        writeln!(f, " * verification hints: {}", self.verification_hint)?;
        write!(f, " * commitments: ")?;
        self.comms_sorted
            .iter()
            .try_for_each(|comm| write!(f, "{} ", hex::encode(comm.to_bytes())))
    }
}
#[cfg(test)]
mod test {
    use super::VerkleProof;
    use crate::database::{memory_db::MemoryDb, ReadOnlyHigherDb};
    use crate::proof::{prover, verifier};
    use crate::{trie::Trie, DefaultConfig, TrieTrait};
    use banderwagon::Fr;

    /// A proof over four present keys should verify against the root commitment.
    #[test]
    fn basic_proof_true() {
        let db = MemoryDb::new();
        let mut trie = Trie::new(DefaultConfig::new(db));

        let mut keys = Vec::new();
        for i in 0..=3 {
            let mut key_0 = [0u8; 32];
            key_0[0] = i;
            keys.push(key_0);
            trie.insert_single(key_0, key_0);
        }
        // The root branch is addressed by the empty path.
        let root = vec![];
        let meta = trie.storage.get_branch_meta(&root).unwrap();

        let proof = prover::create_verkle_proof(&trie.storage, keys.clone()).unwrap();

        let values: Vec<_> = keys.iter().map(|val| Some(*val)).collect();
        let (ok, _) = proof.check(keys, values, meta.commitment);
        assert!(ok);
    }

    /// A proof of absence (key not in an empty trie) should also verify.
    #[test]
    fn proof_of_absence_edge_case() {
        let db = MemoryDb::new();
        let trie = Trie::new(DefaultConfig::new(db));

        let absent_keys = vec![[3; 32]];
        let absent_values = vec![None];

        let root = vec![];
        let meta = trie.storage.get_branch_meta(&root).unwrap();

        let proof = prover::create_verkle_proof(&trie.storage, absent_keys.clone()).unwrap();
        let (ok, _) = proof.check(absent_keys, absent_values, meta.commitment);
        assert!(ok);
    }

    /// The queries the prover opens and the queries the verifier reconstructs
    /// must agree on commitment, evaluation point and claimed result.
    #[test]
    fn prover_queries_match_verifier_queries() {
        let db = MemoryDb::new();
        let mut trie = Trie::new(DefaultConfig::new(db));

        let mut keys = Vec::new();
        for i in 0..=3 {
            let mut key_0 = [0u8; 32];
            key_0[0] = i;
            keys.push(key_0);
            trie.insert_single(key_0, key_0);
        }
        let root = vec![];
        let meta = trie.storage.get_branch_meta(&root).unwrap();

        let (pq, _) = prover::create_prover_queries(&trie.storage, keys.clone());
        let proof = prover::create_verkle_proof(&trie.storage, keys.clone()).unwrap();

        let values: Vec<_> = keys.iter().map(|val| Some(*val)).collect();
        let (vq, _) =
            verifier::create_verifier_queries(proof, keys, values, meta.commitment).unwrap();

        for (p, v) in pq.into_iter().zip(vq) {
            assert_eq!(p.commitment, v.commitment);
            // Prover stores the point as an index; verifier as a field element.
            assert_eq!(Fr::from(p.point as u128), v.point);
            assert_eq!(p.result, v.result);
        }
    }

    /// write() followed by read() must round-trip to an equal proof.
    #[test]
    fn simple_serialization_consistency() {
        let db = MemoryDb::new();
        let mut trie = Trie::new(DefaultConfig::new(db));

        let mut keys = Vec::new();
        for i in 0..=3 {
            let mut key_0 = [0u8; 32];
            key_0[0] = i;
            keys.push(key_0);
            trie.insert_single(key_0, key_0);
        }
        let root = vec![];
        let _meta = trie.storage.get_branch_meta(&root).unwrap();

        let proof = prover::create_verkle_proof(&trie.storage, keys.clone()).unwrap();

        let mut bytes = Vec::new();
        proof.write(&mut bytes).unwrap();
        let deserialized_proof = VerkleProof::read(&bytes[..]).unwrap();

        assert_eq!(proof, deserialized_proof);
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/src/lib.rs | verkle-trie/src/lib.rs | #[deny(unreachable_patterns)]
// pub mod committer;
pub mod config;
pub mod constants;
pub mod database;
pub mod errors;
pub mod from_to_bytes;
pub mod proof;
pub mod trie;
pub use config::*;
use errors::ProofCreationError;
pub use trie::Trie;
pub use banderwagon::{Element, Fr};
pub type Key = [u8; 32];
pub type Value = [u8; 32];
pub type Stem = [u8; 31];
/// The public interface exposed by a verkle trie implementation.
pub trait TrieTrait {
    /// Inserts multiple values into the trie
    /// If the number of items is below FLUSH_BATCH, they will be persisted
    /// atomically
    /// This method will implicitly compute the new root
    fn insert(&mut self, kv: impl Iterator<Item = (Key, Value)>);

    /// Inserts a single value
    /// This method will implicitly compute the new root
    fn insert_single(&mut self, key: Key, value: Value) {
        // `iter::once` avoids allocating a one-element Vec just to iterate it.
        self.insert(std::iter::once((key, value)))
    }
    /// Gets the value at the `Key` if it exists
    /// Returns an error if it does not exist
    /// TODO: Find out if this method is ever needed
    fn get(&self, key: Key) -> Option<Value>;

    /// Returns the root of the trie
    fn root_hash(&self) -> Fr;

    /// Returns the root commitment of the trie
    fn root_commitment(&self) -> Element;

    /// Creates a verkle proof over many keys
    fn create_verkle_proof(
        &self,
        key: impl Iterator<Item = Key>,
    ) -> Result<proof::VerkleProof, ProofCreationError>;
}
// TODO: remove this, its here for backwards compatibility
/// Maps a group element to a scalar field element (the "hash" of a commitment).
pub(crate) fn group_to_field(point: &Element) -> Fr {
    point.map_to_scalar_field()
}
// TODO: Possible optimization. This means we never allocate for paths:
// a SmallVec with 32 bytes of inline storage keeps paths of length <= 32
// on the stack instead of the heap.
use smallvec::SmallVec;
pub type SmallVec32 = SmallVec<[u8; 32]>;
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/src/database.rs | verkle-trie/src/database.rs | pub mod default;
mod generic;
pub mod memory_db;
pub mod meta;
pub use default::VerkleDb;
pub use meta::{BranchChild, BranchMeta, Meta, StemMeta};
/// Convenience super-trait for storage that is both readable and writable.
pub trait ReadWriteHigherDb: ReadOnlyHigherDb + WriteOnlyHigherDb {}
// Blanket impl: any type with both capabilities gets this trait for free.
impl<T: ReadOnlyHigherDb + WriteOnlyHigherDb> ReadWriteHigherDb for T {}
// There are two ways to use your database with this trie implementation:
// 1) Implement the traits in this file; Flush, WriteOnlyHigherDb, ReadOnlyHigherDb
//
// 2) Implement lower level traits in verkle-db, then use VerkleDb. The traits in this file will
// be automatically implemented using the traits in verkle-db
// TODO we need a populate cache method that populates the cache from storage
// TODO Think of a better name than ReadOnlyHigherDb, WriteOnlyHigherDb
// Allows a component to flush their memory database to disk
// This is a no-op for components which are just memory databases
pub trait Flush {
    /// Persists any buffered writes; memory-only databases implement this
    /// as a no-op.
    fn flush(&mut self);
}
// WriteOnly trait which will be implemented by BatchWriters and memory databases
// This will not be implemented by disk storage directly, they just need to flush
// the BatchWriter
// TODO: we could auto implement `update` methods which assert that
// TODO there was a previous value
// TODO they would just wrap the insert methods and check for `None`
pub trait WriteOnlyHigherDb {
    /// Inserts a leaf value; returns the previous value at `key`, if any.
    fn insert_leaf(&mut self, key: [u8; 32], value: [u8; 32], _depth: u8) -> Option<Vec<u8>>;
    /// Inserts stem metadata; returns the previous metadata, if any.
    fn insert_stem(&mut self, key: [u8; 31], meta: StemMeta, _depth: u8) -> Option<StemMeta>;

    // TODO we can probably combine `add_stem_as_branch_child` and `insert_branch`
    // TODO into a single method called `insert_branch_child`
    fn add_stem_as_branch_child(
        &mut self,
        branch_child_id: Vec<u8>,
        stem_id: [u8; 31],
        _depth: u8,
    ) -> Option<BranchChild>;

    // TODO maybe we can return BranchChild, as the previous data could have been a stem or branch_meta
    // TODO then we can leave it upto the caller on how to deal with it
    fn insert_branch(&mut self, key: Vec<u8>, meta: BranchMeta, _depth: u8) -> Option<BranchMeta>;
}
// Notice that these take self, which effectively forces the implementer
// to implement these for self or use a struct which is Copyable
// One should aim for the former
pub trait ReadOnlyHigherDb {
    /// Metadata for a stem, if it exists.
    fn get_stem_meta(&self, stem_key: [u8; 31]) -> Option<StemMeta>;
    fn get_branch_meta(&self, key: &[u8]) -> Option<BranchMeta>;
    // TODO add a range query for the default database in verkle_db
    fn get_branch_children(&self, branch_id: &[u8]) -> Vec<(u8, BranchChild)>;
    fn get_branch_child(&self, branch_id: &[u8], index: u8) -> Option<BranchChild>;
    // TODO add a range query for the default database in verkle_db
    fn get_stem_children(&self, stem_key: [u8; 31]) -> Vec<(u8, [u8; 32])>;
    fn get_leaf(&self, key: [u8; 32]) -> Option<[u8; 32]>;

    /// The root branch lives at the empty path; a missing root therefore
    /// means the database is fresh.
    fn root_is_missing(&self) -> bool {
        // `&[]` is the root's path; no Vec allocation needed (matches how
        // `Trie::root_hash` addresses the root).
        self.get_branch_meta(&[]).is_none()
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/src/from_to_bytes.rs | verkle-trie/src/from_to_bytes.rs | use banderwagon::trait_defs::*;
/// Serializes a value into its byte representation `T`.
pub trait ToBytes<T> {
    fn to_bytes(&self) -> Result<T, SerializationError>;
}
/// Deserializes a value from its byte representation `T`.
pub trait FromBytes<T> {
    fn from_bytes(bytes: T) -> Result<Self, SerializationError>
    where
        Self: Sized;
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/src/constants.rs | verkle-trie/src/constants.rs | use banderwagon::fr_from_u64_limbs;
pub use banderwagon::Fr;
use ipa_multipoint::{crs::CRS, lagrange_basis::PrecomputedWeights};
use once_cell::sync::Lazy;
/// Number of items below which a batch insert is persisted atomically.
pub const FLUSH_BATCH: u32 = 20_000;
// This library only works for a width of 256. It can be modified to work for other widths, but this is
// out of scope for this project.
pub const VERKLE_NODE_WIDTH: usize = 256;
// Seed used to compute the 256 pedersen generators
// using try-and-increment
const PEDERSEN_SEED: &[u8] = b"eth_verkle_oct_2021";

// Limbs are 64-bit, least-significant first, so [0, 0, 1, 0] = 2^128
// (checked by `test_two_pow128_constant` below).
pub(crate) const TWO_POW_128: Fr = fr_from_u64_limbs([0, 0, 1, 0]);

// Lazily-initialized CRS shared across the crate.
pub static CRS: Lazy<CRS> = Lazy::new(|| CRS::new(VERKLE_NODE_WIDTH, PEDERSEN_SEED));

/// Builds a fresh CRS with the same parameters as the shared static.
pub fn new_crs() -> CRS {
    CRS::new(VERKLE_NODE_WIDTH, PEDERSEN_SEED)
}

// Lazily-initialized barycentric weights for the 256-point Lagrange basis.
pub static PRECOMPUTED_WEIGHTS: Lazy<PrecomputedWeights> =
    Lazy::new(|| PrecomputedWeights::new(VERKLE_NODE_WIDTH));
#[cfg(test)]
mod tests {
    use super::TWO_POW_128;
    use banderwagon::{trait_defs::*, Fr};

    /// TWO_POW_128 must equal the field element obtained by reducing the
    /// 17-byte big-endian encoding of 2^128.
    #[test]
    fn test_two_pow128_constant() {
        let mut arr = [0u8; 17];
        arr[0] = 1;
        let expected = Fr::from_be_bytes_mod_order(&arr);
        assert_eq!(TWO_POW_128, expected)
    }
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/src/trie.rs | verkle-trie/src/trie.rs | #![allow(clippy::large_enum_variant)]
use crate::constants::{CRS, TWO_POW_128};
use crate::database::{BranchMeta, Flush, Meta, ReadWriteHigherDb, StemMeta};
use crate::Config;
use crate::{group_to_field, TrieTrait};
use ipa_multipoint::committer::Committer;
use banderwagon::{trait_defs::*, Element, Fr};
#[derive(Debug, Clone)]
// The trie implements the logic to insert values, fetch values, and create paths to said values
pub struct Trie<Storage, PolyCommit: Committer> {
    // Backing store for branches, stems and leaves.
    pub(crate) storage: Storage,
    // Commitment scheme used to commit to node data.
    committer: PolyCommit,
}
// Implementation of the trie trait that should be considered the public API for the trie
impl<S: ReadWriteHigherDb, P: Committer> TrieTrait for Trie<S, P> {
    /// Inserts each key/value pair, recomputing commitments along the way.
    fn insert(&mut self, kv: impl Iterator<Item = (crate::Key, crate::Value)>) {
        for (key_bytes, value_bytes) in kv {
            let ins = self.create_insert_instructions(key_bytes, value_bytes);
            self.process_instructions(ins);
        }
    }

    fn get(&self, key: crate::Key) -> Option<crate::Value> {
        self.storage.get_leaf(key)
    }

    fn root_hash(&self) -> Fr {
        // This covers the case when the tree is empty
        // If the number of stems is zero, then this branch will return zero
        let root_node = self
            .storage
            .get_branch_meta(&[])
            .expect("this should be infallible as every trie should have a root upon creation");
        root_node.hash_commitment
    }

    fn create_verkle_proof(
        &self,
        keys: impl Iterator<Item = [u8; 32]>,
    ) -> Result<crate::proof::VerkleProof, crate::errors::ProofCreationError> {
        use crate::proof::prover;
        prover::create_verkle_proof(&self.storage, keys.collect())
    }

    fn root_commitment(&self) -> Element {
        // TODO: This is needed for proofs, can we remove the root hash as the root?
        // Same invariant as `root_hash`: the root exists after construction,
        // so state it with `expect` instead of a bare unwrap.
        let root_node = self
            .storage
            .get_branch_meta(&[])
            .expect("every trie should have a root upon creation");
        root_node.commitment
    }
}
// To identify a branch, we only need to provide the path to the branch
// (the sequence of child indices from the root; the root itself is the empty path).
pub(crate) type BranchId = Vec<u8>;
// Modifying the Trie is done by creating Instructions and
// then executing them. The trie can only be modified via the
// component that executes the instruction. However, it can be
// read by any component.
//
// The main reason to do it like this, is so that on insertion
// we can "read and prepare" all of the necessary updates, which
// works well with Rust's somewhat limited borrow checker (pre-polonius).
#[derive(Debug)]
enum Ins {
    // This Opcode modifies the leaf, stem and inner node all at once!
    // We know that whenever a leaf is modified, the stem metadata is also modified,
    // and the inner node which references the stem's metadata is also modified
    UpdateLeaf {
        // Data needed for leaf
        //
        key: [u8; 32],
        new_leaf_value: [u8; 32],
        // depth is needed for caching
        depth: u8,
        //
        // Data needed for a internal node
        //
        // internal nodes are referenced using 8 bytes
        // This is the internal node which references the stem of the leaf we just modified
        branch_id: BranchId,
        // This is the index of the stem in the inner node
        branch_child_index: u8,
        //
        // We know the key for the child node since we have the leaf
    },
    // ChainInsert is only initiated when the key being inserted shares < 31 indices with an
    // existing key
    ChainInsert {
        // Depth at which the chain of new branch nodes starts.
        starting_depth: u8,
        // The shared path (relative to the parent) along which branch nodes are created.
        chain_insert_path: Vec<u8>,
        parent_branch_node: BranchId,
        // This is the index of the child which currently has a stem node,
        // but wil become a branch node
        child_index: u8,
        // This is the index in the new branch node where we should store this old leaf (the previous stem)
        old_leaf_index: u8,
        // previous_stem_value : we can omit this and just fetch it when we process the instruction (maybe change this everywhere, so insert does not hold the old values)
        new_leaf_key: [u8; 32],
        new_leaf_value: [u8; 32],
        new_leaf_index: u8,
    },
    // This instruction updates the map for the internal node.
    // Specifically it specifies that the branch now points to some child.
    InternalNodeFallThrough {
        // internal nodes are referenced using 8 bytes
        branch_id: BranchId,
        // This is the index of the child that the inner node points to,
        // that has triggered the node to update its commitment
        // We track this because if the same child triggers multiple updates
        // within a child, we only need the last one.
        // Maybe we should have this as one instruction with InsertLeaf?
        branch_child_index: u8,
        child: BranchId,
        old_child_value: Option<Meta>,
        // depth is needed for caching
        depth: u8,
    },
}
impl<Storage: ReadWriteHigherDb, PolyCommit: Committer> Trie<Storage, PolyCommit> {
    // Creates a new Trie object
    pub fn new(config: Config<Storage, PolyCommit>) -> Self {
        // TODO: We should have a way to populate the cache from the persistent db here.
        // TODO: we first check if it is an new database and if it is not
        // TODO: then we pull in all nodes on level 3 or lower
        // TODO: This way, if it is not in the cache, we know it is not in the key-value db either
        let mut db = config.db;
        let pc = config.committer;

        // Add the root node to the database with the root index, if the database does not have it
        // If the root is missing, then it means it is a fresh database
        if db.root_is_missing() {
            let old_val = db.insert_branch(vec![], BranchMeta::zero(), 0);
            assert!(old_val.is_none());
        }

        Trie {
            storage: db,
            committer: pc,
        }
    }

    // Inserting a leaf in the trie is done in two steps
    // First we need to modify the corresponding parts of the
    // tree to account for the new leaf
    // Then, we need to store the leaf in the key-value database
    // and possibly the cached layer depending on the depth of the
    // leaf in the trie. The first 3/4 layers are stored in the cache
    //
    // Walks the trie along the key's path and emits the instructions needed
    // to insert (key, value), without mutating anything yet.
    fn create_insert_instructions(&self, key_bytes: [u8; 32], value_bytes: [u8; 32]) -> Vec<Ins> {
        let mut instructions = Vec::new();

        let path_indices = key_bytes.into_iter();
        let mut current_node_index = vec![];
        // The loop index lets us know what level in the tree we are at
        for (loop_index, path_index) in path_indices.enumerate() {
            // enumerate starts counting at 0, we want to start from 1
            let loop_index = loop_index + 1;
            // Note: For each layer that we pass, we need to re-compute the
            // inner node's commitment for that layer.

            // Lets find the child node of the current path_index
            let child = self
                .storage
                .get_branch_child(&current_node_index, path_index);
            let child = match child {
                Some(child) => child,
                None => {
                    // Case 1: The child was empty. This means that this is a new leaf, since it has no stem or branch.
                    //
                    instructions.push(Ins::UpdateLeaf {
                        key: key_bytes,
                        new_leaf_value: value_bytes,
                        depth: loop_index as u8,
                        branch_id: current_node_index,
                        branch_child_index: path_index,
                    });
                    return instructions;
                }
            };

            // Lets first figure out if it was a stem or a branch
            //
            // Case2: We have encountered an internal node
            if child.is_branch() {
                let mut node_path = current_node_index.clone();
                node_path.push(path_index);

                instructions.push(Ins::InternalNodeFallThrough {
                    branch_id: current_node_index,
                    branch_child_index: path_index,
                    child: node_path.clone(),
                    depth: loop_index as u8,
                    // TODO this does not need to be optional
                    old_child_value: child.branch().map(Meta::from),
                });
                // Descend into the branch and keep walking the key's path.
                current_node_index = node_path;
                continue;
            }

            // Since the child is neither empty nor an inner node,
            // it must be a stem.
            // We have some sub-cases to consider:

            // Case3a: The existing stem already has this key saved or it should be saved under this stem. In which case, we need to update the node
            // Case3b: The existing node does not have this key stored, however the stem shares a path with this key. In which case, we need to create branch nodes
            // to represent this.

            let (shared_path, path_diff_old, path_diff_new) =
                path_difference(child.stem().unwrap(), key_bytes[0..31].try_into().unwrap());

            // Case3a: Lets check if this key belongs under the stem
            if shared_path.len() == 31 {
                // TODO this is the only place here, where we require a get_leaf
                // TODO should we just allow users to update keys to be the same
                // TODO value and catch it maybe when we compute the delta?
                // TODO rationale is that get_leaf only gets a single 32 bytes
                // TODO and database work in pages of ~4Kb

                // The new key and the old child belong under the same stem
                let leaf_val = self.storage.get_leaf(key_bytes);
                let (old_leaf_val, leaf_already_present_in_trie) = match leaf_val {
                    Some(old_val) => {
                        // There was an old value in the stem, so this is an update
                        (old_val, true)
                    }
                    None => {
                        // There are other values under this stem, but this is the first value under this entry
                        ([0u8; 32], false)
                    }
                };

                // If the key is being updated to exactly the same value, we just return nothing
                // This is an optimization that allows one to avoid doing work,
                // when the value being inserted has not been updated
                if path_diff_old.is_none() {
                    // This means that they share all 32 bytes
                    assert!(path_diff_new.is_none());

                    // We return nothing if the value is the same
                    // and the leaf was already present.
                    // This means that if one inserts a leaf with value zero,
                    // it is still inserted in the trie
                    if (old_leaf_val == value_bytes) & leaf_already_present_in_trie {
                        return Vec::new();
                    }
                }

                instructions.push(Ins::UpdateLeaf {
                    key: key_bytes,
                    new_leaf_value: value_bytes,
                    depth: loop_index as u8,
                    branch_id: current_node_index,
                    branch_child_index: path_index,
                });
                return instructions;
            }

            // Case3b: The key shares a path with the child, but not 31,so we need to add branch nodes

            // path_difference returns all shared_paths.
            // Even shared paths before the current internal node.
            // Lets remove all of those paths
            let relative_shared_path = &shared_path[(loop_index - 1)..];

            // p_diff_a and p_diff_b tell us the first path index that these paths disagree
            // since the keys are not equal, these should have values
            let p_diff_old = path_diff_old.unwrap();
            let p_diff_new = path_diff_new.unwrap();

            instructions.push(Ins::ChainInsert {
                chain_insert_path: relative_shared_path.to_vec(),
                starting_depth: loop_index as u8,
                parent_branch_node: current_node_index,
                child_index: path_index,
                old_leaf_index: p_diff_old,
                new_leaf_key: key_bytes,
                new_leaf_value: value_bytes,
                new_leaf_index: p_diff_new,
            });
            return instructions;
        }
        instructions
    }

    // Process instructions in reverse order
    // (instructions were generated root-to-leaf; applying them leaf-to-root
    // lets each parent read its child's freshly written commitment).
    fn process_instructions(&mut self, instructions: Vec<Ins>) {
        for ins in instructions.into_iter().rev() {
            match ins {
                Ins::InternalNodeFallThrough {
                    branch_id,
                    branch_child_index,
                    child,
                    depth,
                    old_child_value,
                } => {
                    // By the time we get to this instruction, the child would have been modified by a previous instruction
                    let new_branch_meta = self.storage.get_branch_meta(&child).unwrap();
                    let new_hash_comm = new_branch_meta.hash_commitment;

                    let old_hash_comm = match old_child_value {
                        Some(old_branch_meta) => old_branch_meta.into_branch().hash_commitment,
                        None => Fr::zero(),
                    };

                    let delta = new_hash_comm - old_hash_comm;
                    let delta_comm = self
                        .committer
                        .scalar_mul(delta, branch_child_index as usize);

                    let old_parent_branch_metadata =
                        self.storage.get_branch_meta(&branch_id).unwrap();
                    let old_branch_comm = old_parent_branch_metadata.commitment;

                    let updated_comm = old_branch_comm + delta_comm;
                    let hash_updated_comm = group_to_field(&updated_comm);
                    self.storage.insert_branch(
                        branch_id,
                        BranchMeta {
                            commitment: updated_comm,
                            hash_commitment: hash_updated_comm,
                        },
                        depth,
                    );

                    // Then compute the delta between the old and new Value, we use the index to compute the delta commitment
                    // Then modify the branch commitment data
                }
                Ins::UpdateLeaf {
                    key,
                    new_leaf_value,
                    depth,
                    branch_id,
                    branch_child_index,
                } => {
                    let leaf_update = match self.update_leaf_table(key, new_leaf_value, depth) {
                        Some(leaf_update) => leaf_update,
                        None => {
                            // No value was updated, early exit
                            // (exits instruction processing entirely; UpdateLeaf
                            // is processed first in the reversed order).
                            return;
                        }
                    };
                    let stem_update = self.update_stem_table(leaf_update, depth);
                    self.update_branch_table(stem_update, branch_id, branch_child_index, depth);
                }
                // TODO update comments on this function
                Ins::ChainInsert {
                    chain_insert_path,
                    starting_depth,
                    old_leaf_index,
                    parent_branch_node,
                    child_index,
                    new_leaf_key,
                    new_leaf_value,
                    new_leaf_index,
                } => {
                    assert!(!chain_insert_path.is_empty());

                    //0. Compute the path for each inner node
                    let mut inner_node_paths =
                        expand_path(&parent_branch_node, &chain_insert_path).rev();
                    //
                    // 1. First check that before modification, the node which starts the chain is a stem
                    // we will later replace it later with an inner node.
                    // If it is not a stem, then this is a bug, as chain insert should not have been called.
                    let old_child = self
                        .storage
                        .get_branch_child(&parent_branch_node, child_index)
                        .unwrap();
                    let old_stem_child = old_child.stem().unwrap();

                    //2a. Now lets create the inner node which will hold the two stems
                    // Note; it's position will be at the bottom of the chain.
                    let bottom_inner_node_path = inner_node_paths.next().unwrap();
                    let bottom_inode_depth = bottom_inner_node_path.len() as u8;
                    self.storage.insert_branch(
                        bottom_inner_node_path.clone(),
                        BranchMeta::zero(),
                        bottom_inode_depth,
                    );

                    //2b We then attach the two stems as children in the correct positions
                    // The new leaf has not been saved yet, so we need to put it in the leaf and stem table first
                    let leaf_update = self
                        .update_leaf_table(new_leaf_key, new_leaf_value, bottom_inode_depth)
                        .unwrap();
                    let new_stem_update = self.update_stem_table(leaf_update, bottom_inode_depth);
                    self.update_branch_table(
                        new_stem_update,
                        bottom_inner_node_path.clone(),
                        new_leaf_index,
                        bottom_inode_depth,
                    );

                    // Add second stem to branch, since it is already in the database
                    // We just need to state that this branch node points to it and
                    // update this nodes commitment and commitment value
                    let stem_meta_data = self.storage.get_stem_meta(old_stem_child).unwrap();
                    let old_stem_updated = StemUpdated {
                        old_val: None,
                        new_val: stem_meta_data.hash_stem_commitment,
                        stem: old_stem_child,
                    };
                    let bottom_branch_root = self.update_branch_table(
                        old_stem_updated,
                        bottom_inner_node_path.clone(),
                        old_leaf_index,
                        bottom_inode_depth,
                    );

                    //3) We now have the root for the branch node which holds the two stem nodes.
                    // We now need to create a chain of branch nodes up to the parent, updating their commitments
                    // along the way
                    // The inner node at the depth below, will become the child for the node at the depth above
                    //
                    //
                    // Note: We could now use a single for loop, however, we can optimise the next section by observing that:
                    // All nodes except the first node will have an old_value of 0 (Since they are being created now)
                    // This allows us to skip fetching their values from the database. We will just need to manually update the
                    // First node which had an old value equal to the stems value
                    let shortened_path = inner_node_paths;

                    // We now want to start from the bottom and update each inner node's commitment and hash
                    let mut inner_node_below_val = bottom_branch_root;
                    for (child_path, parent_branch_node) in
                        chain_insert_path.iter().rev().zip(shortened_path)
                    {
                        let depth = parent_branch_node.len() as u8;
                        let delta = inner_node_below_val; // Remember the old value will be zero, since we just created it.
                        let updated_comm = self.committer.scalar_mul(delta, *child_path as usize);
                        let branch_root = group_to_field(&updated_comm);
                        self.storage.insert_branch(
                            parent_branch_node.clone(),
                            BranchMeta {
                                commitment: updated_comm,
                                hash_commitment: branch_root,
                            },
                            depth,
                        );
                        inner_node_below_val = branch_root;
                    }

                    // 4) We now only need to modify the branch node which was previously holding the stem
                    // This is the parent branch node
                    let old_stem_value = stem_meta_data.hash_stem_commitment;
                    let new_inner_node_value = inner_node_below_val;
                    let delta = new_inner_node_value - old_stem_value;

                    let top_parent = self.storage.get_branch_meta(&parent_branch_node).unwrap();
                    let updated_top_comm = top_parent.commitment
                        + self.committer.scalar_mul(delta, child_index as usize);
                    let top_parent_root = group_to_field(&updated_top_comm);
                    self.storage.insert_branch(
                        parent_branch_node.clone(),
                        BranchMeta {
                            commitment: updated_top_comm,
                            hash_commitment: top_parent_root,
                        },
                        starting_depth,
                    );
                }
            }
        }
    }
}
// Result of writing a leaf into the leaf table.
// Carried forward so that the stem update can be computed from the
// delta between the old and new values, rather than from scratch.
#[derive(Debug)]
pub(crate) struct LeafUpdated {
    // Previous 32-byte value stored under `key`, or None if the key was fresh
    old_val: Option<Vec<u8>>,
    // The 32-byte value that has just been written
    new_value: Vec<u8>,
    // The full 32-byte key (stem || suffix) that was updated
    key: Vec<u8>,
}
// Result of updating a stem's commitment data.
// Holds the old and new hashes of the stem commitment, which are used to
// propagate a delta into the parent branch node.
#[derive(Debug)]
pub(crate) struct StemUpdated {
    // Hash of the stem commitment before the update; None if the stem is new
    old_val: Option<Fr>,
    // Hash of the stem commitment after the update
    new_val: Fr,
    // The 31-byte stem these hashes belong to
    stem: [u8; 31],
}
impl<Storage: ReadWriteHigherDb, PolyCommit: Committer> Trie<Storage, PolyCommit> {
    // Store the leaf, we return data on the old leaf, so that we can do the delta optimization
    //
    // If a leaf was not updated, this function will return None
    // else Some will be returned with the old value
    //
    // NOTE(review): "not updated" here specifically means the caller re-inserted
    // the value that was already stored; in that case no commitments change and
    // the caller can skip the stem/branch updates entirely.
    fn update_leaf_table(
        &mut self,
        key: [u8; 32],
        value: [u8; 32],
        depth: u8,
    ) -> Option<LeafUpdated> {
        let old_val = match self.storage.insert_leaf(key, value, depth) {
            Some(vec) => {
                // Check if they have just inserted the previous value
                // if so, we early exit and return None
                if vec == value {
                    return None;
                }
                Some(vec)
            }
            None => None,
        };
        Some(LeafUpdated {
            old_val,
            new_value: value.to_vec(),
            key: key.to_vec(),
        })
        // Storing a leaf means we need to change the stem table too
    }
    // Applies a leaf update to the stem table: recomputes C1 or C2 (whichever
    // half the leaf's suffix lives in), then the stem commitment and its hash,
    // and persists the refreshed StemMeta. Returns the old/new stem hashes so
    // the caller can propagate the delta into the parent branch.
    fn update_stem_table(&mut self, update_leaf: LeafUpdated, depth: u8) -> StemUpdated {
        // If a leaf is updated, then we need to update the stem.
        // In particular, we need to update the commitment for that stem and the stem value
        //
        // There are two cases here:
        // - old_value is None. So there was a fresh update
        // - old_value as Some and we have modified a value
        // We can treat both cases as one because to compute the delta we do (new_value - old_value)
        // When the value has not changed, it's (new_value - 0)
        //
        // Split values into low_16 and high_16
        let new_value_low_16 = update_leaf.new_value[0..16].to_vec();
        let new_value_high_16 = update_leaf.new_value[16..32].to_vec();
        // The low half of a *present* value carries an extra 2^128 term
        // (value-present marker); it is added on both the old and new sides
        // so it cancels out in the delta when a value is overwritten.
        let (old_value_low_16, old_value_high_16) = match update_leaf.old_val {
            Some(val) => (
                Fr::from_le_bytes_mod_order(&val[0..16]) + TWO_POW_128,
                Fr::from_le_bytes_mod_order(&val[16..32]),
            ),
            None => (Fr::zero(), Fr::zero()),
        };
        // We need to compute two deltas
        let delta_low =
            Fr::from_le_bytes_mod_order(&new_value_low_16) + TWO_POW_128 - old_value_low_16;
        let delta_high = Fr::from_le_bytes_mod_order(&new_value_high_16) - old_value_high_16;
        // We need to compute which group elements in the srs are being used
        // We know that the first 128 values are mapped to the first 256 group elements
        // and the last 128 values are mapped to the second 256 group elements
        //
        // So given our position is `0`, the values would map to (0,1)
        // Given our position is `1` the values would map to (2,3)
        // Given our position is `2`, the values would map to (4,5)
        // Given our position is `n`. the values would map to (2n, 2n+1) where n < 128 ie 0 <= n <= 127
        //
        // For n >= 128, we mod 128 n then apply the same algorithm as above.
        // Given our position is `255`, 255 mod 128 = 127. The values would be (254,255)
        // Given our position is `128`, 128 mod 128 = 0. The values would be (0,1)
        let position = update_leaf.key[31];
        let pos_mod_128 = position % 128;
        let low_index = 2 * pos_mod_128 as usize;
        let high_index = low_index + 1;
        let generator_low = self.committer.scalar_mul(delta_low, low_index);
        let generator_high = self.committer.scalar_mul(delta_high, high_index);
        let stem: [u8; 31] = update_leaf.key[0..31].try_into().unwrap();
        // Load the stem's current commitment data, or initialize it if this is
        // the very first leaf inserted under this stem.
        let (c_1, old_hash_c1, c_2, old_hash_c2, stem_comm, old_hash_stem_comm) =
            match self.storage.get_stem_meta(stem) {
                Some(comm_val) => (
                    comm_val.c_1,
                    comm_val.hash_c1,
                    comm_val.c_2,
                    comm_val.hash_c2,
                    comm_val.stem_commitment,
                    Some(comm_val.hash_stem_commitment),
                ),
                None => {
                    // This is the first leaf for the stem, so the C1, C2 commitments will be zero
                    // The stem commitment will be 1 * G_1 + stem * G_2
                    let stem_comm = CRS[0]
                        + self
                            .committer
                            .scalar_mul(Fr::from_le_bytes_mod_order(&stem), 1);
                    (
                        Element::zero(),
                        group_to_field(&Element::zero()),
                        Element::zero(),
                        group_to_field(&Element::zero()),
                        stem_comm,
                        None,
                    )
                }
            };
        // Compute the delta for the stem commitment
        // Suffixes 0..=127 live in C1 (committed at index 2 of the extension);
        // suffixes 128..=255 live in C2 (index 3). Only the affected half and
        // its hash are recomputed; the other half passes through unchanged.
        let (updated_c_1, new_hash_c1, updated_c_2, new_hash_c2, updated_stem_comm) =
            if position < 128 {
                // update c_1
                let updated_c_1 = c_1 + generator_low + generator_high;
                let new_hash_c1 = group_to_field(&updated_c_1);
                let c_1_delta = new_hash_c1 - old_hash_c1;
                let c_1_point = self.committer.scalar_mul(c_1_delta, 2);
                let updated_stem_comm = stem_comm + c_1_point;
                (
                    updated_c_1,
                    new_hash_c1,
                    c_2,
                    old_hash_c2,
                    updated_stem_comm,
                )
            } else {
                // update c_2
                let updated_c_2 = c_2 + generator_low + generator_high;
                let new_hash_c2 = group_to_field(&updated_c_2);
                let c_2_delta = new_hash_c2 - old_hash_c2;
                let c_2_point = self.committer.scalar_mul(c_2_delta, 3);
                let updated_stem_comm = stem_comm + c_2_point;
                (
                    c_1,
                    old_hash_c1,
                    updated_c_2,
                    new_hash_c2,
                    updated_stem_comm,
                )
            };
        let updated_hash_stem_comm = group_to_field(&updated_stem_comm);
        // Persist the refreshed stem metadata.
        self.storage.insert_stem(
            stem,
            StemMeta {
                c_1: updated_c_1,
                hash_c1: new_hash_c1,
                c_2: updated_c_2,
                hash_c2: new_hash_c2,
                stem_commitment: updated_stem_comm,
                hash_stem_commitment: updated_hash_stem_comm,
            },
            depth,
        );
        StemUpdated {
            old_val: old_hash_stem_comm,
            new_val: updated_hash_stem_comm,
            stem,
        }
    }
    // Propagates a stem update into its parent branch node: applies the hash
    // delta to the branch commitment at `branch_index`, persists the new
    // BranchMeta, and records the stem as that branch child. Returns the
    // branch's new hash commitment.
    fn update_branch_table(
        &mut self,
        stem_update: StemUpdated,
        branch_id: BranchId,
        branch_index: u8,
        depth: u8,
    ) -> Fr {
        // To update the branch, we need to compute the delta and figure out the
        // generator we want to use.
        //
        // If the hash of the stem commitment is None,
        // then this means that this is the first time we are inserting this stem.
        // We return the hash as zero because if the stem did not exist, the branch node
        // does not commit to it.
        let old_stem_hash = stem_update.old_val.unwrap_or_else(Fr::zero);
        let new_stem_hash = stem_update.new_val;
        let delta = new_stem_hash - old_stem_hash;
        let old_branch_comm = self.storage.get_branch_meta(&branch_id).unwrap().commitment;
        let delta_comm = self.committer.scalar_mul(delta, branch_index as usize);
        let updated_branch_comm = old_branch_comm + delta_comm;
        let hash_updated_branch_comm = group_to_field(&updated_branch_comm);
        // Update the branch metadata
        self.storage.insert_branch(
            branch_id.clone(),
            BranchMeta {
                commitment: updated_branch_comm,
                hash_commitment: hash_updated_branch_comm,
            },
            depth,
        );
        // Record the stem as the child occupying `branch_index` under this branch.
        let mut branch_child_id = branch_id;
        branch_child_id.push(branch_index);
        self.storage
            .add_stem_as_branch_child(branch_child_id, stem_update.stem, depth);
        hash_updated_branch_comm
    }
}
impl<Storage: ReadWriteHigherDb + Flush, PolyCommit: Committer> Trie<Storage, PolyCommit> {
    // TODO: maybe make this private, and automatically flush
    // TODO after each insert. This will promote users to use insert()
    // TODO If the amount of items in insert is too much, we will need to chop it up
    // TODO and flush multiple times
    /// Flushes the underlying storage. The exact persistence semantics depend
    /// on the storage backend's `Flush` implementation.
    pub fn flush_database(&mut self) {
        self.storage.flush()
    }
}
// Returns a list of all of the path indices where the two stems
// are the same and the next path index where they both differ for each
// stem.
//
// If the stems are identical, the shared prefix is the whole stem and both
// differing bytes are None.
fn path_difference(key_a: [u8; 31], key_b: [u8; 31]) -> (Vec<u8>, Option<u8>, Option<u8>) {
    // Position of the first byte where the two stems disagree, if any.
    match key_a.iter().zip(key_b.iter()).position(|(a, b)| a != b) {
        // Shared prefix, followed by the first differing byte of each stem.
        Some(idx) => (key_a[..idx].to_vec(), Some(key_a[idx]), Some(key_b[idx])),
        // The stems are identical: the whole path is shared.
        None => (key_a.to_vec(), None, None),
    }
}
// TODO: Is this hurting performance? If so can we rewrite it to be more efficient?
// TODO Eagerly, we can use SmallVec32
/// Expand a base path by the sequential children of relative path.
///
/// # Example
///
/// Given a base path [0, 1, 2] and relative path [5, 6, 7] the base path
/// will be expanded to 3 paths:
/// [0, 1, 2, 5]
/// [0, 1, 2, 5, 6]
/// [0, 1, 2, 5, 6, 7]
///
/// # Panics
///
/// Panics if `relative` is empty.
fn expand_path<'a>(
    base: &'a [u8],
    relative: &'a [u8],
) -> impl DoubleEndedIterator<Item = Vec<u8>> + 'a {
    assert!(!relative.is_empty());
    // Yield base ++ relative[..1], base ++ relative[..2], ..., base ++ relative.
    (1..=relative.len()).map(move |end| {
        let mut path = Vec::with_capacity(base.len() + end);
        path.extend_from_slice(base);
        path.extend_from_slice(&relative[..end]);
        path
    })
}
#[cfg(test)]
mod tests {
use crate::constants::{CRS, TWO_POW_128};
use crate::database::memory_db::MemoryDb;
use crate::database::ReadOnlyHigherDb;
use crate::trie::Trie;
use crate::TrieTrait;
use crate::{group_to_field, DefaultConfig};
use banderwagon::{trait_defs::*, Element, Fr};
use std::ops::Mul;
#[test]
// Inserting where the key and value are all zeros
// The zeroes cancel out a lot of components, so this is a general fuzz test
// and hopefully the easiest to pass
fn insert_key0value0() {
let db = MemoryDb::new();
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | true |
crate-crypto/rust-verkle | https://github.com/crate-crypto/rust-verkle/blob/e27b8b4edf1992b4afa636c2fc7983bcc27ddb88/verkle-trie/src/proof/opening_data.rs | verkle-trie/src/proof/opening_data.rs | #![allow(clippy::large_enum_variant)]
use super::ExtPresent;
use crate::{
constants::TWO_POW_128,
database::{Meta, ReadOnlyHigherDb},
proof::key_path_finder::{KeyNotFound, KeyPathFinder, KeyState},
};
use banderwagon::{trait_defs::*, Fr};
use ipa_multipoint::{lagrange_basis::LagrangeBasis, multiproof::ProverQuery};
use std::collections::{BTreeMap, BTreeSet};
// Stores opening data that can then be used to form opening queries
#[derive(Debug, Default)]
pub(crate) struct OpeningData {
    // Every node opening needed for the proof, keyed by the node's path in the trie
    pub(crate) openings: BTreeMap<Vec<u8>, Openings>,
    // Auxillary data that we collect while fetching the opening data
    //
    // Extension status (present / different stem / absent) for each queried stem
    pub(crate) extension_present_by_stem: BTreeMap<[u8; 31], ExtPresent>,
    // Depth in the trie at which each queried stem's lookup terminated
    pub(crate) depths_by_stem: BTreeMap<[u8; 31], u8>,
}
impl OpeningData {
    /// Records the extension status (present / different stem / absent) for a stem.
    fn insert_stem_extension_status(&mut self, stem: [u8; 31], ext: ExtPresent) {
        self.extension_present_by_stem.insert(stem, ext);
    }
    /// Registers that the branch node at `path` must be opened at `child_index`.
    fn insert_branch_opening(&mut self, path: Vec<u8>, child_index: u8, meta: Meta) {
        // Check if this opening has already been inserted.
        // If so, we just need to append the child_index to the existing list.
        // The opening is constructed lazily (`or_insert_with`) so nothing is
        // built when an opening already exists for this path.
        let old_branch = self
            .openings
            .entry(path)
            .or_insert_with(|| {
                Openings::Branch(BranchOpeningData {
                    meta,
                    children: BTreeSet::new(),
                })
            })
            .as_mut_branch();
        old_branch.children.insert(child_index);
    }
    /// Registers an extension-only opening for `stem` at `path`.
    fn insert_ext_opening(&mut self, path: Vec<u8>, stem: [u8; 31], meta: Meta) {
        // Check if there is already an extension opening for this path inserted
        // If there is, then it will just have the same data, if not we insert it
        // (constructed lazily so the existing entry short-circuits).
        self.openings
            .entry(path)
            .or_insert_with(|| Openings::Extension(ExtOpeningData { stem, meta }));
    }
    /// Registers that the suffix tree at `path` must be opened at `suffix_value`,
    /// merging with any opening already recorded for the same path.
    fn insert_suffix_opening(
        &mut self,
        path: Vec<u8>,
        ext_open: ExtOpeningData,
        suffix_value: (u8, Option<[u8; 32]>),
    ) {
        let mut suffices = BTreeSet::new();
        suffices.insert(suffix_value);
        let so = SuffixOpeningData {
            ext: ext_open,
            suffices,
        };
        // Check if this suffix opening has already been inserted
        // Note, it could also be an opening for an extension at the path
        // In that case, we overwrite the extension opening data with the suffix opening data
        match self.openings.get_mut(&path) {
            Some(old_val) => {
                // If the previous value was an extension opening
                // then we can just overwrite it, since the suffix opening
                // implicitly opens the extension
                //
                // If it was a suffix opening, then we just need to append
                // this suffix to that list
                match old_val {
                    Openings::Suffix(so) => {
                        assert_eq!(so.ext, ext_open);
                        so.suffices.insert(suffix_value);
                    }
                    Openings::Extension(eo) => {
                        assert_eq!(eo, &ext_open);
                        *old_val = Openings::Suffix(so);
                    }
                    // A path can never hold both a branch node and a suffix tree
                    Openings::Branch(_) => unreachable!(),
                }
            }
            None => {
                // If there was nothing inserted at this path,
                // then we just add the new suffixOpening
                self.openings.insert(path, Openings::Suffix(so));
            }
        };
    }
    /// Walks the trie for every key and gathers all openings (branch, extension,
    /// suffix) needed to prove membership or non-membership of those keys.
    pub(crate) fn collect_opening_data<Storage: ReadOnlyHigherDb>(
        keys: Vec<[u8; 32]>,
        storage: &Storage,
    ) -> OpeningData {
        let mut opening_data = OpeningData::default();
        for key in keys {
            let key_path = KeyPathFinder::find_key_path(storage, key);
            let requires_ext_proof = key_path.requires_extension_proof();
            let node_path = key_path.nodes;
            let key_state = key_path.key_state;
            let value = key_state.value();
            let stem: [u8; 31] = key[0..31].try_into().unwrap();
            let suffix = key[31];
            // Translate the key search outcome into an extension status.
            let ext_pres = match key_state {
                KeyState::Found(_) => ExtPresent::Present,
                KeyState::NotFound(nf) => match nf {
                    KeyNotFound::DifferentStem(_) => ExtPresent::DifferentStem,
                    KeyNotFound::StemFound => ExtPresent::Present,
                    KeyNotFound::Empty => ExtPresent::None,
                },
            };
            let (last_node_path, _, last_node_meta) = node_path.last().cloned().unwrap();
            // First iterate the node path and add the necessary branch opening data
            for (path, z, node) in node_path.into_iter() {
                if node.is_branch_meta() {
                    opening_data.insert_branch_opening(path, z, node);
                }
            }
            // We now need to check if the node_path leads to the key we want
            // or if it leads to a key-not-present state, we can check this with the KeyPath object.
            // Alternatively, we can note:
            //
            // - If the meta data for the last node was a branch
            // then no key was found and instead the slot where the key _would_
            // be found, if we inserted it, is empty.
            //
            // - If the metadata for the last node was a stem, then this does not mean that the key is present
            //
            // Here are the following cases:
            //
            // - It could be the case that the stem we found does not belong to the key
            // This means that the key we searched for and the stem have a common prefix.
            //
            // - It could also be the case that the stem does match, however the key
            // is still not present. This means that there is a key in the trie
            // which shares the same stem, as the key we are inserting.
            //
            // It could be the case that the key was found
            // If an extension proof is not required, then no stem was found
            //
            if !requires_ext_proof {
                opening_data.depths_by_stem.insert(stem, key_path.depth);
                opening_data.insert_stem_extension_status(stem, ext_pres);
                continue;
            };
            assert!(last_node_meta.is_stem_meta());
            // Arriving here means that the key path terminated at a stem
            // Unconditionally, we need to provide an opening for the first two elements in the stems
            // extension, this is (1, stem)
            opening_data.depths_by_stem.insert(stem, key_path.depth - 1);
            let current_stem = key_state.different_stem().unwrap_or(stem);
            // Lets see if it was the stem for the key in question
            // If it is for a different stem, then we only need to show
            // existence of the extension, and not open C1 or C2
            if key_state.different_stem().is_some() {
                opening_data.insert_stem_extension_status(stem, ext_pres);
                opening_data.insert_ext_opening(last_node_path, current_stem, last_node_meta);
                continue;
            }
            // We now know that the key does in fact correspond to the stem
            // we found
            // If value is None, then the key is not in the trie
            // This function however does care whether the value was None or if it was written to
            // since both cases lead to one needing to have a Suffix Opening
            opening_data.insert_stem_extension_status(stem, ext_pres);
            let ext_open = ExtOpeningData {
                stem: current_stem,
                meta: last_node_meta,
            };
            opening_data.insert_suffix_opening(last_node_path, ext_open, (suffix, value))
        }
        opening_data
    }
}
#[derive(Debug, PartialEq, Clone, Copy)]
// Data needed to open an extension node
pub(crate) struct ExtOpeningData {
    // The 31-byte stem whose extension is being opened
    pub(crate) stem: [u8; 31],
    // Metadata (commitments and hashes) of the stem node
    pub(crate) meta: Meta,
}
impl ExtOpeningData {
    /// Builds prover queries opening the extension polynomial.
    ///
    /// Points 0 (constant 1) and 1 (the stem) are always opened; points 2
    /// (hash of C1) and 3 (hash of C2) are opened only when requested.
    pub fn open_query(&self, open_c1: bool, open_c2: bool) -> Vec<ProverQuery> {
        let stem_meta = self.meta.into_stem();
        let stem_scalar = Fr::from_le_bytes_mod_order(&self.stem);
        // Extension polynomial evaluations: [1, stem, H(C1), H(C2)]
        let ext_func = vec![
            Fr::one(),
            stem_scalar,
            stem_meta.hash_c1,
            stem_meta.hash_c2,
        ];
        // (point, claimed evaluation) pairs to open at, in ascending order.
        let mut points = Vec::with_capacity(4);
        // Open(Ext, 0) = 1
        points.push((0, Fr::one()));
        // Open(Ext, 1) = stem
        points.push((1, stem_scalar));
        if open_c1 {
            points.push((2, stem_meta.hash_c1));
        }
        if open_c2 {
            points.push((3, stem_meta.hash_c2));
        }
        points
            .into_iter()
            .map(|(point, result)| ProverQuery {
                commitment: stem_meta.stem_commitment,
                point,
                result,
                poly: LagrangeBasis::new(ext_func.clone()),
            })
            .collect()
    }
}
// Data needed to open a suffix
// This does not include all of the values in the polynomial
// for that, we make an external call to the database when creating OpeningQueries
#[derive(Debug)]
pub(crate) struct SuffixOpeningData {
    // All suffixes must have an associated extension opening
    pub(crate) ext: ExtOpeningData,
    // The suffices to open this suffix tree at
    // and their associated value. value is none if the key is not in
    // the trie
    //
    // Suffixes < 128 are committed to in C1; suffixes >= 128 in C2.
    pub(crate) suffices: BTreeSet<(u8, Option<[u8; 32]>)>,
}
impl SuffixOpeningData {
    // Returns all of the queries needed for the associated extension
    // and the suffices
    pub fn open_query<Storage: ReadOnlyHigherDb>(&self, storage: &Storage) -> Vec<ProverQuery> {
        // Find out if we need to open up at C1 and/or C2
        // (suffices < 128 live in C1, suffices >= 128 live in C2)
        let mut open_c1 = false;
        let mut open_c2 = false;
        for (sfx, _) in self.suffices.iter() {
            if *sfx < 128 {
                open_c1 = true;
            } else {
                open_c2 = true;
            }
        }
        // Open the extension
        let mut ext_queries = self.ext.open_query(open_c1, open_c2);
        // Fetch the evaluations of each half at most once. They do not depend
        // on the particular suffix, so fetching them inside the per-suffix loop
        // (as before) repeated 128 storage reads for every suffix.
        let c1_poly = if open_c1 {
            Some(get_half_of_stem_children_children_hashes(
                self.ext.stem,
                0,
                storage,
            ))
        } else {
            None
        };
        let c2_poly = if open_c2 {
            Some(get_half_of_stem_children_children_hashes(
                self.ext.stem,
                128,
                storage,
            ))
        } else {
            None
        };
        // Open all suffices
        let mut suffice_queries = Vec::with_capacity(self.suffices.len());
        let stem_meta = self.ext.meta.into_stem();
        for (sfx, value) in &self.suffices {
            // Each suffix occupies two consecutive evaluation points:
            // the low 16 bytes and the high 16 bytes of its value.
            let value_lower_index = 2 * (sfx % 128);
            let value_upper_index = value_lower_index + 1;
            let (value_low, value_high) = match value {
                Some(bytes) => (
                    // 2^128 is added to the low half of present values
                    // (value-present marker); absent values contribute zero.
                    Fr::from_le_bytes_mod_order(&bytes[0..16]) + TWO_POW_128,
                    Fr::from_le_bytes_mod_order(&bytes[16..32]),
                ),
                None => (Fr::zero(), Fr::zero()),
            };
            // Pick the polynomial and commitment of the half this suffix lives in.
            // The unwraps cannot fail: open_c1/open_c2 were derived from these
            // same suffices above.
            let (c1_or_c2, commitment) = if *sfx < 128 {
                (c1_poly.as_ref().unwrap(), stem_meta.c_1)
            } else {
                (c2_poly.as_ref().unwrap(), stem_meta.c_2)
            };
            let open_at_val_low = ProverQuery {
                commitment,
                point: value_lower_index as usize,
                result: value_low,
                poly: LagrangeBasis::new(c1_or_c2.clone()),
            };
            let open_at_val_upper = ProverQuery {
                commitment,
                point: value_upper_index as usize,
                result: value_high,
                poly: LagrangeBasis::new(c1_or_c2.clone()),
            };
            suffice_queries.push(open_at_val_low);
            suffice_queries.push(open_at_val_upper);
        }
        ext_queries.extend(suffice_queries);
        ext_queries
    }
}
// Data needed to open a branch (internal) node
#[derive(Debug)]
pub(crate) struct BranchOpeningData {
    // Metadata (commitment and hash) of the branch node being opened
    pub(crate) meta: Meta,
    // open this node at these children
    pub(crate) children: BTreeSet<u8>,
}
impl BranchOpeningData {
    /// Builds one prover query per requested child of the branch at `branch_path`.
    pub fn open_query<Storage: ReadOnlyHigherDb>(
        &self,
        branch_path: &[u8],
        storage: &Storage,
    ) -> Vec<ProverQuery> {
        let branch_meta = self.meta.into_branch();
        // Evaluations of the branch polynomial: one hash per child slot,
        // zero when the slot is empty.
        let polynomial = get_branch_children_hashes(branch_path.to_vec(), storage);
        // One query per child index we were asked to open at.
        self.children
            .iter()
            .map(|&child_index| ProverQuery {
                commitment: branch_meta.commitment,
                point: child_index as usize,
                result: polynomial[child_index as usize],
                poly: LagrangeBasis::new(polynomial.clone()),
            })
            .collect()
    }
}
// The kind of opening recorded at a particular trie path
#[derive(Debug)]
pub(crate) enum Openings {
    // Opens a suffix tree (and, implicitly, its extension)
    Suffix(SuffixOpeningData),
    // Opens a branch node at one or more of its children
    Branch(BranchOpeningData),
    // Opens only the first two elements (1, stem) of an extension
    Extension(ExtOpeningData),
}
impl Openings {
    /// Returns a mutable reference to the inner branch opening data.
    ///
    /// # Panics
    ///
    /// Panics if this opening is not the `Branch` variant.
    pub(crate) fn as_mut_branch(&mut self) -> &mut BranchOpeningData {
        if let Openings::Branch(b) = self {
            b
        } else {
            panic!("unexpected enum variant")
        }
    }
}
// Collects one field element per child slot (256 in total) of the branch at
// `path`: the child's hash commitment, or zero when the slot is empty.
fn get_branch_children_hashes<Storage: ReadOnlyHigherDb>(
    path: Vec<u8>,
    storage: &Storage,
) -> Vec<Fr> {
    (0..=255)
        .map(|index| {
            // TODO this should use a range query
            match storage.get_branch_child(&path, index) {
                Some(crate::database::BranchChild::Stem(stem_id)) => {
                    storage.get_stem_meta(stem_id).unwrap().hash_stem_commitment
                }
                Some(crate::database::BranchChild::Branch(b_meta)) => b_meta.hash_commitment,
                None => Fr::zero(),
            }
        })
        .collect()
}
// Collects the 256 evaluation points (low/high halves of 128 leaf values) for
// one half of a stem's children: `start == 0` selects suffixes 0..=127 (C1),
// `start == 128` selects suffixes 128..=255 (C2).
fn get_half_of_stem_children_children_hashes<Storage: ReadOnlyHigherDb>(
    stem_id: [u8; 31],
    start: u8,
    storage: &Storage,
) -> Vec<Fr> {
    assert!(start == 0 || start == 128);
    let mut child_hashes = Vec::with_capacity(256);
    // Build the 32-byte leaf key once; only the suffix byte changes per
    // iteration, so there is no need to re-allocate the key in the loop.
    let mut leaf_key = [0u8; 32];
    leaf_key[..31].copy_from_slice(&stem_id);
    // 0 to 127 is first 128 elements
    // 128 to 255 is the second 128 elements
    let end = start + 127;
    for i in start..=end {
        leaf_key[31] = i;
        let leaf_val = storage.get_leaf(leaf_key); //TODO this should use a range query
        let (lower, upper) = match leaf_val {
            Some(bytes) => {
                // Present values get 2^128 added to the low half
                // (value-present marker).
                let lower = Fr::from_le_bytes_mod_order(&bytes[0..16]) + TWO_POW_128;
                let upper = Fr::from_le_bytes_mod_order(&bytes[16..32]);
                (lower, upper)
            }
            // Absent leaves contribute zero directly; parsing a zeroed byte
            // array through from_le_bytes_mod_order yielded the same result
            // but did pointless work.
            None => (Fr::zero(), Fr::zero()),
        };
        child_hashes.push(lower);
        child_hashes.push(upper);
    }
    assert_eq!(child_hashes.len(), 256);
    child_hashes
}
| rust | Apache-2.0 | e27b8b4edf1992b4afa636c2fc7983bcc27ddb88 | 2026-01-04T20:20:39.506404Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.