repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/lib.rs | kzg/src/lib.rs | #![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
use alloc::{borrow::ToOwned, string::String, vec::Vec};
use arbitrary::Arbitrary;
use core::fmt::Debug;
use msm::precompute::PrecomputationTable;
pub mod common_utils;
mod das;
pub mod eip_4844;
pub mod eth;
pub mod msm;
pub use das::{EcBackend, DAS};
/// A scalar (field) element of the BLS12-381 scalar field, implemented by a
/// concrete cryptographic backend.
pub trait Fr: Default + Clone + PartialEq + Sync + for<'a> Arbitrary<'a> {
    /// A backend-specific "null" marker value (see [`Fr::is_null`]).
    fn null() -> Self;
    /// The additive identity.
    fn zero() -> Self;
    /// The multiplicative identity.
    fn one() -> Self;
    #[cfg(feature = "rand")]
    fn rand() -> Self;
    /// Parse a field element from bytes, validating the encoding.
    fn from_bytes(bytes: &[u8]) -> Result<Self, String>;
    /// Parse a field element skipping validation; by default this falls back
    /// to the checked [`Fr::from_bytes`].
    fn from_bytes_unchecked(bytes: &[u8]) -> Result<Self, String> {
        Self::from_bytes(bytes)
    }
    /// Parse a field element from a hex string.
    fn from_hex(hex: &str) -> Result<Self, String>;
    /// Build a field element from four 64-bit limbs.
    fn from_u64_arr(u: &[u64; 4]) -> Self;
    /// Build a field element from a single `u64`.
    fn from_u64(u: u64) -> Self;
    /// Serialize to a 32-byte encoding.
    fn to_bytes(&self) -> [u8; 32];
    /// Export as four 64-bit limbs.
    fn to_u64_arr(&self) -> [u64; 4];
    fn is_one(&self) -> bool;
    fn is_zero(&self) -> bool;
    /// Whether this is the backend's [`Fr::null`] marker.
    fn is_null(&self) -> bool;
    /// Squaring (`self * self`).
    fn sqr(&self) -> Self;
    fn mul(&self, b: &Self) -> Self;
    fn add(&self, b: &Self) -> Self;
    fn sub(&self, b: &Self) -> Self;
    /// Multiplicative inverse ("Euclidean" backend variant).
    fn eucl_inverse(&self) -> Self;
    /// Additive negation.
    fn negate(&self) -> Self;
    /// Multiplicative inverse.
    fn inverse(&self) -> Self;
    /// Raise to the power `n`.
    fn pow(&self, n: usize) -> Self;
    /// Fallible field division.
    fn div(&self, b: &Self) -> Result<Self, String>;
    fn equals(&self, b: &Self) -> bool;
    /// Convenience alias for [`Fr::equals`].
    fn eq(&self, other: &Self) -> bool {
        self.equals(other)
    }
    /// Convert to the raw 256-bit scalar representation used by the MSM code.
    fn to_scalar(&self) -> Scalar256;
}
/// A point of the BLS12-381 G1 group in the backend's internal (projective)
/// representation.
pub trait G1: Clone + Default + PartialEq + Sync + Debug + Send {
    fn zero() -> Self;
    /// The group identity element.
    fn identity() -> Self;
    fn generator() -> Self;
    fn negative_generator() -> Self;
    #[cfg(feature = "rand")]
    fn rand() -> Self;
    /// Deserialize a point from bytes, validating the encoding.
    fn from_bytes(bytes: &[u8]) -> Result<Self, String>;
    fn from_hex(hex: &str) -> Result<Self, String>;
    /// Serialize to a 48-byte encoding.
    fn to_bytes(&self) -> [u8; 48];
    /// Addition that also handles the `self == b` (doubling) case.
    fn add_or_dbl(&self, b: &Self) -> Self;
    /// Whether the point is the point at infinity.
    fn is_inf(&self) -> bool;
    /// Whether the point is a valid group element.
    fn is_valid(&self) -> bool;
    fn dbl(&self) -> Self;
    fn add(&self, b: &Self) -> Self;
    fn sub(&self, b: &Self) -> Self;
    fn equals(&self, b: &Self) -> bool;
    /// Convenience alias for [`G1::equals`].
    fn eq(&self, other: &Self) -> bool {
        self.equals(other)
    }
    fn add_or_dbl_assign(&mut self, b: &Self);
    fn add_assign(&mut self, b: &Self);
    fn dbl_assign(&mut self);
}
/// Access to the raw base-field coordinates of a [`G1`] point.
pub trait G1GetFp<TFp: G1Fp>: G1 + Clone {
    /// Return field X of G1.
    fn x(&self) -> &TFp;
    /// Return field Y of G1.
    fn y(&self) -> &TFp;
    /// Return field Z of G1.
    fn z(&self) -> &TFp;
    /// Return field X of G1 as mutable.
    fn x_mut(&mut self) -> &mut TFp;
    /// Return field Y of G1 as mutable.
    fn y_mut(&mut self) -> &mut TFp;
    /// Return field Z of G1 as mutable.
    fn z_mut(&mut self) -> &mut TFp;
    /// Construct G1 point from jacobian coordinates (x, y, z).
    fn from_jacobian(x: TFp, y: TFp, z: TFp) -> Self;
}
/// Scalar multiplication on [`G1`].
pub trait G1Mul<TFr: Fr>: G1 + Clone {
    /// Multiply the point by the scalar `b`.
    fn mul(&self, b: &TFr) -> Self;
}
/// Multi-scalar multiplication (linear combination) over [`G1`] points.
pub trait G1LinComb<
    TFr: Fr,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<Self, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<Self, TG1Fp, TG1Affine>,
>: G1 + G1Mul<TFr> + G1GetFp<TG1Fp> + Clone
{
    /// Compute `sum(points[i] * scalars[i])` over the first `len` entries,
    /// optionally accelerated by a precomputation table.
    fn g1_lincomb(
        points: &[Self],
        scalars: &[TFr],
        len: usize,
        precomputation: Option<&PrecomputationTable<TFr, Self, TG1Fp, TG1Affine, TG1ProjAddAffine>>,
    ) -> Self;
    /// Compute one linear combination per row of `points`/`scalars`.
    ///
    /// NOTE(review): when a precomputation table is supplied, only the outer
    /// lengths are validated and `points` is ignored in favor of the table —
    /// confirm callers always pass the points the table was built from.
    fn g1_lincomb_batch(
        points: &[Vec<Self>],
        scalars: &[Vec<TFr>],
        precomputation: Option<&PrecomputationTable<TFr, Self, TG1Fp, TG1Affine, TG1ProjAddAffine>>,
    ) -> Result<Vec<Self>, String> {
        if points.len() != scalars.len() {
            return Err("Invalid batch size".to_owned());
        }
        if let Some(precomputation) = precomputation {
            Ok(precomputation.multiply_batch(scalars))
        } else {
            let mut result = Vec::new();
            for (points, scalars) in points.iter().zip(scalars.iter()) {
                if points.len() != scalars.len() {
                    return Err("Invalid point count length".to_owned());
                }
                result.push(Self::g1_lincomb(points, scalars, points.len(), None));
            }
            Ok(result)
        }
    }
}
/// An element of the BLS12-381 base field `Fp` (six 64-bit limbs).
pub trait G1Fp: Clone + Default + Sync + Copy + PartialEq + Debug + Send {
    fn zero() -> Self;
    fn one() -> Self;
    /// Backend-specific constant (presumably the Montgomery `R` constant for
    /// BLS12-381 — confirm against backend implementations).
    fn bls12_381_rx_p() -> Self;
    /// Multiplicative inverse, or `None` when no inverse exists.
    fn inverse(&self) -> Option<Self>;
    fn square(&self) -> Self;
    fn double(&self) -> Self;
    /// Build from the backend's raw six-limb representation.
    fn from_underlying_arr(arr: &[u64; 6]) -> Self;
    /// Multiply by three.
    fn mul3(&self) -> Self;
    fn neg_assign(&mut self);
    fn mul_assign_fp(&mut self, b: &Self);
    fn sub_assign_fp(&mut self, b: &Self);
    fn add_assign_fp(&mut self, b: &Self);
    /// Owned negation, implemented via [`G1Fp::neg_assign`].
    fn neg(mut self) -> Self {
        self.neg_assign();
        self
    }
    /// Owned multiplication, implemented via [`G1Fp::mul_assign_fp`].
    fn mul_fp(mut self, b: &Self) -> Self {
        self.mul_assign_fp(b);
        self
    }
    /// Owned subtraction, implemented via [`G1Fp::sub_assign_fp`].
    fn sub_fp(mut self, b: &Self) -> Self {
        self.sub_assign_fp(b);
        self
    }
    /// Owned addition, implemented via [`G1Fp::add_assign_fp`].
    fn add_fp(mut self, b: &Self) -> Self {
        self.add_assign_fp(b);
        self
    }
    fn is_zero(&self) -> bool {
        *self == Self::zero()
    }
    fn set_zero(&mut self) {
        *self = Self::zero();
    }
    fn is_one(&self) -> bool {
        *self == Self::one()
    }
    fn set_one(&mut self) {
        *self = Self::one();
    }
}
/// Affine representation of a BLS12-381 G1 point.
pub trait G1Affine<TG1: G1, TG1Fp: G1Fp>:
    Clone + Default + PartialEq + Sync + Copy + Send + Debug + for<'a> Arbitrary<'a>
{
    /// The affine zero (point-at-infinity) encoding.
    fn zero() -> Self;
    /// Construct a point from its affine coordinates.
    fn from_xy(x: TG1Fp, y: TG1Fp) -> Self;
    /// Convert a single projective point to affine form.
    fn into_affine(g1: &TG1) -> Self;
    // Batch conversion can be faster than transforming each individually
    /// Convert every point in `g1` to affine form, writing into `out`.
    fn into_affines_loc(out: &mut [Self], g1: &[TG1]);
    /// Convert a slice of projective points, allocating the output buffer.
    fn into_affines(g1: &[TG1]) -> Vec<Self> {
        // Fill the buffer with a valid default value instead of the previous
        // `set_len` on uninitialized storage: handing `into_affines_loc` a
        // `&mut [Self]` of uninitialized elements is undefined behavior
        // (flagged by the `clippy::uninit_vec` lint). `Self: Default + Copy`,
        // so this initialization is cheap.
        let mut vec = Vec::with_capacity(g1.len());
        vec.resize(g1.len(), Self::default());
        Self::into_affines_loc(&mut vec, g1);
        vec
    }
    /// Convert back to the projective representation.
    fn to_proj(&self) -> TG1;
    /// Return field X of Affine.
    fn x(&self) -> &TG1Fp;
    /// Return field Y of Affine.
    fn y(&self) -> &TG1Fp;
    /// Return field X of Affine as mutable.
    fn x_mut(&mut self) -> &mut TG1Fp;
    /// Return field Y of Affine as mutable.
    fn y_mut(&mut self) -> &mut TG1Fp;
    /// Return whether Affine is at infinity.
    fn is_infinity(&self) -> bool;
    /// Return whether Affine is zero.
    fn is_zero(&self) -> bool {
        *self == Self::zero()
    }
    /// Pointwise negation.
    fn neg(&self) -> Self;
    fn set_zero(&mut self) {
        *self = Self::zero();
    }
    /// Serialize to the uncompressed 96-byte encoding.
    fn to_bytes_uncompressed(&self) -> [u8; 96];
    /// Deserialize from the uncompressed 96-byte encoding.
    fn from_bytes_uncompressed(bytes: [u8; 96]) -> Result<Self, String>;
}
/// Mixed-form addition: adding an affine point into a projective point.
pub trait G1ProjAddAffine<TG1: G1, TG1Fp: G1Fp, TG1Affine: G1Affine<TG1, TG1Fp>>:
    Sized + Sync + Send
{
    /// `proj += aff`. NOTE(review): presumably assumes the points are
    /// distinct (no doubling) — confirm with backend implementations.
    fn add_assign_affine(proj: &mut TG1, aff: &TG1Affine);
    /// `proj += aff`, also handling the doubling case.
    fn add_or_double_assign_affine(proj: &mut TG1, aff: &TG1Affine);
    /// Owned variant of [`G1ProjAddAffine::add_assign_affine`].
    fn add_affine(mut proj: TG1, aff: &TG1Affine) -> TG1 {
        Self::add_assign_affine(&mut proj, aff);
        proj
    }
    /// Owned variant of [`G1ProjAddAffine::add_or_double_assign_affine`].
    fn add_or_double_affine(mut proj: TG1, aff: &TG1Affine) -> TG1 {
        Self::add_or_double_assign_affine(&mut proj, aff);
        proj
    }
    /// `proj -= aff`, implemented by negating the affine Y coordinate and
    /// adding (uses the non-doubling add).
    fn sub_assign_affine(proj: &mut TG1, mut aff: TG1Affine) {
        aff.y_mut().neg_assign();
        Self::add_assign_affine(proj, &aff);
    }
}
/// A 256-bit scalar stored as four little-endian 64-bit limbs
/// (`data[0]` is the least-significant limb).
#[derive(Debug, Default, PartialEq, Eq, Clone, Copy)]
pub struct Scalar256 {
    data: [u64; 4],
}
#[allow(unused)]
impl Scalar256 {
    const ONE: Self = Self { data: [1, 0, 0, 0] };
    const ZERO: Self = Self { data: [0; 4] };
    /// Build a scalar from a single `u64` (upper limbs zero).
    pub fn from_u64_s(arr: u64) -> Self {
        Scalar256 {
            data: [arr, 0, 0, 0],
        }
    }
    /// Build a scalar from four little-endian limbs.
    pub fn from_u64(arr: [u64; 4]) -> Self {
        Scalar256 { data: arr }
    }
    /// Build a scalar from 32 bytes interpreted as little-endian.
    pub fn from_u8(arr: &[u8; 32]) -> Self {
        Scalar256 {
            data: Self::cast_scalar_to_u64_arr(arr),
        }
    }
    /// View the scalar as its raw 32 bytes.
    ///
    /// NOTE(review): this reinterprets the limbs in native byte order, so the
    /// result is the little-endian encoding only on little-endian targets.
    pub fn as_u8(&self) -> &[u8] {
        // SAFETY: `data` is 4 * 8 = 32 contiguous, initialized bytes, `u8`
        // has alignment 1, and the borrow of `self` keeps the storage alive
        // for the returned slice's lifetime.
        unsafe { core::slice::from_raw_parts(self.data.as_ptr() as *const u8, 32) }
    }
    /// Decode `N_U8` little-endian bytes into `N` 64-bit limbs.
    ///
    /// Callers must ensure `N_U8 >= 8 * N` (out-of-range indices panic).
    /// This replaces a raw pointer cast from `*const u8` to `*const [u64; N]`,
    /// which was unsound: the byte array is not guaranteed to be aligned for
    /// `u64`, nothing tied `N_U8` to `8 * N`, and the result depended on the
    /// target's endianness.
    const fn cast_scalar_to_u64_arr<const N: usize, const N_U8: usize>(
        input: &[u8; N_U8],
    ) -> [u64; N] {
        let mut out = [0u64; N];
        let mut limb = 0;
        while limb < N {
            let mut byte = 0;
            let mut word = 0u64;
            while byte < 8 {
                word |= (input[limb * 8 + byte] as u64) << (8 * byte);
                byte += 1;
            }
            out[limb] = word;
            limb += 1;
        }
        out
    }
    /// Whether all limbs are zero.
    fn is_zero(&self) -> bool {
        self.data == Self::ZERO.data
    }
    /// Logical right shift by `n` bits, in place.
    fn divn(&mut self, mut n: u32) {
        const N: usize = 4;
        // Shifting by the full width (or more) clears the value.
        if n >= (64 * N) as u32 {
            *self = Self::from_u64_s(0);
            return;
        }
        // Whole-limb shifts: move every limb one position down, feeding
        // zeros in at the most-significant limb.
        while n >= 64 {
            let mut t = 0;
            for i in 0..N {
                core::mem::swap(&mut t, &mut self.data[N - i - 1]);
            }
            n -= 64;
        }
        // Sub-limb shift: carry the low `n` bits of each limb into the next
        // less-significant limb, working from the top limb downwards.
        // (`64 - n` is safe here because `0 < n < 64`.)
        if n > 0 {
            let mut t = 0;
            #[allow(unused)]
            for i in 0..N {
                let a = &mut self.data[N - i - 1];
                let t2 = *a << (64 - n);
                *a >>= n;
                *a |= t;
                t = t2;
            }
        }
    }
}
/// A point of the BLS12-381 G2 group.
pub trait G2: Clone + Default {
    fn generator() -> Self;
    fn negative_generator() -> Self;
    /// Deserialize a point from bytes, validating the encoding.
    fn from_bytes(bytes: &[u8]) -> Result<Self, String>;
    /// Serialize to a 96-byte encoding.
    fn to_bytes(&self) -> [u8; 96];
    /// Addition handling the doubling case.
    /// NOTE(review): unlike `G1::add_or_dbl` this takes `&mut self` yet still
    /// returns the sum — confirm whether mutating `self` is intended.
    fn add_or_dbl(&mut self, b: &Self) -> Self;
    fn dbl(&self) -> Self;
    fn sub(&self, b: &Self) -> Self;
    fn equals(&self, b: &Self) -> bool;
}
/// Scalar multiplication on [`G2`].
pub trait G2Mul<Fr>: Clone {
    /// Multiply the point by the scalar `b`.
    fn mul(&self, b: &Fr) -> Self;
}
/// Pairing check used by KZG proof verification.
pub trait PairingVerify<TG1: G1, TG2: G2> {
    /// Presumably checks `e(a1, a2) == e(b1, b2)` — confirm against the
    /// backend implementations.
    fn verify(a1: &TG1, a2: &TG2, b1: &TG1, b2: &TG2) -> bool;
}
/// FFT over scalar field elements.
pub trait FFTFr<Coeff: Fr> {
    /// Transform `data`; `inverse` selects the inverse FFT.
    fn fft_fr(&self, data: &[Coeff], inverse: bool) -> Result<Vec<Coeff>, String>;
}
/// FFT over G1 group elements.
pub trait FFTG1<Coeff: G1> {
    /// Transform `data`; `inverse` selects the inverse FFT.
    fn fft_g1(&self, data: &[Coeff], inverse: bool) -> Result<Vec<Coeff>, String>;
}
/// Data-availability-sampling FFT extension.
pub trait DASExtension<Coeff: Fr> {
    /// Extend the given `evens` values (presumably producing the
    /// complementary odd half of the domain — confirm with implementations).
    fn das_fft_extension(&self, evens: &[Coeff]) -> Result<Vec<Coeff>, String>;
}
/// Construction of "zero polynomials" — polynomials that vanish on selected
/// powers of a root of unity.
pub trait ZeroPoly<Coeff: Fr, Polynomial: Poly<Coeff>> {
    /// Calculates the minimal polynomial that evaluates to zero for powers of roots of unity at the
    /// given indices.
    /// The returned polynomial has a length of `idxs.len() + 1`.
    ///
    /// Uses straightforward long multiplication to calculate the product of `(x - r^i)` where `r`
    /// is a root of unity and the `i`s are the indices at which it must evaluate to zero.
    fn do_zero_poly_mul_partial(&self, idxs: &[usize], stride: usize)
        -> Result<Polynomial, String>;
    /// Reduce partials using a specified domain size.
    /// Calculates the product of all polynomials via FFT and then applies an inverse FFT to produce
    /// a new Polynomial.
    fn reduce_partials(
        &self,
        domain_size: usize,
        partials: &[Polynomial],
    ) -> Result<Polynomial, String>;
    /// Calculate the minimal polynomial that evaluates to zero for powers of roots of unity that
    /// correspond to missing indices.
    /// This is done simply by multiplying together `(x - r^i)` for all the `i` that are missing
    /// indices, using a combination of direct multiplication ([`Self::do_zero_poly_mul_partial()`])
    /// and iterated multiplication via convolution (#reduce_partials).
    /// Also calculates the FFT (the "evaluation polynomial").
    fn zero_poly_via_multiplication(
        &self,
        domain_size: usize,
        idxs: &[usize],
    ) -> Result<(Vec<Coeff>, Polynomial), String>;
}
/// Precomputed FFT domain (roots of unity) for a backend.
pub trait FFTSettings<Coeff: Fr>: Default + Clone {
    /// Create settings for a domain derived from `scale` (presumably width
    /// `2^scale` — confirm with implementations).
    fn new(scale: usize) -> Result<Self, String>;
    /// The maximum supported FFT width.
    fn get_max_width(&self) -> usize;
    /// The `i`-th entry of the reversed roots-of-unity table.
    fn get_reverse_roots_of_unity_at(&self, i: usize) -> Coeff;
    fn get_reversed_roots_of_unity(&self) -> &[Coeff];
    /// The `i`-th root of unity.
    fn get_roots_of_unity_at(&self, i: usize) -> Coeff;
    fn get_roots_of_unity(&self) -> &[Coeff];
    /// Roots of unity in bit-reversal permutation (BRP) order.
    fn get_brp_roots_of_unity(&self) -> &[Coeff];
    fn get_brp_roots_of_unity_at(&self, i: usize) -> Coeff;
}
/// FFT-based polynomial multiplication that can reuse a precomputed domain.
pub trait FFTSettingsPoly<Coeff: Fr, Polynomial: Poly<Coeff>, FSettings: FFTSettings<Coeff>> {
    /// Multiply `a` and `b` via FFT, producing a result of length `len`;
    /// `fs` optionally supplies precomputed FFT settings.
    fn poly_mul_fft(
        a: &Polynomial,
        b: &Polynomial,
        len: usize,
        fs: Option<&FSettings>,
    ) -> Result<Polynomial, String>;
}
/// A polynomial with coefficients of type `Coeff`.
pub trait Poly<Coeff: Fr>: Default + Clone {
    /// Create a polynomial with `size` coefficients.
    fn new(size: usize) -> Self;
    // Default implementation not as efficient, should be implemented by type itself!
    /// Build a polynomial from a coefficient slice.
    fn from_coeffs(coeffs: &[Coeff]) -> Self {
        let mut poly = Self::new(coeffs.len());
        for (i, coeff) in coeffs.iter().enumerate() {
            poly.set_coeff_at(i, coeff);
        }
        poly
    }
    fn get_coeff_at(&self, i: usize) -> Coeff;
    fn set_coeff_at(&mut self, i: usize, x: &Coeff);
    fn get_coeffs(&self) -> &[Coeff];
    /// Number of coefficients.
    fn len(&self) -> usize;
    fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Evaluate the polynomial at `x`.
    fn eval(&self, x: &Coeff) -> Coeff;
    fn scale(&mut self);
    fn unscale(&mut self);
    /// Inverse with the given output length.
    fn inverse(&mut self, new_len: usize) -> Result<Self, String>;
    /// Polynomial division.
    fn div(&mut self, x: &Self) -> Result<Self, String>;
    fn long_div(&mut self, x: &Self) -> Result<Self, String>;
    fn fast_div(&mut self, x: &Self) -> Result<Self, String>;
    /// Direct (schoolbook) multiplication with output length `len`.
    fn mul_direct(&mut self, x: &Self, len: usize) -> Result<Self, String>;
}
/// Recovery of a polynomial from partial evaluations (erasure decoding),
/// where `None` marks a missing sample.
pub trait PolyRecover<Coeff: Fr, Polynomial: Poly<Coeff>, FSettings: FFTSettings<Coeff>> {
    /// Recover the polynomial's coefficient form from samples.
    fn recover_poly_coeffs_from_samples(
        samples: &[Option<Coeff>],
        fs: &FSettings,
    ) -> Result<Polynomial, String>;
    /// Recover the polynomial from samples (presumably returning the
    /// evaluation form — confirm with implementations).
    fn recover_poly_from_samples(
        samples: &[Option<Coeff>],
        fs: &FSettings,
    ) -> Result<Polynomial, String>;
}
/// Trusted-setup state required for KZG commitment, proof computation and
/// verification.
pub trait KZGSettings<
    Coeff1: Fr,
    Coeff2: G1 + G1Mul<Coeff1> + G1GetFp<TG1Fp>,
    Coeff3: G2,
    Fs: FFTSettings<Coeff1>,
    Polynomial: Poly<Coeff1>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<Coeff2, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<Coeff2, TG1Fp, TG1Affine>,
>: Default + Clone
{
    /// Build settings from the trusted setup's monomial/Lagrange G1 points
    /// and monomial G2 points.
    fn new(
        g1_monomial: &[Coeff2],
        g1_lagrange_brp: &[Coeff2],
        g2_monomial: &[Coeff3],
        fs: &Fs,
        cell_size: usize,
    ) -> Result<Self, String>;
    /// Commit to the polynomial `p`.
    fn commit_to_poly(&self, p: &Polynomial) -> Result<Coeff2, String>;
    /// Compute an opening proof for `p` at the point `x`.
    fn compute_proof_single(&self, p: &Polynomial, x: &Coeff1) -> Result<Coeff2, String>;
    /// Verify a single-point opening proof.
    fn check_proof_single(
        &self,
        com: &Coeff2,
        proof: &Coeff2,
        x: &Coeff1,
        value: &Coeff1,
    ) -> Result<bool, String>;
    /// Compute a multi-point opening proof.
    fn compute_proof_multi(&self, p: &Polynomial, x: &Coeff1, n: usize) -> Result<Coeff2, String>;
    /// Verify a multi-point opening proof.
    fn check_proof_multi(
        &self,
        com: &Coeff2,
        proof: &Coeff2,
        x: &Coeff1,
        values: &[Coeff1],
        n: usize,
    ) -> Result<bool, String>;
    fn get_roots_of_unity_at(&self, i: usize) -> Coeff1;
    fn get_fft_settings(&self) -> &Fs;
    fn get_g1_monomial(&self) -> &[Coeff2];
    fn get_g1_lagrange_brp(&self) -> &[Coeff2];
    fn get_g2_monomial(&self) -> &[Coeff3];
    /// The MSM precomputation table, when the backend built one.
    fn get_precomputation(
        &self,
    ) -> Option<&PrecomputationTable<Coeff1, Coeff2, TG1Fp, TG1Affine, TG1ProjAddAffine>>;
    fn get_x_ext_fft_columns(&self) -> &[Vec<Coeff2>];
    fn get_cell_size(&self) -> usize;
}
/// FK20 settings for computing all single-point proofs efficiently.
pub trait FK20SingleSettings<
    Coeff1: Fr,
    Coeff2: G1 + G1Mul<Coeff1> + G1GetFp<TG1Fp>,
    Coeff3: G2,
    Fs: FFTSettings<Coeff1>,
    Polynomial: Poly<Coeff1>,
    Ks: KZGSettings<Coeff1, Coeff2, Coeff3, Fs, Polynomial, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<Coeff2, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<Coeff2, TG1Fp, TG1Affine>,
>: Default + Clone
{
    /// Build FK20 settings on top of existing KZG settings.
    fn new(ks: &Ks, n2: usize) -> Result<Self, String>;
    /// Compute the data-availability proofs for `p`.
    fn data_availability(&self, p: &Polynomial) -> Result<Vec<Coeff2>, String>;
    /// Optimized variant of [`Self::data_availability`].
    fn data_availability_optimized(&self, p: &Polynomial) -> Result<Vec<Coeff2>, String>;
}
/// FK20 settings for computing multi-point (chunked) proofs efficiently.
pub trait FK20MultiSettings<
    Coeff1: Fr,
    Coeff2: G1 + G1Mul<Coeff1> + G1GetFp<TG1Fp>,
    Coeff3: G2,
    Fs: FFTSettings<Coeff1>,
    Polynomial: Poly<Coeff1>,
    Ks: KZGSettings<Coeff1, Coeff2, Coeff3, Fs, Polynomial, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<Coeff2, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<Coeff2, TG1Fp, TG1Affine>,
>: Default + Clone
{
    /// Build FK20 settings on top of existing KZG settings, with proofs
    /// covering chunks of `chunk_len` points.
    fn new(ks: &Ks, n2: usize, chunk_len: usize) -> Result<Self, String>;
    /// Compute the data-availability proofs for `p`.
    fn data_availability(&self, p: &Polynomial) -> Result<Vec<Coeff2>, String>;
    /// Optimized variant of [`Self::data_availability`].
    fn data_availability_optimized(&self, p: &Polynomial) -> Result<Vec<Coeff2>, String>;
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/eip_4844.rs | kzg/src/eip_4844.rs | #![allow(non_camel_case_types)]
extern crate alloc;
use crate::common_utils::reverse_bit_order;
use crate::eth;
use crate::eth::c_bindings::CKZGSettings;
use crate::eth::FIELD_ELEMENTS_PER_EXT_BLOB;
use crate::msm::precompute::PrecomputationTable;
use crate::G1Affine;
use crate::G1Fp;
use crate::G1GetFp;
use crate::G1LinComb;
use crate::G1ProjAddAffine;
use crate::{FFTSettings, Fr, G1Mul, KZGSettings, PairingVerify, Poly, G1, G2};
use alloc::collections::BTreeMap;
use alloc::format;
use alloc::string::String;
use alloc::string::ToString;
use alloc::sync::Arc;
use alloc::vec;
use alloc::vec::Vec;
use core::hash::Hash;
use core::hash::Hasher;
use sha2::{Digest, Sha256};
use siphasher::sip::SipHasher;
#[cfg(feature = "parallel")]
use rayon::prelude::*;
////////////////////////////// Constant values for EIP-4844 //////////////////////////////
/// Number of field elements in an EIP-4844 blob.
pub const FIELD_ELEMENTS_PER_BLOB: usize = 4096;
/// Serialized size of a G1 point in bytes.
pub const BYTES_PER_G1: usize = 48;
/// Serialized size of a G2 point in bytes.
pub const BYTES_PER_G2: usize = 96;
/// Serialized size of a blob in bytes.
pub const BYTES_PER_BLOB: usize = BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB;
/// Serialized size of a field element in bytes.
pub const BYTES_PER_FIELD_ELEMENT: usize = 32;
/// Serialized size of a KZG proof in bytes.
pub const BYTES_PER_PROOF: usize = 48;
/// Serialized size of a KZG commitment in bytes.
pub const BYTES_PER_COMMITMENT: usize = 48;
/// Default location of the trusted setup file.
pub const TRUSTED_SETUP_PATH: &str = "src/trusted_setup.txt";
// Currently, we only support fixed amount of G1 and G2 points contained in trusted setups.
// Issue arises when a binding using the C API loads different G1 point quantities each time.
// NOTE(review): mutable global state; every access requires `unsafe` and is
// not thread-safe — confirm C-API callers are externally synchronized.
pub static mut TRUSTED_SETUP_NUM_G1_POINTS: usize = 0;
/// Number of G2 points expected in a trusted setup.
pub const TRUSTED_SETUP_NUM_G2_POINTS: usize = 65;
/// Byte length of the Fiat–Shamir challenge pre-image
/// (domain separator + two u64 sizes + blob + commitment).
pub const CHALLENGE_INPUT_SIZE: usize =
    FIAT_SHAMIR_PROTOCOL_DOMAIN.len() + 16 + BYTES_PER_BLOB + BYTES_PER_COMMITMENT;
/// Domain separator for blob verification challenges.
pub const FIAT_SHAMIR_PROTOCOL_DOMAIN: [u8; 16] = [
    70, 83, 66, 76, 79, 66, 86, 69, 82, 73, 70, 89, 95, 86, 49, 95,
]; // "FSBLOBVERIFY_V1_"
/// Domain separator for batch-verification challenges.
pub const RANDOM_CHALLENGE_KZG_BATCH_DOMAIN: [u8; 16] = [
    82, 67, 75, 90, 71, 66, 65, 84, 67, 72, 95, 95, 95, 86, 49, 95,
]; // "RCKZGBATCH___V1_"
////////////////////////////// Constant values for EIP-7594 //////////////////////////////
////////////////////////////// C API for EIP-4844 //////////////////////////////
/// Cache of MSM precomputation tables for trusted setups loaded through the
/// C API, keyed by a hash of the settings' Lagrange-points pointer.
#[allow(clippy::type_complexity)]
pub struct PrecomputationTableManager<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>
where
    TFr: Fr,
    TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
{
    // Map from settings key (see `get_key`) to the shared table.
    tables: BTreeMap<u64, Arc<PrecomputationTable<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>>>,
}
impl<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine> Default
    for PrecomputationTableManager<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>
where
    TFr: Fr,
    TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
{
    /// An empty manager with no cached tables.
    fn default() -> Self {
        Self::new()
    }
}
impl<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>
    PrecomputationTableManager<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>
where
    TFr: Fr,
    TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
{
    /// Create an empty manager (usable in `const`/`static` contexts).
    pub const fn new() -> Self {
        Self {
            tables: BTreeMap::new(),
        }
    }
    /// Cache `precomputation` under the key derived from `c_settings`.
    /// No-op when the settings hold no Lagrange points or when no table is
    /// supplied.
    #[allow(clippy::type_complexity)]
    pub fn save_precomputation(
        &mut self,
        precomputation: Option<
            Arc<PrecomputationTable<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>>,
        >,
        c_settings: &CKZGSettings,
    ) {
        if c_settings.g1_values_lagrange_brp.is_null() {
            return;
        }
        if let Some(precomputation) = precomputation {
            self.tables
                .insert(Self::get_key(c_settings), precomputation);
        }
    }
    /// Drop the cached table for `c_settings`, if any.
    pub fn remove_precomputation(&mut self, c_settings: &CKZGSettings) {
        if c_settings.g1_values_lagrange_brp.is_null() {
            return;
        }
        self.tables.remove(&Self::get_key(c_settings));
    }
    /// Look up the cached table for `c_settings`.
    #[allow(clippy::type_complexity)]
    pub fn get_precomputation(
        &self,
        c_settings: &CKZGSettings,
    ) -> Option<Arc<PrecomputationTable<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>>> {
        if c_settings.g1_values_lagrange_brp.is_null() {
            return None;
        }
        self.tables.get(&Self::get_key(c_settings)).cloned()
    }
    /// Key the cache by hashing the Lagrange-points pointer value itself, so
    /// each table is tied to one specific loaded settings allocation.
    fn get_key(settings: &CKZGSettings) -> u64 {
        let mut hasher = SipHasher::new();
        settings.g1_values_lagrange_brp.hash(&mut hasher);
        hasher.finish()
    }
}
////////////////////////////// Utility functions for EIP-4844 //////////////////////////////
/// Parse the textual trusted-setup format into raw byte vectors:
/// `(g1_monomial_bytes, g1_lagrange_bytes, g2_monomial_bytes)`.
///
/// The expected layout is: G1 point count, G2 point count, then hex-encoded
/// bytes of the Lagrange G1 points, the monomial G2 points, and finally the
/// monomial G1 points, separated by arbitrary whitespace.
#[allow(clippy::type_complexity)]
pub fn load_trusted_setup_string(contents: &str) -> Result<(Vec<u8>, Vec<u8>, Vec<u8>), String> {
    let mut offset = 0;
    const TRUSTED_SETUP_ERROR: &str = "Incorrect trusted setup format";
    // Skip whitespace, then parse a run of ASCII digits as a usize.
    #[inline(always)]
    fn scan_number(offset: &mut usize, contents: &str) -> Result<usize, String> {
        *offset += contents[(*offset)..]
            .find(|c: char| !c.is_whitespace())
            .ok_or_else(|| String::from(TRUSTED_SETUP_ERROR))?;
        let start = *offset;
        *offset += contents[(*offset)..]
            .find(|c: char| !c.is_ascii_digit())
            .ok_or_else(|| String::from(TRUSTED_SETUP_ERROR))?;
        let end = *offset;
        contents[start..end]
            .parse::<usize>()
            .map_err(|_| String::from(TRUSTED_SETUP_ERROR))
    }
    let g1_point_count = scan_number(&mut offset, contents)?;
    // FIXME: must be TRUSTED_SETUP_NUM_G1_POINTS
    if g1_point_count != FIELD_ELEMENTS_PER_BLOB {
        return Err(String::from(TRUSTED_SETUP_ERROR));
    }
    let g2_point_count = scan_number(&mut offset, contents)?;
    if g2_point_count != TRUSTED_SETUP_NUM_G2_POINTS {
        return Err(String::from(TRUSTED_SETUP_ERROR));
    }
    let mut g1_monomial_bytes = vec![0u8; g1_point_count * BYTES_PER_G1];
    let mut g1_lagrange_bytes = vec![0u8; g1_point_count * BYTES_PER_G1];
    let mut g2_monomial_bytes = vec![0u8; g2_point_count * BYTES_PER_G2];
    // Skip whitespace, then parse one byte from one or two hex digits
    // (two when the following character is also a hex digit).
    #[inline(always)]
    fn scan_hex_byte(offset: &mut usize, contents: &str) -> Result<u8, String> {
        *offset += contents[(*offset)..]
            .find(|c: char| !c.is_whitespace())
            .ok_or_else(|| String::from(TRUSTED_SETUP_ERROR))?;
        let start = *offset;
        let end = if contents
            .get((*offset + 1)..)
            .map(|it| {
                it.chars()
                    .next()
                    .map(|c| c.is_ascii_hexdigit())
                    .unwrap_or(false)
            })
            .unwrap_or(false)
        {
            *offset += 2;
            *offset
        } else {
            *offset += 1;
            *offset
        };
        u8::from_str_radix(&contents[start..end], 16).map_err(|_| String::from(TRUSTED_SETUP_ERROR))
    }
    for byte in &mut g1_lagrange_bytes {
        *byte = scan_hex_byte(&mut offset, contents)?
    }
    for byte in &mut g2_monomial_bytes {
        *byte = scan_hex_byte(&mut offset, contents)?
    }
    for byte in &mut g1_monomial_bytes {
        *byte = scan_hex_byte(&mut offset, contents)?
    }
    Ok((g1_monomial_bytes, g1_lagrange_bytes, g2_monomial_bytes))
}
/// Serialize the low bytes of `n` big-endian into the tail of `out`.
///
/// At most eight bytes are written (fewer when `out` is shorter than eight);
/// any leading bytes of `out` are left untouched.
pub fn bytes_of_uint64(out: &mut [u8], n: u64) {
    let be = n.to_be_bytes();
    let count = out.len().min(be.len());
    let tail = out.len() - count;
    out[tail..].copy_from_slice(&be[be.len() - count..]);
}
/// SHA-256 digest of `x`.
pub fn hash(x: &[u8]) -> [u8; 32] {
    Sha256::digest(x).into()
}
/// Expand to a parallel iterator (`into_par_iter`) when the `parallel`
/// feature is enabled, and to a sequential `into_iter` otherwise.
#[macro_export]
macro_rules! cfg_into_iter {
    ($e: expr) => {{
        #[cfg(feature = "parallel")]
        let result = $e.into_par_iter();
        #[cfg(not(feature = "parallel"))]
        let result = $e.into_iter();
        result
    }};
}
////////////////////////////// Trait based implementations of functions for EIP-4844 //////////////////////////////
/// Commit to a polynomial in evaluation form: an MSM of the bit-reversed
/// Lagrange G1 setup points with the polynomial's coefficients.
fn poly_to_kzg_commitment<
    TFr: Fr,
    TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp> + G1LinComb<TFr, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG2: G2,
    TFFTSettings: FFTSettings<TFr>,
    TPoly: Poly<TFr>,
    TKZGSettings: KZGSettings<TFr, TG1, TG2, TFFTSettings, TPoly, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
>(
    p: &TPoly,
    s: &TKZGSettings,
) -> TG1 {
    TG1::g1_lincomb(
        s.get_g1_lagrange_brp(),
        p.get_coeffs(),
        FIELD_ELEMENTS_PER_BLOB,
        s.get_precomputation(),
    )
}
/// Compute the KZG commitment of a blob given as field elements.
pub fn blob_to_kzg_commitment_rust<
    TFr: Fr,
    TG1: G1 + G1Mul<TFr> + G1LinComb<TFr, TG1Fp, TG1Affine, TG1ProjAddAffine> + G1GetFp<TG1Fp>,
    TG2: G2,
    TFFTSettings: FFTSettings<TFr>,
    TPoly: Poly<TFr>,
    TKZGSettings: KZGSettings<TFr, TG1, TG2, TFFTSettings, TPoly, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
>(
    blob: &[TFr],
    settings: &TKZGSettings,
) -> Result<TG1, String> {
    let polynomial = blob_to_polynomial(blob)?;
    Ok(poly_to_kzg_commitment(&polynomial, settings))
}
/// Byte-level wrapper: deserialize the blob, then delegate to
/// [`blob_to_kzg_commitment_rust`].
pub fn blob_to_kzg_commitment_raw<
    TFr: Fr,
    TG1: G1 + G1Mul<TFr> + G1LinComb<TFr, TG1Fp, TG1Affine, TG1ProjAddAffine> + G1GetFp<TG1Fp>,
    TG2: G2,
    TFFTSettings: FFTSettings<TFr>,
    TPoly: Poly<TFr>,
    TKZGSettings: KZGSettings<TFr, TG1, TG2, TFFTSettings, TPoly, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
>(
    blob: [u8; BYTES_PER_BLOB],
    settings: &TKZGSettings,
) -> Result<TG1, String> {
    let blob = bytes_to_blob(&blob)?;
    blob_to_kzg_commitment_rust(&blob, settings)
}
/// Return the first `num_powers` powers of `base`:
/// `[1, base, base^2, ..., base^(num_powers - 1)]`.
pub fn compute_powers<TFr: Fr>(base: &TFr, num_powers: usize) -> Vec<TFr> {
    let mut powers: Vec<TFr> = Vec::with_capacity(num_powers);
    if num_powers == 0 {
        return powers;
    }
    powers.push(TFr::one());
    for i in 1..num_powers {
        // Each power is the previous one multiplied by the base.
        let next = powers[i - 1].mul(base);
        powers.push(next);
    }
    powers
}
/// Derive the batch-verification challenge powers via Fiat–Shamir: hash the
/// domain separator, the element/batch sizes, and every
/// (commitment, z, y, proof) tuple, then return the powers of the resulting
/// field element.
fn compute_r_powers<TG1: G1, TFr: Fr>(
    commitments_g1: &[TG1],
    zs_fr: &[TFr],
    ys_fr: &[TFr],
    proofs_g1: &[TG1],
) -> Result<Vec<TFr>, String> {
    let n = commitments_g1.len();
    // 32-byte header (domain separator + two u64 sizes) plus one record
    // per proof.
    let input_size =
        32 + n * (BYTES_PER_COMMITMENT + 2 * BYTES_PER_FIELD_ELEMENT + BYTES_PER_PROOF);
    let mut bytes: Vec<u8> = vec![0; input_size];
    // Copy domain separator
    bytes[..16].copy_from_slice(&RANDOM_CHALLENGE_KZG_BATCH_DOMAIN);
    bytes_of_uint64(&mut bytes[16..24], FIELD_ELEMENTS_PER_BLOB as u64);
    bytes_of_uint64(&mut bytes[24..32], n as u64);
    let mut offset = 32;
    for i in 0..n {
        // Copy commitment
        let v = commitments_g1[i].to_bytes();
        bytes[offset..(v.len() + offset)].copy_from_slice(&v[..]);
        offset += BYTES_PER_COMMITMENT;
        // Copy evaluation challenge
        let v = zs_fr[i].to_bytes();
        bytes[offset..(v.len() + offset)].copy_from_slice(&v[..]);
        offset += BYTES_PER_FIELD_ELEMENT;
        // Copy polynomial's evaluation value
        let v = ys_fr[i].to_bytes();
        bytes[offset..(v.len() + offset)].copy_from_slice(&v[..]);
        offset += BYTES_PER_FIELD_ELEMENT;
        // Copy proof
        let v = proofs_g1[i].to_bytes();
        bytes[offset..(v.len() + offset)].copy_from_slice(&v[..]);
        offset += BYTES_PER_PROOF;
    }
    // Make sure we wrote the entire buffer
    if offset != input_size {
        return Err(String::from("Error while copying commitments"));
    }
    // Now let's create the challenge!
    let eval_challenge = hash(&bytes);
    let r = hash_to_bls_field(&eval_challenge);
    Ok(compute_powers(&r, n))
}
/// Verify several KZG opening proofs with a single pairing check by folding
/// them into a random linear combination (powers of a Fiat–Shamir challenge).
fn verify_kzg_proof_batch<
    TFr: Fr,
    TG1: G1
        + G1Mul<TFr>
        + G1GetFp<TG1Fp>
        + PairingVerify<TG1, TG2>
        + G1LinComb<TFr, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG2: G2,
    TFFTSettings: FFTSettings<TFr>,
    TPoly: Poly<TFr>,
    TKZGSettings: KZGSettings<TFr, TG1, TG2, TFFTSettings, TPoly, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
>(
    commitments_g1: &[TG1],
    zs_fr: &[TFr],
    ys_fr: &[TFr],
    proofs_g1: &[TG1],
    ts: &TKZGSettings,
) -> Result<bool, String> {
    let n = commitments_g1.len();
    let mut c_minus_y: Vec<TG1> = Vec::with_capacity(n);
    let mut r_times_z: Vec<TFr> = Vec::with_capacity(n);
    // Compute the random lincomb challenges
    let r_powers = compute_r_powers(commitments_g1, zs_fr, ys_fr, proofs_g1)?;
    // Compute \sum r^i * Proof_i
    let proof_lincomb = TG1::g1_lincomb(proofs_g1, &r_powers, n, None);
    for i in 0..n {
        // Get [y_i]
        let ys_encrypted = TG1::generator().mul(&ys_fr[i]);
        // Get C_i - [y_i]
        c_minus_y.push(commitments_g1[i].sub(&ys_encrypted));
        // Get r^i * z_i
        r_times_z.push(r_powers[i].mul(&zs_fr[i]));
    }
    // Get \sum r^i z_i Proof_i
    let proof_z_lincomb = TG1::g1_lincomb(proofs_g1, &r_times_z, n, None);
    // Get \sum r^i (C_i - [y_i])
    let c_minus_y_lincomb = TG1::g1_lincomb(&c_minus_y, &r_powers, n, None);
    // Get C_minus_y_lincomb + proof_z_lincomb
    let rhs_g1 = c_minus_y_lincomb.add_or_dbl(&proof_z_lincomb);
    // Do the pairing check!
    Ok(TG1::verify(
        &proof_lincomb,
        &ts.get_g2_monomial()[1],
        &rhs_g1,
        &TG2::generator(),
    ))
}
/// Compute a KZG opening proof for `blob` at the point `z`, returning the
/// proof and the evaluation `y = p(z)`.
///
/// Builds the quotient polynomial `q(x) = (p(x) - y) / (x - z)` in
/// evaluation form; when `z` coincides with a domain point (`ω_{m-1}`), the
/// removable singularity at that index is handled by the special case below.
pub fn compute_kzg_proof_rust<
    TFr: Fr + Copy,
    TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp> + G1LinComb<TFr, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG2: G2,
    TFFTSettings: FFTSettings<TFr>,
    TPoly: Poly<TFr>,
    TKZGSettings: KZGSettings<TFr, TG1, TG2, TFFTSettings, TPoly, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
>(
    blob: &[TFr],
    z: &TFr,
    s: &TKZGSettings,
) -> Result<(TG1, TFr), String> {
    let polynomial = blob_to_polynomial(blob)?;
    let y = evaluate_polynomial_in_evaluation_form(&polynomial, z, s)?;
    let mut tmp: TFr;
    // `m != 0` records (index + 1) of the domain point equal to `z`, if any.
    let mut m: usize = 0;
    let mut q: TPoly = TPoly::new(FIELD_ELEMENTS_PER_BLOB);
    let mut inverses_in: Vec<TFr> = vec![TFr::default(); FIELD_ELEMENTS_PER_BLOB];
    let mut inverses: Vec<TFr> = vec![TFr::default(); FIELD_ELEMENTS_PER_BLOB];
    let roots_of_unity = s.get_fft_settings().get_brp_roots_of_unity();
    let poly_coeffs = polynomial.get_coeffs();
    for i in 0..FIELD_ELEMENTS_PER_BLOB {
        if z.equals(&roots_of_unity[i]) {
            // We are asked to compute a KZG proof inside the domain
            m = i + 1;
            inverses_in[i] = TFr::one();
            continue;
        }
        // (p_i - y) / (ω_i - z)
        q.set_coeff_at(i, &poly_coeffs[i].sub(&y));
        inverses_in[i] = roots_of_unity[i].sub(z);
    }
    fr_batch_inv(&mut inverses, &inverses_in, FIELD_ELEMENTS_PER_BLOB)?;
    for (i, inverse) in inverses.iter().enumerate().take(FIELD_ELEMENTS_PER_BLOB) {
        q.set_coeff_at(i, &q.get_coeff_at(i).mul(inverse));
    }
    if m != 0 {
        // ω_{m-1} == z
        m -= 1;
        q.set_coeff_at(m, &TFr::zero());
        for i in 0..FIELD_ELEMENTS_PER_BLOB {
            if i == m {
                continue;
            }
            // Build denominator: z * (z - ω_i)
            tmp = z.sub(&roots_of_unity[i]);
            inverses_in[i] = tmp.mul(z);
        }
        fr_batch_inv(&mut inverses, &inverses_in, FIELD_ELEMENTS_PER_BLOB)?;
        for i in 0..FIELD_ELEMENTS_PER_BLOB {
            if i == m {
                continue;
            }
            // Build numerator: ω_i * (p_i - y)
            tmp = poly_coeffs[i].sub(&y);
            tmp = tmp.mul(&roots_of_unity[i]);
            // Do the division: (p_i - y) * ω_i / (z * (z - ω_i))
            tmp = tmp.mul(&inverses[i]);
            q.set_coeff_at(m, &q.get_coeff_at(m).add(&tmp))
        }
    }
    // The proof is the commitment to the quotient polynomial.
    let proof = TG1::g1_lincomb(
        s.get_g1_lagrange_brp(),
        q.get_coeffs(),
        FIELD_ELEMENTS_PER_BLOB,
        s.get_precomputation(),
    );
    Ok((proof, y))
}
/// Byte-level wrapper: deserialize the blob and evaluation point, then
/// delegate to [`compute_kzg_proof_rust`].
pub fn compute_kzg_proof_raw<
    TFr: Fr + Copy,
    TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp> + G1LinComb<TFr, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG2: G2,
    TFFTSettings: FFTSettings<TFr>,
    TPoly: Poly<TFr>,
    TKZGSettings: KZGSettings<TFr, TG1, TG2, TFFTSettings, TPoly, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
>(
    blob: [u8; BYTES_PER_BLOB],
    z: [u8; BYTES_PER_FIELD_ELEMENT],
    s: &TKZGSettings,
) -> Result<(TG1, TFr), String> {
    let blob = bytes_to_blob(&blob)?;
    let z = TFr::from_bytes(&z)?;
    compute_kzg_proof_rust(&blob, &z, s)
}
/// Compute a blob proof: open the blob's polynomial at the Fiat–Shamir
/// challenge derived from the blob and its commitment.
pub fn compute_blob_kzg_proof_rust<
    TFr: Fr + Copy,
    TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp> + G1LinComb<TFr, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG2: G2,
    TFFTSettings: FFTSettings<TFr>,
    TPoly: Poly<TFr>,
    TKZGSettings: KZGSettings<TFr, TG1, TG2, TFFTSettings, TPoly, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
>(
    blob: &[TFr],
    commitment: &TG1,
    ts: &TKZGSettings,
) -> Result<TG1, String> {
    // Reject commitments that are neither infinity nor a valid group element.
    if !commitment.is_inf() && !commitment.is_valid() {
        return Err("Invalid commitment".to_string());
    }
    let evaluation_challenge_fr = compute_challenge_rust(blob, commitment);
    let (proof, _) = compute_kzg_proof_rust(blob, &evaluation_challenge_fr, ts)?;
    Ok(proof)
}
/// Byte-level wrapper: deserialize the blob and commitment, then delegate to
/// [`compute_blob_kzg_proof_rust`].
pub fn compute_blob_kzg_proof_raw<
    TFr: Fr + Copy,
    TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp> + G1LinComb<TFr, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG2: G2,
    TFFTSettings: FFTSettings<TFr>,
    TPoly: Poly<TFr>,
    TKZGSettings: KZGSettings<TFr, TG1, TG2, TFFTSettings, TPoly, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
>(
    blob: [u8; BYTES_PER_BLOB],
    commitment: [u8; BYTES_PER_G1],
    ts: &TKZGSettings,
) -> Result<TG1, String> {
    let blob = bytes_to_blob(&blob)?;
    let commitment = TG1::from_bytes(&commitment)?;
    compute_blob_kzg_proof_rust(&blob, &commitment, ts)
}
/// Verify a single KZG opening proof (`p(z) == y`) after validating that
/// both the commitment and the proof are well-formed group elements.
pub fn verify_kzg_proof_rust<
    TFr: Fr,
    TG1: G1 + G1GetFp<TG1Fp> + G1Mul<TFr>,
    TG2: G2,
    TFFTSettings: FFTSettings<TFr>,
    TPoly: Poly<TFr>,
    TKZGSettings: KZGSettings<TFr, TG1, TG2, TFFTSettings, TPoly, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
>(
    commitment: &TG1,
    z: &TFr,
    y: &TFr,
    proof: &TG1,
    s: &TKZGSettings,
) -> Result<bool, String> {
    if !commitment.is_inf() && !commitment.is_valid() {
        return Err("Invalid commitment".to_string());
    }
    if !proof.is_inf() && !proof.is_valid() {
        return Err("Invalid proof".to_string());
    }
    s.check_proof_single(commitment, proof, z, y)
}
/// Byte-level wrapper: deserialize all inputs, then delegate to
/// [`verify_kzg_proof_rust`].
pub fn verify_kzg_proof_raw<
    TFr: Fr,
    TG1: G1 + G1GetFp<TG1Fp> + G1Mul<TFr>,
    TG2: G2,
    TFFTSettings: FFTSettings<TFr>,
    TPoly: Poly<TFr>,
    TKZGSettings: KZGSettings<TFr, TG1, TG2, TFFTSettings, TPoly, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
>(
    commitment: [u8; BYTES_PER_G1],
    z: [u8; BYTES_PER_FIELD_ELEMENT],
    y: [u8; BYTES_PER_FIELD_ELEMENT],
    proof: [u8; BYTES_PER_G1],
    s: &TKZGSettings,
) -> Result<bool, String> {
    let commitment = TG1::from_bytes(&commitment)?;
    let z = TFr::from_bytes(&z)?;
    let y = TFr::from_bytes(&y)?;
    let proof = TG1::from_bytes(&proof)?;
    verify_kzg_proof_rust(&commitment, &z, &y, &proof, s)
}
/// Verify a blob KZG proof by re-deriving the Fiat-Shamir evaluation
/// challenge and the claimed evaluation, then running the single-proof check.
pub fn verify_blob_kzg_proof_rust<
    TFr: Fr + Copy,
    TG1: G1 + G1GetFp<TG1Fp> + G1Mul<TFr>,
    TG2: G2,
    TFFTSettings: FFTSettings<TFr>,
    TPoly: Poly<TFr>,
    TKZGSettings: KZGSettings<TFr, TG1, TG2, TFFTSettings, TPoly, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
>(
    blob: &[TFr],
    commitment_g1: &TG1,
    proof_g1: &TG1,
    ts: &TKZGSettings,
) -> Result<bool, String> {
    // Each point must be infinity or a valid curve/subgroup element;
    // commitment is checked first to keep the original error precedence.
    for (point, message) in [
        (commitment_g1, "Invalid commitment"),
        (proof_g1, "Invalid proof"),
    ] {
        if !point.is_inf() && !point.is_valid() {
            return Err(message.to_string());
        }
    }
    let polynomial = blob_to_polynomial(blob)?;
    let challenge = compute_challenge_rust(blob, commitment_g1);
    let evaluation = evaluate_polynomial_in_evaluation_form(&polynomial, &challenge, ts)?;
    verify_kzg_proof_rust(commitment_g1, &challenge, &evaluation, proof_g1, ts)
}
/// Byte-level wrapper around [`verify_blob_kzg_proof_rust`].
pub fn verify_blob_kzg_proof_raw<
    TFr: Fr + Copy,
    TG1: G1 + G1GetFp<TG1Fp> + G1Mul<TFr>,
    TG2: G2,
    TFFTSettings: FFTSettings<TFr>,
    TPoly: Poly<TFr>,
    TKZGSettings: KZGSettings<TFr, TG1, TG2, TFFTSettings, TPoly, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
>(
    blob: [u8; BYTES_PER_BLOB],
    commitment_g1: [u8; BYTES_PER_G1],
    proof_g1: [u8; BYTES_PER_G1],
    ts: &TKZGSettings,
) -> Result<bool, String> {
    // Decode blob, commitment, proof — in that order — before verifying.
    let decoded_blob = bytes_to_blob(&blob)?;
    let parsed_commitment = TG1::from_bytes(&commitment_g1)?;
    let parsed_proof = TG1::from_bytes(&proof_g1)?;
    verify_blob_kzg_proof_rust(&decoded_blob, &parsed_commitment, &parsed_proof, ts)
}
/// For each (blob, commitment) pair, derive the Fiat-Shamir evaluation
/// challenge and evaluate the blob's polynomial at it.
///
/// Returns the challenges and evaluations as two parallel vectors.
/// Panics if `commitments_g1` is shorter than `blobs` (same as the original
/// indexed loop).
fn compute_challenges_and_evaluate_polynomial<
    TFr: Fr + Copy,
    TG1: G1 + G1GetFp<TG1Fp> + G1Mul<TFr>,
    TG2: G2,
    TFFTSettings: FFTSettings<TFr>,
    TPoly: Poly<TFr>,
    TKZGSettings: KZGSettings<TFr, TG1, TG2, TFFTSettings, TPoly, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
>(
    blobs: &[Vec<TFr>],
    commitments_g1: &[TG1],
    ts: &TKZGSettings,
) -> Result<(Vec<TFr>, Vec<TFr>), String> {
    let mut challenges = Vec::with_capacity(blobs.len());
    let mut evaluations = Vec::with_capacity(blobs.len());
    for (i, blob) in blobs.iter().enumerate() {
        let polynomial = blob_to_polynomial(blob)?;
        let challenge = compute_challenge_rust(blob, &commitments_g1[i]);
        let evaluation = evaluate_polynomial_in_evaluation_form(&polynomial, &challenge, ts)?;
        challenges.push(challenge);
        evaluations.push(evaluation);
    }
    Ok((challenges, evaluations))
}
/// Validate every commitment and proof in a batch.
///
/// A point is acceptable when it is the point at infinity or passes
/// curve/subgroup validation. Commitments are checked first so a batch with
/// both kinds of bad points reports "Invalid commitment", as before.
fn validate_batched_input<TG1: G1>(commitments: &[TG1], proofs: &[TG1]) -> Result<(), String> {
    if cfg_into_iter!(commitments).any(|commitment| !commitment.is_inf() && !commitment.is_valid())
    {
        return Err("Invalid commitment".to_string());
    }
    if cfg_into_iter!(proofs).any(|proof| !proof.is_inf() && !proof.is_valid()) {
        return Err("Invalid proof".to_string());
    }
    Ok(())
}
/// Verify a batch of blob KZG proofs.
///
/// Returns `Ok(true)` for an empty batch, delegates to the single verifier
/// for one blob, and otherwise runs the combined batch check (optionally in
/// parallel subgroups when the `parallel` feature is enabled).
///
/// # Errors
/// Returns an error if the slice lengths disagree or any point fails
/// validation.
pub fn verify_blob_kzg_proof_batch_rust<
    TFr: Fr + Copy,
    TG1: G1
        + G1Mul<TFr>
        + PairingVerify<TG1, TG2>
        + G1GetFp<TG1Fp>
        + G1LinComb<TFr, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG2: G2,
    TFFTSettings: FFTSettings<TFr>,
    TPoly: Poly<TFr>,
    TKZGSettings: KZGSettings<TFr, TG1, TG2, TFFTSettings, TPoly, TG1Fp, TG1Affine, TG1ProjAddAffine> + Sync,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
>(
    blobs: &[Vec<TFr>],
    commitments_g1: &[TG1],
    proofs_g1: &[TG1],
    ts: &TKZGSettings,
) -> Result<bool, String> {
    // Exit early if we are given zero blobs
    if blobs.is_empty() {
        return Ok(true);
    }
    // Bug fix: validate the argument counts *before* the single-blob
    // shortcut. The previous order indexed `commitments_g1[0]` /
    // `proofs_g1[0]` first, which panics on mismatched lengths instead of
    // returning this error.
    if blobs.len() != commitments_g1.len() || blobs.len() != proofs_g1.len() {
        return Err("Invalid amount of arguments".to_string());
    }
    // For a single blob, just do a regular single verification
    if blobs.len() == 1 {
        return verify_blob_kzg_proof_rust(&blobs[0], &commitments_g1[0], &proofs_g1[0], ts);
    }
    #[cfg(feature = "parallel")]
    {
        let num_blobs = blobs.len();
        let num_cores = num_cpus::get_physical();
        if num_blobs > num_cores {
            validate_batched_input(commitments_g1, proofs_g1)?;
            // Process blobs in parallel subgroups; each group runs its own
            // combined batch verification, and the results are AND-ed.
            let blobs_per_group = num_blobs / num_cores;
            blobs
                .par_chunks(blobs_per_group)
                .enumerate()
                .map(|(i, blob_group)| {
                    let num_blobs_in_group = blob_group.len();
                    let commitment_group = &commitments_g1
                        [blobs_per_group * i..blobs_per_group * i + num_blobs_in_group];
                    let proof_group =
                        &proofs_g1[blobs_per_group * i..blobs_per_group * i + num_blobs_in_group];
                    let (evaluation_challenges_fr, ys_fr) =
                        compute_challenges_and_evaluate_polynomial(
                            blob_group,
                            commitment_group,
                            ts,
                        )?;
                    verify_kzg_proof_batch(
                        commitment_group,
                        &evaluation_challenges_fr,
                        &ys_fr,
                        proof_group,
                        ts,
                    )
                })
                .try_reduce(|| true, |a, b| Ok(a && b))
        } else {
            // Each group contains either one or zero blobs, so iterate
            // over the single blob verification function in parallel
            (blobs, commitments_g1, proofs_g1)
                .into_par_iter()
                .map(|(blob, commitment, proof)| {
                    verify_blob_kzg_proof_rust(blob, commitment, proof, ts)
                })
                .try_reduce(|| true, |a, b| Ok(a && b))
        }
    }
    #[cfg(not(feature = "parallel"))]
    {
        validate_batched_input(commitments_g1, proofs_g1)?;
        let (evaluation_challenges_fr, ys_fr) =
            compute_challenges_and_evaluate_polynomial(blobs, commitments_g1, ts)?;
        verify_kzg_proof_batch(
            commitments_g1,
            &evaluation_challenges_fr,
            &ys_fr,
            proofs_g1,
            ts,
        )
    }
}
/// Byte-level wrapper around [`verify_blob_kzg_proof_batch_rust`].
///
/// Deserializes every blob, commitment and proof (in parallel when the
/// `parallel` feature is active), failing fast on the first invalid entry.
pub fn verify_blob_kzg_proof_batch_raw<
    TFr: Fr + Copy + Send,
    TG1: G1
        + G1Mul<TFr>
        + PairingVerify<TG1, TG2>
        + G1GetFp<TG1Fp>
        + G1LinComb<TFr, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG2: G2,
    TFFTSettings: FFTSettings<TFr>,
    TPoly: Poly<TFr>,
    TKZGSettings: KZGSettings<TFr, TG1, TG2, TFFTSettings, TPoly, TG1Fp, TG1Affine, TG1ProjAddAffine> + Sync,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
>(
    blobs: &[[u8; BYTES_PER_BLOB]],
    commitments_g1: &[[u8; BYTES_PER_G1]],
    proofs_g1: &[[u8; BYTES_PER_G1]],
    ts: &TKZGSettings,
) -> Result<bool, String> {
    let decoded_blobs = cfg_into_iter!(blobs)
        .map(|encoded| bytes_to_blob(encoded))
        .collect::<Result<Vec<_>, _>>()?;
    let parsed_commitments = cfg_into_iter!(commitments_g1)
        .map(|encoded| TG1::from_bytes(encoded))
        .collect::<Result<Vec<_>, _>>()?;
    let parsed_proofs = cfg_into_iter!(proofs_g1)
        .map(|encoded| TG1::from_bytes(encoded))
        .collect::<Result<Vec<_>, _>>()?;
    verify_blob_kzg_proof_batch_rust(&decoded_blobs, &parsed_commitments, &parsed_proofs, ts)
}
/// Deserialize a raw blob (`BYTES_PER_BLOB` bytes) into its field elements.
///
/// Each 32-byte chunk is parsed with `TFr::from_bytes`; the first
/// non-canonical chunk aborts with its error.
pub fn bytes_to_blob<TFr: Fr>(bytes: &[u8]) -> Result<Vec<TFr>, String> {
    if bytes.len() == BYTES_PER_BLOB {
        bytes
            .chunks(BYTES_PER_FIELD_ELEMENT)
            .map(TFr::from_bytes)
            .collect()
    } else {
        Err(format!(
            "Invalid blob: Invalid byte length. Expected {} got {}",
            BYTES_PER_BLOB,
            bytes.len(),
        ))
    }
}
/// Batch field inversion (Montgomery's trick): computes `out[i] = 1 / a[i]`
/// for the first `len` elements using a single `eucl_inverse` call plus
/// 3·(len-1) multiplications.
///
/// Errors on `len == 0`, when `a` and `out` compare equal, or when any input
/// is zero (detected via a zero running product).
/// Panics (like the original indexed loops) if either slice is shorter than
/// `len`.
fn fr_batch_inv<TFr: Fr + PartialEq + Copy>(
    out: &mut [TFr],
    a: &[TFr],
    len: usize,
) -> Result<(), String> {
    if len == 0 {
        return Err(String::from("Length is less than 0."));
    }
    if a == out {
        return Err(String::from("Destination is the same as source."));
    }
    // Slicing up front preserves the original out-of-bounds panic behavior.
    let dst = &mut out[..len];
    let src = &a[..len];
    // Forward pass: dst[i] = a[0] * … * a[i-1]; running = product of all.
    let mut running = TFr::one();
    for (d, s) in dst.iter_mut().zip(src) {
        *d = running;
        running = running.mul(s);
    }
    // A zero product means at least one input was zero and is not invertible.
    if running.is_zero() {
        return Err(String::from("Zero input"));
    }
    running = running.eucl_inverse();
    // Backward pass: combine prefix products with the shrinking suffix inverse.
    for (d, s) in dst.iter_mut().zip(src).rev() {
        *d = d.mul(&running);
        running = running.mul(s);
    }
    Ok(())
}
/// Interpret 32 hash output bytes as a field element without a canonicality
/// check (the reduction semantics come from `from_bytes_unchecked`).
pub fn hash_to_bls_field<TFr: Fr>(x: &[u8; BYTES_PER_FIELD_ELEMENT]) -> TFr {
    // `from_bytes_unchecked` skips validation, so this unwrap cannot observe
    // a canonicality failure.
    let element = TFr::from_bytes_unchecked(x);
    element.unwrap()
}
/// Derive the Fiat-Shamir evaluation challenge for a (blob, commitment) pair.
///
/// The transcript is: 16-byte domain separator, a zero u64, the blob length
/// as a big-endian u64, the blob's field elements, and the serialized
/// commitment — hashed and reduced to a field element.
pub fn compute_challenge_rust<TFr: Fr, TG1: G1>(blob: &[TFr], commitment: &TG1) -> TFr {
    let mut bytes: Vec<u8> = vec![0; CHALLENGE_INPUT_SIZE];
    // Copy domain separator
    bytes[..16].copy_from_slice(&FIAT_SHAMIR_PROTOCOL_DOMAIN);
    // Set all other bytes of this 16-byte (big-endian) field to zero
    bytes_of_uint64(&mut bytes[16..24], 0);
    bytes_of_uint64(&mut bytes[24..32], FIELD_ELEMENTS_PER_BLOB as u64);
    for (i, field) in blob.iter().enumerate() {
        let v = field.to_bytes();
        let size = (32 + i * BYTES_PER_FIELD_ELEMENT)..(32 + (i + 1) * BYTES_PER_FIELD_ELEMENT);
        bytes[size].copy_from_slice(&v);
    }
    // Copy commitment — copy_from_slice replaces the byte-at-a-time loop
    // (single memcpy, same result).
    let v = commitment.to_bytes();
    bytes[32 + BYTES_PER_BLOB..32 + BYTES_PER_BLOB + v.len()].copy_from_slice(&v);
    // Now let's create the challenge!
    let eval_challenge = hash(&bytes);
    hash_to_bls_field(&eval_challenge)
}
/// Wrap a blob's field elements in a polynomial (evaluation form).
///
/// The blob must contain exactly `FIELD_ELEMENTS_PER_BLOB` elements.
pub fn blob_to_polynomial<TFr: Fr, TPoly: Poly<TFr>>(blob: &[TFr]) -> Result<TPoly, String> {
    if blob.len() == FIELD_ELEMENTS_PER_BLOB {
        Ok(TPoly::from_coeffs(blob))
    } else {
        Err(String::from("Blob length must be FIELD_ELEMENTS_PER_BLOB"))
    }
}
/// Evaluate a polynomial given in evaluation (Lagrange) form at an arbitrary
/// point `x`, using the barycentric formula:
///
///   p(x) = (x^N - 1) / N * sum_i( p_i * w_i / (x - w_i) )
///
/// where `w_i` are the bit-reversal-permuted roots of unity and
/// N = FIELD_ELEMENTS_PER_BLOB. If `x` coincides with a root of unity the
/// stored evaluation is returned directly (the formula would divide by zero).
pub fn evaluate_polynomial_in_evaluation_form<
    TFr: Fr + Copy,
    TG1: G1 + G1GetFp<TG1Fp> + G1Mul<TFr>,
    TG2: G2,
    TPoly: Poly<TFr>,
    TFFTSettings: FFTSettings<TFr>,
    TKZGSettings: KZGSettings<TFr, TG1, TG2, TFFTSettings, TPoly, TG1Fp, TG1Affine, TG1ProjAddAffine>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
>(
    p: &TPoly,
    x: &TFr,
    s: &TKZGSettings,
) -> Result<TFr, String> {
    if p.len() != FIELD_ELEMENTS_PER_BLOB {
        return Err(String::from("Incorrect field elements count."));
    }
    let mut inverses_in: Vec<TFr> = vec![TFr::default(); FIELD_ELEMENTS_PER_BLOB];
    let mut inverses: Vec<TFr> = vec![TFr::default(); FIELD_ELEMENTS_PER_BLOB];
    let roots_of_unity = s.get_fft_settings().get_brp_roots_of_unity();
    let poly_coeffs = p.get_coeffs();
    // Collect all denominators (x - w_i); short-circuit if x is a domain point.
    for i in 0..FIELD_ELEMENTS_PER_BLOB {
        if x == &roots_of_unity[i] {
            return Ok(poly_coeffs[i]);
        }
        inverses_in[i] = x.sub(&roots_of_unity[i]);
    }
    // One shared inversion for all denominators (Montgomery's trick).
    fr_batch_inv(&mut inverses, &inverses_in, FIELD_ELEMENTS_PER_BLOB)?;
    let mut tmp: TFr;
    let mut out = TFr::zero();
    // Accumulate sum_i p_i * w_i / (x - w_i).
    for i in 0..FIELD_ELEMENTS_PER_BLOB {
        tmp = inverses[i].mul(&roots_of_unity[i]);
        tmp = tmp.mul(&poly_coeffs[i]);
        out = out.add(&tmp);
    }
    // Apply the (x^N - 1) / N prefactor.
    tmp = TFr::from_u64(FIELD_ELEMENTS_PER_BLOB as u64);
    out = out.div(&tmp)?;
    tmp = x.pow(FIELD_ELEMENTS_PER_BLOB);
    tmp = tmp.sub(&TFr::one());
    out = out.mul(&tmp);
    Ok(out)
}
/// Heuristically detect whether the G1 points of a trusted setup are in
/// Lagrange (evaluation) form rather than monomial form.
///
/// The pairing probe e(p1, g2_0) ?= e(p0, g2_1) holds when the G1 points are
/// in monomial form (p1 = s·p0 matches g2_1 = s·g2_0); the setup is in
/// Lagrange form exactly when that check fails.
fn is_trusted_setup_in_lagrange_form<TG1: G1 + PairingVerify<TG1, TG2>, TG2: G2>(
    g1_lagrange_values: &[TG1],
    g2_monomial_values: &[TG2],
) -> bool {
    // The probe needs two points on each side.
    if g1_lagrange_values.len() < 2 || g2_monomial_values.len() < 2 {
        return false;
    }
    !TG1::verify(
        &g1_lagrange_values[1],
        &g2_monomial_values[0],
        &g1_lagrange_values[0],
        &g2_monomial_values[1],
    )
}
pub fn load_trusted_setup_rust<
TFr: Fr,
TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp> + PairingVerify<TG1, TG2>,
TG2: G2,
TFFTSettings: FFTSettings<TFr>,
TPoly: Poly<TFr>,
TKZGSettings: KZGSettings<TFr, TG1, TG2, TFFTSettings, TPoly, TG1Fp, TG1Affine, TG1ProjAddAffine>,
TG1Fp: G1Fp,
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | true |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/common_utils.rs | kzg/src/common_utils.rs | extern crate alloc;
use alloc::string::String;
use core::mem;
/// Permute `vals` in place into bit-reversal order.
///
/// The length must be a power of two (a single element is accepted as a
/// trivial permutation); an empty slice or a non-power-of-two length is an
/// error. Element `i` is swapped with the value whose index is `i` with its
/// significant bits reversed.
pub fn reverse_bit_order<T>(vals: &mut [T]) -> Result<(), String>
where
    T: Clone,
{
    if vals.is_empty() {
        return Err(String::from("Values can not be empty"));
    }
    // required for tests
    if vals.len() == 1 {
        return Ok(());
    }
    if !vals.len().is_power_of_two() {
        return Err(String::from("Values length has to be a power of 2"));
    }
    // Number of high bits that play no role for indices < len.
    let unused_bit_len = vals.len().leading_zeros() + 1;
    for i in 0..vals.len() - 1 {
        let r = i.reverse_bits() >> unused_bit_len;
        // Swap each pair once (r > i); slice::swap avoids the three clones
        // the previous tmp-based swap performed per exchange.
        if r > i {
            vals.swap(i, r);
        }
    }
    Ok(())
}
/// Branchless floor(log2) of a byte via successive halving of the search
/// range; returns 0 for input 0.
pub fn log_2_byte(b: u8) -> usize {
    // First step: 4 if the high nibble is populated, else 0.
    let mut result = u8::from(b > 0xF) << 2;
    let mut rest = b >> result;
    // Second step narrows to a 2-bit range, final shift resolves the last bit.
    let step = u8::from(rest > 0x3) << 1;
    rest >>= step + 1;
    result |= step | rest;
    usize::from(result)
}
/// log2 of a power of two, computed by OR-ing the positions where the de
/// Bruijn-style bit masks intersect `n` (valid for 32-bit values).
pub fn log2_pow2(n: usize) -> usize {
    const MASKS: [usize; 5] = [0xAAAAAAAA, 0xCCCCCCCC, 0xF0F0F0F0, 0xFF00FF00, 0xFFFF0000];
    MASKS
        .iter()
        .enumerate()
        .fold(0usize, |acc, (i, &mask)| {
            acc | (usize::from(n & mask != 0) << i)
        })
}
/// floor(log2(n)) by counting halvings; returns 0 for both 0 and 1.
pub fn log2_u64(n: usize) -> usize {
    let mut value = n;
    let mut result = 0usize;
    loop {
        value >>= 1;
        if value == 0 {
            break;
        }
        result += 1;
    }
    result
}
/// Bit width of `T` (8 bits per byte of its size).
const fn num_bits<T>() -> usize {
    8 * mem::size_of::<T>()
}
/// floor(log2(x)) via the position of the most significant set bit;
/// returns 0 for x == 0 (mirroring the original's special case).
pub fn log_2(x: usize) -> usize {
    if x == 0 {
        return 0;
    }
    // usize::BITS equals num_bits::<usize>(); leading_zeros locates the MSB.
    (usize::BITS - 1 - x.leading_zeros()) as usize
}
/// True when `n` has at most one bit set.
///
/// Note: like the original, 0 is reported as a power of two (callers such as
/// `next_pow_of_2` handle 0 explicitly first). `wrapping_sub` fixes the
/// debug-mode overflow panic the plain `n - 1` caused for n == 0, without
/// changing the release-mode result.
pub fn is_power_of_2(n: usize) -> bool {
    n & n.wrapping_sub(1) == 0
}
/// Smallest power of two >= x, with 0 mapping to 1.
pub fn next_pow_of_2(x: usize) -> usize {
    match x {
        0 => 1,
        _ if is_power_of_2(x) => x,
        // Otherwise round up to the power of two above the MSB.
        _ => 1 << (log_2(x) + 1),
    }
}
/// True when `n` has at most one bit set (duplicate of `is_power_of_2`, kept
/// for API compatibility).
///
/// As in the original, 0 is reported as a power of two. `wrapping_sub` fixes
/// the debug-mode overflow panic `n - 1` caused for n == 0 while keeping the
/// release-mode result identical.
pub fn is_power_of_two(n: usize) -> bool {
    n & n.wrapping_sub(1) == 0
}
/// Reverse `value` within the bit width implied by `length` (the number of
/// bits needed to index `length` power-of-two slots).
pub fn reverse_bits_limited(length: usize, value: usize) -> usize {
    // Bits actually used by indices < length when length is a power of two.
    let significant = usize::BITS - length.leading_zeros() - 1;
    value.reverse_bits() >> (usize::BITS - significant)
}
/// Mutable iteration over `$collection`: expands to `par_iter_mut()` when the
/// `parallel` feature is enabled and to plain `iter_mut()` otherwise, so call
/// sites stay identical across both builds.
#[macro_export]
macro_rules! cfg_iter_mut {
    ($collection:expr) => {{
        #[cfg(feature = "parallel")]
        {
            $collection.par_iter_mut()
        }
        #[cfg(not(feature = "parallel"))]
        {
            $collection.iter_mut()
        }
    }};
}
/// Shared-reference iteration over `$collection`: `par_iter()` under the
/// `parallel` feature, `iter()` otherwise.
#[macro_export]
macro_rules! cfg_iter {
    ($collection:expr) => {{
        #[cfg(feature = "parallel")]
        {
            $collection.par_iter()
        }
        #[cfg(not(feature = "parallel"))]
        {
            $collection.iter()
        }
    }};
}
/// Chunked iteration over `$collection` with `$chunk_size` elements per
/// chunk: `par_chunks` under the `parallel` feature, `chunks` otherwise.
#[macro_export]
macro_rules! cfg_chunks {
    ($collection:expr, $chunk_size:expr) => {{
        #[cfg(feature = "parallel")]
        {
            $collection.par_chunks($chunk_size)
        }
        #[cfg(not(feature = "parallel"))]
        {
            $collection.chunks($chunk_size)
        }
    }};
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/msm/parallel_pippenger_utils.rs | kzg/src/msm/parallel_pippenger_utils.rs | use crate::msm::pippenger_utils::num_bits;
/// Split a parallel Pippenger MSM into an `nx` x `ny` grid of tiles and pick
/// the per-row window size.
///
/// Returns `(nx, ny, wnd)`: `nx` point-range columns, `ny` scalar-bit rows,
/// and `wnd` bits processed per row. `NBITS` is the scalar bit length
/// (255 for BLS12-381 Fr).
pub fn breakdown(window: usize, ncpus: usize) -> (usize, usize, usize) {
    const NBITS: usize = 255;
    // The WINDOW_NX build-time env var overrides the column count; the row
    // count is then derived directly from the requested window.
    option_env!("WINDOW_NX")
        .map(|v| {
            v.parse()
                .expect("WINDOW_NX environment variable must be valid number")
        })
        .map(|nx| {
            let ny = NBITS / window + 1;
            (nx, ny, NBITS / ny + 1)
        })
        .unwrap_or({
            let mut nx: usize;
            let mut wnd: usize;
            if NBITS > window * ncpus {
                // More bit-rows than cores: keep a single column and tune the
                // window so rows spread across the cores.
                nx = 1;
                wnd = num_bits(ncpus / 4);
                if (window + wnd) > 18 {
                    wnd = window - wnd;
                } else {
                    // NOTE(review): `wnd` is reused as a scratch rows-per-cpu
                    // count in this comparison before receiving its final
                    // window value — intentional, but easy to misread.
                    wnd = (NBITS / window).div_ceil(ncpus);
                    if (NBITS / (window + 1)).div_ceil(ncpus) < wnd {
                        wnd = window + 1;
                    } else {
                        wnd = window;
                    }
                }
            } else {
                // Fewer rows than cores: add point-range columns until the
                // grid offers at least `ncpus` tiles, shrinking the window to
                // compensate for the extra columns.
                nx = 2;
                wnd = window - 2;
                while (NBITS / wnd + 1) * nx < ncpus {
                    nx += 1;
                    wnd = window - num_bits(3 * nx / 2);
                }
                nx -= 1;
                wnd = window - num_bits(3 * nx / 2);
            }
            // Re-derive the row count so rows evenly cover all NBITS bits.
            let ny = NBITS / wnd + 1;
            wnd = NBITS / ny + 1;
            (nx, ny, wnd)
        })
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/msm/sppark.rs | kzg/src/msm/sppark.rs | use std::ffi::c_void;
use crate::{Fr, G1Affine, G1Fp, G1GetFp, G1Mul, G1ProjAddAffine, G1};
/// Handle to an MSM precomputation table produced by the sppark C/CUDA
/// bindings.
#[derive(Debug)]
pub struct SpparkPrecomputation<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>
where
    TFr: Fr,
    TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
{
    // table holds a reference to value, initialized in C land. It should be never dereferenced
    pub table: *mut c_void,
    // PhantomData markers keep the generic parameters (used only by the C
    // side and by trait bounds) attached to the type.
    fr_marker: core::marker::PhantomData<TFr>,
    g1_marker: core::marker::PhantomData<TG1>,
    g1_fp_marker: core::marker::PhantomData<TG1Fp>,
    g1_affine_marker: core::marker::PhantomData<TG1Affine>,
    g1_proj_add_affine_marker: core::marker::PhantomData<TG1ProjAddAffine>,
}
// SAFETY(review): Sync is asserted manually because the raw `table` pointer
// is not Sync by default. This relies on the C-side table being immutable
// (or internally synchronized) after construction — TODO confirm against the
// sppark bindings; nothing in this file dereferences the pointer.
unsafe impl<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine> Sync
    for SpparkPrecomputation<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>
where
    TFr: Fr,
    TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
{
}
// SAFETY(review): Send is asserted manually for the same reason as Sync
// above: the raw pointer blocks the auto impl. This assumes the C-side table
// is not tied to the creating thread — TODO confirm against the sppark
// bindings.
unsafe impl<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine> Send
    for SpparkPrecomputation<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>
where
    TFr: Fr,
    TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
{
}
impl<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>
    SpparkPrecomputation<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>
where
    TFr: Fr,
    TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
{
    /// Trait-based construction is not supported for sppark; always yields
    /// `Ok(None)` so callers fall back to other MSM paths.
    pub fn new(_: &[TG1], _: &[Vec<TG1>]) -> Result<Option<Self>, String> {
        // TODO: currently no trait-based implementation for sppark msm, maybe in future
        Ok(None)
    }
    /// Unreachable stub kept for API parity with other precomputation tables.
    pub fn multiply_batch(&self, _: &[Vec<TFr>]) -> Vec<TG1> {
        panic!("This function must not be called")
    }
    /// Unreachable stub kept for API parity with other precomputation tables.
    pub fn multiply_sequential(&self, _: &[TFr]) -> TG1 {
        panic!("This function must not be called")
    }
    /// Unreachable stub kept for API parity with other precomputation tables.
    #[cfg(feature = "parallel")]
    pub fn multiply_parallel(&self, _: &[TFr]) -> TG1 {
        panic!("This function must not be called")
    }
    /// Wrap a table pointer handed over from the C side.
    pub fn from_ptr(table: *mut c_void) -> Self {
        use core::marker::PhantomData;
        // The marker types are inferred from Self's field declarations.
        Self {
            table,
            fr_marker: PhantomData,
            g1_marker: PhantomData,
            g1_fp_marker: PhantomData,
            g1_affine_marker: PhantomData,
            g1_proj_add_affine_marker: PhantomData,
        }
    }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/msm/diskcache.rs | kzg/src/msm/diskcache.rs | use core::marker::PhantomData;
use sha2::{Digest, Sha256};
use std::{
fs::File,
io::{BufReader, BufWriter, Read, Write},
};
use crate::{G1Affine, G1Fp, G1};
/// In-memory image of an on-disk MSM precomputation cache: a flat point
/// table plus a row-major batch table, each with its originating point count.
pub struct DiskCache<TG1: G1, TG1Fp: G1Fp, TG1Affine: G1Affine<TG1, TG1Fp>> {
    // Flat precomputation table, in uncompressed affine form.
    pub table: Vec<TG1Affine>,
    // Number of source points the flat table was built from.
    pub numpoints: usize,
    // Batch precomputation table (rows x columns of affine points).
    pub batch_table: Vec<Vec<TG1Affine>>,
    // Number of source points the batch table was built from.
    pub batch_numpoints: usize,
    g1_marker: PhantomData<TG1>,
    g1_fp_marker: PhantomData<TG1Fp>,
}
/// SHA-256 over every input point (flat `points` first, then `matrix` rows)
/// in uncompressed affine form.
///
/// Used to tie a cache file to the exact precomputation inputs so stale
/// caches are rejected on load.
fn compute_content_hash<TG1: G1, TG1Fp: G1Fp, TG1Affine: G1Affine<TG1, TG1Fp>>(
    points: &[TG1],
    matrix: &[Vec<TG1>],
) -> Result<[u8; 32], String> {
    let mut hasher = Sha256::new();
    for point in points {
        let affine = TG1Affine::into_affine(point);
        hasher
            .write_all(&affine.to_bytes_uncompressed())
            .map_err(|e| format!("{e:?}"))?;
    }
    for row in matrix {
        for point in row {
            // `point` is already `&TG1`; passing it directly removes the
            // needless `&point` double borrow the original relied on deref
            // coercion to resolve.
            let affine = TG1Affine::into_affine(point);
            hasher
                .write_all(&affine.to_bytes_uncompressed())
                .map_err(|e| format!("{e:?}"))?;
        }
    }
    Ok(hasher.finalize().into())
}
impl<TG1: G1, TG1Fp: G1Fp, TG1Affine: G1Affine<TG1, TG1Fp>> DiskCache<TG1, TG1Fp, TG1Affine> {
    /// Function for loading precomputed tables from disk.
    ///
    /// Reads file with name `rust-kzg.{algorithm}.{window}.cache.bin` from cache
    /// directory. Automatically validates file version & content hash, to avoid
    /// loading invalid precomputations.
    ///
    /// File layout (integers are big-endian u64, points are 96-byte
    /// uncompressed affine):
    ///   magic b"kzg1" | sha256 content hash (32B)
    ///   | numpoints | table_len | table points...
    ///   | batch_numpoints | rows | columns | row-major batch points...
    ///
    /// If fails to load precomputation, returns error along with content hash, if
    /// it was computed at that point.
    pub fn load(
        algorithm: &str,
        window: usize,
        points: &[TG1],
        matrix: &[Vec<TG1>],
    ) -> Result<Self, (String, Option<[u8; 32]>)> {
        let cache_dir = dirs::cache_dir();
        let Some(cache_dir) = cache_dir else {
            return Err(("Failed to get cache dir".to_owned(), None));
        };
        let cache_path = cache_dir.join(format!("rust-kzg.{algorithm}.{window}.cache.bin"));
        let cache_file =
            File::open(&cache_path).map_err(|e| (format!("Failed to read cache: {e:?}"), None))?;
        println!("reading msm cache from {cache_path:?}");
        let mut buf_reader = BufReader::new(cache_file);
        // Scratch buffer, large enough for one 96-byte uncompressed point.
        let mut buf = [0u8; 96];
        // check file format version
        buf_reader
            .read_exact(&mut buf[0..4])
            .map_err(|e| (format!("Read failure: {e:?}"), None))?;
        if &buf[0..4] != b"kzg1" {
            return Err(("Invalid cache file format".to_owned(), None));
        }
        // check content hash
        let contenthash = compute_content_hash::<TG1, TG1Fp, TG1Affine>(points, matrix)
            .map_err(|e| (format!("Failed to compute content hash: {e}"), None))?;
        buf_reader
            .read_exact(&mut buf[0..32])
            .map_err(|e| (format!("Read failure: {e:?}"), Some(contenthash)))?;
        if contenthash != &buf[0..32] {
            return Err(("Invalid content hash".to_owned(), Some(contenthash)));
        }
        buf_reader
            .read_exact(&mut buf[0..8])
            .map_err(|e| (format!("Read failure: {e:?}"), Some(contenthash)))?;
        let numpoints = u64::from_be_bytes(buf[0..8].try_into().unwrap());
        buf_reader
            .read_exact(&mut buf[0..8])
            .map_err(|e| (format!("Read failure: {e:?}"), Some(contenthash)))?;
        let table_size = u64::from_be_bytes(buf[0..8].try_into().unwrap());
        // Flat table: `table_size` consecutive uncompressed points.
        let mut table = Vec::with_capacity(table_size as usize);
        for _ in 0..table_size {
            buf_reader
                .read_exact(&mut buf[0..96])
                .map_err(|e| (format!("Read failure: {e:?}"), Some(contenthash)))?;
            let point = TG1Affine::from_bytes_uncompressed(buf[0..96].try_into().unwrap())
                .map_err(|e| {
                    (
                        format!("Failed to read point from cache, error: {e:?}",),
                        Some(contenthash),
                    )
                })?;
            table.push(point);
        }
        buf_reader
            .read_exact(&mut buf[0..8])
            .map_err(|e| (format!("Read failure: {e:?}"), Some(contenthash)))?;
        let batch_numpoints = u64::from_be_bytes(buf[0..8].try_into().unwrap());
        buf_reader
            .read_exact(&mut buf[0..8])
            .map_err(|e| (format!("Read failure: {e:?}"), Some(contenthash)))?;
        let rows = u64::from_be_bytes(buf[0..8].try_into().unwrap());
        buf_reader
            .read_exact(&mut buf[0..8])
            .map_err(|e| (format!("Read failure: {e:?}"), Some(contenthash)))?;
        let columns = u64::from_be_bytes(buf[0..8].try_into().unwrap());
        // Batch table: `rows` x `columns` points, row-major.
        let mut batch_table = Vec::with_capacity(rows as usize);
        for _ in 0..rows {
            let mut row = Vec::with_capacity(columns as usize);
            for _ in 0..columns {
                buf_reader
                    .read_exact(&mut buf[0..96])
                    .map_err(|e| (format!("Read failure: {e:?}"), Some(contenthash)))?;
                let point = TG1Affine::from_bytes_uncompressed(buf[0..96].try_into().unwrap())
                    .map_err(|e| {
                        (
                            format!("Failed to read point from cache, error: {e:?}",),
                            Some(contenthash),
                        )
                    })?;
                row.push(point);
            }
            batch_table.push(row);
        }
        Ok(Self {
            table,
            numpoints: numpoints as usize,
            batch_table,
            batch_numpoints: batch_numpoints as usize,
            g1_marker: PhantomData,
            g1_fp_marker: PhantomData,
        })
    }
    /// Write the precomputed tables to the cache file (layout documented on
    /// [`Self::load`]).
    ///
    /// `contenthash` may carry a hash already computed by a failed `load`;
    /// when `None` it is recomputed from `points` and `matrix`.
    pub fn save(
        algorithm: &str,
        window: usize,
        points: &[TG1],
        matrix: &[Vec<TG1>],
        table: &[TG1Affine],
        numpoints: usize,
        batch_table: &[Vec<TG1Affine>],
        batch_numpoints: usize,
        contenthash: Option<[u8; 32]>,
    ) -> Result<(), String> {
        let cache_dir = dirs::cache_dir();
        let Some(cache_dir) = cache_dir else {
            return Err("Failed to get cache dir".to_owned());
        };
        let cache_path = cache_dir.join(format!("rust-kzg.{algorithm}.{window}.cache.bin"));
        // Bug fix: the error message previously said "Failed to read cache"
        // even though this is a create/write failure.
        let cache_file =
            File::create(&cache_path).map_err(|e| format!("Failed to create cache: {e:?}"))?;
        println!("writing msm cache to {cache_path:?}");
        let mut writer = BufWriter::new(cache_file);
        writer
            .write_all(b"kzg1")
            .map_err(|e| format!("Write failure: {e:?}"))?;
        // Reuse a hash computed during a failed load, if available.
        let contenthash = contenthash
            .map(Ok)
            .unwrap_or_else(|| compute_content_hash::<TG1, TG1Fp, TG1Affine>(points, matrix))?;
        writer
            .write_all(&contenthash)
            .map_err(|e| format!("Write failure: {e:?}"))?;
        writer
            .write_all(&(numpoints as u64).to_be_bytes())
            .map_err(|e| format!("Write failure: {e:?}"))?;
        writer
            .write_all(&(table.len() as u64).to_be_bytes())
            .map_err(|e| format!("Write failure: {e:?}"))?;
        for point in table {
            writer
                .write_all(&point.to_bytes_uncompressed())
                .map_err(|e| format!("Write failure: {e:?}"))?;
        }
        writer
            .write_all(&(batch_numpoints as u64).to_be_bytes())
            .map_err(|e| format!("Write failure: {e:?}"))?;
        writer
            .write_all(&(batch_table.len() as u64).to_be_bytes())
            .map_err(|e| format!("Write failure: {e:?}"))?;
        // All rows are assumed to share the first row's length.
        let columns = batch_table.first().map(|s| s.len()).unwrap_or(0);
        writer
            .write_all(&(columns as u64).to_be_bytes())
            .map_err(|e| format!("Write failure: {e:?}"))?;
        for row in batch_table {
            for point in row {
                writer
                    .write_all(&point.to_bytes_uncompressed())
                    .map_err(|e| format!("Write failure: {e:?}"))?;
            }
        }
        // Flush explicitly: Drop would swallow write errors silently.
        let file = writer
            .into_inner()
            .map_err(|e| format!("Failed to flush: {e:?}"))?;
        file.sync_all()
            .map_err(|e| format!("Failed to sync data to disk: {e:?}"))?;
        Ok(())
    }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/msm/precompute.rs | kzg/src/msm/precompute.rs | extern crate alloc;
use alloc::{string::String, vec::Vec};
use crate::{Fr, G1Affine, G1Fp, G1GetFp, G1Mul, G1ProjAddAffine, G1};
// The `PrecomputationTable` alias below can resolve to only one backend, so
// selecting more than one MSM backend feature is rejected at compile time.
#[cfg(any(
    all(feature = "arkmsm", feature = "bgmw"),
    all(feature = "arkmsm", feature = "sppark"),
    all(feature = "arkmsm", feature = "wbits"),
    all(feature = "bgmw", feature = "sppark"),
    all(feature = "bgmw", feature = "wbits"),
    all(feature = "sppark", feature = "wbits")
))]
compile_error!(
    "incompatible features, please select only one: `arkmsm`, `bgmw`, `sppark` or `wbits`"
);
// Backend selection: each enabled feature maps the shared
// `PrecomputationTable` alias onto that backend's concrete table type.
#[cfg(feature = "bgmw")]
pub type PrecomputationTable<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine> =
    super::bgmw::BgmwTable<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>;
#[cfg(feature = "sppark")]
pub type PrecomputationTable<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine> =
    super::sppark::SpparkPrecomputation<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>;
#[cfg(feature = "wbits")]
pub type PrecomputationTable<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine> =
    super::wbits::WbitsTable<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>;
/// Fallback table used when no precomputation backend feature is enabled.
/// Never instantiated (`new` always returns `Ok(None)`); the PhantomData
/// fields only keep the generic parameters attached to the type.
#[cfg(all(not(feature = "bgmw"), not(feature = "sppark"), not(feature = "wbits")))]
#[derive(Debug, Clone)]
pub struct EmptyTable<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>
where
    TFr: Fr,
    TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
{
    fr_marker: core::marker::PhantomData<TFr>,
    g1_marker: core::marker::PhantomData<TG1>,
    g1_fp_marker: core::marker::PhantomData<TG1Fp>,
    g1_affine_marker: core::marker::PhantomData<TG1Affine>,
    g1_affine_add_marker: core::marker::PhantomData<TG1ProjAddAffine>,
}
#[cfg(all(not(feature = "bgmw"), not(feature = "sppark"), not(feature = "wbits")))]
impl<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>
    EmptyTable<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>
where
    TFr: Fr,
    TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
{
    // Always yields Ok(None): with no backend selected there is nothing to
    // precompute, and callers fall back to plain MSM.
    fn new(_: &[TG1], _: &[Vec<TG1>]) -> Result<Option<Self>, String> {
        Ok(None)
    }
    // The multiply_* stubs exist only so the alias exposes the same API as a
    // real backend table; since no instance can exist they are unreachable.
    pub fn multiply_batch(&self, _: &[Vec<TFr>]) -> Vec<TG1> {
        panic!("This function must not be called")
    }
    pub fn multiply_sequential(&self, _: &[TFr]) -> TG1 {
        panic!("This function must not be called")
    }
    #[cfg(feature = "parallel")]
    pub fn multiply_parallel(&self, _: &[TFr]) -> TG1 {
        panic!("This function must not be called")
    }
}
// Default alias: with no backend feature enabled, the shared name resolves
// to the no-op EmptyTable above.
#[cfg(all(not(feature = "bgmw"), not(feature = "sppark"), not(feature = "wbits")))]
pub type PrecomputationTable<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine> =
    EmptyTable<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>;
/// Build the MSM precomputation table for the feature-selected backend.
///
/// Returns `Ok(None)` when the active backend does not support (or needs no)
/// precomputation for these inputs; errors are propagated from the backend.
#[allow(clippy::type_complexity)]
pub fn precompute<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>(
    points: &[TG1],
    matrix: &[Vec<TG1>],
) -> Result<Option<PrecomputationTable<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>>, String>
where
    TFr: Fr,
    TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
{
    PrecomputationTable::<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>::new(points, matrix)
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/msm/tiling_parallel_pippenger.rs | kzg/src/msm/tiling_parallel_pippenger.rs | use core::{
num::Wrapping,
sync::atomic::{AtomicUsize, Ordering},
};
use alloc::sync::Arc;
use std::sync::{mpsc::channel, Barrier};
use crate::{G1Affine, G1Fp, G1GetFp, Scalar256, G1};
use super::{
cell::Cell,
parallel_pippenger_utils::breakdown,
pippenger_utils::{pippenger_window_size, P1XYZZ},
thread_pool::{da_pool, ThreadPoolExt},
tiling_pippenger_ops::{p1s_tile_pippenger_pub, tiling_pippenger},
};
/// One unit of work for the tiled parallel Pippenger MSM: a rectangle in the
/// (point index) x (scalar bit) space.
struct Tile {
    // First point index of the tile.
    x: usize,
    // Number of points covered.
    dx: usize,
    // First scalar bit processed by this tile.
    y: usize,
    // Number of scalar bits processed.
    dy: usize,
}
/// Convert projective points to affine form, splitting the work across the
/// thread pool; falls back to the serial conversion for small inputs or a
/// single-core pool.
pub fn parallel_affine_conv<TG1: G1, TFp: G1Fp, TG1Affine: G1Affine<TG1, TFp> + Sized>(
    points: &[TG1],
) -> Vec<TG1Affine> {
    let npoints = points.len();
    let pool = da_pool();
    let ncpus = pool.max_count();
    // Parallel dispatch is not worth it below ~768 points.
    if ncpus < 2 || npoints < 768 {
        return TG1Affine::into_affines(points);
    }
    let mut ret = Vec::<TG1Affine>::with_capacity(npoints);
    // SAFETY(review): the elements are uninitialized here; every index in
    // 0..npoints is written by exactly one worker's `into_affines_loc` call
    // below before the barrier releases and `ret` is read.
    #[allow(clippy::uninit_vec)]
    unsafe {
        ret.set_len(npoints)
    };
    // At most one slice per ~512 points, capped at the pool size.
    let mut nslices = npoints.div_ceil(512);
    nslices = core::cmp::min(nslices, ncpus);
    // Barrier + countdown: the last finishing worker meets the main thread
    // at the 2-party barrier.
    let wg = Arc::new((Barrier::new(2), AtomicUsize::new(nslices)));
    // Distribute npoints over nslices: the first `rem` slices get one extra
    // element (delta starts one high and drops once rem is exhausted).
    let (mut delta, mut rem) = (npoints / nslices + 1, Wrapping(npoints % nslices));
    let mut x = 0usize;
    while x < npoints {
        delta -= (rem == Wrapping(0)) as usize;
        rem -= Wrapping(1);
        let out = &mut ret[x..x + delta];
        let inp = &points[x..x + delta];
        x += delta;
        let wg = wg.clone();
        pool.joined_execute(move || {
            TG1Affine::into_affines_loc(out, inp);
            // Last worker to finish signals the waiting main thread.
            if wg.1.fetch_sub(1, Ordering::AcqRel) == 1 {
                wg.0.wait();
            }
        });
    }
    wg.0.wait();
    ret
}
/// Multi-scalar multiplication via Pippenger's algorithm, tiled over both the
/// point range and the scalar bits and executed on the shared thread pool.
///
/// The (points x bits) space is cut into an `nx` x `ny` grid of tiles (see
/// `breakdown`); workers claim tiles from a shared counter, and the main
/// thread folds completed bit-rows together from the most significant row
/// down, doubling `window` times between rows.
pub fn tiling_parallel_pippenger<
    TG1: G1 + G1GetFp<TG1Fp>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
>(
    mut points: &[TG1Affine],
    scalars: &[Scalar256],
) -> TG1 {
    // Excess points without scalars contribute nothing; trim them.
    if scalars.len() < points.len() {
        points = &points[0..scalars.len()];
    }
    let npoints = points.len();
    let pool = da_pool();
    let ncpus = pool.max_count();
    // Serial path for tiny inputs or a single-core pool.
    if ncpus < 2 || npoints < 32 {
        return tiling_pippenger(points, scalars);
    }
    let (nx, ny, window) = breakdown(pippenger_window_size(npoints), ncpus);
    // |grid[]| holds "coordinates" and place for result
    let mut grid: Vec<(Tile, Cell<TG1>)> = Vec::with_capacity(nx * ny);
    // SAFETY(review): grid entries are uninitialized; the Tile fields of all
    // `total` used slots are assigned in the two loops below, and each Cell
    // is written by its worker (p1s_tile_pippenger_pub) before the consumer
    // loop reads it — the row_sync/channel handshake orders those accesses.
    #[allow(clippy::uninit_vec)]
    unsafe {
        grid.set_len(grid.capacity())
    };
    let dx = npoints / nx;
    // Start with the topmost (widest) bit-row: bits y..255.
    let mut y = window * (ny - 1);
    let mut total = 0usize;
    while total < nx {
        grid[total].0.x = total * dx;
        grid[total].0.dx = dx;
        grid[total].0.y = y;
        grid[total].0.dy = 255 - y;
        total += 1;
    }
    // Last column absorbs the division remainder.
    grid[total - 1].0.dx = npoints - grid[total - 1].0.x;
    // Remaining rows replicate the column layout with dy == window.
    while y != 0 {
        y -= window;
        for i in 0..nx {
            grid[total].0.x = grid[i].0.x;
            grid[total].0.dx = grid[i].0.dx;
            grid[total].0.y = y;
            grid[total].0.dy = window;
            total += 1;
        }
    }
    let grid = &grid[..];
    let points = points;
    // One completion counter per bit-row; the worker that finishes a row's
    // last tile announces the row over the channel.
    let mut row_sync: Vec<AtomicUsize> = Vec::with_capacity(ny);
    row_sync.resize_with(ny, Default::default);
    let row_sync = Arc::new(row_sync);
    let counter = Arc::new(AtomicUsize::new(0));
    let (tx, rx) = channel();
    let n_workers = core::cmp::min(ncpus, total);
    for _ in 0..n_workers {
        let tx = tx.clone();
        let counter = counter.clone();
        let row_sync = row_sync.clone();
        pool.joined_execute(move || {
            // Bucket storage is reused across all tiles this worker claims.
            let mut buckets = vec![P1XYZZ::<TG1Fp>::default(); 1 << (window - 1)];
            loop {
                // Claim the next unprocessed tile.
                let work = counter.fetch_add(1, Ordering::Relaxed);
                if work >= total {
                    break;
                }
                let x = grid[work].0.x;
                let y = grid[work].0.y;
                let dx = grid[work].0.dx;
                p1s_tile_pippenger_pub(
                    grid[work].1.as_mut(),
                    &points[x..(x + dx)],
                    &scalars[x..],
                    &mut buckets,
                    y,
                    window,
                );
                // Last tile of this row: notify the accumulator thread.
                if row_sync[y / window].fetch_add(1, Ordering::AcqRel) == nx - 1 {
                    tx.send(y).expect("disaster");
                }
            }
        });
    }
    // Accumulate rows top-down as they complete. `rows` remembers finished
    // rows so a row arriving out of order is picked up once its predecessors
    // have been folded in; `row` walks the grid in layout order (top row
    // first, then descending rows).
    let mut ret = <TG1>::default();
    let mut rows = vec![false; ny];
    let mut row = 0usize;
    for _ in 0..ny {
        let mut y = rx.recv().unwrap();
        rows[y / window] = true;
        while grid[row].0.y == y {
            // Fold in all nx tiles of the current row.
            while row < total && grid[row].0.y == y {
                ret.add_or_dbl_assign(grid[row].1.as_mut());
                row += 1;
            }
            if y == 0 {
                break;
            }
            // Shift the partial sum down one row: window doublings.
            for _ in 0..window {
                ret.dbl_assign();
            }
            y -= window;
            // Stop if the next row has not been reported finished yet.
            if !rows[y / window] {
                break;
            }
        }
    }
    ret
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/msm/tiling_pippenger_ops.rs | kzg/src/msm/tiling_pippenger_ops.rs | use crate::{G1Affine, G1Fp, G1GetFp, Scalar256, G1};
use alloc::vec;
use super::pippenger_utils::{
booth_decode, booth_encode, get_wval_limb, is_zero, p1_dadd, p1_to_jacobian,
pippenger_window_size, type_is_zero, type_zero, P1XYZZ,
};
/// Sum all buckets, weighting each bucket by its (1-based) index.
///
/// Walking from the highest bucket down, `acc` carries the running suffix
/// sum of buckets while `total` accumulates the sum of those suffix sums,
/// which equals `sum(i * bucket[i-1])`. Each bucket is zeroed as it is
/// consumed, leaving the scratch space ready for the next tile.
///
/// ## Arguments
///
/// * `out` - output where the bucket sum is written (converted to Jacobian)
/// * `buckets` - the bucket array; cleared as a side effect
/// * `wbits` - window size, aka exponent of q (q^window)
pub fn p1_integrate_buckets<TG1: G1 + G1GetFp<TFp>, TFp: G1Fp>(
    out: &mut TG1,
    buckets: &mut [P1XYZZ<TFp>],
    wbits: usize,
) {
    let top = (1usize << wbits) - 1;
    let mut total = buckets[top];
    let mut acc = buckets[top];
    type_zero(&mut buckets[top]);
    let mut idx = top;
    while idx > 0 {
        idx -= 1;
        if type_is_zero(&buckets[idx]) == 0 {
            p1_dadd(&mut acc, &buckets[idx]);
            type_zero(&mut buckets[idx]);
        }
        p1_dadd(&mut total, &acc);
    }
    p1_to_jacobian(out, &total);
}
/// Process one Pippenger tile, clamping the window width at the top of the
/// 255-bit scalar range.
///
/// The topmost window may be narrower than `window`; in that case the
/// effective bucket-index width `cbits` is widened by one to make room for
/// the Booth carry bit.
#[allow(clippy::too_many_arguments)]
pub fn p1s_tile_pippenger_pub<TG1: G1 + G1GetFp<TFp>, TFp: G1Fp, TG1Affine: G1Affine<TG1, TFp>>(
    ret: &mut TG1,
    points: &[TG1Affine],
    scalars: &[Scalar256],
    buckets: &mut [P1XYZZ<TFp>],
    bit0: usize,
    window: usize,
) {
    const NBITS: usize = 255;
    let (wbits, cbits) = if bit0 + window > NBITS {
        let clipped = NBITS - bit0;
        (clipped, clipped + 1)
    } else {
        (window, window)
    };
    p1s_tile_pippenger(ret, points, scalars, buckets, bit0, wbits, cbits);
}
/// Accumulate one Pippenger tile: for every (point, scalar) pair, extract the
/// window starting at `bit0`, Booth-encode it, add/subtract the point into
/// the matching bucket, then integrate the buckets into `ret`.
#[allow(clippy::too_many_arguments)]
pub fn p1s_tile_pippenger<TG1: G1 + G1GetFp<TFp>, TFp: G1Fp, TG1Affine: G1Affine<TG1, TFp>>(
    ret: &mut TG1,
    points: &[TG1Affine],
    scalars: &[Scalar256],
    buckets: &mut [P1XYZZ<TFp>],
    bit0: usize,
    wbits: usize,
    cbits: usize,
) {
    // Create mask that keeps `wbits + 1` bits: Booth recoding also needs the
    // bit just below the window (see the bit0/wbits adjustment below).
    let wmask = (1u64 << (wbits + 1)) - 1;
    /*
     * Check if `bit0` is zero. `z` is set to `1` when `bit0 = 0`, and `0` otherwise.
     *
     * The `z` flag is used to do a small trick: for every window except the
     * lowest one, the extraction is widened one bit downwards so Booth
     * recoding can see the previous window's top bit; the lowest window has
     * no previous bit, so the extracted value is shifted left by `z` instead.
     */
    let z = is_zero(bit0.try_into().unwrap());
    // Offset `bit0` by 1, if it is not equal to zero.
    let bit0 = bit0 - (z ^ 1) as usize;
    // Increase `wbits` by one, if `bit0` is not equal to zero.
    let wbits = wbits + (z ^ 1) as usize;
    for (point, scalar) in points.iter().zip(scalars.iter()) {
        // Calculate first window value (encoded bucket index)
        let wval = (get_wval_limb(scalar, bit0, wbits) << z) & wmask;
        let wval = booth_encode(wval, cbits);
        // Move point to corresponding bucket
        booth_decode(buckets, wval, cbits, point);
    }
    // Integrate buckets - multiply point in each bucket by its index and sum all results
    p1_integrate_buckets(ret, buckets, cbits - 1);
}
/// Single-threaded tiling Pippenger MSM over `(points, scalars)`.
///
/// Walks the 255-bit scalars from the most significant window down to bit 0
/// (Horner evaluation in the exponent): one tile per window, with `window`
/// doublings of the accumulator between tiles. The topmost tile uses the
/// remainder width `255 % window` so all subsequent tiles are full-width.
pub fn tiling_pippenger<TG1: G1 + G1GetFp<TG1Fp>, TG1Fp: G1Fp, TG1Affine: G1Affine<TG1, TG1Fp>>(
    points: &[TG1Affine],
    scalars: &[Scalar256],
) -> TG1 {
    let window = pippenger_window_size(points.len());
    let mut buckets = vec![P1XYZZ::<TG1Fp>::default(); 1 << (window - 1)];
    // Topmost (remainder) window; it carries one extra Booth bit in `cbits`.
    let mut wbits: usize = 255 % window;
    let mut cbits: usize = wbits + 1;
    let mut bit0: usize = 255;
    let mut tile = TG1::zero();
    let mut ret = TG1::zero();
    loop {
        bit0 -= wbits;
        if bit0 == 0 {
            break;
        }
        p1s_tile_pippenger(&mut tile, points, scalars, &mut buckets, bit0, wbits, cbits);
        ret.add_or_dbl_assign(&tile);
        // Shift the accumulator up by one window before the next tile.
        for _ in 0..window {
            ret.dbl_assign();
        }
        cbits = window;
        wbits = window;
    }
    // Final (lowest) tile at bit 0.
    p1s_tile_pippenger(&mut tile, points, scalars, &mut buckets, 0, wbits, cbits);
    ret.add_or_dbl_assign(&tile);
    ret
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/msm/pippenger_utils.rs | kzg/src/msm/pippenger_utils.rs | use core::mem::size_of;
use crate::{G1Affine, G1Fp, G1GetFp, Scalar256, G1};
/// Point in extended Jacobian ("XYZZ") coordinates.
///
/// Presumably represents the affine point `(x / zz, y / zzz)` with the
/// invariant `zz^3 == zzz^2` (see `p1_to_jacobian`, which maps it to
/// Jacobian as `(x * zz, y * zzz, zz)`) — TODO confirm against the blst
/// layout this mirrors. The point at infinity is encoded as `zz == zzz == 0`;
/// the `#[repr(C)]` field order (x, y, zzz, zz) is relied upon by the raw
/// byte helpers below, which treat (x, y) and (zzz, zz) as contiguous pairs.
#[repr(C)]
#[derive(Default, Clone, Copy, Debug)]
pub struct P1XYZZ<TFp: G1Fp> {
    pub x: TFp,
    pub y: TFp,
    pub zzz: TFp,
    pub zz: TFp,
}
/// Zero every 64-bit word of `ret` in place.
///
/// `T` is assumed to be plain-old-data whose size is a multiple of 8 bytes
/// (field elements, XYZZ points) and for which an all-zero bit pattern is a
/// valid value.
#[inline(always)]
pub fn type_zero<T>(ret: &mut T) {
    let rp = ret as *mut T as *mut u64;
    let num = size_of::<T>() / size_of::<u64>();
    for i in 0..num {
        // SAFETY: i < size_of::<T>() / 8, so the write stays inside `*ret`.
        unsafe {
            *rp.wrapping_add(i) = 0;
        }
    }
}
/// Branchless zero test: returns `1` when `val == 0`, `0` otherwise.
///
/// `val - 1` only carries into the top bit when `val` is 0 or has its top
/// bit clear; masking with `!val` leaves the top bit set exactly for 0.
pub const fn is_zero(val: u64) -> u64 {
    let borrow = val.wrapping_sub(1) & !val;
    borrow >> (u64::BITS - 1)
}
/// Zero `num` bytes starting at `ret`, writing 64-bit words.
///
/// The caller must guarantee that `ret` points to at least `num` bytes of
/// writable memory and that `num` is a multiple of 8.
#[inline(always)]
pub fn vec_zero_rt(ret: *mut u64, mut num: usize) {
    // `num` is a byte count; convert it to a count of u64 words. The previous
    // code divided by `size_of::<usize>()`, which on a 32-bit target yields
    // twice the correct word count and overruns the buffer (every sibling
    // helper here — vec_is_zero, type_is_zero, vec_copy — divides by
    // `size_of::<u64>()`).
    num /= size_of::<u64>();
    for i in 0..num {
        // SAFETY: caller guarantees `num * 8` writable bytes at `ret`.
        unsafe {
            *ret.add(i) = 0;
        }
    }
}
/// Branchless test whether `num` bytes at `a` are all zero.
///
/// Returns `1` when every byte is zero, `0` otherwise. `num` must be a
/// multiple of 8 and `a` must point to at least `num` readable bytes.
#[inline(always)]
pub fn vec_is_zero(a: *const u8, num: usize) -> u64 {
    let ap = a as *const u64;
    let num = num / size_of::<u64>();
    let mut acc: u64 = 0;
    for i in 0..num {
        // SAFETY: i stays within the caller-guaranteed `num` bytes.
        unsafe {
            acc |= *ap.wrapping_add(i);
        }
    }
    is_zero(acc)
}
/// Branchless test whether every 64-bit word of `a` is zero.
///
/// Returns `1` for an all-zero value, `0` otherwise. `T` is assumed to be
/// plain-old-data whose size is a multiple of 8 bytes.
#[inline(always)]
pub fn type_is_zero<T>(a: &T) -> u64 {
    let ap = a as *const T as *const u64;
    let num = size_of::<T>() / size_of::<u64>();
    let mut acc: u64 = 0;
    for i in 0..num {
        // SAFETY: i < size_of::<T>() / 8, so the read stays inside `*a`.
        unsafe {
            acc |= *ap.wrapping_add(i);
        }
    }
    is_zero(acc)
}
/// Copy `num` bytes from `a` to `ret` as 64-bit words.
///
/// `num` must be a multiple of 8 and the regions must not overlap in a way
/// that a forward word copy would corrupt.
#[inline(always)]
fn vec_copy(ret: *mut u8, a: *const u8, num: usize) {
    let rp = ret as *mut u64;
    let ap = a as *const u64;
    let num = num / size_of::<u64>();
    for i in 0..num {
        // SAFETY: i stays within the caller-guaranteed `num` bytes on both sides.
        unsafe {
            *rp.wrapping_add(i) = *ap.wrapping_add(i);
        }
    }
}
/// Convert an XYZZ point to Jacobian coordinates and store it in `out`.
///
/// A zero `zz` marks the point at infinity, which maps to `TG1::zero()`;
/// otherwise the Jacobian point is `(x * zz, y * zzz, zz)`.
pub fn p1_to_jacobian<TG1: G1 + G1GetFp<TFp>, TFp: G1Fp>(out: &mut TG1, input: &P1XYZZ<TFp>) {
    *out = if input.zz.is_zero() {
        TG1::zero()
    } else {
        TG1::from_jacobian(
            input.x.mul_fp(&input.zz),
            input.y.mul_fp(&input.zzz),
            input.zz,
        )
    };
}
/// Mixed addition: fold the affine point `p2` into the XYZZ accumulator
/// `out`, subtracting instead of adding when `subtract` is set.
///
/// Cases handled in order: `p2` at infinity (no-op); `out` at infinity
/// (initialize from `p2`); general addition; doubling (equal points);
/// opposite points (result set to infinity).
fn p1_dadd_affine<TG1: G1, TFp: G1Fp, TG1Affine: G1Affine<TG1, TFp>>(
    out: &mut P1XYZZ<TFp>,
    p2: &TG1Affine,
    subtract: bool, // Need to replace this somehow
) {
    if p2.is_zero() {
        return;
    } else if vec_is_zero(&out.zzz as *const TFp as *const u8, 2 * size_of::<TFp>()) != 0 {
        // `out` is the point at infinity (zzz == zz == 0): copy (x, y) from
        // `p2` in one block (relies on #[repr(C)] adjacency of x and y) and
        // set zz = zzz = R.
        vec_copy(
            &mut (out.x) as *mut TFp as *mut u8,
            ((*p2).x()) as *const TFp as *const u8,
            2 * size_of::<TFp>(),
        );
        out.zzz = TFp::bls12_381_rx_p();
        // Subtraction is recorded by negating zzz (i.e. negating y).
        if subtract {
            out.zzz.neg_assign();
        }
        out.zz = TFp::bls12_381_rx_p();
        return;
    }
    // p = X2*ZZ1 - X1, r = (+-Y2)*ZZZ1 - Y1
    let mut p = p2.x().mul_fp(&out.zz);
    let mut r = p2.y().mul_fp(&out.zzz);
    if subtract {
        r.neg_assign();
    }
    p.sub_assign_fp(&out.x);
    r.sub_assign_fp(&out.y);
    if type_is_zero(&p) == 0 {
        // Distinct x coordinates: general mixed XYZZ addition.
        let pp = p.square();
        let ppp = pp.mul_fp(&p);
        let mut q = out.x.mul_fp(&pp);
        out.x = r.square();
        p = q.add_fp(&q);
        out.x.sub_assign_fp(&ppp);
        out.x.sub_assign_fp(&p);
        q.sub_assign_fp(&out.x);
        q.mul_assign_fp(&r);
        out.y.mul_assign_fp(&ppp);
        out.y = q.sub_fp(&out.y);
        out.zz.mul_assign_fp(&pp);
        out.zzz.mul_assign_fp(&ppp);
    } else if type_is_zero(&r) != 0 {
        // Same x and same y: double the affine point `p2`.
        let mut u = p2.y().add_fp(p2.y());
        out.zz = u.square();
        out.zzz = out.zz.mul_fp(&u);
        let mut s = p2.x().mul_fp(&out.zz);
        let mut m = p2.x().square();
        m = m.add_fp(&m).add_fp(&m);
        out.x = m.square();
        u = s.add_fp(&s);
        out.x.sub_assign_fp(&u);
        out.y = out.zzz.mul_fp(p2.y());
        s.sub_assign_fp(&out.x);
        s.mul_assign_fp(&m);
        out.y = s.sub_fp(&out.y);
        if subtract {
            out.zzz.neg_assign();
        }
    } else {
        // Same x, opposite y: the sum is the point at infinity
        // (zero the zzz/zz pair).
        vec_zero_rt(
            &mut out.zzz as *mut TFp as *mut u64,
            2 * core::mem::size_of_val(&out.zzz),
        );
    }
}
/// Add the XYZZ point `p2` into the XYZZ accumulator `out`.
///
/// Cases handled in order: `p2` at infinity (no-op); `out` at infinity
/// (copy); general addition; doubling (equal points); opposite points
/// (result set to infinity). Infinity is encoded as zzz == zz == 0 and the
/// pair is tested/cleared as one block, relying on the #[repr(C)] layout.
pub fn p1_dadd<TFp: G1Fp>(out: &mut P1XYZZ<TFp>, p2: &P1XYZZ<TFp>) {
    if vec_is_zero(&p2.zzz as *const TFp as *const u8, 2 * size_of::<TFp>()) != 0 {
        return;
    } else if vec_is_zero(&out.zzz as *const TFp as *const u8, 2 * size_of::<TFp>()) != 0 {
        *out = *p2;
        return;
    }
    // u = X1*ZZ2, s = Y1*ZZZ2, p = X2*ZZ1 - u, r = Y2*ZZZ1 - s
    let mut u = out.x.mul_fp(&p2.zz);
    let mut s = out.y.mul_fp(&p2.zzz);
    let mut p = p2.x.mul_fp(&out.zz);
    let mut r = p2.y.mul_fp(&out.zzz);
    p.sub_assign_fp(&u);
    r.sub_assign_fp(&s);
    if type_is_zero(&p) == 0 {
        // Distinct x coordinates: general XYZZ addition.
        let pp = p.square();
        let ppp = pp.mul_fp(&p);
        let mut q = u.mul_fp(&pp);
        out.x = r.square();
        p = q.add_fp(&q);
        out.x.sub_assign_fp(&ppp);
        out.x.sub_assign_fp(&p);
        q.sub_assign_fp(&out.x);
        q.mul_assign_fp(&r);
        out.y = s.mul_fp(&ppp);
        out.y = q.sub_fp(&out.y);
        out.zz.mul_assign_fp(&p2.zz);
        out.zzz.mul_assign_fp(&p2.zzz);
        out.zz.mul_assign_fp(&pp);
        out.zzz.mul_assign_fp(&ppp);
    } else if type_is_zero(&r) != 0 {
        // Same x and same y: doubling.
        u = out.y.add_fp(&out.y);
        let v = u.square();
        let w = v.mul_fp(&u);
        s = out.x.mul_fp(&v);
        let mut m = out.x.square();
        m = m.add_fp(&m).add_fp(&m);
        out.x = m.square();
        u = s.add_fp(&s);
        out.x.sub_assign_fp(&u);
        out.y = w.mul_fp(&out.y);
        s.sub_assign_fp(&out.x);
        s.mul_assign_fp(&m);
        out.y = s.sub_fp(&out.y);
        out.zz.mul_assign_fp(&v);
        out.zzz.mul_assign_fp(&w);
    } else {
        // Same x, opposite y: the sum is the point at infinity.
        vec_zero_rt(&mut out.zzz as *mut TFp as *mut u64, 2 * size_of::<TFp>());
    }
}
/// Extract `bits` from the beginning of `d` array, with offset `off`.
///
/// This function is used to extract N bits from the scalar, decomposing it into q-ary representation.
/// This works because `q` is `2^bits`, so extracting `bits` from scalar will break it into the correct representation.
///
/// Caution! This function guarantees only that `bits` amount of bits from the right will be extracted. All unused bits
/// will contain "trash". For example, if we try to extract first 4 bits from the array `[0b01010111u8]`, this
/// function will return `0111`, but other bits will contain trash (see tests::get_wval_limb_example_1)
///
/// # Arguments
///
/// * `d` - byte array, from which bits will be extracted
/// * `off` - index of first bit, that will be extracted
/// * `bits` - number of bits to extract (up to 25)
///
/// # Examples
///
/// See tests::get_wval_limb_example_2
///
pub fn get_wval_limb(d: &Scalar256, off: usize, bits: usize) -> u64 {
    let mut d = d.as_u8();
    // Number of bytes the bit range [off, off + bits) spans.
    let top = ((off + bits - 1) / 8).wrapping_sub((off / 8).wrapping_sub(1));
    // Skip whole bytes below the range.
    d = &d[off / 8..];
    let mut mask = u64::MAX;
    let mut ret: u64 = 0;
    // Branchless gather of up to 4 bytes: `mask` is all-ones while bytes of
    // the range remain and 0 afterwards, so extra iterations contribute
    // nothing and the slice pointer stops advancing.
    for i in 0..4usize {
        ret |= (d[0] as u64 & mask) << (8 * i);
        // Sign bit of (i + 1 - top): all-ones while i + 1 < top.
        mask = 0u64.wrapping_sub(((i + 1).wrapping_sub(top) >> (usize::BITS - 1)) as u64);
        // Advance by one byte while `mask` is set, else stay in place.
        d = &d[(1 & mask).try_into().unwrap()..];
    }
    // Drop the sub-byte offset; bits above `bits` are unspecified ("trash").
    ret >> (off % 8)
}
/// Window value encoding that utilizes the fact that -P is trivially
/// calculated, which allows to halve the size of the pre-computed table,
/// is attributed to A. D. Booth, hence the name of the subroutines...
///
/// Bit `sz` of `wval` selects the sign; the remaining value is rounded to
/// `(wval + 1) >> 1` and, when the sign bit is set, negated in two's
/// complement via the branchless `(x ^ m) - m` identity.
pub const fn booth_encode(wval: u64, sz: usize) -> u64 {
    // All-ones when the sign bit (bit `sz`) is set, zero otherwise.
    let neg_mask = 0u64.wrapping_sub(wval >> sz);
    let half = (wval + 1) >> 1;
    (half ^ neg_mask).wrapping_sub(neg_mask)
}
/// Decode a Booth-encoded index and fold point `p` into the matching bucket.
///
/// The bit at position `wbits` of `booth_idx` carries the sign (subtract
/// when set); the low `wbits` bits select the bucket, with index 0 meaning
/// "no bucket, skip".
///
/// ## Arguments:
///
/// * buckets - the bucket array
/// * booth_idx - bucket index, encoded with [booth_encode] function
/// * wbits - window size, aka exponent of q (q^window)
/// * p - point to move
pub fn booth_decode<TG1: G1, TFp: G1Fp, TG1Affine: G1Affine<TG1, TFp>>(
    buckets: &mut [P1XYZZ<TFp>],
    mut booth_idx: u64,
    wbits: usize,
    p: &TG1Affine,
) {
    let subtract = (booth_idx >> wbits) & 1 == 1;
    booth_idx &= (1 << wbits) - 1;
    match booth_idx {
        0 => {}
        idx => p1_dadd_affine(&mut buckets[(idx - 1) as usize], p, subtract),
    }
}
/// Number of significant bits in `l` (0 when `l == 0`).
pub const fn num_bits(l: usize) -> usize {
    (usize::BITS - l.leading_zeros()) as usize
}
/// Function, which approximates minimum of this function:
/// y = ceil(255/w) * (npoints + 2^w + w + 1)
/// This function is number of additions and doublings required to compute msm using Pippenger algorithm.
/// Parts of this function:
/// ceil(255/w) - how many parts will be in decomposed scalar. Scalar width is 255 bits, so converting it into q-ary
/// representation, will produce 255/w parts. q-ary representation, where q = 2^w, for scalar a is:
/// a = a_1 + a_2 * q + ... + a_n * q^(ceil(255/w)).
/// npoints - each scalar must be assigned to a bucket (bucket accumulation). Assigning point to bucket means
/// adding it to existing point in bucket - hence, the addition.
/// 2^w - computing total bucket sum (bucket aggregation). Total number of buckets (scratch size) is 2^(w-1).
/// Adding each point to total bucket sum requires 2 point addition operations, so 2 * 2^(w-1) = 2^w.
/// w + 1 - each bucket sum must be multiplied by 2^w. To do this, we need w doublings. Adding this sum to the
/// total requires one more point addition, hence +1.
///
/// The window can be overridden at build time via the WINDOW_SIZE
/// environment variable.
pub fn pippenger_window_size(npoints: usize) -> usize {
    option_env!("WINDOW_SIZE")
        .map(|v| {
            v.parse()
                .expect("WINDOW_SIZE environment variable must be valid number")
        })
        // `unwrap_or_else` keeps the fallback lazy. The previous
        // `unwrap_or({ ... })` evaluated the block eagerly as a call
        // argument, so the `return` statements inside it exited this
        // function *before* the WINDOW_SIZE override could be applied
        // whenever num_bits(npoints) > 5.
        .unwrap_or_else(|| {
            let wbits = num_bits(npoints);
            if wbits > 13 {
                wbits - 4
            } else if wbits > 5 {
                wbits - 3
            } else {
                2
            }
        })
}
/// Unit tests for [`booth_encode`] and [`get_wval_limb`].
#[cfg(test)]
mod tests {
    use crate::{
        msm::pippenger_utils::{booth_encode, get_wval_limb},
        Scalar256,
    };
    #[test]
    fn booth_encode_must_produce_correct_results() {
        assert_eq!(booth_encode(0, 1), 0);
        assert_eq!(booth_encode(0, 5), 0);
        assert_eq!(booth_encode(1, 1), 1);
        // Negative (sign bit set) indices come back as two's-complement u64.
        assert_eq!(booth_encode(55, 5), 18446744073709551588);
    }
    #[test]
    fn get_wval_limb_example_1() {
        let val = get_wval_limb(
            &Scalar256 {
                data: [0b01010111u64, 0u64, 0u64, 0u64],
            },
            0,
            4,
        );
        assert_eq!(val, 0b01010111);
        // if you want to get value containing only extracted bits and zeros, do bitwise and on return value with mask:
        assert_eq!(val & 0b00001111, 0b00000111);
    }
    #[test]
    fn get_wval_limb_example_2() {
        // Scalars are represented with 32 bytes. To simplify example, let's say our scalars are only 4 bytes long.
        // Then, we can take `q` as `2^6`. Then consider scalar value `4244836224`, bytes: `[128u8, 15u8, 3u8, 253u8]`
        // (little-endian order). So if we repeatedly take 6 bits from this scalar, we will get q-ary representation
        // of this scalar:
        // this is [128u8, 15u8, 3u8, 253u8] written in binary
        let scalar = Scalar256 {
            data: [0b11111101000000110000111110000000u64, 0u64, 0, 0],
        };
        let limb_1 = get_wval_limb(&scalar, 0, 6);
        // function leaves trash on all other bytes, so real answer only in 6 bits from right
        assert_eq!(limb_1 & 0b00111111, 0b00000000 /* 0 */); // 11111101000000110000111110|000000|
        let limb_2 = get_wval_limb(&scalar, 6, 6);
        assert_eq!(limb_2 & 0b00111111, 0b00111110 /* 62 */); // 11111101000000110000|111110|000000
        let limb_3 = get_wval_limb(&scalar, 12, 6);
        assert_eq!(limb_3 & 0b00111111, 0b00110000 /* 48 */); // 11111101000000|110000|111110000000
        let limb_4 = get_wval_limb(&scalar, 18, 6);
        assert_eq!(limb_4 & 0b00111111, 0b00000000 /* 0 */); // 11111101|000000|110000111110000000
        let limb_5 = get_wval_limb(&scalar, 24, 6);
        assert_eq!(limb_5 & 0b00111111, 0b00111101 /* 61 */); // 11|111101|000000110000111110000000
        let limb_r = get_wval_limb(&scalar, 28, 8 % 6); // get remaining part
        assert_eq!(limb_r & 0b00000011, 0b00000011 /* 3 */); // |11|111101000000110000111110000000
        // This gives q-ary representation of scalar `4244836224`, where `q` = `2^6` = `64`:
        // 4244836224 = 0 * 64^0 + 62 * 64^1 + 48 * 64^2 + 0 * 64^3 + 61 * 64^4 + 3 * 64^5
    }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/msm/types.rs | kzg/src/msm/types.rs | // use ark_bls12_381::G1Affine;
// use ark_ec::{models::CurveConfig, AffineRepr};
// use ark_ff::{FpConfig, PrimeField, Field};
/// Bit width of a G1 scalar. TODO: Double check if not 256
pub const G1_SCALAR_SIZE: u32 = 255;
/// Scalar bit width under GLV decomposition — presumably half of the full
/// scalar width; confirm at use sites.
pub const G1_SCALAR_SIZE_GLV: u32 = 128u32;
/// log2 of [`GROUP_SIZE`].
pub const GROUP_SIZE_IN_BITS: usize = 6;
/// Group size (2^GROUP_SIZE_IN_BITS) — presumably the batch size used by the
/// arkmsm batch adder; confirm at use sites.
pub const GROUP_SIZE: usize = 1 << GROUP_SIZE_IN_BITS;
// pub type G1BigInt = <<G1Affine as AffineRepr>::ScalarField as PrimeField>::BigInt;
// pub type G1Projective = <G1Affine as AffineRepr>::Group;
// pub type G1ScalarField = <G1Affine as AffineRepr>::ScalarField;
// pub type G1BaseField = <G1Affine as AffineRepr>::BaseField;
// pub type BigInt<P> = <<P as CurveConfig>::ScalarField as PrimeField>::BigInt;
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/msm/mod.rs | kzg/src/msm/mod.rs | pub mod arkmsm;
pub mod cell;
pub mod msm_impls;
pub mod precompute;
#[cfg(feature = "parallel")]
pub mod thread_pool;
#[cfg(feature = "parallel")]
pub mod tiling_parallel_pippenger;
pub mod tiling_pippenger_ops;
pub mod types;
#[cfg(feature = "parallel")]
mod parallel_pippenger_utils;
mod pippenger_utils;
#[cfg(feature = "bgmw")]
mod bgmw;
#[cfg(feature = "sppark")]
mod sppark;
#[cfg(feature = "wbits")]
mod wbits;
#[cfg(all(feature = "diskcache", feature = "wbits"))]
mod diskcache;
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/msm/thread_pool.rs | kzg/src/msm/thread_pool.rs | pub trait ThreadPoolExt {
fn joined_execute<'any, F>(&self, job: F)
where
F: FnOnce() + Send + 'any;
}
use core::mem::transmute;
use std::sync::{Mutex, Once};
use threadpool::ThreadPool;
/// Return a handle to the process-wide shared thread pool.
///
/// The pool is created once on first use; `ThreadPool` handles are cloned
/// out of the shared instance, so all callers schedule onto the same pool.
pub fn da_pool() -> ThreadPool {
    // `OnceLock` replaces the previous `static mut` + `Once` + `transmute`
    // scheme, which leaked a Box and read a mutable static through a raw
    // pointer (a pattern that is easy to misuse and is rejected in edition
    // 2024). Behavior is identical: one lazily-created global pool.
    static POOL: std::sync::OnceLock<Mutex<ThreadPool>> = std::sync::OnceLock::new();
    POOL.get_or_init(|| Mutex::new(ThreadPool::default()))
        .lock()
        .unwrap()
        .clone()
}
// Owned, sendable closure with an arbitrary (possibly non-'static) lifetime.
type Thunk<'any> = Box<dyn FnOnce() + Send + 'any>;
impl ThreadPoolExt for ThreadPool {
    fn joined_execute<'scope, F>(&self, job: F)
    where
        F: FnOnce() + Send + 'scope,
    {
        // Bypass 'lifetime limitations by brute force. It works,
        // because we explicitly join the threads...
        // SAFETY(review): the transmute erases 'scope, so this is only sound
        // if every caller synchronizes with the submitted jobs (join / channel
        // receive) before the borrowed data goes out of scope — confirm at
        // call sites.
        self.execute(unsafe { transmute::<Thunk<'scope>, Thunk<'static>>(Box::new(job)) })
    }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/msm/bgmw.rs | kzg/src/msm/bgmw.rs | use core::marker::PhantomData;
use crate::{
msm::tiling_pippenger_ops::p1_integrate_buckets, Fr, G1Affine, G1Fp, G1GetFp, G1Mul,
G1ProjAddAffine, Scalar256, G1,
};
use super::pippenger_utils::{
booth_decode, booth_encode, get_wval_limb, is_zero, num_bits, P1XYZZ,
};
/// Precomputation table for BGMW-style windowed multi-scalar multiplication.
///
/// `points` stores, at index `j * numpoints + i`, the affine form of
/// `base[i] * q^j` where `q = 2^window_width` and `j < h` (see `new`), so
/// each scalar window can be accumulated directly without inter-window
/// doublings. The `batch_*` fields hold the analogous per-row tables used by
/// `multiply_batch`.
#[derive(Debug, Clone)]
pub struct BgmwTable<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>
where
    TFr: Fr,
    TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
{
    // Window configuration for the main table.
    window: BgmwWindow,
    // Row-major table: entry j * numpoints + i == base[i] * q^j.
    points: Vec<TG1Affine>,
    // Number of base points per table row.
    numpoints: usize,
    // Number of table rows (windows per scalar).
    h: usize,
    // Same as above, for the batched (matrix) tables.
    batch_window: BgmwWindow,
    batch_points: Vec<Vec<TG1Affine>>,
    batch_numpoints: usize,
    batch_h: usize,
    g1_marker: PhantomData<TG1>,
    g1_fp_marker: PhantomData<TG1Fp>,
    fr_marker: PhantomData<TFr>,
    g1_affine_add_marker: PhantomData<TG1ProjAddAffine>,
}
// Bit width of a scalar.
const NBITS: usize = 255;
/// Window configuration: either a plain sequential window width, or a
/// parallel tile layout `(nx, ny, window)` — `nx` point-range splits, `ny`
/// window rows, and the window width itself.
#[cfg(feature = "parallel")]
#[derive(Debug, Clone, Copy)]
enum BgmwWindow {
    Sync(usize),
    Parallel((usize, usize, usize)),
}
// Without the `parallel` feature the window is just its width.
#[cfg(not(feature = "parallel"))]
type BgmwWindow = usize;
/// Resolve a [`BgmwWindow`] into `(window_width, h)`, where `h` is the
/// number of precomputed table rows (scalar windows).
#[inline]
const fn get_table_dimensions(window: BgmwWindow) -> (usize, usize) {
    let window_width;
    #[cfg(not(feature = "parallel"))]
    {
        window_width = window;
    }
    #[cfg(feature = "parallel")]
    {
        window_width = match window {
            BgmwWindow::Sync(wnd) => wnd,
            // Parallel layout already carries its row count.
            BgmwWindow::Parallel((_, ny, wnd)) => return (wnd, ny),
        }
    }
    // ceil(255 / w) rows, plus one extra row when w divides 255 exactly
    // (is_zero yields 1 in that case).
    let h = NBITS.div_ceil(window_width) + is_zero((NBITS % window_width) as u64) as usize;
    (window_width, h)
}
/// Extract the sequential window width from a [`BgmwWindow`].
///
/// Panics when given a parallel layout, which has no single sequential width.
#[inline]
const fn get_sequential_window_size(window: BgmwWindow) -> usize {
    #[cfg(not(feature = "parallel"))]
    {
        window
    }
    #[cfg(feature = "parallel")]
    {
        match window {
            BgmwWindow::Sync(wnd) => wnd,
            BgmwWindow::Parallel(_) => {
                panic!("Cannot use parallel BGMW table in sequential version")
            }
        }
    }
}
/// Function, which approximates minimum of this function:
/// y = ceil(255/w) * (npoints) + 2^w - 2
/// This function is number of additions and doublings required to compute msm using Pippenger algorithm, with BGMW
/// precomputation table.
/// Parts of this function:
/// ceil(255/w) - how many parts will be in decomposed scalar. Scalar width is 255 bits, so converting it into q-ary
/// representation, will produce 255/w parts. q-ary representation, where q = 2^w, for scalar a is:
/// a = a_1 + a_2 * q + ... + a_n * q^(ceil(255/w)).
/// npoints - each scalar must be assigned to a bucket (bucket accumulation). Assigning point to bucket means
/// adding it to existing point in bucket - hence, the addition.
/// 2^w - 2 - computing total bucket sum (bucket aggregation). Total number of buckets (scratch size) is 2^(w-1).
/// Adding each point to total bucket sum requires 2 point addition operations, so 2 * 2^(w-1) = 2^w.
#[allow(unused)]
fn bgmw_window_size(npoints: usize) -> usize {
    option_env!("WINDOW_SIZE")
        .map(|v| {
            v.parse()
                .expect("WINDOW_SIZE environment variable must be valid number")
        })
        // `unwrap_or_else` keeps the fallback lazy; the previous
        // `unwrap_or({ ... })` computed the lookup even when the WINDOW_SIZE
        // override was present (clippy::or_fun_call). Also drops the
        // redundant parentheses of `match (wbits)`.
        .unwrap_or_else(|| {
            // Precomputed optimum window width per bit-length of npoints.
            match num_bits(npoints) {
                1 => 4,
                2..=3 => 5,
                4 => 6,
                5 => 7,
                6..=7 => 8,
                8 => 9,
                9..=10 => 10,
                11 => 11,
                12 => 12,
                13..=14 => 13,
                15..=16 => 15,
                17 => 16,
                18..=19 => 17,
                20 => 19,
                21..=22 => 20,
                23..=24 => 22,
                25..=26 => 24,
                27..=29 => 26,
                30..=32 => 29,
                33..=37 => 32,
                _ => 37,
            }
        })
}
/// Pick a parallel BGMW tile layout `(nx, ny, window)` for `npoints` bases
/// on `ncpus` workers, either from the WINDOW_NX/WINDOW_SIZE build-time
/// overrides or by scanning candidate layouts for the lowest estimated
/// operation count.
#[cfg(feature = "parallel")]
#[allow(clippy::option_env_unwrap)]
fn bgmw_parallel_window_size(npoints: usize, ncpus: usize) -> (usize, usize, usize) {
    option_env!("WINDOW_NX")
        .and_then(|v| v.parse().ok())
        .map(|nx| {
            let wnd = option_env!("WINDOW_SIZE")
                .expect(
                    "Unable to use BGMW: when specifying WINDOW_NX environment \
                        variable, please also specify WINDOW_SIZE",
                )
                .parse()
                .expect("WINDOW_SIZE environment variable must be valid number");
            (
                nx,
                // Row count: ceil(255 / wnd), plus one when wnd divides 255.
                255usize.div_ceil(wnd) + is_zero((NBITS % wnd) as u64) as usize,
                wnd,
            )
        })
        .unwrap_or({
            // Pass 1: single point range (nx = 1), windows split across cpus.
            let mut min_ops = usize::MAX;
            let mut opt = 0;
            let mut win = 2;
            while win <= 40 {
                let ops = (1 << win) + (255usize.div_ceil(win).div_ceil(ncpus) * npoints) - 2;
                if min_ops >= ops {
                    min_ops = ops;
                    opt = win;
                }
                win += 1;
            }
            // Pass 2: split the points into nx = ncpus * mult ranges and use
            // the sequential window heuristic per range.
            let mut mult = 1;
            let mut opt_x = 1;
            while mult <= 8 {
                let nx = ncpus * mult;
                let wnd = bgmw_window_size(npoints / nx);
                let ops = mult * 255usize.div_ceil(wnd) * npoints.div_ceil(nx) + (1 << wnd) - 2;
                if min_ops > ops {
                    min_ops = ops;
                    opt = wnd;
                    opt_x = nx;
                }
                mult += 1;
            }
            (
                opt_x,
                255usize.div_ceil(opt) + is_zero((NBITS % opt) as u64) as usize,
                opt,
            )
        })
}
impl<
TFr: Fr,
TG1Fp: G1Fp,
TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp>,
TG1Affine: G1Affine<TG1, TG1Fp>,
TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
> BgmwTable<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>
{
/// Build the BGMW precomputation table(s).
///
/// The main table stores, at index `j * points.len() + i`, the affine form
/// of `points[i] * q^j` with `q = 2^window_width` and `j < h`; row `j` then
/// serves scalar window `j` during multiplication. When `matrix` is
/// non-empty, an analogous table is built per matrix row for
/// `multiply_batch`.
pub fn new(points: &[TG1], matrix: &[Vec<TG1>]) -> Result<Option<Self>, String> {
    let window = Self::window(points.len());
    let (window_width, h) = get_table_dimensions(window);
    let mut table: Vec<TG1Affine> = Vec::new();
    let q = TFr::from_u64(1u64 << window_width);
    table
        .try_reserve_exact(points.len() * h)
        .map_err(|_| "BGMW precomputation table is too large".to_string())?;
    // NOTE(review): `set_len` exposes uninitialized `TG1Affine` values that
    // are then overwritten by plain assignment below; this is undefined
    // behavior if `TG1Affine` is ever `Drop` or has invalid bit patterns —
    // consider `MaybeUninit` or filling in push order.
    unsafe { table.set_len(points.len() * h) };
    for i in 0..points.len() {
        let mut tmp_point = points[i].clone();
        for j in 0..h {
            let idx = j * points.len() + i;
            table[idx] = TG1Affine::into_affine(&tmp_point);
            // Scale by q so row j + 1 holds points[i] * q^(j + 1).
            tmp_point = tmp_point.mul(&q);
        }
    }
    if matrix.is_empty() {
        Ok(Some(Self {
            numpoints: points.len(),
            points: table,
            window,
            h,
            // Placeholder batch window; batch tables are unused in this case.
            batch_window: {
                #[cfg(feature = "parallel")]
                let w = BgmwWindow::Sync(0);
                #[cfg(not(feature = "parallel"))]
                let w = 0;
                w
            },
            batch_numpoints: 0,
            batch_points: Vec::new(),
            batch_h: 0,
            fr_marker: PhantomData,
            g1_fp_marker: PhantomData,
            g1_marker: PhantomData,
            g1_affine_add_marker: PhantomData,
        }))
    } else {
        let batch_numpoints = matrix[0].len();
        let batch_window = Self::sequential_window(batch_numpoints);
        let (batch_window_width, batch_h) = get_table_dimensions(batch_window);
        let batch_q = TFr::from_u64(1u64 << batch_window_width);
        let mut batch_points = Vec::new();
        batch_points
            .try_reserve_exact(matrix.len())
            .map_err(|_| "BGMW precomputation table is too large".to_owned())?;
        for row in matrix {
            let mut temp_table = Vec::new();
            temp_table
                .try_reserve_exact(row.len() * batch_h)
                .map_err(|_| "BGMW precomputation table is too large".to_owned())?;
            // NOTE(review): same uninitialized-then-assign pattern as above.
            unsafe {
                temp_table.set_len(temp_table.capacity());
            }
            for i in 0..row.len() {
                let mut tmp_point = row[i].clone();
                for j in 0..batch_h {
                    let idx = j * row.len() + i;
                    temp_table[idx] = TG1Affine::into_affine(&tmp_point);
                    tmp_point = tmp_point.mul(&batch_q);
                }
            }
            batch_points.push(temp_table);
        }
        Ok(Some(Self {
            numpoints: points.len(),
            points: table,
            window,
            h,
            batch_window,
            batch_numpoints,
            batch_points,
            batch_h,
            fr_marker: PhantomData,
            g1_fp_marker: PhantomData,
            g1_marker: PhantomData,
            g1_affine_add_marker: PhantomData,
        }))
    }
}
/// Run one MSM per batch table row, returning one result per row.
///
/// Sequential build: rows are processed one by one, reusing one bucket
/// buffer. Parallel build: rows are handed out to pool workers through an
/// atomic counter; each worker owns its bucket buffer and writes its result
/// into a `Cell` slot indexed by the row.
pub fn multiply_batch(&self, scalars: &[Vec<TFr>]) -> Vec<TG1> {
    assert!(scalars.len() == self.batch_points.len());
    #[cfg(not(feature = "parallel"))]
    {
        let window = get_sequential_window_size(self.batch_window);
        let mut buckets = vec![P1XYZZ::<TG1Fp>::default(); 1 << (window - 1)];
        self.batch_points
            .iter()
            .zip(scalars)
            .map(|(points, scalars)| {
                Self::multiply_sequential_raw(
                    points,
                    scalars,
                    &mut buckets,
                    window,
                    self.batch_numpoints,
                    self.batch_h,
                )
            })
            .collect::<Vec<_>>()
    }
    #[cfg(feature = "parallel")]
    {
        use super::{
            cell::Cell,
            thread_pool::{da_pool, ThreadPoolExt},
        };
        use core::sync::atomic::{AtomicUsize, Ordering};
        use std::sync::Arc;
        let window = get_sequential_window_size(self.batch_window);
        let pool = da_pool();
        let ncpus = pool.max_count();
        let counter = Arc::new(AtomicUsize::new(0));
        // One result slot per row; slots are written exactly once by the
        // worker that claims the row.
        let mut results: Vec<Cell<TG1>> = Vec::with_capacity(scalars.len());
        #[allow(clippy::uninit_vec)]
        unsafe {
            results.set_len(results.capacity())
        };
        let results = &results[..];
        for _ in 0..ncpus {
            let counter = counter.clone();
            pool.joined_execute(move || {
                let mut buckets = vec![P1XYZZ::<TG1Fp>::default(); 1 << (window - 1)];
                loop {
                    // Claim the next unprocessed row.
                    let work = counter.fetch_add(1, Ordering::Relaxed);
                    if work >= scalars.len() {
                        break;
                    }
                    let result = Self::multiply_sequential_raw(
                        &self.batch_points[work],
                        &scalars[work],
                        &mut buckets,
                        window,
                        self.batch_numpoints,
                        self.batch_h,
                    );
                    unsafe { *results[work].as_ptr().as_mut().unwrap() = result };
                }
            });
        }
        // Wait for all workers before reading the result slots.
        pool.join();
        results.iter().map(|it| it.as_mut().clone()).collect()
    }
}
/// Sequential BGMW MSM over one precomputed table.
///
/// Table row `q_idx` already holds the base points multiplied by `q^q_idx`,
/// so every scalar window is accumulated into the same bucket set and no
/// doubling pass between windows is needed; a single bucket integration at
/// the end produces the result.
fn multiply_sequential_raw(
    points: &[TG1Affine],
    scalars: &[TFr],
    buckets: &mut [P1XYZZ<TG1Fp>],
    window: usize,
    numpoints: usize,
    h: usize,
) -> TG1 {
    let scalars = scalars.iter().map(TFr::to_scalar).collect::<Vec<_>>();
    let scalars = &scalars[..];
    // Topmost window is the 255 % window remainder (one extra Booth bit).
    let mut wbits: usize = 255 % window;
    let mut cbits: usize = wbits + 1;
    let mut bit0: usize = 255;
    let mut q_idx = h;
    loop {
        bit0 -= wbits;
        q_idx -= 1;
        if bit0 == 0 {
            break;
        }
        p1_tile_bgmw(
            &points[q_idx * numpoints..(q_idx + 1) * numpoints],
            scalars,
            buckets,
            bit0,
            wbits,
            cbits,
        );
        cbits = window;
        wbits = window;
    }
    // Lowest window uses table row 0.
    p1_tile_bgmw(&points[0..numpoints], scalars, buckets, 0, wbits, cbits);
    let mut ret = TG1::default();
    p1_integrate_buckets(&mut ret, buckets, wbits - 1);
    ret
}
/// Sequential MSM of `scalars` against this table's base points.
pub fn multiply_sequential(&self, scalars: &[TFr]) -> TG1 {
    let window = get_sequential_window_size(self.window);
    // Scratch buckets for one window's accumulation: 2^(window-1) entries.
    let mut buckets = vec![P1XYZZ::<TG1Fp>::default(); 1 << (window - 1)];
    Self::multiply_sequential_raw(
        &self.points,
        scalars,
        &mut buckets,
        window,
        self.numpoints,
        self.h,
    )
}
/// Parallel MSM of `scalars` against this table's base points.
///
/// The work is split into an `nx * ny` grid of tiles (point range x window
/// row); workers claim tiles via an atomic counter, accumulate them into
/// their private bucket set, and — since each table row is pre-scaled by
/// `q^row` — the per-worker partial results only need to be added together
/// at the end.
#[cfg(feature = "parallel")]
pub fn multiply_parallel(&self, scalars: &[TFr]) -> TG1 {
    use super::{
        cell::Cell,
        thread_pool::{da_pool, ThreadPoolExt},
        tiling_pippenger_ops::tiling_pippenger,
    };
    use core::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::{mpsc, Arc};
    let npoints = scalars.len();
    let pool = da_pool();
    let ncpus = pool.max_count();
    // Too little work to amortize parallelism: fall back to plain Pippenger
    // over table row 0 (the unscaled base points).
    if ncpus > npoints || npoints < 32 {
        let scalars = scalars.iter().map(TFr::to_scalar).collect::<Vec<_>>();
        return tiling_pippenger(&self.points[0..npoints], &scalars);
    }
    struct Tile {
        x: usize,
        dx: usize,
        y: usize,
        dy: usize,
    }
    let (nx, ny, window) = match self.window {
        BgmwWindow::Sync(_) => return self.multiply_sequential(scalars),
        BgmwWindow::Parallel(values) => values,
    };
    let scalars = scalars.iter().map(TFr::to_scalar).collect::<Vec<_>>();
    let scalars = &scalars[..];
    // |grid[]| holds "coordinates"
    // NOTE(review): `set_len` exposes uninitialized `Tile`s that are then
    // field-assigned below — works for these plain-usize tiles but is the
    // same uninitialized-then-assign pattern flagged in `new`.
    let mut grid: Vec<Tile> = Vec::with_capacity(nx * ny);
    #[allow(clippy::uninit_vec)]
    unsafe {
        grid.set_len(grid.capacity())
    };
    let dx = npoints / nx;
    let mut y = window * (ny - 1);
    let mut total = 0usize;
    // First row of tiles covers the topmost (possibly narrower) window.
    while total < nx {
        grid[total].x = total * dx;
        grid[total].dx = dx;
        grid[total].y = y;
        grid[total].dy = NBITS - y;
        total += 1;
    }
    // Last tile in the row absorbs the npoints % nx remainder.
    grid[total - 1].dx = npoints - grid[total - 1].x;
    // Remaining rows are full-width windows with the same x-splits.
    while y != 0 {
        y -= window;
        for i in 0..nx {
            grid[total].x = grid[i].x;
            grid[total].dx = grid[i].dx;
            grid[total].y = y;
            grid[total].dy = window;
            total += 1;
        }
    }
    let grid = &grid[..];
    let mut row_sync: Vec<AtomicUsize> = Vec::with_capacity(ny);
    row_sync.resize_with(ny, Default::default);
    let counter = Arc::new(AtomicUsize::new(0));
    let (tx, rx) = mpsc::channel();
    let n_workers = core::cmp::min(ncpus, total);
    // One partial-result slot per worker, written once when it runs dry.
    let mut results: Vec<Cell<TG1>> = Vec::with_capacity(n_workers);
    #[allow(clippy::uninit_vec)]
    unsafe {
        results.set_len(results.capacity());
    };
    let results = &results[..];
    #[allow(clippy::needless_range_loop)]
    for worker_index in 0..n_workers {
        let tx = tx.clone();
        let counter = counter.clone();
        pool.joined_execute(move || {
            let mut buckets = vec![P1XYZZ::<TG1Fp>::default(); 1 << (window - 1)];
            loop {
                // Claim the next tile; when none remain, integrate this
                // worker's buckets and report completion.
                let work = counter.fetch_add(1, Ordering::Relaxed);
                if work >= total {
                    p1_integrate_buckets(
                        unsafe { results[worker_index].as_ptr().as_mut() }.unwrap(),
                        &mut buckets,
                        window - 1,
                    );
                    tx.send(worker_index).expect("disaster");
                    break;
                }
                let x = grid[work].x;
                let y = grid[work].y;
                let dx = grid[work].dx;
                // Select the pre-scaled table row for this window.
                let row_start = (y / window) * self.numpoints + x;
                let points = &self.points[row_start..(row_start + dx)];
                // Topmost window may be narrower; widen cbits for the Booth
                // carry bit in that case.
                let (wbits, cbits) = if y + window > NBITS {
                    let wbits = NBITS - y;
                    (wbits, wbits + 1)
                } else {
                    (window, window)
                };
                p1_tile_bgmw(points, &scalars[x..(x + dx)], &mut buckets, y, wbits, cbits);
            }
        });
    }
    // Sum the per-worker partial results as they finish.
    let mut ret = TG1::zero();
    for _ in 0..n_workers {
        let idx = rx.recv().unwrap();
        ret.add_or_dbl_assign(results[idx].as_mut());
    }
    ret
}
/// Choose the window configuration for the main table: a parallel tile
/// layout when the `parallel` feature is on and there is enough work,
/// otherwise a sequential window width.
fn window(npoints: usize) -> BgmwWindow {
    #[cfg(feature = "parallel")]
    {
        use super::thread_pool::da_pool;
        let pool = da_pool();
        let ncpus = pool.max_count();
        if npoints >= 32 && ncpus >= 2 {
            BgmwWindow::Parallel(bgmw_parallel_window_size(npoints, ncpus))
        } else {
            BgmwWindow::Sync(bgmw_window_size(npoints))
        }
    }
    #[cfg(not(feature = "parallel"))]
    {
        bgmw_window_size(npoints)
    }
}
/// Always choose a sequential window width (used for the batch tables,
/// which parallelize across rows rather than within one MSM).
fn sequential_window(npoints: usize) -> BgmwWindow {
    #[cfg(feature = "parallel")]
    {
        BgmwWindow::Sync(bgmw_window_size(npoints))
    }
    #[cfg(not(feature = "parallel"))]
    {
        bgmw_window_size(npoints)
    }
}
}
/// Accumulate one BGMW tile: like `p1s_tile_pippenger`, but with a one-step
/// software pipeline that computes the next window value an iteration ahead
/// (a placeholder for bucket prefetching — see the TODO below). Buckets are
/// not integrated here; the caller integrates once after all windows.
#[allow(clippy::too_many_arguments)]
pub fn p1_tile_bgmw<TG1: G1 + G1GetFp<TFp>, TFp: G1Fp, TG1Affine: G1Affine<TG1, TFp>>(
    points: &[TG1Affine],
    scalars: &[Scalar256],
    buckets: &mut [P1XYZZ<TFp>],
    bit0: usize,
    wbits: usize,
    cbits: usize,
) {
    if scalars.is_empty() {
        return;
    }
    // Get first scalar
    let scalar = &scalars[0];
    // Get first point
    let point = &points[0];
    // Create mask that keeps `wbits + 1` bits: Booth recoding also needs the
    // bit just below the window.
    let wmask = (1u64 << (wbits + 1)) - 1;
    /*
     * Check if `bit0` is zero. `z` is set to `1` when `bit0 = 0`, and `0` otherwise.
     *
     * The `z` flag is used to do a small trick: the lowest window has no bit
     * below it to widen into, so its value is shifted left by `z` instead.
     */
    let z = is_zero(bit0.try_into().unwrap());
    // Offset `bit0` by 1, if it is not equal to zero.
    let bit0 = bit0 - (z ^ 1) as usize;
    // Increase `wbits` by one, if `bit0` is not equal to zero.
    let wbits = wbits + (z ^ 1) as usize;
    // Calculate first window value (encoded bucket index)
    let wval = (get_wval_limb(scalar, bit0, wbits) << z) & wmask;
    let mut wval = booth_encode(wval, cbits);
    if scalars.len() == 1 {
        booth_decode(buckets, wval, cbits, point);
        return;
    }
    // Get second scalar
    let scalar = &scalars[1];
    // Calculate second window value (encoded bucket index)
    let wnxt = (get_wval_limb(scalar, bit0, wbits) << z) & wmask;
    let mut wnxt = booth_encode(wnxt, cbits);
    // Move first point to corresponding bucket
    booth_decode(buckets, wval, cbits, point);
    // Last point will be calculated separately, so decrementing point count
    let npoints = scalars.len() - 1;
    // Move points to buckets
    for i in 1..npoints {
        // Get current window value (encoded bucket index)
        wval = wnxt;
        // Get next scalar
        let scalar = &scalars[i + 1];
        // Get next window value (encoded bucket index)
        wnxt = (get_wval_limb(scalar, bit0, wbits) << z) & wmask;
        wnxt = booth_encode(wnxt, cbits);
        // TODO: add prefetching
        // POINTonE1_prefetch(buckets, wnxt, cbits);
        // p1_prefetch(buckets, wnxt, cbits);
        // Get current point
        let point = &points[i];
        // Move point to corresponding bucket (add or subtract from bucket)
        // `wval` contains encoded bucket index, as well as sign, which shows if point should be subtracted or added to bucket
        booth_decode(buckets, wval, cbits, point);
    }
    // Get last point
    let point = &points[npoints];
    // Move point to bucket
    booth_decode(buckets, wnxt, cbits, point);
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/msm/old_pippenger.rs | kzg/src/msm/old_pippenger.rs | // Leaving this here commented out, as requested by the lecturer
// use std::sync::mpsc::channel;
// use crate::{
// cfg_into_iter, common_utils::log2_u64, G1Affine, G1Fp, G1ProjAddAffine, Scalar256, G1, msm::batch_adder::{self, BatchAdder},
// };
// #[cfg(feature = "parallel")]
// use rayon::prelude::*;
// trait ThreadPoolExt {
// fn joined_execute<'any, F>(&self, job: F)
// where
// F: FnOnce() + Send + 'any;
// }
// mod mt {
// use super::*;
// use core::mem::transmute;
// use std::sync::{Mutex, Once};
// use threadpool::ThreadPool;
// pub fn da_pool() -> ThreadPool {
// static INIT: Once = Once::new();
// static mut POOL: *const Mutex<ThreadPool> =
// 0 as *const Mutex<ThreadPool>;
// INIT.call_once(|| {
// let pool = Mutex::new(ThreadPool::default());
// unsafe { POOL = transmute(Box::new(pool)) };
// });
// unsafe { (*POOL).lock().unwrap().clone() }
// }
// type Thunk<'any> = Box<dyn FnOnce() + Send + 'any>;
// impl ThreadPoolExt for ThreadPool {
// fn joined_execute<'scope, F>(&self, job: F)
// where
// F: FnOnce() + Send + 'scope,
// {
// // Bypass 'lifetime limitations by brute force. It works,
// // because we explicitly join the threads...
// self.execute(unsafe {
// transmute::<Thunk<'scope>, Thunk<'static>>(Box::new(job))
// })
// }
// }
// }
// macro_rules! cfg_into_mut_chunks {
// ($e: expr, $f: expr) => {{
// #[cfg(feature = "parallel")]
// let result = $e.par_chunks_mut($f);
// #[cfg(not(feature = "parallel"))]
// let result = $e.chunks_mut($f);
// result
// }};
// }
// pub fn parallel_pippinger<
// TG1: G1,
// TG1Fp: G1Fp,
// TG1Affine: G1Affine<TG1, TG1Fp>,
// ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
// >(
// bases: &[TG1Affine],
// scalars: &[Scalar256],
// ) -> TG1 {
// const NUM_BITS: u32 = 255;
// // Limit scalars & bases to lower of the two
// let size = std::cmp::min(bases.len(), scalars.len());
// let scalars = &scalars[..size];
// let bases = &bases[..size];
// let scalars_and_bases_iter = scalars
// .iter()
// .zip(bases)
// .filter(|(s, _)| **s != Scalar256::ZERO);
// let c = if size < 32 {
// 3
// } else {
// ((log2_u64(size) * 69 / 100) as usize) + 2
// };
// // Divide 0..NUM_BITS into windows of size c & process in parallel
// let mut window_sums = [TG1::ZERO; NUM_BITS as usize];
// cfg_into_iter!(0..NUM_BITS)
// .step_by(c)
// .zip(&mut window_sums)
// .for_each(|(w_start, window_sums)| {
// // We don't need the "zero" bucket, so we only have 2^c - 1 buckets.
// let mut buckets = vec![TG1::ZERO; (1 << c) - 1];
// scalars_and_bases_iter.clone().for_each(|(scalar, base)| {
// if *scalar == Scalar256::ONE {
// if w_start == 0 {
// ProjAddAffine::add_assign_affine(window_sums, base);
// }
// } else {
// let mut scalar = scalar.data;
// scalar_divn(&mut scalar, w_start);
// let scalar = scalar[0] % (1 << c);
// if scalar != 0 {
// let idx = (scalar - 1) as usize;
// ProjAddAffine::add_or_double_assign_affine(&mut buckets[idx], base);
// }
// }
// });
// let mut running_sum = TG1::ZERO;
// buckets.into_iter().rev().for_each(|b| {
// running_sum.add_or_dbl_assign(&b);
// window_sums.add_or_dbl_assign(&running_sum);
// });
// });
// // Traverse windows from high to low
// let lowest = window_sums.first().unwrap();
// lowest.add(
// &window_sums[1..]
// .iter()
// .rev()
// .fold(TG1::ZERO, |mut total, sum_i| {
// total.add_assign(sum_i);
// for _ in 0..c {
// total.dbl_assign();
// }
// total
// }),
// )
// }
// fn scalar_divn<const N: usize>(input: &mut [u64; N], mut n: u32) {
// if n >= (64 * N) as u32 {
// *input = [0u64; N];
// return;
// }
// while n >= 64 {
// let mut t = 0;
// for i in 0..N {
// core::mem::swap(&mut t, &mut input[N - i - 1]);
// }
// n -= 64;
// }
// if n > 0 {
// let mut t = 0;
// #[allow(unused)]
// for i in 0..N {
// let a = &mut input[N - i - 1];
// let t2 = *a << (64 - n);
// *a >>= n;
// *a |= t;
// t = t2;
// }
// }
// }
// // Compute msm using windowed non-adjacent form
// pub fn parallel_pippinger_wnaf<
// TG1: G1,
// TG1Fp: G1Fp,
// TG1Affine: G1Affine<TG1, TG1Fp>,
// ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
// >(
// bases: &[TG1Affine],
// scalars: &[Scalar256],
// ) -> TG1 {
// const NUM_BITS: usize = 255;
// // Limit scalars & bases to lower of the two
// let size = std::cmp::min(bases.len(), scalars.len());
// let scalars = &scalars[..size];
// let bases = &bases[..size];
// let c = if size < 32 {
// 3
// } else {
// ((log2_u64(size) * 69 / 100) as usize) + 2
// };
// let digits_count = (NUM_BITS + c - 1) / c;
// let mut scalar_digits = vec![0i64; digits_count * scalars.len()];
// cfg_into_mut_chunks!(scalar_digits, digits_count)
// .zip(scalars)
// // .filter(|(_, s)| **s != Scalar256::ZERO)
// .for_each(|(chunk , scalar)| {
// make_digits_into(scalar, c, chunk);
// });
// let pool = mt::da_pool();
// let ncpus = pool.max_count();
// let scalar_digits_it = scalar_digits.chunks(digits_count).zip(bases);
// let (tx, rx) = channel();
// for i in (0..digits_count).rev() {
// let tx = tx.clone();
// let i = i.clone();
// let scalar_digits_it = scalar_digits_it.clone();
// pool.joined_execute(move || {
// let mut buckets = vec![TG1::ZERO; 1 << c];
// for ( digits, base) in scalar_digits_it {
// use core::cmp::Ordering;
// let scalar = digits[i];
// match 0.cmp(&scalar) {
// Ordering::Less => ProjAddAffine::add_assign_affine(&mut buckets[(scalar - 1) as usize], base),
// Ordering::Greater => ProjAddAffine::sub_assign_affine(&mut buckets[(-scalar - 1) as usize], *base),
// Ordering::Equal => (),
// }
// }
// let mut running_sum = TG1::ZERO;
// let mut window_sum = TG1::ZERO;
// buckets.into_iter().rev().for_each(|b| {
// running_sum.add_or_dbl_assign(&b);
// window_sum.add_or_dbl_assign(&running_sum);
// });
// tx.send(window_sum);
// });
// }
// let mut total_window_sum = TG1::ZERO;
// for _ in 0..digits_count {
// total_window_sum.add_assign(&rx.recv().unwrap());
// for _ in 0..c {
// total_window_sum.dbl_assign();
// }
// }
// total_window_sum
// // let lowest = window_sums.first().unwrap();
// // lowest.add(
// // &window_sums[1..]
// // .iter()
// // .rev()
// // .fold(TG1::ZERO, |mut total, sum_i| {
// // total.add_assign(sum_i);
// // for _ in 0..c {
// // total.dbl_assign();
// // }
// // total
// // }))
// }
// // From: https://github.com/arkworks-rs/gemini/blob/main/src/kzg/msm/variable_base.rs#L20
// fn make_digits(a: &Scalar256, w: usize) -> Vec<i64> {
// let scalar = &a.data;
// let radix: u64 = 1 << w;
// let window_mask: u64 = radix - 1;
// const NUM_BITS: usize = 255;
// let mut carry = 0u64;
// let digits_count = (NUM_BITS + w - 1) / w;
// let mut digits = vec![0i64; digits_count];
// for (i, digit) in digits.iter_mut().enumerate() {
// // Construct a buffer of bits of the scalar, starting at `bit_offset`.
// let bit_offset = i * w;
// let u64_idx = bit_offset / 64;
// let bit_idx = bit_offset % 64;
// // Read the bits from the scalar
// let bit_buf = if bit_idx < 64 - w || u64_idx == scalar.len() - 1 {
// // This window's bits are contained in a single u64,
// // or it's the last u64 anyway.
// scalar[u64_idx] >> bit_idx
// } else {
// // Combine the current u64's bits with the bits from the next u64
// (scalar[u64_idx] >> bit_idx) | (scalar[1 + u64_idx] << (64 - bit_idx))
// };
// // Read the actual coefficient value from the window
// let coef = carry + (bit_buf & window_mask); // coef = [0, 2^r)
// // Recenter coefficients from [0,2^w) to [-2^w/2, 2^w/2)
// carry = (coef + radix / 2) >> w;
// *digit = (coef as i64) - (carry << w) as i64;
// }
// digits[digits_count - 1] += (carry << w) as i64;
// digits
// }
// // From: https://github.com/arkworks-rs/gemini/blob/main/src/kzg/msm/variable_base.rs#L20
// fn make_digits_into(a: &Scalar256, w: usize, digits: &mut [i64]) {
// let scalar = &a.data;
// let radix: u64 = 1 << w;
// let window_mask: u64 = radix - 1;
// const NUM_BITS: usize = 255;
// let mut carry = 0u64;
// let digits_count = (NUM_BITS + w - 1) / w;
// for (i, digit) in digits.iter_mut().enumerate() {
// // Construct a buffer of bits of the scalar, starting at `bit_offset`.
// let bit_offset = i * w;
// let u64_idx = bit_offset / 64;
// let bit_idx = bit_offset % 64;
// // Read the bits from the scalar
// let bit_buf = if bit_idx < 64 - w || u64_idx == scalar.len() - 1 {
// // This window's bits are contained in a single u64,
// // or it's the last u64 anyway.
// scalar[u64_idx] >> bit_idx
// } else {
// // Combine the current u64's bits with the bits from the next u64
// (scalar[u64_idx] >> bit_idx) | (scalar[1 + u64_idx] << (64 - bit_idx))
// };
// // Read the actual coefficient value from the window
// let coef = carry + (bit_buf & window_mask); // coef = [0, 2^r)
// // Recenter coefficients from [0,2^w) to [-2^w/2, 2^w/2)
// carry = (coef + radix / 2) >> w;
// *digit = (coef as i64) - (carry << w) as i64;
// }
// digits[digits_count - 1] += (carry << w) as i64;
// } | rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/msm/wbits.rs | kzg/src/msm/wbits.rs | /// This algorithm is taken from https://github.com/crate-crypto/rust-eth-kzg
use core::{marker::PhantomData, ops::Neg};
use crate::{Fr, G1Affine, G1Fp, G1GetFp, G1Mul, G1ProjAddAffine, G1};
#[cfg(feature = "diskcache")]
use crate::msm::diskcache::DiskCache;
/// Precomputed window ("wbits") tables used to speed up multi-scalar
/// multiplication: every base point is expanded into its first
/// `2^(window_size - 1)` multiples (see `WbitsTable::new`).
#[derive(Debug, Clone)]
pub struct WbitsTable<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>
where
    TFr: Fr,
    TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
{
    // Number of base points covered by `points`.
    numpoints: usize,
    // Flattened window table: `2^(window_size - 1)` consecutive affine
    // multiples per base point (filled in `new`).
    points: Vec<TG1Affine>,
    // Number of base points in each row of `batch_points`.
    batch_numpoints: usize,
    // One window table per row of the input matrix (see `new`).
    batch_points: Vec<Vec<TG1Affine>>,
    // Markers so every generic parameter is used by the struct definition.
    g1_marker: PhantomData<TG1>,
    g1_fp_marker: PhantomData<TG1Fp>,
    fr_marker: PhantomData<TFr>,
    g1_affine_add_marker: PhantomData<TG1ProjAddAffine>,
}
/// Returns the precomputation window size, taken from the `WINDOW_SIZE`
/// compile-time environment variable and defaulting to 8 when unset.
fn get_window_size() -> usize {
    match option_env!("WINDOW_SIZE") {
        Some(raw) => raw
            .parse()
            .expect("WINDOW_SIZE environment variable must be valid number"),
        None => 8,
    }
}
// Code was taken from: https://github.com/privacy-scaling-explorations/halo2curves/blob/b753a832e92d5c86c5c997327a9cf9de86a18851/src/msm.rs#L13
/// Returns the signed (Booth-recoded) digit of little-endian scalar `el`
/// for window number `window_index` of width `window_size` bits.
///
/// Booth encoding:
/// * step by `window_size` bits, but slice `window_size + 1` bits,
///   so neighbouring windows overlap by one bit;
/// * a zero bit is implicitly appended below the least significant window.
///
/// Each raw slice maps onto a signed digit, so the bucket count is halved
/// without preprocessing the scalar as classic signed-digit encodings do.
pub fn get_booth_index(window_index: usize, window_size: usize, el: &[u8]) -> i32 {
    // Start one bit below the window (the overlap bit), except for window 0.
    let skip_bits = (window_index * window_size).saturating_sub(1);
    let skip_bytes = skip_bits / 8;

    // Load up to four little-endian bytes starting at the first relevant byte.
    let mut buf = [0u8; 4];
    for (dst, src) in buf.iter_mut().zip(el.iter().skip(skip_bytes)) {
        *dst = *src;
    }
    let mut slice = u32::from_le_bytes(buf);

    // The least significant window has no lower overlap bit; append a zero.
    if window_index == 0 {
        slice <<= 1;
    }
    // Drop bits below the window, keep exactly `window_size + 1` bits.
    slice >>= skip_bits - (skip_bytes * 8);
    slice &= (1 << (window_size + 1)) - 1;

    // Top bit of the slice decides the digit's sign.
    let is_positive = slice & (1 << window_size) == 0;
    // Ceiling-divide by two to fold the overlap bit into the magnitude.
    slice = (slice + 1) >> 1;

    if is_positive {
        slice as i32
    } else {
        // Negative digit: magnitude is the window-size-bit complement.
        -(((!(slice - 1)) & ((1 << window_size) - 1)) as i32)
    }
}
/// This is the threshold to which batching the inversions in affine
/// formula costs more than doing mixed addition.
const BATCH_INVERSE_THRESHOLD: usize = 16;
/// Returns the denominator of the affine addition formula for `p1 + p2`:
/// `x2 - x1` for distinct points, or `2 * y` when doubling.
///
/// Note: This does not handle the case where p1 == -p2.
///
/// This case is unlikely for our usecase, and is not trivial
/// to handle.
#[inline(always)]
fn choose_add_or_double<TG1: G1, TG1Fp: G1Fp, TG1Affine: G1Affine<TG1, TG1Fp>>(
    p1: TG1Affine,
    p2: TG1Affine,
) -> TG1Fp {
    if p1 != p2 {
        // Chord case: denominator is the x-coordinate difference.
        p2.x().sub_fp(p1.x())
    } else {
        // Tangent (doubling) case: denominator is 2 * y.
        p2.y().double()
    }
}
/// Given a vector of field elements {v_i}, compute the vector {v_i^(-1)}
/// in place, using only a single field inversion (Montgomery's trick).
///
/// A scratchpad is used to avoid excessive allocations in the case that this
/// method is called repeatedly.
///
/// Panics if any of the elements are zero
pub fn batch_inverse_scratch_pad<F: G1Fp>(v: &mut [F], scratchpad: &mut Vec<F>) {
    // Montgomery's Trick and Fast Implementation of Masked AES
    // Genelle, Prouff and Quisquater
    // Section 3.2
    // Clear the scratchpad and ensure it has enough capacity
    scratchpad.clear();
    scratchpad.reserve(v.len());

    // Forward pass: scratchpad[i] = v[0] * v[1] * ... * v[i].
    let mut acc = F::one();
    for value in v.iter() {
        acc = acc.mul_fp(value);
        scratchpad.push(acc.clone());
    }

    // A single inversion of the running product.
    let mut acc = acc
        .inverse()
        .expect("guaranteed to be non-zero since we filtered out zero field elements");

    // Backward pass, peeling one factor off at a time.
    // Invariant entering iteration i: acc = (v[0] * ... * v[i])^(-1).
    for i in (0..v.len()).rev() {
        let prefix = if i == 0 {
            F::one()
        } else {
            scratchpad[i - 1].clone()
        };
        // acc * v[i] = (v[0] * ... * v[i-1])^(-1) for the next iteration;
        // acc * prefix = v[i]^(-1).
        let next_acc = acc.mul_fp(&v[i]);
        v[i] = acc.mul_fp(&prefix);
        acc = next_acc;
    }
}
/// Adds two elliptic curve points using the affine addition/doubling formula.
///
/// Note: The inversion is precomputed and passed as a parameter.
///
/// This function handles both addition of distinct points and point doubling.
#[inline(always)]
fn point_add_double<TG1: G1, TG1Fp: G1Fp, TG1Affine: G1Affine<TG1, TG1Fp>>(
    p1: TG1Affine,
    p2: TG1Affine,
    inv: &TG1Fp,
) -> TG1Affine {
    // Slope of the tangent (doubling) or chord (distinct points);
    // `inv` already holds the inverted denominator.
    let slope = if p1 == p2 {
        // lambda = 3 * x^2 / (2 * y)
        p1.x().square().mul3().mul_fp(inv)
    } else {
        // lambda = (y2 - y1) / (x2 - x1)
        p2.y().sub_fp(p1.y()).mul_fp(inv)
    };
    // x3 = lambda^2 - x1 - x2
    let x3 = slope.square().sub_fp(p1.x()).sub_fp(p2.x());
    // y3 = lambda * (x1 - x3) - y1
    let y3 = slope.mul_fp(&p1.x().sub_fp(&x3)).sub_fp(p1.y());
    TG1Affine::from_xy(x3, y3)
}
/// Performs multi-batch addition of multiple sets of elliptic curve points.
///
/// This function efficiently adds multiple sets of points, amortizing the cost
/// of the field inversion over all of the sets, using the same binary tree
/// approach with striding as the single-batch version.
///
/// Returns one sum per input set, in order.
pub fn multi_batch_addition_binary_tree_stride<
    TG1: G1,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
>(
    mut multi_points: Vec<Vec<TG1Affine>>,
) -> Vec<TG1> {
    // Points at infinity contribute nothing to the sums; drop them up front.
    multi_points
        .iter_mut()
        .for_each(|points| points.retain(|p| !p.is_infinity()));
    let total_num_points: usize = multi_points.iter().map(|p| p.len()).sum();
    let mut scratchpad = Vec::with_capacity(total_num_points);

    // The largest bucket bounds the number of denominators per iteration.
    let max_bucket_length = multi_points.iter().map(|p| p.len()).max().unwrap_or(0);

    // Compute the total number of "units of work": the number of point pairs
    // that can still be added across all buckets. In the single batch addition
    // case this is analogous to the batch inversion threshold.
    #[inline(always)]
    fn compute_threshold<TG1: G1, TG1Fp: G1Fp, TG1Affine: G1Affine<TG1, TG1Fp>>(
        points: &[Vec<TG1Affine>],
    ) -> usize {
        // Integer division already floors, so odd lengths need no special case.
        points.iter().map(|p| p.len() / 2).sum()
    }

    let mut denominators = Vec::with_capacity(max_bucket_length);
    let mut total_amount_of_work = compute_threshold(&multi_points);

    let mut sums = vec![TG1::identity(); multi_points.len()];

    assert!(
        BATCH_INVERSE_THRESHOLD >= 2,
        "THRESHOLD cannot be below the number of points needed for group addition"
    );

    // TODO: total_amount_of_work does not seem to be changing performance that much
    while total_amount_of_work > BATCH_INVERSE_THRESHOLD {
        // Make every bucket's length even by folding one point of each
        // odd-length bucket straight into its running sum.
        for (points, sum) in multi_points.iter_mut().zip(sums.iter_mut()) {
            if points.len() % 2 != 0 {
                TG1ProjAddAffine::add_or_double_assign_affine(sum, &points.pop().unwrap());
            }
        }

        denominators.clear();

        // For each pair of points over all vectors, collect the denominators of
        // the addition/doubling formula so they can be inverted in one batch.
        for points in multi_points.iter_mut() {
            if points.len() < 2 {
                continue;
            }
            // Drop pairs of the form (P, -P): their sum is the identity and
            // the addition formula would divide by zero.
            *points = points
                .chunks_exact(2)
                .filter(|v| v[0] != v[1].neg())
                .flatten()
                .cloned()
                .collect::<Vec<_>>();

            // `chunks_exact(2)` yields nothing for an empty vector, avoiding
            // the `0..=points.len() - 2` underflow the index form would hit
            // when the filter above removed every pair.
            for pair in points.chunks_exact(2) {
                denominators.push(choose_add_or_double(pair[0], pair[1]));
            }
        }

        batch_inverse_scratch_pad(&mut denominators, &mut scratchpad);

        let mut denominators_offset = 0;

        for points in multi_points.iter_mut() {
            if points.len() < 2 {
                continue;
            }

            // Add each pair and write the result into the front half; slot `k`
            // is only written after slots `2k` and `2k + 1` were read.
            for (k, inv) in (0..points.len() / 2).zip(&denominators[denominators_offset..]) {
                let p1 = points[2 * k];
                let p2 = points[2 * k + 1];
                points[k] = point_add_double(p1, p2, inv);
            }

            let num_points = points.len() / 2;
            // The latter half of the vector is now unused,
            // all results are stored in the former half.
            points.truncate(num_points);
            denominators_offset += num_points
        }

        total_amount_of_work = compute_threshold(&multi_points);
    }

    // Fewer pairs than the batching threshold remain: finish each bucket with
    // plain mixed addition into its running sum.
    for (sum, points) in sums.iter_mut().zip(multi_points) {
        for point in points {
            TG1ProjAddAffine::add_or_double_assign_affine(sum, &point);
        }
    }

    sums
}
impl<
        TFr: Fr,
        TG1Fp: G1Fp,
        TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp>,
        TG1Affine: G1Affine<TG1, TG1Fp>,
        TG1ProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
    > WbitsTable<TFr, TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>
{
    /// Attempts to load a previously computed table from the on-disk cache.
    ///
    /// On failure the error carries the content hash (when available) so a
    /// freshly computed table can be written back under the same key.
    /// Always fails with `Err(None)` when the `diskcache` feature is off.
    fn try_read_cache(points: &[TG1], matrix: &[Vec<TG1>]) -> Result<Self, Option<[u8; 32]>> {
        #[cfg(feature = "diskcache")]
        {
            DiskCache::<TG1, TG1Fp, TG1Affine>::load("wbits", get_window_size(), points, matrix)
                .map_err(|(err, contenthash)| {
                    println!("Failed to load cache: {err}");
                    contenthash
                })
                .map(|cache| Self {
                    numpoints: cache.numpoints,
                    points: cache.table,
                    batch_numpoints: cache.batch_numpoints,
                    batch_points: cache.batch_table,
                    g1_marker: PhantomData,
                    g1_fp_marker: PhantomData,
                    fr_marker: PhantomData,
                    g1_affine_add_marker: PhantomData,
                })
        }
        #[cfg(not(feature = "diskcache"))]
        Err(None)
    }

    /// Persists a freshly computed table to the on-disk cache.
    ///
    /// A no-op returning `Ok(())` when the `diskcache` feature is off;
    /// save errors are logged and propagated.
    fn try_write_cache(
        points: &[TG1],
        matrix: &[Vec<TG1>],
        table: &[TG1Affine],
        numpoints: usize,
        batch_table: &[Vec<TG1Affine>],
        batch_numpoints: usize,
        contenthash: Option<[u8; 32]>,
    ) -> Result<(), String> {
        #[cfg(feature = "diskcache")]
        {
            DiskCache::<TG1, TG1Fp, TG1Affine>::save(
                "wbits",
                get_window_size(),
                points,
                matrix,
                table,
                numpoints,
                batch_table,
                batch_numpoints,
                contenthash,
            )
            .inspect_err(|err| println!("Failed to save cache: {err}"))
        }
        #[cfg(not(feature = "diskcache"))]
        Ok(())
    }

    /// Expands every base point `P` into its first `2^(w - 1)` multiples
    /// (`1*P, 2*P, ..., 2^(w-1)*P`, w = window size), appending the affine
    /// forms to `table`.
    fn build_table(points: &[TG1], table: &mut Vec<TG1Affine>) {
        for point in points {
            let mut current = point.clone();
            for _ in 0..(1 << (get_window_size() - 1)) {
                // NOTE: the published code contained the mangled token
                // `¤t` here (an HTML-entity corruption of `&current`),
                // which does not compile; restored to `&current`.
                table.push(TG1Affine::into_affine(&current));
                current = current.add_or_dbl(point);
            }
        }
    }

    /// Builds the precomputation table for `points` (and, when `matrix` is
    /// non-empty, one batch table per matrix row), preferring a cached copy
    /// when one is available.
    ///
    /// Returns `Err` when a table allocation or a cache write fails.
    pub fn new(points: &[TG1], matrix: &[Vec<TG1>]) -> Result<Option<Self>, String> {
        let contenthash = match Self::try_read_cache(points, matrix) {
            Ok(v) => return Ok(Some(v)),
            Err(e) => e,
        };

        let mut table = Vec::new();
        table
            .try_reserve_exact(points.len() * (1 << (get_window_size() - 1)))
            .map_err(|_| "WBITS precomputation table is too large".to_string())?;
        Self::build_table(points, &mut table);

        if matrix.is_empty() {
            Self::try_write_cache(points, matrix, &table, points.len(), &[], 0, contenthash)?;

            Ok(Some(Self {
                numpoints: points.len(),
                points: table,
                batch_numpoints: 0,
                batch_points: Vec::new(),
                g1_marker: PhantomData,
                g1_fp_marker: PhantomData,
                fr_marker: PhantomData,
                g1_affine_add_marker: PhantomData,
            }))
        } else {
            let batch_numpoints = matrix[0].len();
            let mut batch_points = Vec::new();
            batch_points
                .try_reserve_exact(matrix.len())
                .map_err(|_| "WBITS precomputation table is too large".to_owned())?;
            for row in matrix {
                let mut temp_table = Vec::new();
                temp_table
                    .try_reserve_exact(row.len() * (1 << (get_window_size() - 1)))
                    .map_err(|_| "WBITS precomputation table is too large".to_owned())?;
                Self::build_table(row, &mut temp_table);
                batch_points.push(temp_table);
            }
            Self::try_write_cache(
                points,
                matrix,
                &table,
                points.len(),
                &batch_points,
                batch_numpoints,
                contenthash,
            )?;

            Ok(Some(Self {
                numpoints: points.len(),
                points: table,
                batch_numpoints,
                batch_points,
                fr_marker: PhantomData,
                g1_fp_marker: PhantomData,
                g1_marker: PhantomData,
                g1_affine_add_marker: PhantomData,
            }))
        }
    }

    /// Computes the MSM over a flattened window table: `bases` holds
    /// `2^(w - 1)` precomputed affine multiples per base point, and the
    /// scalars are sliced into signed Booth windows (see `get_booth_index`).
    fn multiply_sequential_raw(bases: &[TG1Affine], scalars: &[TFr]) -> TG1 {
        let scalars = scalars.iter().map(TFr::to_scalar).collect::<Vec<_>>();
        // Scalars are treated as 255-bit values, ceil-divided into windows.
        let number_of_windows = 255 / get_window_size() + 1;
        let mut windows_of_points = vec![Vec::with_capacity(scalars.len()); number_of_windows];

        for window_idx in 0..windows_of_points.len() {
            for (scalar_idx, scalar_bytes) in scalars.iter().enumerate() {
                // Multiples 1*P .. 2^(w-1)*P of this scalar's base point.
                let sub_table = &bases[scalar_idx * (1 << (get_window_size() - 1))
                    ..(scalar_idx + 1) * (1 << (get_window_size() - 1))];
                let point_idx =
                    get_booth_index(window_idx, get_window_size(), scalar_bytes.as_u8());
                if point_idx == 0 {
                    continue;
                }
                // The Booth digit's sign selects P or -P; its magnitude
                // (1-based) selects the multiple.
                let is_scalar_positive = point_idx.is_positive();
                let point_idx = point_idx.unsigned_abs() as usize - 1;
                let mut point = sub_table[point_idx];
                if !is_scalar_positive {
                    point = point.neg();
                }
                windows_of_points[window_idx].push(point);
            }
        }

        // Sum each window's points with amortized batch-affine addition.
        let accumulated_points =
            multi_batch_addition_binary_tree_stride::<TG1, TG1Fp, TG1Affine, TG1ProjAddAffine>(
                windows_of_points,
            );

        // Now accumulate the windows by doubling wbits times
        let mut result: TG1 = accumulated_points.last().unwrap().clone();
        for point in accumulated_points.into_iter().rev().skip(1) {
            // Double the result 'wbits' times
            for _ in 0..get_window_size() {
                result = result.dbl();
            }
            // Add the accumulated point for this window
            result.add_or_dbl_assign(&point);
        }

        result
    }

    /// Computes the MSM of `scalars` against the main precomputed table.
    pub fn multiply_sequential(&self, scalars: &[TFr]) -> TG1 {
        Self::multiply_sequential_raw(&self.points, scalars)
    }

    /// Computes one MSM per row of the batch table; `scalars` must contain
    /// exactly one scalar vector per batch row.
    pub fn multiply_batch(&self, scalars: &[Vec<TFr>]) -> Vec<TG1> {
        assert!(scalars.len() == self.batch_points.len());

        #[cfg(not(feature = "parallel"))]
        {
            self.batch_points
                .iter()
                .zip(scalars)
                .map(|(points, scalars)| Self::multiply_sequential_raw(points, scalars))
                .collect::<Vec<_>>()
        }

        #[cfg(feature = "parallel")]
        {
            use super::{
                cell::Cell,
                thread_pool::{da_pool, ThreadPoolExt},
            };
            use core::sync::atomic::{AtomicUsize, Ordering};
            use std::sync::Arc;

            let pool = da_pool();
            let ncpus = pool.max_count();

            let counter = Arc::new(AtomicUsize::new(0));
            let mut results: Vec<Cell<TG1>> = Vec::with_capacity(scalars.len());
            // Each cell is written exactly once (one per claimed `work` index)
            // before `pool.join()` returns, so the uninitialized contents are
            // never observed.
            #[allow(clippy::uninit_vec)]
            unsafe {
                results.set_len(results.capacity())
            };
            let results = &results[..];

            for _ in 0..ncpus {
                let counter = counter.clone();
                pool.joined_execute(move || loop {
                    // Work stealing via atomic counter: each worker claims the
                    // next unprocessed row index until all rows are taken.
                    let work = counter.fetch_add(1, Ordering::Relaxed);
                    if work >= scalars.len() {
                        break;
                    }

                    let result =
                        Self::multiply_sequential_raw(&self.batch_points[work], &scalars[work]);
                    // SAFETY: `work` indices are unique across workers, so
                    // this cell has a single writer; readers wait on
                    // `pool.join()` below.
                    unsafe { *results[work].as_ptr().as_mut().unwrap() = result };
                });
            }
            pool.join();

            results.iter().map(|it| it.as_mut().clone()).collect()
        }
    }

    /// Parallel entry point; the table-based algorithm itself runs
    /// sequentially, so this currently forwards to `multiply_sequential`.
    #[cfg(feature = "parallel")]
    pub fn multiply_parallel(&self, scalars: &[TFr]) -> TG1 {
        self.multiply_sequential(scalars)
    }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/msm/cell.rs | kzg/src/msm/cell.rs | // Minimalist core::cell::Cell stand-in, but with Sync marker, which
// makes it possible to pass it to multiple threads. It works, because
// *here* each Cell is written only once and by just one thread.
#[repr(transparent)]
pub struct Cell<T: ?Sized> {
    // The wrapped value; mutated only through raw pointers from `as_ptr`.
    value: T,
}

// SAFETY: sharing `Cell` across threads is only sound under this crate's
// usage discipline (each cell written once, by one thread, with readers
// synchronized afterwards, e.g. via a thread-pool join) — the type itself
// does not enforce it.
unsafe impl<T: ?Sized + Sync> Sync for Cell<T> {}

impl<T> Cell<T> {
    /// Returns a raw mutable pointer to the wrapped value.
    ///
    /// NOTE(review): this casts `&self.value` to `*mut T` without an
    /// `UnsafeCell`; mutating through the result relies entirely on the
    /// write-once, single-writer discipline above — consider wrapping the
    /// field in `UnsafeCell` instead.
    pub fn as_ptr(&self) -> *mut T {
        &self.value as *const T as *mut T
    }
    // Helper to avoid as_ptr().as_ref().unwrap()
    /// Returns a mutable reference to the wrapped value.
    #[allow(clippy::mut_from_ref)]
    pub fn as_mut(&self) -> &mut T {
        // SAFETY: the pointer is non-null and derived from a valid reference;
        // exclusivity must be guaranteed by the caller (single writer).
        unsafe { &mut *(self.as_ptr()) }
    }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/msm/msm_impls.rs | kzg/src/msm/msm_impls.rs | use crate::{Fr, G1Affine, G1Fp, G1GetFp, G1Mul, G1ProjAddAffine, G1};
use alloc::vec::Vec;
#[cfg(all(feature = "arkmsm", not(feature = "parallel")))]
use super::arkmsm::arkmsm_msm::VariableBaseMSM;
use super::precompute::PrecomputationTable;
use super::tiling_pippenger_ops::tiling_pippenger;
#[cfg(feature = "parallel")]
use super::tiling_parallel_pippenger::{parallel_affine_conv, tiling_parallel_pippenger};
/// Parallel MSM dispatch: uses a precomputation table when one is supplied,
/// otherwise filters out points at infinity and runs tiled parallel Pippenger.
#[cfg(feature = "parallel")]
fn msm_parallel<
    TFr: Fr,
    TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
>(
    points: &[TG1],
    scalars: &[TFr],
    precomputation: Option<&PrecomputationTable<TFr, TG1, TG1Fp, TG1Affine, TProjAddAffine>>,
) -> TG1 {
    match precomputation {
        Some(table) => table.multiply_parallel(scalars),
        None => {
            // Drop points at infinity — they contribute nothing to the sum.
            let (filtered_points, filtered_scalars): (Vec<_>, Vec<_>) = points
                .iter()
                .cloned()
                .zip(scalars.iter())
                .filter(|(p, _)| !p.is_inf())
                .collect();
            let affine_points = batch_convert::<TG1, TG1Fp, TG1Affine>(&filtered_points);
            let raw_scalars = filtered_scalars
                .iter()
                .map(|s| s.to_scalar())
                .collect::<Vec<_>>();
            tiling_parallel_pippenger(&affine_points, &raw_scalars)
        }
    }
}
/// Sequential tiled Pippenger MSM over `points` and `scalars`.
///
/// Points at infinity are filtered out (together with their scalars), the
/// survivors are converted to affine form, and the scalars to raw 256-bit
/// representation before bucketing.
pub fn pippenger<
    TFr: Fr,
    TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
>(
    points: &[TG1],
    scalars: &[TFr],
) -> TG1 {
    let (filtered_points, filtered_scalars): (Vec<_>, Vec<_>) = points
        .iter()
        .cloned()
        .zip(scalars.iter())
        .filter(|(p, _)| !p.is_inf())
        .collect();
    let affine_points = batch_convert::<TG1, TG1Fp, TG1Affine>(&filtered_points);
    let raw_scalars: Vec<_> = filtered_scalars.iter().map(|s| s.to_scalar()).collect();
    tiling_pippenger(&affine_points, &raw_scalars)
}
/// Sequential MSM dispatch: precomputation table when available, otherwise
/// plain Pippenger — or the arkmsm backend when that feature is enabled.
#[cfg(not(feature = "parallel"))]
#[allow(clippy::extra_unused_type_parameters)]
#[allow(unused_variables)]
fn msm_sequential<
    TFr: Fr,
    TG1: G1 + G1Mul<TFr> + G1GetFp<TG1Fp>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
>(
    points: &[TG1],
    scalars: &[TFr],
    precomputation: Option<&PrecomputationTable<TFr, TG1, TG1Fp, TG1Affine, TProjAddAffine>>,
) -> TG1 {
    #[cfg(not(feature = "arkmsm"))]
    {
        // Pippenger needs at least two (point, scalar) pairs to make sense.
        assert!(points.len().min(scalars.len()) > 1);
        match precomputation {
            Some(table) => table.multiply_sequential(scalars),
            None => pippenger::<TFr, TG1, TG1Fp, TG1Affine, TProjAddAffine>(points, scalars),
        }
    }
    #[cfg(feature = "arkmsm")]
    {
        // Drop points at infinity before handing over to arkmsm.
        let (filtered_points, filtered_scalars): (Vec<_>, Vec<_>) = points
            .iter()
            .cloned()
            .zip(scalars.iter())
            .filter(|(p, _)| !p.is_inf())
            .collect();
        let affine_points = batch_convert::<TG1, TG1Fp, TG1Affine>(&filtered_points);
        let raw_scalars = filtered_scalars
            .iter()
            .map(|s| s.to_scalar())
            .collect::<Vec<_>>();
        VariableBaseMSM::multi_scalar_mul::<TG1, TG1Fp, TG1Affine, TProjAddAffine>(
            &affine_points,
            &raw_scalars,
        )
    }
}
/// Converts a slice of projective points to affine form, using the parallel
/// batch converter when the `parallel` feature is enabled.
pub fn batch_convert<TG1: G1, TFp: G1Fp, TG1Affine: G1Affine<TG1, TFp> + Sized>(
    points: &[TG1],
) -> Vec<TG1Affine> {
    #[cfg(feature = "parallel")]
    let affine = parallel_affine_conv::<TG1, TFp, TG1Affine>(points);
    #[cfg(not(feature = "parallel"))]
    let affine = TG1Affine::into_affines(points);
    affine
}
/// Multi-scalar multiplication over the first `len` (point, scalar) pairs.
///
/// Very small inputs are handled with a plain multiply-and-accumulate loop
/// (Pippenger's setup cost dominates there); larger ones are dispatched to
/// the parallel or sequential Pippenger backend.
#[allow(clippy::extra_unused_type_parameters)]
pub fn msm<
    TG1: G1 + G1GetFp<TG1Fp> + G1Mul<TFr>,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
    TFr: Fr,
>(
    points: &[TG1],
    scalars: &[TFr],
    len: usize,
    precomputation: Option<&PrecomputationTable<TFr, TG1, TG1Fp, TG1Affine, TProjAddAffine>>,
) -> TG1 {
    if len < 8 {
        let mut acc = TG1::zero();
        for (point, scalar) in points[..len].iter().zip(scalars[..len].iter()) {
            acc.add_or_dbl_assign(&point.mul(scalar));
        }
        return acc;
    }

    #[cfg(feature = "parallel")]
    return msm_parallel::<TFr, TG1, TG1Fp, TG1Affine, TProjAddAffine>(
        &points[0..len],
        &scalars[0..len],
        precomputation,
    );

    #[cfg(not(feature = "parallel"))]
    return msm_sequential::<TFr, TG1, TG1Fp, TG1Affine, TProjAddAffine>(
        &points[0..len],
        &scalars[0..len],
        precomputation,
    );
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/msm/arkmsm/mod.rs | kzg/src/msm/arkmsm/mod.rs | pub mod arkmsm_msm;
pub mod batch_adder;
pub mod bitmap;
pub mod bucket_msm;
pub mod glv;
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/msm/arkmsm/bucket_msm.rs | kzg/src/msm/arkmsm/bucket_msm.rs | use core::marker::PhantomData;
#[cfg(feature = "parallel")]
use rayon::prelude::*;
use alloc::vec;
use alloc::vec::Vec;
use crate::{
cfg_into_iter,
msm::arkmsm::{batch_adder::BatchAdder, bitmap::Bitmap, glv::endomorphism},
msm::types::{GROUP_SIZE, GROUP_SIZE_IN_BITS},
G1Affine, G1Fp, G1ProjAddAffine, G1,
};
/// State for bucket-method (Pippenger-style) multi-scalar multiplication with
/// signed windows, batched affine additions, and collision tracking.
pub struct BucketMSM<
    TG1: G1,
    TG1Fp: G1Fp,
    TG1Affine: G1Affine<TG1, TG1Fp>,
    TProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
> {
    pub num_windows: u32,
    pub window_bits: u32,
    // window_bits - 1: signed-bucket-index halves the bucket count (see `new`).
    pub bucket_bits: u32,
    pub max_batch_cnt: u32, // max slices allowed in a batch
    pub max_collision_cnt: u32,
    // size num_windows << bucket_bits, i.e. (num_windows << window_bits) / 2
    // (allocated in `new`; the previous "* 2" note contradicted that code).
    pub buckets: Vec<TG1Affine>,
    // current batch state
    pub bitmap: Bitmap,
    // (bucket_id, index into cur_points) pairs queued for batched addition.
    pub batch_buckets_and_points: Vec<(u32, u32)>,
    // Slices that hit an already-occupied bucket, parked until it frees up.
    pub collision_buckets_and_points: Vec<(u32, TG1Affine)>,
    pub cur_points: Vec<TG1Affine>, // points of current batch, size batch_size
    // batch affine adder
    pub batch_adder: BatchAdder<TG1, TG1Fp, TG1Affine>,
    // Marker so TProjAddAffine appears in the struct definition.
    _p: PhantomData<TProjAddAffine>,
}
impl<
TG1: G1,
TG1Fp: G1Fp,
TG1Affine: G1Affine<TG1, TG1Fp>,
TProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
> BucketMSM<TG1, TG1Fp, TG1Affine, TProjAddAffine>
{
    /// Creates a bucket MSM accumulator for `scalar_bits`-bit scalars
    /// processed in signed windows of `window_bits` bits.
    pub fn new(
        scalar_bits: u32,
        window_bits: u32,
        max_batch_cnt: u32,     // default: 4096
        max_collision_cnt: u32, // default: 128
    ) -> Self {
        // TODO: Check if these can be turned into consts
        let num_windows = scalar_bits.div_ceil(window_bits);
        let batch_size = core::cmp::max(8192, max_batch_cnt);
        let bucket_bits = window_bits - 1; // half buckets needed because of signed-bucket-index
        let bucket_size = num_windows << bucket_bits;
        // size of batch_adder will be the max of batch_size and num_windows * groups per window
        let batch_adder_size = core::cmp::max(batch_size, bucket_size >> GROUP_SIZE_IN_BITS);
        BucketMSM {
            num_windows,
            window_bits,
            bucket_bits,
            max_batch_cnt,
            max_collision_cnt,
            buckets: vec![TG1Affine::zero(); bucket_size as usize],
            // bucket_size / 32 words — presumably 32 bucket flags per word;
            // verify against `Bitmap`'s implementation.
            bitmap: Bitmap::new(bucket_size as usize / 32),
            batch_buckets_and_points: Vec::with_capacity(batch_size as usize),
            collision_buckets_and_points: Vec::with_capacity(max_collision_cnt as usize),
            cur_points: vec![TG1Affine::zero(); batch_size as usize],
            batch_adder: BatchAdder::new(batch_adder_size as usize),
            _p: PhantomData,
        }
    }
    /// Feeds one point and its GLV-decomposed scalar slices into the buckets.
    ///
    /// `normal_slices` holds the signed windows of the "normal" scalar half and
    /// `phi_slices` those of the endomorphism half (the point for the latter is
    /// obtained via `endomorphism`). A negative slice is marked by its top bit
    /// (masked off with `0x7FFFFFFF` to recover the magnitude).
    ///
    /// NOTE(review): the exact sign conventions of `is_neg_scalar` /
    /// `is_neg_normal` follow the arkmsm GLV decomposition — confirm against
    /// the caller that produces the slices.
    pub fn process_point_and_slices_glv(
        &mut self,
        point: &TG1Affine,
        normal_slices: &[u32],
        phi_slices: &[u32],
        is_neg_scalar: bool,
        is_neg_normal: bool,
    ) {
        assert!(
            self.num_windows as usize == normal_slices.len()
                && normal_slices.len() == phi_slices.len(),
            "slice len check failed: normal_slices {}, phi_slices {}, num_windows {}",
            normal_slices.len(),
            phi_slices.len(),
            self.num_windows
        );
        let mut p = *point; // copy
        // Fold the overall scalar sign and the normal-half sign into the point
        // by negating its y coordinate (two negations cancel out).
        if is_neg_scalar {
            p.y_mut().neg_assign();
        };
        // TODO: Can be replaced with XOR?
        if is_neg_normal {
            p.y_mut().neg_assign();
        };
        self.cur_points.push(p);
        // Positive normal slices reference the point pushed just above.
        for (win, normal_slice) in normal_slices.iter().enumerate() {
            if (*normal_slice as i32) > 0 {
                let bucket_id = (win << self.bucket_bits) as u32 + normal_slice - 1;
                self._process_slices(bucket_id, self.cur_points.len() as u32 - 1);
            }
        }
        // Negative normal slices use the negated point; the sign bit is
        // stripped below to recover the slice magnitude.
        p.y_mut().neg_assign();
        self.cur_points.push(p);
        for (win, normal_slice) in normal_slices.iter().enumerate() {
            if (*normal_slice as i32) < 0 {
                let slice = normal_slice & 0x7FFFFFFF;
                if slice > 0 {
                    let bucket_id = (win << self.bucket_bits) as u32 + slice - 1;
                    self._process_slices(bucket_id, self.cur_points.len() as u32 - 1);
                }
            }
        }
        // process phi slices
        // Undo the extra negation and the normal-half sign so only the overall
        // scalar sign remains before applying the endomorphism.
        p.y_mut().neg_assign();
        if is_neg_normal {
            p.y_mut().neg_assign();
        }
        // this isn't the cleanest of doing this, we'd better figure out a way to do this at compile time
        let p_g1: &mut TG1Affine = &mut p;
        endomorphism(p_g1);
        self.cur_points.push(p);
        // Positive phi slices reference the endomorphism image pushed above.
        for (win, phi_slice) in phi_slices.iter().enumerate() {
            if (*phi_slice as i32) > 0 {
                let bucket_id = (win << self.bucket_bits) as u32 + phi_slice - 1;
                self._process_slices(bucket_id, self.cur_points.len() as u32 - 1);
            }
        }
        // Negative phi slices use the negated endomorphism image.
        p.y_mut().neg_assign();
        self.cur_points.push(p);
        for (win, phi_slice) in phi_slices.iter().enumerate() {
            if (*phi_slice as i32) < 0 {
                let slice = phi_slice & 0x7FFFFFFF;
                if slice > 0 {
                    let bucket_id = (win << self.bucket_bits) as u32 + slice - 1;
                    self._process_slices(bucket_id, self.cur_points.len() as u32 - 1);
                }
            }
        }
    }
pub fn process_point_and_slices(&mut self, point: &TG1Affine, slices: &[u32]) {
assert!(
self.num_windows as usize == slices.len(),
"slices.len() {} should equal num_windows {}",
slices.len(),
self.num_windows
);
self.cur_points.push(*point);
for (win, slice) in slices.iter().enumerate() {
if (*slice as i32) > 0 {
let bucket_id = (win << self.bucket_bits) as u32 + slice - 1; // skip slice == 0
self._process_slices(bucket_id, self.cur_points.len() as u32 - 1);
}
}
let mut neg_p = *point;
neg_p.y_mut().neg_assign();
self.cur_points.push(neg_p);
for (win, slice) in slices.iter().enumerate() {
if (*slice as i32) < 0 {
let slice = slice & 0x7FFFFFFF;
if slice > 0 {
let bucket_id = (win << self.bucket_bits) as u32 + slice - 1; // skip slice == 0
self._process_slices(bucket_id, self.cur_points.len() as u32 - 1);
}
}
}
}
pub fn process_complete(&mut self) {
self._process_batch();
while !(self.collision_buckets_and_points.is_empty()
&& self.batch_buckets_and_points.is_empty())
{
self._process_batch();
}
}
fn _process_slices(&mut self, bucket_id: u32, point_idx: u32) {
if !self.bitmap.test_and_set(bucket_id) {
// if no collision found, add point to current batch
self.batch_buckets_and_points.push((bucket_id, point_idx));
} else {
self.collision_buckets_and_points
.push((bucket_id, self.cur_points[point_idx as usize]));
}
if self.collision_buckets_and_points.len() as u32 >= self.max_collision_cnt
|| self.batch_buckets_and_points.len() as u32 >= self.max_batch_cnt
{
self._process_batch();
}
}
fn _process_batch(&mut self) {
if self.batch_buckets_and_points.is_empty() {
return;
}
// batch addition
let (bucket_ids, point_idxs): (Vec<u32>, Vec<u32>) = self
.batch_buckets_and_points
.iter()
.map(|(b, p)| (*b, *p))
.unzip();
self.batch_adder.batch_add_indexed(
&mut self.buckets,
&bucket_ids,
&self.cur_points,
&point_idxs,
);
// clean up current batch
self.bitmap.clear();
self.batch_buckets_and_points.clear();
// memorize the last point which is the current processing point and we need to
// push it back to the cur_points list since we're processing slices in a for loop
let slicing_point = self.cur_points.pop();
self.cur_points.clear();
let mut next_pos = 0;
for i in 0..self.collision_buckets_and_points.len() {
let (bucket_id, point) = self.collision_buckets_and_points[i];
if self.bitmap.test_and_set(bucket_id) {
// collision found
self.collision_buckets_and_points.swap(next_pos, i);
next_pos += 1;
} else {
self.batch_buckets_and_points
.push((bucket_id, self.cur_points.len() as u32));
self.cur_points.push(point);
}
}
self.collision_buckets_and_points.truncate(next_pos);
self.cur_points.push(slicing_point.unwrap());
}
pub fn batch_reduce(&mut self) -> TG1 {
let window_starts: Vec<_> = (0..self.num_windows as usize).collect();
let num_groups =
(self.num_windows as usize) << (self.bucket_bits as usize - GROUP_SIZE_IN_BITS);
let mut running_sums: Vec<_> = vec![TG1Affine::zero(); num_groups];
let mut sum_of_sums: Vec<_> = vec![TG1Affine::zero(); num_groups];
// calculate running sum and sum of sum for each group
for i in (0..GROUP_SIZE).rev() {
// running sum
self.batch_adder.batch_add_step_n(
&mut running_sums,
1,
&self.buckets[i..],
GROUP_SIZE,
num_groups,
);
// sum of sum
self.batch_adder.batch_add(&mut sum_of_sums, &running_sums);
}
let sum_by_window: Vec<TG1> = cfg_into_iter!(window_starts)
.map(|w_start| {
let group_start = w_start << (self.bucket_bits as usize - GROUP_SIZE_IN_BITS);
let group_end = (w_start + 1) << (self.bucket_bits as usize - GROUP_SIZE_IN_BITS);
self.inner_window_reduce(
&running_sums[group_start..group_end],
&sum_of_sums[group_start..group_end],
)
})
.collect();
self.intra_window_reduce(&sum_by_window)
}
fn inner_window_reduce(&self, running_sums: &[TG1Affine], sum_of_sums: &[TG1Affine]) -> TG1 {
self.calc_sum_of_sum_total(sum_of_sums)
.add_or_dbl(&self.calc_running_sum_total(running_sums))
}
fn calc_running_sum_total(&self, running_sums: &[TG1Affine]) -> TG1 {
let mut running_sum_total = TG1::zero();
for (i, running_sum) in running_sums.iter().enumerate().skip(1) {
for _ in 0..i {
TProjAddAffine::add_or_double_assign_affine(&mut running_sum_total, running_sum);
}
}
for _ in 0..GROUP_SIZE_IN_BITS {
running_sum_total.dbl_assign();
}
running_sum_total
}
fn calc_sum_of_sum_total(&self, sum_of_sums: &[TG1Affine]) -> TG1 {
let mut sum = TG1::zero();
sum_of_sums
.iter()
.for_each(|p| TProjAddAffine::add_or_double_assign_affine(&mut sum, p));
sum
}
fn intra_window_reduce(&mut self, window_sums: &[TG1]) -> TG1 {
// Traverse windows from high to low
let lowest = window_sums.first().unwrap();
lowest.add(
&window_sums[1..]
.iter()
.rev()
.fold(TG1::zero(), |mut total, sum_i| {
total.add_assign(sum_i);
for _ in 0..self.window_bits {
total.dbl_assign();
}
total
}),
)
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/msm/arkmsm/arkmsm_msm.rs | kzg/src/msm/arkmsm/arkmsm_msm.rs | use crate::{
common_utils::log2_u64,
msm::arkmsm::bucket_msm::BucketMSM,
msm::arkmsm::glv::decompose,
msm::types::{G1_SCALAR_SIZE, G1_SCALAR_SIZE_GLV},
Fr, G1Affine, G1Fp, G1ProjAddAffine, Scalar256, G1,
};
use alloc::vec;
/// Stateless namespace for the variable-base bucket-MSM entry points below.
pub struct VariableBaseMSM;
impl VariableBaseMSM {
    /// WARNING: this function is derived from benchmark results running
    /// on a Ubuntu 20.04.2 LTS server with AMD EPYC 7282 16-Core CPU
    /// and 128G memory, the optimal performance may vary on a different
    /// configuration.
    ///
    /// `k` is log2 of the point count; a compile-time `WINDOW_SIZE` env var
    /// overrides the benchmark-derived table.
    fn get_opt_window_size(k: u32) -> u32 {
        option_env!("WINDOW_SIZE")
            .and_then(|v| v.parse().ok())
            .unwrap_or(match k {
                0..=9 => 8,
                10..=12 => 10,
                13..=14 => 12,
                15..=19 => 13,
                20..=22 => 15,
                23.. => 16,
            })
    }
    /// Splits `scalar` into signed `window_bits`-bit digits written into
    /// `slices`. A digit greater than half the window range is replaced by
    /// `total - digit` with a carry into the next window, and its top bit is
    /// set to mark it negative (hence `window_bits <= 31`).
    pub fn msm_slice(mut scalar: Scalar256, slices: &mut [u32], window_bits: u32) {
        assert!(window_bits <= 31); // reserve one bit for marking signed slices
        let mut carry = 0;
        let total = 1 << window_bits;
        let half = total >> 1;
        slices.iter_mut().for_each(|el| {
            *el = (scalar.data.as_ref()[0] % (1 << window_bits)) as u32;
            scalar.divn(window_bits);
            *el += carry;
            if half < *el {
                // slices[i] == half is okay, since (slice[i]-1) will be used for bucket_id
                *el = total - *el;
                carry = 1;
                *el |= 1 << 31; // mark the highest bit for later
            } else {
                carry = 0;
            }
        });
        assert!(
            carry == 0,
            "msm_slice overflows when apply signed-bucket-index"
        );
    }
    /// GLV variant: decomposes each scalar as `phi * lambda + normal`, slices
    /// both halves, and feeds them to the bucket MSM with the endomorphism.
    /// Currently unused (see `multi_scalar_mul`), hence `dead_code`.
    #[allow(dead_code)]
    fn multi_scalar_mul_g1_glv<
        TG1: G1,
        TG1Fp: G1Fp,
        TG1Affine: G1Affine<TG1, TG1Fp>,
        TProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
        TFr: Fr,
    >(
        points: &[TG1Affine],
        scalars: &[Scalar256],
        window_bits: u32,
        max_batch: u32,
        max_collisions: u32,
    ) -> TG1 {
        let num_slices: usize = G1_SCALAR_SIZE_GLV.div_ceil(window_bits) as usize;
        let mut bucket_msm = BucketMSM::<TG1, TG1Fp, TG1Affine, TProjAddAffine>::new(
            G1_SCALAR_SIZE_GLV,
            window_bits,
            max_batch,
            max_collisions,
        );
        // scalar = phi * lambda + normal
        let mut phi_slices = vec![0u32; num_slices];
        let mut normal_slices = vec![0u32; num_slices];
        scalars
            .iter()
            .zip(points)
            .filter(|(s, _)| !s.is_zero())
            .for_each(|(scalar, point)| {
                let (phi, normal, is_neg_scalar, is_neg_normal) =
                    decompose(&TFr::from_u64_arr(&scalar.data), window_bits);
                Self::msm_slice(
                    Scalar256::from_u64(phi.to_u64_arr()),
                    &mut phi_slices[..num_slices],
                    window_bits,
                );
                // BUGFIX: the normal slices must come from the `normal`
                // remainder of the decomposition; previously `phi` was sliced
                // twice, making normal_slices wrong.
                Self::msm_slice(
                    Scalar256::from_u64(normal.to_u64_arr()),
                    &mut normal_slices[..num_slices],
                    window_bits,
                );
                bucket_msm.process_point_and_slices_glv(
                    point,
                    &normal_slices[..num_slices],
                    &phi_slices[..num_slices],
                    is_neg_scalar,
                    is_neg_normal,
                );
            });
        bucket_msm.process_complete();
        bucket_msm.batch_reduce()
    }
    /// Non-GLV bucket MSM over full-width scalars. Zero scalars are skipped.
    fn multi_scalar_mul_general<
        TG1: G1,
        TG1Fp: G1Fp,
        TG1Affine: G1Affine<TG1, TG1Fp>,
        TProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
    >(
        points: &[TG1Affine],
        scalars: &[Scalar256],
        window_bits: u32,
        max_batch: u32,
        max_collisions: u32,
    ) -> TG1 {
        let num_slices: usize = G1_SCALAR_SIZE.div_ceil(window_bits) as usize;
        let mut bucket_msm = BucketMSM::<TG1, TG1Fp, TG1Affine, TProjAddAffine>::new(
            G1_SCALAR_SIZE,
            window_bits,
            max_batch,
            max_collisions,
        );
        let mut slices = vec![0u32; num_slices];
        scalars
            .iter()
            .zip(points)
            .filter(|(s, _)| !s.is_zero())
            .for_each(|(&scalar, point)| {
                Self::msm_slice(scalar, &mut slices[..num_slices], window_bits);
                bucket_msm.process_point_and_slices(point, &slices[..num_slices]);
            });
        bucket_msm.process_complete();
        bucket_msm.batch_reduce()
    }
    /// Public entry point: computes `sum(scalars[i] * points[i])` with a
    /// window size tuned to the input length.
    pub fn multi_scalar_mul<
        TG1: G1,
        TG1Fp: G1Fp,
        TG1Affine: G1Affine<TG1, TG1Fp>,
        TProjAddAffine: G1ProjAddAffine<TG1, TG1Fp, TG1Affine>,
    >(
        points: &[TG1Affine],
        scalars: &[Scalar256],
    ) -> TG1 {
        let opt_window_size = Self::get_opt_window_size(log2_u64(points.len()) as u32);
        // Self::multi_scalar_mul_g1_glv::<TG1, TG1Fp, TG1Affine, TProjAddAffine, TFr>(points, scalars, opt_window_size, 2048, 256)
        Self::multi_scalar_mul_general::<TG1, TG1Fp, TG1Affine, TProjAddAffine>(
            points,
            scalars,
            opt_window_size,
            2048,
            256,
        )
    }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/msm/arkmsm/batch_adder.rs | kzg/src/msm/arkmsm/batch_adder.rs | use crate::{G1Affine, G1Fp, G1};
use core::marker::PhantomData;
use alloc::vec;
use alloc::vec::Vec;
/// Montgomery-trick batch affine adder: amortizes one field inversion over a
/// whole batch of affine point additions (see the two-phase scheme below).
pub struct BatchAdder<TG1: G1, TFp: G1Fp, TG1Affine: G1Affine<TG1, TFp>> {
    // Accumulated product of the deltaX values (and, after `inverse()`, its inverse).
    pub inverse_state: TFp,
    // Per-slot prefix products recorded during phase one.
    pub inverses: Vec<TFp>,
    // Zero sized fields so that batch adder doesn't complain about unused types
    // TG1 & TG1Affine are needed for the BatchAdder impl
    phantom_g1: PhantomData<TG1>,
    phantom_affine: PhantomData<TG1Affine>,
}
// NOTE: the original carried a redundant `where TG1: G1` clause duplicating
// the inline bound; it has been removed (pure cleanup, no semantic change).
impl<TG1: G1, TFp: G1Fp, TG1Affine: G1Affine<TG1, TFp>> BatchAdder<TG1, TFp, TG1Affine> {
    /// Creates an adder able to process up to `max_batch_cnt` additions per batch.
    pub fn new(max_batch_cnt: usize) -> Self {
        BatchAdder {
            inverse_state: TFp::one(),
            inverses: vec![TFp::one(); max_batch_cnt],
            phantom_g1: PhantomData,
            phantom_affine: PhantomData,
        }
    }
    /// Batch add vector dest and src, the results will be stored in dest, i.e. dest[i] = dest[i] + src[i]
    pub fn batch_add(&mut self, dest: &mut [TG1Affine], src: &[TG1Affine]) {
        assert!(
            dest.len() == src.len(),
            "length of dest and src don't match!"
        );
        assert!(dest.len() <= self.inverses.len(),
            "input length exceeds the max_batch_cnt, please increase max_batch_cnt during initialization!");
        self.reset();
        // Phase one walks left to right accumulating deltaX products...
        for i in 0..dest.len() {
            self.batch_add_phase_one(&dest[i], &src[i], i);
        }
        // ...one shared inversion...
        self.inverse();
        // ...phase two walks right to left producing the sums.
        for i in (0..dest.len()).rev() {
            self.batch_add_phase_two(&mut dest[i], &src[i], i);
        }
    }
    /// Batch add vector dest and src of len entries, skipping dest_step and src_step entries each
    /// the results will be stored in dest, i.e. dest[i] = dest[i] + src[i]
    pub fn batch_add_step_n(
        &mut self,
        dest: &mut [TG1Affine],
        dest_step: usize,
        src: &[TG1Affine],
        src_step: usize,
        len: usize,
    ) {
        assert!(
            dest.len() > (len - 1) * dest_step,
            "insufficient entries in dest array"
        );
        assert!(
            src.len() > (len - 1) * src_step,
            "insufficient entries in src array"
        );
        assert!(len <= self.inverses.len(),
            "input length exceeds the max_batch_cnt, please increase max_batch_cnt during initialization!");
        self.reset();
        for i in 0..len {
            self.batch_add_phase_one(&dest[i * dest_step], &src[i * src_step], i);
        }
        self.inverse();
        for i in (0..len).rev() {
            self.batch_add_phase_two(&mut dest[i * dest_step], &src[i * src_step], i);
        }
    }
    /// Batch add vector dest[dest_index] and src[src_index] using the specified indexes in input
    /// the results will be stored in dest, i.e. dest[i] = dest[i] + src[i]
    pub fn batch_add_indexed(
        &mut self,
        dest: &mut [TG1Affine],
        dest_indexes: &[u32],
        src: &[TG1Affine],
        src_indexes: &[u32],
    ) {
        assert!(
            dest.len() >= dest_indexes.len(),
            "insufficient entries in dest array"
        );
        assert!(dest_indexes.len() <= self.inverses.len(),
            "input length exceeds the max_batch_cnt, please increase max_batch_cnt during initialization!");
        assert_eq!(
            dest_indexes.len(),
            src_indexes.len(),
            "length of dest_indexes and src_indexes don't match!"
        );
        self.reset();
        for i in 0..dest_indexes.len() {
            self.batch_add_phase_one(
                &dest[dest_indexes[i] as usize],
                &src[src_indexes[i] as usize],
                i,
            );
        }
        self.inverse();
        for i in (0..dest_indexes.len()).rev() {
            self.batch_add_phase_two(
                &mut dest[dest_indexes[i] as usize],
                &src[src_indexes[i] as usize],
                i,
            );
        }
    }
    /// Inverts the accumulated deltaX product (the single field inversion of the batch).
    pub fn inverse(&mut self) {
        self.inverse_state = self.inverse_state.inverse().unwrap();
    }
    /// Resets the accumulator for a new batch.
    pub fn reset(&mut self) {
        self.inverse_state.set_one();
    }
    /// Two-pass batch affine addition
    /// - 1st pass calculates from left to right
    ///   - inverse_state: accumulated product of deltaX
    ///   - inverses[]: accumulated product left to a point
    /// - call inverse()
    /// - 2nd pass calculates from right to left
    ///   - slope s and ss from state
    ///   - inverse_state = inverse_state * deltaX
    ///   - addition result acc
    pub fn batch_add_phase_one(&mut self, p: &TG1Affine, q: &TG1Affine, idx: usize) {
        assert!(
            idx < self.inverses.len(),
            "index exceeds the max_batch_cnt, please increase max_batch_cnt during initialization!"
        );
        // Additions involving infinity need no inversion; skip them.
        if p.is_zero() || q.is_zero() {
            return;
        }
        let mut delta_x = q.x().sub_fp(p.x());
        if delta_x.is_zero() {
            let delta_y = q.y().sub_fp(p.y());
            if !delta_y.is_zero() {
                // p = -q, return
                return;
            }
            // if P == Q
            // if delta_x is zero, we need to invert 2y
            delta_x = q.y().add_fp(q.y());
        }
        if self.inverse_state.is_zero() {
            self.inverses[idx].set_one();
            self.inverse_state = delta_x;
        } else {
            self.inverses[idx] = self.inverse_state;
            self.inverse_state.mul_assign_fp(&delta_x);
        }
    }
    /// should call inverse() between phase_one and phase_two
    pub fn batch_add_phase_two(&mut self, p: &mut TG1Affine, q: &TG1Affine, idx: usize) {
        assert!(
            idx < self.inverses.len(),
            "index exceeds the max_batch_cnt, please increase max_batch_cnt during initialization!"
        );
        if q.is_zero() {
            return;
        } else if p.is_zero() {
            *p = *q;
            return;
        }
        // Recover 1/deltaX for this slot: prefix product * running inverse.
        let mut _inverse = self.inverses[idx];
        _inverse.mul_assign_fp(&self.inverse_state);
        let mut delta_x = q.x().sub_fp(p.x());
        let mut delta_y = q.y().sub_fp(p.y());
        if delta_x.is_zero() {
            if !delta_y.is_zero() {
                // p = -q, result should be pt at infinity
                p.set_zero();
                return;
            }
            // Otherwise, p = q, and it's point doubling
            // Processing is almost the same, except s=3*affine.x^2 / 2*affine.y
            // set delta_y = 3*q.x^2
            delta_y = q.x().square();
            delta_y = delta_y.add_fp(&delta_y).add_fp(&delta_y);
            delta_x = q.y().double();
        }
        // get the state ready for the next iteration
        self.inverse_state.mul_assign_fp(&delta_x);
        // Standard affine chord/tangent formulas with s = deltaY / deltaX.
        let s = delta_y.mul_fp(&_inverse);
        let ss = s.mul_fp(&s);
        *p.x_mut() = ss.sub_fp(q.x()).sub_fp(p.x());
        delta_x = q.x().sub_fp(p.x());
        *p.y_mut() = s.mul_fp(&delta_x).sub_fp(q.y());
    }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/msm/arkmsm/bitmap.rs | kzg/src/msm/arkmsm/bitmap.rs | // Implementation of atomic bitmap
// Modified from non-sync implementation
use core::sync::atomic::{AtomicU32, Ordering};
use alloc::vec::Vec;
/// Atomic bitmap with one bit per bucket, packed into `u32` words.
pub struct Bitmap {
    // Number of u32 words in `data` (kept for `clear`).
    size: usize,
    data: Vec<AtomicU32>,
}
impl Bitmap {
    /// Creates a bitmap of `size` 32-bit words, all bits cleared.
    pub fn new(size: usize) -> Bitmap {
        let mut data: Vec<AtomicU32> = Vec::with_capacity(size);
        data.resize_with(size, Default::default);
        Bitmap { size, data }
    }
    /// Atomically sets the bit for `bucket` and returns whether it was
    /// already set (true = collision, false = we took the bit).
    ///
    /// Simplified from a `compare_exchange_weak` retry loop: `fetch_or` is a
    /// single unconditional RMW that returns the previous word, which is all
    /// the information we need.
    pub fn test_and_set(&self, bucket: u32) -> bool {
        let word = (bucket >> 5) as usize;
        let bit = 1 << (bucket & 0x1F);
        let prev = self.data[word].fetch_or(bit, Ordering::AcqRel);
        prev & bit != 0
    }
    /// Clears every bit. Not atomic with respect to concurrent setters.
    pub fn clear(&self) {
        for i in 0..self.size {
            self.data[i].store(0, Ordering::Relaxed);
        }
    }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/msm/arkmsm/glv.rs | kzg/src/msm/arkmsm/glv.rs | // Based on
// Decompose scalar = q * lambda + r with barret reduction
// Here we implement algorithm 2 example 1 described in
// https://hackmd.io/@chaosma/SyAvcYFxh
use crate::{Fr, G1Affine, G1Fp, G1};
// λ (lambda) as two 64-bit limbs, plus a precomputed approximation of
// 2^256 / λ used for the Barrett-style quotient in `decompose` below.
const LMDA1: u128 = 0xac45a4010001a402; // lambda high 64 bit
const LMDA0: u128 = 0x00000000ffffffff; // lambda low 64 bit
const INV1: u128 = 0x7c6becf1e01faadd; // 2**256 // lambda - [64, 127] bit
const INV0: u128 = 0x63f6e522f6cfee30; // 2**256 // lambda - [0, 63] bit
const MASK64: u128 = 0xffffffffffffffff; // low-64-bit mask for limb arithmetic in u128
/// Decomposes `scalar` as `quotient * lambda + remainder` using a Barrett
/// reduction against the precomputed `INV1:INV0 ~= 2^256 / lambda`.
///
/// Returns `(quotient, remainder, is_neg_scalar, is_neg_remainder)`; the two
/// flags tell the caller which point negations compensate for the pre/post
/// processing performed here (see `glv_preprocess_scalar` /
/// `glv_post_processing`).
pub fn decompose<TFr: Fr>(scalar: &TFr, window_bits: u32) -> (TFr, TFr, bool, bool) {
    let (s0, s1, s2, s3, is_neg_scalar) = glv_preprocess_scalar(scalar, window_bits);
    // 255 bits in four 64b limbs
    // let s2: u128 = scalar.into_repr().as_ref()[2] as u128; // 64 bit
    // let s3: u128 = scalar.into_repr().as_ref()[3] as u128; // 63 bit
    // quotient = (scalar_top127b * inv_approx) >> 128, 127 + 129 - 128 = 128b
    // Schoolbook 2x2 limb multiply of (s3:s2) by (INV1:INV0), keeping the
    // high 128 bits (q3:q2) as the approximate quotient.
    let q0: u128 = INV0 * s2;
    let q1: u128 = INV1 * s2 + INV0 * s3 + (q0 >> 64);
    let q2: u128 = s2 + INV1 * s3 + (q1 >> 64);
    let q3: u128 = s3 + (q2 >> 64);
    let mut quotient0: u128 = q2 & MASK64;
    let mut quotient1: u128 = q3 & MASK64;
    // t = quotient * LAMBDA
    let t0: u128 = quotient0 * LMDA0;
    let t1: u128 = quotient1 * LMDA0 + quotient0 * LMDA1 + (t0 >> 64);
    let t2: u128 = quotient1 * LMDA1 + (t1 >> 64);
    // let t3: u128 = t2 >> 64;
    // r = scalar - t  (signed limb-by-limb subtraction with carry in i128)
    let mut carry: i128 = s0 as i128 - (t0 & MASK64) as i128;
    let mut r0: u64 = (carry as u128 & MASK64) as u64;
    carry >>= 64;
    carry += s1 as i128 - (t1 & MASK64) as i128;
    let mut r1: u64 = (carry as u128 & MASK64) as u64;
    carry >>= 64;
    carry += s2 as i128 - (t2 & MASK64) as i128;
    let mut r2: u64 = (carry as u128 & MASK64) as u64;
    // remainder is at most 3 * LAMBDA, 130 bit
    assert!(r2 < 4, "remainder at most 130 bit");
    // Final correction: subtract lambda until the remainder would go
    // negative; each subtraction bumps the quotient by one.
    let mut correction = 0u32;
    loop {
        carry = r0 as i128 - LMDA0 as i128;
        let t0: u64 = (carry as u128 & MASK64) as u64;
        carry >>= 64;
        carry += r1 as i128 - LMDA1 as i128;
        let t1: u64 = (carry as u128 & MASK64) as u64;
        carry >>= 64;
        if carry < 0 && r2 == 0 {
            // went negative
            break;
        }
        r2 = (r2 as i128 + carry) as u64;
        r0 = t0;
        r1 = t1;
        correction += 1;
    }
    // correction
    quotient0 += correction as u128;
    quotient1 += quotient0 >> 64;
    quotient0 &= MASK64;
    // Only needed when the scalar bit length divides evenly into windows;
    // see the comment block above `glv_post_processing`.
    let mut is_neg_remainder = false;
    if 128 % window_bits == 0 {
        is_neg_remainder = glv_post_processing(&mut quotient0, &mut quotient1, &mut r0, &mut r1);
    }
    (
        TFr::from_u64_arr(&[quotient0 as u64, quotient1 as u64, 0, 0]),
        TFr::from_u64_arr(&[r0, r1, 0, 0]),
        is_neg_scalar,
        is_neg_remainder,
    )
}
// With the signed-bucket-index trick, slice[i] add a carry to slice[i+1] when
// MSB of slice[i] is set. If the scalar_bit_length mod by slice_bit_length
// (aka window_size) is zero, an extra slice need to be created for the
// signed-bucket-index trick to work, which introduces performance penalty.
// This happens when window size is 15 or 17 for 255 bits scalars, or window
// size 16 for 128 bits scalars with GLV decomposition
//
// pre and post processing here ensure that MSBs of both quotient and remainder
// are zero, with a trick similar to signed-bucket-index
// Limbs of the group order N (R3 most significant ... R0 least significant),
// used below to compute N - s; presumably the BLS12-381 scalar field modulus
// — TODO confirm against the backend's Fr modulus.
const R3: i128 = 0x73eda753299d7d48;
const R2: i128 = 0x3339d80809a1d805;
const R1: i128 = 0x53bda402fffe5bfe;
const R0: i128 = 0xffffffff00000001;
// use sP = (N - s)(-P) to make scalar smaller, which ensures scalar MSB is not
// set, and the decomposed phi has MSB unset
/// Returns the scalar's four 64-bit limbs widened to u128, replacing s with
/// N - s (and flagging `is_neg_scalar`) when the top limb is large, so that
/// the scalar's MSB — and the MSB of the decomposed quotient — stays clear.
/// Only active when `128 % window_bits == 0` (the problematic window sizes;
/// see the comment block above).
fn glv_preprocess_scalar<TFr: Fr>(
    scalar: &TFr,
    window_bits: u32,
) -> (u128, u128, u128, u128, bool) {
    let mut s = scalar.to_u64_arr();
    let mut is_neg_scalar = false;
    if 128 % window_bits == 0 {
        // NOTE(review): threshold check is `>=` while the post-condition
        // asserts `<` for the same constant; a scalar whose negation lands
        // exactly on 0x3FFFFFFFFFFFFFFF in s[3] would trip the assert —
        // presumably unreachable for valid group elements, TODO confirm.
        if s[3] >= 0x3FFFFFFFFFFFFFFF {
            is_neg_scalar = true;
            // s = N - s, limb by limb with signed carries in i128.
            let mut carry: i128 = 0;
            carry = carry + R0 - s[0] as i128;
            s[0] = (carry as u128 & MASK64) as u64;
            carry >>= 64;
            carry = carry + R1 - s[1] as i128;
            s[1] = (carry as u128 & MASK64) as u64;
            carry >>= 64;
            carry = carry + R2 - s[2] as i128;
            s[2] = (carry as u128 & MASK64) as u64;
            carry >>= 64;
            carry = carry + R3 - s[3] as i128;
            s[3] = (carry as u128 & MASK64) as u64;
        }
        assert!(s[3] < 0x3FFFFFFFFFFFFFFF);
    }
    (
        s[0] as u128,
        s[1] as u128,
        s[2] as u128,
        s[3] as u128,
        is_neg_scalar,
    )
}
// if remainder has MSB set, clear MSB by using lambda - remainder, and add
// carry to quotient
/// If the remainder's MSB is set, replaces it with `lambda - r` and bumps the
/// quotient by one, returning true so the caller negates the matching point.
///
/// `q0`/`q1` are 64-bit limbs stored in `u128`.
fn glv_post_processing(q0: &mut u128, q1: &mut u128, r0: &mut u64, r1: &mut u64) -> bool {
    if *r1 >= 0x8000000000000000 {
        // add carry to q — emulate a 64-bit limb increment. BUGFIX: the
        // previous `*q0 += 1` could never wrap a u128 to zero, so the carry
        // into q1 was silently dropped when q0 == 2^64 - 1; masking to 64 bits
        // makes the `*q0 == 0` carry test meaningful.
        *q0 = (*q0 + 1) & MASK64;
        *q1 += (*q0 == 0) as u128;
        // r = lambda - r
        let mut carry: i128 = 0;
        carry = carry + LMDA0 as i128 - *r0 as i128;
        *r0 = (carry as u128 & MASK64) as u64;
        carry >>= 64;
        carry = carry + LMDA1 as i128 - *r1 as i128;
        *r1 = (carry as u128 & MASK64) as u64;
        assert!(*r1 < 0x8000000000000000);
        return true;
    }
    false
}
// β: the base-field constant multiplying x in the GLV endomorphism below,
// given in the backend's underlying limb representation (presumably a
// nontrivial cube root of unity in Fp — TODO confirm against the backend).
const BETA: [u64; 6usize] = [
    14772873186050699377,
    6749526151121446354,
    6372666795664677781,
    10283423008382700446,
    286397964926079186,
    1796971870900422465,
];
// lambda * (x, y) = (beta * x, y)
/// Applies the GLV endomorphism in place: lambda * (x, y) = (beta * x, y).
/// The point at infinity is left untouched.
pub fn endomorphism<TG1: G1, TG1Fp: G1Fp, TG1Affine: G1Affine<TG1, TG1Fp>>(point: &mut TG1Affine) {
    if !point.is_zero() {
        let beta = TG1Fp::from_underlying_arr(&BETA);
        point.x_mut().mul_assign_fp(&beta);
    }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/eth/eip_7594.rs | kzg/src/eth/eip_7594.rs | #[cfg(feature = "parallel")]
use rayon::prelude::*;
use alloc::{format, string::String, vec::Vec};
use crate::{
cfg_chunks, cfg_iter,
das::{EcBackend, DAS},
eip_4844::bytes_to_blob,
eth::{
BYTES_PER_BLOB, BYTES_PER_CELL, BYTES_PER_COMMITMENT, BYTES_PER_FIELD_ELEMENT,
BYTES_PER_PROOF, CELLS_PER_EXT_BLOB, FIELD_ELEMENTS_PER_CELL, FIELD_ELEMENTS_PER_EXT_BLOB,
},
Fr, G1,
};
/// Serialized result of cell/proof computation: one byte array per cell and one per proof.
pub type CellsKzgProofs = (Vec<[u8; BYTES_PER_CELL]>, Vec<[u8; BYTES_PER_PROOF]>);
/// Deserializes the partial `cells` bytes, recovers the full extended blob's
/// cells and proofs via `das`, and serializes the results back to byte arrays.
///
/// `cell_indices[i]` identifies which cell `cells[i]` is; errors carry the
/// offending element index and byte range.
pub fn recover_cells_and_kzg_proofs_raw<B: EcBackend>(
    cell_indices: &[usize],
    cells: &[[u8; BYTES_PER_CELL]],
    das: &impl DAS<B>,
) -> Result<CellsKzgProofs, String>
where
    B::G1: Copy,
    B::Fr: Copy,
{
    // Every 32-byte chunk of the flattened cells is one field element.
    let frs = cfg_chunks!(cells.as_flattened(), BYTES_PER_FIELD_ELEMENT)
        .enumerate()
        .map(|(i, chunk)| {
            B::Fr::from_bytes(chunk).map_err(|e| {
                let start = i * BYTES_PER_FIELD_ELEMENT;
                let end = start + chunk.len();
                format!(
                    "Failed to deserialize field element with index {} (bytes [{}; {})) with error: {}",
                    i, start, end, e
                )
            })
        })
        .collect::<Result<Vec<_>, _>>()?;
    let mut recovered_cells = [B::Fr::default(); FIELD_ELEMENTS_PER_EXT_BLOB];
    let mut recovered_proofs = [B::G1::default(); CELLS_PER_EXT_BLOB];
    das.recover_cells_and_kzg_proofs(
        &mut recovered_cells,
        Some(&mut recovered_proofs),
        cell_indices,
        &frs,
    )
    .map_err(|err| format!("Cell and proof recovery failed with error: {err}"))?;
    let cell_bytes = cells_elements_to_cells_bytes::<B>(&recovered_cells)?;
    let proof_bytes: Vec<_> = recovered_proofs.iter().map(|p| p.to_bytes()).collect();
    Ok((cell_bytes, proof_bytes))
}
/// Computes all cells and proofs for a serialized blob and returns them as
/// byte arrays.
pub fn compute_cells_and_kzg_proofs_raw<B: EcBackend>(
    blob: [u8; BYTES_PER_BLOB],
    das: &impl DAS<B>,
) -> Result<CellsKzgProofs, String>
where
    B::G1: Copy,
    B::Fr: Copy,
{
    let blob_frs = bytes_to_blob(&blob)?;
    let mut cells = [B::Fr::default(); FIELD_ELEMENTS_PER_EXT_BLOB];
    let mut proofs = [B::G1::default(); CELLS_PER_EXT_BLOB];
    das.compute_cells_and_kzg_proofs(Some(&mut cells), Some(&mut proofs), &blob_frs)?;
    let cell_bytes = cells_elements_to_cells_bytes::<B>(&cells)?;
    let proof_bytes: Vec<_> = proofs.iter().map(|p| p.to_bytes()).collect();
    Ok((cell_bytes, proof_bytes))
}
/// Computes only the cells (no proofs) for a serialized blob.
pub fn compute_cells_raw<B: EcBackend>(
    blob: [u8; BYTES_PER_BLOB],
    das: &impl DAS<B>,
) -> Result<Vec<[u8; BYTES_PER_CELL]>, String>
where
    B::Fr: Copy,
{
    let blob_frs = bytes_to_blob(&blob)?;
    let mut cells = [B::Fr::default(); FIELD_ELEMENTS_PER_EXT_BLOB];
    // Passing `None` for proofs skips proof generation entirely.
    das.compute_cells_and_kzg_proofs(Some(&mut cells), None, &blob_frs)?;
    cells_elements_to_cells_bytes::<B>(&cells)
}
/// Deserializes commitments, cells and proofs from their byte forms and runs
/// the batched cell-proof verification. Error messages identify the exact
/// element that failed to deserialize.
pub fn verify_cell_kzg_proof_batch_raw<B: EcBackend>(
    commitments: &[[u8; BYTES_PER_COMMITMENT]],
    cell_indices: &[usize],
    cells: &[[u8; BYTES_PER_CELL]],
    proofs: &[[u8; BYTES_PER_PROOF]],
    das: &impl DAS<B>,
) -> Result<bool, String> {
    let commitments = cfg_iter!(commitments)
        .enumerate()
        .map(|(i, bytes)| {
            B::G1::from_bytes(bytes).map_err(|err| {
                format!(
                    "Failed to deserialize commitment at index {}, commitment 0x{}: {}",
                    i,
                    hex::encode(bytes),
                    err
                )
            })
        })
        .collect::<Result<Vec<_>, _>>()?;
    let cells = cfg_chunks!(cells.as_flattened(), BYTES_PER_FIELD_ELEMENT)
        .enumerate()
        .map(|(i, chunk)| {
            B::Fr::from_bytes(chunk).map_err(|err| {
                format!(
                    "Failed to deserialize cell's {} element with index {}: {}",
                    i / FIELD_ELEMENTS_PER_CELL,
                    i % FIELD_ELEMENTS_PER_CELL,
                    err
                )
            })
        })
        .collect::<Result<Vec<_>, _>>()?;
    let proofs = cfg_iter!(proofs)
        .enumerate()
        .map(|(i, bytes)| {
            B::G1::from_bytes(bytes).map_err(|err| {
                format!(
                    "Failed to deserialize proof at index {}, proof {}: {}",
                    i,
                    hex::encode(bytes),
                    err
                )
            })
        })
        .collect::<Result<Vec<_>, _>>()?;
    das.verify_cell_kzg_proof_batch(&commitments, cell_indices, &cells, &proofs)
}
/// Serializes a full extended blob's field elements into per-cell byte arrays
/// (FIELD_ELEMENTS_PER_CELL elements of BYTES_PER_FIELD_ELEMENT bytes each).
fn cells_elements_to_cells_bytes<B: EcBackend>(
    bytes: &[B::Fr],
) -> Result<Vec<[u8; BYTES_PER_CELL]>, String> {
    // NOTE: chunk_size = BYTES_PER_CELL / BYTES_PER_FIELD_ELEMENT
    if bytes.len() != FIELD_ELEMENTS_PER_EXT_BLOB {
        return Err(format!(
            "Invalid field elements length. Expected {} got {}",
            FIELD_ELEMENTS_PER_EXT_BLOB,
            bytes.len(),
        ));
    }
    Ok(cfg_chunks!(bytes, FIELD_ELEMENTS_PER_CELL)
        .map(|cell| {
            let mut out = [0u8; BYTES_PER_CELL];
            // Walk destination 32-byte slots and source elements in lockstep.
            for (dst, fe) in out.chunks_mut(BYTES_PER_FIELD_ELEMENT).zip(cell.iter()) {
                dst.copy_from_slice(&fe.to_bytes());
            }
            out
        })
        .collect())
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/eth/mod.rs | kzg/src/eth/mod.rs | pub mod c_bindings;
pub mod eip_7594;
pub const FIELD_ELEMENTS_PER_BLOB: usize = 4096;
pub const BYTES_PER_G1: usize = 48;
pub const BYTES_PER_G2: usize = 96;
pub const BYTES_PER_BLOB: usize = BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB;
pub const BYTES_PER_FIELD_ELEMENT: usize = 32;
pub const BYTES_PER_PROOF: usize = 48;
pub const BYTES_PER_COMMITMENT: usize = 48;
pub const FIELD_ELEMENTS_PER_EXT_BLOB: usize = 2 * FIELD_ELEMENTS_PER_BLOB;
pub const FIELD_ELEMENTS_PER_CELL: usize = 64;
pub const BYTES_PER_CELL: usize = FIELD_ELEMENTS_PER_CELL * BYTES_PER_FIELD_ELEMENT;
pub const CELLS_PER_EXT_BLOB: usize = FIELD_ELEMENTS_PER_EXT_BLOB / FIELD_ELEMENTS_PER_CELL;
pub const RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN: [u8; 16] = *b"RCKZGCBATCH__V1_";
pub const TRUSTED_SETUP_NUM_G1_POINTS: usize = 4096;
pub const TRUSTED_SETUP_NUM_G2_POINTS: usize = 65;
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/src/eth/c_bindings.rs | kzg/src/eth/c_bindings.rs | use crate::{
eth::{CELLS_PER_EXT_BLOB, FIELD_ELEMENTS_PER_CELL},
EcBackend, Fr, DAS, G1,
};
use super::{
BYTES_PER_BLOB, BYTES_PER_CELL, BYTES_PER_COMMITMENT, BYTES_PER_FIELD_ELEMENT, BYTES_PER_PROOF,
};
use crate::alloc::{
string::{String, ToString},
vec,
vec::Vec,
};
/// C-ABI status codes returned by the exported bindings (presumably mirroring
/// the reference c-kzg library's C_KZG_RET values — TODO confirm parity).
#[repr(C)]
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum CKzgRet {
    Ok = 0,
    BadArgs = 1,
    Error = 2,
    Malloc = 3,
}
/// Fixed-size byte wrappers matching the C API's value types.
#[repr(C)]
pub struct Bytes32 {
    pub bytes: [u8; 32],
}
#[repr(C)]
pub struct Bytes48 {
    pub bytes: [u8; 48],
}
/// A serialized scalar field element (32 bytes).
#[repr(C)]
pub struct BLSFieldElement {
    pub bytes: [u8; BYTES_PER_FIELD_ELEMENT],
}
/// A serialized blob: FIELD_ELEMENTS_PER_BLOB field elements back to back.
#[repr(C)]
pub struct Blob {
    pub bytes: [u8; BYTES_PER_BLOB],
}
/// A serialized (compressed) G1 commitment.
#[repr(C)]
pub struct KZGCommitment {
    pub bytes: [u8; BYTES_PER_COMMITMENT],
}
/// A serialized (compressed) G1 proof.
#[repr(C)]
pub struct KZGProof {
    pub bytes: [u8; BYTES_PER_PROOF],
}
/// C-layout trusted-setup handle; all pointers are owned by the C side.
#[repr(C)]
pub struct CKZGSettings {
    /**
     * Roots of unity for the subgroup of size `FIELD_ELEMENTS_PER_EXT_BLOB`.
     *
     * The array contains `FIELD_ELEMENTS_PER_EXT_BLOB + 1` elements.
     * The array starts and ends with Fr::one().
     */
    pub roots_of_unity: *mut blst_fr,
    /**
     * Roots of unity for the subgroup of size `FIELD_ELEMENTS_PER_EXT_BLOB` in bit-reversed order.
     *
     * This array is derived by applying a bit-reversal permutation to `roots_of_unity`
     * excluding the last element. Essentially:
     * `brp_roots_of_unity = bit_reversal_permutation(roots_of_unity[:-1])`
     *
     * The array contains `FIELD_ELEMENTS_PER_EXT_BLOB` elements.
     */
    pub brp_roots_of_unity: *mut blst_fr,
    /**
     * Roots of unity for the subgroup of size `FIELD_ELEMENTS_PER_EXT_BLOB` in reversed order.
     *
     * It is the reversed version of `roots_of_unity`. Essentially:
     * `reverse_roots_of_unity = reverse(roots_of_unity)`
     *
     * This array is primarily used in FFTs.
     * The array contains `FIELD_ELEMENTS_PER_EXT_BLOB + 1` elements.
     * The array starts and ends with Fr::one().
     */
    pub reverse_roots_of_unity: *mut blst_fr,
    /**
     * G1 group elements from the trusted setup in monomial form.
     * The array contains `NUM_G1_POINTS = FIELD_ELEMENTS_PER_BLOB` elements.
     */
    pub g1_values_monomial: *mut blst_p1,
    /**
     * G1 group elements from the trusted setup in Lagrange form and bit-reversed order.
     * The array contains `NUM_G1_POINTS = FIELD_ELEMENTS_PER_BLOB` elements.
     */
    pub g1_values_lagrange_brp: *mut blst_p1,
    /**
     * G2 group elements from the trusted setup in monomial form.
     * The array contains `NUM_G2_POINTS` elements.
     */
    pub g2_values_monomial: *mut blst_p2,
    /** Data used during FK20 proof generation. */
    pub x_ext_fft_columns: *mut *mut blst_p1,
    /** The precomputed tables for fixed-base MSM. */
    pub tables: *mut *mut blst_p1_affine,
    /** The window size for the fixed-base MSM. */
    pub wbits: usize,
    /** The scratch size for the fixed-base MSM. */
    pub scratch_size: usize,
}
/// A serialized cell: FIELD_ELEMENTS_PER_CELL field elements back to back.
#[repr(C)]
pub struct Cell {
    pub bytes: [u8; BYTES_PER_CELL],
}
/// Deserializes a C `Blob` into field elements, mapping any invalid element
/// to `CKzgRet::BadArgs`.
///
/// # Safety
/// `blob` must be a valid, readable pointer to a `Blob`.
unsafe fn deserialize_blob<B: EcBackend>(
    blob: *const Blob,
) -> core::result::Result<Vec<B::Fr>, CKzgRet> {
    (*blob)
        .bytes
        .chunks(BYTES_PER_FIELD_ELEMENT)
        // Each chunk is already a 32-byte slice; the intermediate copy into a
        // stack array that was here previously was redundant.
        .map(|chunk| B::Fr::from_bytes(chunk).map_err(|_| CKzgRet::BadArgs))
        .collect::<Result<Vec<_>, _>>()
}
/// C-ABI helper: compute the extended-blob cells and/or per-cell KZG proofs
/// for `blob`. Either output pointer may be null, in which case that output
/// is skipped. Returns `CKzgRet::Ok` on success and `CKzgRet::BadArgs` on any
/// failure (invalid blob encoding, invalid settings, or a backend error).
///
/// # Safety
/// - `blob` and `settings` must be valid, non-null, initialized pointers.
/// - If non-null, `cells` must point to `CELLS_PER_EXT_BLOB` writable `Cell`s.
/// - If non-null, `proofs` must point to `CELLS_PER_EXT_BLOB` writable
///   `KZGProof`s.
pub unsafe fn compute_cells_and_kzg_proofs<
    B: EcBackend,
    D: DAS<B> + for<'a> TryFrom<&'a CKZGSettings, Error = String>,
>(
    cells: *mut Cell,
    proofs: *mut KZGProof,
    blob: *const Blob,
    settings: *const CKZGSettings,
) -> CKzgRet {
    // Inner fn returns `Result` so `?` can be used; the outer fn collapses
    // every error into `CKzgRet::BadArgs`.
    unsafe fn inner<B: EcBackend, D: DAS<B> + for<'a> TryFrom<&'a CKZGSettings, Error = String>>(
        cells: *mut Cell,
        proofs: *mut KZGProof,
        blob: *const Blob,
        settings: *const CKZGSettings,
    ) -> Result<(), String> {
        // A null output pointer means "caller does not want this output".
        let mut cells_rs: Option<Vec<B::Fr>> = if cells.is_null() {
            None
        } else {
            Some(vec![
                B::Fr::default();
                CELLS_PER_EXT_BLOB * FIELD_ELEMENTS_PER_CELL
            ])
        };
        let mut proofs_rs = if proofs.is_null() {
            None
        } else {
            Some(vec![B::G1::default(); CELLS_PER_EXT_BLOB])
        };
        let blob = deserialize_blob::<B>(blob).map_err(|_| "Invalid blob".to_string())?;
        let settings: D = (&*settings).try_into()?;
        settings.compute_cells_and_kzg_proofs(
            cells_rs.as_deref_mut(),
            proofs_rs.as_deref_mut(),
            &blob,
        )?;
        // Serialize the computed field elements back into the caller's cells.
        if let Some(cells_rs) = cells_rs {
            let cells = core::slice::from_raw_parts_mut(cells, CELLS_PER_EXT_BLOB);
            for (cell_rs, cell_c) in cells_rs.chunks(FIELD_ELEMENTS_PER_CELL).zip(cells) {
                cell_c.bytes.copy_from_slice(
                    &cell_rs
                        .iter()
                        .flat_map(|fr| fr.to_bytes())
                        .collect::<Vec<u8>>(),
                );
            }
        }
        // Serialize the proofs back into the caller's proof buffers.
        if let Some(proofs_rs) = proofs_rs {
            let proofs = core::slice::from_raw_parts_mut(proofs, CELLS_PER_EXT_BLOB);
            for (proof_index, proof) in proofs_rs.iter().enumerate() {
                proofs[proof_index].bytes.copy_from_slice(&proof.to_bytes());
            }
        }
        Ok(())
    }
    match inner::<B, D>(cells, proofs, blob, settings) {
        Ok(()) => CKzgRet::Ok,
        Err(_) => CKzgRet::BadArgs,
    }
}
/// C-ABI helper: recover the full set of extended-blob cells (and optionally
/// the per-cell proofs) from `num_cells` known cells at `cell_indices`.
///
/// `recovered_proofs` may be null to skip proof recovery; `recovered_cells`
/// is always written. Returns `CKzgRet::BadArgs` on any failure.
///
/// # Safety
/// - `cell_indices` and `cells` must each point to `num_cells` readable
///   elements.
/// - `recovered_cells` must point to `CELLS_PER_EXT_BLOB` writable `Cell`s.
/// - If non-null, `recovered_proofs` must point to `CELLS_PER_EXT_BLOB`
///   writable `KZGProof`s.
/// - `s` must be a valid, non-null pointer to initialized settings.
pub unsafe fn recover_cells_and_kzg_proofs<
    B: EcBackend,
    D: DAS<B> + for<'a> TryFrom<&'a CKZGSettings, Error = String>,
>(
    recovered_cells: *mut Cell,
    recovered_proofs: *mut KZGProof,
    cell_indices: *const u64,
    cells: *const Cell,
    num_cells: u64,
    s: *const CKZGSettings,
) -> CKzgRet {
    // Inner fn returns `Result` so `?` can be used; the outer fn collapses
    // every error into `CKzgRet::BadArgs`.
    unsafe fn inner<B: EcBackend, D: DAS<B> + for<'a> TryFrom<&'a CKZGSettings, Error = String>>(
        recovered_cells: *mut Cell,
        recovered_proofs: *mut KZGProof,
        cell_indices: *const u64,
        cells: *const Cell,
        num_cells: u64,
        s: *const CKZGSettings,
    ) -> Result<(), String> {
        let mut recovered_cells_rs: Vec<B::Fr> =
            vec![B::Fr::default(); FIELD_ELEMENTS_PER_CELL * CELLS_PER_EXT_BLOB];
        // Proof recovery is optional: a null pointer skips it.
        let mut recovered_proofs_rs = if recovered_proofs.is_null() {
            None
        } else {
            Some(vec![B::G1::default(); CELLS_PER_EXT_BLOB])
        };
        let cell_indicies = core::slice::from_raw_parts(cell_indices, num_cells as usize)
            .iter()
            .map(|it| *it as usize)
            .collect::<Vec<_>>();
        // Deserialize every provided cell into a flat Vec of field elements;
        // the first invalid chunk aborts with an error.
        let cells = core::slice::from_raw_parts(cells, num_cells as usize)
            .iter()
            .flat_map(|it| {
                it.bytes
                    .chunks(BYTES_PER_FIELD_ELEMENT)
                    .map(B::Fr::from_bytes)
            })
            .collect::<Result<Vec<_>, String>>()?;
        let settings: D = (&*s).try_into()?;
        settings.recover_cells_and_kzg_proofs(
            &mut recovered_cells_rs,
            recovered_proofs_rs.as_deref_mut(),
            &cell_indicies,
            &cells,
        )?;
        // Serialize recovered field elements back into the caller's cells.
        let recovered_cells = core::slice::from_raw_parts_mut(recovered_cells, CELLS_PER_EXT_BLOB);
        for (cell_c, cell_rs) in recovered_cells
            .iter_mut()
            .zip(recovered_cells_rs.chunks(FIELD_ELEMENTS_PER_CELL))
        {
            cell_c.bytes.copy_from_slice(
                &cell_rs
                    .iter()
                    .flat_map(|fr| fr.to_bytes())
                    .collect::<Vec<_>>(),
            );
        }
        // Serialize recovered proofs, if requested.
        if let Some(recovered_proofs_rs) = recovered_proofs_rs {
            let recovered_proofs =
                core::slice::from_raw_parts_mut(recovered_proofs, CELLS_PER_EXT_BLOB);
            for (proof_c, proof_rs) in recovered_proofs.iter_mut().zip(recovered_proofs_rs.iter()) {
                proof_c.bytes = proof_rs.to_bytes();
            }
        }
        Ok(())
    }
    match inner::<B, D>(
        recovered_cells,
        recovered_proofs,
        cell_indices,
        cells,
        num_cells,
        s,
    ) {
        Ok(()) => CKzgRet::Ok,
        Err(_) => CKzgRet::BadArgs,
    }
}
/// C-ABI helper: batch-verify `num_cells` cell KZG proofs, writing the
/// verdict into `*ok`.
///
/// Returns `CKzgRet::Ok` when verification ran (the boolean result is in
/// `*ok`) and `CKzgRet::BadArgs` when any input failed to deserialize; `*ok`
/// is only written on the success path.
///
/// # Safety
/// All pointers must be valid and non-null; `commitments_bytes`,
/// `cell_indices`, `cells` and `proofs_bytes` must each point to `num_cells`
/// readable elements, and `ok` must be writable.
pub unsafe fn verify_cell_kzg_proof_batch<
    B: EcBackend,
    D: DAS<B> + for<'a> TryFrom<&'a CKZGSettings, Error = String>,
>(
    ok: *mut bool,
    commitments_bytes: *const Bytes48,
    cell_indices: *const u64,
    cells: *const Cell,
    proofs_bytes: *const Bytes48,
    num_cells: u64,
    s: *const CKZGSettings,
) -> CKzgRet {
    // Inner fn returns `Result` so `?` can be used; the outer fn collapses
    // every error into `CKzgRet::BadArgs`.
    unsafe fn inner<B: EcBackend, D: DAS<B> + for<'a> TryFrom<&'a CKZGSettings, Error = String>>(
        ok: *mut bool,
        commitments_bytes: *const Bytes48,
        cell_indices: *const u64,
        cells: *const Cell,
        proofs_bytes: *const Bytes48,
        num_cells: u64,
        s: *const CKZGSettings,
    ) -> Result<(), String> {
        // Each deserialization below short-circuits on the first invalid item.
        let commitments = core::slice::from_raw_parts(commitments_bytes, num_cells as usize)
            .iter()
            .map(|bytes| B::G1::from_bytes(&bytes.bytes))
            .collect::<Result<Vec<_>, String>>()?;
        let cell_indices = core::slice::from_raw_parts(cell_indices, num_cells as usize)
            .iter()
            .map(|it| *it as usize)
            .collect::<Vec<_>>();
        let cells = core::slice::from_raw_parts(cells, num_cells as usize)
            .iter()
            .flat_map(|it| {
                it.bytes
                    .chunks(BYTES_PER_FIELD_ELEMENT)
                    .map(B::Fr::from_bytes)
            })
            .collect::<Result<Vec<_>, String>>()?;
        let proofs = core::slice::from_raw_parts(proofs_bytes, num_cells as usize)
            .iter()
            .map(|bytes| B::G1::from_bytes(&bytes.bytes))
            .collect::<Result<Vec<_>, String>>()?;
        let settings: D = (&*s).try_into()?;
        *ok = settings.verify_cell_kzg_proof_batch(&commitments, &cell_indices, &cells, &proofs)?;
        Ok(())
    }
    match inner::<B, D>(
        ok,
        commitments_bytes,
        cell_indices,
        cells,
        proofs_bytes,
        num_cells,
        s,
    ) {
        Ok(()) => CKzgRet::Ok,
        Err(_) => CKzgRet::BadArgs,
    }
}
/// Generates the `extern "C"` EIP-7594 entry points
/// (`compute_cells_and_kzg_proofs`, `recover_cells_and_kzg_proofs`,
/// `verify_cell_kzg_proof_batch`) for a concrete backend. Each generated
/// function simply forwards to the generic implementation in
/// `kzg::eth::c_bindings`, instantiated with `$backend` and its associated
/// `KZGSettings` type.
#[macro_export]
macro_rules! c_bindings_eip7594 {
    ($backend:ty) => {
        /// # Safety
        #[no_mangle]
        pub unsafe extern "C" fn compute_cells_and_kzg_proofs(
            cells: *mut kzg::eth::c_bindings::Cell,
            proofs: *mut kzg::eth::c_bindings::KZGProof,
            blob: *const kzg::eth::c_bindings::Blob,
            settings: *const kzg::eth::c_bindings::CKZGSettings,
        ) -> kzg::eth::c_bindings::CKzgRet {
            kzg::eth::c_bindings::compute_cells_and_kzg_proofs::<
                $backend,
                <$backend as kzg::EcBackend>::KZGSettings,
            >(cells, proofs, blob, settings)
        }
        /// # Safety
        #[no_mangle]
        pub unsafe extern "C" fn recover_cells_and_kzg_proofs(
            recovered_cells: *mut kzg::eth::c_bindings::Cell,
            recovered_proofs: *mut kzg::eth::c_bindings::KZGProof,
            cell_indices: *const u64,
            cells: *const kzg::eth::c_bindings::Cell,
            num_cells: u64,
            s: *const kzg::eth::c_bindings::CKZGSettings,
        ) -> kzg::eth::c_bindings::CKzgRet {
            kzg::eth::c_bindings::recover_cells_and_kzg_proofs::<
                $backend,
                <$backend as kzg::EcBackend>::KZGSettings,
            >(
                recovered_cells,
                recovered_proofs,
                cell_indices,
                cells,
                num_cells,
                s,
            )
        }
        /// # Safety
        #[no_mangle]
        pub unsafe extern "C" fn verify_cell_kzg_proof_batch(
            ok: *mut bool,
            commitments_bytes: *const kzg::eth::c_bindings::Bytes48,
            cell_indices: *const u64,
            cells: *const kzg::eth::c_bindings::Cell,
            proofs_bytes: *const kzg::eth::c_bindings::Bytes48,
            num_cells: u64,
            s: *const kzg::eth::c_bindings::CKZGSettings,
        ) -> kzg::eth::c_bindings::CKzgRet {
            kzg::eth::c_bindings::verify_cell_kzg_proof_batch::<
                $backend,
                <$backend as kzg::EcBackend>::KZGSettings,
            >(
                ok,
                commitments_bytes,
                cell_indices,
                cells,
                proofs_bytes,
                num_cells,
                s,
            )
        }
    };
}
// Below types are copied from the `blst` crate.
// They are needed so other backends do not depend on the blst runtime, but can
// still provide c-kzg-4844 compatible APIs.
/// Machine word used for all limb arrays below (matches blst's `limb_t`).
#[allow(non_camel_case_types)]
pub type limb_t = u64;
/// Scalar field (Fr) element: 4 × 64-bit limbs, layout-compatible with blst.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)]
#[allow(non_camel_case_types)]
pub struct blst_fr {
    pub l: [limb_t; 4usize],
}
/// G1 point with three base-field coordinates (x, y, z), as in blst.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)]
#[allow(non_camel_case_types)]
pub struct blst_p1 {
    pub x: blst_fp,
    pub y: blst_fp,
    pub z: blst_fp,
}
/// Base field (Fp) element: 6 × 64-bit limbs, layout-compatible with blst.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)]
#[allow(non_camel_case_types)]
pub struct blst_fp {
    pub l: [limb_t; 6usize],
}
/// G2 point with three quadratic-extension coordinates (x, y, z), as in blst.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)]
#[allow(non_camel_case_types)]
pub struct blst_p2 {
    pub x: blst_fp2,
    pub y: blst_fp2,
    pub z: blst_fp2,
}
/// Quadratic extension (Fp2) element: a pair of `blst_fp` values.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)]
#[allow(non_camel_case_types)]
pub struct blst_fp2 {
    pub fp: [blst_fp; 2usize],
}
/// Affine G1 point (x, y), as in blst.
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, PartialEq, Eq)]
#[allow(non_camel_case_types)]
pub struct blst_p1_affine {
    pub x: blst_fp,
    pub y: blst_fp,
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/kzg/tests/common_utils.rs | kzg/tests/common_utils.rs | #[cfg(test)]
pub mod tests {
    use kzg::common_utils::reverse_bit_order;
    /// `reverse_bit_order` requires a non-empty, power-of-two-length slice;
    /// exercise both rejection and acceptance at the boundaries.
    #[test]
    fn reverse_bit_order_bad_arguments() {
        // An empty slice must be rejected.
        assert!(reverse_bit_order(&mut [0u8; 0]).is_err());
        // One element is accepted (1 = 2^0); the permutation is a no-op.
        assert!(reverse_bit_order(&mut [1u8]).is_ok());
        // Three elements must fail: 3 is not a power of two.
        assert!(reverse_bit_order(&mut [1u8, 2u8, 3u8]).is_err());
        // Four elements (2^2) must succeed.
        assert!(reverse_bit_order(&mut [1u8, 2u8, 3u8, 4u8]).is_ok());
    }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/consts.rs | blst/src/consts.rs | use blst::{blst_fp, blst_fp2, blst_p1, blst_p2};
use crate::types::g1::FsG1;
use crate::types::g2::FsG2;
/// The G1 identity element, constructed from all-zero coordinates.
pub const G1_IDENTITY: FsG1 = FsG1::from_xyz(
    blst_fp { l: [0; 6] },
    blst_fp { l: [0; 6] },
    blst_fp { l: [0; 6] },
);
// NOTE(review): exact semantics depend on call sites — presumably a log2-style
// expansion factor; confirm before relying on this description.
pub const SCALE_FACTOR: u64 = 5;
/// Number of precomputed entries in [`SCALE2_ROOT_OF_UNITY`].
pub const NUM_ROOTS: usize = 32;
/// Precomputed roots of unity: entry `i` satisfies `root_i ^ (2^i) == 1`,
/// i.e. it is a root of unity of order `2^i`. Each entry is stored as a
/// `[u64; 4]` limb array; there are `NUM_ROOTS` (32) entries.
#[rustfmt::skip]
pub const SCALE2_ROOT_OF_UNITY: [[u64; 4]; 32] = [
    [0x0000000000000001, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000],
    [0xffffffff00000000, 0x53bda402fffe5bfe, 0x3339d80809a1d805, 0x73eda753299d7d48],
    [0x0001000000000000, 0xec03000276030000, 0x8d51ccce760304d0, 0x0000000000000000],
    [0x7228fd3397743f7a, 0xb38b21c28713b700, 0x8c0625cd70d77ce2, 0x345766f603fa66e7],
    [0x53ea61d87742bcce, 0x17beb312f20b6f76, 0xdd1c0af834cec32c, 0x20b1ce9140267af9],
    [0x360c60997369df4e, 0xbf6e88fb4c38fb8a, 0xb4bcd40e22f55448, 0x50e0903a157988ba],
    [0x8140d032f0a9ee53, 0x2d967f4be2f95155, 0x14a1e27164d8fdbd, 0x45af6345ec055e4d],
    [0x5130c2c1660125be, 0x98d0caac87f5713c, 0xb7c68b4d7fdd60d0, 0x6898111413588742],
    [0x4935bd2f817f694b, 0x0a0865a899e8deff, 0x6b368121ac0cf4ad, 0x4f9b4098e2e9f12e],
    [0x4541b8ff2ee0434e, 0xd697168a3a6000fe, 0x39feec240d80689f, 0x095166525526a654],
    [0x3c28d666a5c2d854, 0xea437f9626fc085e, 0x8f4de02c0f776af3, 0x325db5c3debf77a1],
    [0x4a838b5d59cd79e5, 0x55ea6811be9c622d, 0x09f1ca610a08f166, 0x6d031f1b5c49c834],
    [0xe206da11a5d36306, 0x0ad1347b378fbf96, 0xfc3e8acfe0f8245f, 0x564c0a11a0f704f4],
    [0x6fdd00bfc78c8967, 0x146b58bc434906ac, 0x2ccddea2972e89ed, 0x485d512737b1da3d],
    [0x034d2ff22a5ad9e1, 0xae4622f6a9152435, 0xdc86b01c0d477fa6, 0x56624634b500a166],
    [0xfbd047e11279bb6e, 0xc8d5f51db3f32699, 0x483405417a0cbe39, 0x3291357ee558b50d],
    [0xd7118f85cd96b8ad, 0x67a665ae1fcadc91, 0x88f39a78f1aeb578, 0x2155379d12180caa],
    [0x08692405f3b70f10, 0xcd7f2bd6d0711b7d, 0x473a2eef772c33d6, 0x224262332d8acbf4],
    [0x6f421a7d8ef674fb, 0xbb97a3bf30ce40fd, 0x652f717ae1c34bb0, 0x2d3056a530794f01],
    [0x194e8c62ecb38d9d, 0xad8e16e84419c750, 0xdf625e80d0adef90, 0x520e587a724a6955],
    [0xfece7e0e39898d4b, 0x2f69e02d265e09d9, 0xa57a6e07cb98de4a, 0x03e1c54bcb947035],
    [0xcd3979122d3ea03a, 0x46b3105f04db5844, 0xc70d0874b0691d4e, 0x47c8b5817018af4f],
    [0xc6e7a6ffb08e3363, 0xe08fec7c86389bee, 0xf2d38f10fbb8d1bb, 0x0abe6a5e5abcaa32],
    [0x5616c57de0ec9eae, 0xc631ffb2585a72db, 0x5121af06a3b51e3c, 0x73560252aa0655b2],
    [0x92cf4deb77bd779c, 0x72cf6a8029b7d7bc, 0x6e0bcd91ee762730, 0x291cf6d68823e687],
    [0xce32ef844e11a51e, 0xc0ba12bb3da64ca5, 0x0454dc1edc61a1a3, 0x019fe632fd328739],
    [0x531a11a0d2d75182, 0x02c8118402867ddc, 0x116168bffbedc11d, 0x0a0a77a3b1980c0d],
    [0xe2d0a7869f0319ed, 0xb94f1101b1d7a628, 0xece8ea224f31d25d, 0x23397a9300f8f98b],
    [0xd7b688830a4f2089, 0x6558e9e3f6ac7b41, 0x99e276b571905a7d, 0x52dd465e2f094256],
    [0x474650359d8e211b, 0x84d37b826214abc6, 0x8da40c1ef2bb4598, 0x0c83ea7744bf1bee],
    [0x694341f608c9dd56, 0xed3a181fabb30adc, 0x1339a815da8b398f, 0x2c6d4e4511657e1e],
    [0x63e7cb4906ffc93f, 0xf070bb00e28a193d, 0xad1715b02e5713b5, 0x4b5371495990693f]
];
/// BLS12-381 G1 generator point, in blst's three-coordinate (x, y, z) form.
pub const G1_GENERATOR: FsG1 = FsG1(blst_p1 {
    x: blst_fp {
        l: [
            0x5cb38790fd530c16,
            0x7817fc679976fff5,
            0x154f95c7143ba1c1,
            0xf0ae6acdf3d0e747,
            0xedce6ecc21dbf440,
            0x120177419e0bfb75,
        ],
    },
    y: blst_fp {
        l: [
            0xbaac93d50ce72271,
            0x8c22631a7918fd8e,
            0xdd595f13570725ce,
            0x51ac582950405194,
            0x0e1c8c3fad0059c0,
            0x0bbc3efc5008a26a,
        ],
    },
    z: blst_fp {
        l: [
            0x760900000002fffd,
            0xebf4000bc40c0002,
            0x5f48985753c758ba,
            0x77ce585370525745,
            0x5c071a97a256ec6d,
            0x15f65ec3fa80e493,
        ],
    },
});
/// Negation of [`G1_GENERATOR`]: identical x and z limbs, negated y.
pub const G1_NEGATIVE_GENERATOR: FsG1 = FsG1(blst_p1 {
    x: blst_fp {
        l: [
            0x5cb38790fd530c16,
            0x7817fc679976fff5,
            0x154f95c7143ba1c1,
            0xf0ae6acdf3d0e747,
            0xedce6ecc21dbf440,
            0x120177419e0bfb75,
        ],
    },
    y: blst_fp {
        l: [
            0xff526c2af318883a,
            0x92899ce4383b0270,
            0x89d7738d9fa9d055,
            0x12caf35ba344c12a,
            0x3cff1b76964b5317,
            0x0e44d2ede9774430,
        ],
    },
    z: blst_fp {
        l: [
            0x760900000002fffd,
            0xebf4000bc40c0002,
            0x5f48985753c758ba,
            0x77ce585370525745,
            0x5c071a97a256ec6d,
            0x15f65ec3fa80e493,
        ],
    },
});
/// BLS12-381 G2 generator point, with Fp2 coordinates in blst's layout.
pub const G2_GENERATOR: FsG2 = FsG2(blst_p2 {
    x: blst_fp2 {
        fp: [
            blst_fp {
                l: [
                    0xf5f28fa202940a10,
                    0xb3f5fb2687b4961a,
                    0xa1a893b53e2ae580,
                    0x9894999d1a3caee9,
                    0x6f67b7631863366b,
                    0x058191924350bcd7,
                ],
            },
            blst_fp {
                l: [
                    0xa5a9c0759e23f606,
                    0xaaa0c59dbccd60c3,
                    0x3bb17e18e2867806,
                    0x1b1ab6cc8541b367,
                    0xc2b6ed0ef2158547,
                    0x11922a097360edf3,
                ],
            },
        ],
    },
    y: blst_fp2 {
        fp: [
            blst_fp {
                l: [
                    0x4c730af860494c4a,
                    0x597cfa1f5e369c5a,
                    0xe7e6856caa0a635a,
                    0xbbefb5e96e0d495f,
                    0x07d3a975f0ef25a2,
                    0x0083fd8e7e80dae5,
                ],
            },
            blst_fp {
                l: [
                    0xadc0fc92df64b05d,
                    0x18aa270a2b1461dc,
                    0x86adac6a3be4eba0,
                    0x79495c4ec93da33a,
                    0xe7175850a43ccaed,
                    0x0b2bc2a163de1bf2,
                ],
            },
        ],
    },
    z: blst_fp2 {
        fp: [
            blst_fp {
                l: [
                    0x760900000002fffd,
                    0xebf4000bc40c0002,
                    0x5f48985753c758ba,
                    0x77ce585370525745,
                    0x5c071a97a256ec6d,
                    0x15f65ec3fa80e493,
                ],
            },
            blst_fp {
                l: [
                    0x0000000000000000,
                    0x0000000000000000,
                    0x0000000000000000,
                    0x0000000000000000,
                    0x0000000000000000,
                    0x0000000000000000,
                ],
            },
        ],
    },
});
/// Negation of [`G2_GENERATOR`]: identical x and z, negated y.
pub const G2_NEGATIVE_GENERATOR: FsG2 = FsG2(blst_p2 {
    x: blst_fp2 {
        fp: [
            blst_fp {
                l: [
                    0xf5f28fa202940a10,
                    0xb3f5fb2687b4961a,
                    0xa1a893b53e2ae580,
                    0x9894999d1a3caee9,
                    0x6f67b7631863366b,
                    0x058191924350bcd7,
                ],
            },
            blst_fp {
                l: [
                    0xa5a9c0759e23f606,
                    0xaaa0c59dbccd60c3,
                    0x3bb17e18e2867806,
                    0x1b1ab6cc8541b367,
                    0xc2b6ed0ef2158547,
                    0x11922a097360edf3,
                ],
            },
        ],
    },
    y: blst_fp2 {
        fp: [
            blst_fp {
                l: [
                    0x6d8bf5079fb65e61,
                    0xc52f05df531d63a5,
                    0x7f4a4d344ca692c9,
                    0xa887959b8577c95f,
                    0x4347fe40525c8734,
                    0x197d145bbaff0bb5,
                ],
            },
            blst_fp {
                l: [
                    0x0c3e036d209afa4e,
                    0x0601d8f4863f9e23,
                    0xe0832636bacc0a84,
                    0xeb2def362a476f84,
                    0x64044f659f0ee1e9,
                    0x0ed54f48d5a1caa7,
                ],
            },
        ],
    },
    z: blst_fp2 {
        fp: [
            blst_fp {
                l: [
                    0x760900000002fffd,
                    0xebf4000bc40c0002,
                    0x5f48985753c758ba,
                    0x77ce585370525745,
                    0x5c071a97a256ec6d,
                    0x15f65ec3fa80e493,
                ],
            },
            blst_fp {
                l: [
                    0x0000000000000000,
                    0x0000000000000000,
                    0x0000000000000000,
                    0x0000000000000000,
                    0x0000000000000000,
                    0x0000000000000000,
                ],
            },
        ],
    },
});
// NOTE(review): semantics inferred from the name — presumably the 32-byte
// scalar seed used when generating a (test) trusted setup; confirm at call sites.
pub const TRUSTED_SETUP_GENERATOR: [u8; 32usize] = [
    0xa4, 0x73, 0x31, 0x95, 0x28, 0xc8, 0xb6, 0xea, 0x4d, 0x08, 0xcc, 0x53, 0x18, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
];
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/lib.rs | blst/src/lib.rs | #![cfg_attr(not(feature = "std"), no_std)]
pub mod consts;
pub mod data_availability_sampling;
pub mod eip_4844;
pub mod eip_7594;
pub mod fft_fr;
pub mod fft_g1;
pub mod fk20_proofs;
pub mod kzg_proofs;
pub mod recovery;
pub mod types;
pub mod utils;
pub mod zero_poly;
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/eip_4844.rs | blst/src/eip_4844.rs | extern crate alloc;
#[cfg(feature = "c_bindings")]
use alloc::{boxed::Box, vec::Vec};
#[cfg(feature = "c_bindings")]
use blst::{blst_fr, blst_p1};
#[cfg(feature = "c_bindings")]
use core::ptr;
use kzg::eip_4844::load_trusted_setup_rust;
#[cfg(feature = "c_bindings")]
use kzg::{
eip_4844::{
BYTES_PER_G1, FIELD_ELEMENTS_PER_BLOB, TRUSTED_SETUP_NUM_G1_POINTS,
TRUSTED_SETUP_NUM_G2_POINTS,
},
eth::{
c_bindings::{Blob, Bytes32, Bytes48, CKZGSettings, CKzgRet, KZGCommitment, KZGProof},
FIELD_ELEMENTS_PER_CELL, FIELD_ELEMENTS_PER_EXT_BLOB,
},
Fr, G1,
};
#[cfg(all(feature = "std", feature = "c_bindings"))]
use libc::FILE;
#[cfg(feature = "std")]
use std::fs::File;
#[cfg(feature = "std")]
use std::io::Read;
#[cfg(feature = "std")]
use kzg::eip_4844::load_trusted_setup_string;
#[cfg(feature = "c_bindings")]
use crate::{
handle_ckzg_badargs,
types::{fr::FsFr, g1::FsG1, kzg_settings::FsKZGSettings},
utils::PRECOMPUTATION_TABLES,
};
#[cfg(feature = "c_bindings")]
/// Convert Rust [`FsKZGSettings`] into the C-ABI [`CKZGSettings`].
///
/// Every array is copied to the heap and deliberately leaked with
/// `Box::leak`: ownership passes to the C caller, and the memory is reclaimed
/// later by `free_trusted_setup`, which reconstructs the boxes using the same
/// lengths. `tables`/`wbits`/`scratch_size` are left null/zero; the Rust-side
/// precomputation is tracked separately (see `PRECOMPUTATION_TABLES` usage at
/// the call sites).
fn kzg_settings_to_c(rust_settings: &FsKZGSettings) -> CKZGSettings {
    use kzg::eth::c_bindings::{blst_fp, blst_fp2, blst_fr, blst_p1, blst_p2};
    CKZGSettings {
        roots_of_unity: Box::leak(
            rust_settings
                .fs
                .roots_of_unity
                .iter()
                .map(|r| blst_fr { l: r.0.l })
                .collect::<Vec<_>>()
                .into_boxed_slice(),
        )
        .as_mut_ptr(),
        brp_roots_of_unity: Box::leak(
            rust_settings
                .fs
                .brp_roots_of_unity
                .iter()
                .map(|r| blst_fr { l: r.0.l })
                .collect::<Vec<_>>()
                .into_boxed_slice(),
        )
        .as_mut_ptr(),
        reverse_roots_of_unity: Box::leak(
            rust_settings
                .fs
                .reverse_roots_of_unity
                .iter()
                .map(|r| blst_fr { l: r.0.l })
                .collect::<Vec<_>>()
                .into_boxed_slice(),
        )
        .as_mut_ptr(),
        g1_values_monomial: Box::leak(
            rust_settings
                .g1_values_monomial
                .iter()
                .map(|r| blst_p1 {
                    x: blst_fp { l: r.0.x.l },
                    y: blst_fp { l: r.0.y.l },
                    z: blst_fp { l: r.0.z.l },
                })
                .collect::<Vec<_>>()
                .into_boxed_slice(),
        )
        .as_mut_ptr(),
        g1_values_lagrange_brp: Box::leak(
            rust_settings
                .g1_values_lagrange_brp
                .iter()
                .map(|r| blst_p1 {
                    x: blst_fp { l: r.0.x.l },
                    y: blst_fp { l: r.0.y.l },
                    z: blst_fp { l: r.0.z.l },
                })
                .collect::<Vec<_>>()
                .into_boxed_slice(),
        )
        .as_mut_ptr(),
        g2_values_monomial: Box::leak(
            rust_settings
                .g2_values_monomial
                .iter()
                .map(|r| blst_p2 {
                    x: blst_fp2 {
                        fp: [blst_fp { l: r.0.x.fp[0].l }, blst_fp { l: r.0.x.fp[1].l }],
                    },
                    y: blst_fp2 {
                        fp: [blst_fp { l: r.0.y.fp[0].l }, blst_fp { l: r.0.y.fp[1].l }],
                    },
                    z: blst_fp2 {
                        fp: [blst_fp { l: r.0.z.fp[0].l }, blst_fp { l: r.0.z.fp[1].l }],
                    },
                })
                .collect::<Vec<_>>()
                .into_boxed_slice(),
        )
        .as_mut_ptr(),
        // Nested leak: one boxed slice per column, plus the boxed slice of
        // column pointers itself; both levels are freed in free_trusted_setup.
        x_ext_fft_columns: Box::leak(
            rust_settings
                .x_ext_fft_columns
                .iter()
                .map(|r| {
                    Box::leak(
                        r.iter()
                            .map(|r| blst_p1 {
                                x: blst_fp { l: r.0.x.l },
                                y: blst_fp { l: r.0.y.l },
                                z: blst_fp { l: r.0.z.l },
                            })
                            .collect::<Vec<_>>()
                            .into_boxed_slice(),
                    )
                    .as_mut_ptr()
                })
                .collect::<Vec<_>>()
                .into_boxed_slice(),
        )
        .as_mut_ptr(),
        // Fixed-base MSM precompute is not exposed through the C struct.
        tables: core::ptr::null_mut(),
        wbits: 0,
        scratch_size: 0,
    }
}
#[cfg(feature = "std")]
/// Load a trusted setup from the text file at `filepath`.
///
/// Reads the whole file, parses it with [`load_trusted_setup_string`], and
/// builds the backend settings via [`load_trusted_setup_rust`].
///
/// # Errors
/// Returns a descriptive `String` if the file cannot be opened or read, or if
/// parsing/loading the setup fails.
pub fn load_trusted_setup_filename_rust(
    filepath: &str,
) -> Result<crate::types::kzg_settings::FsKZGSettings, alloc::string::String> {
    let mut contents = String::new();
    File::open(filepath)
        .map_err(|_| "Unable to open file".to_string())?
        .read_to_string(&mut contents)
        .map_err(|_| "Unable to read file".to_string())?;
    let (g1_monomial_bytes, g1_lagrange_bytes, g2_monomial_bytes) =
        load_trusted_setup_string(&contents)?;
    load_trusted_setup_rust(&g1_monomial_bytes, &g1_lagrange_bytes, &g2_monomial_bytes)
}
/// Compute the KZG commitment for `blob` and write it to `*out`.
///
/// Returns `CKzgRet::BadArgs` when the settings or blob fail validation.
///
/// # Safety
/// `out` and `blob` must be valid, non-null pointers.
#[cfg(feature = "c_bindings")]
#[no_mangle]
pub unsafe extern "C" fn blob_to_kzg_commitment(
    out: *mut KZGCommitment,
    blob: *const Blob,
    s: &CKZGSettings,
) -> CKzgRet {
    use kzg::eip_4844::blob_to_kzg_commitment_raw;
    let settings: FsKZGSettings = handle_ckzg_badargs!(s.try_into());
    let result = handle_ckzg_badargs!(blob_to_kzg_commitment_raw((*blob).bytes, &settings));
    (*out).bytes = result.to_bytes();
    CKzgRet::Ok
}
/// c-kzg-4844 compatible `load_trusted_setup`: build settings from raw
/// monomial/Lagrange G1 bytes and monomial G2 bytes.
///
/// `*out` is first overwritten with an all-null struct so a failed load
/// leaves well-defined contents. On success the converted C settings are
/// stored in `*out` and the Rust-side precomputation is registered in
/// `PRECOMPUTATION_TABLES`, keyed by the C struct. Also updates the global
/// `TRUSTED_SETUP_NUM_G1_POINTS` from the supplied byte count. The
/// `_precompute` argument is currently ignored.
///
/// # Safety
/// `out` must be valid and writable; each byte pointer must reference the
/// corresponding number of readable bytes.
#[cfg(feature = "c_bindings")]
#[no_mangle]
pub unsafe extern "C" fn load_trusted_setup(
    out: *mut CKZGSettings,
    g1_monomial_bytes: *const u8,
    num_g1_monomial_bytes: u64,
    g1_lagrange_bytes: *const u8,
    num_g1_lagrange_bytes: u64,
    g2_monomial_bytes: *const u8,
    num_g2_monomial_bytes: u64,
    _precompute: u64,
) -> CKzgRet {
    // Zero the output first so early returns leave a consistent struct.
    *out = CKZGSettings {
        brp_roots_of_unity: ptr::null_mut(),
        roots_of_unity: ptr::null_mut(),
        reverse_roots_of_unity: ptr::null_mut(),
        g1_values_monomial: ptr::null_mut(),
        g1_values_lagrange_brp: ptr::null_mut(),
        g2_values_monomial: ptr::null_mut(),
        x_ext_fft_columns: ptr::null_mut(),
        tables: ptr::null_mut(),
        wbits: 0,
        scratch_size: 0,
    };
    let g1_monomial_bytes =
        core::slice::from_raw_parts(g1_monomial_bytes, num_g1_monomial_bytes as usize);
    let g1_lagrange_bytes =
        core::slice::from_raw_parts(g1_lagrange_bytes, num_g1_lagrange_bytes as usize);
    let g2_monomial_bytes =
        core::slice::from_raw_parts(g2_monomial_bytes, num_g2_monomial_bytes as usize);
    TRUSTED_SETUP_NUM_G1_POINTS = num_g1_monomial_bytes as usize / BYTES_PER_G1;
    let mut settings = handle_ckzg_badargs!(load_trusted_setup_rust(
        g1_monomial_bytes,
        g1_lagrange_bytes,
        g2_monomial_bytes
    ));
    let c_settings = kzg_settings_to_c(&settings);
    // Move the precomputation table out of the Rust settings and key it by
    // the C struct so later calls can find it again.
    PRECOMPUTATION_TABLES.save_precomputation(settings.precomputation.take(), &c_settings);
    *out = c_settings;
    CKzgRet::Ok
}
/// c-kzg-4844 compatible `load_trusted_setup_file`: read a trusted setup from
/// an already-open C `FILE*` and populate `*out`.
///
/// `*out` is first overwritten with an all-null struct so a failed load
/// leaves well-defined contents.
///
/// # Safety
/// `out` must be valid and writable; `in_` must be a readable, open `FILE*`.
#[cfg(all(feature = "std", feature = "c_bindings"))]
#[no_mangle]
pub unsafe extern "C" fn load_trusted_setup_file(
    out: *mut CKZGSettings,
    in_: *mut FILE,
) -> CKzgRet {
    *out = CKZGSettings {
        brp_roots_of_unity: ptr::null_mut(),
        roots_of_unity: ptr::null_mut(),
        reverse_roots_of_unity: ptr::null_mut(),
        g1_values_monomial: ptr::null_mut(),
        g1_values_lagrange_brp: ptr::null_mut(),
        g2_values_monomial: ptr::null_mut(),
        x_ext_fft_columns: ptr::null_mut(),
        tables: ptr::null_mut(),
        wbits: 0,
        scratch_size: 0,
    };
    // NOTE(review): the read buffer is a fixed 1 MiB — a larger setup file
    // would be silently truncated here. Confirm setup files always fit.
    let mut buf = vec![0u8; 1024 * 1024];
    let len: usize = libc::fread(buf.as_mut_ptr() as *mut libc::c_void, 1, buf.len(), in_);
    let s = handle_ckzg_badargs!(String::from_utf8(buf[..len].to_vec()));
    let (g1_monomial_bytes, g1_lagrange_bytes, g2_monomial_bytes) =
        handle_ckzg_badargs!(load_trusted_setup_string(&s));
    TRUSTED_SETUP_NUM_G1_POINTS = g1_monomial_bytes.len() / BYTES_PER_G1;
    if TRUSTED_SETUP_NUM_G1_POINTS != FIELD_ELEMENTS_PER_BLOB {
        // Helps pass the Java test "shouldThrowExceptionOnIncorrectTrustedSetupFromFile",
        // as well as 5 others that pass only if this one passes (likely because Java doesn't
        // deallocate its KZGSettings pointer when no exception is thrown).
        return CKzgRet::BadArgs;
    }
    let mut settings = handle_ckzg_badargs!(load_trusted_setup_rust(
        &g1_monomial_bytes,
        &g1_lagrange_bytes,
        &g2_monomial_bytes
    ));
    let c_settings = kzg_settings_to_c(&settings);
    PRECOMPUTATION_TABLES.save_precomputation(settings.precomputation.take(), &c_settings);
    *out = c_settings;
    CKzgRet::Ok
}
/// Compute the KZG proof for `blob` against `commitment_bytes`, writing it
/// to `*out`. Returns `CKzgRet::BadArgs` on any validation failure.
///
/// # Safety
/// `out`, `blob` and `commitment_bytes` must be valid, non-null pointers.
#[cfg(feature = "c_bindings")]
#[no_mangle]
pub unsafe extern "C" fn compute_blob_kzg_proof(
    out: *mut KZGProof,
    blob: *const Blob,
    commitment_bytes: *const Bytes48,
    s: &CKZGSettings,
) -> CKzgRet {
    use kzg::eip_4844::compute_blob_kzg_proof_raw;
    let settings: FsKZGSettings = handle_ckzg_badargs!(s.try_into());
    let proof = handle_ckzg_badargs!(compute_blob_kzg_proof_raw(
        (*blob).bytes,
        (*commitment_bytes).bytes,
        &settings
    ));
    (*out).bytes = proof.to_bytes();
    CKzgRet::Ok
}
/// Free every heap allocation inside a [`CKZGSettings`] previously produced
/// by `kzg_settings_to_c` (via `load_trusted_setup`/`load_trusted_setup_file`).
///
/// Each field is reclaimed by rebuilding the `Box` with exactly the length
/// that was leaked at allocation time, then nulled so a second call on the
/// same struct is a no-op for that field. The associated precomputation
/// entry is removed from `PRECOMPUTATION_TABLES` first.
///
/// # Safety
/// `s` must be null or point to settings created by this crate's loaders;
/// the pointers inside must not have been freed elsewhere.
#[cfg(feature = "c_bindings")]
#[no_mangle]
pub unsafe extern "C" fn free_trusted_setup(s: *mut CKZGSettings) {
    if s.is_null() {
        return;
    }
    PRECOMPUTATION_TABLES.remove_precomputation(&*s);
    // Lengths below must mirror the allocation sizes in kzg_settings_to_c.
    if !(*s).roots_of_unity.is_null() {
        let v = Box::from_raw(core::slice::from_raw_parts_mut(
            (*s).roots_of_unity,
            FIELD_ELEMENTS_PER_EXT_BLOB + 1,
        ));
        drop(v);
        (*s).roots_of_unity = ptr::null_mut();
    }
    if !(*s).brp_roots_of_unity.is_null() {
        let v = Box::from_raw(core::slice::from_raw_parts_mut(
            (*s).brp_roots_of_unity,
            FIELD_ELEMENTS_PER_EXT_BLOB,
        ));
        drop(v);
        (*s).brp_roots_of_unity = ptr::null_mut();
    }
    if !(*s).reverse_roots_of_unity.is_null() {
        let v = Box::from_raw(core::slice::from_raw_parts_mut(
            (*s).reverse_roots_of_unity,
            FIELD_ELEMENTS_PER_EXT_BLOB + 1,
        ));
        drop(v);
        (*s).reverse_roots_of_unity = ptr::null_mut();
    }
    if !(*s).g1_values_monomial.is_null() {
        let v = Box::from_raw(core::slice::from_raw_parts_mut(
            (*s).g1_values_monomial,
            FIELD_ELEMENTS_PER_BLOB,
        ));
        drop(v);
        (*s).g1_values_monomial = ptr::null_mut();
    }
    if !(*s).g1_values_lagrange_brp.is_null() {
        let v = Box::from_raw(core::slice::from_raw_parts_mut(
            (*s).g1_values_lagrange_brp,
            FIELD_ELEMENTS_PER_BLOB,
        ));
        drop(v);
        (*s).g1_values_lagrange_brp = ptr::null_mut();
    }
    if !(*s).g2_values_monomial.is_null() {
        let v = Box::from_raw(core::slice::from_raw_parts_mut(
            (*s).g2_values_monomial,
            TRUSTED_SETUP_NUM_G2_POINTS,
        ));
        drop(v);
        (*s).g2_values_monomial = ptr::null_mut();
    }
    // Two-level allocation: free every column, then the pointer array itself.
    if !(*s).x_ext_fft_columns.is_null() {
        let x_ext_fft_columns = core::slice::from_raw_parts_mut(
            (*s).x_ext_fft_columns,
            2 * ((FIELD_ELEMENTS_PER_EXT_BLOB / 2) / FIELD_ELEMENTS_PER_CELL),
        );
        for column in x_ext_fft_columns.iter_mut() {
            if !(*column).is_null() {
                let v = Box::from_raw(core::slice::from_raw_parts_mut(
                    *column,
                    FIELD_ELEMENTS_PER_CELL,
                ));
                drop(v);
                *column = ptr::null_mut();
            }
        }
        let v = Box::from_raw(x_ext_fft_columns);
        drop(v);
        (*s).x_ext_fft_columns = ptr::null_mut();
    }
}
/// Verify a KZG proof for the claimed evaluation `y` at point `z` against
/// `commitment_bytes`, writing the verdict to `*ok`. Returns
/// `CKzgRet::BadArgs` if any input fails validation; `*ok` is only written
/// on success.
///
/// # Safety
/// All pointer arguments must be valid and non-null.
#[cfg(feature = "c_bindings")]
#[no_mangle]
pub unsafe extern "C" fn verify_kzg_proof(
    ok: *mut bool,
    commitment_bytes: *const Bytes48,
    z_bytes: *const Bytes32,
    y_bytes: *const Bytes32,
    proof_bytes: *const Bytes48,
    s: &CKZGSettings,
) -> CKzgRet {
    use kzg::eip_4844::verify_kzg_proof_raw;
    let settings: FsKZGSettings = handle_ckzg_badargs!(s.try_into());
    let result = handle_ckzg_badargs!(verify_kzg_proof_raw(
        (*commitment_bytes).bytes,
        (*z_bytes).bytes,
        (*y_bytes).bytes,
        (*proof_bytes).bytes,
        &settings
    ));
    *ok = result;
    CKzgRet::Ok
}
/// Verify a blob KZG proof against its commitment, writing the verdict to
/// `*ok`. Returns `CKzgRet::BadArgs` if any input fails validation; `*ok`
/// is only written on success.
///
/// # Safety
/// All pointer arguments must be valid and non-null.
#[cfg(feature = "c_bindings")]
#[no_mangle]
pub unsafe extern "C" fn verify_blob_kzg_proof(
    ok: *mut bool,
    blob: *const Blob,
    commitment_bytes: *const Bytes48,
    proof_bytes: *const Bytes48,
    s: &CKZGSettings,
) -> CKzgRet {
    use kzg::eip_4844::verify_blob_kzg_proof_raw;
    let settings: FsKZGSettings = handle_ckzg_badargs!(s.try_into());
    let result = handle_ckzg_badargs!(verify_blob_kzg_proof_raw(
        (*blob).bytes,
        (*commitment_bytes).bytes,
        (*proof_bytes).bytes,
        &settings,
    ));
    *ok = result;
    CKzgRet::Ok
}
/// Batch-verify `n` blob KZG proofs, writing the verdict to `*ok`.
///
/// `*ok` is initialized to `false` before the fallible work, so callers see
/// a failing verdict even if conversion of the settings bails out.
///
/// # Safety
/// `blobs`, `commitments_bytes` and `proofs_bytes` must each point to `n`
/// readable elements; `ok` must be valid and writable.
#[cfg(feature = "c_bindings")]
#[no_mangle]
pub unsafe extern "C" fn verify_blob_kzg_proof_batch(
    ok: *mut bool,
    blobs: *const Blob,
    commitments_bytes: *const Bytes48,
    proofs_bytes: *const Bytes48,
    n: usize,
    s: &CKZGSettings,
) -> CKzgRet {
    use kzg::eip_4844::verify_blob_kzg_proof_batch_raw;
    let raw_blobs = core::slice::from_raw_parts(blobs, n)
        .iter()
        .map(|blob| blob.bytes)
        .collect::<Vec<_>>();
    let raw_commitments = core::slice::from_raw_parts(commitments_bytes, n)
        .iter()
        .map(|c| c.bytes)
        .collect::<Vec<_>>();
    let raw_proofs = core::slice::from_raw_parts(proofs_bytes, n)
        .iter()
        .map(|p| p.bytes)
        .collect::<Vec<_>>();
    // Default verdict in case settings conversion or verification errors out.
    *ok = false;
    let settings: FsKZGSettings = handle_ckzg_badargs!(s.try_into());
    let result = handle_ckzg_badargs!(verify_blob_kzg_proof_batch_raw(
        &raw_blobs,
        &raw_commitments,
        &raw_proofs,
        &settings
    ));
    *ok = result;
    CKzgRet::Ok
}
/// Compute a KZG proof for `blob` at evaluation point `z`, writing the proof
/// to `*proof_out` and the evaluation value to `*y_out`. Returns
/// `CKzgRet::BadArgs` on any validation failure.
///
/// # Safety
/// All pointer arguments must be valid and non-null.
#[cfg(feature = "c_bindings")]
#[no_mangle]
pub unsafe extern "C" fn compute_kzg_proof(
    proof_out: *mut KZGProof,
    y_out: *mut Bytes32,
    blob: *const Blob,
    z_bytes: *const Bytes32,
    s: &CKZGSettings,
) -> CKzgRet {
    use kzg::eip_4844::compute_kzg_proof_raw;
    let settings: FsKZGSettings = handle_ckzg_badargs!(s.try_into());
    let (proof_out_tmp, fry_tmp) = handle_ckzg_badargs!(compute_kzg_proof_raw(
        (*blob).bytes,
        (*z_bytes).bytes,
        &settings
    ));
    (*proof_out).bytes = proof_out_tmp.to_bytes();
    (*y_out).bytes = fry_tmp.to_bytes();
    CKzgRet::Ok
}
/// Compute the Fiat-Shamir evaluation challenge for `blob` and `commitment`,
/// writing it to `*eval_challenge_out`.
///
/// # Safety
/// All pointers must be valid and non-null, and `blob` must contain a valid
/// blob encoding.
#[cfg(feature = "c_bindings")]
#[no_mangle]
pub unsafe extern "C" fn compute_challenge(
    eval_challenge_out: *mut blst_fr,
    blob: *const Blob,
    commitment: *const blst_p1,
) {
    use kzg::eip_4844::{bytes_to_blob, compute_challenge_rust};
    // NOTE(review): this `unwrap` panics (across the FFI boundary) on an
    // invalid blob, since the function has no error return — confirm all
    // callers guarantee blob validity.
    let output = compute_challenge_rust::<FsFr, FsG1>(
        &bytes_to_blob(&(*blob).bytes).unwrap(),
        &FsG1(*commitment),
    );
    *eval_challenge_out = output.0
}
/// Deserialize and validate a 48-byte G1 commitment into `*out`. Returns
/// `CKzgRet::BadArgs` on invalid encoding.
///
/// # Safety
/// `out` and `b` must be valid, non-null pointers.
#[cfg(feature = "c_bindings")]
#[no_mangle]
pub unsafe extern "C" fn bytes_to_kzg_commitment(out: *mut blst_p1, b: *const Bytes48) -> CKzgRet {
    *out = handle_ckzg_badargs!(FsG1::from_bytes(&(*b).bytes)).0;
    CKzgRet::Ok
}
/// Serialize a scalar field element into its 32-byte representation.
///
/// # Safety
/// `out` and `inp` must be valid, non-null pointers.
#[cfg(feature = "c_bindings")]
#[no_mangle]
pub unsafe extern "C" fn bytes_from_bls_field(out: *mut Bytes32, inp: *const blst_fr) {
    (*out).bytes = FsFr(*inp).to_bytes();
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/kzg_proofs.rs | blst/src/kzg_proofs.rs | extern crate alloc;
use crate::types::fp::FsFp;
use crate::types::g1::FsG1;
use crate::types::{fr::FsFr, g1::FsG1Affine};
use crate::types::g1::FsG1ProjAddAffine;
use kzg::msm::{msm_impls::msm, precompute::PrecomputationTable};
use crate::types::g2::FsG2;
use blst::{
blst_fp12_is_one, blst_p1_affine, blst_p1_cneg, blst_p1_to_affine, blst_p2_affine,
blst_p2_to_affine, Pairing,
};
use kzg::PairingVerify;
/// Pairing-equation check for blst points; delegates to [`pairings_verify`].
impl PairingVerify<FsG1, FsG2> for FsG1 {
    fn verify(a1: &FsG1, a2: &FsG2, b1: &FsG1, b2: &FsG2) -> bool {
        pairings_verify(a1, a2, b1, b2)
    }
}
/// Compute the multi-scalar multiplication `out = Σ scalars[i] * points[i]`
/// over the first `len` entries.
///
/// With the `sppark` feature: inputs with `len < 8` use a plain
/// double-and-add loop, larger inputs are dispatched to the sppark MSM
/// (the prepared variant when a `precomputation` table is supplied).
/// Without `sppark`, the generic CPU MSM from the `kzg` crate is used.
pub fn g1_linear_combination(
    out: &mut FsG1,
    points: &[FsG1],
    scalars: &[FsFr],
    len: usize,
    precomputation: Option<&PrecomputationTable<FsFr, FsG1, FsFp, FsG1Affine, FsG1ProjAddAffine>>,
) {
    #[cfg(feature = "sppark")]
    {
        use blst::{blst_fr, blst_scalar, blst_scalar_from_fr};
        use kzg::{G1Mul, G1};
        // Tiny inputs: MSM setup cost outweighs the naive loop.
        if len < 8 {
            *out = FsG1::default();
            for i in 0..len {
                let tmp = points[i].mul(&scalars[i]);
                out.add_or_dbl_assign(&tmp);
            }
            return;
        }
        // Reinterpret the scalar slice as raw blst_fr values for sppark.
        let scalars =
            unsafe { alloc::slice::from_raw_parts(scalars.as_ptr() as *const blst_fr, len) };
        let point = if let Some(precomputation) = precomputation {
            rust_kzg_blst_sppark::multi_scalar_mult_prepared(precomputation.table, scalars)
        } else {
            // NOTE(review): `&points` is `&&[FsG1]` and relies on auto-deref;
            // this cfg-gated path cannot be checked here — confirm it still
            // builds with the `sppark` feature enabled.
            let affines = kzg::msm::msm_impls::batch_convert::<FsG1, FsFp, FsG1Affine>(&points);
            let affines = unsafe {
                alloc::slice::from_raw_parts(affines.as_ptr() as *const blst_p1_affine, len)
            };
            rust_kzg_blst_sppark::multi_scalar_mult(&affines[0..len], &scalars)
        };
        *out = FsG1(point);
    }
    #[cfg(not(feature = "sppark"))]
    {
        *out = msm::<FsG1, FsFp, FsG1Affine, FsG1ProjAddAffine, FsFr>(
            points,
            scalars,
            len,
            precomputation,
        );
    }
}
/// Check the pairing equation `e(a1, a2) == e(b1, b2)`.
///
/// Implemented by negating `a1` and testing that `e(-a1, a2) * e(b1, b2)`
/// equals one after a single final exponentiation.
pub fn pairings_verify(a1: &FsG1, a2: &FsG2, b1: &FsG1, b2: &FsG2) -> bool {
    let mut aa1 = blst_p1_affine::default();
    let mut bb1 = blst_p1_affine::default();
    let mut aa2 = blst_p2_affine::default();
    let mut bb2 = blst_p2_affine::default();
    // As an optimisation, we want to invert one of the pairings,
    // so we negate one of the points.
    let mut a1neg: FsG1 = *a1;
    // SAFETY: all pointers handed to blst reference local, initialized values.
    unsafe {
        blst_p1_cneg(&mut a1neg.0, true);
        blst_p1_to_affine(&mut aa1, &a1neg.0);
        blst_p1_to_affine(&mut bb1, &b1.0);
        blst_p2_to_affine(&mut aa2, &a2.0);
        blst_p2_to_affine(&mut bb2, &b2.0);
        let dst = [0u8; 3];
        let mut pairing_blst = Pairing::new(false, &dst);
        pairing_blst.raw_aggregate(&aa2, &aa1);
        pairing_blst.raw_aggregate(&bb2, &bb1);
        // Product of the two (one negated) pairings must be the identity.
        let gt_point = pairing_blst.as_fp12().final_exp();
        blst_fp12_is_one(&gt_point)
    }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/eip_7594.rs | blst/src/eip_7594.rs | extern crate alloc;
use kzg::EcBackend;
use crate::types::fft_settings::FsFFTSettings;
use crate::types::fp::FsFp;
use crate::types::g1::FsG1;
use crate::types::g1::FsG1Affine;
use crate::types::g1::FsG1ProjAddAffine;
use crate::types::g2::FsG2;
use crate::types::kzg_settings::FsKZGSettings;
use crate::types::poly::FsPoly;
use crate::types::fr::FsFr;
/// Marker type selecting the blst-backed implementations of the generic
/// `kzg` traits.
pub struct BlstBackend;
impl EcBackend for BlstBackend {
    type Fr = FsFr;
    type G1Fp = FsFp;
    type G1Affine = FsG1Affine;
    type G1 = FsG1;
    type G2 = FsG2;
    type Poly = FsPoly;
    type FFTSettings = FsFFTSettings;
    type KZGSettings = FsKZGSettings;
    type G1ProjAddAffine = FsG1ProjAddAffine;
}
// Emit the c-kzg-4844 compatible EIP-7594 `extern "C"` bindings for this backend.
#[cfg(feature = "c_bindings")]
kzg::c_bindings_eip7594!(BlstBackend);
#[cfg(feature = "c_bindings")]
#[no_mangle]
pub unsafe extern "C" fn compute_verify_cell_kzg_proof_batch_challenge(
challenge_out: *mut blst::blst_fr,
commitment_bytes: *const kzg::eth::c_bindings::Bytes48,
num_commitments: u64,
commitment_indices: *const u64,
cell_indices: *const u64,
cells: *const kzg::eth::c_bindings::Cell,
proofs_bytes: *const kzg::eth::c_bindings::Bytes48,
num_cells: u64,
) -> kzg::eth::c_bindings::CKzgRet {
use crate::handle_ckzg_badargs;
use kzg::{eip_4844::BYTES_PER_FIELD_ELEMENT, Fr, G1};
*challenge_out = blst::blst_fr::default();
let commitment_bytes =
unsafe { core::slice::from_raw_parts(commitment_bytes, num_commitments as usize) };
let commitments = handle_ckzg_badargs!(commitment_bytes
.iter()
.map(|v| FsG1::from_bytes(&v.bytes))
.collect::<Result<Vec<_>, _>>());
let commitment_indices =
unsafe { core::slice::from_raw_parts(commitment_indices, num_cells as usize) };
let commitment_indices = commitment_indices
.iter()
.map(|v| *v as usize)
.collect::<Vec<_>>();
let cell_indices = unsafe { core::slice::from_raw_parts(cell_indices, num_cells as usize) };
let cell_indices = cell_indices.iter().map(|c| *c as usize).collect::<Vec<_>>();
let cells = unsafe { core::slice::from_raw_parts(cells, num_cells as usize) };
let cells = handle_ckzg_badargs!(cells
.iter()
.flat_map(|c| c
.bytes
.chunks(BYTES_PER_FIELD_ELEMENT)
.map(|bytes| FsFr::from_bytes(&bytes)))
.collect::<Result<Vec<_>, _>>());
let proofs_bytes = unsafe { core::slice::from_raw_parts(proofs_bytes, num_cells as usize) };
let proofs = handle_ckzg_badargs!(proofs_bytes
.iter()
.map(|b| FsG1::from_bytes(&b.bytes))
.collect::<Result<Vec<_>, _>>());
let challenge = handle_ckzg_badargs!(
<FsKZGSettings as kzg::DAS<BlstBackend>>::compute_verify_cell_kzg_proof_batch_challenge(
kzg::eth::FIELD_ELEMENTS_PER_CELL,
&commitments,
&commitment_indices,
&cell_indices,
&cells,
&proofs,
kzg::eth::FIELD_ELEMENTS_PER_BLOB
)
);
*challenge_out = challenge.0;
kzg::eth::c_bindings::CKzgRet::Ok
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/zero_poly.rs | blst/src/zero_poly.rs | extern crate alloc;
use alloc::string::String;
use alloc::vec;
use alloc::vec::Vec;
use core::cmp::{min, Ordering};
use kzg::{common_utils::next_pow_of_2, FFTFr, Fr, ZeroPoly};
use crate::types::fft_settings::FsFFTSettings;
use crate::types::fr::FsFr;
use crate::types::poly::FsPoly;
#[cfg(feature = "parallel")]
use rayon::prelude::*;
use smallvec::{smallvec, SmallVec};
// Can be tuned & optimized (must be a power of 2)
const DEGREE_OF_PARTIAL: usize = 256;
// Can be tuned & optimized (but must be a power of 2)
const REDUCTION_FACTOR: usize = 4;
/// Pad given poly it with zeros to new length
pub fn pad_poly(mut poly: Vec<FsFr>, new_length: usize) -> Result<Vec<FsFr>, String> {
if new_length < poly.len() {
return Err(String::from(
"new_length must be longer or equal to poly length",
));
}
poly.resize(new_length, FsFr::zero());
Ok(poly)
}
/// Pad given poly coefficients it with zeros to new length
pub fn pad_poly_coeffs<const N: usize, T>(
mut coeffs: SmallVec<[T; N]>,
new_length: usize,
) -> Result<SmallVec<[T; N]>, String>
where
T: Default + Clone,
{
if new_length < coeffs.len() {
return Err(String::from(
"new_length must be longer or equal to coeffs length",
));
}
coeffs.resize(new_length, T::default());
Ok(coeffs)
}
impl FsFFTSettings {
fn do_zero_poly_mul_partial(
&self,
idxs: &[usize],
stride: usize,
) -> Result<SmallVec<[FsFr; DEGREE_OF_PARTIAL]>, String> {
if idxs.is_empty() {
return Err(String::from("idx array must not be empty"));
}
// Makes use of long multiplication in terms of (x - w_0)(x - w_1)..
let mut coeffs = SmallVec::<[FsFr; DEGREE_OF_PARTIAL]>::new();
// For the first member, store -w_0 as constant term
coeffs.push(self.roots_of_unity[idxs[0] * stride].negate());
for (i, idx) in idxs.iter().copied().enumerate().skip(1) {
// For member (x - w_i) take coefficient as -(w_i + w_{i-1} + ...)
let neg_di = self.roots_of_unity[idx * stride].negate();
coeffs.push(neg_di.add(&coeffs[i - 1]));
// Multiply all previous members by (x - w_i)
// It equals multiplying by - w_i and adding x^(i - 1) coefficient (implied multiplication by x)
for j in (1..i).rev() {
coeffs[j] = coeffs[j].mul(&neg_di).add(&coeffs[j - 1]);
}
// Multiply x^0 member by - w_i
coeffs[0] = coeffs[0].mul(&neg_di);
}
coeffs.resize(idxs.len() + 1, FsFr::one());
Ok(coeffs)
}
fn reduce_partials(
&self,
domain_size: usize,
partial_coeffs: SmallVec<[SmallVec<[FsFr; DEGREE_OF_PARTIAL]>; REDUCTION_FACTOR]>,
) -> Result<SmallVec<[FsFr; DEGREE_OF_PARTIAL]>, String> {
if !domain_size.is_power_of_two() {
return Err(String::from("Expected domain size to be a power of 2"));
}
if partial_coeffs.is_empty() {
return Err(String::from("partials must not be empty"));
}
// Calculate the resulting polynomial degree
// E.g. (a * x^n + ...) (b * x^m + ...) has a degree of x^(n+m)
let out_degree = partial_coeffs
.iter()
.map(|partial| {
// TODO: Not guaranteed by function signature that this doesn't underflow
partial.len() - 1
})
.sum::<usize>();
if out_degree + 1 > domain_size {
return Err(String::from(
"Out degree is longer than possible polynomial size in domain",
));
}
let mut partial_coeffs = partial_coeffs.into_iter();
// Pad all partial polynomials to same length, compute their FFT and multiply them together
let mut padded_partial = pad_poly_coeffs(
partial_coeffs
.next()
.expect("Not empty, checked above; qed"),
domain_size,
)?;
let mut eval_result: SmallVec<[FsFr; DEGREE_OF_PARTIAL]> =
smallvec![FsFr::zero(); domain_size];
self.fft_fr_output(&padded_partial, false, &mut eval_result)?;
for partial in partial_coeffs {
padded_partial = pad_poly_coeffs(partial, domain_size)?;
let mut evaluated_partial: SmallVec<[FsFr; DEGREE_OF_PARTIAL]> =
smallvec![FsFr::zero(); domain_size];
self.fft_fr_output(&padded_partial, false, &mut evaluated_partial)?;
eval_result
.iter_mut()
.zip(evaluated_partial.iter())
.for_each(|(eval_result, evaluated_partial)| {
*eval_result = eval_result.mul(evaluated_partial);
});
}
let mut coeffs = smallvec![FsFr::zero(); domain_size];
// Apply an inverse FFT to produce a new poly. Limit its size to out_degree + 1
self.fft_fr_output(&eval_result, true, &mut coeffs)?;
coeffs.truncate(out_degree + 1);
Ok(coeffs)
}
}
impl ZeroPoly<FsFr, FsPoly> for FsFFTSettings {
fn do_zero_poly_mul_partial(&self, idxs: &[usize], stride: usize) -> Result<FsPoly, String> {
self.do_zero_poly_mul_partial(idxs, stride)
.map(|coeffs| FsPoly {
coeffs: coeffs.into_vec(),
})
}
fn reduce_partials(&self, domain_size: usize, partials: &[FsPoly]) -> Result<FsPoly, String> {
self.reduce_partials(
domain_size,
partials
.iter()
.map(|partial| SmallVec::from_slice(&partial.coeffs))
.collect(),
)
.map(|coeffs| FsPoly {
coeffs: coeffs.into_vec(),
})
}
fn zero_poly_via_multiplication(
&self,
domain_size: usize,
missing_idxs: &[usize],
) -> Result<(Vec<FsFr>, FsPoly), String> {
let zero_eval: Vec<FsFr>;
let mut zero_poly: FsPoly;
if missing_idxs.is_empty() {
zero_eval = Vec::new();
zero_poly = FsPoly { coeffs: Vec::new() };
return Ok((zero_eval, zero_poly));
}
if missing_idxs.len() >= domain_size {
return Err(String::from("Missing idxs greater than domain size"));
} else if domain_size > self.max_width {
return Err(String::from(
"Domain size greater than fft_settings.max_width",
));
} else if !domain_size.is_power_of_two() {
return Err(String::from("Domain size must be a power of 2"));
}
let missing_per_partial = DEGREE_OF_PARTIAL - 1; // Number of missing idxs needed per partial
let domain_stride = self.max_width / domain_size;
let mut partial_count = 1 + (missing_idxs.len() - 1) / missing_per_partial; // TODO: explain why -1 is used here
let next_pow: usize = next_pow_of_2(partial_count * DEGREE_OF_PARTIAL);
let domain_ceiling = min(next_pow, domain_size);
// Calculate zero poly
if missing_idxs.len() <= missing_per_partial {
// When all idxs fit into a single multiplication
zero_poly = FsPoly {
coeffs: self
.do_zero_poly_mul_partial(missing_idxs, domain_stride)?
.into_vec(),
};
} else {
// Otherwise, construct a set of partial polynomials
// Save all constructed polynomials in a shared 'work' vector
let mut work = vec![FsFr::zero(); next_pow];
let mut partial_lens = vec![DEGREE_OF_PARTIAL; partial_count];
#[cfg(not(feature = "parallel"))]
let iter = missing_idxs
.chunks(missing_per_partial)
.zip(work.chunks_exact_mut(DEGREE_OF_PARTIAL));
#[cfg(feature = "parallel")]
let iter = missing_idxs
.par_chunks(missing_per_partial)
.zip(work.par_chunks_exact_mut(DEGREE_OF_PARTIAL));
// Insert all generated partial polynomials at degree_of_partial intervals in work vector
iter.for_each(|(missing_idxs, work)| {
let partial_coeffs = self
.do_zero_poly_mul_partial(missing_idxs, domain_stride)
.expect("`missing_idxs` is guaranteed to not be empty; qed");
let partial_coeffs = pad_poly_coeffs(partial_coeffs, DEGREE_OF_PARTIAL).expect(
"`partial.coeffs.len()` (same as `missing_idxs.len() + 1`) is \
guaranteed to be at most `degree_of_partial`; qed",
);
work[..DEGREE_OF_PARTIAL].copy_from_slice(&partial_coeffs);
});
// Adjust last length to match its actual length
partial_lens[partial_count - 1] =
1 + missing_idxs.len() - (partial_count - 1) * missing_per_partial;
// Reduce all vectors into one by reducing them w/ varying size multiplications
while partial_count > 1 {
let reduced_count = 1 + (partial_count - 1) / REDUCTION_FACTOR;
let partial_size = next_pow_of_2(partial_lens[0]);
// Step over polynomial space and produce larger multiplied polynomials
for i in 0..reduced_count {
let start = i * REDUCTION_FACTOR;
let out_end = min((start + REDUCTION_FACTOR) * partial_size, domain_ceiling);
let reduced_len = min(out_end - start * partial_size, domain_size);
let partials_num = min(REDUCTION_FACTOR, partial_count - start);
// Calculate partial views from lens and offsets
// Also update offsets to match current iteration
let partial_offset = start * partial_size;
let mut partial_coeffs = SmallVec::new();
for (partial_offset, partial_len) in (partial_offset..)
.step_by(partial_size)
.zip(partial_lens.iter().skip(i).copied())
.take(partials_num)
{
// We know the capacity required in `reduce_partials()` call below to avoid
// re-allocation
let mut coeffs = SmallVec::with_capacity(reduced_len);
coeffs.extend_from_slice(&work[partial_offset..][..partial_len]);
partial_coeffs.push(coeffs);
}
if partials_num > 1 {
let mut reduced_coeffs =
self.reduce_partials(reduced_len, partial_coeffs)?;
// Update partial length to match its length after reduction
partial_lens[i] = reduced_coeffs.len();
reduced_coeffs =
pad_poly_coeffs(reduced_coeffs, partial_size * partials_num)?;
work[partial_offset..][..reduced_coeffs.len()]
.copy_from_slice(&reduced_coeffs);
} else {
// Instead of keeping track of remaining polynomials, reuse i'th partial for start'th one
partial_lens[i] = partial_lens[start];
}
}
// Number of steps done equals the number of polynomials that we still need to reduce together
partial_count = reduced_count;
}
zero_poly = FsPoly { coeffs: work };
}
// Pad resulting poly to expected
match zero_poly.coeffs.len().cmp(&domain_size) {
Ordering::Less => {
zero_poly.coeffs = pad_poly(zero_poly.coeffs, domain_size)?;
}
Ordering::Equal => {}
Ordering::Greater => {
zero_poly.coeffs.truncate(domain_size);
}
}
// Evaluate calculated poly
zero_eval = self.fft_fr(&zero_poly.coeffs, false)?;
Ok((zero_eval, zero_poly))
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/data_availability_sampling.rs | blst/src/data_availability_sampling.rs | extern crate alloc;
use alloc::string::String;
use alloc::vec::Vec;
use core::cmp::Ordering;
use kzg::{DASExtension, Fr};
use crate::types::fft_settings::FsFFTSettings;
use crate::types::fr::FsFr;
// TODO: explain algo
impl FsFFTSettings {
pub fn das_fft_extension_stride(&self, evens: &mut [FsFr], stride: usize) {
match evens.len().cmp(&2) {
Ordering::Less => {
return;
}
Ordering::Equal => {
let x = evens[0].add(&evens[1]);
let y = evens[0].sub(&evens[1]);
let y_times_root = y.mul(&self.roots_of_unity[stride]);
evens[0] = x.add(&y_times_root);
evens[1] = x.sub(&y_times_root);
return;
}
Ordering::Greater => {}
}
let half: usize = evens.len() / 2;
for i in 0..half {
let tmp1 = evens[i].add(&evens[half + i]);
let tmp2 = evens[i].sub(&evens[half + i]);
evens[half + i] = tmp2.mul(&self.reverse_roots_of_unity[i * 2 * stride]);
evens[i] = tmp1;
}
#[cfg(feature = "parallel")]
{
if evens.len() > 32 {
let (lo, hi) = evens.split_at_mut(half);
rayon::join(
|| self.das_fft_extension_stride(hi, stride * 2),
|| self.das_fft_extension_stride(lo, stride * 2),
);
} else {
// Recurse
self.das_fft_extension_stride(&mut evens[..half], stride * 2);
self.das_fft_extension_stride(&mut evens[half..], stride * 2);
}
}
#[cfg(not(feature = "parallel"))]
{
// Recurse
self.das_fft_extension_stride(&mut evens[..half], stride * 2);
self.das_fft_extension_stride(&mut evens[half..], stride * 2);
}
for i in 0..half {
let x = evens[i];
let y = evens[half + i];
let y_times_root: FsFr = y.mul(&self.roots_of_unity[(1 + 2 * i) * stride]);
evens[i] = x.add(&y_times_root);
evens[half + i] = x.sub(&y_times_root);
}
}
}
impl DASExtension<FsFr> for FsFFTSettings {
/// Polynomial extension for data availability sampling. Given values of even indices, produce values of odd indices.
/// FFTSettings must hold at least 2 times the roots of provided evens.
/// The resulting odd indices make the right half of the coefficients of the inverse FFT of the combined indices zero.
fn das_fft_extension(&self, evens: &[FsFr]) -> Result<Vec<FsFr>, String> {
if evens.is_empty() {
return Err(String::from("A non-zero list ab expected"));
} else if !evens.len().is_power_of_two() {
return Err(String::from("A list with power-of-two length expected"));
} else if evens.len() * 2 > self.max_width {
return Err(String::from(
"Supplied list is longer than the available max width",
));
}
// In case more roots are provided with fft_settings, use a larger stride
let stride = self.max_width / (evens.len() * 2);
let mut odds = evens.to_vec();
self.das_fft_extension_stride(&mut odds, stride);
// TODO: explain why each odd member is multiplied by euclidean inverse of length
let mut inv_len = FsFr::from_u64(odds.len() as u64);
inv_len = inv_len.eucl_inverse();
let odds = odds.iter().map(|f| f.mul(&inv_len)).collect();
Ok(odds)
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/fk20_proofs.rs | blst/src/fk20_proofs.rs | extern crate alloc;
use alloc::vec;
use alloc::vec::Vec;
use kzg::{FFTFr, Fr, G1Mul, Poly, FFTG1, G1};
use crate::types::fft_settings::FsFFTSettings;
use crate::types::fr::FsFr;
use crate::types::g1::FsG1;
use crate::types::poly::FsPoly;
#[cfg(feature = "parallel")]
use rayon::prelude::*;
impl FsFFTSettings {
pub fn toeplitz_part_1(&self, x: &[FsG1]) -> Vec<FsG1> {
let n = x.len();
let n2 = n * 2;
let mut x_ext = Vec::with_capacity(n2);
x_ext.extend(x.iter().take(n));
x_ext.resize(n2, FsG1::identity());
self.fft_g1(&x_ext, false).unwrap()
}
/// poly and x_ext_fft should be of same length
pub fn toeplitz_part_2(&self, poly: &FsPoly, x_ext_fft: &[FsG1]) -> Vec<FsG1> {
let coeffs_fft = self.fft_fr(&poly.coeffs, false).unwrap();
#[cfg(feature = "parallel")]
{
coeffs_fft
.into_par_iter()
.zip(x_ext_fft)
.take(poly.len())
.map(|(coeff_fft, x_ext_fft)| x_ext_fft.mul(&coeff_fft))
.collect()
}
#[cfg(not(feature = "parallel"))]
{
coeffs_fft
.into_iter()
.zip(x_ext_fft)
.take(poly.len())
.map(|(coeff_fft, x_ext_fft)| x_ext_fft.mul(&coeff_fft))
.collect()
}
}
pub fn toeplitz_part_3(&self, h_ext_fft: &[FsG1]) -> Vec<FsG1> {
let n2 = h_ext_fft.len();
let n = n2 / 2;
let mut ret = self.fft_g1(h_ext_fft, true).unwrap();
ret[n..n2].copy_from_slice(&vec![FsG1::identity(); n2 - n]);
ret
}
}
impl FsPoly {
pub fn toeplitz_coeffs_stride(&self, offset: usize, stride: usize) -> FsPoly {
let n = self.len();
let k = n / stride;
let k2 = k * 2;
let mut ret = FsPoly::default();
ret.coeffs.push(self.coeffs[n - 1 - offset]);
let num_of_zeroes = if k + 2 < k2 { k + 2 - 1 } else { k2 - 1 };
for _ in 0..num_of_zeroes {
ret.coeffs.push(FsFr::zero());
}
let mut i = k + 2;
let mut j = 2 * stride - offset - 1;
while i < k2 {
ret.coeffs.push(self.coeffs[j]);
i += 1;
j += stride;
}
ret
}
pub fn toeplitz_coeffs_step(&self) -> FsPoly {
self.toeplitz_coeffs_stride(0, 1)
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/utils.rs | blst/src/utils.rs | extern crate alloc;
use alloc::vec::Vec;
use kzg::common_utils::log2_pow2;
use kzg::eip_4844::{hash_to_bls_field, PrecomputationTableManager};
use kzg::{FFTSettings, Fr, G1Mul, G2Mul, FFTG1};
use crate::consts::{G1_GENERATOR, G2_GENERATOR};
use crate::types::fft_settings::FsFFTSettings;
use crate::types::fp::FsFp;
use crate::types::fr::FsFr;
use crate::types::g1::{FsG1, FsG1Affine, FsG1ProjAddAffine};
use crate::types::g2::FsG2;
pub fn generate_trusted_setup(
n: usize,
secret: [u8; 32usize],
) -> (Vec<FsG1>, Vec<FsG1>, Vec<FsG2>) {
let s = hash_to_bls_field(&secret);
let mut s_pow = Fr::one();
let mut g1_monomial_values = Vec::with_capacity(n);
let mut g2_monomial_values = Vec::with_capacity(n);
for _ in 0..n {
g1_monomial_values.push(G1_GENERATOR.mul(&s_pow));
g2_monomial_values.push(G2_GENERATOR.mul(&s_pow));
s_pow = s_pow.mul(&s);
}
let s = FsFFTSettings::new(log2_pow2(n)).unwrap();
let g1_lagrange_values = s.fft_g1(&g1_monomial_values, true).unwrap();
(g1_monomial_values, g1_lagrange_values, g2_monomial_values)
}
pub(crate) static mut PRECOMPUTATION_TABLES: PrecomputationTableManager<
FsFr,
FsG1,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
> = PrecomputationTableManager::new();
#[cfg(feature = "c_bindings")]
#[macro_export]
macro_rules! handle_ckzg_badargs {
($x: expr) => {
match $x {
Ok(value) => value,
Err(_) => return kzg::eth::c_bindings::CKzgRet::BadArgs,
}
};
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/fft_g1.rs | blst/src/fft_g1.rs | extern crate alloc;
use alloc::string::String;
use alloc::vec;
use alloc::vec::Vec;
use kzg::{Fr, G1Mul, FFTG1, G1};
use crate::types::fft_settings::FsFFTSettings;
use crate::types::fr::FsFr;
use crate::types::g1::FsG1;
pub fn fft_g1_fast(
ret: &mut [FsG1],
data: &[FsG1],
stride: usize,
roots: &[FsFr],
roots_stride: usize,
) {
let half = ret.len() / 2;
if half > 0 {
#[cfg(feature = "parallel")]
{
let (lo, hi) = ret.split_at_mut(half);
rayon::join(
|| fft_g1_fast(lo, data, stride * 2, roots, roots_stride * 2),
|| fft_g1_fast(hi, &data[stride..], stride * 2, roots, roots_stride * 2),
);
}
#[cfg(not(feature = "parallel"))]
{
fft_g1_fast(&mut ret[..half], data, stride * 2, roots, roots_stride * 2);
fft_g1_fast(
&mut ret[half..],
&data[stride..],
stride * 2,
roots,
roots_stride * 2,
);
}
for i in 0..half {
let y_times_root = ret[i + half].mul(&roots[i * roots_stride]);
ret[i + half] = ret[i].sub(&y_times_root);
ret[i] = ret[i].add_or_dbl(&y_times_root);
}
} else {
ret[0] = data[0];
}
}
impl FFTG1<FsG1> for FsFFTSettings {
fn fft_g1(&self, data: &[FsG1], inverse: bool) -> Result<Vec<FsG1>, String> {
if data.len() > self.max_width {
return Err(String::from(
"Supplied list is longer than the available max width",
));
} else if !data.len().is_power_of_two() {
return Err(String::from("A list with power-of-two length expected"));
}
let stride = self.max_width / data.len();
let mut ret = vec![FsG1::default(); data.len()];
let roots = if inverse {
&self.reverse_roots_of_unity
} else {
&self.roots_of_unity
};
fft_g1_fast(&mut ret, data, 1, roots, stride);
if inverse {
let inv_fr_len = FsFr::from_u64(data.len() as u64).inverse();
ret[..data.len()]
.iter_mut()
.for_each(|f| *f = f.mul(&inv_fr_len));
}
Ok(ret)
}
}
// Used for testing
pub fn fft_g1_slow(
ret: &mut [FsG1],
data: &[FsG1],
stride: usize,
roots: &[FsFr],
roots_stride: usize,
) {
for i in 0..data.len() {
// Evaluate first member at 1
ret[i] = data[0].mul(&roots[0]);
// Evaluate the rest of members using a step of (i * J) % data.len() over the roots
// This distributes the roots over correct x^n members and saves on multiplication
for j in 1..data.len() {
let v = data[j * stride].mul(&roots[((i * j) % data.len()) * roots_stride]);
ret[i] = ret[i].add_or_dbl(&v);
}
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/fft_fr.rs | blst/src/fft_fr.rs | extern crate alloc;
use alloc::format;
use alloc::string::String;
use alloc::vec;
use alloc::vec::Vec;
use kzg::{FFTFr, Fr};
use crate::types::fft_settings::FsFFTSettings;
use crate::types::fr::FsFr;
/// Fast Fourier Transform for finite field elements. Polynomial ret is operated on in reverse order: ret_i * x ^ (len - i - 1)
pub fn fft_fr_fast(
ret: &mut [FsFr],
data: &[FsFr],
stride: usize,
roots: &[FsFr],
roots_stride: usize,
) {
let args = FftFrFastInner {
data,
stride,
roots,
roots_stride,
};
fft_fr_fast_inner(ret, 0, &args, 1);
}
/// A struct which holds the unmodified arguments to `fft_fr_fast`.
/// Passing a reference to this struct takes a single pointer, but passing references to its
/// arguments takes 6 pointers (in Rust, slice references contain a start and length).
struct FftFrFastInner<'caller> {
data: &'caller [FsFr],
stride: usize,
roots: &'caller [FsFr],
roots_stride: usize,
}
/// Inner recursive implementation of Fast Fourier Transform for finite field elements, with more efficient stack usage.
///
/// `fft_fr_fast` parameters are mapped as follows:
/// - `ret`: unmodified output array, can't be re-used due to Rust's mutable borrow rules.
/// - `data`: `args.data[args.data_start..]`.
/// - `stride`: `args.stride * stride_factor`.
/// - `roots`: `args.roots` (unmodified during recursion).
/// - `roots_stride`: `args.roots_stride * stride_factor`.
fn fft_fr_fast_inner(
ret: &mut [FsFr],
data_start: usize,
args: &FftFrFastInner<'_>,
stride_factor: usize,
) {
let half: usize = ret.len() / 2;
if half > 0 {
// Recurse
// Offsetting data by stride = 1 on the first iteration forces the even members to the first half
// and the odd members to the second half
let (lo, hi) = ret.split_at_mut(half);
#[cfg(not(feature = "parallel"))]
{
fft_fr_fast_inner(lo, data_start, args, stride_factor * 2);
fft_fr_fast_inner(
hi,
data_start + args.stride * stride_factor,
args,
stride_factor * 2,
);
}
#[cfg(feature = "parallel")]
{
if half > 256 {
rayon::join(
|| fft_fr_fast_inner(lo, data_start, args, stride_factor * 2),
|| {
fft_fr_fast_inner(
hi,
data_start + args.stride * stride_factor,
args,
stride_factor * 2,
)
},
);
} else {
fft_fr_fast_inner(lo, data_start, args, stride_factor * 2);
fft_fr_fast_inner(
hi,
data_start + args.stride * stride_factor,
args,
stride_factor * 2,
);
}
}
for i in 0..half {
let y_times_root =
ret[i + half].mul(&args.roots[i * args.roots_stride * stride_factor]);
ret[i + half] = ret[i].sub(&y_times_root);
ret[i] = ret[i].add(&y_times_root);
}
} else {
// When len = 1, return the permuted element
ret[0] = args.data[data_start];
}
}
impl FsFFTSettings {
/// Fast Fourier Transform for finite field elements, `output` must be zeroes
pub(crate) fn fft_fr_output(
&self,
data: &[FsFr],
inverse: bool,
output: &mut [FsFr],
) -> Result<(), String> {
if data.len() > self.max_width {
return Err(String::from(
"Supplied list is longer than the available max width",
));
}
if data.len() != output.len() {
return Err(format!(
"Output length {} doesn't match data length {}",
data.len(),
output.len()
));
}
if !data.len().is_power_of_two() {
return Err(String::from("A list with power-of-two length expected"));
}
// In case more roots are provided with fft_settings, use a larger stride
let stride = self.max_width / data.len();
// Inverse is same as regular, but all constants are reversed and results are divided by n
// This is a property of the DFT matrix
let roots = if inverse {
&self.reverse_roots_of_unity
} else {
&self.roots_of_unity
};
fft_fr_fast(output, data, 1, roots, stride);
if inverse {
let inv_fr_len = FsFr::from_u64(data.len() as u64).inverse();
output.iter_mut().for_each(|f| *f = f.mul(&inv_fr_len));
}
Ok(())
}
}
impl FFTFr<FsFr> for FsFFTSettings {
/// Fast Fourier Transform for finite field elements
fn fft_fr(&self, data: &[FsFr], inverse: bool) -> Result<Vec<FsFr>, String> {
let mut ret = vec![FsFr::default(); data.len()];
self.fft_fr_output(data, inverse, &mut ret)?;
Ok(ret)
}
}
/// Simplified Discrete Fourier Transform, mainly used for testing
pub fn fft_fr_slow(
ret: &mut [FsFr],
data: &[FsFr],
stride: usize,
roots: &[FsFr],
roots_stride: usize,
) {
for i in 0..data.len() {
// Evaluate first member at 1
ret[i] = data[0].mul(&roots[0]);
// Evaluate the rest of members using a step of (i * J) % data.len() over the roots
// This distributes the roots over correct x^n members and saves on multiplication
for j in 1..data.len() {
let v = data[j * stride].mul(&roots[((i * j) % data.len()) * roots_stride]);
ret[i] = ret[i].add(&v);
}
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/recovery.rs | blst/src/recovery.rs | extern crate alloc;
use alloc::string::String;
use alloc::vec::Vec;
use kzg::{FFTFr, Fr, PolyRecover, ZeroPoly};
use crate::types::fft_settings::FsFFTSettings;
use crate::types::fr::FsFr;
use crate::types::poly::FsPoly;
use once_cell::sync::OnceCell;
#[cfg(feature = "parallel")]
use rayon::prelude::*;
const SCALE_FACTOR: u64 = 5;
static INVERSE_FACTORS: OnceCell<Vec<FsFr>> = OnceCell::new();
static UNSCALE_FACTOR_POWERS: OnceCell<Vec<FsFr>> = OnceCell::new();
pub fn scale_poly(p: &mut [FsFr], len_p: usize) {
let factors = INVERSE_FACTORS.get_or_init(|| {
let scale_factor = FsFr::from_u64(SCALE_FACTOR);
let inv_factor = FsFr::inverse(&scale_factor);
let mut temp = Vec::with_capacity(65536);
temp.push(FsFr::one());
for i in 1..65536 {
temp.push(temp[i - 1].mul(&inv_factor));
}
temp
});
p.iter_mut()
.zip(factors)
.take(len_p)
.skip(1)
.for_each(|(p, factor)| {
*p = p.mul(factor);
});
}
pub fn unscale_poly(p: &mut [FsFr], len_p: usize) {
let factors = UNSCALE_FACTOR_POWERS.get_or_init(|| {
let scale_factor = FsFr::from_u64(SCALE_FACTOR);
let mut temp = Vec::with_capacity(65536);
temp.push(FsFr::one());
for i in 1..65536 {
temp.push(temp[i - 1].mul(&scale_factor));
}
temp
});
p.iter_mut()
.zip(factors)
.take(len_p)
.skip(1)
.for_each(|(p, factor)| {
*p = p.mul(factor);
});
}
impl PolyRecover<FsFr, FsPoly, FsFFTSettings> for FsPoly {
fn recover_poly_coeffs_from_samples(
samples: &[Option<FsFr>],
fs: &FsFFTSettings,
) -> Result<Self, String> {
let len_samples = samples.len();
if !len_samples.is_power_of_two() {
return Err(String::from(
"Samples must have a length that is a power of two",
));
}
let mut missing = Vec::with_capacity(len_samples / 2);
for (i, sample) in samples.iter().enumerate() {
if sample.is_none() {
missing.push(i);
}
}
if missing.len() > len_samples / 2 {
return Err(String::from(
"Impossible to recover, too many shards are missing",
));
}
// Calculate `Z_r,I`
let (zero_eval, mut zero_poly) = fs.zero_poly_via_multiplication(len_samples, &missing)?;
// Construct E * Z_r,I: the loop makes the evaluation polynomial
let poly_evaluations_with_zero = samples
.iter()
.zip(zero_eval)
.map(|(maybe_sample, zero_eval)| {
debug_assert_eq!(maybe_sample.is_none(), zero_eval.is_zero());
match maybe_sample {
Some(sample) => sample.mul(&zero_eval),
None => FsFr::zero(),
}
})
.collect::<Vec<_>>();
// Now inverse FFT so that poly_with_zero is (E * Z_r,I)(x) = (D * Z_r,I)(x)
let mut poly_with_zero = fs.fft_fr(&poly_evaluations_with_zero, true).unwrap();
drop(poly_evaluations_with_zero);
// x -> k * x
let len_zero_poly = zero_poly.coeffs.len();
scale_poly(&mut poly_with_zero, len_samples);
scale_poly(&mut zero_poly.coeffs, len_zero_poly);
// Q1 = (D * Z_r,I)(k * x)
let scaled_poly_with_zero = poly_with_zero;
// Q2 = Z_r,I(k * x)
let scaled_zero_poly = zero_poly.coeffs;
// Polynomial division by convolution: Q3 = Q1 / Q2
#[cfg(feature = "parallel")]
let (eval_scaled_poly_with_zero, eval_scaled_zero_poly) = {
if len_zero_poly - 1 > 1024 {
rayon::join(
|| fs.fft_fr(&scaled_poly_with_zero, false).unwrap(),
|| fs.fft_fr(&scaled_zero_poly, false).unwrap(),
)
} else {
(
fs.fft_fr(&scaled_poly_with_zero, false).unwrap(),
fs.fft_fr(&scaled_zero_poly, false).unwrap(),
)
}
};
#[cfg(not(feature = "parallel"))]
let (eval_scaled_poly_with_zero, eval_scaled_zero_poly) = {
(
fs.fft_fr(&scaled_poly_with_zero, false).unwrap(),
fs.fft_fr(&scaled_zero_poly, false).unwrap(),
)
};
drop(scaled_zero_poly);
let mut eval_scaled_reconstructed_poly = eval_scaled_poly_with_zero;
#[cfg(not(feature = "parallel"))]
let eval_scaled_reconstructed_poly_iter = eval_scaled_reconstructed_poly.iter_mut();
#[cfg(feature = "parallel")]
let eval_scaled_reconstructed_poly_iter = eval_scaled_reconstructed_poly.par_iter_mut();
eval_scaled_reconstructed_poly_iter
.zip(eval_scaled_zero_poly)
.for_each(
|(eval_scaled_reconstructed_poly, eval_scaled_poly_with_zero)| {
*eval_scaled_reconstructed_poly = eval_scaled_reconstructed_poly
.div(&eval_scaled_poly_with_zero)
.unwrap();
},
);
// The result of the division is D(k * x):
let mut scaled_reconstructed_poly =
fs.fft_fr(&eval_scaled_reconstructed_poly, true).unwrap();
drop(eval_scaled_reconstructed_poly);
// k * x -> x
unscale_poly(&mut scaled_reconstructed_poly, len_samples);
// Finally we have D(x) which evaluates to our original data at the powers of roots of unity
Ok(Self {
coeffs: scaled_reconstructed_poly,
})
}
fn recover_poly_from_samples(
samples: &[Option<FsFr>],
fs: &FsFFTSettings,
) -> Result<Self, String> {
let reconstructed_poly = Self::recover_poly_coeffs_from_samples(samples, fs)?;
// The evaluation polynomial for D(x) is the reconstructed data:
let reconstructed_data = fs.fft_fr(&reconstructed_poly.coeffs, false).unwrap();
// Check all is well
samples
.iter()
.zip(&reconstructed_data)
.for_each(|(sample, reconstructed_data)| {
debug_assert!(sample.is_none() || reconstructed_data.equals(&sample.unwrap()));
});
Ok(Self {
coeffs: reconstructed_data,
})
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/types/fr.rs | blst/src/types/fr.rs | extern crate alloc;
use alloc::format;
use alloc::string::String;
use alloc::string::ToString;
use arbitrary::Arbitrary;
use blst::{
blst_bendian_from_scalar, blst_fr, blst_fr_add, blst_fr_cneg, blst_fr_eucl_inverse,
blst_fr_from_scalar, blst_fr_from_uint64, blst_fr_inverse, blst_fr_mul, blst_fr_sqr,
blst_fr_sub, blst_scalar, blst_scalar_fr_check, blst_scalar_from_bendian, blst_scalar_from_fr,
blst_uint64_from_fr,
};
use kzg::eip_4844::BYTES_PER_FIELD_ELEMENT;
use kzg::Fr;
use kzg::Scalar256;
#[repr(C)]
#[derive(Debug, Clone, Copy, Eq, PartialEq, Default)]
pub struct FsFr(pub blst_fr);
impl<'a> Arbitrary<'a> for FsFr {
    /// Builds a field element from four arbitrary 64-bit limbs drawn in order.
    fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
        let mut limbs = [0u64; 4];
        for limb in limbs.iter_mut() {
            *limb = u64::arbitrary(u)?;
        }
        Ok(Self::from_u64_arr(&limbs))
    }
}
impl Fr for FsFr {
    /// Out-of-band sentinel value: all four limbs set to `u64::MAX`.
    /// Not a canonical field element; used only as a marker.
    fn null() -> Self {
        Self::from_u64_arr(&[u64::MAX, u64::MAX, u64::MAX, u64::MAX])
    }

    /// Additive identity.
    fn zero() -> Self {
        Self::from_u64(0)
    }

    /// Multiplicative identity.
    fn one() -> Self {
        Self::from_u64(1)
    }

    /// Element built from four random limbs.
    #[cfg(feature = "rand")]
    fn rand() -> Self {
        let val: [u64; 4] = [
            rand::random(),
            rand::random(),
            rand::random(),
            rand::random(),
        ];
        let mut ret = Self::default();
        unsafe {
            blst_fr_from_uint64(&mut ret.0, val.as_ptr());
        }
        ret
    }

    /// Deserializes a 32-byte big-endian scalar, rejecting wrong lengths and
    /// non-canonical (out-of-field) encodings.
    fn from_bytes(bytes: &[u8]) -> Result<Self, String> {
        bytes
            .try_into()
            .map_err(|_| {
                format!(
                    "Invalid byte length. Expected {}, got {}",
                    BYTES_PER_FIELD_ELEMENT,
                    bytes.len()
                )
            })
            .and_then(|bytes: &[u8; BYTES_PER_FIELD_ELEMENT]| {
                let mut bls_scalar = blst_scalar::default();
                let mut fr = blst_fr::default();
                unsafe {
                    blst_scalar_from_bendian(&mut bls_scalar, bytes.as_ptr());
                    // Reject values >= the field modulus.
                    if !blst_scalar_fr_check(&bls_scalar) {
                        return Err("Invalid scalar".to_string());
                    }
                    blst_fr_from_scalar(&mut fr, &bls_scalar);
                }
                Ok(Self(fr))
            })
    }

    /// Like `from_bytes`, but skips the canonicality check.
    fn from_bytes_unchecked(bytes: &[u8]) -> Result<Self, String> {
        bytes
            .try_into()
            .map_err(|_| {
                format!(
                    "Invalid byte length. Expected {}, got {}",
                    BYTES_PER_FIELD_ELEMENT,
                    bytes.len()
                )
            })
            .map(|bytes: &[u8; BYTES_PER_FIELD_ELEMENT]| {
                let mut bls_scalar = blst_scalar::default();
                let mut fr = blst_fr::default();
                unsafe {
                    blst_scalar_from_bendian(&mut bls_scalar, bytes.as_ptr());
                    blst_fr_from_scalar(&mut fr, &bls_scalar);
                }
                Self(fr)
            })
    }

    /// Parses a big-endian hex string, with or without a `0x` prefix.
    ///
    /// The previous implementation sliced off the first two characters
    /// unconditionally and `unwrap()`ed the decode, panicking on short or
    /// malformed input; it now strips an optional prefix and returns a
    /// descriptive error instead.
    fn from_hex(hex: &str) -> Result<Self, String> {
        let hex = hex.strip_prefix("0x").unwrap_or(hex);
        let bytes = hex::decode(hex).map_err(|e| format!("Invalid hex string: {}", e))?;
        Self::from_bytes(&bytes)
    }

    /// Builds an element from four little-endian-ordered 64-bit limbs.
    fn from_u64_arr(u: &[u64; 4]) -> Self {
        let mut ret = Self::default();
        unsafe {
            blst_fr_from_uint64(&mut ret.0, u.as_ptr());
        }
        ret
    }

    /// Builds an element from a single u64 (upper limbs zero).
    fn from_u64(val: u64) -> Self {
        Self::from_u64_arr(&[val, 0, 0, 0])
    }

    /// Serializes to 32 big-endian bytes.
    fn to_bytes(&self) -> [u8; 32] {
        let mut scalar = blst_scalar::default();
        let mut bytes = [0u8; 32];
        unsafe {
            blst_scalar_from_fr(&mut scalar, &self.0);
            blst_bendian_from_scalar(bytes.as_mut_ptr(), &scalar);
        }
        bytes
    }

    /// Returns the four 64-bit limbs of the element.
    fn to_u64_arr(&self) -> [u64; 4] {
        let mut val: [u64; 4] = [0; 4];
        unsafe {
            blst_uint64_from_fr(val.as_mut_ptr(), &self.0);
        }
        val
    }

    fn is_one(&self) -> bool {
        let mut val: [u64; 4] = [0; 4];
        unsafe {
            blst_uint64_from_fr(val.as_mut_ptr(), &self.0);
        }
        val[0] == 1 && val[1] == 0 && val[2] == 0 && val[3] == 0
    }

    fn is_zero(&self) -> bool {
        let mut val: [u64; 4] = [0; 4];
        unsafe {
            blst_uint64_from_fr(val.as_mut_ptr(), &self.0);
        }
        val[0] == 0 && val[1] == 0 && val[2] == 0 && val[3] == 0
    }

    fn is_null(&self) -> bool {
        self.equals(&Self::null())
    }

    /// Squaring: `self * self`.
    fn sqr(&self) -> Self {
        let mut ret = Self::default();
        unsafe {
            blst_fr_sqr(&mut ret.0, &self.0);
        }
        ret
    }

    fn mul(&self, b: &Self) -> Self {
        let mut ret = Self::default();
        unsafe {
            blst_fr_mul(&mut ret.0, &self.0, &b.0);
        }
        ret
    }

    fn add(&self, b: &Self) -> Self {
        let mut ret = Self::default();
        unsafe {
            blst_fr_add(&mut ret.0, &self.0, &b.0);
        }
        ret
    }

    fn sub(&self, b: &Self) -> Self {
        let mut ret = Self::default();
        unsafe {
            blst_fr_sub(&mut ret.0, &self.0, &b.0);
        }
        ret
    }

    /// Multiplicative inverse via blst's Euclidean algorithm.
    fn eucl_inverse(&self) -> Self {
        let mut ret = Self::default();
        unsafe {
            blst_fr_eucl_inverse(&mut ret.0, &self.0);
        }
        ret
    }

    /// Additive inverse (`cneg` with flag always true).
    fn negate(&self) -> Self {
        let mut ret = Self::default();
        unsafe {
            blst_fr_cneg(&mut ret.0, &self.0, true);
        }
        ret
    }

    /// Multiplicative inverse via blst's default inversion routine.
    fn inverse(&self) -> Self {
        let mut ret = Self::default();
        unsafe {
            blst_fr_inverse(&mut ret.0, &self.0);
        }
        ret
    }

    /// Exponentiation by square-and-multiply; `pow(0)` returns one.
    fn pow(&self, n: usize) -> Self {
        let mut out = Self::one();
        let mut temp = *self;
        let mut n = n;
        loop {
            if (n & 1) == 1 {
                out = out.mul(&temp);
            }
            n >>= 1;
            if n == 0 {
                break;
            }
            temp = temp.sqr();
        }
        out
    }

    /// Division as multiplication by the Euclidean inverse of `b`.
    /// Always returns `Ok`; a zero divisor is not rejected here.
    fn div(&self, b: &Self) -> Result<Self, String> {
        let tmp = b.eucl_inverse();
        let out = self.mul(&tmp);
        Ok(out)
    }

    /// Limb-wise equality on the internal representation.
    fn equals(&self, b: &Self) -> bool {
        let mut val_a: [u64; 4] = [0; 4];
        let mut val_b: [u64; 4] = [0; 4];
        unsafe {
            blst_uint64_from_fr(val_a.as_mut_ptr(), &self.0);
            blst_uint64_from_fr(val_b.as_mut_ptr(), &b.0);
        }
        val_a[0] == val_b[0] && val_a[1] == val_b[1] && val_a[2] == val_b[2] && val_a[3] == val_b[3]
    }

    /// Converts to the generic 256-bit scalar representation used by MSM.
    fn to_scalar(&self) -> Scalar256 {
        let mut blst_scalar = blst_scalar::default();
        unsafe {
            blst_scalar_from_fr(&mut blst_scalar, &self.0);
        }
        Scalar256::from_u8(&blst_scalar.b)
    }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/types/fk20_single_settings.rs | blst/src/types/fk20_single_settings.rs | extern crate alloc;
use alloc::string::String;
use alloc::vec::Vec;
use kzg::common_utils::reverse_bit_order;
use kzg::{FK20SingleSettings, Poly, FFTG1, G1};
use crate::types::fft_settings::FsFFTSettings;
use crate::types::fr::FsFr;
use crate::types::g1::FsG1;
use crate::types::g2::FsG2;
use crate::types::kzg_settings::FsKZGSettings;
use crate::types::poly::FsPoly;
use super::fp::FsFp;
use super::g1::{FsG1Affine, FsG1ProjAddAffine};
/// Settings for FK20 single-proof generation: the base KZG settings plus
/// `x_ext_fft`, the FFT of the reversed monomial G1 powers used in the
/// Toeplitz matrix multiplication.
#[derive(Debug, Clone, Default)]
pub struct FsFK20SingleSettings {
    pub kzg_settings: FsKZGSettings,
    pub x_ext_fft: Vec<FsG1>,
}
impl
    FK20SingleSettings<
        FsFr,
        FsG1,
        FsG2,
        FsFFTSettings,
        FsPoly,
        FsKZGSettings,
        FsFp,
        FsG1Affine,
        FsG1ProjAddAffine,
    > for FsFK20SingleSettings
{
    /// Creates FK20 single-proof settings for a domain of `n2` points.
    /// `n2` must be a power of two, at least 2, and no larger than the
    /// FFT settings' max width.
    fn new(kzg_settings: &FsKZGSettings, n2: usize) -> Result<Self, String> {
        let n = n2 / 2;
        if n2 > kzg_settings.fs.max_width {
            return Err(String::from(
                "n2 must be less than or equal to kzg settings max width",
            ));
        } else if !n2.is_power_of_two() {
            return Err(String::from("n2 must be a power of two"));
        } else if n2 < 2 {
            return Err(String::from("n2 must be greater than or equal to 2"));
        }
        // Monomial G1 points in reverse order, with the identity appended.
        let mut x = Vec::with_capacity(n);
        for i in 0..n - 1 {
            x.push(kzg_settings.g1_values_monomial[n - 2 - i]);
        }
        x.push(FsG1::identity());
        let x_ext_fft = kzg_settings.fs.toeplitz_part_1(&x);
        drop(x);
        let kzg_settings = kzg_settings.clone();
        Ok(Self {
            kzg_settings,
            x_ext_fft,
        })
    }

    /// Computes all proofs, returned in natural (non-bit-reversed) order.
    fn data_availability(&self, p: &FsPoly) -> Result<Vec<FsG1>, String> {
        let n = p.len();
        let n2 = n * 2;
        if n2 > self.kzg_settings.fs.max_width {
            return Err(String::from(
                "n2 must be less than or equal to kzg settings max width",
            ));
        } else if !n2.is_power_of_two() {
            return Err(String::from("n2 must be a power of two"));
        }
        // Propagate errors instead of panicking (was `.unwrap()`).
        let mut ret = self.data_availability_optimized(p)?;
        reverse_bit_order(&mut ret)?;
        Ok(ret)
    }

    /// Computes all proofs in FK20's native (bit-reversed) order using the
    /// Toeplitz three-step decomposition.
    fn data_availability_optimized(&self, p: &FsPoly) -> Result<Vec<FsG1>, String> {
        let n = p.len();
        let n2 = n * 2;
        if n2 > self.kzg_settings.fs.max_width {
            return Err(String::from(
                "n2 must be less than or equal to kzg settings max width",
            ));
        } else if !n2.is_power_of_two() {
            return Err(String::from("n2 must be a power of two"));
        }
        let toeplitz_coeffs = p.toeplitz_coeffs_step();
        let h_ext_fft = self
            .kzg_settings
            .fs
            .toeplitz_part_2(&toeplitz_coeffs, &self.x_ext_fft);
        let h = self.kzg_settings.fs.toeplitz_part_3(&h_ext_fft);
        // Propagate FFT failure instead of panicking (was `.unwrap()`).
        let ret = self.kzg_settings.fs.fft_g1(&h, false)?;
        Ok(ret)
    }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/types/kzg_settings.rs | blst/src/types/kzg_settings.rs | extern crate alloc;
use alloc::string::{String, ToString};
use alloc::sync::Arc;
use alloc::{vec, vec::Vec};
use kzg::eth::c_bindings::CKZGSettings;
use kzg::eth::{self, FIELD_ELEMENTS_PER_EXT_BLOB};
use kzg::msm::precompute::{precompute, PrecomputationTable};
use kzg::{FFTFr, FFTSettings, Fr, G1Mul, G2Mul, KZGSettings, Poly, G1, G2};
use crate::consts::{G1_GENERATOR, G2_GENERATOR};
use crate::fft_g1::fft_g1_fast;
use crate::kzg_proofs::{g1_linear_combination, pairings_verify};
use crate::types::fft_settings::FsFFTSettings;
use crate::types::fr::FsFr;
use crate::types::g1::FsG1;
use crate::types::g2::FsG2;
use crate::types::poly::FsPoly;
use crate::utils::PRECOMPUTATION_TABLES;
use super::fp::FsFp;
use super::g1::{FsG1Affine, FsG1ProjAddAffine};
/// Trusted-setup material and derived tables for KZG commitments and proofs.
#[allow(clippy::type_complexity)]
#[derive(Debug, Clone, Default)]
pub struct FsKZGSettings {
    /// FFT settings sized for the extended blob domain.
    pub fs: FsFFTSettings,
    /// G1 setup points in monomial basis.
    pub g1_values_monomial: Vec<FsG1>,
    /// G1 setup points in Lagrange basis, bit-reversal-permuted.
    pub g1_values_lagrange_brp: Vec<FsG1>,
    /// G2 setup points in monomial basis.
    pub g2_values_monomial: Vec<FsG2>,
    /// Optional MSM precomputation table (built at construction when the
    /// backend supports it).
    pub precomputation:
        Option<Arc<PrecomputationTable<FsFr, FsG1, FsFp, FsG1Affine, FsG1ProjAddAffine>>>,
    /// Per-offset Toeplitz column FFTs, precomputed in `new` for FK20 use.
    pub x_ext_fft_columns: Vec<Vec<FsG1>>,
    /// Number of field elements per cell.
    pub cell_size: usize,
}
/// FK20 Toeplitz step 1: zero-extends `x` to twice its length (padding with
/// the identity point) and writes its G1 FFT into `output`.
///
/// Validation now happens before the padded vector is allocated; the
/// original allocated first and then checked `x_ext.len()` (== `2 * x.len()`),
/// so the observable error behavior is unchanged.
fn toeplitz_part_1(
    field_elements_per_ext_blob: usize,
    output: &mut [FsG1],
    x: &[FsG1],
    s: &FsFFTSettings,
) -> Result<(), String> {
    let n = x.len();
    let n2 = n * 2;
    /* Ensure the length is valid (power of two, within the domain) */
    if n2 > field_elements_per_ext_blob || !n2.is_power_of_two() {
        return Err("Invalid input size".to_string());
    }
    // Zero-pad: [x_0 .. x_{n-1}, identity, .., identity]
    let mut x_ext = vec![FsG1::identity(); n2];
    x_ext[..n].copy_from_slice(x);
    // Stride through the full-domain roots to get an n2-sized subdomain.
    let roots_stride = field_elements_per_ext_blob / n2;
    fft_g1_fast(output, &x_ext, 1, &s.roots_of_unity, roots_stride);
    Ok(())
}
impl KZGSettings<FsFr, FsG1, FsG2, FsFFTSettings, FsPoly, FsFp, FsG1Affine, FsG1ProjAddAffine>
    for FsKZGSettings
{
    /// Builds KZG settings from trusted-setup points and precomputes the
    /// per-offset Toeplitz column FFTs (`x_ext_fft_columns`) used by FK20.
    fn new(
        g1_monomial: &[FsG1],
        g1_lagrange_brp: &[FsG1],
        g2_monomial: &[FsG2],
        fft_settings: &FsFFTSettings,
        cell_size: usize,
    ) -> Result<Self, String> {
        if g1_monomial.len() != g1_lagrange_brp.len() {
            return Err("G1 point length mismatch".to_string());
        }
        let field_elements_per_blob = g1_monomial.len();
        let field_elements_per_ext_blob = field_elements_per_blob * 2;
        let n = field_elements_per_ext_blob / 2;
        let k = n / cell_size;
        let k2 = 2 * k;
        let mut points = vec![FsG1::default(); k2];
        let mut x = vec![FsG1::default(); k];
        let mut x_ext_fft_columns = vec![vec![FsG1::default(); cell_size]; k2];
        // One Toeplitz FFT per offset within a cell; each FFT output row
        // becomes one entry of the corresponding column.
        for offset in 0..cell_size {
            let start = n - cell_size - 1 - offset;
            // Gather every cell_size-th monomial point, walking downwards
            // from `start`, then terminate with the identity point.
            for (i, p) in x.iter_mut().enumerate().take(k - 1) {
                let j = start - i * cell_size;
                *p = g1_monomial[j];
            }
            x[k - 1] = FsG1::identity();
            toeplitz_part_1(field_elements_per_ext_blob, &mut points, &x, fft_settings)?;
            // Transpose: row-major FFT output into per-offset columns.
            for row in 0..k2 {
                x_ext_fft_columns[row][offset] = points[row];
            }
        }
        Ok(Self {
            g1_values_monomial: g1_monomial.to_vec(),
            g1_values_lagrange_brp: g1_lagrange_brp.to_vec(),
            g2_values_monomial: g2_monomial.to_vec(),
            fs: fft_settings.clone(),
            precomputation: {
                #[cfg(feature = "sppark")]
                {
                    // GPU path: hand the affine Lagrange points to sppark and
                    // keep only an opaque pointer to its prepared MSM state.
                    use blst::blst_p1_affine;
                    let points = kzg::msm::msm_impls::batch_convert::<FsG1, FsFp, FsG1Affine>(
                        g1_lagrange_brp,
                    );
                    // SAFETY-relevant: reinterprets FsG1Affine as blst_p1_affine;
                    // both sides are blst-backed reprs — confirm if layouts change.
                    let points = unsafe {
                        alloc::slice::from_raw_parts(
                            points.as_ptr() as *const blst_p1_affine,
                            points.len(),
                        )
                    };
                    let prepared = rust_kzg_blst_sppark::prepare_multi_scalar_mult(points);
                    Some(Arc::new(PrecomputationTable::from_ptr(prepared)))
                }
                #[cfg(not(feature = "sppark"))]
                {
                    // CPU path: build the precomputation table if the backend
                    // provides one; `None` (no table) is not an error.
                    precompute(g1_lagrange_brp, &x_ext_fft_columns)
                        .ok()
                        .flatten()
                        .map(Arc::new)
                }
            },
            x_ext_fft_columns,
            cell_size,
        })
    }

    /// Commits to `poly` as a G1 multi-scalar multiplication against the
    /// monomial setup points.
    fn commit_to_poly(&self, poly: &FsPoly) -> Result<FsG1, String> {
        if poly.coeffs.len() > self.g1_values_monomial.len() {
            return Err(String::from("Polynomial is longer than secret g1"));
        }
        let mut out = FsG1::default();
        g1_linear_combination(
            &mut out,
            &self.g1_values_monomial,
            &poly.coeffs,
            poly.coeffs.len(),
            None,
        );
        Ok(out)
    }

    /// Computes an opening proof for `p` at the single point `x`:
    /// commits to the quotient q(X) = (p(X) - p(x)) / (X - x), which is
    /// computed in-place by synthetic division below.
    fn compute_proof_single(&self, p: &FsPoly, x: &FsFr) -> Result<FsG1, String> {
        if p.coeffs.is_empty() {
            return Err(String::from("Polynomial must not be empty"));
        }
        // `-(x0^n)`, where `n` is `1`
        let divisor_0 = x.negate();
        // Calculate `q = p / (x^n - x0^n)` for our reduced case (see `compute_proof_multi` for
        // generic implementation)
        let mut out_coeffs = Vec::from(&p.coeffs[1..]);
        for i in (1..out_coeffs.len()).rev() {
            let tmp = out_coeffs[i].mul(&divisor_0);
            out_coeffs[i - 1] = out_coeffs[i - 1].sub(&tmp);
        }
        let q = FsPoly { coeffs: out_coeffs };
        let ret = self.commit_to_poly(&q)?;
        Ok(ret)
    }

    /// Verifies a single-point opening via the pairing check
    /// e(C - [y]_1, [1]_2) == e(proof, [s - x]_2).
    fn check_proof_single(
        &self,
        com: &FsG1,
        proof: &FsG1,
        x: &FsFr,
        y: &FsFr,
    ) -> Result<bool, String> {
        let x_g2: FsG2 = G2_GENERATOR.mul(x);
        let s_minus_x: FsG2 = self.g2_values_monomial[1].sub(&x_g2);
        let y_g1 = G1_GENERATOR.mul(y);
        let commitment_minus_y: FsG1 = com.sub(&y_g1);
        Ok(pairings_verify(
            &commitment_minus_y,
            &G2_GENERATOR,
            proof,
            &s_minus_x,
        ))
    }

    /// Computes a proof for opening `p` at the `n` points `x0 * w^i`
    /// (a coset of the size-`n` roots-of-unity subgroup).
    fn compute_proof_multi(&self, p: &FsPoly, x0: &FsFr, n: usize) -> Result<FsG1, String> {
        if p.coeffs.is_empty() {
            return Err(String::from("Polynomial must not be empty"));
        }
        if !n.is_power_of_two() {
            return Err(String::from("n must be a power of two"));
        }
        // Construct x^n - x0^n = (x - x0.w^0)(x - x0.w^1)...(x - x0.w^(n-1))
        let mut divisor = FsPoly {
            coeffs: Vec::with_capacity(n + 1),
        };
        // -(x0^n)
        let x_pow_n = x0.pow(n);
        divisor.coeffs.push(x_pow_n.negate());
        // Zeros
        for _ in 1..n {
            divisor.coeffs.push(Fr::zero());
        }
        // x^n
        divisor.coeffs.push(Fr::one());
        let mut new_polina = p.clone();
        // Calculate q = p / (x^n - x0^n)
        // let q = p.div(&divisor).unwrap();
        let q = new_polina.div(&divisor)?;
        let ret = self.commit_to_poly(&q)?;
        Ok(ret)
    }

    /// Verifies a multi-point opening: interpolates the claimed values over
    /// the coset, then checks
    /// e(C - [I(s)]_1, [1]_2) == e(proof, [s^n - x^n]_2).
    fn check_proof_multi(
        &self,
        com: &FsG1,
        proof: &FsG1,
        x: &FsFr,
        ys: &[FsFr],
        n: usize,
    ) -> Result<bool, String> {
        if !n.is_power_of_two() {
            return Err(String::from("n is not a power of two"));
        }
        // Interpolate at a coset.
        let mut interp = FsPoly {
            coeffs: self.fs.fft_fr(ys, true)?,
        };
        let inv_x = x.inverse(); // Not euclidean?
        let mut inv_x_pow = inv_x;
        // Undo the coset shift: divide coefficient i by x^i.
        for i in 1..n {
            interp.coeffs[i] = interp.coeffs[i].mul(&inv_x_pow);
            inv_x_pow = inv_x_pow.mul(&inv_x);
        }
        // [x^n]_2
        let x_pow = inv_x_pow.inverse();
        let xn2 = G2_GENERATOR.mul(&x_pow);
        // [s^n - x^n]_2
        let xn_minus_yn = self.g2_values_monomial[n].sub(&xn2);
        // [interpolation_polynomial(s)]_1
        let is1 = self.commit_to_poly(&interp).unwrap();
        // [commitment - interpolation_polynomial(s)]_1 = [commit]_1 - [interpolation_polynomial(s)]_1
        let commit_minus_interp = com.sub(&is1);
        let ret = pairings_verify(&commit_minus_interp, &G2_GENERATOR, proof, &xn_minus_yn);
        Ok(ret)
    }

    fn get_roots_of_unity_at(&self, i: usize) -> FsFr {
        self.fs.get_roots_of_unity_at(i)
    }

    fn get_fft_settings(&self) -> &FsFFTSettings {
        &self.fs
    }

    fn get_g1_lagrange_brp(&self) -> &[FsG1] {
        &self.g1_values_lagrange_brp
    }

    fn get_g1_monomial(&self) -> &[FsG1] {
        &self.g1_values_monomial
    }

    fn get_g2_monomial(&self) -> &[FsG2] {
        &self.g2_values_monomial
    }

    fn get_precomputation(
        &self,
    ) -> Option<&PrecomputationTable<FsFr, FsG1, FsFp, FsG1Affine, FsG1ProjAddAffine>> {
        self.precomputation.as_ref().map(|v| v.as_ref())
    }

    fn get_x_ext_fft_columns(&self) -> &[Vec<FsG1>] {
        &self.x_ext_fft_columns
    }

    fn get_cell_size(&self) -> usize {
        self.cell_size
    }
}
/// Converts C-binding settings (raw pointers from the c-kzg-compatible FFI
/// surface) into the native Rust representation by copying every table.
///
/// NOTE(review): this is a safe function performing unsafe reads; soundness
/// relies on the caller passing a `CKZGSettings` whose pointers are valid
/// and sized exactly as the `eth` constants dictate — confirm at call sites.
impl<'a> TryFrom<&'a CKZGSettings> for FsKZGSettings {
    type Error = String;

    fn try_from(settings: &'a CKZGSettings) -> Result<Self, Self::Error> {
        // roots_of_unity has one extra element (the wrap-around root).
        let roots_of_unity = unsafe {
            core::slice::from_raw_parts(settings.roots_of_unity, FIELD_ELEMENTS_PER_EXT_BLOB + 1)
                .iter()
                .map(|r| FsFr(blst::blst_fr { l: r.l }))
                .collect::<Vec<FsFr>>()
        };
        let brp_roots_of_unity = unsafe {
            core::slice::from_raw_parts(settings.brp_roots_of_unity, FIELD_ELEMENTS_PER_EXT_BLOB)
                .iter()
                .map(|r| FsFr(blst::blst_fr { l: r.l }))
                .collect::<Vec<FsFr>>()
        };
        let reverse_roots_of_unity = unsafe {
            core::slice::from_raw_parts(
                settings.reverse_roots_of_unity,
                FIELD_ELEMENTS_PER_EXT_BLOB + 1,
            )
            .iter()
            .map(|r| FsFr(blst::blst_fr { l: r.l }))
            .collect::<Vec<FsFr>>()
        };
        // Index 1 holds the primitive root (index 0 is one).
        let fft_settings = FsFFTSettings {
            max_width: FIELD_ELEMENTS_PER_EXT_BLOB,
            root_of_unity: roots_of_unity[1],
            roots_of_unity,
            brp_roots_of_unity,
            reverse_roots_of_unity,
        };
        Ok(FsKZGSettings {
            fs: fft_settings,
            // Rebuild each G1 point limb-by-limb from the C representation.
            g1_values_monomial: unsafe {
                core::slice::from_raw_parts(
                    settings.g1_values_monomial,
                    eth::FIELD_ELEMENTS_PER_BLOB,
                )
            }
            .iter()
            .map(|r| {
                FsG1(blst::blst_p1 {
                    x: blst::blst_fp { l: r.x.l },
                    y: blst::blst_fp { l: r.y.l },
                    z: blst::blst_fp { l: r.z.l },
                })
            })
            .collect::<Vec<_>>(),
            g1_values_lagrange_brp: unsafe {
                core::slice::from_raw_parts(
                    settings.g1_values_lagrange_brp,
                    eth::FIELD_ELEMENTS_PER_BLOB,
                )
            }
            .iter()
            .map(|r| {
                FsG1(blst::blst_p1 {
                    x: blst::blst_fp { l: r.x.l },
                    y: blst::blst_fp { l: r.y.l },
                    z: blst::blst_fp { l: r.z.l },
                })
            })
            .collect::<Vec<_>>(),
            // G2 points have fp2 coordinates: two fp limdel arrays each.
            g2_values_monomial: unsafe {
                core::slice::from_raw_parts(
                    settings.g2_values_monomial,
                    eth::TRUSTED_SETUP_NUM_G2_POINTS,
                )
            }
            .iter()
            .map(|r| {
                FsG2(blst::blst_p2 {
                    x: blst::blst_fp2 {
                        fp: [
                            blst::blst_fp { l: r.x.fp[0].l },
                            blst::blst_fp { l: r.x.fp[1].l },
                        ],
                    },
                    y: blst::blst_fp2 {
                        fp: [
                            blst::blst_fp { l: r.y.fp[0].l },
                            blst::blst_fp { l: r.y.fp[1].l },
                        ],
                    },
                    z: blst::blst_fp2 {
                        fp: [
                            blst::blst_fp { l: r.z.fp[0].l },
                            blst::blst_fp { l: r.z.fp[1].l },
                        ],
                    },
                })
            })
            .collect::<Vec<_>>(),
            // Outer slice: one column set per Toeplitz row (2 * k rows).
            x_ext_fft_columns: unsafe {
                core::slice::from_raw_parts(
                    settings.x_ext_fft_columns,
                    2 * ((FIELD_ELEMENTS_PER_EXT_BLOB / 2) / eth::FIELD_ELEMENTS_PER_CELL),
                )
            }
            .iter()
            .map(|it| {
                unsafe { core::slice::from_raw_parts(*it, eth::FIELD_ELEMENTS_PER_CELL) }
                    .iter()
                    .map(|r| {
                        FsG1(blst::blst_p1 {
                            x: blst::blst_fp { l: r.x.l },
                            y: blst::blst_fp { l: r.y.l },
                            z: blst::blst_fp { l: r.z.l },
                        })
                    })
                    .collect::<Vec<_>>()
            })
            .collect::<Vec<_>>(),
            // Precomputation tables are cached globally, keyed by the C settings.
            #[allow(static_mut_refs)]
            precomputation: unsafe { PRECOMPUTATION_TABLES.get_precomputation(settings) },
            cell_size: eth::FIELD_ELEMENTS_PER_CELL,
        })
    }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/types/g2.rs | blst/src/types/g2.rs | extern crate alloc;
use alloc::format;
use alloc::string::String;
use alloc::string::ToString;
use blst::{
blst_fp2, blst_p2, blst_p2_add_or_double, blst_p2_affine, blst_p2_cneg, blst_p2_compress,
blst_p2_double, blst_p2_from_affine, blst_p2_is_equal, blst_p2_mult, blst_p2_uncompress,
blst_scalar, blst_scalar_from_fr, BLST_ERROR,
};
use kzg::eip_4844::BYTES_PER_G2;
#[cfg(feature = "rand")]
use kzg::Fr;
use kzg::{G2Mul, G2};
use crate::consts::{G2_GENERATOR, G2_NEGATIVE_GENERATOR};
use crate::types::fr::FsFr;
/// BLS12-381 G2 group element backed by blst's `blst_p2` (projective
/// x/y/z coordinates). `repr(C)` keeps the layout identical to `blst_p2`.
#[repr(C)]
#[derive(Debug, Default, Clone, Copy, Eq, PartialEq)]
pub struct FsG2(pub blst_p2);
impl G2Mul<FsFr> for FsG2 {
    /// Scalar multiplication of this G2 point by the field element `b`.
    fn mul(&self, b: &FsFr) -> Self {
        let mut scalar = blst_scalar::default();
        let mut out = blst_p2::default();
        // blst consumes the scalar as a bit string; pass its full width.
        let nbits = 8 * core::mem::size_of::<blst_scalar>();
        unsafe {
            blst_scalar_from_fr(&mut scalar, &b.0);
            blst_p2_mult(&mut out, &self.0, scalar.b.as_ptr(), nbits);
        }
        Self(out)
    }
}
impl G2 for FsG2 {
    fn generator() -> Self {
        G2_GENERATOR
    }

    fn negative_generator() -> Self {
        G2_NEGATIVE_GENERATOR
    }

    /// Deserializes a 96-byte compressed G2 point; blst's uncompression also
    /// checks that the point lies on the curve.
    fn from_bytes(bytes: &[u8]) -> Result<Self, String> {
        bytes
            .try_into()
            .map_err(|_| {
                format!(
                    "Invalid byte length. Expected {}, got {}",
                    BYTES_PER_G2,
                    bytes.len()
                )
            })
            .and_then(|bytes: &[u8; BYTES_PER_G2]| {
                let mut tmp = blst_p2_affine::default();
                let mut g2 = blst_p2::default();
                unsafe {
                    // The uncompress routine also checks that the point is on the curve
                    if blst_p2_uncompress(&mut tmp, bytes.as_ptr()) != BLST_ERROR::BLST_SUCCESS {
                        return Err("Failed to uncompress".to_string());
                    }
                    blst_p2_from_affine(&mut g2, &tmp);
                }
                Ok(FsG2(g2))
            })
    }

    /// Serializes to the 96-byte compressed form.
    fn to_bytes(&self) -> [u8; 96] {
        let mut out = [0u8; BYTES_PER_G2];
        unsafe {
            blst_p2_compress(out.as_mut_ptr(), &self.0);
        }
        out
    }

    /// Point addition (handles the doubling case); does not mutate `self`
    /// despite the `&mut` receiver required by the trait.
    fn add_or_dbl(&mut self, b: &Self) -> Self {
        let mut result = blst_p2::default();
        unsafe {
            blst_p2_add_or_double(&mut result, &self.0, &b.0);
        }
        Self(result)
    }

    /// Point doubling.
    fn dbl(&self) -> Self {
        let mut result = blst_p2::default();
        unsafe {
            blst_p2_double(&mut result, &self.0);
        }
        Self(result)
    }

    /// Point subtraction: negates a local copy of `b`, then adds.
    fn sub(&self, b: &Self) -> Self {
        let mut bneg: blst_p2 = b.0;
        let mut result = blst_p2::default();
        unsafe {
            blst_p2_cneg(&mut bneg, true);
            blst_p2_add_or_double(&mut result, &self.0, &bneg);
        }
        Self(result)
    }

    /// Group-element equality (coordinate-representation independent,
    /// per blst's comparison routine).
    fn equals(&self, b: &Self) -> bool {
        unsafe { blst_p2_is_equal(&self.0, &b.0) }
    }
}
impl FsG2 {
    // Constructs a point from raw projective coordinates; no curve
    // membership check is performed.
    pub(crate) fn _from_xyz(x: blst_fp2, y: blst_fp2, z: blst_fp2) -> Self {
        FsG2(blst_p2 { x, y, z })
    }

    // Random point: a random scalar times the generator.
    #[cfg(feature = "rand")]
    pub fn rand() -> Self {
        let result: FsG2 = G2_GENERATOR;
        result.mul(&FsFr::rand())
    }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/types/poly.rs | blst/src/types/poly.rs | extern crate alloc;
use alloc::string::String;
use alloc::vec;
use alloc::vec::Vec;
use kzg::common_utils::{log2_pow2, log2_u64, next_pow_of_2};
use kzg::{FFTFr, FFTSettings, FFTSettingsPoly, Fr, Poly};
use crate::consts::SCALE_FACTOR;
use crate::types::fft_settings::FsFFTSettings;
use crate::types::fr::FsFr;
/// Dense polynomial over the scalar field, stored as coefficients in
/// ascending degree order (`coeffs[0]` is the constant term).
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub struct FsPoly {
    pub coeffs: Vec<FsFr>,
}
impl Poly<FsFr> for FsPoly {
    /// Zero-initialized polynomial with `size` coefficients.
    fn new(size: usize) -> Self {
        Self {
            coeffs: vec![FsFr::default(); size],
        }
    }

    fn get_coeff_at(&self, i: usize) -> FsFr {
        self.coeffs[i]
    }

    fn set_coeff_at(&mut self, i: usize, x: &FsFr) {
        self.coeffs[i] = *x
    }

    fn get_coeffs(&self) -> &[FsFr] {
        &self.coeffs
    }

    fn len(&self) -> usize {
        self.coeffs.len()
    }

    /// Evaluates the polynomial at `x` by Horner's method, walking from the
    /// highest coefficient down. Empty polynomial evaluates to zero; x == 0
    /// short-circuits to the constant term.
    fn eval(&self, x: &FsFr) -> FsFr {
        if self.coeffs.is_empty() {
            return FsFr::zero();
        } else if x.is_zero() {
            return self.coeffs[0];
        }
        let mut ret = self.coeffs[self.coeffs.len() - 1];
        let mut i = self.coeffs.len() - 2;
        loop {
            let temp = ret.mul(x);
            ret = temp.add(&self.coeffs[i]);
            if i == 0 {
                break;
            }
            i -= 1;
        }
        ret
    }

    /// Substitutes x -> x / SCALE_FACTOR: coefficient i is multiplied by
    /// SCALE_FACTOR^-(i+1). Inverse of `unscale`.
    fn scale(&mut self) {
        let scale_factor = FsFr::from_u64(SCALE_FACTOR);
        let inv_factor = scale_factor.inverse();
        let mut factor_power = FsFr::one();
        for i in 0..self.coeffs.len() {
            factor_power = factor_power.mul(&inv_factor);
            self.coeffs[i] = self.coeffs[i].mul(&factor_power);
        }
    }

    /// Substitutes x -> SCALE_FACTOR * x: coefficient i is multiplied by
    /// SCALE_FACTOR^(i+1). Inverse of `scale`.
    fn unscale(&mut self) {
        let scale_factor = FsFr::from_u64(SCALE_FACTOR);
        let mut factor_power = FsFr::one();
        for i in 0..self.coeffs.len() {
            factor_power = factor_power.mul(&scale_factor);
            self.coeffs[i] = self.coeffs[i].mul(&factor_power);
        }
    }

    /// Power-series inverse: returns the first `output_len` coefficients of
    /// 1 / self, requiring a non-zero constant term. Iteratively extends a
    /// known-correct prefix `c` via c <- c * (2 - self * c), doubling the
    /// correct length each step (the `mask` loop replays the bits of
    /// `output_len - 1` so the final length lands exactly on target).
    fn inverse(&mut self, output_len: usize) -> Result<Self, String> {
        if output_len == 0 {
            return Err(String::from("Can't produce a zero-length result"));
        } else if self.coeffs.is_empty() {
            return Err(String::from("Can't inverse a zero-length poly"));
        } else if self.coeffs[0].is_zero() {
            return Err(String::from(
                "First coefficient of polynomial mustn't be zero",
            ));
        }
        let mut ret = FsPoly {
            coeffs: vec![FsFr::zero(); output_len],
        };
        // If the input polynomial is constant, the remainder of the series is zero
        if self.coeffs.len() == 1 {
            ret.coeffs[0] = self.coeffs[0].eucl_inverse();
            return Ok(ret);
        }
        let maxd = output_len - 1;
        // Base case for d == 0: inverse of the constant term.
        ret.coeffs[0] = self.coeffs[0].eucl_inverse();
        let mut d: usize = 0;
        let mut mask: usize = 1 << log2_u64(maxd);
        while mask != 0 {
            // Extend target degree by replaying the next bit of maxd.
            d = 2 * d + usize::from((maxd & mask) != 0);
            mask >>= 1;
            // b.c -> tmp0 (we're using out for c)
            let len_temp = (d + 1).min(self.len() + output_len - 1);
            let mut tmp0 = self.mul(&ret, len_temp).unwrap();
            // 2 - b.c -> tmp0
            for i in 0..tmp0.len() {
                tmp0.coeffs[i] = tmp0.coeffs[i].negate();
            }
            let fr_two = Fr::from_u64(2);
            tmp0.coeffs[0] = tmp0.coeffs[0].add(&fr_two);
            // c.(2 - b.c) -> tmp1;
            let tmp1 = ret.mul(&tmp0, d + 1).unwrap();
            for i in 0..tmp1.len() {
                ret.coeffs[i] = tmp1.coeffs[i];
            }
        }
        if d + 1 != output_len {
            return Err(String::from("D + 1 must be equal to output_len"));
        }
        Ok(ret)
    }

    /// Quotient of self by `divisor`, dispatching to schoolbook long
    /// division for small divisors and FFT-based fast division otherwise.
    fn div(&mut self, divisor: &Self) -> Result<Self, String> {
        if divisor.len() >= self.len() || divisor.len() < 128 {
            // Tunable parameter
            self.long_div(divisor)
        } else {
            self.fast_div(divisor)
        }
    }

    /// Schoolbook polynomial long division; returns the quotient only
    /// (the remainder is discarded).
    fn long_div(&mut self, divisor: &Self) -> Result<Self, String> {
        if divisor.coeffs.is_empty() {
            return Err(String::from("Can't divide by zero"));
        } else if divisor.coeffs[divisor.coeffs.len() - 1].is_zero() {
            return Err(String::from("Highest coefficient must be non-zero"));
        }
        let out_length = self.poly_quotient_length(divisor);
        if out_length == 0 {
            return Ok(FsPoly { coeffs: vec![] });
        }
        // Special case for divisor.len() == 2: synthetic division by a
        // linear factor, avoiding the general work loop.
        if divisor.len() == 2 {
            let divisor_0 = divisor.coeffs[0];
            let divisor_1 = divisor.coeffs[1];
            let mut out_coeffs = Vec::from(&self.coeffs[1..]);
            for i in (1..out_length).rev() {
                out_coeffs[i] = out_coeffs[i].div(&divisor_1).unwrap();
                let tmp = out_coeffs[i].mul(&divisor_0);
                out_coeffs[i - 1] = out_coeffs[i - 1].sub(&tmp);
            }
            out_coeffs[0] = out_coeffs[0].div(&divisor_1).unwrap();
            Ok(FsPoly { coeffs: out_coeffs })
        } else {
            let mut out: FsPoly = FsPoly {
                coeffs: vec![FsFr::default(); out_length],
            };
            let mut a_pos = self.len() - 1;
            let b_pos = divisor.len() - 1;
            let mut diff = a_pos - b_pos;
            // Work on a scratch copy of the dividend coefficients.
            let mut a = self.coeffs.clone();
            while diff > 0 {
                out.coeffs[diff] = a[a_pos].div(&divisor.coeffs[b_pos]).unwrap();
                for i in 0..(b_pos + 1) {
                    let tmp = out.coeffs[diff].mul(&divisor.coeffs[i]);
                    a[diff + i] = a[diff + i].sub(&tmp);
                }
                diff -= 1;
                a_pos -= 1;
            }
            out.coeffs[0] = a[a_pos].div(&divisor.coeffs[b_pos]).unwrap();
            Ok(out)
        }
    }

    /// Fast division via coefficient reversal and power-series inversion:
    /// q = rev(rev(a) * rev(b)^-1 mod x^(m-n+1)).
    fn fast_div(&mut self, divisor: &Self) -> Result<Self, String> {
        if divisor.coeffs.is_empty() {
            return Err(String::from("Cant divide by zero"));
        } else if divisor.coeffs[divisor.coeffs.len() - 1].is_zero() {
            return Err(String::from("Highest coefficient must be non-zero"));
        }
        let m: usize = self.len() - 1;
        let n: usize = divisor.len() - 1;
        // If the divisor is larger than the dividend, the result is zero-length
        if n > m {
            return Ok(FsPoly { coeffs: Vec::new() });
        }
        // Special case for divisor.length == 1 (it's a constant)
        if divisor.len() == 1 {
            let mut out = FsPoly {
                coeffs: vec![FsFr::zero(); self.len()],
            };
            for i in 0..out.len() {
                out.coeffs[i] = self.coeffs[i].div(&divisor.coeffs[0]).unwrap();
            }
            return Ok(out);
        }
        let mut a_flip = self.flip().unwrap();
        let mut b_flip = divisor.flip().unwrap();
        let inv_b_flip = b_flip.inverse(m - n + 1).unwrap();
        let q_flip = a_flip.mul(&inv_b_flip, m - n + 1).unwrap();
        let out = q_flip.flip().unwrap();
        Ok(out)
    }

    /// Schoolbook O(n*m) multiplication, truncated to `output_len`
    /// coefficients. Used for small operands (see `mul`).
    fn mul_direct(&mut self, multiplier: &Self, output_len: usize) -> Result<Self, String> {
        if self.len() == 0 || multiplier.len() == 0 {
            return Ok(FsPoly::new(0));
        }
        let a_degree = self.len() - 1;
        let b_degree = multiplier.len() - 1;
        let mut ret = FsPoly {
            coeffs: vec![Fr::zero(); output_len],
        };
        // Truncate the output to the length of the output polynomial
        for i in 0..(a_degree + 1) {
            let mut j = 0;
            while (j <= b_degree) && ((i + j) < output_len) {
                let tmp = self.coeffs[i].mul(&multiplier.coeffs[j]);
                let tmp = ret.coeffs[i + j].add(&tmp);
                ret.coeffs[i + j] = tmp;
                j += 1;
            }
        }
        Ok(ret)
    }
}
impl FFTSettingsPoly<FsFr, FsPoly, FsFFTSettings> for FsFFTSettings {
fn poly_mul_fft(
a: &FsPoly,
b: &FsPoly,
len: usize,
_fs: Option<&FsFFTSettings>,
) -> Result<FsPoly, String> {
b.mul_fft(a, len)
}
}
impl FsPoly {
    /// Returns a copy with trailing zero coefficients stripped
    /// (the zero polynomial normalizes to an empty coefficient vector).
    pub fn _poly_norm(&self) -> Self {
        let mut ret = self.clone();
        let mut temp_len: usize = ret.coeffs.len();
        while temp_len > 0 && ret.coeffs[temp_len - 1].is_zero() {
            temp_len -= 1;
        }
        if temp_len == 0 {
            ret.coeffs = Vec::new();
        } else {
            ret.coeffs = ret.coeffs[0..temp_len].to_vec();
        }
        ret
    }

    /// Length of the quotient self / divisor, or 0 when the divisor is
    /// longer than the dividend.
    pub fn poly_quotient_length(&self, divisor: &Self) -> usize {
        if self.len() >= divisor.len() {
            self.len() - divisor.len() + 1
        } else {
            0
        }
    }

    /// Copy of the polynomial resized to `out_length` coefficients:
    /// zero-padded when longer, truncated when shorter.
    pub fn pad(&self, out_length: usize) -> Self {
        let mut ret = Self {
            coeffs: vec![FsFr::zero(); out_length],
        };
        for i in 0..self.len().min(out_length) {
            ret.coeffs[i] = self.coeffs[i];
        }
        ret
    }

    /// Reverses the coefficient order (used by `fast_div`).
    pub fn flip(&self) -> Result<FsPoly, String> {
        let mut ret = FsPoly {
            coeffs: vec![FsFr::default(); self.len()],
        };
        for i in 0..self.len() {
            ret.coeffs[i] = self.coeffs[self.coeffs.len() - i - 1]
        }
        Ok(ret)
    }

    /// FFT-based multiplication: pads both operands to the next power of two
    /// covering the full product, transforms, multiplies pointwise, inverse-
    /// transforms, then truncates to `output_len` coefficients.
    pub fn mul_fft(&self, multiplier: &Self, output_len: usize) -> Result<Self, String> {
        let length = next_pow_of_2(self.len() + multiplier.len() - 1);
        let scale = log2_pow2(length);
        let fft_settings = FsFFTSettings::new(scale).unwrap();
        let a_pad = self.pad(length);
        let b_pad = multiplier.pad(length);
        let a_fft: Vec<FsFr>;
        let b_fft: Vec<FsFr>;
        #[cfg(feature = "parallel")]
        {
            // Run the two forward FFTs concurrently, but only when the
            // transform is large enough to amortize the join overhead.
            if length > 1024 {
                let mut a_fft_temp = vec![];
                let mut b_fft_temp = vec![];
                rayon::join(
                    || a_fft_temp = fft_settings.fft_fr(&a_pad.coeffs, false).unwrap(),
                    || b_fft_temp = fft_settings.fft_fr(&b_pad.coeffs, false).unwrap(),
                );
                a_fft = a_fft_temp;
                b_fft = b_fft_temp;
            } else {
                a_fft = fft_settings.fft_fr(&a_pad.coeffs, false).unwrap();
                b_fft = fft_settings.fft_fr(&b_pad.coeffs, false).unwrap();
            }
        }
        #[cfg(not(feature = "parallel"))]
        {
            // Convert Poly to values
            a_fft = fft_settings.fft_fr(&a_pad.coeffs, false).unwrap();
            b_fft = fft_settings.fft_fr(&b_pad.coeffs, false).unwrap();
        }
        // Multiply two value ranges
        let mut ab_fft = a_fft;
        ab_fft.iter_mut().zip(b_fft).for_each(|(a, b)| {
            *a = a.mul(&b);
        });
        // Convert value range multiplication to a resulting polynomial
        let ab = fft_settings.fft_fr(&ab_fft, true).unwrap();
        drop(ab_fft);
        let mut ret = FsPoly {
            coeffs: vec![FsFr::zero(); output_len],
        };
        let range = ..output_len.min(length);
        ret.coeffs[range].clone_from_slice(&ab[range]);
        Ok(ret)
    }

    /// Multiplication dispatcher: schoolbook for small operands, FFT-based
    /// otherwise.
    pub fn mul(&mut self, multiplier: &Self, output_len: usize) -> Result<Self, String> {
        if self.len() < 64 || multiplier.len() < 64 || output_len < 128 {
            // Tunable parameter
            self.mul_direct(multiplier, output_len)
        } else {
            self.mul_fft(multiplier, output_len)
        }
    }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/types/fft_settings.rs | blst/src/types/fft_settings.rs | extern crate alloc;
use alloc::string::String;
use alloc::vec;
use alloc::vec::Vec;
use kzg::common_utils::reverse_bit_order;
use kzg::{FFTSettings, Fr};
use crate::consts::SCALE2_ROOT_OF_UNITY;
use crate::types::fr::FsFr;
/// Precomputed roots of unity for FFTs over the scalar field.
#[derive(Debug, Clone)]
pub struct FsFFTSettings {
    /// Domain size: 2^scale.
    pub max_width: usize,
    /// Primitive `max_width`-th root of unity.
    pub root_of_unity: FsFr,
    /// Successive powers of `root_of_unity`; length `max_width + 1`
    /// (the last entry wraps back to one).
    pub roots_of_unity: Vec<FsFr>,
    /// First `max_width` roots in bit-reversal permutation order.
    pub brp_roots_of_unity: Vec<FsFr>,
    /// Reversed copy of `roots_of_unity`; length `max_width + 1`.
    pub reverse_roots_of_unity: Vec<FsFr>,
}
impl Default for FsFFTSettings {
fn default() -> Self {
Self::new(0).unwrap()
}
}
impl FFTSettings<FsFr> for FsFFTSettings {
    /// Create FFTSettings with roots of unity for a selected scale. Resulting roots will have a magnitude of 2 ^ max_scale.
    fn new(scale: usize) -> Result<FsFFTSettings, String> {
        if scale >= SCALE2_ROOT_OF_UNITY.len() {
            return Err(String::from(
                "Scale is expected to be within root of unity matrix row size",
            ));
        }
        // max_width = 2 ^ max_scale
        let max_width: usize = 1 << scale;
        let root_of_unity = FsFr::from_u64_arr(&SCALE2_ROOT_OF_UNITY[scale]);
        // create max_width of roots & store them reversed as well
        let roots_of_unity = expand_root_of_unity(&root_of_unity, max_width)?;
        // BRP table drops the redundant wrap-around root before permuting.
        let mut brp_roots_of_unity = roots_of_unity.clone();
        brp_roots_of_unity.pop();
        reverse_bit_order(&mut brp_roots_of_unity)?;
        let mut reverse_roots_of_unity = roots_of_unity.clone();
        reverse_roots_of_unity.reverse();
        Ok(FsFFTSettings {
            max_width,
            root_of_unity,
            reverse_roots_of_unity,
            roots_of_unity,
            brp_roots_of_unity,
        })
    }

    fn get_max_width(&self) -> usize {
        self.max_width
    }

    fn get_reverse_roots_of_unity_at(&self, i: usize) -> FsFr {
        self.reverse_roots_of_unity[i]
    }

    fn get_reversed_roots_of_unity(&self) -> &[FsFr] {
        &self.reverse_roots_of_unity
    }

    fn get_roots_of_unity_at(&self, i: usize) -> FsFr {
        self.roots_of_unity[i]
    }

    fn get_roots_of_unity(&self) -> &[FsFr] {
        &self.roots_of_unity
    }

    fn get_brp_roots_of_unity(&self) -> &[FsFr] {
        &self.brp_roots_of_unity
    }

    fn get_brp_roots_of_unity_at(&self, i: usize) -> FsFr {
        self.brp_roots_of_unity[i]
    }
}
/// Repeatedly multiplies `root` by itself, collecting every power, until the
/// product cycles back to one. Succeeds only if the cycle closes after
/// exactly `width` steps (i.e. `root` has order `width`); the returned
/// vector has `width + 1` entries, starting at one and ending at one.
pub fn expand_root_of_unity(root: &FsFr, width: usize) -> Result<Vec<FsFr>, String> {
    let mut powers = Vec::with_capacity(width + 1);
    powers.push(FsFr::one());
    powers.push(*root);
    loop {
        let last = *powers.last().unwrap();
        if last.is_one() {
            break;
        }
        // Cycle longer than the domain: `root` is not a width-th root.
        if powers.len() > width {
            return Err(String::from("Root of unity multiplied for too long"));
        }
        powers.push(last.mul(root));
    }
    if powers.len() != width + 1 {
        return Err(String::from("Root of unity has invalid scale"));
    }
    Ok(powers)
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/types/g1.rs | blst/src/types/g1.rs | extern crate alloc;
use alloc::{
borrow::ToOwned,
format,
string::{String, ToString},
vec::Vec,
};
use arbitrary::Arbitrary;
use blst::{
blst_fp, blst_fp_cneg, blst_p1, blst_p1_add, blst_p1_add_or_double, blst_p1_affine,
blst_p1_affine_serialize, blst_p1_cneg, blst_p1_compress, blst_p1_double, blst_p1_from_affine,
blst_p1_in_g1, blst_p1_is_equal, blst_p1_is_inf, blst_p1_mult, blst_p1_uncompress, blst_scalar,
blst_scalar_from_fr, p1_affines, BLST_ERROR,
};
use core::{hash::Hash, ptr};
use kzg::{
common_utils::log_2_byte, eip_4844::BYTES_PER_G1, msm::precompute::PrecomputationTable,
G1Affine, G1GetFp, G1LinComb, G1Mul, G1ProjAddAffine, G1,
};
use crate::consts::{G1_GENERATOR, G1_IDENTITY, G1_NEGATIVE_GENERATOR};
use crate::kzg_proofs::g1_linear_combination;
use crate::types::fr::FsFr;
use super::fp::FsFp;
/// Projective (Jacobian) G1 point wrapping blst's `blst_p1`.
/// `repr(C)` so it can be transmuted to/from the raw blst representation
/// (see `G1GetFp` and the batch affine conversions).
#[repr(C)]
#[derive(Debug, Default, Clone, Copy, Eq, PartialEq)]
pub struct FsG1(pub blst_p1);
impl Hash for FsG1 {
fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
self.0.x.l.hash(state);
self.0.y.l.hash(state);
self.0.z.l.hash(state);
}
}
impl FsG1 {
    /// Constructs a point directly from raw Jacobian coordinates.
    /// No validity or subgroup check is performed; callers must supply a
    /// well-formed point (used for compile-time constants).
    pub(crate) const fn from_xyz(x: blst_fp, y: blst_fp, z: blst_fp) -> Self {
        FsG1(blst_p1 { x, y, z })
    }
}
impl G1 for FsG1 {
    fn identity() -> Self {
        G1_IDENTITY
    }
    fn generator() -> Self {
        G1_GENERATOR
    }
    fn negative_generator() -> Self {
        G1_NEGATIVE_GENERATOR
    }
    /// Random point obtained by scaling the generator with a random scalar.
    #[cfg(feature = "rand")]
    fn rand() -> Self {
        let result: FsG1 = G1_GENERATOR;
        result.mul(&kzg::Fr::rand())
    }
    /// Deserializes a compressed 48-byte G1 point.
    ///
    /// # Errors
    /// Returns `Err` if the length is not `BYTES_PER_G1` or the bytes do not
    /// decode to a point on the curve.
    fn from_bytes(bytes: &[u8]) -> Result<Self, String> {
        bytes
            .try_into()
            .map_err(|_| {
                format!(
                    "Invalid byte length. Expected {}, got {}",
                    BYTES_PER_G1,
                    bytes.len()
                )
            })
            .and_then(|bytes: &[u8; BYTES_PER_G1]| {
                let mut tmp = blst_p1_affine::default();
                let mut g1 = blst_p1::default();
                unsafe {
                    // The uncompress routine also checks that the point is on the curve
                    if blst_p1_uncompress(&mut tmp, bytes.as_ptr()) != BLST_ERROR::BLST_SUCCESS {
                        return Err("Failed to uncompress".to_string());
                    }
                    blst_p1_from_affine(&mut g1, &tmp);
                }
                Ok(FsG1(g1))
            })
    }
    /// Parses a hex-encoded compressed point; an optional `0x` prefix is
    /// accepted.
    ///
    /// Previously this sliced off the first two characters unconditionally
    /// and `unwrap()`ed the decode, panicking on short or malformed input;
    /// both cases now surface as `Err`.
    fn from_hex(hex: &str) -> Result<Self, String> {
        let hex = hex.strip_prefix("0x").unwrap_or(hex);
        let bytes = hex::decode(hex).map_err(|e| format!("Invalid hex string: {}", e))?;
        Self::from_bytes(&bytes)
    }
    /// Serializes to blst's 48-byte compressed encoding.
    fn to_bytes(&self) -> [u8; 48] {
        let mut out = [0u8; BYTES_PER_G1];
        unsafe {
            blst_p1_compress(out.as_mut_ptr(), &self.0);
        }
        out
    }
    /// Point addition that also handles the `self == b` (doubling) case.
    fn add_or_dbl(&self, b: &Self) -> Self {
        let mut ret = Self::default();
        unsafe {
            blst_p1_add_or_double(&mut ret.0, &self.0, &b.0);
        }
        ret
    }
    fn is_inf(&self) -> bool {
        unsafe { blst_p1_is_inf(&self.0) }
    }
    fn is_valid(&self) -> bool {
        unsafe {
            // The point must be on the right subgroup
            blst_p1_in_g1(&self.0)
        }
    }
    /// Point doubling.
    fn dbl(&self) -> Self {
        let mut result = blst_p1::default();
        unsafe {
            blst_p1_double(&mut result, &self.0);
        }
        Self(result)
    }
    fn add(&self, b: &Self) -> Self {
        let mut ret = Self::default();
        unsafe {
            blst_p1_add(&mut ret.0, &self.0, &b.0);
        }
        ret
    }
    /// self - b, computed as self + (-b); uses add_or_double so self == b
    /// (yielding infinity) is handled.
    fn sub(&self, b: &Self) -> Self {
        let mut b_negative: FsG1 = *b;
        let mut ret = Self::default();
        unsafe {
            blst_p1_cneg(&mut b_negative.0, true);
            blst_p1_add_or_double(&mut ret.0, &self.0, &b_negative.0);
        }
        ret
    }
    fn equals(&self, b: &Self) -> bool {
        unsafe { blst_p1_is_equal(&self.0, &b.0) }
    }
    /// Jacobian encoding with z = 0, i.e. the point at infinity. The x and y
    /// limbs are the same constant exposed as `FsFp::one()` in fp.rs —
    /// NOTE(review): presumably the field's internal encoding of 1; confirm
    /// against blst's conventions.
    fn zero() -> Self {
        Self(blst_p1 {
            x: blst_fp {
                l: [
                    8505329371266088957,
                    17002214543764226050,
                    6865905132761471162,
                    8632934651105793861,
                    6631298214892334189,
                    1582556514881692819,
                ],
            },
            y: blst_fp {
                l: [
                    8505329371266088957,
                    17002214543764226050,
                    6865905132761471162,
                    8632934651105793861,
                    6631298214892334189,
                    1582556514881692819,
                ],
            },
            z: blst_fp {
                l: [0, 0, 0, 0, 0, 0],
            },
        })
    }
    fn add_or_dbl_assign(&mut self, b: &Self) {
        unsafe {
            blst::blst_p1_add_or_double(&mut self.0, &self.0, &b.0);
        }
    }
    fn add_assign(&mut self, b: &Self) {
        unsafe {
            blst::blst_p1_add(&mut self.0, &self.0, &b.0);
        }
    }
    fn dbl_assign(&mut self) {
        unsafe {
            blst::blst_p1_double(&mut self.0, &self.0);
        }
    }
}
/// Zero-cost access to the raw Jacobian coordinates as `FsFp` references.
/// All accessors transmute field references; this is sound only because both
/// `FsFp` and `blst_fp` are `repr(C)` wrappers with identical layout.
impl G1GetFp<FsFp> for FsG1 {
    fn x(&self) -> &FsFp {
        unsafe {
            // Transmute safe due to repr(C) on FsFp
            core::mem::transmute(&self.0.x)
        }
    }
    fn y(&self) -> &FsFp {
        unsafe {
            // Transmute safe due to repr(C) on FsFp
            core::mem::transmute(&self.0.y)
        }
    }
    fn z(&self) -> &FsFp {
        unsafe {
            // Transmute safe due to repr(C) on FsFp
            core::mem::transmute(&self.0.z)
        }
    }
    fn x_mut(&mut self) -> &mut FsFp {
        unsafe {
            // Transmute safe due to repr(C) on FsFp
            core::mem::transmute(&mut self.0.x)
        }
    }
    fn y_mut(&mut self) -> &mut FsFp {
        unsafe {
            // Transmute safe due to repr(C) on FsFp
            core::mem::transmute(&mut self.0.y)
        }
    }
    fn z_mut(&mut self) -> &mut FsFp {
        unsafe {
            // Transmute safe due to repr(C) on FsFp
            core::mem::transmute(&mut self.0.z)
        }
    }
    /// Assembles a point from raw Jacobian coordinates; no validity check.
    fn from_jacobian(x: FsFp, y: FsFp, z: FsFp) -> Self {
        Self(blst_p1 {
            x: x.0,
            y: y.0,
            z: z.0,
        })
    }
}
impl G1Mul<FsFr> for FsG1 {
    /// Scalar multiplication with shortcuts for the scalars 0 and 1.
    fn mul(&self, b: &FsFr) -> Self {
        let mut scalar: blst_scalar = blst_scalar::default();
        unsafe {
            blst_scalar_from_fr(&mut scalar, &b.0);
        }
        // Count the number of significant bytes: strip high-order zero bytes
        // so blst only processes the meaningful part of the scalar.
        let mut i = scalar.b.len();
        while i != 0 && scalar.b[i - 1] == 0 {
            i -= 1;
        }
        let mut result = Self::default();
        if i == 0 {
            // Scalar is zero: 0 * P = identity.
            return G1_IDENTITY;
        } else if i == 1 && scalar.b[0] == 1 {
            // Scalar is one: 1 * P = P.
            return *self;
        } else {
            // Bit length = 8 * (i - 1) full bytes plus the bits of the top
            // byte (log_2_byte(top) + 1), i.e. 8 * i - 7 + log_2_byte(top).
            unsafe {
                blst_p1_mult(
                    &mut result.0,
                    &self.0,
                    &(scalar.b[0]),
                    8 * i - 7 + log_2_byte(scalar.b[i - 1]),
                );
            }
        }
        result
    }
}
impl G1LinComb<FsFr, FsFp, FsG1Affine, FsG1ProjAddAffine> for FsG1 {
    /// Multi-scalar multiplication over the first `len` (point, scalar)
    /// pairs, delegating to `g1_linear_combination`; an optional
    /// precomputation table can accelerate the MSM.
    fn g1_lincomb(
        points: &[Self],
        scalars: &[FsFr],
        len: usize,
        precomputation: Option<
            &PrecomputationTable<FsFr, Self, FsFp, FsG1Affine, FsG1ProjAddAffine>,
        >,
    ) -> Self {
        let mut out = FsG1::default();
        g1_linear_combination(&mut out, points, scalars, len, precomputation);
        out
    }
}
/// Affine G1 point wrapping blst's `blst_p1_affine`. `repr(C)` so batches
/// can be transmuted to/from blst's `p1_affines` (see `into_affines`).
#[repr(C)]
#[derive(Debug, Default, Clone, Copy, Eq, PartialEq)]
pub struct FsG1Affine(pub blst_p1_affine);
impl<'a> Arbitrary<'a> for FsG1Affine {
    /// Produces an arbitrary affine point as generator * arbitrary scalar.
    fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
        let point = FsG1::generator().mul(&u.arbitrary()?);
        Ok(FsG1Affine::into_affine(&point))
    }
}
impl G1Affine<FsG1, FsFp> for FsG1Affine {
    /// All-zero affine coordinates — NOTE(review): presumably interpreted by
    /// blst as the point at infinity (compare `is_infinity`); confirm before
    /// relying on it.
    fn zero() -> Self {
        Self(blst_p1_affine {
            x: {
                blst_fp {
                    l: [0, 0, 0, 0, 0, 0],
                }
            },
            y: {
                blst_fp {
                    l: [0, 0, 0, 0, 0, 0],
                }
            },
        })
    }
    /// Converts a single projective point to affine form.
    fn into_affine(g1: &FsG1) -> Self {
        let mut ret: Self = Default::default();
        unsafe {
            blst::blst_p1_to_affine(&mut ret.0, &g1.0);
        }
        ret
    }
    /// Batch conversion into a caller-provided buffer; `out` must hold at
    /// least `g1.len()` elements.
    fn into_affines_loc(out: &mut [Self], g1: &[FsG1]) {
        // blst's batch API takes a null-terminated array of input pointers.
        let p: [*const blst_p1; 2] = [g1.as_ptr() as *const blst_p1, ptr::null()];
        unsafe {
            blst::blst_p1s_to_affine(out.as_mut_ptr() as *mut blst_p1_affine, &p[0], g1.len());
        }
    }
    /// Batch conversion returning a new vector via blst's `p1_affines`,
    /// then transmuting the result in place (no per-element copy).
    fn into_affines(g1: &[FsG1]) -> Vec<Self> {
        if g1.is_empty() {
            Vec::new()
        } else {
            let points =
                unsafe { core::slice::from_raw_parts(g1.as_ptr() as *const blst_p1, g1.len()) };
            let points = p1_affines::from(points);
            unsafe {
                // Transmute safe due to repr(C) on FsG1Affine
                core::mem::transmute::<p1_affines, Vec<Self>>(points)
            }
        }
    }
    /// Converts back to projective (Jacobian) form.
    fn to_proj(&self) -> FsG1 {
        let mut ret: FsG1 = Default::default();
        unsafe {
            blst::blst_p1_from_affine(&mut ret.0, &self.0);
        }
        ret
    }
    fn x(&self) -> &FsFp {
        unsafe {
            // Transmute safe due to repr(C) on FsFp
            core::mem::transmute(&self.0.x)
        }
    }
    fn y(&self) -> &FsFp {
        unsafe {
            // Transmute safe due to repr(C) on FsFp
            core::mem::transmute(&self.0.y)
        }
    }
    fn is_infinity(&self) -> bool {
        unsafe { blst::blst_p1_affine_is_inf(&self.0) }
    }
    fn x_mut(&mut self) -> &mut FsFp {
        unsafe {
            // Transmute safe due to repr(C) on FsFp
            core::mem::transmute(&mut self.0.x)
        }
    }
    fn y_mut(&mut self) -> &mut FsFp {
        unsafe {
            // Transmute safe due to repr(C) on FsFp
            core::mem::transmute(&mut self.0.y)
        }
    }
    /// Point negation: negate y, except for the point at infinity which is
    /// its own negation.
    fn neg(&self) -> Self {
        let mut ret = *self;
        if !self.is_infinity() {
            unsafe {
                blst_fp_cneg(&mut ret.0.y, &self.0.y, true);
            }
        }
        ret
    }
    /// Assembles an affine point from raw coordinates; no validity check.
    fn from_xy(x: FsFp, y: FsFp) -> Self {
        Self(blst_p1_affine { x: x.0, y: y.0 })
    }
    /// Serializes to blst's 96-byte uncompressed encoding.
    fn to_bytes_uncompressed(&self) -> [u8; 96] {
        let mut output = [0u8; 96];
        unsafe {
            blst_p1_affine_serialize(output.as_mut_ptr(), &self.0);
        }
        output
    }
    /// Deserializes blst's 96-byte uncompressed encoding.
    /// NOTE(review): deserialization here does not appear to perform a
    /// subgroup check (unlike `FsG1::is_valid`) — confirm callers validate
    /// separately when the bytes are untrusted.
    fn from_bytes_uncompressed(bytes: [u8; 96]) -> Result<Self, String> {
        let mut output = Self::default();
        let res = unsafe { blst::blst_p1_deserialize(&mut output.0, bytes.as_ptr()) };
        if res == BLST_ERROR::BLST_SUCCESS {
            Ok(output)
        } else {
            Err("Failed to deserialize point".to_owned())
        }
    }
}
/// Marker type providing mixed projective+affine addition via blst.
#[derive(Clone, Debug, Default)]
pub struct FsG1ProjAddAffine;
impl G1ProjAddAffine<FsG1, FsFp, FsG1Affine> for FsG1ProjAddAffine {
    /// `proj += aff` using blst's plain mixed addition — NOTE(review): the
    /// non-`or_double` blst variant; use the method below when the operands
    /// may be equal.
    fn add_assign_affine(proj: &mut FsG1, aff: &FsG1Affine) {
        unsafe {
            blst::blst_p1_add_affine(&mut proj.0, &proj.0, &aff.0);
        }
    }
    /// `proj += aff`, also handling the doubling case (proj == aff).
    fn add_or_double_assign_affine(proj: &mut FsG1, aff: &FsG1Affine) {
        unsafe {
            blst::blst_p1_add_or_double_affine(&mut proj.0, &proj.0, &aff.0);
        }
    }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/types/fp.rs | blst/src/types/fp.rs | use blst::blst_fp;
use kzg::G1Fp;
/// Base-field (Fp) element wrapping blst's `blst_fp` (six u64 limbs).
/// `repr(C)` so references can be transmuted to/from the raw blst type
/// (relied upon by the G1 coordinate accessors).
#[repr(C)]
#[derive(Debug, Default, Clone, Copy, Eq, PartialEq)]
pub struct FsFp(pub blst_fp);
impl G1Fp for FsFp {
    /// Multiplicative identity. Bit-for-bit identical to `bls12_381_rx_p()`,
    /// so it delegates instead of duplicating the six limb constants as the
    /// previous version did.
    fn one() -> Self {
        Self::bls12_381_rx_p()
    }
    /// Additive identity.
    fn zero() -> Self {
        Self(blst_fp {
            l: [0, 0, 0, 0, 0, 0],
        })
    }
    /// The constant named "R mod p" — NOTE(review): presumably the field's
    /// internal (Montgomery) encoding of 1; confirm against blst conventions.
    fn bls12_381_rx_p() -> Self {
        Self(blst_fp {
            l: [
                8505329371266088957,
                17002214543764226050,
                6865905132761471162,
                8632934651105793861,
                6631298214892334189,
                1582556514881692819,
            ],
        })
    }
    /// Field inversion. This always returns `Some`, even for zero (which has
    /// no inverse) — NOTE(review): confirm callers never rely on `None`.
    fn inverse(&self) -> Option<Self> {
        let mut out: Self = *self;
        unsafe {
            blst::blst_fp_inverse(&mut out.0, &self.0);
        }
        Some(out)
    }
    fn square(&self) -> Self {
        let mut out: Self = Default::default();
        unsafe {
            blst::blst_fp_sqr(&mut out.0, &self.0);
        }
        out
    }
    /// 2 * self, computed as self + self.
    fn double(&self) -> Self {
        let mut out: Self = Default::default();
        unsafe {
            blst::blst_fp_add(&mut out.0, &self.0, &self.0);
        }
        out
    }
    fn from_underlying_arr(arr: &[u64; 6]) -> Self {
        Self(blst_fp { l: *arr })
    }
    /// In-place negation (the blst `cneg` flag is always set here).
    fn neg_assign(&mut self) {
        unsafe {
            blst::blst_fp_cneg(&mut self.0, &self.0, true);
        }
    }
    fn mul_assign_fp(&mut self, b: &Self) {
        unsafe {
            blst::blst_fp_mul(&mut self.0, &self.0, &b.0);
        }
    }
    fn sub_assign_fp(&mut self, b: &Self) {
        unsafe {
            blst::blst_fp_sub(&mut self.0, &self.0, &b.0);
        }
    }
    fn add_assign_fp(&mut self, b: &Self) {
        unsafe {
            blst::blst_fp_add(&mut self.0, &self.0, &b.0);
        }
    }
    /// 3 * self via blst's dedicated multiply-by-3 routine.
    fn mul3(&self) -> Self {
        let mut ret = FsFp::default();
        unsafe {
            blst::blst_fp_mul_by_3(&mut ret.0, &self.0);
        }
        ret
    }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/types/mod.rs | blst/src/types/mod.rs | pub mod fft_settings;
pub mod fk20_multi_settings;
pub mod fk20_single_settings;
pub mod fp;
pub mod fr;
pub mod g1;
pub mod g2;
pub mod kzg_settings;
pub mod poly;
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/src/types/fk20_multi_settings.rs | blst/src/types/fk20_multi_settings.rs | extern crate alloc;
use alloc::string::String;
use alloc::vec;
use alloc::vec::Vec;
use kzg::common_utils::reverse_bit_order;
use kzg::{FK20MultiSettings, Poly, FFTG1, G1};
use crate::types::fft_settings::FsFFTSettings;
use crate::types::fr::FsFr;
use crate::types::g1::FsG1;
use crate::types::g2::FsG2;
use crate::types::kzg_settings::FsKZGSettings;
use crate::types::poly::FsPoly;
use super::fp::FsFp;
use super::g1::{FsG1Affine, FsG1ProjAddAffine};
/// Precomputed state for FK20 multi-proof generation.
pub struct FsFK20MultiSettings {
    // Underlying KZG settings (trusted setup points + FFT settings).
    pub kzg_settings: FsKZGSettings,
    // Number of field elements per proof chunk; validated as a power of two
    // no larger than n2 / 2 in `new`.
    pub chunk_len: usize,
    // One precomputed FFT of setup points per chunk offset, consumed by
    // `toeplitz_part_2` in `data_availability_optimized`.
    pub x_ext_fft_files: Vec<Vec<FsG1>>,
}
// NOTE(review): this manual impl is identical to what `#[derive(Clone)]`
// would generate; it could be replaced by a derive on the struct.
impl Clone for FsFK20MultiSettings {
    fn clone(&self) -> Self {
        Self {
            kzg_settings: self.kzg_settings.clone(),
            chunk_len: self.chunk_len,
            x_ext_fft_files: self.x_ext_fft_files.clone(),
        }
    }
}
impl Default for FsFK20MultiSettings {
fn default() -> Self {
Self {
kzg_settings: FsKZGSettings::default(),
chunk_len: 1,
x_ext_fft_files: vec![],
}
}
}
impl
    FK20MultiSettings<
        FsFr,
        FsG1,
        FsG2,
        FsFFTSettings,
        FsPoly,
        FsKZGSettings,
        FsFp,
        FsG1Affine,
        FsG1ProjAddAffine,
    > for FsFK20MultiSettings
{
    /// Builds FK20 multi-proof settings for `n2 / 2` evaluations split into
    /// chunks of `chunk_len`, precomputing one extension FFT of the setup
    /// points per chunk offset.
    ///
    /// # Errors
    /// Rejects `n2` larger than the settings' max width, non-power-of-two
    /// `n2` or `chunk_len`, `n2 < 2`, and `chunk_len > n2 / 2`.
    #[allow(clippy::many_single_char_names)]
    fn new(ks: &FsKZGSettings, n2: usize, chunk_len: usize) -> Result<Self, String> {
        if n2 > ks.fs.max_width {
            return Err(String::from(
                "n2 must be less than or equal to kzg settings max width",
            ));
        } else if !n2.is_power_of_two() {
            return Err(String::from("n2 must be a power of two"));
        } else if n2 < 2 {
            return Err(String::from("n2 must be greater than or equal to 2"));
        } else if chunk_len > n2 / 2 {
            // Fixed error text: the requirement is chunk_len <= n2 / 2, but
            // the message previously claimed "greater or equal".
            return Err(String::from(
                "chunk_len must be less than or equal to n2 / 2",
            ));
        } else if !chunk_len.is_power_of_two() {
            return Err(String::from("chunk_len must be a power of two"));
        }
        let n = n2 / 2;
        let k = n / chunk_len;
        let mut ext_fft_files = Vec::with_capacity(chunk_len);
        {
            // Scratch buffer reused across offsets to avoid re-allocating.
            let mut x = Vec::with_capacity(k);
            for offset in 0..chunk_len {
                // Walk the monomial setup points backwards in strides of
                // chunk_len, starting near the top for this offset (clamped
                // to 0 for small n).
                let mut start = 0;
                if n >= chunk_len + 1 + offset {
                    start = n - chunk_len - 1 - offset;
                }
                let mut i = 0;
                let mut j = start;
                while i + 1 < k {
                    x.push(ks.g1_values_monomial[j]);
                    i += 1;
                    if j >= chunk_len {
                        j -= chunk_len;
                    } else {
                        j = 0;
                    }
                }
                // Final entry is the identity point.
                x.push(FsG1::identity());
                let ext_fft_file = ks.fs.toeplitz_part_1(&x);
                x.clear();
                ext_fft_files.push(ext_fft_file);
            }
        }
        Ok(Self {
            kzg_settings: ks.clone(),
            chunk_len,
            x_ext_fft_files: ext_fft_files,
        })
    }
    /// Computes FK20 multi proofs for `p`, returned in natural order
    /// (bit-reversal permutation of the optimized output).
    fn data_availability(&self, p: &FsPoly) -> Result<Vec<FsG1>, String> {
        let n = p.len();
        let n2 = n * 2;
        if n2 > self.kzg_settings.fs.max_width {
            return Err(String::from(
                "n2 must be less than or equal to kzg settings max width",
            ));
        }
        if !n2.is_power_of_two() {
            return Err(String::from("n2 must be a power of two"));
        }
        // Propagate failure instead of unwrapping (the previous version
        // panicked if the optimized computation errored).
        let mut ret = self.data_availability_optimized(p)?;
        reverse_bit_order(&mut ret)?;
        Ok(ret)
    }
    /// FK20 multi-proof computation, leaving the result in reverse-bit order.
    fn data_availability_optimized(&self, p: &FsPoly) -> Result<Vec<FsG1>, String> {
        let n = p.len();
        let n2 = n * 2;
        if n2 > self.kzg_settings.fs.max_width {
            return Err(String::from(
                "n2 must be less than or equal to kzg settings max width",
            ));
        } else if !n2.is_power_of_two() {
            return Err(String::from("n2 must be a power of two"));
        }
        let n = n2 / 2;
        let k = n / self.chunk_len;
        let k2 = k * 2;
        // Accumulate the per-offset Toeplitz contributions.
        let mut h_ext_fft = vec![FsG1::identity(); k2];
        for i in 0..self.chunk_len {
            let toeplitz_coeffs = p.toeplitz_coeffs_stride(i, self.chunk_len);
            let h_ext_fft_file = self
                .kzg_settings
                .fs
                .toeplitz_part_2(&toeplitz_coeffs, &self.x_ext_fft_files[i]);
            for j in 0..k2 {
                h_ext_fft[j] = h_ext_fft[j].add_or_dbl(&h_ext_fft_file[j]);
            }
        }
        let mut h = self.kzg_settings.fs.toeplitz_part_3(&h_ext_fft);
        // Zero the upper half in place; the previous version allocated a
        // temporary Vec of identities just to copy from it.
        h[k..k2].fill(FsG1::identity());
        // Propagate FFT errors instead of unwrapping.
        self.kzg_settings.fs.fft_g1(&h, false)
    }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/tests/consts.rs | blst/tests/consts.rs | // #[path = "./local_tests/local_consts.rs"]
// pub mod local_consts;
#[cfg(test)]
mod tests {
    //! Root-of-unity constant tests from the shared `kzg_bench` suite,
    //! instantiated with the blst backend's field and FFT-settings types.
    use kzg_bench::tests::consts::{
        expand_roots_is_plausible, new_fft_settings_is_plausible, roots_of_unity_are_plausible,
        roots_of_unity_is_the_expected_size, roots_of_unity_out_of_bounds_fails,
    };
    use rust_kzg_blst::consts::SCALE2_ROOT_OF_UNITY;
    use rust_kzg_blst::types::fft_settings::{expand_root_of_unity, FsFFTSettings};
    use rust_kzg_blst::types::fr::FsFr;
    // Shared tests
    #[test]
    fn roots_of_unity_is_the_expected_size_() {
        roots_of_unity_is_the_expected_size(&SCALE2_ROOT_OF_UNITY);
    }
    #[test]
    fn roots_of_unity_out_of_bounds_fails_() {
        roots_of_unity_out_of_bounds_fails::<FsFr, FsFFTSettings>();
    }
    #[test]
    fn roots_of_unity_are_plausible_() {
        roots_of_unity_are_plausible::<FsFr>(&SCALE2_ROOT_OF_UNITY);
    }
    #[test]
    fn expand_roots_is_plausible_() {
        expand_roots_is_plausible::<FsFr>(&SCALE2_ROOT_OF_UNITY, &expand_root_of_unity);
    }
    #[test]
    fn new_fft_settings_is_plausible_() {
        new_fft_settings_is_plausible::<FsFr, FsFFTSettings>();
    }
    // Local tests (disabled; depends on the commented-out local_consts module)
    // #[test]
    // fn roots_of_unity_repeat_at_stride_() {
    //     roots_of_unity_repeat_at_stride::<FsFr, FsFFTSettings>();
    // }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/tests/das.rs | blst/tests/das.rs | #[cfg(test)]
mod tests {
use kzg_bench::tests::das::{das_extension_test_known, das_extension_test_random};
use rust_kzg_blst::types::fft_settings::FsFFTSettings;
use rust_kzg_blst::types::fr::FsFr;
#[test]
fn das_extension_test_known_() {
das_extension_test_known::<FsFr, FsFFTSettings>();
}
#[test]
fn das_extension_test_random_() {
das_extension_test_random::<FsFr, FsFFTSettings>();
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/tests/eip_4844.rs | blst/tests/eip_4844.rs | #[cfg(test)]
mod tests {
use kzg::eip_4844::{
blob_to_kzg_commitment_rust, blob_to_polynomial, bytes_to_blob,
compute_blob_kzg_proof_rust, compute_challenge_rust, compute_kzg_proof_rust,
compute_powers, evaluate_polynomial_in_evaluation_form, verify_blob_kzg_proof_batch_rust,
verify_blob_kzg_proof_rust, verify_kzg_proof_rust,
};
use kzg::Fr;
use kzg_bench::tests::eip_4844::{
blob_to_kzg_commitment_test, bytes_to_bls_field_test,
compute_and_verify_blob_kzg_proof_fails_with_incorrect_proof_test,
compute_and_verify_blob_kzg_proof_test,
compute_and_verify_kzg_proof_fails_with_incorrect_proof_test,
compute_and_verify_kzg_proof_round_trip_test,
compute_and_verify_kzg_proof_within_domain_test, compute_kzg_proof_empty_blob_vector_test,
compute_kzg_proof_incorrect_blob_length_test,
compute_kzg_proof_incorrect_commitments_len_test,
compute_kzg_proof_incorrect_poly_length_test, compute_kzg_proof_incorrect_proofs_len_test,
compute_kzg_proof_test, compute_powers_test, test_vectors_blob_to_kzg_commitment,
test_vectors_compute_blob_kzg_proof, test_vectors_compute_challenge,
test_vectors_compute_kzg_proof, test_vectors_verify_blob_kzg_proof,
test_vectors_verify_blob_kzg_proof_batch, test_vectors_verify_kzg_proof,
validate_batched_input_test, verify_kzg_proof_batch_fails_with_incorrect_proof_test,
verify_kzg_proof_batch_test,
};
use rust_kzg_blst::consts::SCALE2_ROOT_OF_UNITY;
use rust_kzg_blst::eip_4844::load_trusted_setup_filename_rust;
use rust_kzg_blst::types::fft_settings::expand_root_of_unity;
use rust_kzg_blst::types::fp::FsFp;
use rust_kzg_blst::types::g1::{FsG1Affine, FsG1ProjAddAffine};
use rust_kzg_blst::types::{
fft_settings::FsFFTSettings, fr::FsFr, g1::FsG1, g2::FsG2, kzg_settings::FsKZGSettings,
poly::FsPoly,
};
#[test]
pub fn bytes_to_bls_field_test_() {
bytes_to_bls_field_test::<FsFr>();
}
#[test]
pub fn compute_powers_test_() {
compute_powers_test::<FsFr>(&compute_powers);
}
#[test]
pub fn blob_to_kzg_commitment_test_() {
blob_to_kzg_commitment_test::<
FsFr,
FsG1,
FsG2,
FsPoly,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(
&load_trusted_setup_filename_rust,
&blob_to_kzg_commitment_rust,
);
}
#[test]
pub fn compute_kzg_proof_test_() {
compute_kzg_proof_test::<
FsFr,
FsG1,
FsG2,
FsPoly,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(
&load_trusted_setup_filename_rust,
&compute_kzg_proof_rust,
&blob_to_polynomial,
&evaluate_polynomial_in_evaluation_form,
);
}
#[test]
pub fn compute_and_verify_kzg_proof_round_trip_test_() {
compute_and_verify_kzg_proof_round_trip_test::<
FsFr,
FsG1,
FsG2,
FsPoly,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(
&load_trusted_setup_filename_rust,
&blob_to_kzg_commitment_rust,
&bytes_to_blob,
&compute_kzg_proof_rust,
&blob_to_polynomial,
&evaluate_polynomial_in_evaluation_form,
&verify_kzg_proof_rust,
);
}
#[test]
pub fn compute_and_verify_kzg_proof_within_domain_test_() {
compute_and_verify_kzg_proof_within_domain_test::<
FsFr,
FsG1,
FsG2,
FsPoly,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(
&load_trusted_setup_filename_rust,
&blob_to_kzg_commitment_rust,
&bytes_to_blob,
&compute_kzg_proof_rust,
&blob_to_polynomial,
&evaluate_polynomial_in_evaluation_form,
&verify_kzg_proof_rust,
);
}
#[test]
pub fn compute_and_verify_kzg_proof_fails_with_incorrect_proof_test_() {
compute_and_verify_kzg_proof_fails_with_incorrect_proof_test::<
FsFr,
FsG1,
FsG2,
FsPoly,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(
&load_trusted_setup_filename_rust,
&blob_to_kzg_commitment_rust,
&bytes_to_blob,
&compute_kzg_proof_rust,
&blob_to_polynomial,
&evaluate_polynomial_in_evaluation_form,
&verify_kzg_proof_rust,
);
}
#[test]
pub fn compute_and_verify_blob_kzg_proof_test_() {
compute_and_verify_blob_kzg_proof_test::<
FsFr,
FsG1,
FsG2,
FsPoly,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(
&load_trusted_setup_filename_rust,
&blob_to_kzg_commitment_rust,
&bytes_to_blob,
&compute_blob_kzg_proof_rust,
&verify_blob_kzg_proof_rust,
);
}
#[test]
pub fn compute_and_verify_blob_kzg_proof_fails_with_incorrect_proof_test_() {
compute_and_verify_blob_kzg_proof_fails_with_incorrect_proof_test::<
FsFr,
FsG1,
FsG2,
FsPoly,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(
&load_trusted_setup_filename_rust,
&blob_to_kzg_commitment_rust,
&bytes_to_blob,
&compute_blob_kzg_proof_rust,
&verify_blob_kzg_proof_rust,
);
}
#[test]
pub fn verify_kzg_proof_batch_test_() {
verify_kzg_proof_batch_test::<
FsFr,
FsG1,
FsG2,
FsPoly,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(
&load_trusted_setup_filename_rust,
&blob_to_kzg_commitment_rust,
&bytes_to_blob,
&compute_blob_kzg_proof_rust,
&verify_blob_kzg_proof_batch_rust,
);
}
#[test]
pub fn verify_kzg_proof_batch_fails_with_incorrect_proof_test_() {
verify_kzg_proof_batch_fails_with_incorrect_proof_test::<
FsFr,
FsG1,
FsG2,
FsPoly,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(
&load_trusted_setup_filename_rust,
&blob_to_kzg_commitment_rust,
&bytes_to_blob,
&compute_blob_kzg_proof_rust,
&verify_blob_kzg_proof_batch_rust,
);
}
#[test]
pub fn test_vectors_blob_to_kzg_commitment_() {
test_vectors_blob_to_kzg_commitment::<
FsFr,
FsG1,
FsG2,
FsPoly,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(
&load_trusted_setup_filename_rust,
&blob_to_kzg_commitment_rust,
&bytes_to_blob,
);
}
#[test]
pub fn test_vectors_compute_kzg_proof_() {
test_vectors_compute_kzg_proof::<
FsFr,
FsG1,
FsG2,
FsPoly,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(
&load_trusted_setup_filename_rust,
&compute_kzg_proof_rust,
&bytes_to_blob,
);
}
#[test]
pub fn test_vectors_compute_blob_kzg_proof_() {
test_vectors_compute_blob_kzg_proof::<
FsFr,
FsG1,
FsG2,
FsPoly,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(
&load_trusted_setup_filename_rust,
&bytes_to_blob,
&compute_blob_kzg_proof_rust,
);
}
#[test]
pub fn test_vectors_verify_kzg_proof_() {
test_vectors_verify_kzg_proof::<
FsFr,
FsG1,
FsG2,
FsPoly,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(&load_trusted_setup_filename_rust, &verify_kzg_proof_rust);
}
#[test]
pub fn test_vectors_verify_blob_kzg_proof_() {
test_vectors_verify_blob_kzg_proof::<
FsFr,
FsG1,
FsG2,
FsPoly,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(
&load_trusted_setup_filename_rust,
&bytes_to_blob,
&verify_blob_kzg_proof_rust,
);
}
#[test]
pub fn test_vectors_verify_blob_kzg_proof_batch_() {
test_vectors_verify_blob_kzg_proof_batch::<
FsFr,
FsG1,
FsG2,
FsPoly,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(
&load_trusted_setup_filename_rust,
&bytes_to_blob,
&verify_blob_kzg_proof_batch_rust,
);
}
#[test]
pub fn test_vectors_compute_challenge_() {
test_vectors_compute_challenge::<FsFr, FsG1>(&bytes_to_blob, &compute_challenge_rust);
}
#[test]
pub fn expand_root_of_unity_too_long() {
let out = expand_root_of_unity(&FsFr::from_u64_arr(&SCALE2_ROOT_OF_UNITY[1]), 1);
assert!(out.is_err());
}
#[test]
pub fn expand_root_of_unity_too_short() {
let out = expand_root_of_unity(&FsFr::from_u64_arr(&SCALE2_ROOT_OF_UNITY[1]), 3);
assert!(out.is_err());
}
#[test]
pub fn compute_kzg_proof_incorrect_blob_length() {
compute_kzg_proof_incorrect_blob_length_test::<FsFr, FsPoly>(&blob_to_polynomial);
}
#[test]
pub fn compute_kzg_proof_incorrect_poly_length() {
compute_kzg_proof_incorrect_poly_length_test::<
FsPoly,
FsFr,
FsG1,
FsG2,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(&evaluate_polynomial_in_evaluation_form);
}
#[test]
pub fn compute_kzg_proof_empty_blob_vector() {
compute_kzg_proof_empty_blob_vector_test::<
FsPoly,
FsFr,
FsG1,
FsG2,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(&verify_blob_kzg_proof_batch_rust)
}
#[test]
pub fn compute_kzg_proof_incorrect_commitments_len() {
compute_kzg_proof_incorrect_commitments_len_test::<
FsPoly,
FsFr,
FsG1,
FsG2,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(&verify_blob_kzg_proof_batch_rust)
}
#[test]
pub fn compute_kzg_proof_incorrect_proofs_len() {
compute_kzg_proof_incorrect_proofs_len_test::<
FsPoly,
FsFr,
FsG1,
FsG2,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(&verify_blob_kzg_proof_batch_rust)
}
#[test]
pub fn validate_batched_input() {
validate_batched_input_test::<
FsPoly,
FsFr,
FsG1,
FsG2,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(
&verify_blob_kzg_proof_batch_rust,
&load_trusted_setup_filename_rust,
)
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/tests/kzg_proofs.rs | blst/tests/kzg_proofs.rs | #[cfg(test)]
mod tests {
use blst::{
blst_final_exp, blst_fp12, blst_fp12_mul, blst_miller_loop, blst_p1_affine, blst_p1_cneg,
blst_p1_to_affine, blst_p2_affine, blst_p2_to_affine, Pairing,
};
use kzg::G1;
use kzg_bench::tests::kzg_proofs::{
commit_to_nil_poly, commit_to_too_long_poly_returns_err, proof_multi, proof_single,
trusted_setup_in_correct_form,
};
use rust_kzg_blst::eip_7594::BlstBackend;
use rust_kzg_blst::types::g1::FsG1;
use rust_kzg_blst::types::g2::FsG2;
use rust_kzg_blst::utils::generate_trusted_setup;
#[test]
pub fn test_trusted_setup_in_correct_form() {
trusted_setup_in_correct_form::<BlstBackend>(&generate_trusted_setup);
}
#[test]
pub fn test_proof_single() {
proof_single::<BlstBackend>(&generate_trusted_setup);
}
#[test]
pub fn test_commit_to_nil_poly() {
commit_to_nil_poly::<BlstBackend>(&generate_trusted_setup);
}
#[test]
pub fn test_commit_to_too_long_poly() {
commit_to_too_long_poly_returns_err::<BlstBackend>(&generate_trusted_setup);
}
#[test]
pub fn test_proof_multi() {
proof_multi::<BlstBackend>(&generate_trusted_setup);
}
// This aims at showing that the use of the blst::Pairing engine in pairings_verify
// has the desired semantics.
#[cfg(feature = "rand")]
fn og_pairings_verify() {
let a1 = FsG1::rand();
let a2 = FsG2::rand();
let b1 = FsG1::rand();
let b2 = FsG2::rand();
let mut loop0 = blst_fp12::default();
let mut loop1 = blst_fp12::default();
let mut gt_point = blst_fp12::default();
let mut aa1 = blst_p1_affine::default();
let mut bb1 = blst_p1_affine::default();
let mut aa2 = blst_p2_affine::default();
let mut bb2 = blst_p2_affine::default();
// As an optimisation, we want to invert one of the pairings,
// so we negate one of the points.
let mut a1neg: FsG1 = a1;
unsafe {
blst_p1_cneg(&mut a1neg.0, true);
blst_p1_to_affine(&mut aa1, &a1neg.0);
blst_p1_to_affine(&mut bb1, &b1.0);
blst_p2_to_affine(&mut aa2, &a2.0);
blst_p2_to_affine(&mut bb2, &b2.0);
blst_miller_loop(&mut loop0, &aa2, &aa1);
blst_miller_loop(&mut loop1, &bb2, &bb1);
blst_fp12_mul(&mut gt_point, &loop0, &loop1);
blst_final_exp(&mut gt_point, >_point);
let dst = [0u8; 3];
let mut pairing_blst = Pairing::new(false, &dst);
pairing_blst.raw_aggregate(&aa2, &aa1);
pairing_blst.raw_aggregate(&bb2, &bb1);
assert_eq!(gt_point, pairing_blst.as_fp12().final_exp());
}
}
#[cfg(feature = "rand")]
#[test]
pub fn test_pairings_verify() {
for _i in 0..100 {
og_pairings_verify();
}
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/tests/eip_7594.rs | blst/tests/eip_7594.rs | #[cfg(test)]
mod tests {
use kzg::{
eip_4844::{blob_to_kzg_commitment_rust, bytes_to_blob},
eth, DAS,
};
use kzg_bench::tests::{
eip_4844::generate_random_blob_bytes,
eip_7594::{
test_vectors_compute_cells, test_vectors_compute_cells_and_kzg_proofs,
test_vectors_compute_verify_cell_kzg_proof_batch_challenge,
test_vectors_recover_cells_and_kzg_proofs, test_vectors_verify_cell_kzg_proof_batch,
},
utils::get_trusted_setup_path,
};
use rust_kzg_blst::{
eip_4844::load_trusted_setup_filename_rust,
eip_7594::BlstBackend,
types::{fr::FsFr, g1::FsG1, kzg_settings::FsKZGSettings},
};
#[test]
pub fn test_vectors_compute_cells_() {
test_vectors_compute_cells::<BlstBackend>(
&load_trusted_setup_filename_rust,
&bytes_to_blob,
);
}
#[test]
pub fn test_vectors_compute_cells_and_kzg_proofs_() {
test_vectors_compute_cells_and_kzg_proofs::<BlstBackend>(
&load_trusted_setup_filename_rust,
&bytes_to_blob,
);
}
#[test]
pub fn test_vectors_recover_cells_and_kzg_proofs_() {
test_vectors_recover_cells_and_kzg_proofs::<BlstBackend>(&load_trusted_setup_filename_rust);
}
#[test]
pub fn test_vectors_verify_cell_kzg_proof_batch_() {
test_vectors_verify_cell_kzg_proof_batch::<BlstBackend>(&load_trusted_setup_filename_rust);
}
#[test]
pub fn test_vectors_compute_verify_cell_kzg_proof_batch_challenge_() {
test_vectors_compute_verify_cell_kzg_proof_batch_challenge::<BlstBackend>();
}
#[test]
pub fn test_recover_cells_and_kzg_proofs_succeeds_random_blob() {
let settings = load_trusted_setup_filename_rust(get_trusted_setup_path().as_str()).unwrap();
let mut rng = rand::thread_rng();
/* Get a random blob */
let blob_bytes = generate_random_blob_bytes(&mut rng);
let blob: Vec<FsFr> = bytes_to_blob(&blob_bytes).unwrap();
let mut cells =
vec![FsFr::default(); eth::CELLS_PER_EXT_BLOB * eth::FIELD_ELEMENTS_PER_CELL];
let mut proofs = vec![FsG1::default(); eth::CELLS_PER_EXT_BLOB];
/* Get the cells and proofs */
let mut result = <FsKZGSettings as DAS<BlstBackend>>::compute_cells_and_kzg_proofs(
&settings,
Some(&mut cells),
Some(&mut proofs),
&blob,
);
assert!(result.is_ok());
let cell_indices: Vec<usize> = (0..).step_by(2).take(eth::CELLS_PER_EXT_BLOB / 2).collect();
let mut partial_cells =
vec![FsFr::default(); (eth::CELLS_PER_EXT_BLOB / 2) * eth::FIELD_ELEMENTS_PER_CELL];
/* Erase half of the cells */
for i in 0..(eth::CELLS_PER_EXT_BLOB / 2) {
partial_cells[i * eth::FIELD_ELEMENTS_PER_CELL..(i + 1) * eth::FIELD_ELEMENTS_PER_CELL]
.clone_from_slice(
&cells[cell_indices[i] * eth::FIELD_ELEMENTS_PER_CELL
..(cell_indices[i] + 1) * eth::FIELD_ELEMENTS_PER_CELL],
);
}
let mut recovered_cells =
vec![FsFr::default(); eth::CELLS_PER_EXT_BLOB * eth::FIELD_ELEMENTS_PER_CELL];
let mut recovered_proofs = vec![FsG1::default(); eth::CELLS_PER_EXT_BLOB];
/* Reconstruct with half of the cells */
result = <FsKZGSettings as DAS<BlstBackend>>::recover_cells_and_kzg_proofs(
&settings,
&mut recovered_cells,
Some(&mut recovered_proofs),
&cell_indices,
&partial_cells,
);
assert!(result.is_ok());
/* Check that all of the cells match */
assert!(recovered_cells == cells, "Cells do not match");
assert!(recovered_proofs == proofs, "Proofs do not match");
}
#[test]
pub fn test_verify_cell_kzg_proof_batch_succeeds_random_blob() {
let settings = load_trusted_setup_filename_rust(get_trusted_setup_path().as_str()).unwrap();
let mut rng = rand::thread_rng();
/* Get a random blob */
let blob_bytes = generate_random_blob_bytes(&mut rng);
let blob = bytes_to_blob(&blob_bytes).unwrap();
/* Get the commitment to the blob */
let commitment_result = blob_to_kzg_commitment_rust(&blob, &settings);
assert!(commitment_result.is_ok());
let commitment = commitment_result.unwrap();
let mut cells: Vec<FsFr> =
vec![FsFr::default(); eth::CELLS_PER_EXT_BLOB * eth::FIELD_ELEMENTS_PER_CELL];
let mut proofs = vec![FsG1::default(); eth::CELLS_PER_EXT_BLOB];
/* Compute cells and proofs */
let result = <FsKZGSettings as DAS<BlstBackend>>::compute_cells_and_kzg_proofs(
&settings,
Some(&mut cells),
Some(&mut proofs),
&blob,
);
assert!(result.is_ok());
/* Initialize list of commitments & cell indices */
let commitments = vec![commitment; eth::CELLS_PER_EXT_BLOB];
let cell_indices: Vec<usize> = (0..).step_by(1).take(eth::CELLS_PER_EXT_BLOB).collect();
/* Verify all the proofs */
let verify_result = <FsKZGSettings as DAS<BlstBackend>>::verify_cell_kzg_proof_batch(
&settings,
&commitments,
&cell_indices,
&cells,
&proofs,
);
assert!(verify_result.is_ok());
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/tests/poly.rs | blst/tests/poly.rs | // #[path = "./local_tests/local_poly.rs"]
// pub mod local_poly;
#[cfg(test)]
mod tests {
use kzg_bench::tests::poly::{
create_poly_of_length_ten, poly_div_by_zero, poly_div_fast_test, poly_div_long_test,
poly_div_random, poly_eval_0_check, poly_eval_check, poly_eval_nil_check,
poly_inverse_simple_0, poly_inverse_simple_1, poly_mul_direct_test, poly_mul_fft_test,
poly_mul_random, poly_test_div,
};
use rust_kzg_blst::types::fft_settings::FsFFTSettings;
use rust_kzg_blst::types::fr::FsFr;
use rust_kzg_blst::types::poly::FsPoly;
// Local tests
// #[test]
// fn local_create_poly_of_length_ten_() {
// create_poly_of_length_ten()
// }
//
// #[test]
// fn local_poly_pad_works_rand_() {
// poly_pad_works_rand()
// }
//
// #[test]
// fn local_poly_eval_check_() {
// poly_eval_check()
// }
//
// #[test]
// fn local_poly_eval_0_check_() { poly_eval_0_check() }
//
// #[test]
// fn local_poly_eval_nil_check_() {
// poly_eval_nil_check()
// }
//
// #[test]
// fn local_poly_inverse_simple_0_() {
// poly_inverse_simple_0()
// }
//
// #[test]
// fn local_poly_inverse_simple_1_() {
// poly_inverse_simple_1()
// }
//
// #[test]
// fn local_test_poly_div_by_zero_() {
// test_poly_div_by_zero()
// }
//
// #[test]
// fn local_poly_div_long_test_() {
// poly_div_long_test()
// }
//
// #[test]
// fn local_poly_div_fast_test_() {
// poly_div_fast_test()
// }
//
// #[test]
// fn local_poly_mul_direct_test_() {
// poly_mul_direct_test()
// }
//
// #[test]
// fn local_poly_mul_fft_test_() {
// poly_mul_fft_test()
// }
//
// #[test]
// fn local_poly_mul_random_() {
// poly_mul_random()
// }
//
// #[test]
// fn local_poly_div_random_() {
// poly_div_random()
// }
// Shared tests
#[test]
fn create_poly_of_length_ten_() {
create_poly_of_length_ten::<FsFr, FsPoly>()
}
#[test]
fn poly_eval_check_() {
poly_eval_check::<FsFr, FsPoly>()
}
#[test]
fn poly_eval_0_check_() {
poly_eval_0_check::<FsFr, FsPoly>()
}
#[test]
fn poly_eval_nil_check_() {
poly_eval_nil_check::<FsFr, FsPoly>()
}
#[test]
fn poly_inverse_simple_0_() {
poly_inverse_simple_0::<FsFr, FsPoly>()
}
#[test]
fn poly_inverse_simple_1_() {
poly_inverse_simple_1::<FsFr, FsPoly>()
}
#[test]
fn poly_test_div_() {
poly_test_div::<FsFr, FsPoly>()
}
#[test]
fn poly_div_by_zero_() {
poly_div_by_zero::<FsFr, FsPoly>()
}
#[test]
fn poly_mul_direct_test_() {
poly_mul_direct_test::<FsFr, FsPoly>()
}
#[test]
fn poly_mul_fft_test_() {
poly_mul_fft_test::<FsFr, FsPoly, FsFFTSettings>()
}
#[test]
fn poly_mul_random_() {
poly_mul_random::<FsFr, FsPoly, FsFFTSettings>()
}
#[test]
fn poly_div_random_() {
poly_div_random::<FsFr, FsPoly>()
}
#[test]
fn poly_div_long_test_() {
poly_div_long_test::<FsFr, FsPoly>()
}
#[test]
fn poly_div_fast_test_() {
poly_div_fast_test::<FsFr, FsPoly>()
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/tests/zero_poly.rs | blst/tests/zero_poly.rs | #[cfg(test)]
mod tests {
use kzg_bench::tests::zero_poly::{
check_test_data, reduce_partials_random, test_reduce_partials, zero_poly_252,
zero_poly_all_but_one, zero_poly_known, zero_poly_random,
};
use rust_kzg_blst::types::fft_settings::FsFFTSettings;
use rust_kzg_blst::types::fr::FsFr;
use rust_kzg_blst::types::poly::FsPoly;
#[test]
fn test_reduce_partials_() {
test_reduce_partials::<FsFr, FsFFTSettings, FsPoly>();
}
#[test]
fn reduce_partials_random_() {
reduce_partials_random::<FsFr, FsFFTSettings, FsPoly>();
}
#[test]
fn check_test_data_() {
check_test_data::<FsFr, FsFFTSettings, FsPoly>();
}
#[test]
fn zero_poly_known_() {
zero_poly_known::<FsFr, FsFFTSettings, FsPoly>();
}
#[test]
fn zero_poly_random_() {
zero_poly_random::<FsFr, FsFFTSettings, FsPoly>();
}
#[test]
fn zero_poly_all_but_one_() {
zero_poly_all_but_one::<FsFr, FsFFTSettings, FsPoly>();
}
#[test]
fn zero_poly_252_() {
zero_poly_252::<FsFr, FsFFTSettings, FsPoly>();
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/tests/bls12_381.rs | blst/tests/bls12_381.rs | #[cfg(test)]
mod tests {
use kzg::common_utils::log_2_byte;
use kzg_bench::tests::bls12_381::{
fr_div_by_zero, fr_div_works, fr_equal_works, fr_from_uint64_works, fr_is_null_works,
fr_is_one_works, fr_is_zero_works, fr_negate_works, fr_pow_works, fr_uint64s_roundtrip,
g1_identity_is_identity, g1_identity_is_infinity, g1_linear_combination_infinity_points,
g1_make_linear_combination, g1_random_linear_combination, g1_small_linear_combination,
log_2_byte_works, p1_mul_works, p1_sub_works, p2_add_or_dbl_works, p2_mul_works,
p2_sub_works, pairings_work,
};
use rust_kzg_blst::kzg_proofs::{g1_linear_combination, pairings_verify};
use rust_kzg_blst::types::fp::FsFp;
use rust_kzg_blst::types::fr::FsFr;
use rust_kzg_blst::types::g1::{FsG1, FsG1Affine, FsG1ProjAddAffine};
use rust_kzg_blst::types::g2::FsG2;
#[test]
fn log_2_byte_works_() {
log_2_byte_works(&log_2_byte)
}
#[test]
fn fr_is_null_works_() {
fr_is_null_works::<FsFr>()
}
#[test]
fn fr_is_zero_works_() {
fr_is_zero_works::<FsFr>()
}
#[test]
fn fr_is_one_works_() {
fr_is_one_works::<FsFr>()
}
#[test]
fn fr_from_uint64_works_() {
fr_from_uint64_works::<FsFr>()
}
#[test]
fn fr_equal_works_() {
fr_equal_works::<FsFr>()
}
#[test]
fn fr_negate_works_() {
fr_negate_works::<FsFr>()
}
#[test]
fn fr_pow_works_() {
fr_pow_works::<FsFr>()
}
#[test]
fn fr_div_works_() {
fr_div_works::<FsFr>()
}
#[test]
fn fr_div_by_zero_() {
fr_div_by_zero::<FsFr>()
}
#[test]
fn fr_uint64s_roundtrip_() {
fr_uint64s_roundtrip::<FsFr>()
}
#[test]
fn p1_mul_works_() {
p1_mul_works::<FsFr, FsG1>()
}
#[test]
fn p1_sub_works_() {
p1_sub_works::<FsG1>()
}
#[test]
fn p2_add_or_dbl_works_() {
p2_add_or_dbl_works::<FsG2>()
}
#[test]
fn p2_mul_works_() {
p2_mul_works::<FsFr, FsG2>()
}
#[test]
fn p2_sub_works_() {
p2_sub_works::<FsG2>()
}
#[test]
fn g1_identity_is_infinity_() {
g1_identity_is_infinity::<FsG1>()
}
#[test]
fn g1_identity_is_identity_() {
g1_identity_is_identity::<FsG1>()
}
#[test]
fn g1_make_linear_combination_() {
g1_make_linear_combination::<FsFr, FsG1, FsFp, FsG1Affine, FsG1ProjAddAffine>(
&g1_linear_combination,
)
}
#[test]
fn g1_random_linear_combination_() {
g1_random_linear_combination::<FsFr, FsG1, FsFp, FsG1Affine, FsG1ProjAddAffine>(
&g1_linear_combination,
)
}
#[test]
fn g1_linear_combination_infinity_points_() {
g1_linear_combination_infinity_points::<FsFr, FsG1, FsFp, FsG1Affine, FsG1ProjAddAffine>(
&g1_linear_combination,
);
}
#[test]
fn g1_small_linear_combination_() {
g1_small_linear_combination::<FsFr, FsG1, FsFp, FsG1Affine, FsG1ProjAddAffine>(
&g1_linear_combination,
)
}
#[test]
fn pairings_work_() {
pairings_work::<FsFr, FsG1, FsG2>(&pairings_verify)
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/tests/fk20_proofs.rs | blst/tests/fk20_proofs.rs | #[cfg(test)]
mod tests {
use kzg_bench::tests::fk20_proofs::*;
use rust_kzg_blst::eip_7594::BlstBackend;
use rust_kzg_blst::types::fk20_multi_settings::FsFK20MultiSettings;
use rust_kzg_blst::types::fk20_single_settings::FsFK20SingleSettings;
use rust_kzg_blst::utils::generate_trusted_setup;
#[test]
fn test_fk_single() {
fk_single::<BlstBackend, FsFK20SingleSettings>(&generate_trusted_setup);
}
#[test]
fn test_fk_single_strided() {
fk_single_strided::<BlstBackend, FsFK20SingleSettings>(&generate_trusted_setup);
}
#[test]
fn test_fk_multi_settings() {
fk_multi_settings::<BlstBackend, FsFK20MultiSettings>(&generate_trusted_setup);
}
#[test]
fn test_fk_multi_chunk_len_1_512() {
fk_multi_chunk_len_1_512::<BlstBackend, FsFK20MultiSettings>(&generate_trusted_setup);
}
#[test]
fn test_fk_multi_chunk_len_16_512() {
fk_multi_chunk_len_16_512::<BlstBackend, FsFK20MultiSettings>(&generate_trusted_setup);
}
#[test]
fn test_fk_multi_chunk_len_16_16() {
fk_multi_chunk_len_16_16::<BlstBackend, FsFK20MultiSettings>(&generate_trusted_setup);
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/tests/mod.rs | blst/tests/mod.rs | pub mod local_tests;
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/tests/c_bindings.rs | blst/tests/c_bindings.rs | #[cfg(all(test, feature = "c_bindings"))]
mod tests {
use kzg_bench::tests::c_bindings::{
blob_to_kzg_commitment_invalid_blob_test,
compute_blob_kzg_proof_commitment_is_point_at_infinity_test,
compute_blob_kzg_proof_invalid_blob_test, free_trusted_setup_null_ptr_test,
free_trusted_setup_set_all_values_to_null_test,
load_trusted_setup_file_invalid_format_test, load_trusted_setup_file_valid_format_test,
load_trusted_setup_invalid_form_test, load_trusted_setup_invalid_g1_byte_length_test,
load_trusted_setup_invalid_g1_point_test, load_trusted_setup_invalid_g2_byte_length_test,
load_trusted_setup_invalid_g2_point_test,
};
use rust_kzg_blst::eip_4844::{
blob_to_kzg_commitment, compute_blob_kzg_proof, free_trusted_setup, load_trusted_setup,
load_trusted_setup_file,
};
#[test]
fn blob_to_kzg_commitment_invalid_blob() {
blob_to_kzg_commitment_invalid_blob_test(blob_to_kzg_commitment, load_trusted_setup_file);
}
#[test]
fn load_trusted_setup_invalid_g1_byte_length() {
load_trusted_setup_invalid_g1_byte_length_test(load_trusted_setup);
}
#[test]
fn load_trusted_setup_invalid_g1_point() {
load_trusted_setup_invalid_g1_point_test(load_trusted_setup);
}
#[test]
fn load_trusted_setup_invalid_g2_byte_length() {
load_trusted_setup_invalid_g2_byte_length_test(load_trusted_setup);
}
#[test]
fn load_trusted_setup_invalid_g2_point() {
load_trusted_setup_invalid_g2_point_test(load_trusted_setup);
}
#[test]
fn load_trusted_setup_invalid_form() {
load_trusted_setup_invalid_form_test(load_trusted_setup);
}
#[test]
fn load_trusted_setup_file_invalid_format() {
load_trusted_setup_file_invalid_format_test(load_trusted_setup_file);
}
#[test]
fn load_trusted_setup_file_valid_format() {
load_trusted_setup_file_valid_format_test(load_trusted_setup_file);
}
#[test]
fn free_trusted_setup_null_ptr() {
free_trusted_setup_null_ptr_test(free_trusted_setup);
}
#[test]
fn free_trusted_setup_set_all_values_to_null() {
free_trusted_setup_set_all_values_to_null_test(free_trusted_setup, load_trusted_setup_file);
}
#[test]
fn compute_blob_kzg_proof_invalid_blob() {
compute_blob_kzg_proof_invalid_blob_test(compute_blob_kzg_proof, load_trusted_setup_file);
}
#[test]
fn compute_blob_kzg_proof_commitment_is_point_at_infinity() {
compute_blob_kzg_proof_commitment_is_point_at_infinity_test(
compute_blob_kzg_proof,
load_trusted_setup_file,
);
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/tests/fft_g1.rs | blst/tests/fft_g1.rs | #[cfg(test)]
mod tests {
use kzg::G1;
use kzg_bench::tests::fft_g1::{compare_ft_fft, roundtrip_fft, stride_fft};
use rust_kzg_blst::consts::G1_GENERATOR;
use rust_kzg_blst::fft_g1::{fft_g1_fast, fft_g1_slow};
use rust_kzg_blst::types::fft_settings::FsFFTSettings;
use rust_kzg_blst::types::fr::FsFr;
use rust_kzg_blst::types::g1::FsG1;
fn make_data(n: usize) -> Vec<FsG1> {
if n == 0 {
return Vec::new();
}
let mut result: Vec<FsG1> = vec![FsG1::default(); n];
result[0] = G1_GENERATOR;
for i in 1..n {
result[i] = result[i - 1].add_or_dbl(&G1_GENERATOR)
}
result
}
#[test]
fn roundtrip_fft_() {
roundtrip_fft::<FsFr, FsG1, FsFFTSettings>(&make_data);
}
#[test]
fn stride_fft_() {
stride_fft::<FsFr, FsG1, FsFFTSettings>(&make_data);
}
#[test]
fn compare_sft_fft_() {
compare_ft_fft::<FsFr, FsG1, FsFFTSettings>(&fft_g1_slow, &fft_g1_fast, &make_data);
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/tests/fft_fr.rs | blst/tests/fft_fr.rs | #[cfg(test)]
mod tests {
use kzg_bench::tests::fft_fr::{compare_sft_fft, inverse_fft, roundtrip_fft, stride_fft};
use rust_kzg_blst::fft_fr::{fft_fr_fast, fft_fr_slow};
use rust_kzg_blst::types::fft_settings::FsFFTSettings;
use rust_kzg_blst::types::fr::FsFr;
#[test]
fn compare_sft_fft_() {
compare_sft_fft::<FsFr, FsFFTSettings>(&fft_fr_slow, &fft_fr_fast);
}
#[test]
fn roundtrip_fft_() {
roundtrip_fft::<FsFr, FsFFTSettings>();
}
#[test]
fn inverse_fft_() {
inverse_fft::<FsFr, FsFFTSettings>();
}
#[test]
fn stride_fft_() {
stride_fft::<FsFr, FsFFTSettings>();
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/tests/recovery.rs | blst/tests/recovery.rs | // #[path = "./local_tests/local_recovery.rs"]
// pub mod local_recovery;
#[cfg(test)]
mod tests {
use kzg_bench::tests::recover::*;
// uncomment to use the local tests
//use crate::local_recovery::{recover_random, recover_simple};
use rust_kzg_blst::types::fft_settings::FsFFTSettings;
use rust_kzg_blst::types::fr::FsFr;
use rust_kzg_blst::types::poly::FsPoly;
// Shared tests
#[test]
fn recover_simple_() {
recover_simple::<FsFr, FsFFTSettings, FsPoly, FsPoly>();
}
#[test]
fn recover_random_() {
recover_random::<FsFr, FsFFTSettings, FsPoly, FsPoly>();
}
#[test]
fn more_than_half_missing_() {
more_than_half_missing::<FsFr, FsFFTSettings, FsPoly, FsPoly>();
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/tests/batch_adder.rs | blst/tests/batch_adder.rs | #[cfg(test)]
mod tests {
use kzg_bench::tests::msm::batch_adder::{
test_batch_add, test_batch_add_indexed, test_batch_add_indexed_single_bucket,
test_batch_add_step_n, test_phase_one_p_add_p, test_phase_one_p_add_q,
test_phase_one_p_add_q_twice, test_phase_one_zero_or_neg, test_phase_two_p_add_neg,
test_phase_two_p_add_p, test_phase_two_p_add_q, test_phase_two_zero_add_p,
};
use rust_kzg_blst::types::{
fp::FsFp,
g1::{FsG1, FsG1Affine},
};
// use rust_kzg_blst::types::
#[test]
fn test_phase_one_zero_or_neg_() {
test_phase_one_zero_or_neg::<FsG1, FsFp, FsG1Affine>();
}
#[test]
fn test_phase_one_p_add_p_() {
test_phase_one_p_add_p::<FsG1, FsFp, FsG1Affine>();
}
#[test]
fn test_phase_one_p_add_q_() {
test_phase_one_p_add_q::<FsG1, FsFp, FsG1Affine>();
}
#[test]
fn test_phase_one_p_add_q_twice_() {
test_phase_one_p_add_q_twice::<FsG1, FsFp, FsG1Affine>();
}
#[test]
fn test_phase_two_zero_add_p_() {
test_phase_two_zero_add_p::<FsG1, FsFp, FsG1Affine>();
}
#[test]
fn test_phase_two_p_add_neg_() {
test_phase_two_p_add_neg::<FsG1, FsFp, FsG1Affine>();
}
#[test]
fn test_phase_two_p_add_q_() {
test_phase_two_p_add_q::<FsG1, FsFp, FsG1Affine>();
}
#[test]
fn test_phase_two_p_add_p_() {
test_phase_two_p_add_p::<FsG1, FsFp, FsG1Affine>();
}
#[test]
fn test_batch_add_() {
test_batch_add::<FsG1, FsFp, FsG1Affine>();
}
#[test]
fn test_batch_add_step_n_() {
test_batch_add_step_n::<FsG1, FsFp, FsG1Affine>();
}
#[test]
fn test_batch_add_indexed_() {
test_batch_add_indexed::<FsG1, FsFp, FsG1Affine>();
}
#[test]
fn test_batch_add_indexed_single_bucket_() {
test_batch_add_indexed_single_bucket::<FsG1, FsFp, FsG1Affine>();
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/tests/local_tests/local_recovery.rs | blst/tests/local_tests/local_recovery.rs | use kzg::{FFTFr, FFTSettings, Fr, Poly, PolyRecover};
use rand::rngs::StdRng;
use rand::{RngCore, SeedableRng};
use std::convert::TryInto;
fn shuffle(a: &mut [usize], n: usize) {
let mut i: u64 = n as u64;
let mut j: usize;
let mut tmp: usize;
let mut rng = StdRng::seed_from_u64(0);
while i > 0 {
j = (rng.next_u64() % i) as usize;
i -= 1;
tmp = a[j];
a[j] = a[i as usize];
a[i as usize] = tmp;
}
}
fn random_missing<TFr: Fr>(
with_missing: &mut [Option<TFr>],
data: &[TFr],
len_data: usize,
known: usize,
) {
let mut missing_idx = Vec::new();
for i in 0..len_data {
missing_idx.push(i);
with_missing[i] = Some(data[i].clone());
}
shuffle(&mut missing_idx, len_data);
for i in 0..(len_data - known) {
with_missing[missing_idx[i]] = None;
}
}
pub fn recover_simple<
TFr: Fr,
TFFTSettings: FFTSettings<TFr> + FFTFr<TFr>,
TPoly: Poly<TFr>,
TPolyRecover: PolyRecover<TFr, TPoly, TFFTSettings>,
>() {
let fs_query = TFFTSettings::new(2);
assert!(fs_query.is_ok());
let fs: TFFTSettings = fs_query.unwrap();
let max_width: usize = fs.get_max_width();
let poly_query = TPoly::new(max_width);
let mut poly = poly_query;
for i in 0..(max_width / 2) {
poly.set_coeff_at(i, &TFr::from_u64(i.try_into().unwrap()));
}
for i in (max_width / 2)..max_width {
poly.set_coeff_at(i, &TFr::zero());
}
let data_query = fs.fft_fr(poly.get_coeffs(), false);
assert!(data_query.is_ok());
let data = data_query.unwrap();
let sample: [Option<TFr>; 4] = [Some(data[0].clone()), None, None, Some(data[3].clone())];
let recovered_query = TPolyRecover::recover_poly_from_samples(&sample, &fs);
assert!(recovered_query.is_ok());
let recovered = recovered_query.unwrap();
for (i, item) in data.iter().enumerate().take(max_width) {
assert!(item.equals(&recovered.get_coeff_at(i)));
}
let mut recovered_vec: Vec<TFr> = Vec::new();
for i in 0..max_width {
recovered_vec.push(recovered.get_coeff_at(i));
}
let back_query = fs.fft_fr(&recovered_vec, true);
assert!(back_query.is_ok());
let back = back_query.unwrap();
for (i, back_x) in back[..(max_width / 2)].iter().enumerate() {
assert!(back_x.equals(&poly.get_coeff_at(i)));
}
for back_x in back[(max_width / 2)..max_width].iter() {
assert!(back_x.is_zero());
}
}
pub fn recover_random<
TFr: Fr,
TFFTSettings: FFTSettings<TFr> + FFTFr<TFr>,
TPoly: Poly<TFr>,
TPolyRecover: PolyRecover<TFr, TPoly, TFFTSettings>,
>() {
let fs_query = TFFTSettings::new(12);
assert!(fs_query.is_ok());
let fs: TFFTSettings = fs_query.unwrap();
let max_width: usize = fs.get_max_width();
// let mut poly = TPoly::default();
let poly_query = TPoly::new(max_width);
let mut poly = poly_query;
for i in 0..(max_width / 2) {
poly.set_coeff_at(i, &TFr::from_u64(i.try_into().unwrap()));
}
for i in (max_width / 2)..max_width {
poly.set_coeff_at(i, &TFr::zero());
}
let data_query = fs.fft_fr(poly.get_coeffs(), false);
assert!(data_query.is_ok());
let data = data_query.unwrap();
let mut samples = vec![Some(TFr::default()); max_width]; // std::vec![TFr; max_width];
for i in 0..10 {
let known_ratio = 0.5 + (i as f32) * 0.05;
let known: usize = ((max_width as f32) * known_ratio) as usize;
for _ in 0..4 {
random_missing(&mut samples, &data, max_width, known);
let recovered_query = TPolyRecover::recover_poly_from_samples(&samples, &fs);
assert!(recovered_query.is_ok());
let recovered = recovered_query.unwrap();
for (j, item) in data.iter().enumerate().take(max_width) {
assert!(item.equals(&recovered.get_coeff_at(j)));
}
let mut recovered_vec: Vec<TFr> = Vec::new();
for i in 0..max_width {
recovered_vec.push(recovered.get_coeff_at(i));
}
let back_query = fs.fft_fr(&recovered_vec, true);
assert!(back_query.is_ok());
let back = back_query.unwrap();
for (i, back_x) in back[..(max_width / 2)].iter().enumerate() {
assert!(back_x.equals(&poly.get_coeff_at(i)));
}
for back_x in back[(max_width / 2)..max_width].iter() {
assert!(back_x.is_zero());
}
}
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/tests/local_tests/local_poly.rs | blst/tests/local_tests/local_poly.rs | use kzg::{Fr, Poly};
use rand::rngs::StdRng;
use rand::{RngCore, SeedableRng};
use rust_kzg_blst::types::fr::FsFr;
use rust_kzg_blst::types::poly::FsPoly;
pub fn create_poly_of_length_ten() {
let poly = FsPoly::new(10);
assert_eq!(poly.len(), 10);
}
pub fn poly_pad_works_rand() {
let mut rng = StdRng::seed_from_u64(0);
for _k in 0..256 {
let poly_length: usize = (1 + (rng.next_u64() % 1000)) as usize;
let mut poly = FsPoly::new(poly_length);
for i in 0..poly.len() {
poly.set_coeff_at(i, &FsFr::rand());
}
let padded_poly = poly.pad(1000);
for i in 0..poly_length {
assert!(padded_poly.get_coeff_at(i).equals(&poly.get_coeff_at(i)));
}
for i in poly_length..1000 {
assert!(padded_poly.get_coeff_at(i).equals(&Fr::zero()));
}
}
}
pub fn poly_eval_check() {
let n: usize = 10;
let mut poly = FsPoly::new(n);
for i in 0..n {
let fr = FsFr::from_u64((i + 1) as u64);
poly.set_coeff_at(i, &fr);
}
let expected = FsFr::from_u64((n * (n + 1) / 2) as u64);
let actual = poly.eval(&FsFr::one());
assert!(expected.equals(&actual));
}
pub fn poly_eval_0_check() {
let n: usize = 7;
let a: usize = 597;
let mut poly = FsPoly::new(n);
for i in 0..n {
let fr = FsFr::from_u64((i + a) as u64);
poly.set_coeff_at(i, &fr);
}
let expected = FsFr::from_u64(a as u64);
let actual = poly.eval(&FsFr::zero());
assert!(expected.equals(&actual));
}
pub fn poly_eval_nil_check() {
let n: usize = 0;
let poly = FsPoly::new(n);
let actual = poly.eval(&FsFr::one());
assert!(actual.equals(&FsFr::zero()));
}
pub fn poly_inverse_simple_0() {
// 1 / (1 - x) = 1 + x + x^2 + ...
let d: usize = 16;
let mut p = FsPoly::new(2);
p.set_coeff_at(0, &FsFr::one());
p.set_coeff_at(1, &FsFr::one());
p.set_coeff_at(1, &FsFr::negate(&p.get_coeff_at(1)));
let result = p.inverse(d);
assert!(result.is_ok());
let q = result.unwrap();
for i in 0..d {
assert!(q.get_coeff_at(i).is_one());
}
}
pub fn poly_inverse_simple_1() {
// 1 / (1 + x) = 1 - x + x^2 - ...
let d: usize = 16;
let mut p = FsPoly::new(2);
p.set_coeff_at(0, &FsFr::one());
p.set_coeff_at(1, &FsFr::one());
let result = p.inverse(d);
assert!(result.is_ok());
let q = result.unwrap();
for i in 0..d {
let mut tmp = q.get_coeff_at(i);
if i & 1 != 0 {
tmp = FsFr::negate(&tmp);
}
assert!(tmp.is_one());
}
}
pub const NUM_TESTS: u64 = 10;
fn test_data(a: usize, b: usize) -> Vec<i64> {
// (x^2 - 1) / (x + 1) = x - 1
let test_0_0 = vec![-1, 0, 1];
let test_0_1 = vec![1, 1];
let test_0_2 = vec![-1, 1];
// (12x^3 - 11x^2 + 9x + 18) / (4x + 3) = 3x^2 - 5x + 6
let test_1_0 = vec![18, 9, -11, 12];
let test_1_1 = vec![3, 4];
let test_1_2 = vec![6, -5, 3];
// (x + 1) / (x^2 - 1) = nil
let test_2_0 = vec![1, 1];
let test_2_1 = vec![-1, 0, 2];
let test_2_2 = vec![];
// (10x^2 + 20x + 30) / 10 = x^2 + 2x + 3
let test_3_0 = vec![30, 20, 10];
let test_3_1 = vec![10];
let test_3_2 = vec![3, 2, 1];
// (x^2 + x) / (x + 1) = x
let test_4_0 = vec![0, 1, 1];
let test_4_1 = vec![1, 1];
let test_4_2 = vec![0, 1];
// (x^2 + x + 1) / 1 = x^2 + x + 1
let test_5_0 = vec![1, 1, 1];
let test_5_1 = vec![1];
let test_5_2 = vec![1, 1, 1];
// (x^2 + x + 1) / (0x + 1) = x^2 + x + 1
let test_6_0 = vec![1, 1, 1];
let test_6_1 = vec![1, 0]; // The highest coefficient is zero
let test_6_2 = vec![1, 1, 1];
// (x^3) / (x) = (x^2)
let test_7_0 = vec![0, 0, 0, 1];
let test_7_1 = vec![0, 1];
let test_7_2 = vec![0, 0, 1];
//
let test_8_0 = vec![
236,
945,
-297698,
2489425,
-18556462,
-301325440,
2473062655,
-20699887353,
];
let test_8_1 = vec![4, 11, -5000, 45541, -454533];
let test_8_2 = vec![59, 74, -878, 45541];
// (x^4 + 2x^3 + 3x^2 + 2x + 1) / (-x^2 -x -1) = (-x^2 -x -1)
let test_9_0 = vec![1, 2, 3, 2, 1];
let test_9_1 = vec![-1, -1, -1];
let test_9_2 = vec![-1, -1, -1];
let test_data = [
[test_0_0, test_0_1, test_0_2],
[test_1_0, test_1_1, test_1_2],
[test_2_0, test_2_1, test_2_2],
[test_3_0, test_3_1, test_3_2],
[test_4_0, test_4_1, test_4_2],
[test_5_0, test_5_1, test_5_2],
[test_6_0, test_6_1, test_6_2],
[test_7_0, test_7_1, test_7_2],
[test_8_0, test_8_1, test_8_2],
[test_9_0, test_9_1, test_9_2],
];
test_data[a][b].clone()
}
fn new_test_poly(coeffs: &[i64]) -> FsPoly {
let mut p = FsPoly::new(0);
for &coeff in coeffs.iter() {
if coeff >= 0 {
let c = FsFr::from_u64(coeff as u64);
p.coeffs.push(c);
} else {
let c = FsFr::from_u64((-coeff) as u64);
let negc = c.negate();
p.coeffs.push(negc);
}
}
p
}
pub fn poly_div_long_test() {
for i in 0..9 {
// Tests are designed to throw an exception when last member is 0
if i == 6 {
continue;
}
let divided_data = test_data(i, 0);
let divisor_data = test_data(i, 1);
let expected_data = test_data(i, 2);
let mut dividend: FsPoly = new_test_poly(÷d_data);
let divisor: FsPoly = new_test_poly(&divisor_data);
let expected: FsPoly = new_test_poly(&expected_data);
let actual = dividend.long_div(&divisor).unwrap();
assert_eq!(expected.len(), actual.len());
for i in 0..actual.len() {
assert!(expected.get_coeff_at(i).equals(&actual.get_coeff_at(i)))
}
}
}
pub fn poly_div_fast_test() {
for i in 0..9 {
// Tests are designed to throw an exception when last member is 0
if i == 6 {
continue;
}
let divided_data = test_data(i, 0);
let divisor_data = test_data(i, 1);
let expected_data = test_data(i, 2);
let mut dividend: FsPoly = new_test_poly(÷d_data);
let divisor: FsPoly = new_test_poly(&divisor_data);
let expected: FsPoly = new_test_poly(&expected_data);
let actual = dividend.fast_div(&divisor).unwrap();
assert_eq!(expected.len(), actual.len());
for i in 0..actual.len() {
assert!(expected.get_coeff_at(i).equals(&actual.get_coeff_at(i)))
}
}
}
pub fn test_poly_div_by_zero() {
let mut dividend = FsPoly::new(2);
dividend.set_coeff_at(0, &FsFr::from_u64(1));
dividend.set_coeff_at(1, &FsFr::from_u64(1));
let divisor = FsPoly::new(0);
let dummy = dividend.div(&divisor);
assert!(dummy.is_err());
}
pub fn poly_mul_direct_test() {
for i in 0..9 {
let coeffs1 = test_data(i, 2);
let coeffs2 = test_data(i, 1);
let coeffs3 = test_data(i, 0);
let mut multiplicand: FsPoly = new_test_poly(&coeffs1);
let mut multiplier: FsPoly = new_test_poly(&coeffs2);
let expected: FsPoly = new_test_poly(&coeffs3);
let result0 = multiplicand.mul_direct(&multiplier, coeffs3.len()).unwrap();
for j in 0..result0.len() {
assert!(expected.get_coeff_at(j).equals(&result0.get_coeff_at(j)))
}
// Check commutativity
let result1 = multiplier.mul_direct(&multiplicand, coeffs3.len()).unwrap();
for j in 0..result1.len() {
assert!(expected.get_coeff_at(j).equals(&result1.get_coeff_at(j)))
}
}
}
pub fn poly_mul_fft_test() {
for i in 0..9 {
// Ignore 0 multiplication case because its incorrect when multiplied backwards
if i == 2 {
continue;
}
let coeffs1 = test_data(i, 2);
let coeffs2 = test_data(i, 1);
let coeffs3 = test_data(i, 0);
let multiplicand: FsPoly = new_test_poly(&coeffs1);
let multiplier: FsPoly = new_test_poly(&coeffs2);
let expected: FsPoly = new_test_poly(&coeffs3);
let result0 = multiplicand.mul_fft(&multiplier, coeffs3.len()).unwrap();
for j in 0..result0.len() {
assert!(expected.get_coeff_at(j).equals(&result0.get_coeff_at(j)))
}
// Check commutativity
let result1 = multiplier.mul_fft(&multiplicand, coeffs3.len()).unwrap();
for j in 0..result1.len() {
assert!(expected.get_coeff_at(j).equals(&result1.get_coeff_at(j)))
}
}
}
pub fn poly_mul_random() {
let mut rng = StdRng::seed_from_u64(0);
for _k in 0..256 {
let multiplicand_length: usize = (1 + (rng.next_u64() % 1000)) as usize;
let mut multiplicand = FsPoly::new(multiplicand_length);
for i in 0..multiplicand.len() {
multiplicand.set_coeff_at(i, &FsFr::rand());
}
let multiplier_length: usize = (1 + (rng.next_u64() % 1000)) as usize;
let mut multiplier = FsPoly::new(multiplier_length);
for i in 0..multiplier.len() {
multiplier.set_coeff_at(i, &FsFr::rand());
}
if multiplicand.get_coeff_at(multiplicand.len() - 1).is_zero() {
multiplicand.set_coeff_at(multiplicand.len() - 1, &Fr::one());
}
if multiplier.get_coeff_at(multiplier.len() - 1).is_zero() {
multiplier.set_coeff_at(multiplier.len() - 1, &Fr::one());
}
let out_length: usize = (1 + (rng.next_u64() % 1000)) as usize;
let q0 = multiplicand.mul_direct(&multiplier, out_length).unwrap();
let q1 = multiplicand.mul_fft(&multiplier, out_length).unwrap();
assert_eq!(q0.len(), q1.len());
for i in 0..q0.len() {
assert!(q0.get_coeff_at(i).equals(&q1.get_coeff_at(i)));
}
}
}
pub fn poly_div_random() {
let mut rng = StdRng::seed_from_u64(0);
for _k in 0..256 {
let dividend_length: usize = (2 + (rng.next_u64() % 1000)) as usize;
let divisor_length: usize = 1 + ((rng.next_u64() as usize) % dividend_length);
let mut dividend = FsPoly::new(dividend_length);
let mut divisor = FsPoly::new(divisor_length);
for i in 0..dividend_length {
dividend.set_coeff_at(i, &FsFr::rand());
}
for i in 0..divisor_length {
divisor.set_coeff_at(i, &FsFr::rand());
}
//Ensure that the polynomials' orders corresponds to their lengths
if dividend.get_coeff_at(dividend.len() - 1).is_zero() {
dividend.set_coeff_at(dividend.len() - 1, &Fr::one());
}
if divisor.get_coeff_at(divisor.len() - 1).is_zero() {
divisor.set_coeff_at(divisor.len() - 1, &Fr::one());
}
let result0 = dividend.long_div(&divisor).unwrap();
let result1 = dividend.fast_div(&divisor).unwrap();
assert_eq!(result0.len(), result1.len());
for i in 0..result0.len() {
assert!(result0.get_coeff_at(i).equals(&result1.get_coeff_at(i)));
}
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/tests/local_tests/mod.rs | blst/tests/local_tests/mod.rs | pub mod local_consts;
pub mod local_poly;
pub mod local_recovery;
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/tests/local_tests/local_consts.rs | blst/tests/local_tests/local_consts.rs | use kzg::{FFTSettings, Fr};
pub fn roots_of_unity_repeat_at_stride<TFr: Fr, TFFTSettings: FFTSettings<TFr>>() {
let fs1 = TFFTSettings::new(15).unwrap();
let fs2 = TFFTSettings::new(16).unwrap();
let fs3 = TFFTSettings::new(17).unwrap();
for i in 0..fs1.get_max_width() {
assert!(fs1
.get_roots_of_unity_at(i)
.equals(&fs2.get_roots_of_unity_at(i * 2)));
assert!(fs1
.get_roots_of_unity_at(i)
.equals(&fs3.get_roots_of_unity_at(i * 4)));
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/benches/trusted_setup.rs | blst/benches/trusted_setup.rs | use criterion::{criterion_group, criterion_main, Criterion};
use kzg::eip_4844::load_trusted_setup_rust;
use kzg_bench::benches::trusted_setup::bench_load_trusted_setup;
use rust_kzg_blst::{
eip_4844::load_trusted_setup_filename_rust,
types::{
fft_settings::FsFFTSettings,
fp::FsFp,
fr::FsFr,
g1::{FsG1, FsG1Affine, FsG1ProjAddAffine},
g2::FsG2,
kzg_settings::FsKZGSettings,
poly::FsPoly,
},
};
fn bench_load_trusted_setup_(c: &mut Criterion) {
bench_load_trusted_setup::<
FsFr,
FsG1,
FsG2,
FsPoly,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(
c,
&load_trusted_setup_filename_rust,
&load_trusted_setup_rust,
);
}
criterion_group!(benches, bench_load_trusted_setup_);
criterion_main!(benches);
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/benches/das.rs | blst/benches/das.rs | use criterion::{criterion_group, criterion_main, Criterion};
use kzg_bench::benches::das::bench_das_extension;
use rust_kzg_blst::types::fft_settings::FsFFTSettings;
use rust_kzg_blst::types::fr::FsFr;
fn bench_das_extension_(c: &mut Criterion) {
bench_das_extension::<FsFr, FsFFTSettings>(c)
}
criterion_group! {
name = benches;
config = Criterion::default().sample_size(10);
targets = bench_das_extension_
}
criterion_main!(benches);
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/benches/eip_4844.rs | blst/benches/eip_4844.rs | use criterion::{criterion_group, criterion_main, Criterion};
use kzg::eip_4844::{
blob_to_kzg_commitment_rust, bytes_to_blob, compute_blob_kzg_proof_rust,
compute_kzg_proof_rust, verify_blob_kzg_proof_batch_rust, verify_blob_kzg_proof_rust,
verify_kzg_proof_rust,
};
use kzg_bench::benches::eip_4844::bench_eip_4844;
use rust_kzg_blst::{
eip_4844::load_trusted_setup_filename_rust,
types::{
fft_settings::FsFFTSettings,
fp::FsFp,
fr::FsFr,
g1::{FsG1, FsG1Affine, FsG1ProjAddAffine},
g2::FsG2,
kzg_settings::FsKZGSettings,
poly::FsPoly,
},
};
fn bench_eip_4844_(c: &mut Criterion) {
bench_eip_4844::<
FsFr,
FsG1,
FsG2,
FsPoly,
FsFFTSettings,
FsKZGSettings,
FsFp,
FsG1Affine,
FsG1ProjAddAffine,
>(
c,
&load_trusted_setup_filename_rust,
&blob_to_kzg_commitment_rust,
&bytes_to_blob,
&compute_kzg_proof_rust,
&verify_kzg_proof_rust,
&compute_blob_kzg_proof_rust,
&verify_blob_kzg_proof_rust,
&verify_blob_kzg_proof_batch_rust,
);
}
criterion_group!(benches, bench_eip_4844_);
criterion_main!(benches);
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/benches/eip_7594.rs | blst/benches/eip_7594.rs | use criterion::{criterion_group, criterion_main, Criterion};
use kzg::eip_4844::{blob_to_kzg_commitment_rust, bytes_to_blob};
use kzg_bench::benches::eip_7594::bench_eip_7594;
use rust_kzg_blst::{eip_4844::load_trusted_setup_filename_rust, eip_7594::BlstBackend};
fn bench_eip_7594_(c: &mut Criterion) {
bench_eip_7594::<BlstBackend>(
c,
&load_trusted_setup_filename_rust,
&bytes_to_blob,
&blob_to_kzg_commitment_rust,
);
}
criterion_group!(benches, bench_eip_7594_);
criterion_main!(benches);
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/benches/poly.rs | blst/benches/poly.rs | use criterion::{criterion_group, criterion_main, Criterion};
use kzg_bench::benches::poly::bench_new_poly_div;
use rust_kzg_blst::types::fr::FsFr;
use rust_kzg_blst::types::poly::FsPoly;
fn bench_new_poly_div_(c: &mut Criterion) {
bench_new_poly_div::<FsFr, FsPoly>(c);
}
criterion_group! {
name = benches;
config = Criterion::default().sample_size(10);
targets = bench_new_poly_div_
}
criterion_main!(benches);
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/benches/zero_poly.rs | blst/benches/zero_poly.rs | use criterion::{criterion_group, criterion_main, Criterion};
use kzg_bench::benches::zero_poly::bench_zero_poly;
use rust_kzg_blst::types::{fft_settings::FsFFTSettings, fr::FsFr, poly::FsPoly};
fn bench_zero_poly_(c: &mut Criterion) {
bench_zero_poly::<FsFr, FsFFTSettings, FsPoly>(c);
}
criterion_group! {
name = benches;
config = Criterion::default().sample_size(10);
targets = bench_zero_poly_
}
criterion_main!(benches);
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/benches/kzg.rs | blst/benches/kzg.rs | use criterion::{criterion_group, criterion_main, Criterion};
use kzg_bench::benches::kzg::{bench_commit_to_poly, bench_compute_proof_single};
use rust_kzg_blst::eip_7594::BlstBackend;
use rust_kzg_blst::utils::generate_trusted_setup;
fn bench_commit_to_poly_(c: &mut Criterion) {
bench_commit_to_poly::<BlstBackend>(c, &generate_trusted_setup)
}
fn bench_compute_proof_single_(c: &mut Criterion) {
bench_compute_proof_single::<BlstBackend>(c, &generate_trusted_setup)
}
criterion_group! {
name = benches;
config = Criterion::default().sample_size(10);
targets = bench_commit_to_poly_, bench_compute_proof_single_
}
criterion_main!(benches);
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/benches/lincomb.rs | blst/benches/lincomb.rs | use criterion::{criterion_group, criterion_main, Criterion};
use kzg_bench::benches::lincomb::bench_g1_lincomb;
use rust_kzg_blst::kzg_proofs::g1_linear_combination;
use rust_kzg_blst::types::fp::FsFp;
use rust_kzg_blst::types::fr::FsFr;
use rust_kzg_blst::types::g1::{FsG1, FsG1Affine, FsG1ProjAddAffine};
fn bench_g1_lincomb_(c: &mut Criterion) {
bench_g1_lincomb::<FsFr, FsG1, FsFp, FsG1Affine, FsG1ProjAddAffine>(c, &g1_linear_combination);
}
criterion_group! {
name = benches;
config = Criterion::default().sample_size(100);
targets = bench_g1_lincomb_
}
criterion_main!(benches);
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/benches/fk_20.rs | blst/benches/fk_20.rs | use criterion::{criterion_group, criterion_main, Criterion};
use kzg_bench::benches::fk20::{bench_fk_multi_da, bench_fk_single_da};
use rust_kzg_blst::eip_7594::BlstBackend;
use rust_kzg_blst::types::fk20_multi_settings::FsFK20MultiSettings;
use rust_kzg_blst::types::fk20_single_settings::FsFK20SingleSettings;
use rust_kzg_blst::utils::generate_trusted_setup;
fn bench_fk_single_da_(c: &mut Criterion) {
bench_fk_single_da::<BlstBackend, FsFK20SingleSettings>(c, &generate_trusted_setup)
}
fn bench_fk_multi_da_(c: &mut Criterion) {
bench_fk_multi_da::<BlstBackend, FsFK20MultiSettings>(c, &generate_trusted_setup)
}
criterion_group! {
name = benches;
config = Criterion::default().sample_size(10);
targets = bench_fk_single_da_, bench_fk_multi_da_
}
criterion_main!(benches);
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/benches/recover.rs | blst/benches/recover.rs | use criterion::{criterion_group, criterion_main, Criterion};
use kzg_bench::benches::recover::bench_recover;
use rust_kzg_blst::types::{fft_settings::FsFFTSettings, fr::FsFr, poly::FsPoly};
pub fn bench_recover_(c: &mut Criterion) {
bench_recover::<FsFr, FsFFTSettings, FsPoly, FsPoly>(c)
}
criterion_group! {
name = benches;
config = Criterion::default().sample_size(10);
targets = bench_recover_
}
criterion_main!(benches);
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/blst/benches/fft.rs | blst/benches/fft.rs | use criterion::{criterion_group, criterion_main, Criterion};
use kzg_bench::benches::fft::{bench_fft_fr, bench_fft_g1};
use rust_kzg_blst::types::fft_settings::FsFFTSettings;
use rust_kzg_blst::types::fr::FsFr;
use rust_kzg_blst::types::g1::FsG1;
fn bench_fft_fr_(c: &mut Criterion) {
bench_fft_fr::<FsFr, FsFFTSettings>(c);
}
fn bench_fft_g1_(c: &mut Criterion) {
bench_fft_g1::<FsFr, FsG1, FsFFTSettings>(c);
}
criterion_group! {
name = benches;
config = Criterion::default().sample_size(10);
targets = bench_fft_fr_, bench_fft_g1_
}
criterion_main!(benches);
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/zkcrypto/bls12_381/src/lib.rs | zkcrypto/bls12_381/src/lib.rs | //! # `bls12_381`
//!
//! This crate provides an implementation of the BLS12-381 pairing-friendly elliptic
//! curve construction.
//!
//! * **This implementation has not been reviewed or audited. Use at your own risk.**
//! * This implementation targets Rust `1.36` or later.
//! * This implementation does not require the Rust standard library.
//! * All operations are constant time unless explicitly noted.
#![no_std]
#![cfg_attr(docsrs, feature(doc_cfg))]
// Catch documentation errors caused by code changes.
#![deny(rustdoc::broken_intra_doc_links)]
#![deny(missing_debug_implementations)]
#![deny(unsafe_code)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::many_single_char_names)]
// This lint is described at
// https://rust-lang.github.io/rust-clippy/master/index.html#suspicious_arithmetic_impl
// In our library, some of the arithmetic involving extension fields will necessarily
// involve various binary operators, and so this lint is triggered unnecessarily.
#![allow(clippy::suspicious_arithmetic_impl)]
#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(test)]
#[macro_use]
extern crate std;
#[cfg(test)]
#[cfg(feature = "groups")]
mod tests;
#[macro_use]
mod util;
/// Notes about how the BLS12-381 elliptic curve is designed, specified
/// and implemented by this library.
pub mod notes {
pub mod design;
pub mod serialization;
}
pub mod scalar;
pub use scalar::Scalar;
#[cfg(feature = "groups")]
mod fp;
#[cfg(feature = "groups")]
pub mod fp2;
#[cfg(feature = "groups")]
pub mod g1;
#[cfg(feature = "groups")]
pub mod g2;
#[cfg(feature = "groups")]
pub use g1::{G1Affine, G1Projective};
#[cfg(feature = "groups")]
pub use g2::{G2Affine, G2Projective};
#[cfg(feature = "groups")]
mod fp12;
#[cfg(feature = "groups")]
mod fp6;
// The BLS parameter x for BLS12-381 is -0xd201000000010000
#[cfg(feature = "groups")]
const BLS_X: u64 = 0xd201_0000_0001_0000;
#[cfg(feature = "groups")]
const BLS_X_IS_NEGATIVE: bool = true;
pub const MODULUS: Scalar = scalar::MODULUS;
pub const R2: Scalar = scalar::R2;
#[cfg(feature = "pairings")]
mod pairings;
#[cfg(feature = "pairings")]
pub use pairings::{pairing, Bls12, Gt, MillerLoopResult};
#[cfg(all(feature = "pairings", feature = "alloc"))]
pub use pairings::{multi_miller_loop, G2Prepared};
pub use fp::Fp;
pub use fp12::Fp12;
pub use fp2::Fp2;
pub use fp6::Fp6;
/// Use the generic_array re-exported by digest to avoid a version mismatch
#[cfg(feature = "experimental")]
pub(crate) use digest::generic_array;
#[cfg(feature = "experimental")]
pub mod hash_to_curve;
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/zkcrypto/bls12_381/src/fp12.rs | zkcrypto/bls12_381/src/fp12.rs | use crate::fp::*;
use crate::fp2::*;
use crate::fp6::*;
use core::fmt;
use core::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign};
use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption};
#[cfg(feature = "pairings")]
use rand_core::RngCore;
/// This represents an element $c_0 + c_1 w$ of $\mathbb{F}_{p^12} = \mathbb{F}_{p^6} / w^2 - v$.
pub struct Fp12 {
pub c0: Fp6,
pub c1: Fp6,
}
impl From<Fp> for Fp12 {
fn from(f: Fp) -> Fp12 {
Fp12 {
c0: Fp6::from(f),
c1: Fp6::zero(),
}
}
}
impl From<Fp2> for Fp12 {
fn from(f: Fp2) -> Fp12 {
Fp12 {
c0: Fp6::from(f),
c1: Fp6::zero(),
}
}
}
impl From<Fp6> for Fp12 {
fn from(f: Fp6) -> Fp12 {
Fp12 {
c0: f,
c1: Fp6::zero(),
}
}
}
impl PartialEq for Fp12 {
fn eq(&self, other: &Fp12) -> bool {
self.ct_eq(other).into()
}
}
impl Copy for Fp12 {}
impl Clone for Fp12 {
#[inline]
fn clone(&self) -> Self {
*self
}
}
impl Default for Fp12 {
fn default() -> Self {
Fp12::zero()
}
}
#[cfg(feature = "zeroize")]
impl zeroize::DefaultIsZeroes for Fp12 {}
impl fmt::Debug for Fp12 {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?} + ({:?})*w", self.c0, self.c1)
}
}
impl ConditionallySelectable for Fp12 {
#[inline(always)]
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
Fp12 {
c0: Fp6::conditional_select(&a.c0, &b.c0, choice),
c1: Fp6::conditional_select(&a.c1, &b.c1, choice),
}
}
}
impl ConstantTimeEq for Fp12 {
#[inline(always)]
fn ct_eq(&self, other: &Self) -> Choice {
self.c0.ct_eq(&other.c0) & self.c1.ct_eq(&other.c1)
}
}
impl Fp12 {
#[inline]
pub fn zero() -> Self {
Fp12 {
c0: Fp6::zero(),
c1: Fp6::zero(),
}
}
#[inline]
pub fn one() -> Self {
Fp12 {
c0: Fp6::one(),
c1: Fp6::zero(),
}
}
#[cfg(feature = "pairings")]
pub(crate) fn random(mut rng: impl RngCore) -> Self {
Fp12 {
c0: Fp6::random(&mut rng),
c1: Fp6::random(&mut rng),
}
}
pub fn mul_by_014(&self, c0: &Fp2, c1: &Fp2, c4: &Fp2) -> Fp12 {
let aa = self.c0.mul_by_01(c0, c1);
let bb = self.c1.mul_by_1(c4);
let o = c1 + c4;
let c1 = self.c1 + self.c0;
let c1 = c1.mul_by_01(c0, &o);
let c1 = c1 - aa - bb;
let c0 = bb;
let c0 = c0.mul_by_nonresidue();
let c0 = c0 + aa;
Fp12 { c0, c1 }
}
#[inline(always)]
pub fn is_zero(&self) -> Choice {
self.c0.is_zero() & self.c1.is_zero()
}
#[inline(always)]
pub fn conjugate(&self) -> Self {
Fp12 {
c0: self.c0,
c1: -self.c1,
}
}
/// Raises this element to p.
#[inline(always)]
pub fn frobenius_map(&self) -> Self {
let c0 = self.c0.frobenius_map();
let c1 = self.c1.frobenius_map();
// c1 = c1 * (u + 1)^((p - 1) / 6)
let c1 = c1
* Fp6::from(Fp2 {
c0: Fp::from_raw_unchecked([
0x0708_9552_b319_d465,
0xc669_5f92_b50a_8313,
0x97e8_3ccc_d117_228f,
0xa35b_aeca_b2dc_29ee,
0x1ce3_93ea_5daa_ce4d,
0x08f2_220f_b0fb_66eb,
]),
c1: Fp::from_raw_unchecked([
0xb2f6_6aad_4ce5_d646,
0x5842_a06b_fc49_7cec,
0xcf48_95d4_2599_d394,
0xc11b_9cba_40a8_e8d0,
0x2e38_13cb_e5a0_de89,
0x110e_efda_8884_7faf,
]),
});
Fp12 { c0, c1 }
}
#[inline]
pub fn square(&self) -> Self {
let ab = self.c0 * self.c1;
let c0c1 = self.c0 + self.c1;
let c0 = self.c1.mul_by_nonresidue();
let c0 = c0 + self.c0;
let c0 = c0 * c0c1;
let c0 = c0 - ab;
let c1 = ab + ab;
let c0 = c0 - ab.mul_by_nonresidue();
Fp12 { c0, c1 }
}
pub fn invert(&self) -> CtOption<Self> {
(self.c0.square() - self.c1.square().mul_by_nonresidue())
.invert()
.map(|t| Fp12 {
c0: self.c0 * t,
c1: self.c1 * -t,
})
}
}
impl<'b> Mul<&'b Fp12> for &Fp12 {
type Output = Fp12;
#[inline]
fn mul(self, other: &'b Fp12) -> Self::Output {
let aa = self.c0 * other.c0;
let bb = self.c1 * other.c1;
let o = other.c0 + other.c1;
let c1 = self.c1 + self.c0;
let c1 = c1 * o;
let c1 = c1 - aa;
let c1 = c1 - bb;
let c0 = bb.mul_by_nonresidue();
let c0 = c0 + aa;
Fp12 { c0, c1 }
}
}
impl<'b> Add<&'b Fp12> for &Fp12 {
type Output = Fp12;
#[inline]
fn add(self, rhs: &'b Fp12) -> Self::Output {
Fp12 {
c0: self.c0 + rhs.c0,
c1: self.c1 + rhs.c1,
}
}
}
impl Neg for &Fp12 {
type Output = Fp12;
#[inline]
fn neg(self) -> Self::Output {
Fp12 {
c0: -self.c0,
c1: -self.c1,
}
}
}
impl Neg for Fp12 {
type Output = Fp12;
#[inline]
fn neg(self) -> Self::Output {
-&self
}
}
impl<'b> Sub<&'b Fp12> for &Fp12 {
type Output = Fp12;
#[inline]
fn sub(self, rhs: &'b Fp12) -> Self::Output {
Fp12 {
c0: self.c0 - rhs.c0,
c1: self.c1 - rhs.c1,
}
}
}
impl_binops_additive!(Fp12, Fp12);
impl_binops_multiplicative!(Fp12, Fp12);
#[test]
fn test_arithmetic() {
use crate::fp::*;
use crate::fp2::*;
let a = Fp12 {
c0: Fp6 {
c0: Fp2 {
c0: Fp::from_raw_unchecked([
0x47f9_cb98_b1b8_2d58,
0x5fe9_11eb_a3aa_1d9d,
0x96bf_1b5f_4dd8_1db3,
0x8100_d27c_c925_9f5b,
0xafa2_0b96_7464_0eab,
0x09bb_cea7_d8d9_497d,
]),
c1: Fp::from_raw_unchecked([
0x0303_cb98_b166_2daa,
0xd931_10aa_0a62_1d5a,
0xbfa9_820c_5be4_a468,
0x0ba3_643e_cb05_a348,
0xdc35_34bb_1f1c_25a6,
0x06c3_05bb_19c0_e1c1,
]),
},
c1: Fp2 {
c0: Fp::from_raw_unchecked([
0x46f9_cb98_b162_d858,
0x0be9_109c_f7aa_1d57,
0xc791_bc55_fece_41d2,
0xf84c_5770_4e38_5ec2,
0xcb49_c1d9_c010_e60f,
0x0acd_b8e1_58bf_e3c8,
]),
c1: Fp::from_raw_unchecked([
0x8aef_cb98_b15f_8306,
0x3ea1_108f_e4f2_1d54,
0xcf79_f69f_a1b7_df3b,
0xe4f5_4aa1_d16b_1a3c,
0xba5e_4ef8_6105_a679,
0x0ed8_6c07_97be_e5cf,
]),
},
c2: Fp2 {
c0: Fp::from_raw_unchecked([
0xcee5_cb98_b15c_2db4,
0x7159_1082_d23a_1d51,
0xd762_30e9_44a1_7ca4,
0xd19e_3dd3_549d_d5b6,
0xa972_dc17_01fa_66e3,
0x12e3_1f2d_d6bd_e7d6,
]),
c1: Fp::from_raw_unchecked([
0xad2a_cb98_b173_2d9d,
0x2cfd_10dd_0696_1d64,
0x0739_6b86_c6ef_24e8,
0xbd76_e2fd_b1bf_c820,
0x6afe_a7f6_de94_d0d5,
0x1099_4b0c_5744_c040,
]),
},
},
c1: Fp6 {
c0: Fp2 {
c0: Fp::from_raw_unchecked([
0x47f9_cb98_b1b8_2d58,
0x5fe9_11eb_a3aa_1d9d,
0x96bf_1b5f_4dd8_1db3,
0x8100_d27c_c925_9f5b,
0xafa2_0b96_7464_0eab,
0x09bb_cea7_d8d9_497d,
]),
c1: Fp::from_raw_unchecked([
0x0303_cb98_b166_2daa,
0xd931_10aa_0a62_1d5a,
0xbfa9_820c_5be4_a468,
0x0ba3_643e_cb05_a348,
0xdc35_34bb_1f1c_25a6,
0x06c3_05bb_19c0_e1c1,
]),
},
c1: Fp2 {
c0: Fp::from_raw_unchecked([
0x46f9_cb98_b162_d858,
0x0be9_109c_f7aa_1d57,
0xc791_bc55_fece_41d2,
0xf84c_5770_4e38_5ec2,
0xcb49_c1d9_c010_e60f,
0x0acd_b8e1_58bf_e3c8,
]),
c1: Fp::from_raw_unchecked([
0x8aef_cb98_b15f_8306,
0x3ea1_108f_e4f2_1d54,
0xcf79_f69f_a1b7_df3b,
0xe4f5_4aa1_d16b_1a3c,
0xba5e_4ef8_6105_a679,
0x0ed8_6c07_97be_e5cf,
]),
},
c2: Fp2 {
c0: Fp::from_raw_unchecked([
0xcee5_cb98_b15c_2db4,
0x7159_1082_d23a_1d51,
0xd762_30e9_44a1_7ca4,
0xd19e_3dd3_549d_d5b6,
0xa972_dc17_01fa_66e3,
0x12e3_1f2d_d6bd_e7d6,
]),
c1: Fp::from_raw_unchecked([
0xad2a_cb98_b173_2d9d,
0x2cfd_10dd_0696_1d64,
0x0739_6b86_c6ef_24e8,
0xbd76_e2fd_b1bf_c820,
0x6afe_a7f6_de94_d0d5,
0x1099_4b0c_5744_c040,
]),
},
},
};
let b = Fp12 {
c0: Fp6 {
c0: Fp2 {
c0: Fp::from_raw_unchecked([
0x47f9_cb98_b1b8_2d58,
0x5fe9_11eb_a3aa_1d9d,
0x96bf_1b5f_4dd8_1db3,
0x8100_d272_c925_9f5b,
0xafa2_0b96_7464_0eab,
0x09bb_cea7_d8d9_497d,
]),
c1: Fp::from_raw_unchecked([
0x0303_cb98_b166_2daa,
0xd931_10aa_0a62_1d5a,
0xbfa9_820c_5be4_a468,
0x0ba3_643e_cb05_a348,
0xdc35_34bb_1f1c_25a6,
0x06c3_05bb_19c0_e1c1,
]),
},
c1: Fp2 {
c0: Fp::from_raw_unchecked([
0x46f9_cb98_b162_d858,
0x0be9_109c_f7aa_1d57,
0xc791_bc55_fece_41d2,
0xf84c_5770_4e38_5ec2,
0xcb49_c1d9_c010_e60f,
0x0acd_b8e1_58bf_e348,
]),
c1: Fp::from_raw_unchecked([
0x8aef_cb98_b15f_8306,
0x3ea1_108f_e4f2_1d54,
0xcf79_f69f_a1b7_df3b,
0xe4f5_4aa1_d16b_1a3c,
0xba5e_4ef8_6105_a679,
0x0ed8_6c07_97be_e5cf,
]),
},
c2: Fp2 {
c0: Fp::from_raw_unchecked([
0xcee5_cb98_b15c_2db4,
0x7159_1082_d23a_1d51,
0xd762_30e9_44a1_7ca4,
0xd19e_3dd3_549d_d5b6,
0xa972_dc17_01fa_66e3,
0x12e3_1f2d_d6bd_e7d6,
]),
c1: Fp::from_raw_unchecked([
0xad2a_cb98_b173_2d9d,
0x2cfd_10dd_0696_1d64,
0x0739_6b86_c6ef_24e8,
0xbd76_e2fd_b1bf_c820,
0x6afe_a7f6_de94_d0d5,
0x1099_4b0c_5744_c040,
]),
},
},
c1: Fp6 {
c0: Fp2 {
c0: Fp::from_raw_unchecked([
0x47f9_cb98_b1b8_2d58,
0x5fe9_11eb_a3aa_1d9d,
0x96bf_1b5f_4dd2_1db3,
0x8100_d27c_c925_9f5b,
0xafa2_0b96_7464_0eab,
0x09bb_cea7_d8d9_497d,
]),
c1: Fp::from_raw_unchecked([
0x0303_cb98_b166_2daa,
0xd931_10aa_0a62_1d5a,
0xbfa9_820c_5be4_a468,
0x0ba3_643e_cb05_a348,
0xdc35_34bb_1f1c_25a6,
0x06c3_05bb_19c0_e1c1,
]),
},
c1: Fp2 {
c0: Fp::from_raw_unchecked([
0x46f9_cb98_b162_d858,
0x0be9_109c_f7aa_1d57,
0xc791_bc55_fece_41d2,
0xf84c_5770_4e38_5ec2,
0xcb49_c1d9_c010_e60f,
0x0acd_b8e1_58bf_e3c8,
]),
c1: Fp::from_raw_unchecked([
0x8aef_cb98_b15f_8306,
0x3ea1_108f_e4f2_1d54,
0xcf79_f69f_a117_df3b,
0xe4f5_4aa1_d16b_1a3c,
0xba5e_4ef8_6105_a679,
0x0ed8_6c07_97be_e5cf,
]),
},
c2: Fp2 {
c0: Fp::from_raw_unchecked([
0xcee5_cb98_b15c_2db4,
0x7159_1082_d23a_1d51,
0xd762_30e9_44a1_7ca4,
0xd19e_3dd3_549d_d5b6,
0xa972_dc17_01fa_66e3,
0x12e3_1f2d_d6bd_e7d6,
]),
c1: Fp::from_raw_unchecked([
0xad2a_cb98_b173_2d9d,
0x2cfd_10dd_0696_1d64,
0x0739_6b86_c6ef_24e8,
0xbd76_e2fd_b1bf_c820,
0x6afe_a7f6_de94_d0d5,
0x1099_4b0c_5744_c040,
]),
},
},
};
let c = Fp12 {
c0: Fp6 {
c0: Fp2 {
c0: Fp::from_raw_unchecked([
0x47f9_cb98_71b8_2d58,
0x5fe9_11eb_a3aa_1d9d,
0x96bf_1b5f_4dd8_1db3,
0x8100_d27c_c925_9f5b,
0xafa2_0b96_7464_0eab,
0x09bb_cea7_d8d9_497d,
]),
c1: Fp::from_raw_unchecked([
0x0303_cb98_b166_2daa,
0xd931_10aa_0a62_1d5a,
0xbfa9_820c_5be4_a468,
0x0ba3_643e_cb05_a348,
0xdc35_34bb_1f1c_25a6,
0x06c3_05bb_19c0_e1c1,
]),
},
c1: Fp2 {
c0: Fp::from_raw_unchecked([
0x46f9_cb98_b162_d858,
0x0be9_109c_f7aa_1d57,
0x7791_bc55_fece_41d2,
0xf84c_5770_4e38_5ec2,
0xcb49_c1d9_c010_e60f,
0x0acd_b8e1_58bf_e3c8,
]),
c1: Fp::from_raw_unchecked([
0x8aef_cb98_b15f_8306,
0x3ea1_108f_e4f2_1d54,
0xcf79_f69f_a1b7_df3b,
0xe4f5_4aa1_d16b_133c,
0xba5e_4ef8_6105_a679,
0x0ed8_6c07_97be_e5cf,
]),
},
c2: Fp2 {
c0: Fp::from_raw_unchecked([
0xcee5_cb98_b15c_2db4,
0x7159_1082_d23a_1d51,
0xd762_40e9_44a1_7ca4,
0xd19e_3dd3_549d_d5b6,
0xa972_dc17_01fa_66e3,
0x12e3_1f2d_d6bd_e7d6,
]),
c1: Fp::from_raw_unchecked([
0xad2a_cb98_b173_2d9d,
0x2cfd_10dd_0696_1d64,
0x0739_6b86_c6ef_24e8,
0xbd76_e2fd_b1bf_c820,
0x6afe_a7f6_de94_d0d5,
0x1099_4b0c_1744_c040,
]),
},
},
c1: Fp6 {
c0: Fp2 {
c0: Fp::from_raw_unchecked([
0x47f9_cb98_b1b8_2d58,
0x5fe9_11eb_a3aa_1d9d,
0x96bf_1b5f_4dd8_1db3,
0x8100_d27c_c925_9f5b,
0xafa2_0b96_7464_0eab,
0x09bb_cea7_d8d9_497d,
]),
c1: Fp::from_raw_unchecked([
0x0303_cb98_b166_2daa,
0xd931_10aa_0a62_1d5a,
0xbfa9_820c_5be4_a468,
0x0ba3_643e_cb05_a348,
0xdc35_34bb_1f1c_25a6,
0x06c3_05bb_19c0_e1c1,
]),
},
c1: Fp2 {
c0: Fp::from_raw_unchecked([
0x46f9_cb98_b162_d858,
0x0be9_109c_f7aa_1d57,
0xc791_bc55_fece_41d2,
0xf84c_5770_4e38_5ec2,
0xcb49_c1d3_c010_e60f,
0x0acd_b8e1_58bf_e3c8,
]),
c1: Fp::from_raw_unchecked([
0x8aef_cb98_b15f_8306,
0x3ea1_108f_e4f2_1d54,
0xcf79_f69f_a1b7_df3b,
0xe4f5_4aa1_d16b_1a3c,
0xba5e_4ef8_6105_a679,
0x0ed8_6c07_97be_e5cf,
]),
},
c2: Fp2 {
c0: Fp::from_raw_unchecked([
0xcee5_cb98_b15c_2db4,
0x7159_1082_d23a_1d51,
0xd762_30e9_44a1_7ca4,
0xd19e_3dd3_549d_d5b6,
0xa972_dc17_01fa_66e3,
0x12e3_1f2d_d6bd_e7d6,
]),
c1: Fp::from_raw_unchecked([
0xad2a_cb98_b173_2d9d,
0x2cfd_10dd_0696_1d64,
0x0739_6b86_c6ef_24e8,
0xbd76_e2fd_b1bf_c820,
0x6afe_a7f6_de94_d0d5,
0x1099_4b0c_5744_1040,
]),
},
},
};
// because a and b and c are similar to each other and
// I was lazy, this is just some arbitrary way to make
// them a little more different
let a = a.square().invert().unwrap().square() + c;
let b = b.square().invert().unwrap().square() + a;
let c = c.square().invert().unwrap().square() + b;
assert_eq!(a.square(), a * a);
assert_eq!(b.square(), b * b);
assert_eq!(c.square(), c * c);
assert_eq!((a + b) * c.square(), (c * c * a) + (c * c * b));
assert_eq!(
a.invert().unwrap() * b.invert().unwrap(),
(a * b).invert().unwrap()
);
assert_eq!(a.invert().unwrap() * a, Fp12::one());
assert!(a != a.frobenius_map());
assert_eq!(
a,
a.frobenius_map()
.frobenius_map()
.frobenius_map()
.frobenius_map()
.frobenius_map()
.frobenius_map()
.frobenius_map()
.frobenius_map()
.frobenius_map()
.frobenius_map()
.frobenius_map()
.frobenius_map()
);
}
#[cfg(feature = "zeroize")]
#[test]
fn test_zeroize() {
use zeroize::Zeroize;
let mut a = Fp12::one();
a.zeroize();
assert!(bool::from(a.is_zero()));
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/zkcrypto/bls12_381/src/g2.rs | zkcrypto/bls12_381/src/g2.rs | //! This module provides an implementation of the $\mathbb{G}_2$ group of BLS12-381.
#![allow(clippy::all)]
use core::borrow::Borrow;
use core::fmt;
use core::iter::Sum;
use core::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign};
use group::{
prime::{PrimeCurve, PrimeCurveAffine, PrimeGroup},
Curve, Group, GroupEncoding, UncompressedEncoding,
};
use rand_core::RngCore;
use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption};
#[cfg(feature = "alloc")]
use group::WnafGroup;
use crate::fp::Fp;
use crate::fp2::Fp2;
use crate::Scalar;
/// This is an element of $\mathbb{G}_2$ represented in the affine coordinate space.
/// It is ideal to keep elements in this representation to reduce memory usage and
/// improve performance through the use of mixed curve model arithmetic.
///
/// Values of `G2Affine` are guaranteed to be in the $q$-order subgroup unless an
/// "unchecked" API was misused.
#[cfg_attr(docsrs, doc(cfg(feature = "groups")))]
#[derive(Copy, Clone, Debug)]
pub struct G2Affine {
pub(crate) x: Fp2,
pub(crate) y: Fp2,
infinity: Choice,
}
impl Default for G2Affine {
fn default() -> G2Affine {
G2Affine::identity()
}
}
#[cfg(feature = "zeroize")]
impl zeroize::DefaultIsZeroes for G2Affine {}
impl fmt::Display for G2Affine {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl<'a> From<&'a G2Projective> for G2Affine {
fn from(p: &'a G2Projective) -> G2Affine {
let zinv = p.z.invert().unwrap_or(Fp2::zero());
let x = p.x * zinv;
let y = p.y * zinv;
let tmp = G2Affine {
x,
y,
infinity: Choice::from(0u8),
};
G2Affine::conditional_select(&tmp, &G2Affine::identity(), zinv.is_zero())
}
}
impl From<G2Projective> for G2Affine {
fn from(p: G2Projective) -> G2Affine {
G2Affine::from(&p)
}
}
impl ConstantTimeEq for G2Affine {
fn ct_eq(&self, other: &Self) -> Choice {
// The only cases in which two points are equal are
// 1. infinity is set on both
// 2. infinity is not set on both, and their coordinates are equal
(self.infinity & other.infinity)
| ((!self.infinity)
& (!other.infinity)
& self.x.ct_eq(&other.x)
& self.y.ct_eq(&other.y))
}
}
impl ConditionallySelectable for G2Affine {
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
G2Affine {
x: Fp2::conditional_select(&a.x, &b.x, choice),
y: Fp2::conditional_select(&a.y, &b.y, choice),
infinity: Choice::conditional_select(&a.infinity, &b.infinity, choice),
}
}
}
impl Eq for G2Affine {}
impl PartialEq for G2Affine {
#[inline]
fn eq(&self, other: &Self) -> bool {
bool::from(self.ct_eq(other))
}
}
impl<'a> Neg for &'a G2Affine {
type Output = G2Affine;
#[inline]
fn neg(self) -> G2Affine {
G2Affine {
x: self.x,
y: Fp2::conditional_select(&-self.y, &Fp2::one(), self.infinity),
infinity: self.infinity,
}
}
}
impl Neg for G2Affine {
type Output = G2Affine;
#[inline]
fn neg(self) -> G2Affine {
-&self
}
}
impl<'a, 'b> Add<&'b G2Projective> for &'a G2Affine {
type Output = G2Projective;
#[inline]
fn add(self, rhs: &'b G2Projective) -> G2Projective {
rhs.add_mixed(self)
}
}
impl<'a, 'b> Add<&'b G2Affine> for &'a G2Projective {
type Output = G2Projective;
#[inline]
fn add(self, rhs: &'b G2Affine) -> G2Projective {
self.add_mixed(rhs)
}
}
impl<'a, 'b> Sub<&'b G2Projective> for &'a G2Affine {
type Output = G2Projective;
#[inline]
fn sub(self, rhs: &'b G2Projective) -> G2Projective {
self + (-rhs)
}
}
impl<'a, 'b> Sub<&'b G2Affine> for &'a G2Projective {
type Output = G2Projective;
#[inline]
fn sub(self, rhs: &'b G2Affine) -> G2Projective {
self + (-rhs)
}
}
impl<T> Sum<T> for G2Projective
where
T: Borrow<G2Projective>,
{
fn sum<I>(iter: I) -> Self
where
I: Iterator<Item = T>,
{
iter.fold(Self::identity(), |acc, item| acc + item.borrow())
}
}
impl_binops_additive!(G2Projective, G2Affine);
impl_binops_additive_specify_output!(G2Affine, G2Projective, G2Projective);
const B: Fp2 = Fp2 {
c0: Fp::from_raw_unchecked([
0xaa27_0000_000c_fff3,
0x53cc_0032_fc34_000a,
0x478f_e97a_6b0a_807f,
0xb1d3_7ebe_e6ba_24d7,
0x8ec9_733b_bf78_ab2f,
0x09d6_4551_3d83_de7e,
]),
c1: Fp::from_raw_unchecked([
0xaa27_0000_000c_fff3,
0x53cc_0032_fc34_000a,
0x478f_e97a_6b0a_807f,
0xb1d3_7ebe_e6ba_24d7,
0x8ec9_733b_bf78_ab2f,
0x09d6_4551_3d83_de7e,
]),
};
const B3: Fp2 = Fp2::add(&Fp2::add(&B, &B), &B);
impl G2Affine {
/// Returns the identity of the group: the point at infinity.
pub fn identity() -> G2Affine {
G2Affine {
x: Fp2::zero(),
y: Fp2::one(),
infinity: Choice::from(1u8),
}
}
/// Returns a fixed generator of the group. See [`notes::design`](notes/design/index.html#fixed-generators)
/// for how this generator is chosen.
pub fn generator() -> G2Affine {
G2Affine {
x: Fp2 {
c0: Fp::from_raw_unchecked([
0xf5f2_8fa2_0294_0a10,
0xb3f5_fb26_87b4_961a,
0xa1a8_93b5_3e2a_e580,
0x9894_999d_1a3c_aee9,
0x6f67_b763_1863_366b,
0x0581_9192_4350_bcd7,
]),
c1: Fp::from_raw_unchecked([
0xa5a9_c075_9e23_f606,
0xaaa0_c59d_bccd_60c3,
0x3bb1_7e18_e286_7806,
0x1b1a_b6cc_8541_b367,
0xc2b6_ed0e_f215_8547,
0x1192_2a09_7360_edf3,
]),
},
y: Fp2 {
c0: Fp::from_raw_unchecked([
0x4c73_0af8_6049_4c4a,
0x597c_fa1f_5e36_9c5a,
0xe7e6_856c_aa0a_635a,
0xbbef_b5e9_6e0d_495f,
0x07d3_a975_f0ef_25a2,
0x0083_fd8e_7e80_dae5,
]),
c1: Fp::from_raw_unchecked([
0xadc0_fc92_df64_b05d,
0x18aa_270a_2b14_61dc,
0x86ad_ac6a_3be4_eba0,
0x7949_5c4e_c93d_a33a,
0xe717_5850_a43c_caed,
0x0b2b_c2a1_63de_1bf2,
]),
},
infinity: Choice::from(0u8),
}
}
/// Serializes this element into compressed form. See [`notes::serialization`](crate::notes::serialization)
/// for details about how group elements are serialized.
pub fn to_compressed(&self) -> [u8; 96] {
// Strictly speaking, self.x is zero already when self.infinity is true, but
// to guard against implementation mistakes we do not assume this.
let x = Fp2::conditional_select(&self.x, &Fp2::zero(), self.infinity);
let mut res = [0; 96];
(&mut res[0..48]).copy_from_slice(&x.c1.to_bytes()[..]);
(&mut res[48..96]).copy_from_slice(&x.c0.to_bytes()[..]);
// This point is in compressed form, so we set the most significant bit.
res[0] |= 1u8 << 7;
// Is this point at infinity? If so, set the second-most significant bit.
res[0] |= u8::conditional_select(&0u8, &(1u8 << 6), self.infinity);
// Is the y-coordinate the lexicographically largest of the two associated with the
// x-coordinate? If so, set the third-most significant bit so long as this is not
// the point at infinity.
res[0] |= u8::conditional_select(
&0u8,
&(1u8 << 5),
(!self.infinity) & self.y.lexicographically_largest(),
);
res
}
/// Serializes this element into uncompressed form. See [`notes::serialization`](crate::notes::serialization)
/// for details about how group elements are serialized.
pub fn to_uncompressed(&self) -> [u8; 192] {
let mut res = [0; 192];
let x = Fp2::conditional_select(&self.x, &Fp2::zero(), self.infinity);
let y = Fp2::conditional_select(&self.y, &Fp2::zero(), self.infinity);
res[0..48].copy_from_slice(&x.c1.to_bytes()[..]);
res[48..96].copy_from_slice(&x.c0.to_bytes()[..]);
res[96..144].copy_from_slice(&y.c1.to_bytes()[..]);
res[144..192].copy_from_slice(&y.c0.to_bytes()[..]);
// Is this point at infinity? If so, set the second-most significant bit.
res[0] |= u8::conditional_select(&0u8, &(1u8 << 6), self.infinity);
res
}
/// Attempts to deserialize an uncompressed element. See [`notes::serialization`](crate::notes::serialization)
/// for details about how group elements are serialized.
pub fn from_uncompressed(bytes: &[u8; 192]) -> CtOption<Self> {
Self::from_uncompressed_unchecked(bytes)
.and_then(|p| CtOption::new(p, p.is_on_curve() & p.is_torsion_free()))
}
/// Attempts to deserialize an uncompressed element, not checking if the
/// element is on the curve and not checking if it is in the correct subgroup.
/// **This is dangerous to call unless you trust the bytes you are reading; otherwise,
/// API invariants may be broken.** Please consider using `from_uncompressed()` instead.
pub fn from_uncompressed_unchecked(bytes: &[u8; 192]) -> CtOption<Self> {
// Obtain the three flags from the start of the byte sequence
let compression_flag_set = Choice::from((bytes[0] >> 7) & 1);
let infinity_flag_set = Choice::from((bytes[0] >> 6) & 1);
let sort_flag_set = Choice::from((bytes[0] >> 5) & 1);
// Attempt to obtain the x-coordinate
let xc1 = {
let mut tmp = [0; 48];
tmp.copy_from_slice(&bytes[0..48]);
// Mask away the flag bits
tmp[0] &= 0b0001_1111;
Fp::from_bytes(&tmp)
};
let xc0 = {
let mut tmp = [0; 48];
tmp.copy_from_slice(&bytes[48..96]);
Fp::from_bytes(&tmp)
};
// Attempt to obtain the y-coordinate
let yc1 = {
let mut tmp = [0; 48];
tmp.copy_from_slice(&bytes[96..144]);
Fp::from_bytes(&tmp)
};
let yc0 = {
let mut tmp = [0; 48];
tmp.copy_from_slice(&bytes[144..192]);
Fp::from_bytes(&tmp)
};
xc1.and_then(|xc1| {
xc0.and_then(|xc0| {
yc1.and_then(|yc1| {
yc0.and_then(|yc0| {
let x = Fp2 {
c0: xc0,
c1: xc1
};
let y = Fp2 {
c0: yc0,
c1: yc1
};
// Create a point representing this value
let p = G2Affine::conditional_select(
&G2Affine {
x,
y,
infinity: infinity_flag_set,
},
&G2Affine::identity(),
infinity_flag_set,
);
CtOption::new(
p,
// If the infinity flag is set, the x and y coordinates should have been zero.
((!infinity_flag_set) | (infinity_flag_set & x.is_zero() & y.is_zero())) &
// The compression flag should not have been set, as this is an uncompressed element
(!compression_flag_set) &
// The sort flag should not have been set, as this is an uncompressed element
(!sort_flag_set),
)
})
})
})
})
}
/// Attempts to deserialize a compressed element. See [`notes::serialization`](crate::notes::serialization)
/// for details about how group elements are serialized.
pub fn from_compressed(bytes: &[u8; 96]) -> CtOption<Self> {
// We already know the point is on the curve because this is established
// by the y-coordinate recovery procedure in from_compressed_unchecked().
Self::from_compressed_unchecked(bytes).and_then(|p| CtOption::new(p, p.is_torsion_free()))
}
/// Attempts to deserialize an uncompressed element, not checking if the
/// element is in the correct subgroup.
/// **This is dangerous to call unless you trust the bytes you are reading; otherwise,
/// API invariants may be broken.** Please consider using `from_compressed()` instead.
pub fn from_compressed_unchecked(bytes: &[u8; 96]) -> CtOption<Self> {
// Obtain the three flags from the start of the byte sequence
let compression_flag_set = Choice::from((bytes[0] >> 7) & 1);
let infinity_flag_set = Choice::from((bytes[0] >> 6) & 1);
let sort_flag_set = Choice::from((bytes[0] >> 5) & 1);
// Attempt to obtain the x-coordinate
let xc1 = {
let mut tmp = [0; 48];
tmp.copy_from_slice(&bytes[0..48]);
// Mask away the flag bits
tmp[0] &= 0b0001_1111;
Fp::from_bytes(&tmp)
};
let xc0 = {
let mut tmp = [0; 48];
tmp.copy_from_slice(&bytes[48..96]);
Fp::from_bytes(&tmp)
};
xc1.and_then(|xc1| {
xc0.and_then(|xc0| {
let x = Fp2 { c0: xc0, c1: xc1 };
// If the infinity flag is set, return the value assuming
// the x-coordinate is zero and the sort bit is not set.
//
// Otherwise, return a recovered point (assuming the correct
// y-coordinate can be found) so long as the infinity flag
// was not set.
CtOption::new(
G2Affine::identity(),
infinity_flag_set & // Infinity flag should be set
compression_flag_set & // Compression flag should be set
(!sort_flag_set) & // Sort flag should not be set
x.is_zero(), // The x-coordinate should be zero
)
.or_else(|| {
// Recover a y-coordinate given x by y = sqrt(x^3 + 4)
((x.square() * x) + B).sqrt().and_then(|y| {
// Switch to the correct y-coordinate if necessary.
let y = Fp2::conditional_select(
&y,
&-y,
y.lexicographically_largest() ^ sort_flag_set,
);
CtOption::new(
G2Affine {
x,
y,
infinity: infinity_flag_set,
},
(!infinity_flag_set) & // Infinity flag should not be set
compression_flag_set, // Compression flag should be set
)
})
})
})
})
}
/// Returns true if this element is the identity (the point at infinity).
#[inline]
pub fn is_identity(&self) -> Choice {
self.infinity
}
/// Returns true if this point is free of an $h$-torsion component, and so it
/// exists within the $q$-order subgroup $\mathbb{G}_2$. This should always return true
/// unless an "unchecked" API was used.
pub fn is_torsion_free(&self) -> Choice {
// Algorithm from Section 4 of https://eprint.iacr.org/2021/1130
// Updated proof of correctness in https://eprint.iacr.org/2022/352
//
// Check that psi(P) == [x] P
let p = G2Projective::from(self);
p.psi().ct_eq(&p.mul_by_x())
}
/// Returns true if this point is on the curve. This should always return
/// true unless an "unchecked" API was used.
pub fn is_on_curve(&self) -> Choice {
// y^2 - x^3 ?= 4(u + 1)
(self.y.square() - (self.x.square() * self.x)).ct_eq(&B) | self.infinity
}
}
/// This is an element of $\mathbb{G}_2$ represented in the projective coordinate space.
#[cfg_attr(docsrs, doc(cfg(feature = "groups")))]
#[derive(Copy, Clone, Debug)]
pub struct G2Projective {
pub x: Fp2,
pub y: Fp2,
pub z: Fp2,
}
impl Default for G2Projective {
fn default() -> G2Projective {
G2Projective::identity()
}
}
#[cfg(feature = "zeroize")]
impl zeroize::DefaultIsZeroes for G2Projective {}
impl fmt::Display for G2Projective {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl<'a> From<&'a G2Affine> for G2Projective {
fn from(p: &'a G2Affine) -> G2Projective {
G2Projective {
x: p.x,
y: p.y,
z: Fp2::conditional_select(&Fp2::one(), &Fp2::zero(), p.infinity),
}
}
}
impl From<G2Affine> for G2Projective {
fn from(p: G2Affine) -> G2Projective {
G2Projective::from(&p)
}
}
impl ConstantTimeEq for G2Projective {
fn ct_eq(&self, other: &Self) -> Choice {
// Is (xz, yz, z) equal to (x'z', y'z', z') when converted to affine?
let x1 = self.x * other.z;
let x2 = other.x * self.z;
let y1 = self.y * other.z;
let y2 = other.y * self.z;
let self_is_zero = self.z.is_zero();
let other_is_zero = other.z.is_zero();
(self_is_zero & other_is_zero) // Both point at infinity
| ((!self_is_zero) & (!other_is_zero) & x1.ct_eq(&x2) & y1.ct_eq(&y2))
// Neither point at infinity, coordinates are the same
}
}
impl ConditionallySelectable for G2Projective {
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
G2Projective {
x: Fp2::conditional_select(&a.x, &b.x, choice),
y: Fp2::conditional_select(&a.y, &b.y, choice),
z: Fp2::conditional_select(&a.z, &b.z, choice),
}
}
}
impl Eq for G2Projective {}
impl PartialEq for G2Projective {
#[inline]
fn eq(&self, other: &Self) -> bool {
bool::from(self.ct_eq(other))
}
}
impl<'a> Neg for &'a G2Projective {
type Output = G2Projective;
#[inline]
fn neg(self) -> G2Projective {
G2Projective {
x: self.x,
y: -self.y,
z: self.z,
}
}
}
impl Neg for G2Projective {
type Output = G2Projective;
#[inline]
fn neg(self) -> G2Projective {
-&self
}
}
impl<'a, 'b> Add<&'b G2Projective> for &'a G2Projective {
type Output = G2Projective;
#[inline]
fn add(self, rhs: &'b G2Projective) -> G2Projective {
self.add(rhs)
}
}
impl<'a, 'b> Sub<&'b G2Projective> for &'a G2Projective {
type Output = G2Projective;
#[inline]
fn sub(self, rhs: &'b G2Projective) -> G2Projective {
self + (-rhs)
}
}
impl<'a, 'b> Mul<&'b Scalar> for &'a G2Projective {
type Output = G2Projective;
fn mul(self, other: &'b Scalar) -> Self::Output {
self.multiply(&other.to_bytes())
}
}
impl<'a, 'b> Mul<&'b G2Projective> for &'a Scalar {
type Output = G2Projective;
#[inline]
fn mul(self, rhs: &'b G2Projective) -> Self::Output {
rhs * self
}
}
impl<'a, 'b> Mul<&'b Scalar> for &'a G2Affine {
type Output = G2Projective;
fn mul(self, other: &'b Scalar) -> Self::Output {
G2Projective::from(self).multiply(&other.to_bytes())
}
}
impl<'a, 'b> Mul<&'b G2Affine> for &'a Scalar {
type Output = G2Projective;
#[inline]
fn mul(self, rhs: &'b G2Affine) -> Self::Output {
rhs * self
}
}
impl_binops_additive!(G2Projective, G2Projective);
impl_binops_multiplicative!(G2Projective, Scalar);
impl_binops_multiplicative_mixed!(G2Affine, Scalar, G2Projective);
impl_binops_multiplicative_mixed!(Scalar, G2Affine, G2Projective);
impl_binops_multiplicative_mixed!(Scalar, G2Projective, G2Projective);
#[inline(always)]
fn mul_by_3b(x: Fp2) -> Fp2 {
x * B3
}
impl G2Projective {
/// Returns the identity of the group: the point at infinity.
pub fn identity() -> G2Projective {
G2Projective {
x: Fp2::zero(),
y: Fp2::one(),
z: Fp2::zero(),
}
}
/// Returns a fixed generator of the group. See [`notes::design`](notes/design/index.html#fixed-generators)
/// for how this generator is chosen.
pub fn generator() -> G2Projective {
G2Projective {
x: Fp2 {
c0: Fp::from_raw_unchecked([
0xf5f2_8fa2_0294_0a10,
0xb3f5_fb26_87b4_961a,
0xa1a8_93b5_3e2a_e580,
0x9894_999d_1a3c_aee9,
0x6f67_b763_1863_366b,
0x0581_9192_4350_bcd7,
]),
c1: Fp::from_raw_unchecked([
0xa5a9_c075_9e23_f606,
0xaaa0_c59d_bccd_60c3,
0x3bb1_7e18_e286_7806,
0x1b1a_b6cc_8541_b367,
0xc2b6_ed0e_f215_8547,
0x1192_2a09_7360_edf3,
]),
},
y: Fp2 {
c0: Fp::from_raw_unchecked([
0x4c73_0af8_6049_4c4a,
0x597c_fa1f_5e36_9c5a,
0xe7e6_856c_aa0a_635a,
0xbbef_b5e9_6e0d_495f,
0x07d3_a975_f0ef_25a2,
0x0083_fd8e_7e80_dae5,
]),
c1: Fp::from_raw_unchecked([
0xadc0_fc92_df64_b05d,
0x18aa_270a_2b14_61dc,
0x86ad_ac6a_3be4_eba0,
0x7949_5c4e_c93d_a33a,
0xe717_5850_a43c_caed,
0x0b2b_c2a1_63de_1bf2,
]),
},
z: Fp2::one(),
}
}
/// Computes the doubling of this point.
pub fn double(&self) -> G2Projective {
// Algorithm 9, https://eprint.iacr.org/2015/1060.pdf
let t0 = self.y.square();
let z3 = t0 + t0;
let z3 = z3 + z3;
let z3 = z3 + z3;
let t1 = self.y * self.z;
let t2 = self.z.square();
let t2 = mul_by_3b(t2);
let x3 = t2 * z3;
let y3 = t0 + t2;
let z3 = t1 * z3;
let t1 = t2 + t2;
let t2 = t1 + t2;
let t0 = t0 - t2;
let y3 = t0 * y3;
let y3 = x3 + y3;
let t1 = self.x * self.y;
let x3 = t0 * t1;
let x3 = x3 + x3;
let tmp = G2Projective {
x: x3,
y: y3,
z: z3,
};
G2Projective::conditional_select(&tmp, &G2Projective::identity(), self.is_identity())
}
/// Adds this point to another point.
pub fn add(&self, rhs: &G2Projective) -> G2Projective {
// Algorithm 7, https://eprint.iacr.org/2015/1060.pdf
let t0 = self.x * rhs.x;
let t1 = self.y * rhs.y;
let t2 = self.z * rhs.z;
let t3 = self.x + self.y;
let t4 = rhs.x + rhs.y;
let t3 = t3 * t4;
let t4 = t0 + t1;
let t3 = t3 - t4;
let t4 = self.y + self.z;
let x3 = rhs.y + rhs.z;
let t4 = t4 * x3;
let x3 = t1 + t2;
let t4 = t4 - x3;
let x3 = self.x + self.z;
let y3 = rhs.x + rhs.z;
let x3 = x3 * y3;
let y3 = t0 + t2;
let y3 = x3 - y3;
let x3 = t0 + t0;
let t0 = x3 + t0;
let t2 = mul_by_3b(t2);
let z3 = t1 + t2;
let t1 = t1 - t2;
let y3 = mul_by_3b(y3);
let x3 = t4 * y3;
let t2 = t3 * t1;
let x3 = t2 - x3;
let y3 = y3 * t0;
let t1 = t1 * z3;
let y3 = t1 + y3;
let t0 = t0 * t3;
let z3 = z3 * t4;
let z3 = z3 + t0;
G2Projective {
x: x3,
y: y3,
z: z3,
}
}
/// Adds this point to another point in the affine model.
pub fn add_mixed(&self, rhs: &G2Affine) -> G2Projective {
// Algorithm 8, https://eprint.iacr.org/2015/1060.pdf
let t0 = self.x * rhs.x;
let t1 = self.y * rhs.y;
let t3 = rhs.x + rhs.y;
let t4 = self.x + self.y;
let t3 = t3 * t4;
let t4 = t0 + t1;
let t3 = t3 - t4;
let t4 = rhs.y * self.z;
let t4 = t4 + self.y;
let y3 = rhs.x * self.z;
let y3 = y3 + self.x;
let x3 = t0 + t0;
let t0 = x3 + t0;
let t2 = mul_by_3b(self.z);
let z3 = t1 + t2;
let t1 = t1 - t2;
let y3 = mul_by_3b(y3);
let x3 = t4 * y3;
let t2 = t3 * t1;
let x3 = t2 - x3;
let y3 = y3 * t0;
let t1 = t1 * z3;
let y3 = t1 + y3;
let t0 = t0 * t3;
let z3 = z3 * t4;
let z3 = z3 + t0;
let tmp = G2Projective {
x: x3,
y: y3,
z: z3,
};
G2Projective::conditional_select(&tmp, self, rhs.is_identity())
}
fn multiply(&self, by: &[u8]) -> G2Projective {
let mut acc = G2Projective::identity();
// This is a simple double-and-add implementation of point
// multiplication, moving from most significant to least
// significant bit of the scalar.
//
// We skip the leading bit because it's always unset for Fq
// elements.
for bit in by
.iter()
.rev()
.flat_map(|byte| (0..8).rev().map(move |i| Choice::from((byte >> i) & 1u8)))
.skip(1)
{
acc = acc.double();
acc = G2Projective::conditional_select(&acc, &(acc + self), bit);
}
acc
}
fn psi(&self) -> G2Projective {
// 1 / ((u+1) ^ ((q-1)/3))
let psi_coeff_x = Fp2 {
c0: Fp::zero(),
c1: Fp::from_raw_unchecked([
0x890dc9e4867545c3,
0x2af322533285a5d5,
0x50880866309b7e2c,
0xa20d1b8c7e881024,
0x14e4f04fe2db9068,
0x14e56d3f1564853a,
]),
};
// 1 / ((u+1) ^ (p-1)/2)
let psi_coeff_y = Fp2 {
c0: Fp::from_raw_unchecked([
0x3e2f585da55c9ad1,
0x4294213d86c18183,
0x382844c88b623732,
0x92ad2afd19103e18,
0x1d794e4fac7cf0b9,
0x0bd592fc7d825ec8,
]),
c1: Fp::from_raw_unchecked([
0x7bcfa7a25aa30fda,
0xdc17dec12a927e7c,
0x2f088dd86b4ebef1,
0xd1ca2087da74d4a7,
0x2da2596696cebc1d,
0x0e2b7eedbbfd87d2,
]),
};
G2Projective {
// x = frobenius(x)/((u+1)^((p-1)/3))
x: self.x.frobenius_map() * psi_coeff_x,
// y = frobenius(y)/(u+1)^((p-1)/2)
y: self.y.frobenius_map() * psi_coeff_y,
// z = frobenius(z)
z: self.z.frobenius_map(),
}
}
fn psi2(&self) -> G2Projective {
// 1 / 2 ^ ((q-1)/3)
let psi2_coeff_x = Fp2 {
c0: Fp::from_raw_unchecked([
0xcd03c9e48671f071,
0x5dab22461fcda5d2,
0x587042afd3851b95,
0x8eb60ebe01bacb9e,
0x03f97d6e83d050d2,
0x18f0206554638741,
]),
c1: Fp::zero(),
};
G2Projective {
// x = frobenius^2(x)/2^((p-1)/3); note that q^2 is the order of the field.
x: self.x * psi2_coeff_x,
// y = -frobenius^2(y); note that q^2 is the order of the field.
y: self.y.neg(),
// z = z
z: self.z,
}
}
/// Multiply `self` by `crate::BLS_X`, using double and add.
fn mul_by_x(&self) -> G2Projective {
let mut xself = G2Projective::identity();
// NOTE: in BLS12-381 we can just skip the first bit.
let mut x = crate::BLS_X >> 1;
let mut acc = *self;
while x != 0 {
acc = acc.double();
if x % 2 == 1 {
xself += acc;
}
x >>= 1;
}
// finally, flip the sign
if crate::BLS_X_IS_NEGATIVE {
xself = -xself;
}
xself
}
/// Clears the cofactor, using [Budroni-Pintore](https://ia.cr/2017/419).
/// This is equivalent to multiplying by $h\_\textrm{eff} = 3(z^2 - 1) \cdot
/// h_2$, where $h_2$ is the cofactor of $\mathbb{G}\_2$ and $z$ is the
/// parameter of BLS12-381.
pub fn clear_cofactor(&self) -> G2Projective {
let t1 = self.mul_by_x(); // [x] P
let t2 = self.psi(); // psi(P)
self.double().psi2() // psi^2(2P)
+ (t1 + t2).mul_by_x() // psi^2(2P) + [x^2] P + [x] psi(P)
- t1 // psi^2(2P) + [x^2 - x] P + [x] psi(P)
- t2 // psi^2(2P) + [x^2 - x] P + [x - 1] psi(P)
- self // psi^2(2P) + [x^2 - x - 1] P + [x - 1] psi(P)
}
/// Converts a batch of `G2Projective` elements into `G2Affine` elements. This
/// function will panic if `p.len() != q.len()`.
pub fn batch_normalize(p: &[Self], q: &mut [G2Affine]) {
assert_eq!(p.len(), q.len());
let mut acc = Fp2::one();
for (p, q) in p.iter().zip(q.iter_mut()) {
// We use the `x` field of `G2Affine` to store the product
// of previous z-coordinates seen.
q.x = acc;
// We will end up skipping all identities in p
acc = Fp2::conditional_select(&(acc * p.z), &acc, p.is_identity());
}
// This is the inverse, as all z-coordinates are nonzero and the ones
// that are not are skipped.
acc = acc.invert().unwrap();
for (p, q) in p.iter().rev().zip(q.iter_mut().rev()) {
let skip = p.is_identity();
// Compute tmp = 1/z
let tmp = q.x * acc;
// Cancel out z-coordinate in denominator of `acc`
acc = Fp2::conditional_select(&(acc * p.z), &acc, skip);
// Set the coordinates to the correct value
q.x = p.x * tmp;
q.y = p.y * tmp;
q.infinity = Choice::from(0u8);
*q = G2Affine::conditional_select(q, &G2Affine::identity(), skip);
}
}
/// Returns true if this element is the identity (the point at infinity).
#[inline]
pub fn is_identity(&self) -> Choice {
self.z.is_zero()
}
/// Returns true if this point is on the curve. This should always return
/// true unless an "unchecked" API was used.
pub fn is_on_curve(&self) -> Choice {
// Y^2 Z = X^3 + b Z^3
(self.y.square() * self.z).ct_eq(&(self.x.square() * self.x + self.z.square() * self.z * B))
| self.z.is_zero()
}
}
#[derive(Clone, Copy)]
pub struct G2Compressed([u8; 96]);
impl fmt::Debug for G2Compressed {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0[..].fmt(f)
}
}
impl Default for G2Compressed {
fn default() -> Self {
G2Compressed([0; 96])
}
}
#[cfg(feature = "zeroize")]
impl zeroize::DefaultIsZeroes for G2Compressed {}
impl AsRef<[u8]> for G2Compressed {
fn as_ref(&self) -> &[u8] {
&self.0
}
}
impl AsMut<[u8]> for G2Compressed {
fn as_mut(&mut self) -> &mut [u8] {
&mut self.0
}
}
impl ConstantTimeEq for G2Compressed {
fn ct_eq(&self, other: &Self) -> Choice {
self.0.ct_eq(&other.0)
}
}
impl Eq for G2Compressed {}
impl PartialEq for G2Compressed {
#[inline]
fn eq(&self, other: &Self) -> bool {
bool::from(self.ct_eq(other))
}
}
#[derive(Clone, Copy)]
pub struct G2Uncompressed([u8; 192]);
impl fmt::Debug for G2Uncompressed {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0[..].fmt(f)
}
}
impl Default for G2Uncompressed {
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | true |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/zkcrypto/bls12_381/src/fp6.rs | zkcrypto/bls12_381/src/fp6.rs | use crate::fp::*;
use crate::fp2::*;
use core::fmt;
use core::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign};
use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption};
#[cfg(feature = "pairings")]
use rand_core::RngCore;
/// This represents an element $c_0 + c_1 v + c_2 v^2$ of $\mathbb{F}_{p^6} = \mathbb{F}_{p^2} / v^3 - u - 1$.
pub struct Fp6 {
pub c0: Fp2,
pub c1: Fp2,
pub c2: Fp2,
}
impl From<Fp> for Fp6 {
fn from(f: Fp) -> Fp6 {
Fp6 {
c0: Fp2::from(f),
c1: Fp2::zero(),
c2: Fp2::zero(),
}
}
}
impl From<Fp2> for Fp6 {
fn from(f: Fp2) -> Fp6 {
Fp6 {
c0: f,
c1: Fp2::zero(),
c2: Fp2::zero(),
}
}
}
impl PartialEq for Fp6 {
fn eq(&self, other: &Fp6) -> bool {
self.ct_eq(other).into()
}
}
impl Copy for Fp6 {}
impl Clone for Fp6 {
#[inline]
fn clone(&self) -> Self {
*self
}
}
impl Default for Fp6 {
fn default() -> Self {
Fp6::zero()
}
}
#[cfg(feature = "zeroize")]
impl zeroize::DefaultIsZeroes for Fp6 {}
impl fmt::Debug for Fp6 {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?} + ({:?})*v + ({:?})*v^2", self.c0, self.c1, self.c2)
}
}
impl ConditionallySelectable for Fp6 {
#[inline(always)]
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
Fp6 {
c0: Fp2::conditional_select(&a.c0, &b.c0, choice),
c1: Fp2::conditional_select(&a.c1, &b.c1, choice),
c2: Fp2::conditional_select(&a.c2, &b.c2, choice),
}
}
}
impl ConstantTimeEq for Fp6 {
#[inline(always)]
fn ct_eq(&self, other: &Self) -> Choice {
self.c0.ct_eq(&other.c0) & self.c1.ct_eq(&other.c1) & self.c2.ct_eq(&other.c2)
}
}
impl Fp6 {
#[inline]
pub fn zero() -> Self {
Fp6 {
c0: Fp2::zero(),
c1: Fp2::zero(),
c2: Fp2::zero(),
}
}
#[inline]
pub fn one() -> Self {
Fp6 {
c0: Fp2::one(),
c1: Fp2::zero(),
c2: Fp2::zero(),
}
}
#[cfg(feature = "pairings")]
pub(crate) fn random(mut rng: impl RngCore) -> Self {
Fp6 {
c0: Fp2::random(&mut rng),
c1: Fp2::random(&mut rng),
c2: Fp2::random(&mut rng),
}
}
pub fn mul_by_1(&self, c1: &Fp2) -> Fp6 {
Fp6 {
c0: (self.c2 * c1).mul_by_nonresidue(),
c1: self.c0 * c1,
c2: self.c1 * c1,
}
}
pub fn mul_by_01(&self, c0: &Fp2, c1: &Fp2) -> Fp6 {
let a_a = self.c0 * c0;
let b_b = self.c1 * c1;
let t1 = (self.c2 * c1).mul_by_nonresidue() + a_a;
let t2 = (c0 + c1) * (self.c0 + self.c1) - a_a - b_b;
let t3 = self.c2 * c0 + b_b;
Fp6 {
c0: t1,
c1: t2,
c2: t3,
}
}
/// Multiply by quadratic nonresidue v.
pub fn mul_by_nonresidue(&self) -> Self {
// Given a + bv + cv^2, this produces
// av + bv^2 + cv^3
// but because v^3 = u + 1, we have
// c(u + 1) + av + v^2
Fp6 {
c0: self.c2.mul_by_nonresidue(),
c1: self.c0,
c2: self.c1,
}
}
/// Raises this element to p.
#[inline(always)]
pub fn frobenius_map(&self) -> Self {
let c0 = self.c0.frobenius_map();
let c1 = self.c1.frobenius_map();
let c2 = self.c2.frobenius_map();
// c1 = c1 * (u + 1)^((p - 1) / 3)
let c1 = c1
* Fp2 {
c0: Fp::zero(),
c1: Fp::from_raw_unchecked([
0xcd03_c9e4_8671_f071,
0x5dab_2246_1fcd_a5d2,
0x5870_42af_d385_1b95,
0x8eb6_0ebe_01ba_cb9e,
0x03f9_7d6e_83d0_50d2,
0x18f0_2065_5463_8741,
]),
};
// c2 = c2 * (u + 1)^((2p - 2) / 3)
let c2 = c2
* Fp2 {
c0: Fp::from_raw_unchecked([
0x890d_c9e4_8675_45c3,
0x2af3_2253_3285_a5d5,
0x5088_0866_309b_7e2c,
0xa20d_1b8c_7e88_1024,
0x14e4_f04f_e2db_9068,
0x14e5_6d3f_1564_853a,
]),
c1: Fp::zero(),
};
Fp6 { c0, c1, c2 }
}
#[inline(always)]
pub fn is_zero(&self) -> Choice {
self.c0.is_zero() & self.c1.is_zero() & self.c2.is_zero()
}
/// Returns `c = self * b`.
///
/// Implements the full-tower interleaving strategy from
/// [ePrint 2022-376](https://eprint.iacr.org/2022/367).
#[inline]
fn mul_interleaved(&self, b: &Self) -> Self {
// The intuition for this algorithm is that we can look at F_p^6 as a direct
// extension of F_p^2, and express the overall operations down to the base field
// F_p instead of only over F_p^2. This enables us to interleave multiplications
// and reductions, ensuring that we don't require double-width intermediate
// representations (with around twice as many limbs as F_p elements).
// We want to express the multiplication c = a x b, where a = (a_0, a_1, a_2) is
// an element of F_p^6, and a_i = (a_i,0, a_i,1) is an element of F_p^2. The fully
// expanded multiplication is given by (2022-376 §5):
//
// c_0,0 = a_0,0 b_0,0 - a_0,1 b_0,1 + a_1,0 b_2,0 - a_1,1 b_2,1 + a_2,0 b_1,0 - a_2,1 b_1,1
// - a_1,0 b_2,1 - a_1,1 b_2,0 - a_2,0 b_1,1 - a_2,1 b_1,0.
// = a_0,0 b_0,0 - a_0,1 b_0,1 + a_1,0 (b_2,0 - b_2,1) - a_1,1 (b_2,0 + b_2,1)
// + a_2,0 (b_1,0 - b_1,1) - a_2,1 (b_1,0 + b_1,1).
//
// c_0,1 = a_0,0 b_0,1 + a_0,1 b_0,0 + a_1,0 b_2,1 + a_1,1 b_2,0 + a_2,0 b_1,1 + a_2,1 b_1,0
// + a_1,0 b_2,0 - a_1,1 b_2,1 + a_2,0 b_1,0 - a_2,1 b_1,1.
// = a_0,0 b_0,1 + a_0,1 b_0,0 + a_1,0(b_2,0 + b_2,1) + a_1,1(b_2,0 - b_2,1)
// + a_2,0(b_1,0 + b_1,1) + a_2,1(b_1,0 - b_1,1).
//
// c_1,0 = a_0,0 b_1,0 - a_0,1 b_1,1 + a_1,0 b_0,0 - a_1,1 b_0,1 + a_2,0 b_2,0 - a_2,1 b_2,1
// - a_2,0 b_2,1 - a_2,1 b_2,0.
// = a_0,0 b_1,0 - a_0,1 b_1,1 + a_1,0 b_0,0 - a_1,1 b_0,1 + a_2,0(b_2,0 - b_2,1)
// - a_2,1(b_2,0 + b_2,1).
//
// c_1,1 = a_0,0 b_1,1 + a_0,1 b_1,0 + a_1,0 b_0,1 + a_1,1 b_0,0 + a_2,0 b_2,1 + a_2,1 b_2,0
// + a_2,0 b_2,0 - a_2,1 b_2,1
// = a_0,0 b_1,1 + a_0,1 b_1,0 + a_1,0 b_0,1 + a_1,1 b_0,0 + a_2,0(b_2,0 + b_2,1)
// + a_2,1(b_2,0 - b_2,1).
//
// c_2,0 = a_0,0 b_2,0 - a_0,1 b_2,1 + a_1,0 b_1,0 - a_1,1 b_1,1 + a_2,0 b_0,0 - a_2,1 b_0,1.
// c_2,1 = a_0,0 b_2,1 + a_0,1 b_2,0 + a_1,0 b_1,1 + a_1,1 b_1,0 + a_2,0 b_0,1 + a_2,1 b_0,0.
//
// Each of these is a "sum of products", which we can compute efficiently.
let a = self;
let b10_p_b11 = b.c1.c0 + b.c1.c1;
let b10_m_b11 = b.c1.c0 - b.c1.c1;
let b20_p_b21 = b.c2.c0 + b.c2.c1;
let b20_m_b21 = b.c2.c0 - b.c2.c1;
Fp6 {
c0: Fp2 {
c0: Fp::sum_of_products(
[a.c0.c0, -a.c0.c1, a.c1.c0, -a.c1.c1, a.c2.c0, -a.c2.c1],
[b.c0.c0, b.c0.c1, b20_m_b21, b20_p_b21, b10_m_b11, b10_p_b11],
),
c1: Fp::sum_of_products(
[a.c0.c0, a.c0.c1, a.c1.c0, a.c1.c1, a.c2.c0, a.c2.c1],
[b.c0.c1, b.c0.c0, b20_p_b21, b20_m_b21, b10_p_b11, b10_m_b11],
),
},
c1: Fp2 {
c0: Fp::sum_of_products(
[a.c0.c0, -a.c0.c1, a.c1.c0, -a.c1.c1, a.c2.c0, -a.c2.c1],
[b.c1.c0, b.c1.c1, b.c0.c0, b.c0.c1, b20_m_b21, b20_p_b21],
),
c1: Fp::sum_of_products(
[a.c0.c0, a.c0.c1, a.c1.c0, a.c1.c1, a.c2.c0, a.c2.c1],
[b.c1.c1, b.c1.c0, b.c0.c1, b.c0.c0, b20_p_b21, b20_m_b21],
),
},
c2: Fp2 {
c0: Fp::sum_of_products(
[a.c0.c0, -a.c0.c1, a.c1.c0, -a.c1.c1, a.c2.c0, -a.c2.c1],
[b.c2.c0, b.c2.c1, b.c1.c0, b.c1.c1, b.c0.c0, b.c0.c1],
),
c1: Fp::sum_of_products(
[a.c0.c0, a.c0.c1, a.c1.c0, a.c1.c1, a.c2.c0, a.c2.c1],
[b.c2.c1, b.c2.c0, b.c1.c1, b.c1.c0, b.c0.c1, b.c0.c0],
),
},
}
}
#[inline]
pub fn square(&self) -> Self {
let s0 = self.c0.square();
let ab = self.c0 * self.c1;
let s1 = ab + ab;
let s2 = (self.c0 - self.c1 + self.c2).square();
let bc = self.c1 * self.c2;
let s3 = bc + bc;
let s4 = self.c2.square();
Fp6 {
c0: s3.mul_by_nonresidue() + s0,
c1: s4.mul_by_nonresidue() + s1,
c2: s1 + s2 + s3 - s0 - s4,
}
}
#[inline]
pub fn invert(&self) -> CtOption<Self> {
let c0 = (self.c1 * self.c2).mul_by_nonresidue();
let c0 = self.c0.square() - c0;
let c1 = self.c2.square().mul_by_nonresidue();
let c1 = c1 - (self.c0 * self.c1);
let c2 = self.c1.square();
let c2 = c2 - (self.c0 * self.c2);
let tmp = ((self.c1 * c2) + (self.c2 * c1)).mul_by_nonresidue();
let tmp = tmp + (self.c0 * c0);
tmp.invert().map(|t| Fp6 {
c0: t * c0,
c1: t * c1,
c2: t * c2,
})
}
}
impl<'b> Mul<&'b Fp6> for &Fp6 {
type Output = Fp6;
#[inline]
fn mul(self, other: &'b Fp6) -> Self::Output {
self.mul_interleaved(other)
}
}
impl<'b> Add<&'b Fp6> for &Fp6 {
type Output = Fp6;
#[inline]
fn add(self, rhs: &'b Fp6) -> Self::Output {
Fp6 {
c0: self.c0 + rhs.c0,
c1: self.c1 + rhs.c1,
c2: self.c2 + rhs.c2,
}
}
}
impl Neg for &Fp6 {
type Output = Fp6;
#[inline]
fn neg(self) -> Self::Output {
Fp6 {
c0: -self.c0,
c1: -self.c1,
c2: -self.c2,
}
}
}
impl Neg for Fp6 {
type Output = Fp6;
#[inline]
fn neg(self) -> Self::Output {
-&self
}
}
impl<'b> Sub<&'b Fp6> for &Fp6 {
type Output = Fp6;
#[inline]
fn sub(self, rhs: &'b Fp6) -> Self::Output {
Fp6 {
c0: self.c0 - rhs.c0,
c1: self.c1 - rhs.c1,
c2: self.c2 - rhs.c2,
}
}
}
impl_binops_additive!(Fp6, Fp6);
impl_binops_multiplicative!(Fp6, Fp6);
#[test]
fn test_arithmetic() {
use crate::fp::*;
let a = Fp6 {
c0: Fp2 {
c0: Fp::from_raw_unchecked([
0x47f9_cb98_b1b8_2d58,
0x5fe9_11eb_a3aa_1d9d,
0x96bf_1b5f_4dd8_1db3,
0x8100_d27c_c925_9f5b,
0xafa2_0b96_7464_0eab,
0x09bb_cea7_d8d9_497d,
]),
c1: Fp::from_raw_unchecked([
0x0303_cb98_b166_2daa,
0xd931_10aa_0a62_1d5a,
0xbfa9_820c_5be4_a468,
0x0ba3_643e_cb05_a348,
0xdc35_34bb_1f1c_25a6,
0x06c3_05bb_19c0_e1c1,
]),
},
c1: Fp2 {
c0: Fp::from_raw_unchecked([
0x46f9_cb98_b162_d858,
0x0be9_109c_f7aa_1d57,
0xc791_bc55_fece_41d2,
0xf84c_5770_4e38_5ec2,
0xcb49_c1d9_c010_e60f,
0x0acd_b8e1_58bf_e3c8,
]),
c1: Fp::from_raw_unchecked([
0x8aef_cb98_b15f_8306,
0x3ea1_108f_e4f2_1d54,
0xcf79_f69f_a1b7_df3b,
0xe4f5_4aa1_d16b_1a3c,
0xba5e_4ef8_6105_a679,
0x0ed8_6c07_97be_e5cf,
]),
},
c2: Fp2 {
c0: Fp::from_raw_unchecked([
0xcee5_cb98_b15c_2db4,
0x7159_1082_d23a_1d51,
0xd762_30e9_44a1_7ca4,
0xd19e_3dd3_549d_d5b6,
0xa972_dc17_01fa_66e3,
0x12e3_1f2d_d6bd_e7d6,
]),
c1: Fp::from_raw_unchecked([
0xad2a_cb98_b173_2d9d,
0x2cfd_10dd_0696_1d64,
0x0739_6b86_c6ef_24e8,
0xbd76_e2fd_b1bf_c820,
0x6afe_a7f6_de94_d0d5,
0x1099_4b0c_5744_c040,
]),
},
};
let b = Fp6 {
c0: Fp2 {
c0: Fp::from_raw_unchecked([
0xf120_cb98_b16f_d84b,
0x5fb5_10cf_f3de_1d61,
0x0f21_a5d0_69d8_c251,
0xaa1f_d62f_34f2_839a,
0x5a13_3515_7f89_913f,
0x14a3_fe32_9643_c247,
]),
c1: Fp::from_raw_unchecked([
0x3516_cb98_b16c_82f9,
0x926d_10c2_e126_1d5f,
0x1709_e01a_0cc2_5fba,
0x96c8_c960_b825_3f14,
0x4927_c234_207e_51a9,
0x18ae_b158_d542_c44e,
]),
},
c1: Fp2 {
c0: Fp::from_raw_unchecked([
0xbf0d_cb98_b169_82fc,
0xa679_10b7_1d1a_1d5c,
0xb7c1_47c2_b8fb_06ff,
0x1efa_710d_47d2_e7ce,
0xed20_a79c_7e27_653c,
0x02b8_5294_dac1_dfba,
]),
c1: Fp::from_raw_unchecked([
0x9d52_cb98_b180_82e5,
0x621d_1111_5176_1d6f,
0xe798_8260_3b48_af43,
0x0ad3_1637_a4f4_da37,
0xaeac_737c_5ac1_cf2e,
0x006e_7e73_5b48_b824,
]),
},
c2: Fp2 {
c0: Fp::from_raw_unchecked([
0xe148_cb98_b17d_2d93,
0x94d5_1104_3ebe_1d6c,
0xef80_bca9_de32_4cac,
0xf77c_0969_2827_95b1,
0x9dc1_009a_fbb6_8f97,
0x0479_3199_9a47_ba2b,
]),
c1: Fp::from_raw_unchecked([
0x253e_cb98_b179_d841,
0xc78d_10f7_2c06_1d6a,
0xf768_f6f3_811b_ea15,
0xe424_fc9a_ab5a_512b,
0x8cd5_8db9_9cab_5001,
0x0883_e4bf_d946_bc32,
]),
},
};
let c = Fp6 {
c0: Fp2 {
c0: Fp::from_raw_unchecked([
0x6934_cb98_b176_82ef,
0xfa45_10ea_194e_1d67,
0xff51_313d_2405_877e,
0xd0cd_efcc_2e8d_0ca5,
0x7bea_1ad8_3da0_106b,
0x0c8e_97e6_1845_be39,
]),
c1: Fp::from_raw_unchecked([
0x4779_cb98_b18d_82d8,
0xb5e9_1144_4daa_1d7a,
0x2f28_6bda_a653_2fc2,
0xbca6_94f6_8bae_ff0f,
0x3d75_e6b8_1a3a_7a5d,
0x0a44_c3c4_98cc_96a3,
]),
},
c1: Fp2 {
c0: Fp::from_raw_unchecked([
0x8b6f_cb98_b18a_2d86,
0xe8a1_1137_3af2_1d77,
0x3710_a624_493c_cd2b,
0xa94f_8828_0ee1_ba89,
0x2c8a_73d6_bb2f_3ac7,
0x0e4f_76ea_d7cb_98aa,
]),
c1: Fp::from_raw_unchecked([
0xcf65_cb98_b186_d834,
0x1b59_112a_283a_1d74,
0x3ef8_e06d_ec26_6a95,
0x95f8_7b59_9214_7603,
0x1b9f_00f5_5c23_fb31,
0x125a_2a11_16ca_9ab1,
]),
},
c2: Fp2 {
c0: Fp::from_raw_unchecked([
0x135b_cb98_b183_82e2,
0x4e11_111d_1582_1d72,
0x46e1_1ab7_8f10_07fe,
0x82a1_6e8b_1547_317d,
0x0ab3_8e13_fd18_bb9b,
0x1664_dd37_55c9_9cb8,
]),
c1: Fp::from_raw_unchecked([
0xce65_cb98_b131_8334,
0xc759_0fdb_7c3a_1d2e,
0x6fcb_8164_9d1c_8eb3,
0x0d44_004d_1727_356a,
0x3746_b738_a7d0_d296,
0x136c_144a_96b1_34fc,
]),
},
};
assert_eq!(a.square(), a * a);
assert_eq!(b.square(), b * b);
assert_eq!(c.square(), c * c);
assert_eq!((a + b) * c.square(), (c * c * a) + (c * c * b));
assert_eq!(
a.invert().unwrap() * b.invert().unwrap(),
(a * b).invert().unwrap()
);
assert_eq!(a.invert().unwrap() * a, Fp6::one());
}
#[cfg(feature = "zeroize")]
#[test]
fn test_zeroize() {
use zeroize::Zeroize;
let mut a = Fp6::one();
a.zeroize();
assert!(bool::from(a.is_zero()));
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/zkcrypto/bls12_381/src/pairings.rs | zkcrypto/bls12_381/src/pairings.rs | use crate::fp::Fp;
use crate::fp12::Fp12;
use crate::fp2::Fp2;
use crate::fp6::Fp6;
use crate::{G1Affine, G1Projective, G2Affine, G2Projective, Scalar, BLS_X, BLS_X_IS_NEGATIVE};
use core::borrow::Borrow;
use core::fmt;
use core::iter::Sum;
use core::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign};
use group::Group;
use pairing::{Engine, PairingCurveAffine};
use rand_core::RngCore;
use subtle::{Choice, ConditionallySelectable, ConstantTimeEq};
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
#[cfg(feature = "alloc")]
use pairing::MultiMillerLoop;
/// Represents results of a Miller loop, one of the most expensive portions
/// of the pairing function. `MillerLoopResult`s cannot be compared with each
/// other until `.final_exponentiation()` is called, which is also expensive.
#[cfg_attr(docsrs, doc(cfg(feature = "pairings")))]
#[derive(Copy, Clone, Debug)]
pub struct MillerLoopResult(pub(crate) Fp12);
impl Default for MillerLoopResult {
fn default() -> Self {
MillerLoopResult(Fp12::one())
}
}
#[cfg(feature = "zeroize")]
impl zeroize::DefaultIsZeroes for MillerLoopResult {}
impl ConditionallySelectable for MillerLoopResult {
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
MillerLoopResult(Fp12::conditional_select(&a.0, &b.0, choice))
}
}
impl MillerLoopResult {
    /// This performs a "final exponentiation" routine to convert the result
    /// of a Miller loop into an element of `Gt` with help of efficient squaring
    /// operation in the so-called `cyclotomic subgroup` of `Fq6` so that
    /// it can be compared with other elements of `Gt`.
    pub fn final_exponentiation(&self) -> Gt {
        // Squaring in the quartic extension Fp4, used as the building block of
        // `cyclotomic_square` below. Returns the two Fp2 components of the square.
        #[must_use]
        fn fp4_square(a: Fp2, b: Fp2) -> (Fp2, Fp2) {
            let t0 = a.square();
            let t1 = b.square();
            let mut t2 = t1.mul_by_nonresidue();
            let c0 = t2 + t0;
            t2 = a + b;
            t2 = t2.square();
            t2 -= t0;
            let c1 = t2 - t1;
            (c0, c1)
        }
        // Adaptation of Algorithm 5.5.4, Guide to Pairing-Based Cryptography
        // Faster Squaring in the Cyclotomic Subgroup of Sixth Degree Extensions
        // https://eprint.iacr.org/2009/565.pdf
        //
        // Valid only for elements of the cyclotomic subgroup (i.e. after the
        // "easy part" of the final exponentiation has been applied); cheaper
        // than a generic Fp12 squaring.
        #[must_use]
        fn cyclotomic_square(f: Fp12) -> Fp12 {
            let mut z0 = f.c0.c0;
            let mut z4 = f.c0.c1;
            let mut z3 = f.c0.c2;
            let mut z2 = f.c1.c0;
            let mut z1 = f.c1.c1;
            let mut z5 = f.c1.c2;
            let (t0, t1) = fp4_square(z0, z1);
            // For A
            z0 = t0 - z0;
            z0 = z0 + z0 + t0;
            z1 = t1 + z1;
            z1 = z1 + z1 + t1;
            let (mut t0, t1) = fp4_square(z2, z3);
            let (t2, t3) = fp4_square(z4, z5);
            // For C
            z4 = t0 - z4;
            z4 = z4 + z4 + t0;
            z5 = t1 + z5;
            z5 = z5 + z5 + t1;
            // For B
            t0 = t3.mul_by_nonresidue();
            z2 = t0 + z2;
            z2 = z2 + z2 + t0;
            z3 = t2 - z3;
            z3 = z3 + z3 + t2;
            Fp12 {
                c0: Fp6 {
                    c0: z0,
                    c1: z4,
                    c2: z3,
                },
                c1: Fp6 {
                    c0: z2,
                    c1: z1,
                    c2: z5,
                },
            }
        }
        // Raises `f` to the (absolute value of the) BLS parameter x via
        // square-and-multiply over the bits of BLS_X, then conjugates to
        // account for x being negative for BLS12-381.
        // NOTE(review): the misspelling "cycolotomic" is preserved from the
        // upstream crate; it is a private helper so the name is cosmetic only.
        #[must_use]
        fn cycolotomic_exp(f: Fp12) -> Fp12 {
            let x = BLS_X;
            let mut tmp = Fp12::one();
            let mut found_one = false;
            for i in (0..64).rev().map(|b| ((x >> b) & 1) == 1) {
                if found_one {
                    tmp = cyclotomic_square(tmp)
                } else {
                    // Skip leading zero bits of x.
                    found_one = i;
                }
                if i {
                    tmp *= f;
                }
            }
            tmp.conjugate()
        }
        let mut f = self.0;
        // frobenius_map applied six times computes f^(p^6), i.e. the conjugate;
        // combined with f^{-1} below this is the "easy part" f^(p^6 - 1).
        let mut t0 = f
            .frobenius_map()
            .frobenius_map()
            .frobenius_map()
            .frobenius_map()
            .frobenius_map()
            .frobenius_map();
        Gt(f.invert()
            .map(|mut t1| {
                // Easy part: t2 = f^(p^6 - 1), then t2^(p^2) * t2 = f^((p^6-1)(p^2+1)).
                let mut t2 = t0 * t1;
                t1 = t2;
                t2 = t2.frobenius_map().frobenius_map();
                t2 *= t1;
                // Hard part: addition chain over cyclotomic squarings,
                // exponentiations by x, Frobenius maps and conjugations that
                // completes the exponent (p^12 - 1) / r.
                t1 = cyclotomic_square(t2).conjugate();
                let mut t3 = cycolotomic_exp(t2);
                let mut t4 = cyclotomic_square(t3);
                let mut t5 = t1 * t3;
                t1 = cycolotomic_exp(t5);
                t0 = cycolotomic_exp(t1);
                let mut t6 = cycolotomic_exp(t0);
                t6 *= t4;
                t4 = cycolotomic_exp(t6);
                t5 = t5.conjugate();
                t4 *= t5 * t2;
                t5 = t2.conjugate();
                t1 *= t2;
                t1 = t1.frobenius_map().frobenius_map().frobenius_map();
                t6 *= t5;
                t6 = t6.frobenius_map();
                t3 *= t0;
                t3 = t3.frobenius_map().frobenius_map();
                t3 *= t1;
                t3 *= t6;
                f = t3 * t4;
                f
            })
            // We unwrap() because `MillerLoopResult` can only be constructed
            // by a function within this crate, and we uphold the invariant
            // that the enclosed value is nonzero.
            .unwrap())
    }
}
impl<'b> Add<&'b MillerLoopResult> for &MillerLoopResult {
    type Output = MillerLoopResult;
    #[inline]
    fn add(self, rhs: &'b MillerLoopResult) -> MillerLoopResult {
        // Miller loop results combine multiplicatively in Fp12; the API is
        // written additively to match the additive group notation used by `Gt`.
        MillerLoopResult(self.0 * rhs.0)
    }
}
impl_add_binop_specify_output!(MillerLoopResult, MillerLoopResult, MillerLoopResult);
impl AddAssign<MillerLoopResult> for MillerLoopResult {
    #[inline]
    fn add_assign(&mut self, rhs: MillerLoopResult) {
        *self = *self + rhs;
    }
}
impl<'b> AddAssign<&'b MillerLoopResult> for MillerLoopResult {
    #[inline]
    fn add_assign(&mut self, rhs: &'b MillerLoopResult) {
        *self = *self + rhs;
    }
}
/// This is an element of $\mathbb{G}_T$, the target group of the pairing function. As with
/// $\mathbb{G}_1$ and $\mathbb{G}_2$ this group has order $q$.
///
/// Typically, $\mathbb{G}_T$ is written multiplicatively but we will write it additively to
/// keep code and abstractions consistent.
#[cfg_attr(docsrs, doc(cfg(feature = "pairings")))]
#[derive(Copy, Clone, Debug)]
pub struct Gt(pub Fp12);
impl Default for Gt {
    fn default() -> Self {
        Self::identity()
    }
}
#[cfg(feature = "zeroize")]
impl zeroize::DefaultIsZeroes for Gt {}
impl fmt::Display for Gt {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // No dedicated human-readable form; fall back to the Debug output.
        write!(f, "{:?}", self)
    }
}
impl ConstantTimeEq for Gt {
    fn ct_eq(&self, other: &Self) -> Choice {
        self.0.ct_eq(&other.0)
    }
}
impl ConditionallySelectable for Gt {
    fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
        Gt(Fp12::conditional_select(&a.0, &b.0, choice))
    }
}
impl Eq for Gt {}
impl PartialEq for Gt {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        // Equality is constant-time; only the final bool is exposed.
        bool::from(self.ct_eq(other))
    }
}
impl Gt {
    /// Returns the group identity, which is $1$.
    pub fn identity() -> Gt {
        Gt(Fp12::one())
    }
    /// Doubles this group element.
    ///
    /// `Gt` is written additively, so "doubling" is squaring in Fp12.
    pub fn double(&self) -> Gt {
        Gt(self.0.square())
    }
}
impl Neg for &Gt {
    type Output = Gt;
    #[inline]
    fn neg(self) -> Gt {
        // The element is unitary, so we just conjugate.
        Gt(self.0.conjugate())
    }
}
impl Neg for Gt {
    type Output = Gt;
    #[inline]
    fn neg(self) -> Gt {
        -&self
    }
}
impl<'b> Add<&'b Gt> for &Gt {
    type Output = Gt;
    #[inline]
    fn add(self, rhs: &'b Gt) -> Gt {
        // Additive notation for the multiplicative group: "+" is Fp12 multiplication.
        Gt(self.0 * rhs.0)
    }
}
impl<'b> Sub<&'b Gt> for &Gt {
    type Output = Gt;
    #[inline]
    fn sub(self, rhs: &'b Gt) -> Gt {
        self + (-rhs)
    }
}
impl<'b> Mul<&'b Scalar> for &Gt {
    type Output = Gt;
    fn mul(self, other: &'b Scalar) -> Self::Output {
        let mut acc = Gt::identity();
        // This is a simple double-and-add implementation of group element
        // multiplication, moving from most significant to least
        // significant bit of the scalar.
        //
        // We skip the leading bit because it's always unset: the scalar
        // field modulus is 255 bits, so the top bit of the 256-bit
        // little-endian encoding is zero.
        //
        // The conditional_select keeps the loop constant-time with respect
        // to the scalar bits.
        for bit in other
            .to_bytes()
            .iter()
            .rev()
            .flat_map(|byte| (0..8).rev().map(move |i| Choice::from((byte >> i) & 1u8)))
            .skip(1)
        {
            acc = acc.double();
            acc = Gt::conditional_select(&acc, &(acc + self), bit);
        }
        acc
    }
}
impl_binops_additive!(Gt, Gt);
impl_binops_multiplicative!(Gt, Scalar);
impl<T> Sum<T> for Gt
where
    T: Borrow<Gt>,
{
    // Sums (i.e. multiplies, in multiplicative notation) any iterator of Gt
    // elements; an empty iterator yields the identity.
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = T>,
    {
        iter.fold(Self::identity(), |acc, item| acc + item.borrow())
    }
}
impl Group for Gt {
    type Scalar = Scalar;
    fn random(mut rng: impl RngCore) -> Self {
        loop {
            let inner = Fp12::random(&mut rng);
            // Not all elements of Fp12 are elements of the prime-order multiplicative
            // subgroup. We run the random element through final_exponentiation to obtain
            // a valid element, which requires that it is non-zero.
            if !bool::from(inner.is_zero()) {
                return MillerLoopResult(inner).final_exponentiation();
            }
        }
    }
    fn identity() -> Self {
        Self::identity()
    }
    fn generator() -> Self {
        // Hard-coded value of pairing(&G1Affine::generator(), &G2Affine::generator()),
        // stored to avoid paying for a pairing at runtime (checked by
        // `test_gt_generator` below).
        Gt(Fp12 {
            c0: Fp6 {
                c0: Fp2 {
                    c0: Fp::from_raw_unchecked([
                        0x1972_e433_a01f_85c5,
                        0x97d3_2b76_fd77_2538,
                        0xc8ce_546f_c96b_cdf9,
                        0xcef6_3e73_66d4_0614,
                        0xa611_3427_8184_3780,
                        0x13f3_448a_3fc6_d825,
                    ]),
                    c1: Fp::from_raw_unchecked([
                        0xd263_31b0_2e9d_6995,
                        0x9d68_a482_f779_7e7d,
                        0x9c9b_2924_8d39_ea92,
                        0xf480_1ca2_e131_07aa,
                        0xa16c_0732_bdbc_b066,
                        0x083c_a4af_ba36_0478,
                    ]),
                },
                c1: Fp2 {
                    c0: Fp::from_raw_unchecked([
                        0x59e2_61db_0916_b641,
                        0x2716_b6f4_b23e_960d,
                        0xc8e5_5b10_a0bd_9c45,
                        0x0bdb_0bd9_9c4d_eda8,
                        0x8cf8_9ebf_57fd_aac5,
                        0x12d6_b792_9e77_7a5e,
                    ]),
                    c1: Fp::from_raw_unchecked([
                        0x5fc8_5188_b0e1_5f35,
                        0x34a0_6e3a_8f09_6365,
                        0xdb31_26a6_e02a_d62c,
                        0xfc6f_5aa9_7d9a_990b,
                        0xa12f_55f5_eb89_c210,
                        0x1723_703a_926f_8889,
                    ]),
                },
                c2: Fp2 {
                    c0: Fp::from_raw_unchecked([
                        0x9358_8f29_7182_8778,
                        0x43f6_5b86_11ab_7585,
                        0x3183_aaf5_ec27_9fdf,
                        0xfa73_d7e1_8ac9_9df6,
                        0x64e1_76a6_a64c_99b0,
                        0x179f_a78c_5838_8f1f,
                    ]),
                    c1: Fp::from_raw_unchecked([
                        0x672a_0a11_ca2a_ef12,
                        0x0d11_b9b5_2aa3_f16b,
                        0xa444_12d0_699d_056e,
                        0xc01d_0177_221a_5ba5,
                        0x66e0_cede_6c73_5529,
                        0x05f5_a71e_9fdd_c339,
                    ]),
                },
            },
            c1: Fp6 {
                c0: Fp2 {
                    c0: Fp::from_raw_unchecked([
                        0xd30a_88a1_b062_c679,
                        0x5ac5_6a5d_35fc_8304,
                        0xd0c8_34a6_a81f_290d,
                        0xcd54_30c2_da37_07c7,
                        0xf0c2_7ff7_8050_0af0,
                        0x0924_5da6_e2d7_2eae,
                    ]),
                    c1: Fp::from_raw_unchecked([
                        0x9f2e_0676_791b_5156,
                        0xe2d1_c823_4918_fe13,
                        0x4c9e_459f_3c56_1bf4,
                        0xa3e8_5e53_b9d3_e3c1,
                        0x820a_121e_21a7_0020,
                        0x15af_6183_41c5_9acc,
                    ]),
                },
                c1: Fp2 {
                    c0: Fp::from_raw_unchecked([
                        0x7c95_658c_2499_3ab1,
                        0x73eb_3872_1ca8_86b9,
                        0x5256_d749_4774_34bc,
                        0x8ba4_1902_ea50_4a8b,
                        0x04a3_d3f8_0c86_ce6d,
                        0x18a6_4a87_fb68_6eaa,
                    ]),
                    c1: Fp::from_raw_unchecked([
                        0xbb83_e71b_b920_cf26,
                        0x2a52_77ac_92a7_3945,
                        0xfc0e_e59f_94f0_46a0,
                        0x7158_cdf3_7860_58f7,
                        0x7cc1_061b_82f9_45f6,
                        0x03f8_47aa_9fdb_e567,
                    ]),
                },
                c2: Fp2 {
                    c0: Fp::from_raw_unchecked([
                        0x8078_dba5_6134_e657,
                        0x1cd7_ec9a_4399_8a6e,
                        0xb1aa_599a_1a99_3766,
                        0xc9a0_f62f_0842_ee44,
                        0x8e15_9be3_b605_dffa,
                        0x0c86_ba0d_4af1_3fc2,
                    ]),
                    c1: Fp::from_raw_unchecked([
                        0xe80f_f2a0_6a52_ffb1,
                        0x7694_ca48_721a_906c,
                        0x7583_183e_03b0_8514,
                        0xf567_afdd_40ce_e4e2,
                        0x9a6d_96d2_e526_a5fc,
                        0x197e_9f49_861f_2242,
                    ]),
                },
            },
        })
    }
    fn is_identity(&self) -> Choice {
        self.ct_eq(&Self::identity())
    }
    fn double(&self) -> Self {
        self.double()
    }
}
#[cfg(feature = "alloc")]
#[cfg_attr(docsrs, doc(cfg(all(feature = "pairings", feature = "alloc"))))]
#[derive(Clone, Debug)]
/// This structure contains cached computations pertaining to a $\mathbb{G}_2$
/// element as part of the pairing function (specifically, the Miller loop) and
/// so should be computed whenever a $\mathbb{G}_2$ element is being used in
/// multiple pairings or is otherwise known in advance. This should be used in
/// conjunction with the [`multi_miller_loop`](crate::multi_miller_loop)
/// function provided by this crate.
///
/// Requires the `alloc` and `pairing` crate features to be enabled.
pub struct G2Prepared {
    // Whether the source point was the point at infinity; consumers select
    // away the (generator-derived) coefficients when this is set.
    infinity: Choice,
    // One line-function coefficient triple per doubling/addition step of the
    // Miller loop.
    coeffs: Vec<(Fp2, Fp2, Fp2)>,
}
#[cfg(feature = "alloc")]
impl From<G2Affine> for G2Prepared {
    fn from(q: G2Affine) -> G2Prepared {
        // Driver that records the line-function coefficients of every
        // doubling/addition step instead of evaluating them against a G1 point.
        struct Adder {
            cur: G2Projective,
            base: G2Affine,
            coeffs: Vec<(Fp2, Fp2, Fp2)>,
        }
        impl MillerLoopDriver for Adder {
            type Output = ();
            fn doubling_step(&mut self, _: Self::Output) -> Self::Output {
                let coeffs = doubling_step(&mut self.cur);
                self.coeffs.push(coeffs);
            }
            fn addition_step(&mut self, _: Self::Output) -> Self::Output {
                let coeffs = addition_step(&mut self.cur, &self.base);
                self.coeffs.push(coeffs);
            }
            fn square_output(_: Self::Output) -> Self::Output {}
            fn conjugate(_: Self::Output) -> Self::Output {}
            fn one() -> Self::Output {}
        }
        // The identity has no well-defined line functions, so substitute the
        // generator (constant-time) and remember the identity flag; evaluation
        // sites ignore the coefficients when `infinity` is set.
        let is_identity = q.is_identity();
        let q = G2Affine::conditional_select(&q, &G2Affine::generator(), is_identity);
        let mut adder = Adder {
            cur: G2Projective::from(q),
            base: q,
            coeffs: Vec::with_capacity(68),
        };
        miller_loop(&mut adder);
        // 68 = total doubling + addition steps of the BLS12-381 Miller loop.
        assert_eq!(adder.coeffs.len(), 68);
        G2Prepared {
            infinity: is_identity,
            coeffs: adder.coeffs,
        }
    }
}
#[cfg(feature = "alloc")]
#[cfg_attr(docsrs, doc(cfg(all(feature = "pairings", feature = "alloc"))))]
/// Computes $$\sum_{i=1}^n \textbf{ML}(a_i, b_i)$$ given a series of terms
/// $$(a_1, b_1), (a_2, b_2), ..., (a_n, b_n).$$
///
/// Requires the `alloc` and `pairing` crate features to be enabled.
pub fn multi_miller_loop(terms: &[(&G1Affine, &G2Prepared)]) -> MillerLoopResult {
    // Driver that evaluates every term's precomputed line coefficients against
    // its G1 point, sharing a single accumulator `f` (and thus a single set of
    // Fp12 squarings) across all pairs.
    struct Adder<'a, 'b, 'c> {
        terms: &'c [(&'a G1Affine, &'b G2Prepared)],
        // Position into each G2Prepared's coefficient list; advanced once per step.
        index: usize,
    }
    impl MillerLoopDriver for Adder<'_, '_, '_> {
        type Output = Fp12;
        fn doubling_step(&mut self, mut f: Self::Output) -> Self::Output {
            let index = self.index;
            for term in self.terms {
                let either_identity = term.0.is_identity() | term.1.infinity;
                // Constant-time skip: compute the update unconditionally and
                // select the old value if either input was the identity.
                let new_f = ell(f, &term.1.coeffs[index], term.0);
                f = Fp12::conditional_select(&new_f, &f, either_identity);
            }
            self.index += 1;
            f
        }
        fn addition_step(&mut self, mut f: Self::Output) -> Self::Output {
            let index = self.index;
            for term in self.terms {
                let either_identity = term.0.is_identity() | term.1.infinity;
                let new_f = ell(f, &term.1.coeffs[index], term.0);
                f = Fp12::conditional_select(&new_f, &f, either_identity);
            }
            self.index += 1;
            f
        }
        fn square_output(f: Self::Output) -> Self::Output {
            f.square()
        }
        fn conjugate(f: Self::Output) -> Self::Output {
            f.conjugate()
        }
        fn one() -> Self::Output {
            Fp12::one()
        }
    }
    let mut adder = Adder { terms, index: 0 };
    let tmp = miller_loop(&mut adder);
    MillerLoopResult(tmp)
}
/// Invoke the pairing function without the use of precomputation and other optimizations.
#[cfg_attr(docsrs, doc(cfg(feature = "pairings")))]
pub fn pairing(p: &G1Affine, q: &G2Affine) -> Gt {
    // Driver that computes line coefficients on the fly and immediately
    // evaluates them at `p` — no allocation, single pair only.
    struct Adder {
        cur: G2Projective,
        base: G2Affine,
        p: G1Affine,
    }
    impl MillerLoopDriver for Adder {
        type Output = Fp12;
        fn doubling_step(&mut self, f: Self::Output) -> Self::Output {
            let coeffs = doubling_step(&mut self.cur);
            ell(f, &coeffs, &self.p)
        }
        fn addition_step(&mut self, f: Self::Output) -> Self::Output {
            let coeffs = addition_step(&mut self.cur, &self.base);
            ell(f, &coeffs, &self.p)
        }
        fn square_output(f: Self::Output) -> Self::Output {
            f.square()
        }
        fn conjugate(f: Self::Output) -> Self::Output {
            f.conjugate()
        }
        fn one() -> Self::Output {
            Fp12::one()
        }
    }
    // Substitute generators for identity inputs (constant-time) so the loop
    // always runs over valid points, then force the result to one at the end
    // if either input was the identity: e(O, q) = e(p, O) = 1.
    let either_identity = p.is_identity() | q.is_identity();
    let p = G1Affine::conditional_select(p, &G1Affine::generator(), either_identity);
    let q = G2Affine::conditional_select(q, &G2Affine::generator(), either_identity);
    let mut adder = Adder {
        cur: G2Projective::from(q),
        base: q,
        p,
    };
    let tmp = miller_loop(&mut adder);
    let tmp = MillerLoopResult(Fp12::conditional_select(
        &tmp,
        &Fp12::one(),
        either_identity,
    ));
    tmp.final_exponentiation()
}
/// Internal strategy trait that abstracts what a Miller loop iteration *does*:
/// evaluating lines (`pairing`), evaluating precomputed lines
/// (`multi_miller_loop`), or merely recording coefficients (`G2Prepared`).
trait MillerLoopDriver {
    // Accumulator type threaded through the loop (Fp12, or () when only
    // recording coefficients).
    type Output;
    fn doubling_step(&mut self, f: Self::Output) -> Self::Output;
    fn addition_step(&mut self, f: Self::Output) -> Self::Output;
    fn square_output(f: Self::Output) -> Self::Output;
    fn conjugate(f: Self::Output) -> Self::Output;
    fn one() -> Self::Output;
}
/// This is a "generic" implementation of the Miller loop to avoid duplicating code
/// structure elsewhere; instead, we'll write concrete instantiations of
/// `MillerLoopDriver` for whatever purposes we need (such as caching modes).
fn miller_loop<D: MillerLoopDriver>(driver: &mut D) -> D::Output {
    let mut f = D::one();
    let mut found_one = false;
    // Iterate over the bits of BLS_X >> 1 from most to least significant,
    // skipping leading zeros; the lowest bit of BLS_X is handled by the
    // trailing doubling step below.
    for i in (0..64).rev().map(|b| (((BLS_X >> 1) >> b) & 1) == 1) {
        if !found_one {
            found_one = i;
            continue;
        }
        f = driver.doubling_step(f);
        if i {
            f = driver.addition_step(f);
        }
        f = D::square_output(f);
    }
    f = driver.doubling_step(f);
    // BLS12-381's parameter x is negative, so conjugate to invert the result.
    if BLS_X_IS_NEGATIVE {
        f = D::conjugate(f);
    }
    f
}
/// Evaluates the line function with coefficients `coeffs` at the G1 point `p`
/// and multiplies the accumulator `f` by the (sparse) result.
fn ell(f: Fp12, coeffs: &(Fp2, Fp2, Fp2), p: &G1Affine) -> Fp12 {
    let mut c0 = coeffs.0;
    let mut c1 = coeffs.1;
    // Scale the first two coefficients by the point's affine coordinates.
    c0.c0 *= p.y;
    c0.c1 *= p.y;
    c1.c0 *= p.x;
    c1.c1 *= p.x;
    // Sparse multiplication: only components 0, 1, 4 of the Fp12 operand are
    // nonzero, which mul_by_014 exploits.
    f.mul_by_014(&coeffs.2, &c1, &c0)
}
/// Doubles the running G2 point `r` in place and returns the coefficients of
/// the tangent line at `r`, for later evaluation by `ell`.
fn doubling_step(r: &mut G2Projective) -> (Fp2, Fp2, Fp2) {
    // Adaptation of Algorithm 26, https://eprint.iacr.org/2010/354.pdf
    // (formulas for Jacobian-coordinate doubling with line computation on
    // curves with high-degree twists; statement order matters).
    let tmp0 = r.x.square();
    let tmp1 = r.y.square();
    let tmp2 = tmp1.square();
    let tmp3 = (tmp1 + r.x).square() - tmp0 - tmp2;
    let tmp3 = tmp3 + tmp3;
    let tmp4 = tmp0 + tmp0 + tmp0;
    let tmp6 = r.x + tmp4;
    let tmp5 = tmp4.square();
    let zsquared = r.z.square();
    r.x = tmp5 - tmp3 - tmp3;
    r.z = (r.z + r.y).square() - tmp1 - zsquared;
    r.y = (tmp3 - r.x) * tmp4;
    let tmp2 = tmp2 + tmp2;
    let tmp2 = tmp2 + tmp2;
    let tmp2 = tmp2 + tmp2;
    r.y -= tmp2;
    let tmp3 = tmp4 * zsquared;
    let tmp3 = tmp3 + tmp3;
    let tmp3 = -tmp3;
    let tmp6 = tmp6.square() - tmp0 - tmp5;
    let tmp1 = tmp1 + tmp1;
    let tmp1 = tmp1 + tmp1;
    let tmp6 = tmp6 - tmp1;
    let tmp0 = r.z * zsquared;
    let tmp0 = tmp0 + tmp0;
    (tmp0, tmp3, tmp6)
}
/// Adds the fixed affine point `q` into the running G2 point `r` in place and
/// returns the coefficients of the line through `r` and `q`, for `ell`.
fn addition_step(r: &mut G2Projective, q: &G2Affine) -> (Fp2, Fp2, Fp2) {
    // Adaptation of Algorithm 27, https://eprint.iacr.org/2010/354.pdf
    // (mixed Jacobian/affine addition with line computation; statement order
    // matters).
    let zsquared = r.z.square();
    let ysquared = q.y.square();
    let t0 = zsquared * q.x;
    let t1 = ((q.y + r.z).square() - ysquared - zsquared) * zsquared;
    let t2 = t0 - r.x;
    let t3 = t2.square();
    let t4 = t3 + t3;
    let t4 = t4 + t4;
    let t5 = t4 * t2;
    let t6 = t1 - r.y - r.y;
    let t9 = t6 * q.x;
    let t7 = t4 * r.x;
    r.x = t6.square() - t5 - t7 - t7;
    r.z = (r.z + t2).square() - zsquared - t3;
    let t10 = q.y + r.z;
    let t8 = (t7 - r.x) * t6;
    let t0 = r.y * t5;
    let t0 = t0 + t0;
    r.y = t8 - t0;
    let t10 = t10.square() - ysquared;
    let ztsquared = r.z.square();
    let t10 = t10 - ztsquared;
    let t9 = t9 + t9 - t10;
    let t10 = r.z + r.z;
    let t6 = -t6;
    let t1 = t6 + t6;
    (t10, t1, t9)
}
impl PairingCurveAffine for G1Affine {
    type Pair = G2Affine;
    type PairingResult = Gt;
    fn pairing_with(&self, other: &Self::Pair) -> Self::PairingResult {
        pairing(self, other)
    }
}
impl PairingCurveAffine for G2Affine {
    type Pair = G1Affine;
    type PairingResult = Gt;
    fn pairing_with(&self, other: &Self::Pair) -> Self::PairingResult {
        // The pairing takes (G1, G2); swap the argument order accordingly.
        pairing(other, self)
    }
}
/// A [`pairing::Engine`] for BLS12-381 pairing operations.
#[cfg_attr(docsrs, doc(cfg(feature = "pairings")))]
#[derive(Clone, Debug)]
pub struct Bls12;
impl Engine for Bls12 {
    type Fr = Scalar;
    type G1 = G1Projective;
    type G1Affine = G1Affine;
    type G2 = G2Projective;
    type G2Affine = G2Affine;
    type Gt = Gt;
    fn pairing(p: &Self::G1Affine, q: &Self::G2Affine) -> Self::Gt {
        pairing(p, q)
    }
}
// Bridges the crate-local type to the `pairing` crate's trait of the same name.
impl pairing::MillerLoopResult for MillerLoopResult {
    type Gt = Gt;
    fn final_exponentiation(&self) -> Self::Gt {
        self.final_exponentiation()
    }
}
#[cfg(feature = "alloc")]
impl MultiMillerLoop for Bls12 {
    type G2Prepared = G2Prepared;
    type Result = MillerLoopResult;
    fn multi_miller_loop(terms: &[(&Self::G1Affine, &Self::G2Prepared)]) -> Self::Result {
        multi_miller_loop(terms)
    }
}
// The hard-coded `Gt::generator()` constant must equal the actual pairing of
// the two group generators.
#[test]
fn test_gt_generator() {
    assert_eq!(
        Gt::generator(),
        pairing(&G1Affine::generator(), &G2Affine::generator())
    );
}
// e(aG, bH) = e(abG, H) = e(G, H)^(ab): the defining bilinearity property.
#[test]
fn test_bilinearity() {
    use crate::Scalar;
    let a = Scalar::from_raw([1, 2, 3, 4]).invert().unwrap().square();
    let b = Scalar::from_raw([5, 6, 7, 8]).invert().unwrap().square();
    let c = a * b;
    let g = G1Affine::from(G1Affine::generator() * a);
    let h = G2Affine::from(G2Affine::generator() * b);
    let p = pairing(&g, &h);
    assert!(p != Gt::identity());
    let expected = G1Affine::from(G1Affine::generator() * c);
    assert_eq!(p, pairing(&expected, &G2Affine::generator()));
    assert_eq!(
        p,
        pairing(&G1Affine::generator(), &G2Affine::generator()) * c
    );
}
// Negating either input negates the output: -e(g, h) = e(g, -h) = e(-g, h).
#[test]
fn test_unitary() {
    let g = G1Affine::generator();
    let h = G2Affine::generator();
    let p = -pairing(&g, &h);
    let q = pairing(&g, &-h);
    let r = pairing(&-g, &h);
    assert_eq!(p, q);
    assert_eq!(q, r);
}
// multi_miller_loop over several terms (including identity inputs, which must
// contribute the identity) agrees with summing individual pairings.
#[cfg(feature = "alloc")]
#[test]
fn test_multi_miller_loop() {
    let a1 = G1Affine::generator();
    let b1 = G2Affine::generator();
    let a2 = G1Affine::from(
        G1Affine::generator() * Scalar::from_raw([1, 2, 3, 4]).invert().unwrap().square(),
    );
    let b2 = G2Affine::from(
        G2Affine::generator() * Scalar::from_raw([4, 2, 2, 4]).invert().unwrap().square(),
    );
    let a3 = G1Affine::identity();
    let b3 = G2Affine::from(
        G2Affine::generator() * Scalar::from_raw([9, 2, 2, 4]).invert().unwrap().square(),
    );
    let a4 = G1Affine::from(
        G1Affine::generator() * Scalar::from_raw([5, 5, 5, 5]).invert().unwrap().square(),
    );
    let b4 = G2Affine::identity();
    let a5 = G1Affine::from(
        G1Affine::generator() * Scalar::from_raw([323, 32, 3, 1]).invert().unwrap().square(),
    );
    let b5 = G2Affine::from(
        G2Affine::generator() * Scalar::from_raw([4, 2, 2, 9099]).invert().unwrap().square(),
    );
    let b1_prepared = G2Prepared::from(b1);
    let b2_prepared = G2Prepared::from(b2);
    let b3_prepared = G2Prepared::from(b3);
    let b4_prepared = G2Prepared::from(b4);
    let b5_prepared = G2Prepared::from(b5);
    let expected = pairing(&a1, &b1)
        + pairing(&a2, &b2)
        + pairing(&a3, &b3)
        + pairing(&a4, &b4)
        + pairing(&a5, &b5);
    let test = multi_miller_loop(&[
        (&a1, &b1_prepared),
        (&a2, &b2_prepared),
        (&a3, &b3_prepared),
        (&a4, &b4_prepared),
        (&a5, &b5_prepared),
    ])
    .final_exponentiation();
    assert_eq!(expected, test);
}
// The default MillerLoopResult (Fp12::one()) must map to the Gt identity.
#[test]
fn test_miller_loop_result_default() {
    assert_eq!(
        MillerLoopResult::default().final_exponentiation(),
        Gt::identity(),
    );
}
// Zeroizing a MillerLoopResult must reset it to the default (identity) value.
#[cfg(feature = "zeroize")]
#[test]
fn test_miller_loop_result_zeroize() {
    use zeroize::Zeroize;
    let mut m = multi_miller_loop(&[
        (&G1Affine::generator(), &G2Affine::generator().into()),
        (&-G1Affine::generator(), &G2Affine::generator().into()),
    ]);
    m.zeroize();
    assert_eq!(m.0, MillerLoopResult::default().0);
}
// Identity inputs short-circuit the loop to one, but nontrivial combinations
// that final-exponentiate to the identity must NOT be one before the final
// exponentiation — comparing raw Miller loop outputs is unsound.
#[test]
fn tricking_miller_loop_result() {
    assert_eq!(
        multi_miller_loop(&[(&G1Affine::identity(), &G2Affine::generator().into())]).0,
        Fp12::one()
    );
    assert_eq!(
        multi_miller_loop(&[(&G1Affine::generator(), &G2Affine::identity().into())]).0,
        Fp12::one()
    );
    assert_ne!(
        multi_miller_loop(&[
            (&G1Affine::generator(), &G2Affine::generator().into()),
            (&-G1Affine::generator(), &G2Affine::generator().into())
        ])
        .0,
        Fp12::one()
    );
    assert_eq!(
        multi_miller_loop(&[
            (&G1Affine::generator(), &G2Affine::generator().into()),
            (&-G1Affine::generator(), &G2Affine::generator().into())
        ])
        .final_exponentiation(),
        Gt::identity()
    );
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/zkcrypto/bls12_381/src/g1.rs | zkcrypto/bls12_381/src/g1.rs | //! This module provides an implementation of the $\mathbb{G}_1$ group of BLS12-381.
use core::borrow::Borrow;
use core::fmt;
use core::iter::Sum;
use core::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign};
use group::{
prime::{PrimeCurve, PrimeCurveAffine, PrimeGroup},
Curve, Group, GroupEncoding, UncompressedEncoding,
};
use rand_core::RngCore;
use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption};
#[cfg(feature = "alloc")]
use group::WnafGroup;
use crate::fp::Fp;
use crate::Scalar;
/// This is an element of $\mathbb{G}_1$ represented in the affine coordinate space.
/// It is ideal to keep elements in this representation to reduce memory usage and
/// improve performance through the use of mixed curve model arithmetic.
///
/// Values of `G1Affine` are guaranteed to be in the $q$-order subgroup unless an
/// "unchecked" API was misused.
#[cfg_attr(docsrs, doc(cfg(feature = "groups")))]
#[derive(Copy, Clone, Debug)]
pub struct G1Affine {
    pub x: Fp,
    pub y: Fp,
    // Set when this is the point at infinity; (x, y) are then (0, 1) by
    // convention but code guards against other values.
    pub infinity: Choice,
}
impl Default for G1Affine {
    fn default() -> G1Affine {
        G1Affine::identity()
    }
}
#[cfg(feature = "zeroize")]
impl zeroize::DefaultIsZeroes for G1Affine {}
impl fmt::Display for G1Affine {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // No dedicated human-readable form; fall back to the Debug output.
        write!(f, "{:?}", self)
    }
}
impl<'a> From<&'a G1Projective> for G1Affine {
    fn from(p: &'a G1Projective) -> G1Affine {
        // Normalize by z. For the point at infinity z = 0: invert() fails,
        // zinv is forced to zero, and the constant-time select below swaps in
        // the canonical identity.
        let zinv = p.z.invert().unwrap_or(Fp::zero());
        let x = p.x * zinv;
        let y = p.y * zinv;
        let tmp = G1Affine {
            x,
            y,
            infinity: Choice::from(0u8),
        };
        G1Affine::conditional_select(&tmp, &G1Affine::identity(), zinv.is_zero())
    }
}
impl From<G1Projective> for G1Affine {
    fn from(p: G1Projective) -> G1Affine {
        G1Affine::from(&p)
    }
}
impl ConstantTimeEq for G1Affine {
    fn ct_eq(&self, other: &Self) -> Choice {
        // The only cases in which two points are equal are
        // 1. infinity is set on both
        // 2. infinity is not set on both, and their coordinates are equal
        (self.infinity & other.infinity)
            | ((!self.infinity)
                & (!other.infinity)
                & self.x.ct_eq(&other.x)
                & self.y.ct_eq(&other.y))
    }
}
impl ConditionallySelectable for G1Affine {
    fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
        // Constant-time field-wise selection.
        G1Affine {
            x: Fp::conditional_select(&a.x, &b.x, choice),
            y: Fp::conditional_select(&a.y, &b.y, choice),
            infinity: Choice::conditional_select(&a.infinity, &b.infinity, choice),
        }
    }
}
impl Eq for G1Affine {}
impl PartialEq for G1Affine {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        bool::from(self.ct_eq(other))
    }
}
impl Neg for &G1Affine {
    type Output = G1Affine;
    #[inline]
    fn neg(self) -> G1Affine {
        G1Affine {
            x: self.x,
            // Negate y, except for the point at infinity where y is pinned to
            // one to keep the canonical identity representation.
            y: Fp::conditional_select(&-self.y, &Fp::one(), self.infinity),
            infinity: self.infinity,
        }
    }
}
impl Neg for G1Affine {
    type Output = G1Affine;
    #[inline]
    fn neg(self) -> G1Affine {
        -&self
    }
}
// Mixed affine/projective addition and subtraction; both directions delegate
// to the projective `add_mixed`, which is cheaper than full projective add.
impl<'b> Add<&'b G1Projective> for &G1Affine {
    type Output = G1Projective;
    #[inline]
    fn add(self, rhs: &'b G1Projective) -> G1Projective {
        rhs.add_mixed(self)
    }
}
impl<'b> Add<&'b G1Affine> for &G1Projective {
    type Output = G1Projective;
    #[inline]
    fn add(self, rhs: &'b G1Affine) -> G1Projective {
        self.add_mixed(rhs)
    }
}
impl<'b> Sub<&'b G1Projective> for &G1Affine {
    type Output = G1Projective;
    #[inline]
    fn sub(self, rhs: &'b G1Projective) -> G1Projective {
        self + (-rhs)
    }
}
impl<'b> Sub<&'b G1Affine> for &G1Projective {
    type Output = G1Projective;
    #[inline]
    fn sub(self, rhs: &'b G1Affine) -> G1Projective {
        self + (-rhs)
    }
}
impl<T> Sum<T> for G1Projective
where
    T: Borrow<G1Projective>,
{
    // Sums any iterator of points; an empty iterator yields the identity.
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = T>,
    {
        iter.fold(Self::identity(), |acc, item| acc + item.borrow())
    }
}
impl_binops_additive!(G1Projective, G1Affine);
impl_binops_additive_specify_output!(G1Affine, G1Projective, G1Projective);
// The curve constant b = 4 of the short Weierstrass equation
// y^2 = x^3 + 4, stored in the internal (Montgomery) representation used by
// `Fp::from_raw_unchecked`.
const B: Fp = Fp::from_raw_unchecked([
    0xaa27_0000_000c_fff3,
    0x53cc_0032_fc34_000a,
    0x478f_e97a_6b0a_807f,
    0xb1d3_7ebe_e6ba_24d7,
    0x8ec9_733b_bf78_ab2f,
    0x09d6_4551_3d83_de7e,
]);
impl G1Affine {
    /// Returns the identity of the group: the point at infinity.
    pub fn identity() -> G1Affine {
        // Canonical representation: (0, 1) with the infinity flag set.
        G1Affine {
            x: Fp::zero(),
            y: Fp::one(),
            infinity: Choice::from(1u8),
        }
    }
    /// Returns a fixed generator of the group. See [`notes::design`](notes/design/index.html#fixed-generators)
    /// for how this generator is chosen.
    pub fn generator() -> G1Affine {
        G1Affine {
            x: Fp::from_raw_unchecked([
                0x5cb3_8790_fd53_0c16,
                0x7817_fc67_9976_fff5,
                0x154f_95c7_143b_a1c1,
                0xf0ae_6acd_f3d0_e747,
                0xedce_6ecc_21db_f440,
                0x1201_7741_9e0b_fb75,
            ]),
            y: Fp::from_raw_unchecked([
                0xbaac_93d5_0ce7_2271,
                0x8c22_631a_7918_fd8e,
                0xdd59_5f13_5707_25ce,
                0x51ac_5829_5040_5194,
                0x0e1c_8c3f_ad00_59c0,
                0x0bbc_3efc_5008_a26a,
            ]),
            infinity: Choice::from(0u8),
        }
    }
    /// Serializes this element into compressed form. See [`notes::serialization`](crate::notes::serialization)
    /// for details about how group elements are serialized.
    ///
    /// The three high bits of the first byte carry flags: bit 7 = compressed,
    /// bit 6 = infinity, bit 5 = sign of y (lexicographically largest).
    pub fn to_compressed(&self) -> [u8; 48] {
        // Strictly speaking, self.x is zero already when self.infinity is true, but
        // to guard against implementation mistakes we do not assume this.
        let mut res = Fp::conditional_select(&self.x, &Fp::zero(), self.infinity).to_bytes();
        // This point is in compressed form, so we set the most significant bit.
        res[0] |= 1u8 << 7;
        // Is this point at infinity? If so, set the second-most significant bit.
        res[0] |= u8::conditional_select(&0u8, &(1u8 << 6), self.infinity);
        // Is the y-coordinate the lexicographically largest of the two associated with the
        // x-coordinate? If so, set the third-most significant bit so long as this is not
        // the point at infinity.
        res[0] |= u8::conditional_select(
            &0u8,
            &(1u8 << 5),
            (!self.infinity) & self.y.lexicographically_largest(),
        );
        res
    }
    /// Serializes this element into uncompressed form. See [`notes::serialization`](crate::notes::serialization)
    /// for details about how group elements are serialized.
    pub fn to_uncompressed(&self) -> [u8; 96] {
        let mut res = [0; 96];
        res[0..48].copy_from_slice(
            &Fp::conditional_select(&self.x, &Fp::zero(), self.infinity).to_bytes()[..],
        );
        res[48..96].copy_from_slice(
            &Fp::conditional_select(&self.y, &Fp::zero(), self.infinity).to_bytes()[..],
        );
        // Is this point at infinity? If so, set the second-most significant bit.
        res[0] |= u8::conditional_select(&0u8, &(1u8 << 6), self.infinity);
        res
    }
    /// Attempts to deserialize an uncompressed element. See [`notes::serialization`](crate::notes::serialization)
    /// for details about how group elements are serialized.
    pub fn from_uncompressed(bytes: &[u8; 96]) -> CtOption<Self> {
        // The unchecked parse is followed by the full validity checks:
        // on-curve and in the prime-order subgroup.
        Self::from_uncompressed_unchecked(bytes)
            .and_then(|p| CtOption::new(p, p.is_on_curve() & p.is_torsion_free()))
    }
    /// Attempts to deserialize an uncompressed element, not checking if the
    /// element is on the curve and not checking if it is in the correct subgroup.
    /// **This is dangerous to call unless you trust the bytes you are reading; otherwise,
    /// API invariants may be broken.** Please consider using `from_uncompressed()` instead.
    pub fn from_uncompressed_unchecked(bytes: &[u8; 96]) -> CtOption<Self> {
        // Obtain the three flags from the start of the byte sequence
        let compression_flag_set = Choice::from((bytes[0] >> 7) & 1);
        let infinity_flag_set = Choice::from((bytes[0] >> 6) & 1);
        let sort_flag_set = Choice::from((bytes[0] >> 5) & 1);
        // Attempt to obtain the x-coordinate
        let x = {
            let mut tmp = [0; 48];
            tmp.copy_from_slice(&bytes[0..48]);
            // Mask away the flag bits
            tmp[0] &= 0b0001_1111;
            Fp::from_bytes(&tmp)
        };
        // Attempt to obtain the y-coordinate
        let y = {
            let mut tmp = [0; 48];
            tmp.copy_from_slice(&bytes[48..96]);
            Fp::from_bytes(&tmp)
        };
        x.and_then(|x| {
            y.and_then(|y| {
                // Create a point representing this value
                let p = G1Affine::conditional_select(
                    &G1Affine {
                        x,
                        y,
                        infinity: infinity_flag_set,
                    },
                    &G1Affine::identity(),
                    infinity_flag_set,
                );
                CtOption::new(
                    p,
                    // If the infinity flag is set, the x and y coordinates should have been zero.
                    ((!infinity_flag_set) | (infinity_flag_set & x.is_zero() & y.is_zero())) &
                    // The compression flag should not have been set, as this is an uncompressed element
                    (!compression_flag_set) &
                    // The sort flag should not have been set, as this is an uncompressed element
                    (!sort_flag_set),
                )
            })
        })
    }
    /// Attempts to deserialize a compressed element. See [`notes::serialization`](crate::notes::serialization)
    /// for details about how group elements are serialized.
    pub fn from_compressed(bytes: &[u8; 48]) -> CtOption<Self> {
        // We already know the point is on the curve because this is established
        // by the y-coordinate recovery procedure in from_compressed_unchecked().
        Self::from_compressed_unchecked(bytes).and_then(|p| CtOption::new(p, p.is_torsion_free()))
    }
    /// Attempts to deserialize an uncompressed element, not checking if the
    /// element is in the correct subgroup.
    /// **This is dangerous to call unless you trust the bytes you are reading; otherwise,
    /// API invariants may be broken.** Please consider using `from_compressed()` instead.
    pub fn from_compressed_unchecked(bytes: &[u8; 48]) -> CtOption<Self> {
        // Obtain the three flags from the start of the byte sequence
        let compression_flag_set = Choice::from((bytes[0] >> 7) & 1);
        let infinity_flag_set = Choice::from((bytes[0] >> 6) & 1);
        let sort_flag_set = Choice::from((bytes[0] >> 5) & 1);
        // Attempt to obtain the x-coordinate
        let x = {
            let mut tmp = [0; 48];
            tmp.copy_from_slice(&bytes[0..48]);
            // Mask away the flag bits
            tmp[0] &= 0b0001_1111;
            Fp::from_bytes(&tmp)
        };
        x.and_then(|x| {
            // If the infinity flag is set, return the value assuming
            // the x-coordinate is zero and the sort bit is not set.
            //
            // Otherwise, return a recovered point (assuming the correct
            // y-coordinate can be found) so long as the infinity flag
            // was not set.
            CtOption::new(
                G1Affine::identity(),
                infinity_flag_set & // Infinity flag should be set
                compression_flag_set & // Compression flag should be set
                (!sort_flag_set) & // Sort flag should not be set
                x.is_zero(), // The x-coordinate should be zero
            )
            .or_else(|| {
                // Recover a y-coordinate given x by y = sqrt(x^3 + 4)
                ((x.square() * x) + B).sqrt().and_then(|y| {
                    // Switch to the correct y-coordinate if necessary.
                    let y = Fp::conditional_select(
                        &y,
                        &-y,
                        y.lexicographically_largest() ^ sort_flag_set,
                    );
                    CtOption::new(
                        G1Affine {
                            x,
                            y,
                            infinity: infinity_flag_set,
                        },
                        (!infinity_flag_set) & // Infinity flag should not be set
                        compression_flag_set, // Compression flag should be set
                    )
                })
            })
        })
    }
    /// Returns true if this element is the identity (the point at infinity).
    #[inline]
    pub fn is_identity(&self) -> Choice {
        self.infinity
    }
    /// Returns true if this point is free of an $h$-torsion component, and so it
    /// exists within the $q$-order subgroup $\mathbb{G}_1$. This should always return true
    /// unless an "unchecked" API was used.
    pub fn is_torsion_free(&self) -> Choice {
        // Algorithm from Section 6 of https://eprint.iacr.org/2021/1130
        // Updated proof of correctness in https://eprint.iacr.org/2022/352
        //
        // Check that endomorphism_p(P) == -[x^2] P
        let minus_x_squared_times_p = G1Projective::from(self).mul_by_x().mul_by_x().neg();
        let endomorphism_p = endomorphism(self);
        minus_x_squared_times_p.ct_eq(&G1Projective::from(endomorphism_p))
    }
    /// Returns true if this point is on the curve. This should always return
    /// true unless an "unchecked" API was used.
    pub fn is_on_curve(&self) -> Choice {
        // y^2 - x^3 ?= 4
        (self.y.square() - (self.x.square() * self.x)).ct_eq(&B) | self.infinity
    }
}
/// A nontrivial third root of unity in Fp
pub const BETA: Fp = Fp::from_raw_unchecked([
    0x30f1_361b_798a_64e8,
    0xf3b8_ddab_7ece_5a2a,
    0x16a8_ca3a_c615_77f7,
    0xc26a_2ff8_74fd_029b,
    0x3636_b766_6070_1c6e,
    0x051b_a4ab_241b_6160,
]);
/// Applies the GLV endomorphism to `p`, used by the fast subgroup check in
/// `G1Affine::is_torsion_free`.
fn endomorphism(p: &G1Affine) -> G1Affine {
    // Endomorphism of the points on the curve.
    // endomorphism_p(x,y) = (BETA * x, y)
    // where BETA is a non-trivial cubic root of unity in Fq.
    let mut res = *p;
    res.x *= BETA;
    res
}
/// This is an element of $\mathbb{G}_1$ represented in the projective coordinate space.
#[cfg_attr(docsrs, doc(cfg(feature = "groups")))]
#[derive(Copy, Clone, Debug)]
pub struct G1Projective {
    pub x: Fp,
    pub y: Fp,
    // z = 0 encodes the point at infinity.
    pub z: Fp,
}
impl Default for G1Projective {
    fn default() -> G1Projective {
        G1Projective::identity()
    }
}
#[cfg(feature = "zeroize")]
impl zeroize::DefaultIsZeroes for G1Projective {}
impl fmt::Display for G1Projective {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // No dedicated human-readable form; fall back to the Debug output.
        write!(f, "{:?}", self)
    }
}
impl<'a> From<&'a G1Affine> for G1Projective {
    fn from(p: &'a G1Affine) -> G1Projective {
        G1Projective {
            x: p.x,
            y: p.y,
            // Constant-time: z = 1 for a finite point, z = 0 for infinity.
            z: Fp::conditional_select(&Fp::one(), &Fp::zero(), p.infinity),
        }
    }
}
impl From<G1Affine> for G1Projective {
    fn from(p: G1Affine) -> G1Projective {
        G1Projective::from(&p)
    }
}
impl ConstantTimeEq for G1Projective {
    fn ct_eq(&self, other: &Self) -> Choice {
        // Is (xz, yz, z) equal to (x'z', y'z', z') when converted to affine?
        // Cross-multiplying avoids field inversions and stays constant-time.
        let x1 = self.x * other.z;
        let x2 = other.x * self.z;
        let y1 = self.y * other.z;
        let y2 = other.y * self.z;
        let self_is_zero = self.z.is_zero();
        let other_is_zero = other.z.is_zero();
        (self_is_zero & other_is_zero) // Both point at infinity
            | ((!self_is_zero) & (!other_is_zero) & x1.ct_eq(&x2) & y1.ct_eq(&y2))
        // Neither point at infinity, coordinates are the same
    }
}
impl ConditionallySelectable for G1Projective {
    fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
        // Constant-time field-wise selection.
        G1Projective {
            x: Fp::conditional_select(&a.x, &b.x, choice),
            y: Fp::conditional_select(&a.y, &b.y, choice),
            z: Fp::conditional_select(&a.z, &b.z, choice),
        }
    }
}
impl Eq for G1Projective {}
impl PartialEq for G1Projective {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        bool::from(self.ct_eq(other))
    }
}
impl Neg for &G1Projective {
type Output = G1Projective;
#[inline]
fn neg(self) -> G1Projective {
G1Projective {
x: self.x,
y: -self.y,
z: self.z,
}
}
}
impl Neg for G1Projective {
type Output = G1Projective;
#[inline]
fn neg(self) -> G1Projective {
-&self
}
}
impl<'b> Add<&'b G1Projective> for &G1Projective {
type Output = G1Projective;
#[inline]
fn add(self, rhs: &'b G1Projective) -> G1Projective {
self.add(rhs)
}
}
impl<'b> Sub<&'b G1Projective> for &G1Projective {
type Output = G1Projective;
#[inline]
fn sub(self, rhs: &'b G1Projective) -> G1Projective {
self + (-rhs)
}
}
impl<'b> Mul<&'b Scalar> for &G1Projective {
type Output = G1Projective;
fn mul(self, other: &'b Scalar) -> Self::Output {
self.multiply(&other.to_bytes())
}
}
impl<'b> Mul<&'b G1Projective> for &Scalar {
type Output = G1Projective;
#[inline]
fn mul(self, rhs: &'b G1Projective) -> Self::Output {
rhs * self
}
}
impl<'b> Mul<&'b Scalar> for &G1Affine {
type Output = G1Projective;
fn mul(self, other: &'b Scalar) -> Self::Output {
G1Projective::from(self).multiply(&other.to_bytes())
}
}
impl<'b> Mul<&'b G1Affine> for &Scalar {
type Output = G1Projective;
#[inline]
fn mul(self, rhs: &'b G1Affine) -> Self::Output {
rhs * self
}
}
impl_binops_additive!(G1Projective, G1Projective);
impl_binops_multiplicative!(G1Projective, Scalar);
impl_binops_multiplicative_mixed!(G1Affine, Scalar, G1Projective);
impl_binops_multiplicative_mixed!(Scalar, G1Affine, G1Projective);
impl_binops_multiplicative_mixed!(Scalar, G1Projective, G1Projective);
/// Multiplies a field element by 3*b = 12, where b = 4 is the constant term
/// of the G1 curve equation y^2 = x^3 + 4. Computed with additions only.
#[inline(always)]
fn mul_by_3b(a: Fp) -> Fp {
    let doubled = a + a; // 2a
    let quadrupled = doubled + doubled; // 4a
    // 4a + 4a + 4a = 12a
    quadrupled + quadrupled + quadrupled
}
impl G1Projective {
/// Returns the identity of the group: the point at infinity.
///
pub fn identity() -> G1Projective {
// (0 : 1 : 0) is the projective representation of the point at infinity.
G1Projective {
x: Fp::zero(),
y: Fp::one(),
z: Fp::zero(),
}
}
/// Returns a fixed generator of the group. See [`notes::design`](notes/design/index.html#fixed-generators)
/// for how this generator is chosen.
pub fn generator() -> G1Projective {
// NOTE(review): limbs appear to be Montgomery-form coordinates
// (`from_raw_unchecked`) — confirm against `Fp`'s representation.
G1Projective {
x: Fp::from_raw_unchecked([
0x5cb3_8790_fd53_0c16,
0x7817_fc67_9976_fff5,
0x154f_95c7_143b_a1c1,
0xf0ae_6acd_f3d0_e747,
0xedce_6ecc_21db_f440,
0x1201_7741_9e0b_fb75,
]),
y: Fp::from_raw_unchecked([
0xbaac_93d5_0ce7_2271,
0x8c22_631a_7918_fd8e,
0xdd59_5f13_5707_25ce,
0x51ac_5829_5040_5194,
0x0e1c_8c3f_ad00_59c0,
0x0bbc_3efc_5008_a26a,
]),
z: Fp::one(),
}
}
/// Computes the doubling of this point.
///
/// Constant-time; the identity input (for which the formula below is not
/// valid) is patched up with a conditional select rather than a branch.
pub fn double(&self) -> G1Projective {
// Algorithm 9, https://eprint.iacr.org/2015/1060.pdf
let t0 = self.y.square();
let z3 = t0 + t0;
let z3 = z3 + z3;
let z3 = z3 + z3;
let t1 = self.y * self.z;
let t2 = self.z.square();
let t2 = mul_by_3b(t2);
let x3 = t2 * z3;
let y3 = t0 + t2;
let z3 = t1 * z3;
let t1 = t2 + t2;
let t2 = t1 + t2;
let t0 = t0 - t2;
let y3 = t0 * y3;
let y3 = x3 + y3;
let t1 = self.x * self.y;
let x3 = t0 * t1;
let x3 = x3 + x3;
let tmp = G1Projective {
x: x3,
y: y3,
z: z3,
};
// Replace the (garbage) result with the identity when doubling infinity.
G1Projective::conditional_select(&tmp, &G1Projective::identity(), self.is_identity())
}
/// Adds this point to another point.
///
/// Uses a *complete* addition formula: correct for every input pair,
/// including P + P and inputs at infinity, so there are no special cases
/// and no secret-dependent branches.
pub fn add(&self, rhs: &G1Projective) -> G1Projective {
// Algorithm 7, https://eprint.iacr.org/2015/1060.pdf
let t0 = self.x * rhs.x;
let t1 = self.y * rhs.y;
let t2 = self.z * rhs.z;
let t3 = self.x + self.y;
let t4 = rhs.x + rhs.y;
let t3 = t3 * t4;
let t4 = t0 + t1;
let t3 = t3 - t4;
let t4 = self.y + self.z;
let x3 = rhs.y + rhs.z;
let t4 = t4 * x3;
let x3 = t1 + t2;
let t4 = t4 - x3;
let x3 = self.x + self.z;
let y3 = rhs.x + rhs.z;
let x3 = x3 * y3;
let y3 = t0 + t2;
let y3 = x3 - y3;
let x3 = t0 + t0;
let t0 = x3 + t0;
let t2 = mul_by_3b(t2);
let z3 = t1 + t2;
let t1 = t1 - t2;
let y3 = mul_by_3b(y3);
let x3 = t4 * y3;
let t2 = t3 * t1;
let x3 = t2 - x3;
let y3 = y3 * t0;
let t1 = t1 * z3;
let y3 = t1 + y3;
let t0 = t0 * t3;
let z3 = z3 * t4;
let z3 = z3 + t0;
G1Projective {
x: x3,
y: y3,
z: z3,
}
}
/// Adds this point to another point in the affine model.
///
/// Cheaper than full projective addition because the affine point has an
/// implicit z = 1; the case where `rhs` is the identity is handled by the
/// final constant-time select.
pub fn add_mixed(&self, rhs: &G1Affine) -> G1Projective {
// Algorithm 8, https://eprint.iacr.org/2015/1060.pdf
let t0 = self.x * rhs.x;
let t1 = self.y * rhs.y;
let t3 = rhs.x + rhs.y;
let t4 = self.x + self.y;
let t3 = t3 * t4;
let t4 = t0 + t1;
let t3 = t3 - t4;
let t4 = rhs.y * self.z;
let t4 = t4 + self.y;
let y3 = rhs.x * self.z;
let y3 = y3 + self.x;
let x3 = t0 + t0;
let t0 = x3 + t0;
let t2 = mul_by_3b(self.z);
let z3 = t1 + t2;
let t1 = t1 - t2;
let y3 = mul_by_3b(y3);
let x3 = t4 * y3;
let t2 = t3 * t1;
let x3 = t2 - x3;
let y3 = y3 * t0;
let t1 = t1 * z3;
let y3 = t1 + y3;
let t0 = t0 * t3;
let z3 = z3 * t4;
let z3 = z3 + t0;
let tmp = G1Projective {
x: x3,
y: y3,
z: z3,
};
// If rhs is the identity, the sum is just self.
G1Projective::conditional_select(&tmp, self, rhs.is_identity())
}
/// Samples a random non-identity group element by rejection: pick random
/// x until x^3 + 4 is square, choose a sign for y, then clear the cofactor.
pub fn random(mut rng: impl RngCore) -> Self {
loop {
let x = Fp::random(&mut rng);
let flip_sign = !rng.next_u32().is_multiple_of(2);
// Obtain the corresponding y-coordinate given x as y = sqrt(x^3 + 4)
let p = ((x.square() * x) + B).sqrt().map(|y| G1Affine {
x,
y: if flip_sign { -y } else { y },
infinity: 0.into(),
});
if p.is_some().into() {
let p = p.unwrap().to_curve().clear_cofactor();
if bool::from(!p.is_identity()) {
return p;
}
}
}
}
/// Constant-time double-and-add multiplication by a 256-bit little-endian
/// scalar; every bit performs a doubling and a conditional-select add.
fn multiply(&self, by: &[u8; 32]) -> G1Projective {
let mut acc = G1Projective::identity();
// This is a simple double-and-add implementation of point
// multiplication, moving from most significant to least
// significant bit of the scalar.
//
// We skip the leading bit because it's always unset for Fq
// elements.
for bit in by
.iter()
.rev()
.flat_map(|byte| (0..8).rev().map(move |i| Choice::from((byte >> i) & 1u8)))
.skip(1)
{
acc = acc.double();
acc = G1Projective::conditional_select(&acc, &(acc + self), bit);
}
acc
}
/// Multiply `self` by `crate::BLS_X`, using double and add.
///
/// Variable-time, but only over the fixed public constant `BLS_X`, so no
/// secret data can leak through the timing.
fn mul_by_x(&self) -> G1Projective {
let mut xself = G1Projective::identity();
// NOTE: in BLS12-381 we can just skip the first bit.
let mut x = crate::BLS_X >> 1;
let mut tmp = *self;
while x != 0 {
tmp = tmp.double();
if x % 2 == 1 {
xself += tmp;
}
x >>= 1;
}
// finally, flip the sign
if crate::BLS_X_IS_NEGATIVE {
xself = -xself;
}
xself
}
/// Multiplies by $(1 - z)$, where $z$ is the parameter of BLS12-381, which
/// [suffices to clear](https://ia.cr/2019/403) the cofactor and map
/// elliptic curve points to elements of $\mathbb{G}\_1$.
pub fn clear_cofactor(&self) -> G1Projective {
self - self.mul_by_x()
}
/// Converts a batch of `G1Projective` elements into `G1Affine` elements. This
/// function will panic if `p.len() != q.len()`.
///
/// Uses Montgomery's batch-inversion trick: one field inversion total,
/// plus O(n) multiplications, instead of one inversion per point.
pub fn batch_normalize(p: &[Self], q: &mut [G1Affine]) {
assert_eq!(p.len(), q.len());
let mut acc = Fp::one();
for (p, q) in p.iter().zip(q.iter_mut()) {
// We use the `x` field of `G1Affine` to store the product
// of previous z-coordinates seen.
q.x = acc;
// We will end up skipping all identities in p
acc = Fp::conditional_select(&(acc * p.z), &acc, p.is_identity());
}
// This is the inverse, as all z-coordinates are nonzero and the ones
// that are not are skipped.
acc = acc.invert().unwrap();
for (p, q) in p.iter().rev().zip(q.iter_mut().rev()) {
let skip = p.is_identity();
// Compute tmp = 1/z
let tmp = q.x * acc;
// Cancel out z-coordinate in denominator of `acc`
acc = Fp::conditional_select(&(acc * p.z), &acc, skip);
// Set the coordinates to the correct value
q.x = p.x * tmp;
q.y = p.y * tmp;
q.infinity = Choice::from(0u8);
*q = G1Affine::conditional_select(q, &G1Affine::identity(), skip);
}
}
/// Returns true if this element is the identity (the point at infinity).
#[inline]
pub fn is_identity(&self) -> Choice {
// z == 0 is exactly how this representation encodes infinity.
self.z.is_zero()
}
/// Returns true if this point is on the curve. This should always return
/// true unless an "unchecked" API was used.
pub fn is_on_curve(&self) -> Choice {
// Y^2 Z = X^3 + b Z^3
(self.y.square() * self.z).ct_eq(&(self.x.square() * self.x + self.z.square() * self.z * B))
| self.z.is_zero()
}
}
/// A 48-byte compressed encoding of a `G1Affine` point.
#[derive(Clone, Copy)]
pub struct G1Compressed([u8; 48]);
impl fmt::Debug for G1Compressed {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Debug-print the raw encoding bytes.
self.0[..].fmt(f)
}
}
impl Default for G1Compressed {
fn default() -> Self {
G1Compressed([0; 48])
}
}
#[cfg(feature = "zeroize")]
impl zeroize::DefaultIsZeroes for G1Compressed {}
impl AsRef<[u8]> for G1Compressed {
fn as_ref(&self) -> &[u8] {
&self.0
}
}
impl AsMut<[u8]> for G1Compressed {
fn as_mut(&mut self) -> &mut [u8] {
&mut self.0
}
}
impl ConstantTimeEq for G1Compressed {
fn ct_eq(&self, other: &Self) -> Choice {
// Constant-time byte-wise comparison of the encodings.
self.0.ct_eq(&other.0)
}
}
impl Eq for G1Compressed {}
impl PartialEq for G1Compressed {
#[inline]
fn eq(&self, other: &Self) -> bool {
bool::from(self.ct_eq(other))
}
}
/// A 96-byte uncompressed encoding (x- then y-coordinate) of a `G1Affine` point.
#[derive(Clone, Copy)]
pub struct G1Uncompressed([u8; 96]);
impl fmt::Debug for G1Uncompressed {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0[..].fmt(f)
}
}
impl Default for G1Uncompressed {
fn default() -> Self {
G1Uncompressed([0; 96])
}
}
#[cfg(feature = "zeroize")]
impl zeroize::DefaultIsZeroes for G1Uncompressed {}
impl AsRef<[u8]> for G1Uncompressed {
fn as_ref(&self) -> &[u8] {
&self.0
}
}
impl AsMut<[u8]> for G1Uncompressed {
fn as_mut(&mut self) -> &mut [u8] {
&mut self.0
}
}
impl ConstantTimeEq for G1Uncompressed {
fn ct_eq(&self, other: &Self) -> Choice {
self.0.ct_eq(&other.0)
}
}
impl Eq for G1Uncompressed {}
impl PartialEq for G1Uncompressed {
#[inline]
fn eq(&self, other: &Self) -> bool {
bool::from(self.ct_eq(other))
}
}
impl Group for G1Projective {
type Scalar = Scalar;
// Same rejection-sampling construction as the inherent `random` above.
fn random(mut rng: impl RngCore) -> Self {
loop {
let x = Fp::random(&mut rng);
let flip_sign = !rng.next_u32().is_multiple_of(2);
// Obtain the corresponding y-coordinate given x as y = sqrt(x^3 + 4)
let p = ((x.square() * x) + B).sqrt().map(|y| G1Affine {
x,
y: if flip_sign { -y } else { y },
infinity: 0.into(),
});
if p.is_some().into() {
let p = p.unwrap().to_curve().clear_cofactor();
if bool::from(!p.is_identity()) {
return p;
}
}
}
}
fn identity() -> Self {
Self::identity()
}
fn generator() -> Self {
Self::generator()
}
fn is_identity(&self) -> Choice {
self.is_identity()
}
fn double(&self) -> Self {
self.double()
}
}
#[cfg(feature = "alloc")]
impl WnafGroup for G1Projective {
// Picks a wNAF window size from the number of scalars: each threshold in
// RECOMMENDATIONS is the scalar count at which the next window pays off.
fn recommended_wnaf_for_num_scalars(num_scalars: usize) -> usize {
const RECOMMENDATIONS: [usize; 12] =
[1, 3, 7, 20, 43, 120, 273, 563, 1630, 3128, 7933, 62569];
let mut ret = 4;
for r in &RECOMMENDATIONS {
if num_scalars > *r {
ret += 1;
} else {
break;
}
}
ret
}
}
impl PrimeGroup for G1Projective {}
impl Curve for G1Projective {
type AffineRepr = G1Affine;
fn batch_normalize(p: &[Self], q: &mut [Self::AffineRepr]) {
Self::batch_normalize(p, q);
}
fn to_affine(&self) -> Self::AffineRepr {
self.into()
}
}
impl PrimeCurve for G1Projective {
type Affine = G1Affine;
}
impl PrimeCurveAffine for G1Affine {
type Scalar = Scalar;
type Curve = G1Projective;
fn identity() -> Self {
Self::identity()
}
fn generator() -> Self {
Self::generator()
}
fn is_identity(&self) -> Choice {
self.is_identity()
}
fn to_curve(&self) -> Self::Curve {
self.into()
}
}
impl GroupEncoding for G1Projective {
type Repr = G1Compressed;
// Round-trips through the affine compressed encoding.
fn from_bytes(bytes: &Self::Repr) -> CtOption<Self> {
G1Affine::from_bytes(bytes).map(Self::from)
}
fn from_bytes_unchecked(bytes: &Self::Repr) -> CtOption<Self> {
G1Affine::from_bytes_unchecked(bytes).map(Self::from)
}
fn to_bytes(&self) -> Self::Repr {
G1Affine::from(self).to_bytes()
}
}
impl GroupEncoding for G1Affine {
type Repr = G1Compressed;
fn from_bytes(bytes: &Self::Repr) -> CtOption<Self> {
Self::from_compressed(&bytes.0)
}
fn from_bytes_unchecked(bytes: &Self::Repr) -> CtOption<Self> {
Self::from_compressed_unchecked(&bytes.0)
}
fn to_bytes(&self) -> Self::Repr {
G1Compressed(self.to_compressed())
}
}
impl UncompressedEncoding for G1Affine {
type Uncompressed = G1Uncompressed;
fn from_uncompressed(bytes: &Self::Uncompressed) -> CtOption<Self> {
Self::from_uncompressed(&bytes.0)
}
fn from_uncompressed_unchecked(bytes: &Self::Uncompressed) -> CtOption<Self> {
Self::from_uncompressed_unchecked(&bytes.0)
}
fn to_uncompressed(&self) -> Self::Uncompressed {
G1Uncompressed(self.to_uncompressed())
}
}
// Checks BETA against its canonical byte encoding and verifies it is a
// primitive cube root of unity: beta != 1, beta^2 != 1, beta^3 == 1.
#[test]
fn test_beta() {
assert_eq!(
BETA,
Fp::from_bytes(&[
0x00u8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x19, 0x67, 0x2f, 0xdf, 0x76,
0xce, 0x51, 0xba, 0x69, 0xc6, 0x07, 0x6a, 0x0f, 0x77, 0xea, 0xdd, 0xb3, 0xa9, 0x3b,
0xe6, 0xf8, 0x96, 0x88, 0xde, 0x17, 0xd8, 0x13, 0x62, 0x0a, 0x00, 0x02, 0x2e, 0x01,
0xff, 0xff, 0xff, 0xfe, 0xff, 0xfe
])
.unwrap()
);
assert_ne!(BETA, Fp::one());
assert_ne!(BETA * BETA, Fp::one());
assert_eq!(BETA * BETA * BETA, Fp::one());
}
#[test]
fn test_is_on_curve() {
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | true |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/zkcrypto/bls12_381/src/util.rs | zkcrypto/bls12_381/src/util.rs | /// Compute a + b + carry, returning the result and the new carry over.
/// Compute a + b + carry, returning the result and the new carry over.
///
/// All three inputs may be full 64-bit words, so the carry-out can be up to 2.
#[inline(always)]
pub const fn adc(a: u64, b: u64, carry: u64) -> (u64, u64) {
    // Two chained overflowing additions; each overflows at most once, so the
    // total carry-out is the sum of the two overflow flags.
    let (partial, overflowed_ab) = a.overflowing_add(b);
    let (sum, overflowed_carry) = partial.overflowing_add(carry);
    (sum, overflowed_ab as u64 + overflowed_carry as u64)
}
/// Compute a - (b + borrow), returning the result and the new borrow.
///
/// Only the top bit of `borrow` is consulted (an all-ones word means "borrow
/// in"); the returned borrow is all-ones on underflow and zero otherwise.
#[inline(always)]
pub const fn sbb(a: u64, b: u64, borrow: u64) -> (u64, u64) {
    let borrow_in = borrow >> 63;
    let (partial, under_b) = a.overflowing_sub(b);
    let (diff, under_c) = partial.overflowing_sub(borrow_in);
    // Spread the single underflow bit across the whole output word.
    (diff, 0u64.wrapping_sub((under_b | under_c) as u64))
}
/// Compute a + (b * c) + carry, returning the result and the new carry over.
///
/// The full 128-bit product is formed first, so no intermediate overflow is
/// possible: the total always fits in two 64-bit words.
#[inline(always)]
pub const fn mac(a: u64, b: u64, c: u64, carry: u64) -> (u64, u64) {
    let wide = (b as u128) * (c as u128) + (a as u128) + (carry as u128);
    (wide as u64, (wide >> 64) as u64)
}
// Given an existing `impl Add<&Rhs> for &Lhs`, generates the three remaining
// owned/borrowed combinations (val+ref, ref+val, val+val) by delegating to it.
macro_rules! impl_add_binop_specify_output {
($lhs:ident, $rhs:ident, $output:ident) => {
impl<'b> Add<&'b $rhs> for $lhs {
type Output = $output;
#[inline]
fn add(self, rhs: &'b $rhs) -> $output {
&self + rhs
}
}
impl<'a> Add<$rhs> for &'a $lhs {
type Output = $output;
#[inline]
fn add(self, rhs: $rhs) -> $output {
self + &rhs
}
}
impl Add<$rhs> for $lhs {
type Output = $output;
#[inline]
fn add(self, rhs: $rhs) -> $output {
&self + &rhs
}
}
};
}
// Same as above for `Sub`: expands the owned/borrowed impl matrix from the
// hand-written reference-reference impl.
macro_rules! impl_sub_binop_specify_output {
($lhs:ident, $rhs:ident, $output:ident) => {
impl<'b> Sub<&'b $rhs> for $lhs {
type Output = $output;
#[inline]
fn sub(self, rhs: &'b $rhs) -> $output {
&self - rhs
}
}
impl<'a> Sub<$rhs> for &'a $lhs {
type Output = $output;
#[inline]
fn sub(self, rhs: $rhs) -> $output {
self - &rhs
}
}
impl Sub<$rhs> for $lhs {
type Output = $output;
#[inline]
fn sub(self, rhs: $rhs) -> $output {
&self - &rhs
}
}
};
}
// Convenience combiner: Add and Sub impl matrices with an explicit output type.
macro_rules! impl_binops_additive_specify_output {
($lhs:ident, $rhs:ident, $output:ident) => {
impl_add_binop_specify_output!($lhs, $rhs, $output);
impl_sub_binop_specify_output!($lhs, $rhs, $output);
};
}
// `Mul` impl matrix where the output type may differ from both operands
// (e.g. Scalar * G1Affine -> G1Projective).
macro_rules! impl_binops_multiplicative_mixed {
($lhs:ident, $rhs:ident, $output:ident) => {
impl<'b> Mul<&'b $rhs> for $lhs {
type Output = $output;
#[inline]
fn mul(self, rhs: &'b $rhs) -> $output {
&self * rhs
}
}
impl<'a> Mul<$rhs> for &'a $lhs {
type Output = $output;
#[inline]
fn mul(self, rhs: $rhs) -> $output {
self * &rhs
}
}
impl Mul<$rhs> for $lhs {
type Output = $output;
#[inline]
fn mul(self, rhs: $rhs) -> $output {
&self * &rhs
}
}
};
}
// Closed additive operators (output == Lhs), plus the matching
// `AddAssign`/`SubAssign` impls for owned and borrowed right-hand sides.
macro_rules! impl_binops_additive {
($lhs:ident, $rhs:ident) => {
impl_binops_additive_specify_output!($lhs, $rhs, $lhs);
impl SubAssign<$rhs> for $lhs {
#[inline]
fn sub_assign(&mut self, rhs: $rhs) {
*self = &*self - &rhs;
}
}
impl AddAssign<$rhs> for $lhs {
#[inline]
fn add_assign(&mut self, rhs: $rhs) {
*self = &*self + &rhs;
}
}
impl<'b> SubAssign<&'b $rhs> for $lhs {
#[inline]
fn sub_assign(&mut self, rhs: &'b $rhs) {
*self = &*self - rhs;
}
}
impl<'b> AddAssign<&'b $rhs> for $lhs {
#[inline]
fn add_assign(&mut self, rhs: &'b $rhs) {
*self = &*self + rhs;
}
}
};
}
// Closed multiplicative operators (output == Lhs), plus `MulAssign` impls.
macro_rules! impl_binops_multiplicative {
($lhs:ident, $rhs:ident) => {
impl_binops_multiplicative_mixed!($lhs, $rhs, $lhs);
impl MulAssign<$rhs> for $lhs {
#[inline]
fn mul_assign(&mut self, rhs: $rhs) {
*self = &*self * &rhs;
}
}
impl<'b> MulAssign<&'b $rhs> for $lhs {
#[inline]
fn mul_assign(&mut self, rhs: &'b $rhs) {
*self = &*self * rhs;
}
}
};
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/zkcrypto/bls12_381/src/scalar.rs | zkcrypto/bls12_381/src/scalar.rs | //! This module provides an implementation of the BLS12-381 scalar field $\mathbb{F}_q$
//! where `q = 0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001`
#![allow(clippy::all)]
use core::fmt;
use core::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign};
use rand_core::RngCore;
use ff::{Field, PrimeField};
use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption};
#[cfg(feature = "bits")]
use ff::{FieldBits, PrimeFieldBits};
use crate::util::{adc, mac, sbb};
/// Represents an element of the scalar field $\mathbb{F}_q$ of the BLS12-381 elliptic
/// curve construction.
// The internal representation of this type is four 64-bit unsigned
// integers in little-endian order. `Scalar` values are always in
// Montgomery form; i.e., Scalar(a) = aR mod q, with R = 2^256.
#[derive(Clone, Copy, Eq)]
pub struct Scalar(pub [u64; 4]);
impl fmt::Debug for Scalar {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Prints the canonical (non-Montgomery) value as big-endian hex.
let tmp = self.to_bytes();
write!(f, "0x")?;
for &b in tmp.iter().rev() {
write!(f, "{:02x}", b)?;
}
Ok(())
}
}
impl fmt::Display for Scalar {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl From<u64> for Scalar {
fn from(val: u64) -> Scalar {
// Multiplying by R^2 converts the raw integer into Montgomery form.
Scalar([val, 0, 0, 0]) * R2
}
}
impl ConstantTimeEq for Scalar {
// Constant-time limb-wise equality.
fn ct_eq(&self, other: &Self) -> Choice {
self.0[0].ct_eq(&other.0[0])
& self.0[1].ct_eq(&other.0[1])
& self.0[2].ct_eq(&other.0[2])
& self.0[3].ct_eq(&other.0[3])
}
}
impl PartialEq for Scalar {
#[inline]
fn eq(&self, other: &Self) -> bool {
bool::from(self.ct_eq(other))
}
}
impl ConditionallySelectable for Scalar {
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
Scalar([
u64::conditional_select(&a.0[0], &b.0[0], choice),
u64::conditional_select(&a.0[1], &b.0[1], choice),
u64::conditional_select(&a.0[2], &b.0[2], choice),
u64::conditional_select(&a.0[3], &b.0[3], choice),
])
}
}
/// Constant representing the modulus
/// q = 0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001
// Stored as raw little-endian limbs (NOT Montgomery form) — it is only ever
// used as an integer operand in the carry/borrow arithmetic below.
pub const MODULUS: Scalar = Scalar([
0xffff_ffff_0000_0001,
0x53bd_a402_fffe_5bfe,
0x3339_d808_09a1_d805,
0x73ed_a753_299d_7d48,
]);
/// The modulus as u32 limbs.
#[cfg(all(feature = "bits", not(target_pointer_width = "64")))]
const MODULUS_LIMBS_32: [u32; 8] = [
0x0000_0001,
0xffff_ffff,
0xfffe_5bfe,
0x53bd_a402,
0x09a1_d805,
0x3339_d808,
0x299d_7d48,
0x73ed_a753,
];
// The number of bits needed to represent the modulus.
const MODULUS_BITS: u32 = 255;
// GENERATOR = 7 (multiplicative generator of r-1 order, that is also quadratic nonresidue)
// Stored in Montgomery form.
const GENERATOR: Scalar = Scalar([
0x0000_000e_ffff_fff1,
0x17e3_63d3_0018_9c0f,
0xff9c_5787_6f84_57b0,
0x3513_3220_8fc5_a8c4,
]);
impl<'a> Neg for &'a Scalar {
type Output = Scalar;
#[inline]
fn neg(self) -> Scalar {
// Delegates to the inherent const `Scalar::neg`.
self.neg()
}
}
impl Neg for Scalar {
type Output = Scalar;
#[inline]
fn neg(self) -> Scalar {
-&self
}
}
impl<'a, 'b> Sub<&'b Scalar> for &'a Scalar {
type Output = Scalar;
#[inline]
fn sub(self, rhs: &'b Scalar) -> Scalar {
self.sub(rhs)
}
}
impl<'a, 'b> Add<&'b Scalar> for &'a Scalar {
type Output = Scalar;
#[inline]
fn add(self, rhs: &'b Scalar) -> Scalar {
self.add(rhs)
}
}
impl<'a, 'b> Mul<&'b Scalar> for &'a Scalar {
type Output = Scalar;
#[inline]
fn mul(self, rhs: &'b Scalar) -> Scalar {
self.mul(rhs)
}
}
// Expand the remaining owned/borrowed operator forms and assignment operators.
impl_binops_additive!(Scalar, Scalar);
impl_binops_multiplicative!(Scalar, Scalar);
/// INV = -(q^{-1} mod 2^64) mod 2^64
// Montgomery reduction constant: INV * q == -1 (mod 2^64).
const INV: u64 = 0xffff_fffe_ffff_ffff;
/// R = 2^256 mod q
// This is the Montgomery representation of 1.
const R: Scalar = Scalar([
0x0000_0001_ffff_fffe,
0x5884_b7fa_0003_4802,
0x998c_4fef_ecbc_4ff5,
0x1824_b159_acc5_056f,
]);
/// R^2 = 2^512 mod q
// Multiplying a raw integer by R2 (with Montgomery multiplication)
// converts it into Montgomery form.
pub const R2: Scalar = Scalar([
0xc999_e990_f3f2_9c6d,
0x2b6c_edcb_8792_5c23,
0x05d3_1496_7254_398f,
0x0748_d9d9_9f59_ff11,
]);
/// R^3 = 2^768 mod q
// Used by `from_u512` to fold in the upper 256 bits of a wide integer.
const R3: Scalar = Scalar([
0xc62c_1807_439b_73af,
0x1b3e_0d18_8cf0_6990,
0x73d1_3c71_c7b5_f418,
0x6e2a_5bb9_c8db_33e9,
]);
/// 2^-1
const TWO_INV: Scalar = Scalar([
0x0000_0000_ffff_ffff,
0xac42_5bfd_0001_a401,
0xccc6_27f7_f65e_27fa,
0x0c12_58ac_d662_82b7,
]);
// 2^S * t = MODULUS - 1 with t odd
const S: u32 = 32;
/// GENERATOR^t where t * 2^s + 1 = q
/// with t odd. In other words, this
/// is a 2^s root of unity.
///
/// `GENERATOR = 7 mod q` is a generator
/// of the q - 1 order multiplicative
/// subgroup.
const ROOT_OF_UNITY: Scalar = Scalar([
0xb9b5_8d8c_5f0e_466a,
0x5b1b_4c80_1819_d7ec,
0x0af5_3ae3_52a3_1e64,
0x5bf3_adda_19e9_b27b,
]);
/// ROOT_OF_UNITY^-1
const ROOT_OF_UNITY_INV: Scalar = Scalar([
0x4256_481a_dcf3_219a,
0x45f3_7b7f_96b6_cad3,
0xf9c3_f1d7_5f7a_3b27,
0x2d2f_c049_658a_fd43,
]);
/// GENERATOR^{2^s} where t * 2^s + 1 = q with t odd.
/// In other words, this is a t root of unity.
const DELTA: Scalar = Scalar([
0x70e3_10d3_d146_f96a,
0x4b64_c089_19e2_99e6,
0x51e1_1418_6a8b_970d,
0x6185_d066_27c0_67cb,
]);
impl Default for Scalar {
#[inline]
fn default() -> Self {
// The default scalar is zero (also the all-zero bit pattern, which is
// what makes the zeroize impl below valid).
Self::zero()
}
}
#[cfg(feature = "zeroize")]
impl zeroize::DefaultIsZeroes for Scalar {}
impl Scalar {
/// Returns zero, the additive identity.
#[inline]
pub const fn zero() -> Scalar {
// Zero is zero in Montgomery form too.
Scalar([0, 0, 0, 0])
}
/// Returns one, the multiplicative identity.
#[inline]
pub const fn one() -> Scalar {
// R = 2^256 mod q is the Montgomery representation of 1.
R
}
/// Doubles this field element.
#[inline]
pub const fn double(&self) -> Scalar {
// TODO: This can be achieved more efficiently with a bitshift.
self.add(self)
}
/// Attempts to convert a little-endian byte representation of
/// a scalar into a `Scalar`, failing if the input is not canonical.
///
/// Constant-time: the canonicity check and the returned CtOption never
/// branch on the input value.
pub fn from_bytes(bytes: &[u8; 32]) -> CtOption<Scalar> {
let mut tmp = Scalar([0, 0, 0, 0]);
tmp.0[0] = u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[0..8]).unwrap());
tmp.0[1] = u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[8..16]).unwrap());
tmp.0[2] = u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[16..24]).unwrap());
tmp.0[3] = u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[24..32]).unwrap());
// Try to subtract the modulus
let (_, borrow) = sbb(tmp.0[0], MODULUS.0[0], 0);
let (_, borrow) = sbb(tmp.0[1], MODULUS.0[1], borrow);
let (_, borrow) = sbb(tmp.0[2], MODULUS.0[2], borrow);
let (_, borrow) = sbb(tmp.0[3], MODULUS.0[3], borrow);
// If the element is smaller than MODULUS then the
// subtraction will underflow, producing a borrow value
// of 0xffff...ffff. Otherwise, it'll be zero.
let is_some = (borrow as u8) & 1;
// Convert to Montgomery form by computing
// (a.R^0 * R^2) / R = a.R
tmp *= &R2;
CtOption::new(tmp, Choice::from(is_some))
}
/// Converts an element of `Scalar` into a byte representation in
/// little-endian byte order.
pub fn to_bytes(&self) -> [u8; 32] {
// Turn into canonical form by computing
// (a.R) / R = a
// (a Montgomery reduction with zero high limbs divides by R).
let tmp = Scalar::montgomery_reduce(self.0[0], self.0[1], self.0[2], self.0[3], 0, 0, 0, 0);
let mut res = [0; 32];
res[0..8].copy_from_slice(&tmp.0[0].to_le_bytes());
res[8..16].copy_from_slice(&tmp.0[1].to_le_bytes());
res[16..24].copy_from_slice(&tmp.0[2].to_le_bytes());
res[24..32].copy_from_slice(&tmp.0[3].to_le_bytes());
res
}
/// Converts a 512-bit little endian integer into
/// a `Scalar` by reducing by the modulus.
pub fn from_bytes_wide(bytes: &[u8; 64]) -> Scalar {
Scalar::from_u512([
u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[0..8]).unwrap()),
u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[8..16]).unwrap()),
u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[16..24]).unwrap()),
u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[24..32]).unwrap()),
u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[32..40]).unwrap()),
u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[40..48]).unwrap()),
u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[48..56]).unwrap()),
u64::from_le_bytes(<[u8; 8]>::try_from(&bytes[56..64]).unwrap()),
])
}
// Reduces a 512-bit little-endian integer modulo q.
fn from_u512(limbs: [u64; 8]) -> Scalar {
// We reduce an arbitrary 512-bit number by decomposing it into two 256-bit digits
// with the higher bits multiplied by 2^256. Thus, we perform two reductions
//
// 1. the lower bits are multiplied by R^2, as normal
// 2. the upper bits are multiplied by R^2 * 2^256 = R^3
//
// and computing their sum in the field. It remains to see that arbitrary 256-bit
// numbers can be placed into Montgomery form safely using the reduction. The
// reduction works so long as the product is less than R=2^256 multiplied by
// the modulus. This holds because for any `c` smaller than the modulus, we have
// that (2^256 - 1)*c is an acceptable product for the reduction. Therefore, the
// reduction always works so long as `c` is in the field; in this case it is either the
// constant `R2` or `R3`.
let d0 = Scalar([limbs[0], limbs[1], limbs[2], limbs[3]]);
let d1 = Scalar([limbs[4], limbs[5], limbs[6], limbs[7]]);
// Convert to Montgomery form
d0 * R2 + d1 * R3
}
/// Converts from an integer represented in little endian
/// into its (congruent) `Scalar` representation.
pub const fn from_raw(val: [u64; 4]) -> Self {
// Montgomery-multiplying by R^2 both reduces mod q and converts into
// Montgomery form.
(&Scalar(val)).mul(&R2)
}
/// Squares this element.
///
/// Schoolbook squaring: cross products are computed once and doubled via
/// the shift cascade, then the result is Montgomery-reduced.
#[inline]
pub const fn square(&self) -> Scalar {
let (r1, carry) = mac(0, self.0[0], self.0[1], 0);
let (r2, carry) = mac(0, self.0[0], self.0[2], carry);
let (r3, r4) = mac(0, self.0[0], self.0[3], carry);
let (r3, carry) = mac(r3, self.0[1], self.0[2], 0);
let (r4, r5) = mac(r4, self.0[1], self.0[3], carry);
let (r5, r6) = mac(r5, self.0[2], self.0[3], 0);
let r7 = r6 >> 63;
let r6 = (r6 << 1) | (r5 >> 63);
let r5 = (r5 << 1) | (r4 >> 63);
let r4 = (r4 << 1) | (r3 >> 63);
let r3 = (r3 << 1) | (r2 >> 63);
let r2 = (r2 << 1) | (r1 >> 63);
let r1 = r1 << 1;
let (r0, carry) = mac(0, self.0[0], self.0[0], 0);
let (r1, carry) = adc(0, r1, carry);
let (r2, carry) = mac(r2, self.0[1], self.0[1], carry);
let (r3, carry) = adc(0, r3, carry);
let (r4, carry) = mac(r4, self.0[2], self.0[2], carry);
let (r5, carry) = adc(0, r5, carry);
let (r6, carry) = mac(r6, self.0[3], self.0[3], carry);
let (r7, _) = adc(0, r7, carry);
Scalar::montgomery_reduce(r0, r1, r2, r3, r4, r5, r6, r7)
}
/// Exponentiates `self` by `by`, where `by` is a
/// little-endian order integer exponent.
///
/// Constant-time: every bit performs a square and a conditionally-assigned
/// multiply, regardless of the bit's value.
pub fn pow(&self, by: &[u64; 4]) -> Self {
let mut res = Self::one();
for e in by.iter().rev() {
for i in (0..64).rev() {
res = res.square();
let mut tmp = res;
tmp *= self;
res.conditional_assign(&tmp, (((*e >> i) & 0x1) as u8).into());
}
}
res
}
/// Exponentiates `self` by `by`, where `by` is a
/// little-endian order integer exponent.
///
/// **This operation is variable time with respect
/// to the exponent.** If the exponent is fixed,
/// this operation is effectively constant time.
pub fn pow_vartime(&self, by: &[u64; 4]) -> Self {
    // Left-to-right square-and-multiply, scanning limbs from the most
    // significant downwards. Skipping the multiply on zero bits is what
    // makes this variable-time.
    let mut acc = Self::one();
    for limb in by.iter().rev() {
        for bit in (0..64).rev() {
            acc = acc.square();
            if (*limb >> bit) & 1 == 1 {
                acc = acc.mul(self);
            }
        }
    }
    acc
}
/// Computes the multiplicative inverse of this element,
/// failing if the element is zero.
pub fn invert(&self) -> CtOption<Self> {
#[inline(always)]
fn square_assign_multi(n: &mut Scalar, num_times: usize) {
for _ in 0..num_times {
*n = n.square();
}
}
// found using https://github.com/kwantam/addchain
let mut t0 = self.square();
let mut t1 = t0 * self;
let mut t16 = t0.square();
let mut t6 = t16.square();
let mut t5 = t6 * t0;
t0 = t6 * t16;
let mut t12 = t5 * t16;
let mut t2 = t6.square();
let mut t7 = t5 * t6;
let mut t15 = t0 * t5;
let mut t17 = t12.square();
t1 *= t17;
let mut t3 = t7 * t2;
let t8 = t1 * t17;
let t4 = t8 * t2;
let t9 = t8 * t7;
t7 = t4 * t5;
let t11 = t4 * t17;
t5 = t9 * t17;
let t14 = t7 * t15;
let t13 = t11 * t12;
t12 = t11 * t17;
t15 *= &t12;
t16 *= &t15;
t3 *= &t16;
t17 *= &t3;
t0 *= &t17;
t6 *= &t0;
t2 *= &t6;
square_assign_multi(&mut t0, 8);
t0 *= &t17;
square_assign_multi(&mut t0, 9);
t0 *= &t16;
square_assign_multi(&mut t0, 9);
t0 *= &t15;
square_assign_multi(&mut t0, 9);
t0 *= &t15;
square_assign_multi(&mut t0, 7);
t0 *= &t14;
square_assign_multi(&mut t0, 7);
t0 *= &t13;
square_assign_multi(&mut t0, 10);
t0 *= &t12;
square_assign_multi(&mut t0, 9);
t0 *= &t11;
square_assign_multi(&mut t0, 8);
t0 *= &t8;
square_assign_multi(&mut t0, 8);
t0 *= self;
square_assign_multi(&mut t0, 14);
t0 *= &t9;
square_assign_multi(&mut t0, 10);
t0 *= &t8;
square_assign_multi(&mut t0, 15);
t0 *= &t7;
square_assign_multi(&mut t0, 10);
t0 *= &t6;
square_assign_multi(&mut t0, 8);
t0 *= &t5;
square_assign_multi(&mut t0, 16);
t0 *= &t3;
square_assign_multi(&mut t0, 8);
t0 *= &t2;
square_assign_multi(&mut t0, 7);
t0 *= &t4;
square_assign_multi(&mut t0, 9);
t0 *= &t2;
square_assign_multi(&mut t0, 8);
t0 *= &t3;
square_assign_multi(&mut t0, 8);
t0 *= &t2;
square_assign_multi(&mut t0, 8);
t0 *= &t2;
square_assign_multi(&mut t0, 8);
t0 *= &t2;
square_assign_multi(&mut t0, 8);
t0 *= &t3;
square_assign_multi(&mut t0, 8);
t0 *= &t2;
square_assign_multi(&mut t0, 8);
t0 *= &t2;
square_assign_multi(&mut t0, 5);
t0 *= &t1;
square_assign_multi(&mut t0, 5);
t0 *= &t1;
CtOption::new(t0, !self.ct_eq(&Self::zero()))
}
#[inline(always)]
pub const fn montgomery_reduce(
r0: u64,
r1: u64,
r2: u64,
r3: u64,
r4: u64,
r5: u64,
r6: u64,
r7: u64,
) -> Self {
// The Montgomery reduction here is based on Algorithm 14.32 in
// Handbook of Applied Cryptography
// <http://cacr.uwaterloo.ca/hac/about/chap14.pdf>.
let k = r0.wrapping_mul(INV);
let (_, carry) = mac(r0, k, MODULUS.0[0], 0);
let (r1, carry) = mac(r1, k, MODULUS.0[1], carry);
let (r2, carry) = mac(r2, k, MODULUS.0[2], carry);
let (r3, carry) = mac(r3, k, MODULUS.0[3], carry);
let (r4, carry2) = adc(r4, 0, carry);
let k = r1.wrapping_mul(INV);
let (_, carry) = mac(r1, k, MODULUS.0[0], 0);
let (r2, carry) = mac(r2, k, MODULUS.0[1], carry);
let (r3, carry) = mac(r3, k, MODULUS.0[2], carry);
let (r4, carry) = mac(r4, k, MODULUS.0[3], carry);
let (r5, carry2) = adc(r5, carry2, carry);
let k = r2.wrapping_mul(INV);
let (_, carry) = mac(r2, k, MODULUS.0[0], 0);
let (r3, carry) = mac(r3, k, MODULUS.0[1], carry);
let (r4, carry) = mac(r4, k, MODULUS.0[2], carry);
let (r5, carry) = mac(r5, k, MODULUS.0[3], carry);
let (r6, carry2) = adc(r6, carry2, carry);
let k = r3.wrapping_mul(INV);
let (_, carry) = mac(r3, k, MODULUS.0[0], 0);
let (r4, carry) = mac(r4, k, MODULUS.0[1], carry);
let (r5, carry) = mac(r5, k, MODULUS.0[2], carry);
let (r6, carry) = mac(r6, k, MODULUS.0[3], carry);
let (r7, _) = adc(r7, carry2, carry);
// Result may be within MODULUS of the correct value
(&Scalar([r4, r5, r6, r7])).sub(&MODULUS)
}
/// Multiplies `rhs` by `self`, returning the result.
///
/// Both operands are in Montgomery form, so the raw 512-bit schoolbook
/// product (limbs `r0..r7`) carries an extra factor of R, which the final
/// Montgomery reduction divides out again.
#[inline]
pub const fn mul(&self, rhs: &Self) -> Self {
// Schoolbook multiplication
// Row 0: self.0[0] * rhs, starting the 8-limb accumulator.
let (r0, carry) = mac(0, self.0[0], rhs.0[0], 0);
let (r1, carry) = mac(0, self.0[0], rhs.0[1], carry);
let (r2, carry) = mac(0, self.0[0], rhs.0[2], carry);
let (r3, r4) = mac(0, self.0[0], rhs.0[3], carry);
// Row 1: accumulate self.0[1] * rhs, shifted by one limb.
let (r1, carry) = mac(r1, self.0[1], rhs.0[0], 0);
let (r2, carry) = mac(r2, self.0[1], rhs.0[1], carry);
let (r3, carry) = mac(r3, self.0[1], rhs.0[2], carry);
let (r4, r5) = mac(r4, self.0[1], rhs.0[3], carry);
// Row 2: accumulate self.0[2] * rhs, shifted by two limbs.
let (r2, carry) = mac(r2, self.0[2], rhs.0[0], 0);
let (r3, carry) = mac(r3, self.0[2], rhs.0[1], carry);
let (r4, carry) = mac(r4, self.0[2], rhs.0[2], carry);
let (r5, r6) = mac(r5, self.0[2], rhs.0[3], carry);
// Row 3: accumulate self.0[3] * rhs, shifted by three limbs.
let (r3, carry) = mac(r3, self.0[3], rhs.0[0], 0);
let (r4, carry) = mac(r4, self.0[3], rhs.0[1], carry);
let (r5, carry) = mac(r5, self.0[3], rhs.0[2], carry);
let (r6, r7) = mac(r6, self.0[3], rhs.0[3], carry);
// Reduce the double-width product back to 4 canonical limbs.
Scalar::montgomery_reduce(r0, r1, r2, r3, r4, r5, r6, r7)
}
/// Subtracts `rhs` from `self`, returning the result.
///
/// Constant-time: a borrow-propagating subtraction followed by an
/// unconditional masked add-back of the modulus (the mask is all-ones
/// only when the subtraction underflowed).
#[inline]
pub const fn sub(&self, rhs: &Self) -> Self {
let (d0, borrow) = sbb(self.0[0], rhs.0[0], 0);
let (d1, borrow) = sbb(self.0[1], rhs.0[1], borrow);
let (d2, borrow) = sbb(self.0[2], rhs.0[2], borrow);
let (d3, borrow) = sbb(self.0[3], rhs.0[3], borrow);
// If underflow occurred on the final limb, borrow = 0xfff...fff, otherwise
// borrow = 0x000...000. Thus, we use it as a mask to conditionally add the modulus.
let (d0, carry) = adc(d0, MODULUS.0[0] & borrow, 0);
let (d1, carry) = adc(d1, MODULUS.0[1] & borrow, carry);
let (d2, carry) = adc(d2, MODULUS.0[2] & borrow, carry);
// The final carry can be discarded: the add-back at most restores the
// value into the canonical range [0, MODULUS).
let (d3, _) = adc(d3, MODULUS.0[3] & borrow, carry);
Scalar([d0, d1, d2, d3])
}
/// Adds `rhs` to `self`, returning the result.
#[inline]
pub const fn add(&self, rhs: &Self) -> Self {
    // Limb-wise addition with carry propagation. The carry out of the
    // top limb can be dropped: both inputs are canonical (< MODULUS,
    // which is well below 2^255), so their raw sum fits in 4 limbs.
    let (s0, c) = adc(self.0[0], rhs.0[0], 0);
    let (s1, c) = adc(self.0[1], rhs.0[1], c);
    let (s2, c) = adc(self.0[2], rhs.0[2], c);
    let (s3, _) = adc(self.0[3], rhs.0[3], c);
    // `sub` performs the conditional reduction, folding the raw sum
    // back into the canonical range [0, MODULUS).
    (&Scalar([s0, s1, s2, s3])).sub(&MODULUS)
}
/// Negates `self`.
#[inline]
pub const fn neg(&self) -> Self {
    // Compute MODULUS - self. The final borrow is always zero because
    // `self` is a canonical field element (strictly less than MODULUS).
    let (n0, b) = sbb(MODULUS.0[0], self.0[0], 0);
    let (n1, b) = sbb(MODULUS.0[1], self.0[1], b);
    let (n2, b) = sbb(MODULUS.0[2], self.0[2], b);
    let (n3, _) = sbb(MODULUS.0[3], self.0[3], b);
    // When self == 0 the subtraction yields MODULUS, which is not a
    // canonical encoding of zero. Build a mask that is all-zero exactly
    // in that case (all-ones otherwise) and AND it in, so -0 == 0.
    let input_is_zero = ((self.0[0] | self.0[1] | self.0[2] | self.0[3]) == 0) as u64;
    let keep = input_is_zero.wrapping_sub(1);
    Scalar([n0 & keep, n1 & keep, n2 & keep, n3 & keep])
}
}
impl From<Scalar> for [u8; 32] {
    /// Converts the scalar into its canonical little-endian byte encoding.
    fn from(value: Scalar) -> Self {
        value.to_bytes()
    }
}
impl<'a> From<&'a Scalar> for [u8; 32] {
    /// Converts a borrowed scalar into its canonical little-endian byte
    /// encoding without consuming it.
    fn from(value: &'a Scalar) -> Self {
        value.to_bytes()
    }
}
// Hooks `Scalar` into the `ff` crate's generic `Field` abstraction by
// delegating to the inherent constant-time implementations.
impl Field for Scalar {
const ZERO: Self = Self::zero();
const ONE: Self = Self::one();
// Uniform sampling: draw 512 random bits and reduce mod q, which keeps
// the modular bias negligible.
fn random(mut rng: impl RngCore) -> Self {
let mut buf = [0; 64];
rng.fill_bytes(&mut buf);
Self::from_bytes_wide(&buf)
}
fn square(&self) -> Self {
self.square()
}
fn double(&self) -> Self {
self.double()
}
fn invert(&self) -> CtOption<Self> {
self.invert()
}
fn sqrt_ratio(num: &Self, div: &Self) -> (Choice, Self) {
ff::helpers::sqrt_ratio_generic(num, div)
}
// Tonelli-Shanks square root; the limbs below encode the exponent
// (t - 1) / 2 used by the generic helper.
fn sqrt(&self) -> CtOption<Self> {
// (t - 1) // 2 = 6104339283789297388802252303364915521546564123189034618274734669823
ff::helpers::sqrt_tonelli_shanks(
self,
&[
0x7fff_2dff_7fff_ffff,
0x04d0_ec02_a9de_d201,
0x94ce_bea4_199c_ec04,
0x0000_0000_39f6_d3a9,
],
)
}
// Variable-time zero check; sound because the Montgomery representation
// of zero is unique (all limbs zero).
fn is_zero_vartime(&self) -> bool {
self.0 == Self::zero().0
}
}
// Prime-field metadata and byte (de)serialization for the `ff` crate.
impl PrimeField for Scalar {
type Repr = [u8; 32];
// Rejects non-canonical encodings (values >= the modulus) in constant time.
fn from_repr(r: Self::Repr) -> CtOption<Self> {
Self::from_bytes(&r)
}
fn to_repr(&self) -> Self::Repr {
self.to_bytes()
}
// Parity of the canonical (non-Montgomery) integer value: the least
// significant bit of the little-endian encoding.
fn is_odd(&self) -> Choice {
Choice::from(self.to_bytes()[0] & 1)
}
const MODULUS: &'static str =
"0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001";
const NUM_BITS: u32 = MODULUS_BITS;
const CAPACITY: u32 = Self::NUM_BITS - 1;
const TWO_INV: Self = TWO_INV;
const MULTIPLICATIVE_GENERATOR: Self = GENERATOR;
// S is the 2-adicity of q - 1; ROOT_OF_UNITY generates the 2^S-torsion.
const S: u32 = S;
const ROOT_OF_UNITY: Self = ROOT_OF_UNITY;
const ROOT_OF_UNITY_INV: Self = ROOT_OF_UNITY_INV;
const DELTA: Self = DELTA;
}
// Bit-representation limbs for `PrimeFieldBits`, sized to the native
// word: eight u32 limbs on 32-bit targets, four u64 limbs on 64-bit.
#[cfg(all(feature = "bits", not(target_pointer_width = "64")))]
type ReprBits = [u32; 8];
#[cfg(all(feature = "bits", target_pointer_width = "64"))]
type ReprBits = [u64; 4];
#[cfg(feature = "bits")]
impl PrimeFieldBits for Scalar {
type ReprBits = ReprBits;
fn to_le_bits(&self) -> FieldBits<Self::ReprBits> {
let bytes = self.to_bytes();
#[cfg(not(target_pointer_width = "64"))]
let limbs = [
u32::from_le_bytes(bytes[0..4].try_into().unwrap()),
u32::from_le_bytes(bytes[4..8].try_into().unwrap()),
u32::from_le_bytes(bytes[8..12].try_into().unwrap()),
u32::from_le_bytes(bytes[12..16].try_into().unwrap()),
u32::from_le_bytes(bytes[16..20].try_into().unwrap()),
u32::from_le_bytes(bytes[20..24].try_into().unwrap()),
u32::from_le_bytes(bytes[24..28].try_into().unwrap()),
u32::from_le_bytes(bytes[28..32].try_into().unwrap()),
];
#[cfg(target_pointer_width = "64")]
let limbs = [
u64::from_le_bytes(bytes[0..8].try_into().unwrap()),
u64::from_le_bytes(bytes[8..16].try_into().unwrap()),
u64::from_le_bytes(bytes[16..24].try_into().unwrap()),
u64::from_le_bytes(bytes[24..32].try_into().unwrap()),
];
FieldBits::new(limbs)
}
fn char_le_bits() -> FieldBits<Self::ReprBits> {
#[cfg(not(target_pointer_width = "64"))]
{
FieldBits::new(MODULUS_LIMBS_32)
}
#[cfg(target_pointer_width = "64")]
FieldBits::new(MODULUS.0)
}
}
impl<T> core::iter::Sum<T> for Scalar
where
    T: core::borrow::Borrow<Scalar>,
{
    /// Sums an iterator of scalars (or anything borrowable as a scalar),
    /// starting from the additive identity.
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = T>,
    {
        let mut total = Self::zero();
        for term in iter {
            total = total + term.borrow();
        }
        total
    }
}
impl<T> core::iter::Product<T> for Scalar
where
    T: core::borrow::Borrow<Scalar>,
{
    /// Multiplies together an iterator of scalars (or anything borrowable
    /// as a scalar), starting from the multiplicative identity.
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = T>,
    {
        let mut total = Self::one();
        for factor in iter {
            total = total * factor.borrow();
        }
        total
    }
}
// Sanity-checks the precomputed field constants against their definitions.
#[test]
fn test_constants() {
assert_eq!(
Scalar::MODULUS,
"0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001",
);
assert_eq!(Scalar::from(2) * Scalar::TWO_INV, Scalar::ONE);
assert_eq!(
Scalar::ROOT_OF_UNITY * Scalar::ROOT_OF_UNITY_INV,
Scalar::ONE,
);
// ROOT_OF_UNITY^{2^s} mod m == 1
assert_eq!(
Scalar::ROOT_OF_UNITY.pow(&[1u64 << Scalar::S, 0, 0, 0]),
Scalar::ONE,
);
// DELTA^{t} mod m == 1
assert_eq!(
Scalar::DELTA.pow(&[
0xfffe_5bfe_ffff_ffff,
0x09a1_d805_53bd_a402,
0x299d_7d48_3339_d808,
0x0000_0000_73ed_a753,
]),
Scalar::ONE,
);
}
// Recomputes INV = -(q^{-1} mod 2^64) via Newton/exponentiation and
// compares it with the hard-coded constant.
#[test]
fn test_inv() {
// Compute -(q^{-1} mod 2^64) mod 2^64 by exponentiating
// by totient(2**64) - 1
let mut inv = 1u64;
for _ in 0..63 {
inv = inv.wrapping_mul(inv);
inv = inv.wrapping_mul(MODULUS.0[0]);
}
inv = inv.wrapping_neg();
assert_eq!(inv, INV);
}
// Debug formatting must print the canonical (non-Montgomery) hex value.
#[test]
fn test_debug() {
assert_eq!(
format!("{:?}", Scalar::zero()),
"0x0000000000000000000000000000000000000000000000000000000000000000"
);
assert_eq!(
format!("{:?}", Scalar::one()),
"0x0000000000000000000000000000000000000000000000000000000000000001"
);
assert_eq!(
format!("{:?}", R2),
"0x1824b159acc5056f998c4fefecbc4ff55884b7fa0003480200000001fffffffe"
);
}
// Equality is reflexive and distinguishes distinct elements.
#[test]
fn test_equality() {
assert_eq!(Scalar::zero(), Scalar::zero());
assert_eq!(Scalar::one(), Scalar::one());
assert_eq!(R2, R2);
assert!(Scalar::zero() != Scalar::one());
assert!(Scalar::one() != R2);
}
// Serialization round-trips for known values (little-endian, canonical).
#[test]
fn test_to_bytes() {
assert_eq!(
Scalar::zero().to_bytes(),
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0
]
);
assert_eq!(
Scalar::one().to_bytes(),
[
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0
]
);
assert_eq!(
R2.to_bytes(),
[
254, 255, 255, 255, 1, 0, 0, 0, 2, 72, 3, 0, 250, 183, 132, 88, 245, 79, 188, 236, 239,
79, 140, 153, 111, 5, 197, 172, 89, 177, 36, 24
]
);
assert_eq!(
(-&Scalar::one()).to_bytes(),
[
0, 0, 0, 0, 255, 255, 255, 255, 254, 91, 254, 255, 2, 164, 189, 83, 5, 216, 161, 9, 8,
216, 57, 51, 72, 125, 157, 41, 83, 167, 237, 115
]
);
}
#[test]
fn test_from_bytes() {
assert_eq!(
Scalar::from_bytes(&[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0
])
.unwrap(),
Scalar::zero()
);
assert_eq!(
Scalar::from_bytes(&[
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0
])
.unwrap(),
Scalar::one()
);
assert_eq!(
Scalar::from_bytes(&[
254, 255, 255, 255, 1, 0, 0, 0, 2, 72, 3, 0, 250, 183, 132, 88, 245, 79, 188, 236, 239,
79, 140, 153, 111, 5, 197, 172, 89, 177, 36, 24
])
.unwrap(),
R2
);
// -1 should work
assert!(bool::from(
Scalar::from_bytes(&[
0, 0, 0, 0, 255, 255, 255, 255, 254, 91, 254, 255, 2, 164, 189, 83, 5, 216, 161, 9, 8,
216, 57, 51, 72, 125, 157, 41, 83, 167, 237, 115
])
.is_some()
));
// modulus is invalid
assert!(bool::from(
Scalar::from_bytes(&[
1, 0, 0, 0, 255, 255, 255, 255, 254, 91, 254, 255, 2, 164, 189, 83, 5, 216, 161, 9, 8,
216, 57, 51, 72, 125, 157, 41, 83, 167, 237, 115
])
.is_none()
));
// Anything larger than the modulus is invalid
assert!(bool::from(
Scalar::from_bytes(&[
2, 0, 0, 0, 255, 255, 255, 255, 254, 91, 254, 255, 2, 164, 189, 83, 5, 216, 161, 9, 8,
216, 57, 51, 72, 125, 157, 41, 83, 167, 237, 115
])
.is_none()
));
assert!(bool::from(
Scalar::from_bytes(&[
1, 0, 0, 0, 255, 255, 255, 255, 254, 91, 254, 255, 2, 164, 189, 83, 5, 216, 161, 9, 8,
216, 58, 51, 72, 125, 157, 41, 83, 167, 237, 115
])
.is_none()
));
assert!(bool::from(
Scalar::from_bytes(&[
1, 0, 0, 0, 255, 255, 255, 255, 254, 91, 254, 255, 2, 164, 189, 83, 5, 216, 161, 9, 8,
216, 57, 51, 72, 125, 157, 41, 83, 167, 237, 116
])
.is_none()
));
}
#[test]
fn test_from_u512_zero() {
assert_eq!(
Scalar::zero(),
Scalar::from_u512([
MODULUS.0[0],
MODULUS.0[1],
MODULUS.0[2],
MODULUS.0[3],
0,
0,
0,
0
])
);
}
#[test]
fn test_from_u512_r() {
assert_eq!(R, Scalar::from_u512([1, 0, 0, 0, 0, 0, 0, 0]));
}
#[test]
fn test_from_u512_r2() {
assert_eq!(R2, Scalar::from_u512([0, 0, 0, 0, 1, 0, 0, 0]));
}
#[test]
fn test_from_u512_max() {
let max_u64 = 0xffff_ffff_ffff_ffff;
assert_eq!(
R3 - R,
Scalar::from_u512([max_u64, max_u64, max_u64, max_u64, max_u64, max_u64, max_u64, max_u64])
);
}
#[test]
fn test_from_bytes_wide_r2() {
assert_eq!(
R2,
Scalar::from_bytes_wide(&[
254, 255, 255, 255, 1, 0, 0, 0, 2, 72, 3, 0, 250, 183, 132, 88, 245, 79, 188, 236, 239,
79, 140, 153, 111, 5, 197, 172, 89, 177, 36, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
])
);
}
#[test]
fn test_from_bytes_wide_negative_one() {
assert_eq!(
-&Scalar::one(),
Scalar::from_bytes_wide(&[
0, 0, 0, 0, 255, 255, 255, 255, 254, 91, 254, 255, 2, 164, 189, 83, 5, 216, 161, 9, 8,
216, 57, 51, 72, 125, 157, 41, 83, 167, 237, 115, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
])
);
}
#[test]
fn test_from_bytes_wide_maximum() {
assert_eq!(
Scalar([
0xc62c_1805_439b_73b1,
0xc2b9_551e_8ced_218e,
0xda44_ec81_daf9_a422,
0x5605_aa60_1c16_2e79,
]),
Scalar::from_bytes_wide(&[0xff; 64])
);
}
// Zero is absorbing/neutral for the four basic operations.
#[test]
fn test_zero() {
assert_eq!(Scalar::zero(), -&Scalar::zero());
assert_eq!(Scalar::zero(), Scalar::zero() + Scalar::zero());
assert_eq!(Scalar::zero(), Scalar::zero() - Scalar::zero());
assert_eq!(Scalar::zero(), Scalar::zero() * Scalar::zero());
}
// The largest canonical element, MODULUS - 1 (i.e. -1 mod q).
#[cfg(test)]
const LARGEST: Scalar = Scalar([
0xffff_ffff_0000_0000,
0x53bd_a402_fffe_5bfe,
0x3339_d808_09a1_d805,
0x73ed_a753_299d_7d48,
]);
// Addition wraps correctly at the modulus boundary.
#[test]
fn test_addition() {
let mut tmp = LARGEST;
tmp += &LARGEST;
assert_eq!(
tmp,
Scalar([
0xffff_fffe_ffff_ffff,
0x53bd_a402_fffe_5bfe,
0x3339_d808_09a1_d805,
0x73ed_a753_299d_7d48,
])
);
let mut tmp = LARGEST;
tmp += &Scalar([1, 0, 0, 0]);
assert_eq!(tmp, Scalar::zero());
}
// Negation: -(q-1) == 1, -0 == 0, -1 == q-1.
#[test]
fn test_negation() {
let tmp = -&LARGEST;
assert_eq!(tmp, Scalar([1, 0, 0, 0]));
let tmp = -&Scalar::zero();
assert_eq!(tmp, Scalar::zero());
let tmp = -&Scalar([1, 0, 0, 0]);
assert_eq!(tmp, LARGEST);
}
// Subtraction, including the underflow (add-back) path.
#[test]
fn test_subtraction() {
let mut tmp = LARGEST;
tmp -= &LARGEST;
assert_eq!(tmp, Scalar::zero());
let mut tmp = Scalar::zero();
tmp -= &LARGEST;
let mut tmp2 = MODULUS;
tmp2 -= &LARGEST;
assert_eq!(tmp, tmp2);
}
#[test]
fn test_multiplication() {
let mut cur = LARGEST;
for _ in 0..100 {
let mut tmp = cur;
tmp *= &cur;
let mut tmp2 = Scalar::zero();
for b in cur
.to_bytes()
.iter()
.rev()
.flat_map(|byte| (0..8).rev().map(move |i| ((byte >> i) & 1u8) == 1u8))
{
let tmp3 = tmp2;
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | true |
//! This module provides an implementation of the BLS12-381 base field `GF(p)`
//! where `p = 0x1a0111ea397fe69a4b1ba7b6434bacd764774b84f38512bf6730d2a0f6b0f6241eabfffeb153ffffb9feffffffffaaab`
#![allow(clippy::all)]
use core::fmt;
use core::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign};
use rand_core::RngCore;
use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption};
use crate::util::{adc, mac, sbb};
// The internal representation of this type is six 64-bit unsigned
// integers in little-endian order. `Fp` values are always in
// Montgomery form; i.e., Scalar(a) = aR mod p, with R = 2^384.
/// An element of the BLS12-381 base field GF(p): six little-endian
/// u64 limbs, kept in Montgomery form at all times.
#[derive(Copy, Clone)]
pub struct Fp(pub [u64; 6]);
impl fmt::Debug for Fp {
    /// Prints the element as `0x`-prefixed big-endian hex of its
    /// canonical (non-Montgomery) byte encoding.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "0x")?;
        let encoded = self.to_bytes();
        for byte in encoded.iter() {
            write!(f, "{:02x}", byte)?;
        }
        Ok(())
    }
}
impl Default for Fp {
fn default() -> Self {
Fp::zero()
}
}
#[cfg(feature = "zeroize")]
impl zeroize::DefaultIsZeroes for Fp {}
// Limb-wise constant-time equality. Sound because elements are always
// kept fully reduced, so the limb representation is unique.
impl ConstantTimeEq for Fp {
fn ct_eq(&self, other: &Self) -> Choice {
self.0[0].ct_eq(&other.0[0])
& self.0[1].ct_eq(&other.0[1])
& self.0[2].ct_eq(&other.0[2])
& self.0[3].ct_eq(&other.0[3])
& self.0[4].ct_eq(&other.0[4])
& self.0[5].ct_eq(&other.0[5])
}
}
impl Eq for Fp {}
// Variable-time convenience equality, delegating to the constant-time
// comparison above.
impl PartialEq for Fp {
#[inline]
fn eq(&self, other: &Self) -> bool {
bool::from(self.ct_eq(other))
}
}
// Branch-free selection between two elements, limb by limb.
impl ConditionallySelectable for Fp {
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
Fp([
u64::conditional_select(&a.0[0], &b.0[0], choice),
u64::conditional_select(&a.0[1], &b.0[1], choice),
u64::conditional_select(&a.0[2], &b.0[2], choice),
u64::conditional_select(&a.0[3], &b.0[3], choice),
u64::conditional_select(&a.0[4], &b.0[4], choice),
u64::conditional_select(&a.0[5], &b.0[5], choice),
])
}
}
/// p = 4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787
const MODULUS: [u64; 6] = [
0xb9fe_ffff_ffff_aaab,
0x1eab_fffe_b153_ffff,
0x6730_d2a0_f6b0_f624,
0x6477_4b84_f385_12bf,
0x4b1b_a7b6_434b_acd7,
0x1a01_11ea_397f_e69a,
];
/// INV = -(p^{-1} mod 2^64) mod 2^64
///
/// Per-limb multiplier used by the Montgomery reduction steps below.
const INV: u64 = 0x89f3_fffc_fffc_fffd;
/// R = 2^384 mod p
///
/// The Montgomery form of 1 (i.e. the multiplicative identity).
const R: Fp = Fp([
0x7609_0000_0002_fffd,
0xebf4_000b_c40c_0002,
0x5f48_9857_53c7_58ba,
0x77ce_5853_7052_5745,
0x5c07_1a97_a256_ec6d,
0x15f6_5ec3_fa80_e493,
]);
/// R2 = 2^(384*2) mod p
///
/// Multiplying by R2 (then reducing) converts a raw integer into Montgomery form.
const R2: Fp = Fp([
0xf4df_1f34_1c34_1746,
0x0a76_e6a6_09d1_04f1,
0x8de5_476c_4c95_b6d5,
0x67eb_88a9_939d_83c0,
0x9a79_3e85_b519_952d,
0x1198_8fe5_92ca_e3aa,
]);
/// R3 = 2^(384*3) mod p
///
/// Used when reducing 768-bit inputs (see `from_u768`).
const R3: Fp = Fp([
0xed48_ac6b_d94c_a1e0,
0x315f_831e_03a7_adf8,
0x9a53_352a_615e_29dd,
0x34c0_4e5e_921e_1761,
0x2512_d435_6572_4728,
0x0aa6_3460_9175_5d4d,
]);
// By-reference operator impls, each delegating to the inherent
// constant-time arithmetic defined on `Fp` below.
impl<'a> Neg for &'a Fp {
type Output = Fp;
#[inline]
fn neg(self) -> Fp {
self.neg()
}
}
impl Neg for Fp {
type Output = Fp;
#[inline]
fn neg(self) -> Fp {
-&self
}
}
impl<'a, 'b> Sub<&'b Fp> for &'a Fp {
type Output = Fp;
#[inline]
fn sub(self, rhs: &'b Fp) -> Fp {
self.sub(rhs)
}
}
impl<'a, 'b> Add<&'b Fp> for &'a Fp {
type Output = Fp;
#[inline]
fn add(self, rhs: &'b Fp) -> Fp {
self.add(rhs)
}
}
impl<'a, 'b> Mul<&'b Fp> for &'a Fp {
type Output = Fp;
#[inline]
fn mul(self, rhs: &'b Fp) -> Fp {
self.mul(rhs)
}
}
// Generate the remaining owned/borrowed operator permutations (and the
// `*Assign` forms) from the by-reference impls above.
impl_binops_additive!(Fp, Fp);
impl_binops_multiplicative!(Fp, Fp);
impl Fp {
/// Returns zero, the additive identity.
#[inline]
pub const fn zero() -> Fp {
Fp([0, 0, 0, 0, 0, 0])
}
/// Returns one, the multiplicative identity.
#[inline]
pub const fn one() -> Fp {
// In Montgomery form the identity is R = 2^384 mod p, not literal 1.
R
}
/// Constant-time check for the additive identity.
pub fn is_zero(&self) -> Choice {
self.ct_eq(&Fp::zero())
}
/// Attempts to convert a big-endian byte representation of
/// a field element into an `Fp`, failing if the input is not canonical
/// (i.e. not strictly less than the modulus).
pub fn from_bytes(bytes: &[u8; 48]) -> CtOption<Fp> {
let mut tmp = Fp([0, 0, 0, 0, 0, 0]);
// Big-endian input: the first 8 bytes are the most significant limb.
tmp.0[5] = u64::from_be_bytes(<[u8; 8]>::try_from(&bytes[0..8]).unwrap());
tmp.0[4] = u64::from_be_bytes(<[u8; 8]>::try_from(&bytes[8..16]).unwrap());
tmp.0[3] = u64::from_be_bytes(<[u8; 8]>::try_from(&bytes[16..24]).unwrap());
tmp.0[2] = u64::from_be_bytes(<[u8; 8]>::try_from(&bytes[24..32]).unwrap());
tmp.0[1] = u64::from_be_bytes(<[u8; 8]>::try_from(&bytes[32..40]).unwrap());
tmp.0[0] = u64::from_be_bytes(<[u8; 8]>::try_from(&bytes[40..48]).unwrap());
// Try to subtract the modulus
let (_, borrow) = sbb(tmp.0[0], MODULUS[0], 0);
let (_, borrow) = sbb(tmp.0[1], MODULUS[1], borrow);
let (_, borrow) = sbb(tmp.0[2], MODULUS[2], borrow);
let (_, borrow) = sbb(tmp.0[3], MODULUS[3], borrow);
let (_, borrow) = sbb(tmp.0[4], MODULUS[4], borrow);
let (_, borrow) = sbb(tmp.0[5], MODULUS[5], borrow);
// If the element is smaller than MODULUS then the
// subtraction will underflow, producing a borrow value
// of 0xffff...ffff. Otherwise, it'll be zero.
let is_some = (borrow as u8) & 1;
// Convert to Montgomery form by computing
// (a.R^0 * R^2) / R = a.R
tmp *= &R2;
CtOption::new(tmp, Choice::from(is_some))
}
/// Converts an element of `Fp` into a byte representation in
/// big-endian byte order.
pub fn to_bytes(self) -> [u8; 48] {
// Turn into canonical form by computing
// (a.R) / R = a
// (a Montgomery reduction with zero high limbs divides by R exactly once).
let tmp = Fp::montgomery_reduce(
self.0[0], self.0[1], self.0[2], self.0[3], self.0[4], self.0[5], 0, 0, 0, 0, 0, 0,
);
let mut res = [0; 48];
// Emit limbs most-significant first to produce big-endian output.
res[0..8].copy_from_slice(&tmp.0[5].to_be_bytes());
res[8..16].copy_from_slice(&tmp.0[4].to_be_bytes());
res[16..24].copy_from_slice(&tmp.0[3].to_be_bytes());
res[24..32].copy_from_slice(&tmp.0[2].to_be_bytes());
res[32..40].copy_from_slice(&tmp.0[1].to_be_bytes());
res[40..48].copy_from_slice(&tmp.0[0].to_be_bytes());
res
}
/// Samples a uniformly random field element: 768 random bits are drawn
/// and reduced mod p, keeping the modular bias negligible.
pub(crate) fn random(mut rng: impl RngCore) -> Fp {
let mut bytes = [0u8; 96];
rng.fill_bytes(&mut bytes);
// Parse the random bytes as a big-endian number, to match Fp encoding order.
Fp::from_u768([
u64::from_be_bytes(<[u8; 8]>::try_from(&bytes[0..8]).unwrap()),
u64::from_be_bytes(<[u8; 8]>::try_from(&bytes[8..16]).unwrap()),
u64::from_be_bytes(<[u8; 8]>::try_from(&bytes[16..24]).unwrap()),
u64::from_be_bytes(<[u8; 8]>::try_from(&bytes[24..32]).unwrap()),
u64::from_be_bytes(<[u8; 8]>::try_from(&bytes[32..40]).unwrap()),
u64::from_be_bytes(<[u8; 8]>::try_from(&bytes[40..48]).unwrap()),
u64::from_be_bytes(<[u8; 8]>::try_from(&bytes[48..56]).unwrap()),
u64::from_be_bytes(<[u8; 8]>::try_from(&bytes[56..64]).unwrap()),
u64::from_be_bytes(<[u8; 8]>::try_from(&bytes[64..72]).unwrap()),
u64::from_be_bytes(<[u8; 8]>::try_from(&bytes[72..80]).unwrap()),
u64::from_be_bytes(<[u8; 8]>::try_from(&bytes[80..88]).unwrap()),
u64::from_be_bytes(<[u8; 8]>::try_from(&bytes[88..96]).unwrap()),
])
}
/// Reduces a big-endian 64-bit limb representation of a 768-bit number.
fn from_u768(limbs: [u64; 12]) -> Fp {
// We reduce an arbitrary 768-bit number by decomposing it into two 384-bit digits
// with the higher bits multiplied by 2^384. Thus, we perform two reductions
//
// 1. the lower bits are multiplied by R^2, as normal
// 2. the upper bits are multiplied by R^2 * 2^384 = R^3
//
// and computing their sum in the field. It remains to see that arbitrary 384-bit
// numbers can be placed into Montgomery form safely using the reduction. The
// reduction works so long as the product is less than R=2^384 multiplied by
// the modulus. This holds because for any `c` smaller than the modulus, we have
// that (2^384 - 1)*c is an acceptable product for the reduction. Therefore, the
// reduction always works so long as `c` is in the field; in this case it is either the
// constant `R2` or `R3`.
// Note `limbs` is big-endian (limbs[0] most significant), so the limb
// order is reversed when building the little-endian `Fp` digits.
let d1 = Fp([limbs[11], limbs[10], limbs[9], limbs[8], limbs[7], limbs[6]]);
let d0 = Fp([limbs[5], limbs[4], limbs[3], limbs[2], limbs[1], limbs[0]]);
// Convert to Montgomery form
d0 * R2 + d1 * R3
}
/// Returns whether or not this element is strictly lexicographically
/// larger than its negation.
pub fn lexicographically_largest(&self) -> Choice {
// This can be determined by checking to see if the element is
// larger than (p - 1) // 2. If we subtract by ((p - 1) // 2) + 1
// and there is no underflow, then the element must be larger than
// (p - 1) // 2.
// First, because self is in Montgomery form we need to reduce it
let tmp = Fp::montgomery_reduce(
self.0[0], self.0[1], self.0[2], self.0[3], self.0[4], self.0[5], 0, 0, 0, 0, 0, 0,
);
// The constants below are the little-endian limbs of ((p - 1) // 2) + 1.
let (_, borrow) = sbb(tmp.0[0], 0xdcff_7fff_ffff_d556, 0);
let (_, borrow) = sbb(tmp.0[1], 0x0f55_ffff_58a9_ffff, borrow);
let (_, borrow) = sbb(tmp.0[2], 0xb398_6950_7b58_7b12, borrow);
let (_, borrow) = sbb(tmp.0[3], 0xb23b_a5c2_79c2_895f, borrow);
let (_, borrow) = sbb(tmp.0[4], 0x258d_d3db_21a5_d66b, borrow);
let (_, borrow) = sbb(tmp.0[5], 0x0d00_88f5_1cbf_f34d, borrow);
// If the element was smaller, the subtraction will underflow
// producing a borrow value of 0xffff...ffff, otherwise it will
// be zero. We create a Choice representing true if there was
// overflow (and so this element is not lexicographically larger
// than its negation) and then negate it.
!Choice::from((borrow as u8) & 1)
}
/// Constructs an element of `Fp` without checking that it is
/// canonical.
///
/// The caller is responsible for supplying limbs that are already a
/// valid (reduced, Montgomery-form) element; no validation is done.
pub const fn from_raw_unchecked(v: [u64; 6]) -> Fp {
Fp(v)
}
/// Although this is labeled "vartime", it is only
/// variable time with respect to the exponent. It
/// is also not exposed in the public API.
pub fn pow_vartime(&self, by: &[u64; 6]) -> Self {
    // Left-to-right binary exponentiation: walk the exponent from its
    // most significant limb and bit downwards, squaring at every step
    // and multiplying in `self` wherever a set bit is found.
    let mut acc = Self::one();
    for limb in by.iter().rev() {
        for bit in (0..64).rev() {
            acc = acc.square();
            if ((*limb >> bit) & 1) == 1 {
                acc *= self;
            }
        }
    }
    acc
}
#[inline]
pub fn sqrt(&self) -> CtOption<Self> {
// We use Shank's method, as p = 3 (mod 4). This means
// we only need to exponentiate by (p+1)/4. This only
// works for elements that are actually quadratic residue,
// so we check that we got the correct result at the end.
// The limbs below are the little-endian encoding of (p + 1) / 4.
let sqrt = self.pow_vartime(&[
0xee7f_bfff_ffff_eaab,
0x07aa_ffff_ac54_ffff,
0xd9cc_34a8_3dac_3d89,
0xd91d_d2e1_3ce1_44af,
0x92c6_e9ed_90d2_eb35,
0x0680_447a_8e5f_f9a6,
]);
// Reject non-residues: the candidate squares back to self only if a
// square root actually exists.
CtOption::new(sqrt, sqrt.square().ct_eq(self))
}
#[inline]
/// Computes the multiplicative inverse of this field
/// element, returning None in the case that this element
/// is zero.
///
/// Uses Fermat's little theorem: a^(p-2) = a^(-1) for nonzero a.
pub fn invert(&self) -> CtOption<Self> {
// Exponentiate by p - 2
let t = self.pow_vartime(&[
0xb9fe_ffff_ffff_aaa9,
0x1eab_fffe_b153_ffff,
0x6730_d2a0_f6b0_f624,
0x6477_4b84_f385_12bf,
0x4b1b_a7b6_434b_acd7,
0x1a01_11ea_397f_e69a,
]);
// 0^(p-2) is 0, so mask the result out when the input was zero.
CtOption::new(t, !self.is_zero())
}
#[inline]
/// Conditionally subtracts the modulus, in constant time: returns
/// `self - p` when `self >= p`, and `self` unchanged otherwise.
const fn subtract_p(&self) -> Fp {
let (r0, borrow) = sbb(self.0[0], MODULUS[0], 0);
let (r1, borrow) = sbb(self.0[1], MODULUS[1], borrow);
let (r2, borrow) = sbb(self.0[2], MODULUS[2], borrow);
let (r3, borrow) = sbb(self.0[3], MODULUS[3], borrow);
let (r4, borrow) = sbb(self.0[4], MODULUS[4], borrow);
let (r5, borrow) = sbb(self.0[5], MODULUS[5], borrow);
// If underflow occurred on the final limb, borrow = 0xfff...fff, otherwise
// borrow = 0x000...000. Thus, we use it as a mask!
let r0 = (self.0[0] & borrow) | (r0 & !borrow);
let r1 = (self.0[1] & borrow) | (r1 & !borrow);
let r2 = (self.0[2] & borrow) | (r2 & !borrow);
let r3 = (self.0[3] & borrow) | (r3 & !borrow);
let r4 = (self.0[4] & borrow) | (r4 & !borrow);
let r5 = (self.0[5] & borrow) | (r5 & !borrow);
Fp([r0, r1, r2, r3, r4, r5])
}
#[inline]
/// Adds `rhs` to `self`, returning the result (constant time).
pub const fn add(&self, rhs: &Fp) -> Fp {
let (d0, carry) = adc(self.0[0], rhs.0[0], 0);
let (d1, carry) = adc(self.0[1], rhs.0[1], carry);
let (d2, carry) = adc(self.0[2], rhs.0[2], carry);
let (d3, carry) = adc(self.0[3], rhs.0[3], carry);
let (d4, carry) = adc(self.0[4], rhs.0[4], carry);
// The top carry can be dropped: both inputs are < p < 2^382, so the
// raw sum always fits in six limbs.
let (d5, _) = adc(self.0[5], rhs.0[5], carry);
// Attempt to subtract the modulus, to ensure the value
// is smaller than the modulus.
(&Fp([d0, d1, d2, d3, d4, d5])).subtract_p()
}
#[inline]
/// Negates `self`, returning `p - self` (or zero when `self` is zero).
pub const fn neg(&self) -> Fp {
// The final borrow is always zero because self < p.
let (d0, borrow) = sbb(MODULUS[0], self.0[0], 0);
let (d1, borrow) = sbb(MODULUS[1], self.0[1], borrow);
let (d2, borrow) = sbb(MODULUS[2], self.0[2], borrow);
let (d3, borrow) = sbb(MODULUS[3], self.0[3], borrow);
let (d4, borrow) = sbb(MODULUS[4], self.0[4], borrow);
let (d5, _) = sbb(MODULUS[5], self.0[5], borrow);
// Let's use a mask if `self` was zero, which would mean
// the result of the subtraction is p.
// (mask is all-zero when self == 0, all-ones otherwise, so -0 == 0.)
let mask = (((self.0[0] | self.0[1] | self.0[2] | self.0[3] | self.0[4] | self.0[5]) == 0)
as u64)
.wrapping_sub(1);
Fp([
d0 & mask,
d1 & mask,
d2 & mask,
d3 & mask,
d4 & mask,
d5 & mask,
])
}
#[inline]
/// Subtracts `rhs` from `self`, implemented as `self + (-rhs)`.
pub const fn sub(&self, rhs: &Fp) -> Fp {
(&rhs.neg()).add(self)
}
/// Returns `c = a.zip(b).fold(0, |acc, (a_i, b_i)| acc + a_i * b_i)`.
///
/// Implements Algorithm 2 from Patrick Longa's
/// [ePrint 2022-367](https://eprint.iacr.org/2022/367) §3.
#[inline]
pub(crate) fn sum_of_products<const T: usize>(a: [Fp; T], b: [Fp; T]) -> Fp {
// For a single `a x b` multiplication, operand scanning (schoolbook) takes each
// limb of `a` in turn, and multiplies it by all of the limbs of `b` to compute
// the result as a double-width intermediate representation, which is then fully
// reduced at the end. Here however we have pairs of multiplications (a_i, b_i),
// the results of which are summed.
//
// The intuition for this algorithm is two-fold:
// - We can interleave the operand scanning for each pair, by processing the jth
// limb of each `a_i` together. As these have the same offset within the overall
// operand scanning flow, their results can be summed directly.
// - We can interleave the multiplication and reduction steps, resulting in a
// single bitshift by the limb size after each iteration. This means we only
// need to store a single extra limb overall, instead of keeping around all the
// intermediate results and eventually having twice as many limbs.
// Algorithm 2, line 2
// Outer loop over the six limb positions j of the `a_i` operands.
let (u0, u1, u2, u3, u4, u5) =
(0..6).fold((0, 0, 0, 0, 0, 0), |(u0, u1, u2, u3, u4, u5), j| {
// Algorithm 2, line 3
// For each pair in the overall sum of products:
let (t0, t1, t2, t3, t4, t5, t6) = (0..T).fold(
(u0, u1, u2, u3, u4, u5, 0),
|(t0, t1, t2, t3, t4, t5, t6), i| {
// Compute digit_j x row and accumulate into `u`.
let (t0, carry) = mac(t0, a[i].0[j], b[i].0[0], 0);
let (t1, carry) = mac(t1, a[i].0[j], b[i].0[1], carry);
let (t2, carry) = mac(t2, a[i].0[j], b[i].0[2], carry);
let (t3, carry) = mac(t3, a[i].0[j], b[i].0[3], carry);
let (t4, carry) = mac(t4, a[i].0[j], b[i].0[4], carry);
let (t5, carry) = mac(t5, a[i].0[j], b[i].0[5], carry);
let (t6, _) = adc(t6, 0, carry);
(t0, t1, t2, t3, t4, t5, t6)
},
);
// Algorithm 2, lines 4-5
// This is a single step of the usual Montgomery reduction process.
// Folding out the lowest limb each round is what shifts the running
// total down by one limb per outer iteration.
let k = t0.wrapping_mul(INV);
let (_, carry) = mac(t0, k, MODULUS[0], 0);
let (r1, carry) = mac(t1, k, MODULUS[1], carry);
let (r2, carry) = mac(t2, k, MODULUS[2], carry);
let (r3, carry) = mac(t3, k, MODULUS[3], carry);
let (r4, carry) = mac(t4, k, MODULUS[4], carry);
let (r5, carry) = mac(t5, k, MODULUS[5], carry);
let (r6, _) = adc(t6, 0, carry);
(r1, r2, r3, r4, r5, r6)
});
// Because we represent F_p elements in non-redundant form, we need a final
// conditional subtraction to ensure the output is in range.
(&Fp([u0, u1, u2, u3, u4, u5])).subtract_p()
}
#[inline(always)]
/// Montgomery reduction: maps the 12-limb value `t` to `(t / R) mod p`,
/// with R = 2^384. Used both to finish multiplication (where `t` is a
/// full double-width product) and, with zero high limbs, to leave
/// Montgomery form.
pub(crate) const fn montgomery_reduce(
t0: u64,
t1: u64,
t2: u64,
t3: u64,
t4: u64,
t5: u64,
t6: u64,
t7: u64,
t8: u64,
t9: u64,
t10: u64,
t11: u64,
) -> Self {
// The Montgomery reduction here is based on Algorithm 14.32 in
// Handbook of Applied Cryptography
// <http://cacr.uwaterloo.ca/hac/about/chap14.pdf>.
// Each round chooses k so that (t + k*p) has a zero low limb, then
// conceptually shifts right by one limb; six rounds divide by 2^384.
let k = t0.wrapping_mul(INV);
let (_, carry) = mac(t0, k, MODULUS[0], 0);
let (r1, carry) = mac(t1, k, MODULUS[1], carry);
let (r2, carry) = mac(t2, k, MODULUS[2], carry);
let (r3, carry) = mac(t3, k, MODULUS[3], carry);
let (r4, carry) = mac(t4, k, MODULUS[4], carry);
let (r5, carry) = mac(t5, k, MODULUS[5], carry);
let (r6, r7) = adc(t6, 0, carry);
let k = r1.wrapping_mul(INV);
let (_, carry) = mac(r1, k, MODULUS[0], 0);
let (r2, carry) = mac(r2, k, MODULUS[1], carry);
let (r3, carry) = mac(r3, k, MODULUS[2], carry);
let (r4, carry) = mac(r4, k, MODULUS[3], carry);
let (r5, carry) = mac(r5, k, MODULUS[4], carry);
let (r6, carry) = mac(r6, k, MODULUS[5], carry);
let (r7, r8) = adc(t7, r7, carry);
let k = r2.wrapping_mul(INV);
let (_, carry) = mac(r2, k, MODULUS[0], 0);
let (r3, carry) = mac(r3, k, MODULUS[1], carry);
let (r4, carry) = mac(r4, k, MODULUS[2], carry);
let (r5, carry) = mac(r5, k, MODULUS[3], carry);
let (r6, carry) = mac(r6, k, MODULUS[4], carry);
let (r7, carry) = mac(r7, k, MODULUS[5], carry);
let (r8, r9) = adc(t8, r8, carry);
let k = r3.wrapping_mul(INV);
let (_, carry) = mac(r3, k, MODULUS[0], 0);
let (r4, carry) = mac(r4, k, MODULUS[1], carry);
let (r5, carry) = mac(r5, k, MODULUS[2], carry);
let (r6, carry) = mac(r6, k, MODULUS[3], carry);
let (r7, carry) = mac(r7, k, MODULUS[4], carry);
let (r8, carry) = mac(r8, k, MODULUS[5], carry);
let (r9, r10) = adc(t9, r9, carry);
let k = r4.wrapping_mul(INV);
let (_, carry) = mac(r4, k, MODULUS[0], 0);
let (r5, carry) = mac(r5, k, MODULUS[1], carry);
let (r6, carry) = mac(r6, k, MODULUS[2], carry);
let (r7, carry) = mac(r7, k, MODULUS[3], carry);
let (r8, carry) = mac(r8, k, MODULUS[4], carry);
let (r9, carry) = mac(r9, k, MODULUS[5], carry);
let (r10, r11) = adc(t10, r10, carry);
let k = r5.wrapping_mul(INV);
let (_, carry) = mac(r5, k, MODULUS[0], 0);
let (r6, carry) = mac(r6, k, MODULUS[1], carry);
let (r7, carry) = mac(r7, k, MODULUS[2], carry);
let (r8, carry) = mac(r8, k, MODULUS[3], carry);
let (r9, carry) = mac(r9, k, MODULUS[4], carry);
let (r10, carry) = mac(r10, k, MODULUS[5], carry);
let (r11, _) = adc(t11, r11, carry);
// Attempt to subtract the modulus, to ensure the value
// is smaller than the modulus.
(&Fp([r6, r7, r8, r9, r10, r11])).subtract_p()
}
#[inline]
/// Multiplies `rhs` by `self`: a 12-limb schoolbook product followed by
/// Montgomery reduction (both operands are in Montgomery form).
pub const fn mul(&self, rhs: &Fp) -> Fp {
// Row 0: self.0[0] * rhs.
let (t0, carry) = mac(0, self.0[0], rhs.0[0], 0);
let (t1, carry) = mac(0, self.0[0], rhs.0[1], carry);
let (t2, carry) = mac(0, self.0[0], rhs.0[2], carry);
let (t3, carry) = mac(0, self.0[0], rhs.0[3], carry);
let (t4, carry) = mac(0, self.0[0], rhs.0[4], carry);
let (t5, t6) = mac(0, self.0[0], rhs.0[5], carry);
// Row 1: accumulate self.0[1] * rhs, shifted by one limb.
let (t1, carry) = mac(t1, self.0[1], rhs.0[0], 0);
let (t2, carry) = mac(t2, self.0[1], rhs.0[1], carry);
let (t3, carry) = mac(t3, self.0[1], rhs.0[2], carry);
let (t4, carry) = mac(t4, self.0[1], rhs.0[3], carry);
let (t5, carry) = mac(t5, self.0[1], rhs.0[4], carry);
let (t6, t7) = mac(t6, self.0[1], rhs.0[5], carry);
// Row 2.
let (t2, carry) = mac(t2, self.0[2], rhs.0[0], 0);
let (t3, carry) = mac(t3, self.0[2], rhs.0[1], carry);
let (t4, carry) = mac(t4, self.0[2], rhs.0[2], carry);
let (t5, carry) = mac(t5, self.0[2], rhs.0[3], carry);
let (t6, carry) = mac(t6, self.0[2], rhs.0[4], carry);
let (t7, t8) = mac(t7, self.0[2], rhs.0[5], carry);
// Row 3.
let (t3, carry) = mac(t3, self.0[3], rhs.0[0], 0);
let (t4, carry) = mac(t4, self.0[3], rhs.0[1], carry);
let (t5, carry) = mac(t5, self.0[3], rhs.0[2], carry);
let (t6, carry) = mac(t6, self.0[3], rhs.0[3], carry);
let (t7, carry) = mac(t7, self.0[3], rhs.0[4], carry);
let (t8, t9) = mac(t8, self.0[3], rhs.0[5], carry);
// Row 4.
let (t4, carry) = mac(t4, self.0[4], rhs.0[0], 0);
let (t5, carry) = mac(t5, self.0[4], rhs.0[1], carry);
let (t6, carry) = mac(t6, self.0[4], rhs.0[2], carry);
let (t7, carry) = mac(t7, self.0[4], rhs.0[3], carry);
let (t8, carry) = mac(t8, self.0[4], rhs.0[4], carry);
let (t9, t10) = mac(t9, self.0[4], rhs.0[5], carry);
// Row 5.
let (t5, carry) = mac(t5, self.0[5], rhs.0[0], 0);
let (t6, carry) = mac(t6, self.0[5], rhs.0[1], carry);
let (t7, carry) = mac(t7, self.0[5], rhs.0[2], carry);
let (t8, carry) = mac(t8, self.0[5], rhs.0[3], carry);
let (t9, carry) = mac(t9, self.0[5], rhs.0[4], carry);
let (t10, t11) = mac(t10, self.0[5], rhs.0[5], carry);
// Reduce the 768-bit product back to a canonical 6-limb element.
Self::montgomery_reduce(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11)
}
/// Squares this element.
///
/// Exploits symmetry: the off-diagonal partial products a_i*a_j (i < j)
/// are computed once and doubled, then the diagonal terms a_i^2 are
/// added before the Montgomery reduction.
#[inline]
pub const fn square(&self) -> Self {
// Off-diagonal partial products (each pair counted once).
let (t1, carry) = mac(0, self.0[0], self.0[1], 0);
let (t2, carry) = mac(0, self.0[0], self.0[2], carry);
let (t3, carry) = mac(0, self.0[0], self.0[3], carry);
let (t4, carry) = mac(0, self.0[0], self.0[4], carry);
let (t5, t6) = mac(0, self.0[0], self.0[5], carry);
let (t3, carry) = mac(t3, self.0[1], self.0[2], 0);
let (t4, carry) = mac(t4, self.0[1], self.0[3], carry);
let (t5, carry) = mac(t5, self.0[1], self.0[4], carry);
let (t6, t7) = mac(t6, self.0[1], self.0[5], carry);
let (t5, carry) = mac(t5, self.0[2], self.0[3], 0);
let (t6, carry) = mac(t6, self.0[2], self.0[4], carry);
let (t7, t8) = mac(t7, self.0[2], self.0[5], carry);
let (t7, carry) = mac(t7, self.0[3], self.0[4], 0);
let (t8, t9) = mac(t8, self.0[3], self.0[5], carry);
let (t9, t10) = mac(t9, self.0[4], self.0[5], 0);
// Double the off-diagonal sum via a one-bit left shift across limbs.
let t11 = t10 >> 63;
let t10 = (t10 << 1) | (t9 >> 63);
let t9 = (t9 << 1) | (t8 >> 63);
let t8 = (t8 << 1) | (t7 >> 63);
let t7 = (t7 << 1) | (t6 >> 63);
let t6 = (t6 << 1) | (t5 >> 63);
let t5 = (t5 << 1) | (t4 >> 63);
let t4 = (t4 << 1) | (t3 >> 63);
let t3 = (t3 << 1) | (t2 >> 63);
let t2 = (t2 << 1) | (t1 >> 63);
let t1 = t1 << 1;
// Add the diagonal terms self.0[i]^2 into the doubled sum.
let (t0, carry) = mac(0, self.0[0], self.0[0], 0);
let (t1, carry) = adc(t1, 0, carry);
let (t2, carry) = mac(t2, self.0[1], self.0[1], carry);
let (t3, carry) = adc(t3, 0, carry);
let (t4, carry) = mac(t4, self.0[2], self.0[2], carry);
let (t5, carry) = adc(t5, 0, carry);
let (t6, carry) = mac(t6, self.0[3], self.0[3], carry);
let (t7, carry) = adc(t7, 0, carry);
let (t8, carry) = mac(t8, self.0[4], self.0[4], carry);
let (t9, carry) = adc(t9, 0, carry);
let (t10, carry) = mac(t10, self.0[5], self.0[5], carry);
let (t11, _) = adc(t11, 0, carry);
Self::montgomery_reduce(t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11)
}
}
#[test]
fn test_conditional_selection() {
let a = Fp([1, 2, 3, 4, 5, 6]);
let b = Fp([7, 8, 9, 10, 11, 12]);
assert_eq!(
ConditionallySelectable::conditional_select(&a, &b, Choice::from(0u8)),
a
);
assert_eq!(
ConditionallySelectable::conditional_select(&a, &b, Choice::from(1u8)),
b
);
}
#[test]
fn test_equality() {
fn is_equal(a: &Fp, b: &Fp) -> bool {
let eq = a == b;
let ct_eq = a.ct_eq(&b);
assert_eq!(eq, bool::from(ct_eq));
eq
}
assert!(is_equal(&Fp([1, 2, 3, 4, 5, 6]), &Fp([1, 2, 3, 4, 5, 6])));
assert!(!is_equal(&Fp([7, 2, 3, 4, 5, 6]), &Fp([1, 2, 3, 4, 5, 6])));
assert!(!is_equal(&Fp([1, 7, 3, 4, 5, 6]), &Fp([1, 2, 3, 4, 5, 6])));
assert!(!is_equal(&Fp([1, 2, 7, 4, 5, 6]), &Fp([1, 2, 3, 4, 5, 6])));
assert!(!is_equal(&Fp([1, 2, 3, 7, 5, 6]), &Fp([1, 2, 3, 4, 5, 6])));
assert!(!is_equal(&Fp([1, 2, 3, 4, 7, 6]), &Fp([1, 2, 3, 4, 5, 6])));
assert!(!is_equal(&Fp([1, 2, 3, 4, 5, 7]), &Fp([1, 2, 3, 4, 5, 6])));
}
#[test]
fn test_squaring() {
let a = Fp([
0xd215_d276_8e83_191b,
0x5085_d80f_8fb2_8261,
0xce9a_032d_df39_3a56,
0x3e9c_4fff_2ca0_c4bb,
0x6436_b6f7_f4d9_5dfb,
0x1060_6628_ad4a_4d90,
]);
let b = Fp([
0x33d9_c42a_3cb3_e235,
0xdad1_1a09_4c4c_d455,
0xa2f1_44bd_729a_aeba,
0xd415_0932_be9f_feac,
0xe27b_c7c4_7d44_ee50,
0x14b6_a78d_3ec7_a560,
]);
assert_eq!(a.square(), b);
}
#[test]
fn test_multiplication() {
let a = Fp([
0x0397_a383_2017_0cd4,
0x734c_1b2c_9e76_1d30,
0x5ed2_55ad_9a48_beb5,
0x095a_3c6b_22a7_fcfc,
0x2294_ce75_d4e2_6a27,
0x1333_8bd8_7001_1ebb,
]);
let b = Fp([
0xb9c3_c7c5_b119_6af7,
0x2580_e208_6ce3_35c1,
0xf49a_ed3d_8a57_ef42,
0x41f2_81e4_9846_e878,
0xe076_2346_c384_52ce,
0x0652_e893_26e5_7dc0,
]);
let c = Fp([
0xf96e_f3d7_11ab_5355,
0xe8d4_59ea_00f1_48dd,
0x53f7_354a_5f00_fa78,
0x9e34_a4f3_125c_5f83,
0x3fbe_0c47_ca74_c19e,
0x01b0_6a8b_bd4a_dfe4,
]);
assert_eq!(a * b, c);
}
#[test]
fn test_addition() {
let a = Fp([
0x5360_bb59_7867_8032,
0x7dd2_75ae_799e_128e,
0x5c5b_5071_ce4f_4dcf,
0xcdb2_1f93_078d_bb3e,
0xc323_65c5_e73f_474a,
0x115a_2a54_89ba_be5b,
]);
let b = Fp([
0x9fd2_8773_3d23_dda0,
0xb16b_f2af_738b_3554,
0x3e57_a75b_d3cc_6d1d,
0x900b_c0bd_627f_d6d6,
0xd319_a080_efb2_45fe,
0x15fd_caa4_e4bb_2091,
]);
let c = Fp([
0x3934_42cc_b58b_b327,
0x1092_685f_3bd5_47e3,
0x3382_252c_ab6a_c4c9,
0xf946_94cb_7688_7f55,
0x4b21_5e90_93a5_e071,
0x0d56_e30f_34f5_f853,
]);
assert_eq!(a + b, c);
}
#[test]
fn test_subtraction() {
let a = Fp([
0x5360_bb59_7867_8032,
0x7dd2_75ae_799e_128e,
0x5c5b_5071_ce4f_4dcf,
0xcdb2_1f93_078d_bb3e,
0xc323_65c5_e73f_474a,
0x115a_2a54_89ba_be5b,
]);
let b = Fp([
0x9fd2_8773_3d23_dda0,
0xb16b_f2af_738b_3554,
0x3e57_a75b_d3cc_6d1d,
0x900b_c0bd_627f_d6d6,
0xd319_a080_efb2_45fe,
0x15fd_caa4_e4bb_2091,
]);
let c = Fp([
0x6d8d_33e6_3b43_4d3d,
0xeb12_82fd_b766_dd39,
0x8534_7bb6_f133_d6d5,
0xa21d_aa5a_9892_f727,
0x3b25_6cfb_3ad8_ae23,
0x155d_7199_de7f_8464,
]);
assert_eq!(a - b, c);
}
#[test]
fn test_negation() {
let a = Fp([
0x5360_bb59_7867_8032,
0x7dd2_75ae_799e_128e,
0x5c5b_5071_ce4f_4dcf,
0xcdb2_1f93_078d_bb3e,
0xc323_65c5_e73f_474a,
0x115a_2a54_89ba_be5b,
]);
let b = Fp([
0x669e_44a6_8798_2a79,
0xa0d9_8a50_37b5_ed71,
0x0ad5_822f_2861_a854,
0x96c5_2bf1_ebf7_5781,
0x87f8_41f0_5c0c_658c,
0x08a6_e795_afc5_283e,
]);
assert_eq!(-a, b);
}
#[test]
fn test_debug() {
assert_eq!(
format!(
"{:?}",
Fp([
0x5360_bb59_7867_8032,
0x7dd2_75ae_799e_128e,
0x5c5b_5071_ce4f_4dcf,
0xcdb2_1f93_078d_bb3e,
0xc323_65c5_e73f_474a,
0x115a_2a54_89ba_be5b,
])
),
"0x104bf052ad3bc99bcb176c24a06a6c3aad4eaf2308fc4d282e106c84a757d061052630515305e59bdddf8111bfdeb704"
);
}
#[test]
fn test_from_bytes() {
let mut a = Fp([
0xdc90_6d9b_e3f9_5dc8,
0x8755_caf7_4596_91a1,
0xcff1_a7f4_e958_3ab3,
0x9b43_821f_849e_2284,
0xf575_54f3_a297_4f3f,
0x085d_bea8_4ed4_7f79,
]);
for _ in 0..100 {
a = a.square();
let tmp = a.to_bytes();
let b = Fp::from_bytes(&tmp).unwrap();
assert_eq!(a, b);
}
assert_eq!(
-Fp::one(),
Fp::from_bytes(&[
26, 1, 17, 234, 57, 127, 230, 154, 75, 27, 167, 182, 67, 75, 172, 215, 100, 119, 75,
132, 243, 133, 18, 191, 103, 48, 210, 160, 246, 176, 246, 36, 30, 171, 255, 254, 177,
83, 255, 255, 185, 254, 255, 255, 255, 255, 170, 170
])
.unwrap()
);
assert!(bool::from(
Fp::from_bytes(&[
27, 1, 17, 234, 57, 127, 230, 154, 75, 27, 167, 182, 67, 75, 172, 215, 100, 119, 75,
132, 243, 133, 18, 191, 103, 48, 210, 160, 246, 176, 246, 36, 30, 171, 255, 254, 177,
83, 255, 255, 185, 254, 255, 255, 255, 255, 170, 170
])
.is_none()
));
assert!(bool::from(Fp::from_bytes(&[0xff; 48]).is_none()));
}
#[test]
fn test_sqrt() {
// a = 4
let a = Fp::from_raw_unchecked([
0xaa27_0000_000c_fff3,
0x53cc_0032_fc34_000a,
0x478f_e97a_6b0a_807f,
0xb1d3_7ebe_e6ba_24d7,
0x8ec9_733b_bf78_ab2f,
0x09d6_4551_3d83_de7e,
]);
assert_eq!(
// sqrt(4) = -2
-a.sqrt().unwrap(),
// 2
Fp::from_raw_unchecked([
0x3213_0000_0006_554f,
0xb93c_0018_d6c4_0005,
0x5760_5e0d_b0dd_bb51,
0x8b25_6521_ed1f_9bcb,
0x6cf2_8d79_0162_2c03,
0x11eb_ab9d_bb81_e28c,
])
);
}
#[test]
fn test_inversion() {
let a = Fp([
0x43b4_3a50_78ac_2076,
0x1ce0_7630_46f8_962b,
0x724a_5276_486d_735c,
0x6f05_c2a6_282d_48fd,
0x2095_bd5b_b4ca_9331,
0x03b3_5b38_94b0_f7da,
]);
let b = Fp([
0x69ec_d704_0952_148f,
0x985c_cc20_2219_0f55,
0xe19b_ba36_a9ad_2f41,
0x19bb_16c9_5219_dbd8,
0x14dc_acfd_fb47_8693,
0x115f_f58a_fff9_a8e1,
]);
assert_eq!(a.invert().unwrap(), b);
assert!(bool::from(Fp::zero().invert().is_none()));
}
#[test]
fn test_lexicographic_largest() {
assert!(!bool::from(Fp::zero().lexicographically_largest()));
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | true |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/zkcrypto/bls12_381/src/fp2.rs | zkcrypto/bls12_381/src/fp2.rs | //! This module implements arithmetic over the quadratic extension field Fp2.
#![allow(clippy::all)]
use core::fmt;
use core::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign};
use rand_core::RngCore;
use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption};
use crate::fp::Fp;
#[derive(Copy, Clone)]
pub struct Fp2 {
pub c0: Fp,
pub c1: Fp,
}
impl fmt::Debug for Fp2 {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?} + {:?}*u", self.c0, self.c1)
}
}
impl Default for Fp2 {
fn default() -> Self {
Fp2::zero()
}
}
#[cfg(feature = "zeroize")]
impl zeroize::DefaultIsZeroes for Fp2 {}
impl From<Fp> for Fp2 {
fn from(f: Fp) -> Fp2 {
Fp2 {
c0: f,
c1: Fp::zero(),
}
}
}
impl ConstantTimeEq for Fp2 {
fn ct_eq(&self, other: &Self) -> Choice {
self.c0.ct_eq(&other.c0) & self.c1.ct_eq(&other.c1)
}
}
impl Eq for Fp2 {}
impl PartialEq for Fp2 {
#[inline]
fn eq(&self, other: &Self) -> bool {
bool::from(self.ct_eq(other))
}
}
impl ConditionallySelectable for Fp2 {
fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
Fp2 {
c0: Fp::conditional_select(&a.c0, &b.c0, choice),
c1: Fp::conditional_select(&a.c1, &b.c1, choice),
}
}
}
impl<'a> Neg for &'a Fp2 {
type Output = Fp2;
#[inline]
fn neg(self) -> Fp2 {
self.neg()
}
}
impl Neg for Fp2 {
type Output = Fp2;
#[inline]
fn neg(self) -> Fp2 {
-&self
}
}
impl<'a, 'b> Sub<&'b Fp2> for &'a Fp2 {
type Output = Fp2;
#[inline]
fn sub(self, rhs: &'b Fp2) -> Fp2 {
self.sub(rhs)
}
}
impl<'a, 'b> Add<&'b Fp2> for &'a Fp2 {
type Output = Fp2;
#[inline]
fn add(self, rhs: &'b Fp2) -> Fp2 {
self.add(rhs)
}
}
impl<'a, 'b> Mul<&'b Fp2> for &'a Fp2 {
type Output = Fp2;
#[inline]
fn mul(self, rhs: &'b Fp2) -> Fp2 {
self.mul(rhs)
}
}
impl_binops_additive!(Fp2, Fp2);
impl_binops_multiplicative!(Fp2, Fp2);
impl Fp2 {
#[inline]
pub const fn zero() -> Fp2 {
Fp2 {
c0: Fp::zero(),
c1: Fp::zero(),
}
}
#[inline]
pub const fn one() -> Fp2 {
Fp2 {
c0: Fp::one(),
c1: Fp::zero(),
}
}
pub fn is_zero(&self) -> Choice {
self.c0.is_zero() & self.c1.is_zero()
}
pub(crate) fn random(mut rng: impl RngCore) -> Fp2 {
Fp2 {
c0: Fp::random(&mut rng),
c1: Fp::random(&mut rng),
}
}
/// Raises this element to p.
#[inline(always)]
pub fn frobenius_map(&self) -> Self {
// This is always just a conjugation. If you're curious why, here's
// an article about it: https://alicebob.cryptoland.net/the-frobenius-endomorphism-with-finite-fields/
self.conjugate()
}
#[inline(always)]
pub fn conjugate(&self) -> Self {
Fp2 {
c0: self.c0,
c1: -self.c1,
}
}
#[inline(always)]
pub fn mul_by_nonresidue(&self) -> Fp2 {
// Multiply a + bu by u + 1, getting
// au + a + bu^2 + bu
// and because u^2 = -1, we get
// (a - b) + (a + b)u
Fp2 {
c0: self.c0 - self.c1,
c1: self.c0 + self.c1,
}
}
/// Returns whether or not this element is strictly lexicographically
/// larger than its negation.
#[inline]
pub fn lexicographically_largest(&self) -> Choice {
// If this element's c1 coefficient is lexicographically largest
// then it is lexicographically largest. Otherwise, in the event
// the c1 coefficient is zero and the c0 coefficient is
// lexicographically largest, then this element is lexicographically
// largest.
self.c1.lexicographically_largest()
| (self.c1.is_zero() & self.c0.lexicographically_largest())
}
pub const fn square(&self) -> Fp2 {
// Complex squaring:
//
// v0 = c0 * c1
// c0' = (c0 + c1) * (c0 + \beta*c1) - v0 - \beta * v0
// c1' = 2 * v0
//
// In BLS12-381's F_{p^2}, our \beta is -1 so we
// can modify this formula:
//
// c0' = (c0 + c1) * (c0 - c1)
// c1' = 2 * c0 * c1
let a = (&self.c0).add(&self.c1);
let b = (&self.c0).sub(&self.c1);
let c = (&self.c0).add(&self.c0);
Fp2 {
c0: (&a).mul(&b),
c1: (&c).mul(&self.c1),
}
}
pub fn mul(&self, rhs: &Fp2) -> Fp2 {
// F_{p^2} x F_{p^2} multiplication implemented with operand scanning (schoolbook)
// computes the result as:
//
// a·b = (a_0 b_0 + a_1 b_1 β) + (a_0 b_1 + a_1 b_0)i
//
// In BLS12-381's F_{p^2}, our β is -1, so the resulting F_{p^2} element is:
//
// c_0 = a_0 b_0 - a_1 b_1
// c_1 = a_0 b_1 + a_1 b_0
//
// Each of these is a "sum of products", which we can compute efficiently.
Fp2 {
c0: Fp::sum_of_products([self.c0, -self.c1], [rhs.c0, rhs.c1]),
c1: Fp::sum_of_products([self.c0, self.c1], [rhs.c1, rhs.c0]),
}
}
pub const fn add(&self, rhs: &Fp2) -> Fp2 {
Fp2 {
c0: (&self.c0).add(&rhs.c0),
c1: (&self.c1).add(&rhs.c1),
}
}
pub const fn sub(&self, rhs: &Fp2) -> Fp2 {
Fp2 {
c0: (&self.c0).sub(&rhs.c0),
c1: (&self.c1).sub(&rhs.c1),
}
}
pub const fn neg(&self) -> Fp2 {
Fp2 {
c0: (&self.c0).neg(),
c1: (&self.c1).neg(),
}
}
pub fn sqrt(&self) -> CtOption<Self> {
// Algorithm 9, https://eprint.iacr.org/2012/685.pdf
// with constant time modifications.
CtOption::new(Fp2::zero(), self.is_zero()).or_else(|| {
// a1 = self^((p - 3) / 4)
let a1 = self.pow_vartime(&[
0xee7f_bfff_ffff_eaaa,
0x07aa_ffff_ac54_ffff,
0xd9cc_34a8_3dac_3d89,
0xd91d_d2e1_3ce1_44af,
0x92c6_e9ed_90d2_eb35,
0x0680_447a_8e5f_f9a6,
]);
// alpha = a1^2 * self = self^((p - 3) / 2 + 1) = self^((p - 1) / 2)
let alpha = a1.square() * self;
// x0 = self^((p + 1) / 4)
let x0 = a1 * self;
// In the event that alpha = -1, the element is order p - 1 and so
// we're just trying to get the square of an element of the subfield
// Fp. This is given by x0 * u, since u = sqrt(-1). Since the element
// x0 = a + bu has b = 0, the solution is therefore au.
CtOption::new(
Fp2 {
c0: -x0.c1,
c1: x0.c0,
},
alpha.ct_eq(&(&Fp2::one()).neg()),
)
// Otherwise, the correct solution is (1 + alpha)^((q - 1) // 2) * x0
.or_else(|| {
CtOption::new(
(alpha + Fp2::one()).pow_vartime(&[
0xdcff_7fff_ffff_d555,
0x0f55_ffff_58a9_ffff,
0xb398_6950_7b58_7b12,
0xb23b_a5c2_79c2_895f,
0x258d_d3db_21a5_d66b,
0x0d00_88f5_1cbf_f34d,
]) * x0,
Choice::from(1),
)
})
// Only return the result if it's really the square root (and so
// self is actually quadratic nonresidue)
.and_then(|sqrt| CtOption::new(sqrt, sqrt.square().ct_eq(self)))
})
}
/// Computes the multiplicative inverse of this field
/// element, returning None in the case that this element
/// is zero.
pub fn invert(&self) -> CtOption<Self> {
// We wish to find the multiplicative inverse of a nonzero
// element a + bu in Fp2. We leverage an identity
//
// (a + bu)(a - bu) = a^2 + b^2
//
// which holds because u^2 = -1. This can be rewritten as
//
// (a + bu)(a - bu)/(a^2 + b^2) = 1
//
// because a^2 + b^2 = 0 has no nonzero solutions for (a, b).
// This gives that (a - bu)/(a^2 + b^2) is the inverse
// of (a + bu). Importantly, this can be computing using
// only a single inversion in Fp.
(self.c0.square() + self.c1.square()).invert().map(|t| Fp2 {
c0: self.c0 * t,
c1: self.c1 * -t,
})
}
/// Although this is labeled "vartime", it is only
/// variable time with respect to the exponent. It
/// is also not exposed in the public API.
pub fn pow_vartime(&self, by: &[u64; 6]) -> Self {
let mut res = Self::one();
for e in by.iter().rev() {
for i in (0..64).rev() {
res = res.square();
if ((*e >> i) & 1) == 1 {
res *= self;
}
}
}
res
}
/// Vartime exponentiation for larger exponents, only
/// used in testing and not exposed through the public API.
#[cfg(all(test, feature = "experimental"))]
pub(crate) fn pow_vartime_extended(&self, by: &[u64]) -> Self {
let mut res = Self::one();
for e in by.iter().rev() {
for i in (0..64).rev() {
res = res.square();
if ((*e >> i) & 1) == 1 {
res *= self;
}
}
}
res
}
}
#[test]
fn test_conditional_selection() {
let a = Fp2 {
c0: Fp::from_raw_unchecked([1, 2, 3, 4, 5, 6]),
c1: Fp::from_raw_unchecked([7, 8, 9, 10, 11, 12]),
};
let b = Fp2 {
c0: Fp::from_raw_unchecked([13, 14, 15, 16, 17, 18]),
c1: Fp::from_raw_unchecked([19, 20, 21, 22, 23, 24]),
};
assert_eq!(
ConditionallySelectable::conditional_select(&a, &b, Choice::from(0u8)),
a
);
assert_eq!(
ConditionallySelectable::conditional_select(&a, &b, Choice::from(1u8)),
b
);
}
#[test]
fn test_equality() {
fn is_equal(a: &Fp2, b: &Fp2) -> bool {
let eq = a == b;
let ct_eq = a.ct_eq(&b);
assert_eq!(eq, bool::from(ct_eq));
eq
}
assert!(is_equal(
&Fp2 {
c0: Fp::from_raw_unchecked([1, 2, 3, 4, 5, 6]),
c1: Fp::from_raw_unchecked([7, 8, 9, 10, 11, 12]),
},
&Fp2 {
c0: Fp::from_raw_unchecked([1, 2, 3, 4, 5, 6]),
c1: Fp::from_raw_unchecked([7, 8, 9, 10, 11, 12]),
}
));
assert!(!is_equal(
&Fp2 {
c0: Fp::from_raw_unchecked([2, 2, 3, 4, 5, 6]),
c1: Fp::from_raw_unchecked([7, 8, 9, 10, 11, 12]),
},
&Fp2 {
c0: Fp::from_raw_unchecked([1, 2, 3, 4, 5, 6]),
c1: Fp::from_raw_unchecked([7, 8, 9, 10, 11, 12]),
}
));
assert!(!is_equal(
&Fp2 {
c0: Fp::from_raw_unchecked([1, 2, 3, 4, 5, 6]),
c1: Fp::from_raw_unchecked([2, 8, 9, 10, 11, 12]),
},
&Fp2 {
c0: Fp::from_raw_unchecked([1, 2, 3, 4, 5, 6]),
c1: Fp::from_raw_unchecked([7, 8, 9, 10, 11, 12]),
}
));
}
#[test]
fn test_squaring() {
let a = Fp2 {
c0: Fp::from_raw_unchecked([
0xc9a2_1831_63ee_70d4,
0xbc37_70a7_196b_5c91,
0xa247_f8c1_304c_5f44,
0xb01f_c2a3_726c_80b5,
0xe1d2_93e5_bbd9_19c9,
0x04b7_8e80_020e_f2ca,
]),
c1: Fp::from_raw_unchecked([
0x952e_a446_0462_618f,
0x238d_5edd_f025_c62f,
0xf6c9_4b01_2ea9_2e72,
0x03ce_24ea_c1c9_3808,
0x0559_50f9_45da_483c,
0x010a_768d_0df4_eabc,
]),
};
let b = Fp2 {
c0: Fp::from_raw_unchecked([
0xa1e0_9175_a4d2_c1fe,
0x8b33_acfc_204e_ff12,
0xe244_15a1_1b45_6e42,
0x61d9_96b1_b6ee_1936,
0x1164_dbe8_667c_853c,
0x0788_557a_cc7d_9c79,
]),
c1: Fp::from_raw_unchecked([
0xda6a_87cc_6f48_fa36,
0x0fc7_b488_277c_1903,
0x9445_ac4a_dc44_8187,
0x0261_6d5b_c909_9209,
0xdbed_4677_2db5_8d48,
0x11b9_4d50_76c7_b7b1,
]),
};
assert_eq!(a.square(), b);
}
#[test]
fn test_multiplication() {
let a = Fp2 {
c0: Fp::from_raw_unchecked([
0xc9a2_1831_63ee_70d4,
0xbc37_70a7_196b_5c91,
0xa247_f8c1_304c_5f44,
0xb01f_c2a3_726c_80b5,
0xe1d2_93e5_bbd9_19c9,
0x04b7_8e80_020e_f2ca,
]),
c1: Fp::from_raw_unchecked([
0x952e_a446_0462_618f,
0x238d_5edd_f025_c62f,
0xf6c9_4b01_2ea9_2e72,
0x03ce_24ea_c1c9_3808,
0x0559_50f9_45da_483c,
0x010a_768d_0df4_eabc,
]),
};
let b = Fp2 {
c0: Fp::from_raw_unchecked([
0xa1e0_9175_a4d2_c1fe,
0x8b33_acfc_204e_ff12,
0xe244_15a1_1b45_6e42,
0x61d9_96b1_b6ee_1936,
0x1164_dbe8_667c_853c,
0x0788_557a_cc7d_9c79,
]),
c1: Fp::from_raw_unchecked([
0xda6a_87cc_6f48_fa36,
0x0fc7_b488_277c_1903,
0x9445_ac4a_dc44_8187,
0x0261_6d5b_c909_9209,
0xdbed_4677_2db5_8d48,
0x11b9_4d50_76c7_b7b1,
]),
};
let c = Fp2 {
c0: Fp::from_raw_unchecked([
0xf597_483e_27b4_e0f7,
0x610f_badf_811d_ae5f,
0x8432_af91_7714_327a,
0x6a9a_9603_cf88_f09e,
0xf05a_7bf8_bad0_eb01,
0x0954_9131_c003_ffae,
]),
c1: Fp::from_raw_unchecked([
0x963b_02d0_f93d_37cd,
0xc95c_e1cd_b30a_73d4,
0x3087_25fa_3126_f9b8,
0x56da_3c16_7fab_0d50,
0x6b50_86b5_f4b6_d6af,
0x09c3_9f06_2f18_e9f2,
]),
};
assert_eq!(a * b, c);
}
#[test]
fn test_addition() {
let a = Fp2 {
c0: Fp::from_raw_unchecked([
0xc9a2_1831_63ee_70d4,
0xbc37_70a7_196b_5c91,
0xa247_f8c1_304c_5f44,
0xb01f_c2a3_726c_80b5,
0xe1d2_93e5_bbd9_19c9,
0x04b7_8e80_020e_f2ca,
]),
c1: Fp::from_raw_unchecked([
0x952e_a446_0462_618f,
0x238d_5edd_f025_c62f,
0xf6c9_4b01_2ea9_2e72,
0x03ce_24ea_c1c9_3808,
0x0559_50f9_45da_483c,
0x010a_768d_0df4_eabc,
]),
};
let b = Fp2 {
c0: Fp::from_raw_unchecked([
0xa1e0_9175_a4d2_c1fe,
0x8b33_acfc_204e_ff12,
0xe244_15a1_1b45_6e42,
0x61d9_96b1_b6ee_1936,
0x1164_dbe8_667c_853c,
0x0788_557a_cc7d_9c79,
]),
c1: Fp::from_raw_unchecked([
0xda6a_87cc_6f48_fa36,
0x0fc7_b488_277c_1903,
0x9445_ac4a_dc44_8187,
0x0261_6d5b_c909_9209,
0xdbed_4677_2db5_8d48,
0x11b9_4d50_76c7_b7b1,
]),
};
let c = Fp2 {
c0: Fp::from_raw_unchecked([
0x6b82_a9a7_08c1_32d2,
0x476b_1da3_39ba_5ba4,
0x848c_0e62_4b91_cd87,
0x11f9_5955_295a_99ec,
0xf337_6fce_2255_9f06,
0x0c3f_e3fa_ce8c_8f43,
]),
c1: Fp::from_raw_unchecked([
0x6f99_2c12_73ab_5bc5,
0x3355_1366_17a1_df33,
0x8b0e_f74c_0aed_aff9,
0x062f_9246_8ad2_ca12,
0xe146_9770_738f_d584,
0x12c3_c3dd_84bc_a26d,
]),
};
assert_eq!(a + b, c);
}
#[test]
fn test_subtraction() {
let a = Fp2 {
c0: Fp::from_raw_unchecked([
0xc9a2_1831_63ee_70d4,
0xbc37_70a7_196b_5c91,
0xa247_f8c1_304c_5f44,
0xb01f_c2a3_726c_80b5,
0xe1d2_93e5_bbd9_19c9,
0x04b7_8e80_020e_f2ca,
]),
c1: Fp::from_raw_unchecked([
0x952e_a446_0462_618f,
0x238d_5edd_f025_c62f,
0xf6c9_4b01_2ea9_2e72,
0x03ce_24ea_c1c9_3808,
0x0559_50f9_45da_483c,
0x010a_768d_0df4_eabc,
]),
};
let b = Fp2 {
c0: Fp::from_raw_unchecked([
0xa1e0_9175_a4d2_c1fe,
0x8b33_acfc_204e_ff12,
0xe244_15a1_1b45_6e42,
0x61d9_96b1_b6ee_1936,
0x1164_dbe8_667c_853c,
0x0788_557a_cc7d_9c79,
]),
c1: Fp::from_raw_unchecked([
0xda6a_87cc_6f48_fa36,
0x0fc7_b488_277c_1903,
0x9445_ac4a_dc44_8187,
0x0261_6d5b_c909_9209,
0xdbed_4677_2db5_8d48,
0x11b9_4d50_76c7_b7b1,
]),
};
let c = Fp2 {
c0: Fp::from_raw_unchecked([
0xe1c0_86bb_bf1b_5981,
0x4faf_c3a9_aa70_5d7e,
0x2734_b5c1_0bb7_e726,
0xb2bd_7776_af03_7a3e,
0x1b89_5fb3_98a8_4164,
0x1730_4aef_6f11_3cec,
]),
c1: Fp::from_raw_unchecked([
0x74c3_1c79_9519_1204,
0x3271_aa54_79fd_ad2b,
0xc9b4_7157_4915_a30f,
0x65e4_0313_ec44_b8be,
0x7487_b238_5b70_67cb,
0x0952_3b26_d0ad_19a4,
]),
};
assert_eq!(a - b, c);
}
#[test]
fn test_negation() {
let a = Fp2 {
c0: Fp::from_raw_unchecked([
0xc9a2_1831_63ee_70d4,
0xbc37_70a7_196b_5c91,
0xa247_f8c1_304c_5f44,
0xb01f_c2a3_726c_80b5,
0xe1d2_93e5_bbd9_19c9,
0x04b7_8e80_020e_f2ca,
]),
c1: Fp::from_raw_unchecked([
0x952e_a446_0462_618f,
0x238d_5edd_f025_c62f,
0xf6c9_4b01_2ea9_2e72,
0x03ce_24ea_c1c9_3808,
0x0559_50f9_45da_483c,
0x010a_768d_0df4_eabc,
]),
};
let b = Fp2 {
c0: Fp::from_raw_unchecked([
0xf05c_e7ce_9c11_39d7,
0x6274_8f57_97e8_a36d,
0xc4e8_d9df_c664_96df,
0xb457_88e1_8118_9209,
0x6949_13d0_8772_930d,
0x1549_836a_3770_f3cf,
]),
c1: Fp::from_raw_unchecked([
0x24d0_5bb9_fb9d_491c,
0xfb1e_a120_c12e_39d0,
0x7067_879f_c807_c7b1,
0x60a9_269a_31bb_dab6,
0x45c2_56bc_fd71_649b,
0x18f6_9b5d_2b8a_fbde,
]),
};
assert_eq!(-a, b);
}
#[test]
fn test_sqrt() {
// a = 1488924004771393321054797166853618474668089414631333405711627789629391903630694737978065425271543178763948256226639*u + 784063022264861764559335808165825052288770346101304131934508881646553551234697082295473567906267937225174620141295
let a = Fp2 {
c0: Fp::from_raw_unchecked([
0x2bee_d146_27d7_f9e9,
0xb661_4e06_660e_5dce,
0x06c4_cc7c_2f91_d42c,
0x996d_7847_4b7a_63cc,
0xebae_bc4c_820d_574e,
0x1886_5e12_d93f_d845,
]),
c1: Fp::from_raw_unchecked([
0x7d82_8664_baf4_f566,
0xd17e_6639_96ec_7339,
0x679e_ad55_cb40_78d0,
0xfe3b_2260_e001_ec28,
0x3059_93d0_43d9_1b68,
0x0626_f03c_0489_b72d,
]),
};
assert_eq!(a.sqrt().unwrap().square(), a);
// b = 5, which is a generator of the p - 1 order
// multiplicative subgroup
let b = Fp2 {
c0: Fp::from_raw_unchecked([
0x6631_0000_0010_5545,
0x2114_0040_0eec_000d,
0x3fa7_af30_c820_e316,
0xc52a_8b8d_6387_695d,
0x9fb4_e61d_1e83_eac5,
0x005c_b922_afe8_4dc7,
]),
c1: Fp::zero(),
};
assert_eq!(b.sqrt().unwrap().square(), b);
// c = 25, which is a generator of the (p - 1) / 2 order
// multiplicative subgroup
let c = Fp2 {
c0: Fp::from_raw_unchecked([
0x44f6_0000_0051_ffae,
0x86b8_0141_9948_0043,
0xd715_9952_f1f3_794a,
0x755d_6e3d_fe1f_fc12,
0xd36c_d6db_5547_e905,
0x02f8_c8ec_bf18_67bb,
]),
c1: Fp::zero(),
};
assert_eq!(c.sqrt().unwrap().square(), c);
// 2155129644831861015726826462986972654175647013268275306775721078997042729172900466542651176384766902407257452753362*u + 2796889544896299244102912275102369318775038861758288697415827248356648685135290329705805931514906495247464901062529
// is nonsquare.
assert!(bool::from(
Fp2 {
c0: Fp::from_raw_unchecked([
0xc5fa_1bc8_fd00_d7f6,
0x3830_ca45_4606_003b,
0x2b28_7f11_04b1_02da,
0xa7fb_30f2_8230_f23e,
0x339c_db9e_e953_dbf0,
0x0d78_ec51_d989_fc57,
]),
c1: Fp::from_raw_unchecked([
0x27ec_4898_cf87_f613,
0x9de1_394e_1abb_05a5,
0x0947_f85d_c170_fc14,
0x586f_bc69_6b61_14b7,
0x2b34_75a4_077d_7169,
0x13e1_c895_cc4b_6c22,
])
}
.sqrt()
.is_none()
));
}
#[test]
fn test_inversion() {
let a = Fp2 {
c0: Fp::from_raw_unchecked([
0x1128_ecad_6754_9455,
0x9e7a_1cff_3a4e_a1a8,
0xeb20_8d51_e08b_cf27,
0xe98a_d408_11f5_fc2b,
0x736c_3a59_232d_511d,
0x10ac_d42d_29cf_cbb6,
]),
c1: Fp::from_raw_unchecked([
0xd328_e37c_c2f5_8d41,
0x948d_f085_8a60_5869,
0x6032_f9d5_6f93_a573,
0x2be4_83ef_3fff_dc87,
0x30ef_61f8_8f48_3c2a,
0x1333_f55a_3572_5be0,
]),
};
let b = Fp2 {
c0: Fp::from_raw_unchecked([
0x0581_a133_3d4f_48a6,
0x5824_2f6e_f074_8500,
0x0292_c955_349e_6da5,
0xba37_721d_dd95_fcd0,
0x70d1_6790_3aa5_dfc5,
0x1189_5e11_8b58_a9d5,
]),
c1: Fp::from_raw_unchecked([
0x0eda_09d2_d7a8_5d17,
0x8808_e137_a7d1_a2cf,
0x43ae_2625_c1ff_21db,
0xf85a_c9fd_f7a7_4c64,
0x8fcc_dda5_b8da_9738,
0x08e8_4f0c_b32c_d17d,
]),
};
assert_eq!(a.invert().unwrap(), b);
assert!(bool::from(Fp2::zero().invert().is_none()));
}
#[test]
fn test_lexicographic_largest() {
assert!(!bool::from(Fp2::zero().lexicographically_largest()));
assert!(!bool::from(Fp2::one().lexicographically_largest()));
assert!(bool::from(
Fp2 {
c0: Fp::from_raw_unchecked([
0x1128_ecad_6754_9455,
0x9e7a_1cff_3a4e_a1a8,
0xeb20_8d51_e08b_cf27,
0xe98a_d408_11f5_fc2b,
0x736c_3a59_232d_511d,
0x10ac_d42d_29cf_cbb6,
]),
c1: Fp::from_raw_unchecked([
0xd328_e37c_c2f5_8d41,
0x948d_f085_8a60_5869,
0x6032_f9d5_6f93_a573,
0x2be4_83ef_3fff_dc87,
0x30ef_61f8_8f48_3c2a,
0x1333_f55a_3572_5be0,
]),
}
.lexicographically_largest()
));
assert!(!bool::from(
Fp2 {
c0: -Fp::from_raw_unchecked([
0x1128_ecad_6754_9455,
0x9e7a_1cff_3a4e_a1a8,
0xeb20_8d51_e08b_cf27,
0xe98a_d408_11f5_fc2b,
0x736c_3a59_232d_511d,
0x10ac_d42d_29cf_cbb6,
]),
c1: -Fp::from_raw_unchecked([
0xd328_e37c_c2f5_8d41,
0x948d_f085_8a60_5869,
0x6032_f9d5_6f93_a573,
0x2be4_83ef_3fff_dc87,
0x30ef_61f8_8f48_3c2a,
0x1333_f55a_3572_5be0,
]),
}
.lexicographically_largest()
));
assert!(!bool::from(
Fp2 {
c0: Fp::from_raw_unchecked([
0x1128_ecad_6754_9455,
0x9e7a_1cff_3a4e_a1a8,
0xeb20_8d51_e08b_cf27,
0xe98a_d408_11f5_fc2b,
0x736c_3a59_232d_511d,
0x10ac_d42d_29cf_cbb6,
]),
c1: Fp::zero(),
}
.lexicographically_largest()
));
assert!(bool::from(
Fp2 {
c0: -Fp::from_raw_unchecked([
0x1128_ecad_6754_9455,
0x9e7a_1cff_3a4e_a1a8,
0xeb20_8d51_e08b_cf27,
0xe98a_d408_11f5_fc2b,
0x736c_3a59_232d_511d,
0x10ac_d42d_29cf_cbb6,
]),
c1: Fp::zero(),
}
.lexicographically_largest()
));
}
#[cfg(feature = "zeroize")]
#[test]
fn test_zeroize() {
use zeroize::Zeroize;
let mut a = Fp2::one();
a.zeroize();
assert!(bool::from(a.is_zero()));
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/zkcrypto/bls12_381/src/tests/mod.rs | zkcrypto/bls12_381/src/tests/mod.rs | use super::*;
macro_rules! test_vectors {
($projective:ident, $affine:ident, $serialize:ident, $deserialize:ident, $expected:ident) => {
let mut e = $projective::identity();
let mut v = vec![];
{
let mut expected = $expected;
for _ in 0..1000 {
let e_affine = $affine::from(e);
let encoded = e_affine.$serialize();
v.extend_from_slice(&encoded[..]);
let mut decoded = encoded;
let len_of_encoding = decoded.len();
(&mut decoded[..]).copy_from_slice(&expected[0..len_of_encoding]);
expected = &expected[len_of_encoding..];
let decoded = $affine::$deserialize(&decoded).unwrap();
assert_eq!(e_affine, decoded);
e = &e + &$projective::generator();
}
}
assert_eq!(&v[..], $expected);
};
}
#[test]
fn g1_uncompressed_valid_test_vectors() {
let bytes: &'static [u8] = include_bytes!("g1_uncompressed_valid_test_vectors.dat");
test_vectors!(
G1Projective,
G1Affine,
to_uncompressed,
from_uncompressed,
bytes
);
}
#[test]
fn g1_compressed_valid_test_vectors() {
let bytes: &'static [u8] = include_bytes!("g1_compressed_valid_test_vectors.dat");
test_vectors!(
G1Projective,
G1Affine,
to_compressed,
from_compressed,
bytes
);
}
#[test]
fn g2_uncompressed_valid_test_vectors() {
let bytes: &'static [u8] = include_bytes!("g2_uncompressed_valid_test_vectors.dat");
test_vectors!(
G2Projective,
G2Affine,
to_uncompressed,
from_uncompressed,
bytes
);
}
#[test]
fn g2_compressed_valid_test_vectors() {
let bytes: &'static [u8] = include_bytes!("g2_compressed_valid_test_vectors.dat");
test_vectors!(
G2Projective,
G2Affine,
to_compressed,
from_compressed,
bytes
);
}
#[test]
#[cfg(all(feature = "alloc", feature = "pairing"))]
fn test_pairing_result_against_relic() {
/*
Sent to me from Diego Aranha (author of RELIC library):
1250EBD871FC0A92 A7B2D83168D0D727 272D441BEFA15C50 3DD8E90CE98DB3E7 B6D194F60839C508 A84305AACA1789B6
089A1C5B46E5110B 86750EC6A5323488 68A84045483C92B7 AF5AF689452EAFAB F1A8943E50439F1D 59882A98EAA0170F
1368BB445C7C2D20 9703F239689CE34C 0378A68E72A6B3B2 16DA0E22A5031B54 DDFF57309396B38C 881C4C849EC23E87
193502B86EDB8857 C273FA075A505129 37E0794E1E65A761 7C90D8BD66065B1F FFE51D7A579973B1 315021EC3C19934F
01B2F522473D1713 91125BA84DC4007C FBF2F8DA752F7C74 185203FCCA589AC7 19C34DFFBBAAD843 1DAD1C1FB597AAA5
018107154F25A764 BD3C79937A45B845 46DA634B8F6BE14A 8061E55CCEBA478B 23F7DACAA35C8CA7 8BEAE9624045B4B6
19F26337D205FB46 9CD6BD15C3D5A04D C88784FBB3D0B2DB DEA54D43B2B73F2C BB12D58386A8703E 0F948226E47EE89D
06FBA23EB7C5AF0D 9F80940CA771B6FF D5857BAAF222EB95 A7D2809D61BFE02E 1BFD1B68FF02F0B8 102AE1C2D5D5AB1A
11B8B424CD48BF38 FCEF68083B0B0EC5 C81A93B330EE1A67 7D0D15FF7B984E89 78EF48881E32FAC9 1B93B47333E2BA57
03350F55A7AEFCD3 C31B4FCB6CE5771C C6A0E9786AB59733 20C806AD36082910 7BA810C5A09FFDD9 BE2291A0C25A99A2
04C581234D086A99 02249B64728FFD21 A189E87935A95405 1C7CDBA7B3872629 A4FAFC05066245CB 9108F0242D0FE3EF
0F41E58663BF08CF 068672CBD01A7EC7 3BACA4D72CA93544 DEFF686BFD6DF543 D48EAA24AFE47E1E FDE449383B676631
*/
let a = G1Affine::generator();
let b = G2Affine::generator();
use super::fp::Fp;
use super::fp12::Fp12;
use super::fp2::Fp2;
use super::fp6::Fp6;
let res = pairing(&a, &b);
let prep = G2Prepared::from(b);
assert_eq!(
res,
multi_miller_loop(&[(&a, &prep)]).final_exponentiation()
);
assert_eq!(
res.0,
Fp12 {
c0: Fp6 {
c0: Fp2 {
c0: Fp::from_raw_unchecked([
0x1972_e433_a01f_85c5,
0x97d3_2b76_fd77_2538,
0xc8ce_546f_c96b_cdf9,
0xcef6_3e73_66d4_0614,
0xa611_3427_8184_3780,
0x13f3_448a_3fc6_d825,
]),
c1: Fp::from_raw_unchecked([
0xd263_31b0_2e9d_6995,
0x9d68_a482_f779_7e7d,
0x9c9b_2924_8d39_ea92,
0xf480_1ca2_e131_07aa,
0xa16c_0732_bdbc_b066,
0x083c_a4af_ba36_0478,
])
},
c1: Fp2 {
c0: Fp::from_raw_unchecked([
0x59e2_61db_0916_b641,
0x2716_b6f4_b23e_960d,
0xc8e5_5b10_a0bd_9c45,
0x0bdb_0bd9_9c4d_eda8,
0x8cf8_9ebf_57fd_aac5,
0x12d6_b792_9e77_7a5e,
]),
c1: Fp::from_raw_unchecked([
0x5fc8_5188_b0e1_5f35,
0x34a0_6e3a_8f09_6365,
0xdb31_26a6_e02a_d62c,
0xfc6f_5aa9_7d9a_990b,
0xa12f_55f5_eb89_c210,
0x1723_703a_926f_8889,
])
},
c2: Fp2 {
c0: Fp::from_raw_unchecked([
0x9358_8f29_7182_8778,
0x43f6_5b86_11ab_7585,
0x3183_aaf5_ec27_9fdf,
0xfa73_d7e1_8ac9_9df6,
0x64e1_76a6_a64c_99b0,
0x179f_a78c_5838_8f1f,
]),
c1: Fp::from_raw_unchecked([
0x672a_0a11_ca2a_ef12,
0x0d11_b9b5_2aa3_f16b,
0xa444_12d0_699d_056e,
0xc01d_0177_221a_5ba5,
0x66e0_cede_6c73_5529,
0x05f5_a71e_9fdd_c339,
])
}
},
c1: Fp6 {
c0: Fp2 {
c0: Fp::from_raw_unchecked([
0xd30a_88a1_b062_c679,
0x5ac5_6a5d_35fc_8304,
0xd0c8_34a6_a81f_290d,
0xcd54_30c2_da37_07c7,
0xf0c2_7ff7_8050_0af0,
0x0924_5da6_e2d7_2eae,
]),
c1: Fp::from_raw_unchecked([
0x9f2e_0676_791b_5156,
0xe2d1_c823_4918_fe13,
0x4c9e_459f_3c56_1bf4,
0xa3e8_5e53_b9d3_e3c1,
0x820a_121e_21a7_0020,
0x15af_6183_41c5_9acc,
])
},
c1: Fp2 {
c0: Fp::from_raw_unchecked([
0x7c95_658c_2499_3ab1,
0x73eb_3872_1ca8_86b9,
0x5256_d749_4774_34bc,
0x8ba4_1902_ea50_4a8b,
0x04a3_d3f8_0c86_ce6d,
0x18a6_4a87_fb68_6eaa,
]),
c1: Fp::from_raw_unchecked([
0xbb83_e71b_b920_cf26,
0x2a52_77ac_92a7_3945,
0xfc0e_e59f_94f0_46a0,
0x7158_cdf3_7860_58f7,
0x7cc1_061b_82f9_45f6,
0x03f8_47aa_9fdb_e567,
])
},
c2: Fp2 {
c0: Fp::from_raw_unchecked([
0x8078_dba5_6134_e657,
0x1cd7_ec9a_4399_8a6e,
0xb1aa_599a_1a99_3766,
0xc9a0_f62f_0842_ee44,
0x8e15_9be3_b605_dffa,
0x0c86_ba0d_4af1_3fc2,
]),
c1: Fp::from_raw_unchecked([
0xe80f_f2a0_6a52_ffb1,
0x7694_ca48_721a_906c,
0x7583_183e_03b0_8514,
0xf567_afdd_40ce_e4e2,
0x9a6d_96d2_e526_a5fc,
0x197e_9f49_861f_2242,
])
}
}
}
);
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/zkcrypto/bls12_381/src/hash_to_curve/map_g2.rs | zkcrypto/bls12_381/src/hash_to_curve/map_g2.rs | //! Implementation of hash-to-curve for the G2 group
use subtle::{Choice, ConditionallyNegatable, ConditionallySelectable, ConstantTimeEq};
use super::chain::chain_p2m9div16;
use super::{HashToField, MapToCurve, Sgn0};
use crate::generic_array::{
typenum::{U128, U64},
GenericArray,
};
use crate::{fp::Fp, fp2::Fp2, g2::G2Projective};
/// Coefficients of the 3-isogeny x map's numerator
///
/// These (and the three tables below) are the coefficient constants of the
/// 3-isogeny from the auxiliary curve E' back to G2 — cf. the hash-to-curve
/// draft/RFC 9380 appendix constants for BLS12-381 G2. Each `Fp` is given as
/// six 64-bit limbs fed directly to `Fp::from_raw_unchecked`; the limb
/// encoding is whatever that constructor expects (presumably Montgomery
/// form, least-significant limb first — see `Fp::from_raw_unchecked`).
const ISO3_XNUM: [Fp2; 4] = [
    Fp2 {
        c0: Fp::from_raw_unchecked([
            0x47f6_71c7_1ce0_5e62,
            0x06dd_5707_1206_393e,
            0x7c80_cd2a_f3fd_71a2,
            0x0481_03ea_9e6c_d062,
            0xc545_16ac_c8d0_37f6,
            0x1380_8f55_0920_ea41,
        ]),
        c1: Fp::from_raw_unchecked([
            0x47f6_71c7_1ce0_5e62,
            0x06dd_5707_1206_393e,
            0x7c80_cd2a_f3fd_71a2,
            0x0481_03ea_9e6c_d062,
            0xc545_16ac_c8d0_37f6,
            0x1380_8f55_0920_ea41,
        ]),
    },
    Fp2 {
        c0: Fp::zero(),
        c1: Fp::from_raw_unchecked([
            0x5fe5_5555_554c_71d0,
            0x873f_ffdd_236a_aaa3,
            0x6a6b_4619_b26e_f918,
            0x21c2_8884_0887_4945,
            0x2836_cda7_028c_abc5,
            0x0ac7_3310_a7fd_5abd,
        ]),
    },
    Fp2 {
        c0: Fp::from_raw_unchecked([
            0x0a0c_5555_5559_71c3,
            0xdb0c_0010_1f9e_aaae,
            0xb1fb_2f94_1d79_7997,
            0xd396_0742_ef41_6e1c,
            0xb700_40e2_c205_56f4,
            0x149d_7861_e581_393b,
        ]),
        c1: Fp::from_raw_unchecked([
            0xaff2_aaaa_aaa6_38e8,
            0x439f_ffee_91b5_5551,
            0xb535_a30c_d937_7c8c,
            0x90e1_4442_0443_a4a2,
            0x941b_66d3_8146_55e2,
            0x0563_9988_53fe_ad5e,
        ]),
    },
    Fp2 {
        c0: Fp::from_raw_unchecked([
            0x40aa_c71c_71c7_25ed,
            0x1909_5555_7a84_e38e,
            0xd817_050a_8f41_abc3,
            0xd864_85d4_c87f_6fb1,
            0x696e_b479_f885_d059,
            0x198e_1a74_3280_02d2,
        ]),
        c1: Fp::zero(),
    },
];

/// Coefficients of the 3-isogeny x map's denominator
///
/// One entry shorter than the numerator; `iso_map` compensates for the
/// missing degree with an extra factor of z.
const ISO3_XDEN: [Fp2; 3] = [
    Fp2 {
        c0: Fp::zero(),
        c1: Fp::from_raw_unchecked([
            0x1f3a_ffff_ff13_ab97,
            0xf25b_fc61_1da3_ff3e,
            0xca37_57cb_3819_b208,
            0x3e64_2736_6f8c_ec18,
            0x0397_7bc8_6095_b089,
            0x04f6_9db1_3f39_a952,
        ]),
    },
    Fp2 {
        c0: Fp::from_raw_unchecked([
            0x4476_0000_0027_552e,
            0xdcb8_009a_4348_0020,
            0x6f7e_e9ce_4a6e_8b59,
            0xb103_30b7_c0a9_5bc6,
            0x6140_b1fc_fb1e_54b7,
            0x0381_be09_7f0b_b4e1,
        ]),
        c1: Fp::from_raw_unchecked([
            0x7588_ffff_ffd8_557d,
            0x41f3_ff64_6e0b_ffdf,
            0xf7b1_e8d2_ac42_6aca,
            0xb374_1acd_32db_b6f8,
            0xe9da_f5b9_482d_581f,
            0x167f_53e0_ba74_31b8,
        ]),
    },
    // Leading coefficient is 1 (the polynomial is monic).
    Fp2::one(),
];

/// Coefficients of the 3-isogeny y map's numerator
const ISO3_YNUM: [Fp2; 4] = [
    Fp2 {
        c0: Fp::from_raw_unchecked([
            0x96d8_f684_bdfc_77be,
            0xb530_e4f4_3b66_d0e2,
            0x184a_88ff_3796_52fd,
            0x57cb_23ec_fae8_04e1,
            0x0fd2_e39e_ada3_eba9,
            0x08c8_055e_31c5_d5c3,
        ]),
        c1: Fp::from_raw_unchecked([
            0x96d8_f684_bdfc_77be,
            0xb530_e4f4_3b66_d0e2,
            0x184a_88ff_3796_52fd,
            0x57cb_23ec_fae8_04e1,
            0x0fd2_e39e_ada3_eba9,
            0x08c8_055e_31c5_d5c3,
        ]),
    },
    Fp2 {
        c0: Fp::zero(),
        c1: Fp::from_raw_unchecked([
            0xbf0a_71c7_1c91_b406,
            0x4d6d_55d2_8b76_38fd,
            0x9d82_f98e_5f20_5aee,
            0xa27a_a27b_1d1a_18d5,
            0x02c3_b2b2_d293_8e86,
            0x0c7d_1342_0b09_807f,
        ]),
    },
    Fp2 {
        c0: Fp::from_raw_unchecked([
            0xd7f9_5555_5553_1c74,
            0x21cf_fff7_48da_aaa8,
            0x5a9a_d186_6c9b_be46,
            0x4870_a221_0221_d251,
            0x4a0d_b369_c0a3_2af1,
            0x02b1_ccc4_29ff_56af,
        ]),
        c1: Fp::from_raw_unchecked([
            0xe205_aaaa_aaac_8e37,
            0xfcdc_0007_6879_5556,
            0x0c96_011a_8a15_37dd,
            0x1c06_a963_f163_406e,
            0x010d_f44c_82a8_81e6,
            0x174f_4526_0f80_8feb,
        ]),
    },
    Fp2 {
        c0: Fp::from_raw_unchecked([
            0xa470_bda1_2f67_f35c,
            0xc0fe_38e2_3327_b425,
            0xc9d3_d0f2_c6f0_678d,
            0x1c55_c993_5b5a_982e,
            0x27f6_c0e2_f074_6764,
            0x117c_5e6e_28aa_9054,
        ]),
        c1: Fp::zero(),
    },
];

/// Coefficients of the 3-isogeny y map's denominator
const ISO3_YDEN: [Fp2; 4] = [
    Fp2 {
        c0: Fp::from_raw_unchecked([
            0x0162_ffff_fa76_5adf,
            0x8f7b_ea48_0083_fb75,
            0x561b_3c22_59e9_3611,
            0x11e1_9fc1_a9c8_75d5,
            0xca71_3efc_0036_7660,
            0x03c6_a03d_41da_1151,
        ]),
        c1: Fp::from_raw_unchecked([
            0x0162_ffff_fa76_5adf,
            0x8f7b_ea48_0083_fb75,
            0x561b_3c22_59e9_3611,
            0x11e1_9fc1_a9c8_75d5,
            0xca71_3efc_0036_7660,
            0x03c6_a03d_41da_1151,
        ]),
    },
    Fp2 {
        c0: Fp::zero(),
        c1: Fp::from_raw_unchecked([
            0x5db0_ffff_fd3b_02c5,
            0xd713_f523_58eb_fdba,
            0x5ea6_0761_a84d_161a,
            0xbb2c_75a3_4ea6_c44a,
            0x0ac6_7359_21c1_119b,
            0x0ee3_d913_bdac_fbf6,
        ]),
    },
    Fp2 {
        c0: Fp::from_raw_unchecked([
            0x66b1_0000_003a_ffc5,
            0xcb14_00e7_64ec_0030,
            0xa73e_5eb5_6fa5_d106,
            0x8984_c913_a0fe_09a9,
            0x11e1_0afb_78ad_7f13,
            0x0542_9d0e_3e91_8f52,
        ]),
        c1: Fp::from_raw_unchecked([
            0x534d_ffff_ffc4_aae6,
            0x5397_ff17_4c67_ffcf,
            0xbff2_73eb_870b_251d,
            0xdaf2_8271_5287_0915,
            0x393a_9cba_ca9e_2dc3,
            0x14be_74db_faee_5748,
        ]),
    },
    // Leading coefficient is 1 (the polynomial is monic).
    Fp2::one(),
];
// Parameters for the simplified SWU map onto the 3-isogenous curve E':
// `check_g2_prime` below verifies points against y^2 = x^3 + A*x + B with
// these A and B values.

/// Curve coefficient A of the isogenous curve E'.
const SSWU_ELLP_A: Fp2 = Fp2 {
    c0: Fp::zero(),
    c1: Fp::from_raw_unchecked([
        0xe53a_0000_0313_5242,
        0x0108_0c0f_def8_0285,
        0xe788_9edb_e340_f6bd,
        0x0b51_3751_2631_0601,
        0x02d6_9857_17c7_44ab,
        0x1220_b4e9_79ea_5467,
    ]),
};

/// Curve coefficient B of the isogenous curve E'.
const SSWU_ELLP_B: Fp2 = Fp2 {
    c0: Fp::from_raw_unchecked([
        0x22ea_0000_0cf8_9db2,
        0x6ec8_32df_7138_0aa4,
        0x6e1b_9440_3db5_a66e,
        0x75bf_3c53_a794_73ba,
        0x3dd3_a569_412c_0a34,
        0x125c_db5e_74dc_4fd1,
    ]),
    c1: Fp::from_raw_unchecked([
        0x22ea_0000_0cf8_9db2,
        0x6ec8_32df_7138_0aa4,
        0x6e1b_9440_3db5_a66e,
        0x75bf_3c53_a794_73ba,
        0x3dd3_a569_412c_0a34,
        0x125c_db5e_74dc_4fd1,
    ]),
};

/// The SWU "xi" (Z) parameter; multiplied into u^2 by
/// `map_to_curve_simple_swu` and also used as the denominator fallback when
/// the common subexpression vanishes.
const SSWU_XI: Fp2 = Fp2 {
    c0: Fp::from_raw_unchecked([
        0x87eb_ffff_fff9_555c,
        0x656f_ffe5_da8f_fffa,
        0x0fd0_7493_45d3_3ad2,
        0xd951_e663_0665_76f4,
        0xde29_1a3d_41e9_80d3,
        0x0815_664c_7dfe_040d,
    ]),
    c1: Fp::from_raw_unchecked([
        0x43f5_ffff_fffc_aaae,
        0x32b7_fff2_ed47_fffd,
        0x07e8_3a49_a2e9_9d69,
        0xeca8_f331_8332_bb7a,
        0xef14_8d1e_a0f4_c069,
        0x040a_b326_3eff_0206,
    ]),
};

/// The "eta" constants tried by `map_to_curve_simple_swu` when searching for
/// a square root of g(x1(u)).
const SSWU_ETAS: [Fp2; 4] = [
    Fp2 {
        c0: Fp::from_raw_unchecked([
            0x05e5_1466_8ac7_36d2,
            0x9089_b4d6_b84f_3ea5,
            0x603c_384c_224a_8b32,
            0xf325_7909_536a_fea6,
            0x5c5c_dbab_ae65_6d81,
            0x075b_fa08_63c9_87e9,
        ]),
        c1: Fp::from_raw_unchecked([
            0x338d_9bfe_0808_7330,
            0x7b8e_48b2_bd83_cefe,
            0x530d_ad5d_306b_5be7,
            0x5a4d_7e8e_6c40_8b6d,
            0x6258_f7a6_232c_ab9b,
            0x0b98_5811_cce1_4db5,
        ]),
    },
    Fp2 {
        c0: Fp::from_raw_unchecked([
            0x8671_6401_f7f7_377b,
            0xa31d_b74b_f3d0_3101,
            0x1423_2543_c645_9a3c,
            0x0a29_ccf6_8744_8752,
            0xe8c2_b010_201f_013c,
            0x0e68_b9d8_6c9e_98e4,
        ]),
        c1: Fp::from_raw_unchecked([
            0x05e5_1466_8ac7_36d2,
            0x9089_b4d6_b84f_3ea5,
            0x603c_384c_224a_8b32,
            0xf325_7909_536a_fea6,
            0x5c5c_dbab_ae65_6d81,
            0x075b_fa08_63c9_87e9,
        ]),
    },
    Fp2 {
        c0: Fp::from_raw_unchecked([
            0x718f_dad2_4ee1_d90f,
            0xa58c_025b_ed82_76af,
            0x0c3a_1023_0ab7_976f,
            0xf0c5_4df5_c8f2_75e1,
            0x4ec2_478c_28ba_f465,
            0x1129_373a_90c5_08e6,
        ]),
        c1: Fp::from_raw_unchecked([
            0x019a_f5f9_80a3_680c,
            0x4ed7_da0e_6606_3afa,
            0x6003_5472_3b5d_9972,
            0x8b2f_958b_20d0_9d72,
            0x0474_938f_02d4_61db,
            0x0dcf_8b9e_0684_ab1c,
        ]),
    },
    Fp2 {
        c0: Fp::from_raw_unchecked([
            0xb864_0a06_7f5c_429f,
            0xcfd4_25f0_4b4d_c505,
            0x072d_7e2e_bb53_5cb1,
            0xd947_b5f9_d2b4_754d,
            0x46a7_1427_4077_4afb,
            0x0c31_864c_32fb_3b7e,
        ]),
        c1: Fp::from_raw_unchecked([
            0x718f_dad2_4ee1_d90f,
            0xa58c_025b_ed82_76af,
            0x0c3a_1023_0ab7_976f,
            0xf0c5_4df5_c8f2_75e1,
            0x4ec2_478c_28ba_f465,
            0x1129_373a_90c5_08e6,
        ]),
    },
];

/// Constant used to build the Fp2(RV1, ±RV1) root-of-unity candidates when
/// adjusting the square-root candidate in `map_to_curve_simple_swu`.
const SSWU_RV1: Fp2 = Fp2 {
    c0: Fp::from_raw_unchecked([
        0x7bcf_a7a2_5aa3_0fda,
        0xdc17_dec1_2a92_7e7c,
        0x2f08_8dd8_6b4e_bef1,
        0xd1ca_2087_da74_d4a7,
        0x2da2_5966_96ce_bc1d,
        0x0e2b_7eed_bbfd_87d2,
    ]),
    c1: Fp::from_raw_unchecked([
        0x7bcf_a7a2_5aa3_0fda,
        0xdc17_dec1_2a92_7e7c,
        0x2f08_8dd8_6b4e_bef1,
        0xd1ca_2087_da74_d4a7,
        0x2da2_5966_96ce_bc1d,
        0x0e2b_7eed_bbfd_87d2,
    ]),
};
impl HashToField for Fp2 {
    // ceil(log2(p)) = 381, m = 2, k = 128.
    type InputLength = U128;

    /// Builds an `Fp2` from 128 bytes of output keying material: the first
    /// 64 bytes produce `c0`, the remaining 64 produce `c1`.
    fn from_okm(okm: &GenericArray<u8, U128>) -> Fp2 {
        let (head, tail) = okm.split_at(64);
        Fp2 {
            c0: <Fp as HashToField>::from_okm(GenericArray::<u8, U64>::from_slice(head)),
            c1: <Fp as HashToField>::from_okm(GenericArray::<u8, U64>::from_slice(tail)),
        }
    }
}
impl Sgn0 for Fp2 {
    /// Sign of an `Fp2` element: the sign of `c0` decides unless `c0` is
    /// zero, in which case the sign of `c1` is used. Combined with bitwise
    /// `subtle::Choice` operations rather than branches.
    fn sgn0(&self) -> Choice {
        let c0_sign = self.c0.sgn0();
        let c0_is_zero = self.c0.is_zero();
        let c1_sign = self.c1.sgn0();
        c0_sign | (c0_is_zero & c1_sign)
    }
}
/// Maps from an [`Fp2`] element to a point on iso-G2.
///
/// Simplified SWU map onto the 3-isogenous curve E'. All candidate
/// selections go through `subtle`'s `conditional_select` /
/// `conditional_assign` / `ct_eq`, so there is no secret-dependent
/// branching.
fn map_to_curve_simple_swu(u: &Fp2) -> G2Projective {
    let usq = u.square();
    let xi_usq = SSWU_XI * usq;
    let xisq_u4 = xi_usq.square();
    let nd_common = xisq_u4 + xi_usq; // XI^2 * u^4 + XI * u^2
    // If the common subexpression is zero, fall back to XI as the (scaled)
    // denominator so x_den is never zero; selected in constant time.
    let x_den = SSWU_ELLP_A * Fp2::conditional_select(&(-nd_common), &SSWU_XI, nd_common.is_zero());
    let x0_num = SSWU_ELLP_B * (Fp2::one() + nd_common); // B * (1 + (XI^2 * u^4 + XI * u^2))
    // compute g(x0(u))
    let x_densq = x_den.square();
    let gx_den = x_densq * x_den;
    // x0_num^3 + A * x0_num * x_den^2 + B * x_den^3
    let gx0_num = (x0_num.square() + SSWU_ELLP_A * x_densq) * x0_num + SSWU_ELLP_B * gx_den;

    // compute g(x0(u)) ^ ((p^2 - 9) // 16)
    let sqrt_candidate = {
        let vsq = gx_den.square(); // v^2
        let v_3 = vsq * gx_den; // v^3
        let v_4 = vsq.square(); // v^4
        let uv_7 = gx0_num * v_3 * v_4; // u v^7
        let uv_15 = uv_7 * v_4.square(); // u v^15
        uv_7 * chain_p2m9div16(&uv_15) // u v^7 (u v^15) ^ ((p^2 - 9) // 16)
    };

    // set y = sqrt_candidate * Fp2::one(), check candidate against other roots of unity
    let mut y = sqrt_candidate;
    // check Fp2(0, 1): multiply the candidate by i (swap components, negate one)
    let tmp = Fp2 {
        c0: -sqrt_candidate.c1,
        c1: sqrt_candidate.c0,
    };
    // Keep whichever candidate actually satisfies tmp^2 * gx_den == gx0_num.
    y.conditional_assign(&tmp, (tmp.square() * gx_den).ct_eq(&gx0_num));
    // check Fp2(RV1, RV1)
    let tmp = sqrt_candidate * SSWU_RV1;
    y.conditional_assign(&tmp, (tmp.square() * gx_den).ct_eq(&gx0_num));
    // check Fp2(RV1, -RV1)
    let tmp = Fp2 {
        c0: tmp.c1,
        c1: -tmp.c0,
    };
    y.conditional_assign(&tmp, (tmp.square() * gx_den).ct_eq(&gx0_num));

    // compute g(x1(u)) = g(x0(u)) * XI^3 * u^6
    let gx1_num = gx0_num * xi_usq * xisq_u4;
    // compute g(x1(u)) * u^3
    let sqrt_candidate = sqrt_candidate * usq * u;
    // Try each eta constant; remember (in constant time) whether any of them
    // yielded a valid root for the x1 branch.
    let mut eta_found = Choice::from(0u8);
    for eta in &SSWU_ETAS[..] {
        let tmp = sqrt_candidate * eta;
        let found = (tmp.square() * gx_den).ct_eq(&gx1_num);
        y.conditional_assign(&tmp, found);
        eta_found |= found;
    }

    // Use x1 = XI * u^2 * x0 iff a root was found on that branch.
    let x_num = Fp2::conditional_select(&x0_num, &(x0_num * xi_usq), eta_found);
    // ensure sign of y and sign of u agree
    y.conditional_negate(u.sgn0() ^ y.sgn0());

    // Projective output; y was computed for x/x_den, hence the extra factors.
    G2Projective {
        x: x_num,
        y: y * x_den,
        z: x_den,
    }
}
/// Maps from an iso-G2 point to a G2 point.
///
/// Evaluates the four 3-isogeny polynomials (x numerator/denominator,
/// y numerator/denominator) on the projective input via Horner's rule,
/// homogenized with powers of z, then recombines them into a projective
/// G2 point.
fn iso_map(u: &G2Projective) -> G2Projective {
    const COEFFS: [&[Fp2]; 4] = [&ISO3_XNUM, &ISO3_XDEN, &ISO3_YNUM, &ISO3_YDEN];

    // Unpack the input point.
    let G2Projective { x, y, z } = *u;

    // Powers of z used to homogenize the evaluation.
    let zsq = z.square();
    let zpows = [z, zsq, zsq * z];

    // xnum, xden, ynum, yden — one Horner evaluation per polynomial.
    let mut mapvals = [Fp2::zero(); 4];
    for (mapval, coeff) in mapvals.iter_mut().zip(COEFFS.iter()) {
        let hi = coeff.len() - 1;
        *mapval = coeff[hi];
        // Remaining coefficients, highest degree first, each paired with the
        // next power of z.
        for (zpow, c) in zpows.iter().zip(coeff[..hi].iter().rev()) {
            *mapval = *mapval * x + *zpow * *c;
        }
    }

    // x denominator is order 1 less than x numerator, so it needs an extra
    // factor of z.
    mapvals[1] *= z;

    // Multiply the result of the Y map by the y-coordinate, y / z.
    mapvals[2] *= y;
    mapvals[3] *= z;

    G2Projective {
        x: mapvals[0] * mapvals[3], // xnum * yden
        y: mapvals[2] * mapvals[1], // ynum * xden
        z: mapvals[1] * mapvals[3], // xden * yden
    }
}
impl MapToCurve for G2Projective {
    type Field = Fp2;

    /// Simplified SWU onto the 3-isogenous curve, then the isogeny map back
    /// onto G2.
    fn map_to_curve(u: &Fp2) -> G2Projective {
        iso_map(&map_to_curve_simple_swu(u))
    }

    /// Clears the G2 cofactor.
    fn clear_h(&self) -> Self {
        self.clear_cofactor()
    }
}
#[cfg(test)]
fn check_g2_prime(pt: &G2Projective) -> bool {
    // (X : Y : Z) == (X/Z, Y/Z) is on E': y^2 = x^3 + A * x + B, checked in
    // the homogenized form y^2 z = x^3 + A x z^2 + B z^3.
    let z2 = pt.z.square();
    let lhs = pt.y.square() * pt.z;
    let rhs = pt.x.square() * pt.x + SSWU_ELLP_A * pt.x * z2 + SSWU_ELLP_B * z2 * pt.z;
    lhs == rhs
}
#[test]
fn test_osswu_semirandom() {
    use rand_core::SeedableRng;

    // Fixed seed so the test is reproducible.
    const SEED: [u8; 16] = [
        0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
        0xbc, 0xe5,
    ];
    let mut rng = rand_xorshift::XorShiftRng::from_seed(SEED);

    for _ in 0..32 {
        let input = Fp2::random(&mut rng);
        // The SWU map must land on the isogenous curve E'...
        let on_iso = map_to_curve_simple_swu(&input);
        assert!(check_g2_prime(&on_iso));
        // ...and the isogeny must carry that point onto G2 itself.
        let on_g2 = iso_map(&on_iso);
        assert!(bool::from(on_g2.is_on_curve()));
    }
}
// test vectors from the draft 10 RFC
// (BLS12381G2_XMD:SHA-256_SSWU_NU_ suite, i.e. the non-uniform encoding)
#[test]
fn test_encode_to_curve_10() {
    use crate::{
        g2::G2Affine,
        hash_to_curve::{ExpandMsgXmd, HashToCurve},
    };
    use std::string::{String, ToString};

    struct TestCase {
        msg: &'static [u8],
        expected: [&'static str; 4],
    }
    impl TestCase {
        // Expected uncompressed point, hex-encoded; split into four strings
        // purely for source readability.
        fn expected(&self) -> String {
            self.expected[0].to_string() + self.expected[1] + self.expected[2] + self.expected[3]
        }
    }

    const DOMAIN: &[u8] = b"QUUX-V01-CS02-with-BLS12381G2_XMD:SHA-256_SSWU_NU_";

    let cases = vec![
        TestCase {
            msg: b"",
            expected: [
                "126b855e9e69b1f691f816e48ac6977664d24d99f8724868a184186469ddfd4617367e94527d4b74fc86413483afb35b",
                "00e7f4568a82b4b7dc1f14c6aaa055edf51502319c723c4dc2688c7fe5944c213f510328082396515734b6612c4e7bb7",
                "1498aadcf7ae2b345243e281ae076df6de84455d766ab6fcdaad71fab60abb2e8b980a440043cd305db09d283c895e3d",
                "0caead0fd7b6176c01436833c79d305c78be307da5f6af6c133c47311def6ff1e0babf57a0fb5539fce7ee12407b0a42",
            ],
        },
        TestCase {
            msg: b"abc",
            expected: [
                "0296238ea82c6d4adb3c838ee3cb2346049c90b96d602d7bb1b469b905c9228be25c627bffee872def773d5b2a2eb57d",
                "108ed59fd9fae381abfd1d6bce2fd2fa220990f0f837fa30e0f27914ed6e1454db0d1ee957b219f61da6ff8be0d6441f",
                "153606c417e59fb331b7ae6bce4fbf7c5190c33ce9402b5ebe2b70e44fca614f3f1382a3625ed5493843d0b0a652fc3f",
                "033f90f6057aadacae7963b0a0b379dd46750c1c94a6357c99b65f63b79e321ff50fe3053330911c56b6ceea08fee656",
            ],
        },
        TestCase {
            msg: b"abcdef0123456789",
            expected: [
                "0da75be60fb6aa0e9e3143e40c42796edf15685cafe0279afd2a67c3dff1c82341f17effd402e4f1af240ea90f4b659b",
                "038af300ef34c7759a6caaa4e69363cafeed218a1f207e93b2c70d91a1263d375d6730bd6b6509dcac3ba5b567e85bf3",
                "0492f4fed741b073e5a82580f7c663f9b79e036b70ab3e51162359cec4e77c78086fe879b65ca7a47d34374c8315ac5e",
                "19b148cbdf163cf0894f29660d2e7bfb2b68e37d54cc83fd4e6e62c020eaa48709302ef8e746736c0e19342cc1ce3df4",
            ]
        },
        TestCase {
            msg: b"q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
                  qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
                  qqqqqqqqqqqqqqqqqqqqqqqqq",
            expected: [
                "12c8c05c1d5fc7bfa847f4d7d81e294e66b9a78bc9953990c358945e1f042eedafce608b67fdd3ab0cb2e6e263b9b1ad",
                "0c5ae723be00e6c3f0efe184fdc0702b64588fe77dda152ab13099a3bacd3876767fa7bbad6d6fd90b3642e902b208f9",
                "11c624c56dbe154d759d021eec60fab3d8b852395a89de497e48504366feedd4662d023af447d66926a28076813dd646",
                "04e77ddb3ede41b5ec4396b7421dd916efc68a358a0d7425bddd253547f2fb4830522358491827265dfc5bcc1928a569",
            ]
        },
        TestCase {
            msg: b"a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            expected: [
                "1565c2f625032d232f13121d3cfb476f45275c303a037faa255f9da62000c2c864ea881e2bcddd111edc4a3c0da3e88d",
                "0ea4e7c33d43e17cc516a72f76437c4bf81d8f4eac69ac355d3bf9b71b8138d55dc10fd458be115afa798b55dac34be1",
                "0f8991d2a1ad662e7b6f58ab787947f1fa607fce12dde171bc17903b012091b657e15333e11701edcf5b63ba2a561247",
                "043b6f5fe4e52c839148dc66f2b3751e69a0f6ebb3d056d6465d50d4108543ecd956e10fa1640dfd9bc0030cc2558d28",
            ]
        }
    ];

    for case in cases {
        let g = <G2Projective as HashToCurve<ExpandMsgXmd<sha2::Sha256>>>::encode_to_curve(
            &case.msg, DOMAIN,
        );
        let g_uncompressed = G2Affine::from(g).to_uncompressed();

        assert_eq!(case.expected(), hex::encode(&g_uncompressed[..]));
    }
}
// test vectors from the draft 10 RFC
// (BLS12381G2_XMD:SHA-256_SSWU_RO_ suite, i.e. the uniform/random-oracle
// encoding)
#[test]
fn test_hash_to_curve_10() {
    use crate::{
        g2::G2Affine,
        hash_to_curve::{ExpandMsgXmd, HashToCurve},
    };
    use std::string::{String, ToString};

    struct TestCase {
        msg: &'static [u8],
        expected: [&'static str; 4],
    }
    impl TestCase {
        // Expected uncompressed point, hex-encoded; split into four strings
        // purely for source readability.
        fn expected(&self) -> String {
            self.expected[0].to_string() + self.expected[1] + self.expected[2] + self.expected[3]
        }
    }

    const DOMAIN: &[u8] = b"QUUX-V01-CS02-with-BLS12381G2_XMD:SHA-256_SSWU_RO_";

    let cases = vec![
        TestCase {
            msg: b"",
            expected: [
                "05cb8437535e20ecffaef7752baddf98034139c38452458baeefab379ba13dff5bf5dd71b72418717047f5b0f37da03d",
                "0141ebfbdca40eb85b87142e130ab689c673cf60f1a3e98d69335266f30d9b8d4ac44c1038e9dcdd5393faf5c41fb78a",
                "12424ac32561493f3fe3c260708a12b7c620e7be00099a974e259ddc7d1f6395c3c811cdd19f1e8dbf3e9ecfdcbab8d6",
                "0503921d7f6a12805e72940b963c0cf3471c7b2a524950ca195d11062ee75ec076daf2d4bc358c4b190c0c98064fdd92",
            ],
        },
        TestCase {
            msg: b"abc",
            expected: [
                "139cddbccdc5e91b9623efd38c49f81a6f83f175e80b06fc374de9eb4b41dfe4ca3a230ed250fbe3a2acf73a41177fd8",
                "02c2d18e033b960562aae3cab37a27ce00d80ccd5ba4b7fe0e7a210245129dbec7780ccc7954725f4168aff2787776e6",
                "00aa65dae3c8d732d10ecd2c50f8a1baf3001578f71c694e03866e9f3d49ac1e1ce70dd94a733534f106d4cec0eddd16",
                "1787327b68159716a37440985269cf584bcb1e621d3a7202be6ea05c4cfe244aeb197642555a0645fb87bf7466b2ba48",
            ],
        },
        TestCase {
            msg: b"abcdef0123456789",
            expected: [
                "190d119345b94fbd15497bcba94ecf7db2cbfd1e1fe7da034d26cbba169fb3968288b3fafb265f9ebd380512a71c3f2c",
                "121982811d2491fde9ba7ed31ef9ca474f0e1501297f68c298e9f4c0028add35aea8bb83d53c08cfc007c1e005723cd0",
                "0bb5e7572275c567462d91807de765611490205a941a5a6af3b1691bfe596c31225d3aabdf15faff860cb4ef17c7c3be",
                "05571a0f8d3c08d094576981f4a3b8eda0a8e771fcdcc8ecceaf1356a6acf17574518acb506e435b639353c2e14827c8",
            ]
        },
        TestCase {
            msg: b"q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
                  qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
                  qqqqqqqqqqqqqqqqqqqqqqqqq",
            expected: [
                "0934aba516a52d8ae479939a91998299c76d39cc0c035cd18813bec433f587e2d7a4fef038260eef0cef4d02aae3eb91",
                "19a84dd7248a1066f737cc34502ee5555bd3c19f2ecdb3c7d9e24dc65d4e25e50d83f0f77105e955d78f4762d33c17da",
                "09bcccfa036b4847c9950780733633f13619994394c23ff0b32fa6b795844f4a0673e20282d07bc69641cee04f5e5662",
                "14f81cd421617428bc3b9fe25afbb751d934a00493524bc4e065635b0555084dd54679df1536101b2c979c0152d09192",
            ]
        },
        TestCase {
            msg: b"a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            expected: [
                "11fca2ff525572795a801eed17eb12785887c7b63fb77a42be46ce4a34131d71f7a73e95fee3f812aea3de78b4d01569",
                "01a6ba2f9a11fa5598b2d8ace0fbe0a0eacb65deceb476fbbcb64fd24557c2f4b18ecfc5663e54ae16a84f5ab7f62534",
                "03a47f8e6d1763ba0cad63d6114c0accbef65707825a511b251a660a9b3994249ae4e63fac38b23da0c398689ee2ab52",
                "0b6798718c8aed24bc19cb27f866f1c9effcdbf92397ad6448b5c9db90d2b9da6cbabf48adc1adf59a1a28344e79d57e",
            ]
        }
    ];

    for case in cases {
        let g = <G2Projective as HashToCurve<ExpandMsgXmd<sha2::Sha256>>>::hash_to_curve(
            &case.msg, DOMAIN,
        );
        let g_uncompressed = G2Affine::from(g).to_uncompressed();

        assert_eq!(case.expected(), hex::encode(&g_uncompressed[..]));
    }
}
#[test]
fn test_sgn0() {
    use super::map_g1::P_M1_OVER2;

    // (p + 1) / 2 — the smallest element whose sgn0 is "negative" (false),
    // one past P_M1_OVER2 which is the largest "positive" (true) element.
    let p_p1_over2 = P_M1_OVER2 + Fp::one();

    // (element, expected sgn0). sgn0 of an Fp2 element follows c0 unless c0
    // is zero, in which case it follows c1. Cases listed in the same order
    // as the original hand-rolled asserts.
    let cases = [
        (Fp2::zero(), false),
        (Fp2::one(), true),
        (Fp2 { c0: P_M1_OVER2, c1: Fp::zero() }, true),
        (Fp2 { c0: P_M1_OVER2, c1: Fp::one() }, true),
        (Fp2 { c0: Fp::zero(), c1: P_M1_OVER2 }, true),
        (Fp2 { c0: Fp::one(), c1: P_M1_OVER2 }, true),
        (Fp2 { c0: p_p1_over2, c1: Fp::zero() }, false),
        (Fp2 { c0: p_p1_over2, c1: Fp::one() }, false),
        (Fp2 { c0: Fp::zero(), c1: p_p1_over2 }, false),
        (Fp2 { c0: Fp::one(), c1: p_p1_over2 }, true),
        (Fp2 { c0: P_M1_OVER2, c1: -Fp::one() }, true),
        (Fp2 { c0: p_p1_over2, c1: -Fp::one() }, false),
        (Fp2 { c0: Fp::zero(), c1: -Fp::one() }, false),
        (Fp2 { c0: P_M1_OVER2, c1: p_p1_over2 }, true),
        (Fp2 { c0: p_p1_over2, c1: P_M1_OVER2 }, false),
        (Fp2 { c0: -Fp::one(), c1: P_M1_OVER2 }, false),
        (Fp2 { c0: -Fp::one(), c1: p_p1_over2 }, false),
        (Fp2 { c0: -Fp::one(), c1: Fp::zero() }, false),
        (Fp2 { c0: p_p1_over2, c1: P_M1_OVER2 }, false),
        (Fp2 { c0: P_M1_OVER2, c1: p_p1_over2 }, true),
    ];

    for (idx, (val, expected)) in cases.iter().enumerate() {
        assert_eq!(bool::from(val.sgn0()), *expected, "sgn0 case {}", idx);
    }
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/zkcrypto/bls12_381/src/hash_to_curve/chain.rs | zkcrypto/bls12_381/src/hash_to_curve/chain.rs | //! Addition chains for computing square roots.
//! chain_pm3div4: input x, output x^((p-3)//4).
//! chain_p2m9div16: input x, output x^((p**2 - 9) // 16).
use core::ops::MulAssign;
use crate::{fp::Fp, fp2::Fp2};
// Squares `$var` in place `$n` times, i.e. raises it to the power 2^$n.
// Used by the addition chains below to advance between multiplications.
macro_rules! square {
    ($var:expr, $n:expr) => {
        for _ in 0..$n {
            $var = $var.square();
        }
    };
}
#[allow(clippy::cognitive_complexity)]
/// addchain for 1000602388805416848354447456433976039139220704984751971333014534031007912622709466110671907282253916009473568139946
/// Bos-Coster (win=4) : 458 links, 16 variables */
/// Addition chain implementing exponentiation by (p - 3) // 4.
pub fn chain_pm3div4(var0: &Fp) -> Fp {
let mut var1 = var0.square();
//Self::sqr(var1, var0); /* 0 : 2 */
let var9 = var1 * var0;
//Self::mul(&mut var9, var1, var0); /* 1 : 3 */
let var5 = var1.square();
//Self::sqr(&mut var5, var1); /* 2 : 4 */
let var2 = var9 * var1;
//Self::mul(&mut var2, &var9, var1); /* 3 : 5 */
let var7 = var5 * var9;
//Self::mul(&mut var7, &var5, &var9); /* 4 : 7 */
let var10 = var2 * var5;
//Self::mul(&mut var10, &var2, &var5); /* 5 : 9 */
let var13 = var7 * var5;
//Self::mul(&mut var13, &var7, &var5); /* 6 : 11 */
let var4 = var10 * var5;
//Self::mul(&mut var4, &var10, &var5); /* 7 : 13 */
let var8 = var13 * var5;
//Self::mul(&mut var8, &var13, &var5); /* 8 : 15 */
let var15 = var4 * var5;
//Self::mul(&mut var15, &var4, &var5); /* 9 : 17 */
let var11 = var8 * var5;
//Self::mul(&mut var11, &var8, &var5); /* 10 : 19 */
let var3 = var15 * var5;
//Self::mul(&mut var3, &var15, &var5); /* 11 : 21 */
let var12 = var11 * var5;
//Self::mul(&mut var12, &var11, &var5); /* 12 : 23 */
var1 = var4.square();
//Self::sqr(var1, &var4); /* 13 : 26 */
let var14 = var12 * var5;
//Self::mul(&mut var14, &var12, &var5); /* 14 : 27 */
let var6 = var1 * var9;
//Self::mul(&mut var6, var1, &var9); /* 15 : 29 */
let var5 = var1 * var2;
//Self::mul(&mut var5, var1, &var2); /* 16 : 31 */
// 17 : 106496
square!(var1, 12);
// 29 : 106513
var1.mul_assign(&var15);
// 30 : 13633664
square!(var1, 7);
// 37 : 13633679
var1.mul_assign(&var8);
// 38 : 218138864
square!(var1, 4);
// 42 : 218138869
var1.mul_assign(&var2);
// 43 : 13960887616
square!(var1, 6);
// 49 : 13960887623
var1.mul_assign(&var7);
// 50 : 1786993615744
square!(var1, 7);
// 57 : 1786993615767
var1.mul_assign(&var12);
// 58 : 57183795704544
square!(var1, 5);
// 63 : 57183795704575
var1.mul_assign(&var5);
// 64 : 228735182818300
square!(var1, 2);
// 66 : 228735182818303
var1.mul_assign(&var9);
// 67 : 14639051700371392
square!(var1, 6);
// 73 : 14639051700371405
var1.mul_assign(&var4);
// 74 : 936899308823769920
square!(var1, 6);
// 80 : 936899308823769933
var1.mul_assign(&var4);
// 81 : 59961555764721275712
square!(var1, 6);
// 87 : 59961555764721275721
var1.mul_assign(&var10);
// 88 : 479692446117770205768
square!(var1, 3);
// 91 : 479692446117770205771
var1.mul_assign(&var9);
// 92 : 61400633103074586338688
square!(var1, 7);
// 99 : 61400633103074586338701
var1.mul_assign(&var4);
// 100 : 982410129649193381419216
square!(var1, 4);
// 104 : 982410129649193381419229
var1.mul_assign(&var4);
// 105 : 62874248297548376410830656
square!(var1, 6);
// 111 : 62874248297548376410830671
var1.mul_assign(&var8);
// 112 : 4023951891043096090293162944
square!(var1, 6);
// 118 : 4023951891043096090293162971
var1.mul_assign(&var14);
// 119 : 32191615128344768722345303768
square!(var1, 3);
// 122 : 32191615128344768722345303769
var1.mul_assign(var0);
// 123 : 8241053472856260792920397764864
square!(var1, 8);
// 131 : 8241053472856260792920397764877
var1.mul_assign(&var4);
// 132 : 1054854844525601381493810913904256
square!(var1, 7);
// 139 : 1054854844525601381493810913904279
var1.mul_assign(&var12);
// 140 : 33755355024819244207801949244936928
square!(var1, 5);
// 145 : 33755355024819244207801949244936939
var1.mul_assign(&var13);
// 146 : 2160342721588431629299324751675964096
square!(var1, 6);
// 152 : 2160342721588431629299324751675964109
var1.mul_assign(&var4);
// 153 : 138261934181659624275156784107261702976
square!(var1, 6);
// 159 : 138261934181659624275156784107261703005
var1.mul_assign(&var6);
// 160 : 2212190946906553988402508545716187248080
square!(var1, 4);
// 164 : 2212190946906553988402508545716187248089
var1.mul_assign(&var10);
// 165 : 566320882408077821031042187703343935510784
square!(var1, 8);
// 173 : 566320882408077821031042187703343935510813
var1.mul_assign(&var6);
// 174 : 9061134118529245136496675003253502968173008
square!(var1, 4);
// 178 : 9061134118529245136496675003253502968173021
var1.mul_assign(&var4);
// 179 : 1159825167171743377471574400416448379926146688
square!(var1, 7);
// 186 : 1159825167171743377471574400416448379926146711
var1.mul_assign(&var12);
// 187 : 593830485591932609265446093013221570522187116032
square!(var1, 9);
// 196 : 593830485591932609265446093013221570522187116051
var1.mul_assign(&var11);
// 197 : 2375321942367730437061784372052886282088748464204
square!(var1, 2);
// 199 : 2375321942367730437061784372052886282088748464207
var1.mul_assign(&var9);
// 200 : 76010302155767373985977099905692361026839950854624
square!(var1, 5);
// 205 : 76010302155767373985977099905692361026839950854631
var1.mul_assign(&var7);
// 206 : 9729318675938223870205068787928622211435513709392768
square!(var1, 7);
// 213 : 9729318675938223870205068787928622211435513709392773
var1.mul_assign(&var2);
// 214 : 1245352790520092655386248804854863643063745754802274944
square!(var1, 7);
// 221 : 1245352790520092655386248804854863643063745754802274953
var1.mul_assign(&var10);
// 222 : 79702578593285929944719923510711273156079728307345596992
square!(var1, 6);
// 228 : 79702578593285929944719923510711273156079728307345597015
var1.mul_assign(&var12);
// 229 : 2550482514985149758231037552342760740994551305835059104480
square!(var1, 5);
// 234 : 2550482514985149758231037552342760740994551305835059104509
var1.mul_assign(&var6);
// 235 : 81615440479524792263393201674968343711825641786721891344288
square!(var1, 5);
// 240 : 81615440479524792263393201674968343711825641786721891344307
var1.mul_assign(&var11);
// 241 : 2611694095344793352428582453598986998778420537175100523017824
square!(var1, 5);
// 246 : 2611694095344793352428582453598986998778420537175100523017843
var1.mul_assign(&var11);
// 247 : 668593688408267098221717108121340671687275657516825733892567808
square!(var1, 8);
// 255 : 668593688408267098221717108121340671687275657516825733892567821
var1.mul_assign(&var4);
// 256 : 85579992116258188572379789839531605975971284162153693938248681088
square!(var1, 7);
// 263 : 85579992116258188572379789839531605975971284162153693938248681109
var1.mul_assign(&var3);
// 264 : 43816955963524192549058452397840182259697297491022691296383324727808
square!(var1, 9);
// 273 : 43816955963524192549058452397840182259697297491022691296383324727823
var1.mul_assign(&var8);
// 274 : 1402142590832774161569870476730885832310313519712726121484266391290336
square!(var1, 5);
// 279 : 1402142590832774161569870476730885832310313519712726121484266391290349
var1.mul_assign(&var4);
// 280 : 11217140726662193292558963813847086658482508157701808971874131130322792
square!(var1, 3);
// 283 : 11217140726662193292558963813847086658482508157701808971874131130322795
var1.mul_assign(&var9);
// 284 : 2871588026025521482895094736344854184571522088371663096799777569362635520
square!(var1, 8);
// 292 : 2871588026025521482895094736344854184571522088371663096799777569362635535
var1.mul_assign(&var8);
// 293 : 22972704208204171863160757890758833476572176706973304774398220554901084280
square!(var1, 3);
// 296 : 22972704208204171863160757890758833476572176706973304774398220554901084283
var1.mul_assign(&var9);
// 297 : 2940506138650133998484577010017130685001238618492583011122972231027338788224
square!(var1, 7);
// 304 : 2940506138650133998484577010017130685001238618492583011122972231027338788233
var1.mul_assign(&var10);
// 305 : 1505539142988868607224103429128770910720634172668202501694961782285997459575296
square!(var1, 9);
// 314 : 1505539142988868607224103429128770910720634172668202501694961782285997459575311
var1.mul_assign(&var8);
// 315 : 96354505151287590862342619464241338286120587050764960108477554066303837412819904
square!(var1, 6);
// 321 : 96354505151287590862342619464241338286120587050764960108477554066303837412819925
var1.mul_assign(&var3);
// 322 : 6166688329682405815189927645711445650311717571248957446942563460243445594420475200
square!(var1, 6);
// 328 : 6166688329682405815189927645711445650311717571248957446942563460243445594420475231
var1.mul_assign(&var5);
// 329 : 197334026549836986086077684662766260809974962279966638302162030727790259021455207392
square!(var1, 5);
// 334 : 197334026549836986086077684662766260809974962279966638302162030727790259021455207423
var1.mul_assign(&var5);
// 335 : 6314688849594783554754485909208520345919198792958932425669184983289288288686566637536
square!(var1, 5);
// 340 : 6314688849594783554754485909208520345919198792958932425669184983289288288686566637567
var1.mul_assign(&var5);
// 341 : 101035021593516536876071774547336325534707180687342918810706959732628612618985066201072
square!(var1, 4);
// 345 : 101035021593516536876071774547336325534707180687342918810706959732628612618985066201085
var1.mul_assign(&var4);
// 346 : 808280172748132295008574196378690604277657445498743350485655677861028900951880529608680
square!(var1, 3);
// 349 : 808280172748132295008574196378690604277657445498743350485655677861028900951880529608683
var1.mul_assign(&var9);
// 350 : 206919724223521867522194994272944794695080306047678297724327853532423398643681415579822848
square!(var1, 8);
// 358 : 206919724223521867522194994272944794695080306047678297724327853532423398643681415579822869
var1.mul_assign(&var3);
// 359 : 26485724700610799042840959266936933720970279174102822108713965252150195026391221194217327232
square!(var1, 7);
// 366 : 26485724700610799042840959266936933720970279174102822108713965252150195026391221194217327263
var1.mul_assign(&var5);
// 367 : 847543190419545569370910696541981879071048933571290307478846888068806240844519078214954472416
square!(var1, 5);
// 372 : 847543190419545569370910696541981879071048933571290307478846888068806240844519078214954472447
var1.mul_assign(&var5);
// 373 : 27121382093425458219869142289343420130273565874281289839323100418201799707024610502878543118304
square!(var1, 5);
// 378 : 27121382093425458219869142289343420130273565874281289839323100418201799707024610502878543118335
var1.mul_assign(&var5);
// 379 : 433942113494807331517906276629494722084377053988500637429169606691228795312393768046056689893360
square!(var1, 4);
// 383 : 433942113494807331517906276629494722084377053988500637429169606691228795312393768046056689893375
var1.mul_assign(&var8);
// 384 : 6943073815916917304286500426071915553350032863816010198866713707059660724998300288736907038294000
square!(var1, 4);
// 388 : 6943073815916917304286500426071915553350032863816010198866713707059660724998300288736907038294007
var1.mul_assign(&var7);
// 389 : 888713448437365414948672054537205190828804206568449305454939354503636572799782436958324100901632896
square!(var1, 7);
// 396 : 888713448437365414948672054537205190828804206568449305454939354503636572799782436958324100901632927
var1.mul_assign(&var5);
// 397 : 28438830349995693278357505745190566106521734610190377774558059344116370329593037982666371228852253664
square!(var1, 5);
// 402 : 28438830349995693278357505745190566106521734610190377774558059344116370329593037982666371228852253693
var1.mul_assign(&var6);
// 403 : 910042571199862184907440183846098115408695507526092088785857899011723850546977215445323879323272118176
square!(var1, 5);
// 408 : 910042571199862184907440183846098115408695507526092088785857899011723850546977215445323879323272118207
var1.mul_assign(&var5);
// 409 : 29121362278395589917038085883075139693078256240834946841147452768375163217503270894250364138344707782624
square!(var1, 5);
// 414 : 29121362278395589917038085883075139693078256240834946841147452768375163217503270894250364138344707782655
var1.mul_assign(&var5);
// 415 : 931883592908658877345218748258404470178504199706718298916718488588005222960104668616011652427030649044960
square!(var1, 5);
// 420 : 931883592908658877345218748258404470178504199706718298916718488588005222960104668616011652427030649044991
var1.mul_assign(&var5);
// 421 : 29820274973077084075046999944268943045712134390614985565334991634816167134723349395712372877664980769439712
square!(var1, 5);
// 426 : 29820274973077084075046999944268943045712134390614985565334991634816167134723349395712372877664980769439743
var1.mul_assign(&var5);
// 427 : 954248799138466690401503998216606177462788300499679538090719732314117348311147180662795932085279384622071776
square!(var1, 5);
// 432 : 954248799138466690401503998216606177462788300499679538090719732314117348311147180662795932085279384622071807
var1.mul_assign(&var5);
// 433 : 30535961572430934092848127942931397678809225615989745218903031434051755145956709781209469826728940307906297824
square!(var1, 5);
// 438 : 30535961572430934092848127942931397678809225615989745218903031434051755145956709781209469826728940307906297855
var1.mul_assign(&var5);
// 439 : 488575385158894945485570047086902362860947609855835923502448502944828082335307356499351517227663044926500765680
square!(var1, 4);
// 443 : 488575385158894945485570047086902362860947609855835923502448502944828082335307356499351517227663044926500765693
var1.mul_assign(&var4);
// 444 : 31268824650169276511076483013561751223100647030773499104156704188468997269459670815958497102570434875296049004352
square!(var1, 6);
// 450 : 31268824650169276511076483013561751223100647030773499104156704188468997269459670815958497102570434875296049004373
var1.mul_assign(&var3);
// 451 : 500301194402708424177223728216988019569610352492375985666507267015503956311354733055335953641126958004736784069968
square!(var1, 4);
// 455 : 500301194402708424177223728216988019569610352492375985666507267015503956311354733055335953641126958004736784069973
var1.mul_assign(&var2);
// 456 : 1000602388805416848354447456433976039139220704984751971333014534031007912622709466110671907282253916009473568139946
var1.square()
}
#[allow(clippy::cognitive_complexity)]
/// addchain for 1001205140483106588246484290269935788605945006208159541241399033561623546780709821462541004956387089373434649096260670658193992783731681621012512651314777238193313314641988297376025498093520728838658813979860931248214124593092835
/// Bos-Coster (win=4) : 895 links, 17 variables
/// Addition chain implementing exponentiation by (p**2 - 9) // 16.
pub fn chain_p2m9div16(var0: &Fp2) -> Fp2 {
let mut var1 = var0.square();
//Self::sqr(var1, var0); /* 0 : 2 */
let var2 = var1 * var0;
//Self::mul(&mut var2, var1, var0); /* 1 : 3 */
let var15 = var2 * var1;
//Self::mul(&mut var15, &var2, var1); /* 2 : 5 */
let var3 = var15 * var1;
//Self::mul(&mut var3, &var15, var1); /* 3 : 7 */
let var14 = var3 * var1;
//Self::mul(&mut var14, &var3, var1); /* 4 : 9 */
let var13 = var14 * var1;
//Self::mul(&mut var13, &var14, var1); /* 5 : 11 */
let var5 = var13 * var1;
//Self::mul(&mut var5, &var13, var1); /* 6 : 13 */
let var10 = var5 * var1;
//Self::mul(&mut var10, &var5, var1); /* 7 : 15 */
let var9 = var10 * var1;
//Self::mul(&mut var9, &var10, var1); /* 8 : 17 */
let var16 = var9 * var1;
//Self::mul(&mut var16, &var9, var1); /* 9 : 19 */
let var4 = var16 * var1;
//Self::mul(&mut var4, &var16, var1); /* 10 : 21 */
let var7 = var4 * var1;
//Self::mul(&mut var7, &var4, var1); /* 11 : 23 */
let var6 = var7 * var1;
//Self::mul(&mut var6, &var7, var1); /* 12 : 25 */
let var12 = var6 * var1;
//Self::mul(&mut var12, &var6, var1); /* 13 : 27 */
let var8 = var12 * var1;
//Self::mul(&mut var8, &var12, var1); /* 14 : 29 */
let var11 = var8 * var1;
//Self::mul(&mut var11, &var8, var1); /* 15 : 31 */
var1 = var4.square();
//Self::sqr(var1, &var4); /* 16 : 42 */
// 17 : 168
square!(var1, 2);
// 19 : 169
var1.mul_assign(var0);
// 20 : 86528
square!(var1, 9);
// 29 : 86555
var1.mul_assign(&var12);
// 30 : 1384880
square!(var1, 4);
// 34 : 1384893
var1.mul_assign(&var5);
// 35 : 88633152
square!(var1, 6);
// 41 : 88633161
var1.mul_assign(&var14);
// 42 : 1418130576
square!(var1, 4);
// 46 : 1418130583
var1.mul_assign(&var3);
// 47 : 45380178656
square!(var1, 5);
// 52 : 45380178659
var1.mul_assign(&var2);
// 53 : 11617325736704
square!(var1, 8);
// 61 : 11617325736717
var1.mul_assign(&var5);
// 62 : 185877211787472
square!(var1, 4);
// 66 : 185877211787479
var1.mul_assign(&var3);
// 67 : 2974035388599664
square!(var1, 4);
// 71 : 2974035388599679
var1.mul_assign(&var10);
// 72 : 761353059481517824
square!(var1, 8);
// 80 : 761353059481517853
var1.mul_assign(&var8);
// 81 : 48726595806817142592
square!(var1, 6);
// 87 : 48726595806817142603
var1.mul_assign(&var13);
// 88 : 779625532909074281648
square!(var1, 4);
// 92 : 779625532909074281661
var1.mul_assign(&var5);
// 93 : 6237004263272594253288
square!(var1, 3);
// 96 : 6237004263272594253289
var1.mul_assign(var0);
// 97 : 399168272849446032210496
square!(var1, 6);
// 103 : 399168272849446032210511
var1.mul_assign(&var10);
// 104 : 102187077849458184245890816
square!(var1, 8);
// 112 : 102187077849458184245890845
var1.mul_assign(&var8);
// 113 : 6539972982365323791737014080
square!(var1, 6);
// 119 : 6539972982365323791737014101
var1.mul_assign(&var4);
// 120 : 1674233083485522890684675609856
square!(var1, 8);
// 128 : 1674233083485522890684675609873
var1.mul_assign(&var9);
// 129 : 53575458671536732501909619515936
square!(var1, 5);
// 134 : 53575458671536732501909619515951
var1.mul_assign(&var10);
// 135 : 3428829354978350880122215649020864
square!(var1, 6);
// 141 : 3428829354978350880122215649020873
var1.mul_assign(&var14);
// 142 : 109722539359307228163910900768667936
square!(var1, 5);
// 147 : 109722539359307228163910900768667951
var1.mul_assign(&var10);
// 148 : 438890157437228912655643603074671804
square!(var1, 2);
// 150 : 438890157437228912655643603074671805
var1.mul_assign(var0);
// 151 : 28088970075982650409961190596778995520
square!(var1, 6);
// 157 : 28088970075982650409961190596778995535
var1.mul_assign(&var10);
// 158 : 3595388169725779252475032396387711428480
square!(var1, 7);
// 165 : 3595388169725779252475032396387711428491
var1.mul_assign(&var13);
// 166 : 57526210715612468039600518342203382855856
square!(var1, 4);
// 170 : 57526210715612468039600518342203382855863
var1.mul_assign(&var3);
// 171 : 3681677485799197954534433173901016502775232
square!(var1, 6);
// 177 : 3681677485799197954534433173901016502775241
var1.mul_assign(&var14);
// 178 : 471254718182297338180407446259330112355230848
square!(var1, 7);
// 185 : 471254718182297338180407446259330112355230855
var1.mul_assign(&var3);
// 186 : 15080150981833514821773038280298563595367387360
square!(var1, 5);
// 191 : 15080150981833514821773038280298563595367387365
var1.mul_assign(&var15);
// 192 : 1930259325674689897186948899878216140207025582720
square!(var1, 7);
// 199 : 1930259325674689897186948899878216140207025582727
var1.mul_assign(&var3);
// 200 : 61768298421590076709982364796102916486624818647264
square!(var1, 5);
// 205 : 61768298421590076709982364796102916486624818647271
var1.mul_assign(&var3);
// 206 : 63250737583708238551021941551209386482303814294805504
square!(var1, 10);
// 216 : 63250737583708238551021941551209386482303814294805521
var1.mul_assign(&var9);
// 217 : 506005900669665908408175532409675091858430514358444168
square!(var1, 3);
// 220 : 506005900669665908408175532409675091858430514358444173
var1.mul_assign(&var15);
// 221 : 16192188821429309069061617037109602939469776459470213536
square!(var1, 5);
// 226 : 16192188821429309069061617037109602939469776459470213549
var1.mul_assign(&var5);
// 227 : 4145200338285903121679773961500058352504262773624374668544
square!(var1, 8);
// 235 : 4145200338285903121679773961500058352504262773624374668569
var1.mul_assign(&var6);
// 236 : 132646410825148899893752766768001867280136408755979989394208
square!(var1, 5);
// 241 : 132646410825148899893752766768001867280136408755979989394231
var1.mul_assign(&var7);
// 242 : 8489370292809529593200177073152119505928730160382719321230784
square!(var1, 6);
// 248 : 8489370292809529593200177073152119505928730160382719321230795
var1.mul_assign(&var13);
// 249 : 543319698739809893964811332681735648379438730264494036558770880
square!(var1, 6);
// 255 : 543319698739809893964811332681735648379438730264494036558770895
var1.mul_assign(&var10);
// 256 : 34772460719347833213747925291631081496284078736927618339761337280
square!(var1, 6);
// 262 : 34772460719347833213747925291631081496284078736927618339761337289
var1.mul_assign(&var14);
// 263 : 4450874972076522651359734437328778431524362078326735147489451172992
square!(var1, 7);
// 270 : 4450874972076522651359734437328778431524362078326735147489451173011
var1.mul_assign(&var16);
// 271 : 142427999106448724843511501994520909808779586506455524719662437536352
square!(var1, 5);
// 276 : 142427999106448724843511501994520909808779586506455524719662437536361
var1.mul_assign(&var14);
// 277 : 9115391942812718389984736127649338227761893536413153582058396002327104
square!(var1, 6);
// 283 : 9115391942812718389984736127649338227761893536413153582058396002327119
var1.mul_assign(&var10);
// 284 : 583385084340013976959023112169557646576761186330441829251737344148935616
square!(var1, 6);
// 290 : 583385084340013976959023112169557646576761186330441829251737344148935633
var1.mul_assign(&var9);
// 291 : 18668322698880447262688739589425844690456357962574138536055595012765940256
square!(var1, 5);
// 296 : 18668322698880447262688739589425844690456357962574138536055595012765940271
var1.mul_assign(&var10);
// 297 : 74673290795521789050754958357703378761825431850296554144222380051063761084
square!(var1, 2);
// 299 : 74673290795521789050754958357703378761825431850296554144222380051063761085
var1.mul_assign(var0);
// 300 : 19116362443653577996993269339572064963027310553675917860920929293072322837760
square!(var1, 8);
// 308 : 19116362443653577996993269339572064963027310553675917860920929293072322837765
var1.mul_assign(&var15);
// 309 : 2446894392787657983615138475465224315267495750870517486197878949513257323233920
square!(var1, 7);
// 316 : 2446894392787657983615138475465224315267495750870517486197878949513257323233925
var1.mul_assign(&var15);
// 317 : 39150310284602527737842215607443589044279932013928279779166063192212117171742800
square!(var1, 4);
// 321 : 39150310284602527737842215607443589044279932013928279779166063192212117171742803
var1.mul_assign(&var2);
// 322 : 5011239716429123550443803597752779397667831297782819811733256088603150997983078784
square!(var1, 7);
// 329 : 5011239716429123550443803597752779397667831297782819811733256088603150997983078795
var1.mul_assign(&var13);
// 330 : 320719341851463907228403430256177881450741203058100467950928389670601663870917042880
square!(var1, 6);
// 336 : 320719341851463907228403430256177881450741203058100467950928389670601663870917042895
var1.mul_assign(&var10);
// 337 : 5131509469623422515654454884098846103211859248929607487214854234729626621934672686320
square!(var1, 4);
// 341 : 5131509469623422515654454884098846103211859248929607487214854234729626621934672686333
var1.mul_assign(&var5);
// 342 : 656833212111798082003770225164652301211117983862989758363501342045392207607638103850624
square!(var1, 7);
// 349 : 656833212111798082003770225164652301211117983862989758363501342045392207607638103850635
var1.mul_assign(&var13);
// 350 : 42037325575155077248241294410537747277511550967231344535264085890905101286888838646440640
square!(var1, 6);
// 356 : 42037325575155077248241294410537747277511550967231344535264085890905101286888838646440667
var1.mul_assign(&var12);
// 357 : 1345194418404962471943721421137207912880369630951403025128450748508963241180442836686101344
square!(var1, 5);
// 362 : 1345194418404962471943721421137207912880369630951403025128450748508963241180442836686101367
var1.mul_assign(&var7);
// 363 : 43046221388958799102199085476390653212171828190444896804110423952286823717774170773955243744
square!(var1, 5);
// 368 : 43046221388958799102199085476390653212171828190444896804110423952286823717774170773955243749
var1.mul_assign(&var15);
// 369 : 5509916337786726285081482940978003611157994008376946790926134265892713435875093859066271199872
square!(var1, 7);
// 376 : 5509916337786726285081482940978003611157994008376946790926134265892713435875093859066271199899
var1.mul_assign(&var12);
// 377 : 176317322809175241122607454111296115557055808268062297309636296508566829948003003490120678396768
square!(var1, 5);
// 382 : 176317322809175241122607454111296115557055808268062297309636296508566829948003003490120678396791
var1.mul_assign(&var7);
// 383 : 5642154329893607715923438531561475697825785864577993513908361488274138558336096111683861708697312
square!(var1, 5);
// 388 : 5642154329893607715923438531561475697825785864577993513908361488274138558336096111683861708697333
var1.mul_assign(&var4);
// 389 : 90274469278297723454775016504983611165212573833247896222533783812386216933377537786941787339157328
square!(var1, 4);
// 393 : 90274469278297723454775016504983611165212573833247896222533783812386216933377537786941787339157331
var1.mul_assign(&var2);
// 394 : 5777566033811054301105601056318951114573604725327865358242162163992717883736162418364274389706069184
square!(var1, 6);
// 400 : 5777566033811054301105601056318951114573604725327865358242162163992717883736162418364274389706069189
var1.mul_assign(&var15);
// 401 : 369764226163907475270758467604412871332710702420983382927498378495533944559114394775313560941188428096
square!(var1, 6);
// 407 : 369764226163907475270758467604412871332710702420983382927498378495533944559114394775313560941188428105
var1.mul_assign(&var14);
// 408 : 5916227618622519604332135481670605941323371238735734126839974055928543112945830316405016975059014849680
square!(var1, 4);
// 412 : 5916227618622519604332135481670605941323371238735734126839974055928543112945830316405016975059014849683
var1.mul_assign(&var2);
// 413 : 94659641897960313669314167706729695061173939819771746029439584894856689807133285062480271600944237594928
square!(var1, 4);
// 417 : 94659641897960313669314167706729695061173939819771746029439584894856689807133285062480271600944237594931
var1.mul_assign(&var2);
// 418 : 24232868325877840299344426932922801935660528593861566983536533733083312590626120975994949529841724824302336
square!(var1, 8);
// 426 : 24232868325877840299344426932922801935660528593861566983536533733083312590626120975994949529841724824302345
var1.mul_assign(&var14);
// 427 : 775451786428090889579021661853529661941136915003570143473169079458666002900035871231838384954935194377675040
square!(var1, 5);
// 432 : 775451786428090889579021661853529661941136915003570143473169079458666002900035871231838384954935194377675055
var1.mul_assign(&var10);
// 433 : 49628914331397816933057386358625898364232762560228489182282821085354624185602295758837656637115852440171203520
square!(var1, 6);
// 439 : 49628914331397816933057386358625898364232762560228489182282821085354624185602295758837656637115852440171203527
var1.mul_assign(&var3);
// 440 : 1588125258604730141857836363476028747655448401927311653833050274731347973939273464282805012387707278085478512864
square!(var1, 5);
// 445 : 1588125258604730141857836363476028747655448401927311653833050274731347973939273464282805012387707278085478512879
var1.mul_assign(&var10);
// 446 : 6504961059244974661049697744797813750396716654294268534100173925299601301255264109702369330740049011038119988752384
square!(var1, 12);
// 458 : 6504961059244974661049697744797813750396716654294268534100173925299601301255264109702369330740049011038119988752401
var1.mul_assign(&var9);
// 459 : 104079376947919594576795163916765020006347466468708296545602782804793620820084225755237909291840784176609919820038416
square!(var1, 4);
// 463 : 104079376947919594576795163916765020006347466468708296545602782804793620820084225755237909291840784176609919820038429
var1.mul_assign(&var5);
// 464 : 3330540062333427026457445245336480640203118926998665489459289049753395866242695224167613097338905093651517434241229728
square!(var1, 5);
// 469 : 3330540062333427026457445245336480640203118926998665489459289049753395866242695224167613097338905093651517434241229741
var1.mul_assign(&var5);
// 470 : 213154563989339329693276495701534760972999611327914591325394499184217335439532494346727238229689925993697115791438703424
square!(var1, 6);
// 476 : 213154563989339329693276495701534760972999611327914591325394499184217335439532494346727238229689925993697115791438703427
var1.mul_assign(&var2);
// 477 : 109135136762541736802957565799185797618175800999892270758601983582319275745040637105524345973601242108772923285216616154624
square!(var1, 9);
// 486 : 109135136762541736802957565799185797618175800999892270758601983582319275745040637105524345973601242108772923285216616154649
var1.mul_assign(&var6);
// 487 : 3492324376401335577694642105573945523781625631996552664275263474634216823841300387376779071155239747480733545126931716948768
square!(var1, 5);
// 492 : 3492324376401335577694642105573945523781625631996552664275263474634216823841300387376779071155239747480733545126931716948793
var1.mul_assign(&var6);
// 493 : 223508760089685476972457094756732513522024040447779370513616862376589876725843224792113860553935343838766946888123629884722752
square!(var1, 6);
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | true |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/zkcrypto/bls12_381/src/hash_to_curve/mod.rs | zkcrypto/bls12_381/src/hash_to_curve/mod.rs | //! This module implements hash_to_curve, hash_to_field and related
//! hashing primitives for use with BLS signatures.
use core::ops::Add;
use subtle::Choice;
pub(crate) mod chain;
mod expand_msg;
pub use self::expand_msg::{
ExpandMessage, ExpandMessageState, ExpandMsgXmd, ExpandMsgXof, InitExpandMessage,
};
mod map_g1;
mod map_g2;
mod map_scalar;
use crate::generic_array::{typenum::Unsigned, ArrayLength, GenericArray};
/// Enables a byte string to be hashed into one or more field elements for a given curve.
///
/// Implements [section 5 of `draft-irtf-cfrg-hash-to-curve-12`][hash_to_field].
///
/// [hash_to_field]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-12#section-5
pub trait HashToField: Sized {
/// The length of the data used to produce an individual field element.
///
/// This must be set to `m * L = m * ceil((ceil(log2(p)) + k) / 8)`, where `p` is the
/// characteristic of `Self`, `m` is the extension degree of `Self`, and `k` is the
/// security parameter.
type InputLength: ArrayLength<u8>;
/// Interprets the given output keying material as a big endian integer, and reduces
/// it into a field element.
fn from_okm(okm: &GenericArray<u8, Self::InputLength>) -> Self;
/// Hashes a byte string of arbitrary length into one or more elements of `Self`,
/// using [`ExpandMessage`] variant `X`.
///
/// Implements [section 5.3 of `draft-irtf-cfrg-hash-to-curve-12`][hash_to_field].
///
/// [hash_to_field]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-12#section-5.3
fn hash_to_field<X: ExpandMessage>(message: &[u8], dst: &[u8], output: &mut [Self]) {
let len_per_elm = Self::InputLength::to_usize();
let len_in_bytes = output.len() * len_per_elm;
let mut expander = X::init_expand(message, dst, len_in_bytes);
let mut buf = GenericArray::<u8, Self::InputLength>::default();
output.iter_mut().for_each(|item| {
expander.read_into(&mut buf[..]);
*item = Self::from_okm(&buf);
});
}
}
/// Allow conversion from the output of hashed or encoded input into points on the curve
pub trait MapToCurve: Sized {
/// The field element type.
type Field: Copy + Default + HashToField;
/// Maps an element of the finite field `Self::Field` to a point on the curve `Self`.
fn map_to_curve(elt: &Self::Field) -> Self;
/// Clears the cofactor, sending a point on curve E to the target group (G1/G2).
fn clear_h(&self) -> Self;
}
/// Implementation of random oracle maps to the curve.
pub trait HashToCurve<X: ExpandMessage>: MapToCurve + for<'a> Add<&'a Self, Output = Self> {
/// Implements a uniform encoding from byte strings to elements of `Self`.
///
/// This function is suitable for most applications requiring a random
/// oracle returning points in `Self`.
fn hash_to_curve(message: impl AsRef<[u8]>, dst: &[u8]) -> Self {
let mut u = [Self::Field::default(); 2];
Self::Field::hash_to_field::<X>(message.as_ref(), dst, &mut u);
let p1 = Self::map_to_curve(&u[0]);
let p2 = Self::map_to_curve(&u[1]);
(p1 + &p2).clear_h()
}
/// Implements a **non-uniform** encoding from byte strings to elements of `Self`.
///
/// The distribution of its output is not uniformly random in `Self`: the set of
/// possible outputs of this function is only a fraction of the points in `Self`, and
/// some elements of this set are more likely to be output than others. See
/// [section 10.1 of `draft-irtf-cfrg-hash-to-curve-12`][encode_to_curve-distribution]
/// for a more precise definition of `encode_to_curve`'s output distribution.
///
/// [encode_to_curve-distribution]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-12#section-10.1
fn encode_to_curve(message: impl AsRef<[u8]>, dst: &[u8]) -> Self {
let mut u = [Self::Field::default(); 1];
Self::Field::hash_to_field::<X>(message.as_ref(), dst, &mut u);
let p = Self::map_to_curve(&u[0]);
p.clear_h()
}
}
impl<G, X> HashToCurve<X> for G
where
G: MapToCurve + for<'a> Add<&'a Self, Output = Self>,
X: ExpandMessage,
{
}
pub(crate) trait Sgn0 {
/// Returns either 0 or 1 indicating the "sign" of x, where sgn0(x) == 1
/// just when x is "negative". (In other words, this function always considers 0 to be positive.)
/// <https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-10#section-4.1>
/// The equivalent for draft 6 would be `lexicographically_largest`.
fn sgn0(&self) -> Choice;
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/zkcrypto/bls12_381/src/hash_to_curve/map_scalar.rs | zkcrypto/bls12_381/src/hash_to_curve/map_scalar.rs | //! Implementation of hash-to-field for Scalar values
use super::HashToField;
use crate::generic_array::{typenum::U48, GenericArray};
use crate::scalar::Scalar;
impl HashToField for Scalar {
// ceil(log2(p)) = 255, m = 1, k = 128.
type InputLength = U48;
fn from_okm(okm: &GenericArray<u8, U48>) -> Scalar {
let mut bs = [0u8; 64];
bs[16..].copy_from_slice(okm);
bs.reverse(); // into little endian
Scalar::from_bytes_wide(&bs)
}
}
#[test]
fn test_hash_to_scalar() {
let tests: &[(&[u8], &str)] = &[
(
&[0u8; 48],
"0x0000000000000000000000000000000000000000000000000000000000000000",
),
(
b"aaaaaabbbbbbccccccddddddeeeeeeffffffgggggghhhhhh",
"0x2228450bf55d8fe62395161bd3677ff6fc28e45b89bc87e02a818eda11a8c5da",
),
(
b"111111222222333333444444555555666666777777888888",
"0x4aa543cbd2f0c8f37f8a375ce2e383eb343e7e3405f61e438b0a15fb8899d1ae",
),
];
for (input, expected) in tests {
let output = format!("{:?}", Scalar::from_okm(GenericArray::from_slice(input)));
assert_eq!(&output, expected);
}
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/zkcrypto/bls12_381/src/hash_to_curve/map_g1.rs | zkcrypto/bls12_381/src/hash_to_curve/map_g1.rs | //! Implementation of hash-to-curve for the G1 group.
use subtle::{Choice, ConditionallyNegatable, ConditionallySelectable, ConstantTimeEq};
use super::chain::chain_pm3div4;
use super::{HashToField, MapToCurve, Sgn0};
use crate::fp::Fp;
use crate::g1::G1Projective;
use crate::generic_array::{typenum::U64, GenericArray};
/// Coefficients of the 11-isogeny x map's numerator
const ISO11_XNUM: [Fp; 12] = [
Fp::from_raw_unchecked([
0x4d18_b6f3_af00_131c,
0x19fa_2197_93fe_e28c,
0x3f28_85f1_467f_19ae,
0x23dc_ea34_f2ff_b304,
0xd15b_58d2_ffc0_0054,
0x0913_be20_0a20_bef4,
]),
Fp::from_raw_unchecked([
0x8989_8538_5cdb_bd8b,
0x3c79_e43c_c7d9_66aa,
0x1597_e193_f4cd_233a,
0x8637_ef1e_4d66_23ad,
0x11b2_2dee_d20d_827b,
0x0709_7bc5_9987_84ad,
]),
Fp::from_raw_unchecked([
0xa542_583a_480b_664b,
0xfc71_69c0_26e5_68c6,
0x5ba2_ef31_4ed8_b5a6,
0x5b54_91c0_5102_f0e7,
0xdf6e_9970_7d2a_0079,
0x0784_151e_d760_5524,
]),
Fp::from_raw_unchecked([
0x494e_2128_70f7_2741,
0xab9b_e52f_bda4_3021,
0x26f5_5779_94e3_4c3d,
0x049d_fee8_2aef_bd60,
0x65da_dd78_2850_5289,
0x0e93_d431_ea01_1aeb,
]),
Fp::from_raw_unchecked([
0x90ee_774b_d6a7_4d45,
0x7ada_1c8a_41bf_b185,
0x0f1a_8953_b325_f464,
0x104c_2421_1be4_805c,
0x1691_39d3_19ea_7a8f,
0x09f2_0ead_8e53_2bf6,
]),
Fp::from_raw_unchecked([
0x6ddd_93e2_f436_26b7,
0xa548_2c9a_a1cc_d7bd,
0x1432_4563_1883_f4bd,
0x2e0a_94cc_f77e_c0db,
0xb028_2d48_0e56_489f,
0x18f4_bfcb_b436_8929,
]),
Fp::from_raw_unchecked([
0x23c5_f0c9_5340_2dfd,
0x7a43_ff69_58ce_4fe9,
0x2c39_0d3d_2da5_df63,
0xd0df_5c98_e1f9_d70f,
0xffd8_9869_a572_b297,
0x1277_ffc7_2f25_e8fe,
]),
Fp::from_raw_unchecked([
0x79f4_f049_0f06_a8a6,
0x85f8_94a8_8030_fd81,
0x12da_3054_b18b_6410,
0xe2a5_7f65_0588_0d65,
0xbba0_74f2_60e4_00f1,
0x08b7_6279_f621_d028,
]),
Fp::from_raw_unchecked([
0xe672_45ba_78d5_b00b,
0x8456_ba9a_1f18_6475,
0x7888_bff6_e6b3_3bb4,
0xe215_85b9_a30f_86cb,
0x05a6_9cdc_ef55_feee,
0x09e6_99dd_9adf_a5ac,
]),
Fp::from_raw_unchecked([
0x0de5_c357_bff5_7107,
0x0a0d_b4ae_6b1a_10b2,
0xe256_bb67_b3b3_cd8d,
0x8ad4_5657_4e9d_b24f,
0x0443_915f_50fd_4179,
0x098c_4bf7_de8b_6375,
]),
Fp::from_raw_unchecked([
0xe6b0_617e_7dd9_29c7,
0xfe6e_37d4_4253_7375,
0x1daf_deda_137a_489e,
0xe4ef_d1ad_3f76_7ceb,
0x4a51_d866_7f0f_e1cf,
0x054f_df4b_bf1d_821c,
]),
Fp::from_raw_unchecked([
0x72db_2a50_658d_767b,
0x8abf_91fa_a257_b3d5,
0xe969_d683_3764_ab47,
0x4641_7014_2a10_09eb,
0xb14f_01aa_db30_be2f,
0x18ae_6a85_6f40_715d,
]),
];
/// Coefficients of the 11-isogeny x map's denominator
const ISO11_XDEN: [Fp; 11] = [
Fp::from_raw_unchecked([
0xb962_a077_fdb0_f945,
0xa6a9_740f_efda_13a0,
0xc14d_568c_3ed6_c544,
0xb43f_c37b_908b_133e,
0x9c0b_3ac9_2959_9016,
0x0165_aa6c_93ad_115f,
]),
Fp::from_raw_unchecked([
0x2327_9a3b_a506_c1d9,
0x92cf_ca0a_9465_176a,
0x3b29_4ab1_3755_f0ff,
0x116d_da1c_5070_ae93,
0xed45_3092_4cec_2045,
0x0833_83d6_ed81_f1ce,
]),
Fp::from_raw_unchecked([
0x9885_c2a6_449f_ecfc,
0x4a2b_54cc_d377_33f0,
0x17da_9ffd_8738_c142,
0xa0fb_a727_32b3_fafd,
0xff36_4f36_e54b_6812,
0x0f29_c13c_6605_23e2,
]),
Fp::from_raw_unchecked([
0xe349_cc11_8278_f041,
0xd487_228f_2f32_04fb,
0xc9d3_2584_9ade_5150,
0x43a9_2bd6_9c15_c2df,
0x1c2c_7844_bc41_7be4,
0x1202_5184_f407_440c,
]),
Fp::from_raw_unchecked([
0x587f_65ae_6acb_057b,
0x1444_ef32_5140_201f,
0xfbf9_95e7_1270_da49,
0xccda_0660_7243_6a42,
0x7408_904f_0f18_6bb2,
0x13b9_3c63_edf6_c015,
]),
Fp::from_raw_unchecked([
0xfb91_8622_cd14_1920,
0x4a4c_6442_3eca_ddb4,
0x0beb_2329_27f7_fb26,
0x30f9_4df6_f83a_3dc2,
0xaeed_d424_d780_f388,
0x06cc_402d_d594_bbeb,
]),
Fp::from_raw_unchecked([
0xd41f_7611_51b2_3f8f,
0x32a9_2465_4357_19b3,
0x64f4_36e8_88c6_2cb9,
0xdf70_a9a1_f757_c6e4,
0x6933_a38d_5b59_4c81,
0x0c6f_7f72_37b4_6606,
]),
Fp::from_raw_unchecked([
0x693c_0874_7876_c8f7,
0x22c9_850b_f9cf_80f0,
0x8e90_71da_b950_c124,
0x89bc_62d6_1c7b_af23,
0xbc6b_e2d8_dad5_7c23,
0x1791_6987_aa14_a122,
]),
Fp::from_raw_unchecked([
0x1be3_ff43_9c13_16fd,
0x9965_243a_7571_dfa7,
0xc7f7_f629_62f5_cd81,
0x32c6_aa9a_f394_361c,
0xbbc2_ee18_e1c2_27f4,
0x0c10_2cba_c531_bb34,
]),
Fp::from_raw_unchecked([
0x9976_14c9_7bac_bf07,
0x61f8_6372_b991_92c0,
0x5b8c_95fc_1435_3fc3,
0xca2b_066c_2a87_492f,
0x1617_8f5b_bf69_8711,
0x12a6_dcd7_f0f4_e0e8,
]),
Fp::from_raw_unchecked([
0x7609_0000_0002_fffd,
0xebf4_000b_c40c_0002,
0x5f48_9857_53c7_58ba,
0x77ce_5853_7052_5745,
0x5c07_1a97_a256_ec6d,
0x15f6_5ec3_fa80_e493,
]),
];
/// Coefficients of the 11-isogeny y map's numerator
const ISO11_YNUM: [Fp; 16] = [
Fp::from_raw_unchecked([
0x2b56_7ff3_e283_7267,
0x1d4d_9e57_b958_a767,
0xce02_8fea_04bd_7373,
0xcc31_a30a_0b6c_d3df,
0x7d7b_18a6_8269_2693,
0x0d30_0744_d42a_0310,
]),
Fp::from_raw_unchecked([
0x99c2_555f_a542_493f,
0xfe7f_53cc_4874_f878,
0x5df0_608b_8f97_608a,
0x14e0_3832_052b_49c8,
0x7063_26a6_957d_d5a4,
0x0a8d_add9_c241_4555,
]),
Fp::from_raw_unchecked([
0x13d9_4292_2a5c_f63a,
0x357e_33e3_6e26_1e7d,
0xcf05_a27c_8456_088d,
0x0000_bd1d_e7ba_50f0,
0x83d0_c753_2f8c_1fde,
0x13f7_0bf3_8bbf_2905,
]),
Fp::from_raw_unchecked([
0x5c57_fd95_bfaf_bdbb,
0x28a3_59a6_5e54_1707,
0x3983_ceb4_f636_0b6d,
0xafe1_9ff6_f97e_6d53,
0xb346_8f45_5019_2bf7,
0x0bb6_cde4_9d8b_a257,
]),
Fp::from_raw_unchecked([
0x590b_62c7_ff8a_513f,
0x314b_4ce3_72ca_cefd,
0x6bef_32ce_94b8_a800,
0x6ddf_84a0_9571_3d5f,
0x64ea_ce4c_b098_2191,
0x0386_213c_651b_888d,
]),
Fp::from_raw_unchecked([
0xa531_0a31_111b_bcdd,
0xa14a_c0f5_da14_8982,
0xf9ad_9cc9_5423_d2e9,
0xaa6e_c095_283e_e4a7,
0xcf5b_1f02_2e1c_9107,
0x01fd_df5a_ed88_1793,
]),
Fp::from_raw_unchecked([
0x65a5_72b0_d7a7_d950,
0xe25c_2d81_8347_3a19,
0xc2fc_ebe7_cb87_7dbd,
0x05b2_d36c_769a_89b0,
0xba12_961b_e86e_9efb,
0x07eb_1b29_c1df_de1f,
]),
Fp::from_raw_unchecked([
0x93e0_9572_f7c4_cd24,
0x364e_9290_7679_5091,
0x8569_467e_68af_51b5,
0xa47d_a894_39f5_340f,
0xf4fa_9180_82e4_4d64,
0x0ad5_2ba3_e669_5a79,
]),
Fp::from_raw_unchecked([
0x9114_2984_4e0d_5f54,
0xd03f_51a3_516b_b233,
0x3d58_7e56_4053_6e66,
0xfa86_d2a3_a9a7_3482,
0xa90e_d5ad_f1ed_5537,
0x149c_9c32_6a5e_7393,
]),
Fp::from_raw_unchecked([
0x462b_beb0_3c12_921a,
0xdc9a_f5fa_0a27_4a17,
0x9a55_8ebd_e836_ebed,
0x649e_f8f1_1a4f_ae46,
0x8100_e165_2b3c_dc62,
0x1862_bd62_c291_dacb,
]),
Fp::from_raw_unchecked([
0x05c9_b8ca_89f1_2c26,
0x0194_160f_a9b9_ac4f,
0x6a64_3d5a_6879_fa2c,
0x1466_5bdd_8846_e19d,
0xbb1d_0d53_af3f_f6bf,
0x12c7_e1c3_b289_62e5,
]),
Fp::from_raw_unchecked([
0xb55e_bf90_0b8a_3e17,
0xfedc_77ec_1a92_01c4,
0x1f07_db10_ea1a_4df4,
0x0dfb_d15d_c41a_594d,
0x3895_47f2_334a_5391,
0x0241_9f98_1658_71a4,
]),
Fp::from_raw_unchecked([
0xb416_af00_0745_fc20,
0x8e56_3e9d_1ea6_d0f5,
0x7c76_3e17_763a_0652,
0x0145_8ef0_159e_bbef,
0x8346_fe42_1f96_bb13,
0x0d2d_7b82_9ce3_24d2,
]),
Fp::from_raw_unchecked([
0x9309_6bb5_38d6_4615,
0x6f2a_2619_951d_823a,
0x8f66_b3ea_5951_4fa4,
0xf563_e637_04f7_092f,
0x724b_136c_4cf2_d9fa,
0x0469_59cf_cfd0_bf49,
]),
Fp::from_raw_unchecked([
0xea74_8d4b_6e40_5346,
0x91e9_079c_2c02_d58f,
0x4106_4965_946d_9b59,
0xa067_31f1_d2bb_e1ee,
0x07f8_97e2_67a3_3f1b,
0x1017_2909_1921_0e5f,
]),
Fp::from_raw_unchecked([
0x872a_a6c1_7d98_5097,
0xeecc_5316_1264_562a,
0x07af_e37a_fff5_5002,
0x5475_9078_e5be_6838,
0xc4b9_2d15_db8a_cca8,
0x106d_87d1_b51d_13b9,
]),
];
/// Coefficients of the 11-isogeny y map's denominator, ordered from the
/// constant term upward (coefficient `i` multiplies `x^i`); consumed by
/// `iso_map` via Horner's rule.
const ISO11_YDEN: [Fp; 16] = [
    Fp::from_raw_unchecked([
        0xeb6c_359d_47e5_2b1c,
        0x18ef_5f8a_1063_4d60,
        0xddfa_71a0_889d_5b7e,
        0x723e_71dc_c5fc_1323,
        0x52f4_5700_b70d_5c69,
        0x0a8b_981e_e476_91f1,
    ]),
    Fp::from_raw_unchecked([
        0x616a_3c4f_5535_b9fb,
        0x6f5f_0373_95db_d911,
        0xf25f_4cc5_e35c_65da,
        0x3e50_dffe_a3c6_2658,
        0x6a33_dca5_2356_0776,
        0x0fad_eff7_7b6b_fe3e,
    ]),
    Fp::from_raw_unchecked([
        0x2be9_b66d_f470_059c,
        0x24a2_c159_a3d3_6742,
        0x115d_be7a_d10c_2a37,
        0xb663_4a65_2ee5_884d,
        0x04fe_8bb2_b8d8_1af4,
        0x01c2_a7a2_56fe_9c41,
    ]),
    Fp::from_raw_unchecked([
        0xf27b_f8ef_3b75_a386,
        0x898b_3674_76c9_073f,
        0x2448_2e6b_8c2f_4e5f,
        0xc8e0_bbd6_fe11_0806,
        0x59b0_c17f_7631_448a,
        0x1103_7cd5_8b3d_bfbd,
    ]),
    Fp::from_raw_unchecked([
        0x31c7_912e_a267_eec6,
        0x1dbf_6f1c_5fcd_b700,
        0xd30d_4fe3_ba86_fdb1,
        0x3cae_528f_bee9_a2a4,
        0xb1cc_e69b_6aa9_ad9a,
        0x0443_93bb_632d_94fb,
    ]),
    Fp::from_raw_unchecked([
        0xc66e_f6ef_eeb5_c7e8,
        0x9824_c289_dd72_bb55,
        0x71b1_a4d2_f119_981d,
        0x104f_c1aa_fb09_19cc,
        0x0e49_df01_d942_a628,
        0x096c_3a09_7732_72d4,
    ]),
    Fp::from_raw_unchecked([
        0x9abc_11eb_5fad_eff4,
        0x32dc_a50a_8857_28f0,
        0xfb1f_a372_1569_734c,
        0xc4b7_6271_ea65_06b3,
        0xd466_a755_99ce_728e,
        0x0c81_d464_5f4c_b6ed,
    ]),
    Fp::from_raw_unchecked([
        0x4199_f10e_5b8b_e45b,
        0xda64_e495_b1e8_7930,
        0xcb35_3efe_9b33_e4ff,
        0x9e9e_fb24_aa64_24c6,
        0xf08d_3368_0a23_7465,
        0x0d33_7802_3e4c_7406,
    ]),
    Fp::from_raw_unchecked([
        0x7eb4_ae92_ec74_d3a5,
        0xc341_b4aa_9fac_3497,
        0x5be6_0389_9e90_7687,
        0x03bf_d9cc_a75c_bdeb,
        0x564c_2935_a96b_fa93,
        0x0ef3_c333_71e2_fdb5,
    ]),
    Fp::from_raw_unchecked([
        0x7ee9_1fd4_49f6_ac2e,
        0xe5d5_bd5c_b935_7a30,
        0x773a_8ca5_196b_1380,
        0xd0fd_a172_174e_d023,
        0x6cb9_5e0f_a776_aead,
        0x0d22_d5a4_0cec_7cff,
    ]),
    Fp::from_raw_unchecked([
        0xf727_e092_85fd_8519,
        0xdc9d_55a8_3017_897b,
        0x7549_d8bd_0578_94ae,
        0x1784_1961_3d90_d8f8,
        0xfce9_5ebd_eb5b_490a,
        0x0467_ffae_f23f_c49e,
    ]),
    Fp::from_raw_unchecked([
        0xc176_9e6a_7c38_5f1b,
        0x79bc_930d_eac0_1c03,
        0x5461_c75a_23ed_e3b5,
        0x6e20_829e_5c23_0c45,
        0x828e_0f1e_772a_53cd,
        0x116a_efa7_4912_7bff,
    ]),
    Fp::from_raw_unchecked([
        0x101c_10bf_2744_c10a,
        0xbbf1_8d05_3a6a_3154,
        0xa0ec_f39e_f026_f602,
        0xfc00_9d49_96dc_5153,
        0xb900_0209_d5bd_08d3,
        0x189e_5fe4_470c_d73c,
    ]),
    Fp::from_raw_unchecked([
        0x7ebd_546c_a157_5ed2,
        0xe47d_5a98_1d08_1b55,
        0x57b2_b625_b6d4_ca21,
        0xb0a1_ba04_2285_20cc,
        0x9873_8983_c210_7ff3,
        0x13dd_dbc4_799d_81d6,
    ]),
    Fp::from_raw_unchecked([
        0x0931_9f2e_3983_4935,
        0x039e_952c_bdb0_5c21,
        0x55ba_77a9_a2f7_6493,
        0xfd04_e3df_c608_6467,
        0xfb95_832e_7d78_742e,
        0x0ef9_c24e_ccaf_5e0e,
    ]),
    Fp::from_raw_unchecked([
        0x7609_0000_0002_fffd,
        0xebf4_000b_c40c_0002,
        0x5f48_9857_53c7_58ba,
        0x77ce_5853_7052_5745,
        0x5c07_1a97_a256_ec6d,
        0x15f6_5ec3_fa80_e493,
    ]),
];
/// Coefficient `A` of the isogenous curve E' (y^2 = x^3 + A x + B) used by
/// the simplified SWU map; see `map_to_curve_simple_swu` / `check_g1_prime`.
const SSWU_ELLP_A: Fp = Fp::from_raw_unchecked([
    0x2f65_aa0e_9af5_aa51,
    0x8646_4c2d_1e84_16c3,
    0xb85c_e591_b7bd_31e2,
    0x27e1_1c91_b5f2_4e7c,
    0x2837_6eda_6bfc_1835,
    0x1554_55c3_e507_1d85,
]);
/// Coefficient `B` of the isogenous curve E'.
const SSWU_ELLP_B: Fp = Fp::from_raw_unchecked([
    0xfb99_6971_fe22_a1e0,
    0x9aa9_3eb3_5b74_2d6f,
    0x8c47_6013_de99_c5c4,
    0x873e_27c3_a221_e571,
    0xca72_b5e4_5a52_d888,
    0x0682_4061_418a_386b,
]);
/// The SWU constant `XI` (called `Z` in the hash-to-curve draft), used to
/// build the candidate x-coordinates.  NOTE(review): assumed to be the
/// BLS12381G1 suite parameter — confirm against the draft if modifying.
const SSWU_XI: Fp = Fp::from_raw_unchecked([
    0x886c_0000_0023_ffdc,
    0x0f70_008d_3090_001d,
    0x7767_2417_ed58_28c3,
    0x9dac_23e9_43dc_1740,
    0x5055_3f1b_9c13_1521,
    0x078c_712f_be0a_b6e8,
]);
/// Precomputed `sqrt(-XI^3)`, used to build the candidate y-coordinate for
/// the "g(x0) is non-square" branch of the SWU map (see its use site).
const SQRT_M_XI_CUBED: Fp = Fp::from_raw_unchecked([
    0x43b5_71ca_d321_5f1f,
    0xccb4_60ef_1c70_2dc2,
    0x742d_884f_4f97_100b,
    0xdb2c_3e32_38a3_382b,
    0xe40f_3fa1_3fce_8f88,
    0x0073_a2af_9892_a2ff,
]);
impl HashToField for Fp {
    // ceil(log2(p)) = 381, m = 1, k = 128.
    type InputLength = U64;
    /// Maps 64 bytes of output keying material to a field element by treating
    /// it as a big-endian integer and reducing mod p:
    /// `result = db * 2^256 + da`, where `db`/`da` come from the high/low
    /// 32-byte halves of `okm`.
    fn from_okm(okm: &GenericArray<u8, U64>) -> Fp {
        // 2^256 as an `Fp` constant (per the name — NOTE(review): value taken
        // on faith from upstream; confirm if modifying).
        const F_2_256: Fp = Fp::from_raw_unchecked([
            0x075b_3cd7_c5ce_820f,
            0x3ec6_ba62_1c3e_db0b,
            0x168a_13d8_2bff_6bce,
            0x8766_3c4b_f8c4_49d2,
            0x15f3_4c83_ddc8_d830,
            0x0f96_28b4_9caa_2e85,
        ]);
        // Left-pad each 32-byte half to the 48-byte `Fp` encoding; a 256-bit
        // value is always < p, so `from_bytes` cannot fail here.
        let mut bs = [0u8; 48];
        bs[16..].copy_from_slice(&okm[..32]);
        let db = Fp::from_bytes(&bs).unwrap();
        bs[16..].copy_from_slice(&okm[32..]);
        let da = Fp::from_bytes(&bs).unwrap();
        db * F_2_256 + da
    }
}
impl Sgn0 for Fp {
    /// Returns sgn0(a): the parity (least-significant bit) of the canonical
    /// representation of the element.
    fn sgn0(&self) -> Choice {
        // Turn into canonical form by computing
        // (a.R) / R = a
        // (Montgomery reduction with zeroed high limbs strips the R factor.)
        let tmp = Fp::montgomery_reduce(
            self.0[0], self.0[1], self.0[2], self.0[3], self.0[4], self.0[5], 0, 0, 0, 0, 0, 0,
        );
        // Parity bit of the canonical value's low limb.
        Choice::from((tmp.0[0] & 1) as u8)
    }
}
/// Maps an element of [`Fp`] to a point on iso-G1.
///
/// Implements [section 6.6.2 of `draft-irtf-cfrg-hash-to-curve-12`][sswu].
///
/// [sswu]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-12#section-6.6.2
fn map_to_curve_simple_swu(u: &Fp) -> G1Projective {
    let usq = u.square();
    let xi_usq = SSWU_XI * usq;
    let xisq_u4 = xi_usq.square();
    let nd_common = xisq_u4 + xi_usq; // XI^2 * u^4 + XI * u^2
    // Exceptional case: if the common term is zero the x denominator would
    // vanish, so XI is substituted instead (constant-time select).
    let x_den = SSWU_ELLP_A * Fp::conditional_select(&(-nd_common), &SSWU_XI, nd_common.is_zero());
    let x0_num = SSWU_ELLP_B * (Fp::one() + nd_common); // B * (1 + (XI^2 * u^4 + XI * u^2))
    // compute g(x0(u))
    let x_densq = x_den.square();
    let gx_den = x_densq * x_den;
    // x0_num^3 + A * x0_num * x_den^2 + B * x_den^3
    let gx0_num = (x0_num.square() + SSWU_ELLP_A * x_densq) * x0_num + SSWU_ELLP_B * gx_den;
    // compute g(X0(u)) ^ ((p - 3) // 4)
    let sqrt_candidate = {
        let u_v = gx0_num * gx_den; // u*v
        let vsq = gx_den.square(); // v^2
        u_v * chain_pm3div4(&(u_v * vsq)) // u v (u v^3) ^ ((p - 3) // 4)
    };
    // If g(x0) is square, sqrt_candidate is its square root over gx_den.
    let gx0_square = (sqrt_candidate.square() * gx_den).ct_eq(&gx0_num); // g(x0) is square
    let x1_num = x0_num * xi_usq;
    // sqrt(-XI**3) * u^3 g(x0) ^ ((p - 3) // 4)
    let y1 = SQRT_M_XI_CUBED * usq * u * sqrt_candidate;
    // Constant-time choice between the x0 (g(x0) square) and x1 branches.
    let x_num = Fp::conditional_select(&x1_num, &x0_num, gx0_square);
    let mut y = Fp::conditional_select(&y1, &sqrt_candidate, gx0_square);
    // ensure sign of y and sign of u agree
    y.conditional_negate(y.sgn0() ^ u.sgn0());
    // Projective result: affine x = x_num / x_den; the extra x_den factor in
    // y cancels against z on normalization.
    G1Projective {
        x: x_num,
        y: y * x_den,
        z: x_den,
    }
}
/// Maps an iso-G1 point to a G1 point.
///
/// Evaluates the four rational-map polynomials (x numerator/denominator,
/// y numerator/denominator) of the 11-isogeny on the projective input and
/// cross-multiplies them into a single projective point.
fn iso_map(u: &G1Projective) -> G1Projective {
    const COEFFS: [&[Fp]; 4] = [&ISO11_XNUM, &ISO11_XDEN, &ISO11_YNUM, &ISO11_YDEN];
    // unpack input point
    let G1Projective { x, y, z } = *u;
    // xnum, xden, ynum, yden
    let mut mapvals = [Fp::zero(); 4];
    // pre-compute powers of z (zpows[k] = z^(k+1)), used to homogenize each
    // polynomial evaluation
    let zpows = {
        let mut zpows = [Fp::zero(); 15];
        zpows[0] = z;
        for idx in 1..zpows.len() {
            zpows[idx] = zpows[idx - 1] * z;
        }
        zpows
    };
    // compute map value by Horner's rule
    for idx in 0..4 {
        let coeff = COEFFS[idx];
        let clast = coeff.len() - 1;
        mapvals[idx] = coeff[clast];
        for jdx in 0..clast {
            mapvals[idx] = mapvals[idx] * x + zpows[jdx] * coeff[clast - 1 - jdx];
        }
    }
    // x denominator is order 1 less than x numerator, so we need an extra factor of z
    mapvals[1] *= z;
    // multiply result of Y map by the y-coord, y / z
    mapvals[2] *= y;
    mapvals[3] *= z;
    // Cross-multiply numerators and denominators into one projective point.
    G1Projective {
        x: mapvals[0] * mapvals[3], // xnum * yden,
        y: mapvals[2] * mapvals[1], // ynum * xden,
        z: mapvals[1] * mapvals[3], // xden * yden
    }
}
impl MapToCurve for G1Projective {
    type Field = Fp;

    /// Simplified-SWU map onto the 11-isogenous curve, then the isogeny
    /// evaluation back onto G1.
    fn map_to_curve(u: &Fp) -> G1Projective {
        iso_map(&map_to_curve_simple_swu(u))
    }

    /// Cofactor clearing for G1.
    fn clear_h(&self) -> Self {
        self.clear_cofactor()
    }
}
#[cfg(test)]
fn check_g1_prime(pt: &G1Projective) -> bool {
    // (X : Y : Z) == (X/Z, Y/Z) lies on E': y^2 = x^3 + A x + B, checked in
    // homogenized form: y^2 z == x^3 + A x z^2 + B z^3.
    let z2 = pt.z.square();
    let lhs = pt.y.square() * pt.z;
    let rhs = pt.x.square() * pt.x + SSWU_ELLP_A * pt.x * z2 + SSWU_ELLP_B * z2 * pt.z;
    lhs == rhs
}
#[test]
fn test_simple_swu_expected() {
    // Fixed-vector check of `map_to_curve_simple_swu`, covering the
    // exceptional inputs (u = 0 and the two square roots of -1/XI, which
    // exercise the conditional-select branch) plus one generic input.
    // exceptional case: zero
    let p = map_to_curve_simple_swu(&Fp::zero());
    let G1Projective { x, y, z } = &p;
    let xo = Fp::from_raw_unchecked([
        0xfb99_6971_fe22_a1e0,
        0x9aa9_3eb3_5b74_2d6f,
        0x8c47_6013_de99_c5c4,
        0x873e_27c3_a221_e571,
        0xca72_b5e4_5a52_d888,
        0x0682_4061_418a_386b,
    ]);
    let yo = Fp::from_raw_unchecked([
        0xfd6f_ced8_7a7f_11a3,
        0x9a6b_314b_03c8_db31,
        0x41f8_5416_e0ea_b593,
        0xfeeb_089f_7e6e_c4d7,
        0x85a1_34c3_7ed1_278f,
        0x0575_c525_bb9f_74bb,
    ]);
    let zo = Fp::from_raw_unchecked([
        0x7f67_4ea0_a891_5178,
        0xb0f9_45fc_13b8_fa65,
        0x4b46_759a_38e8_7d76,
        0x2e7a_9296_41bb_b6a1,
        0x1668_ddfa_462b_f6b6,
        0x0096_0e2e_d1cf_294c,
    ]);
    assert_eq!(x, &xo);
    assert_eq!(y, &yo);
    assert_eq!(z, &zo);
    assert!(check_g1_prime(&p));
    // exceptional case: sqrt(-1/XI) (positive)
    let excp = Fp::from_raw_unchecked([
        0x00f3_d047_7e91_edbf,
        0x08d6_621e_4ca8_dc69,
        0xb9cf_7927_b19b_9726,
        0xba13_3c99_6caf_a2ec,
        0xed2a_5ccd_5ca7_bb68,
        0x19cb_022f_8ee9_d73b,
    ]);
    let p = map_to_curve_simple_swu(&excp);
    let G1Projective { x, y, z } = &p;
    // Must map to the same point as u = 0.
    assert_eq!(x, &xo);
    assert_eq!(y, &yo);
    assert_eq!(z, &zo);
    assert!(check_g1_prime(&p));
    // exceptional case: sqrt(-1/XI) (negative)
    let excp = Fp::from_raw_unchecked([
        0xb90b_2fb8_816d_bcec,
        0x15d5_9de0_64ab_2396,
        0xad61_5979_4515_5efe,
        0xaa64_0eeb_86d5_6fd2,
        0x5df1_4ae8_e6a3_f16e,
        0x0036_0fba_aa96_0f5e,
    ]);
    let p = map_to_curve_simple_swu(&excp);
    let G1Projective { x, y, z } = &p;
    // Same point but with y negated (sign follows sgn0(u)).
    let myo = -yo;
    assert_eq!(x, &xo);
    assert_eq!(y, &myo);
    assert_eq!(z, &zo);
    assert!(check_g1_prime(&p));
    // generic input
    let u = Fp::from_raw_unchecked([
        0xa618_fa19_f7e2_eadc,
        0x93c7_f1fc_876b_a245,
        0xe2ed_4cc4_7b5c_0ae0,
        0xd49e_fa74_e4a8_d000,
        0xa0b2_3ba6_92b5_431c,
        0x0d15_51f2_d7d8_d193,
    ]);
    let xo = Fp::from_raw_unchecked([
        0x2197_ca55_fab3_ba48,
        0x591d_eb39_f434_949a,
        0xf9df_7fb4_f1fa_6a08,
        0x59e3_c16a_9dfa_8fa5,
        0xe592_9b19_4aad_5f7a,
        0x130a_46a4_c61b_44ed,
    ]);
    let yo = Fp::from_raw_unchecked([
        0xf721_5b58_c720_0ad0,
        0x8905_1631_3a4e_66bf,
        0xc903_1acc_8a36_19a8,
        0xea1f_9978_fde3_ffec,
        0x0548_f02d_6cfb_f472,
        0x1693_7557_3529_163f,
    ]);
    let zo = Fp::from_raw_unchecked([
        0xf36f_eb2e_1128_ade0,
        0x42e2_2214_250b_cd94,
        0xb94f_6ba2_dddf_62d6,
        0xf56d_4392_782b_f0a2,
        0xb2d7_ce1e_c263_09e7,
        0x182b_57ed_6b99_f0a1,
    ]);
    let p = map_to_curve_simple_swu(&u);
    let G1Projective { x, y, z } = &p;
    assert_eq!(x, &xo);
    assert_eq!(y, &yo);
    assert_eq!(z, &zo);
    assert!(check_g1_prime(&p));
}
#[test]
fn test_osswu_semirandom() {
    use rand_core::SeedableRng;
    // Fixed seed keeps the "random" inputs reproducible.
    let mut rng = rand_xorshift::XorShiftRng::from_seed([
        0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
        0xe5,
    ]);
    for _ in 0..32 {
        // SWU must land on the isogenous curve E'...
        let u = Fp::random(&mut rng);
        let on_iso = map_to_curve_simple_swu(&u);
        assert!(check_g1_prime(&on_iso));
        // ...and the isogeny must carry it onto G1 proper.
        assert!(bool::from(iso_map(&on_iso).is_on_curve()));
    }
}
// test vectors from the draft 10 RFC
// (BLS12381G1_XMD:SHA-256_SSWU_NU_ suite: non-uniform encoding via a single
// field element; expected values are the uncompressed affine point, split
// into two hex halves to keep source lines manageable)
#[test]
fn test_encode_to_curve_10() {
    use crate::{
        g1::G1Affine,
        hash_to_curve::{ExpandMsgXmd, HashToCurve},
    };
    use std::string::{String, ToString};
    struct TestCase {
        msg: &'static [u8],
        expected: [&'static str; 2],
    }
    impl TestCase {
        fn expected(&self) -> String {
            self.expected[0].to_string() + self.expected[1]
        }
    }
    const DOMAIN: &[u8] = b"QUUX-V01-CS02-with-BLS12381G1_XMD:SHA-256_SSWU_NU_";
    let cases = vec![
        TestCase {
            msg: b"",
            expected: [
                "184bb665c37ff561a89ec2122dd343f20e0f4cbcaec84e3c3052ea81d1834e192c426074b02ed3dca4e7676ce4ce48ba",
                "04407b8d35af4dacc809927071fc0405218f1401a6d15af775810e4e460064bcc9468beeba82fdc751be70476c888bf3",
            ],
        },
        TestCase {
            msg: b"abc",
            expected: [
                "009769f3ab59bfd551d53a5f846b9984c59b97d6842b20a2c565baa167945e3d026a3755b6345df8ec7e6acb6868ae6d",
                "1532c00cf61aa3d0ce3e5aa20c3b531a2abd2c770a790a2613818303c6b830ffc0ecf6c357af3317b9575c567f11cd2c",
            ],
        },
        TestCase {
            msg: b"abcdef0123456789",
            expected: [
                "1974dbb8e6b5d20b84df7e625e2fbfecb2cdb5f77d5eae5fb2955e5ce7313cae8364bc2fff520a6c25619739c6bdcb6a",
                "15f9897e11c6441eaa676de141c8d83c37aab8667173cbe1dfd6de74d11861b961dccebcd9d289ac633455dfcc7013a3",
            ]
        },
        TestCase {
            msg: b"q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
                   qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
                   qqqqqqqqqqqqqqqqqqqqqqqqq",
            expected: [
                "0a7a047c4a8397b3446450642c2ac64d7239b61872c9ae7a59707a8f4f950f101e766afe58223b3bff3a19a7f754027c",
                "1383aebba1e4327ccff7cf9912bda0dbc77de048b71ef8c8a81111d71dc33c5e3aa6edee9cf6f5fe525d50cc50b77cc9",
            ]
        },
        TestCase {
            msg: b"a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                   aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                   aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                   aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                   aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                   aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                   aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                   aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                   aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                   aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            expected: [
                "0e7a16a975904f131682edbb03d9560d3e48214c9986bd50417a77108d13dc957500edf96462a3d01e62dc6cd468ef11",
                "0ae89e677711d05c30a48d6d75e76ca9fb70fe06c6dd6ff988683d89ccde29ac7d46c53bb97a59b1901abf1db66052db",
            ]
        }
    ];
    for case in cases {
        let g = <G1Projective as HashToCurve<ExpandMsgXmd<sha2::Sha256>>>::encode_to_curve(
            &case.msg, DOMAIN,
        );
        let aff = G1Affine::from(g);
        let g_uncompressed = aff.to_uncompressed();
        assert_eq!(case.expected(), hex::encode(&g_uncompressed[..]));
    }
}
// test vectors from the draft 10 RFC
// (BLS12381G1_XMD:SHA-256_SSWU_RO_ suite: random-oracle hashing via two
// field elements; expected values are the uncompressed affine point, split
// into two hex halves)
#[test]
fn test_hash_to_curve_10() {
    use crate::{
        g1::G1Affine,
        hash_to_curve::{ExpandMsgXmd, HashToCurve},
    };
    use std::string::{String, ToString};
    struct TestCase {
        msg: &'static [u8],
        expected: [&'static str; 2],
    }
    impl TestCase {
        fn expected(&self) -> String {
            self.expected[0].to_string() + self.expected[1]
        }
    }
    const DOMAIN: &[u8] = b"QUUX-V01-CS02-with-BLS12381G1_XMD:SHA-256_SSWU_RO_";
    let cases = vec![
        TestCase {
            msg: b"",
            expected: [
                "052926add2207b76ca4fa57a8734416c8dc95e24501772c814278700eed6d1e4e8cf62d9c09db0fac349612b759e79a1",
                "08ba738453bfed09cb546dbb0783dbb3a5f1f566ed67bb6be0e8c67e2e81a4cc68ee29813bb7994998f3eae0c9c6a265",
            ],
        },
        TestCase {
            msg: b"abc",
            expected: [
                "03567bc5ef9c690c2ab2ecdf6a96ef1c139cc0b2f284dca0a9a7943388a49a3aee664ba5379a7655d3c68900be2f6903",
                "0b9c15f3fe6e5cf4211f346271d7b01c8f3b28be689c8429c85b67af215533311f0b8dfaaa154fa6b88176c229f2885d"
            ],
        },
        TestCase {
            msg: b"abcdef0123456789",
            expected: [
                "11e0b079dea29a68f0383ee94fed1b940995272407e3bb916bbf268c263ddd57a6a27200a784cbc248e84f357ce82d98",
                "03a87ae2caf14e8ee52e51fa2ed8eefe80f02457004ba4d486d6aa1f517c0889501dc7413753f9599b099ebcbbd2d709"
            ]
        },
        TestCase {
            msg: b"q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
                   qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
                   qqqqqqqqqqqqqqqqqqqqqqqqq",
            expected: [
                "15f68eaa693b95ccb85215dc65fa81038d69629f70aeee0d0f677cf22285e7bf58d7cb86eefe8f2e9bc3f8cb84fac488",
                "1807a1d50c29f430b8cafc4f8638dfeeadf51211e1602a5f184443076715f91bb90a48ba1e370edce6ae1062f5e6dd38"
            ]
        },
        TestCase {
            msg: b"a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                   aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                   aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                   aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                   aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                   aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                   aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                   aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                   aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
                   aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            expected: [
                "082aabae8b7dedb0e78aeb619ad3bfd9277a2f77ba7fad20ef6aabdc6c31d19ba5a6d12283553294c1825c4b3ca2dcfe",
                "05b84ae5a942248eea39e1d91030458c40153f3b654ab7872d779ad1e942856a20c438e8d99bc8abfbf74729ce1f7ac8"
            ]
        }
    ];
    for case in cases {
        let g = <G1Projective as HashToCurve<ExpandMsgXmd<sha2::Sha256>>>::hash_to_curve(
            &case.msg, DOMAIN,
        );
        let g_uncompressed = G1Affine::from(g).to_uncompressed();
        assert_eq!(case.expected(), hex::encode(&g_uncompressed[..]));
    }
}
#[cfg(test)]
// p-1 / 2
// (sign-boundary fixture for `test_sgn0`; expected to have sgn0 == 1 — see
// the assertion there)
pub const P_M1_OVER2: Fp = Fp::from_raw_unchecked([
    0xa1fa_ffff_fffe_5557,
    0x995b_fff9_76a3_fffe,
    0x03f4_1d24_d174_ceb4,
    0xf654_7998_c199_5dbd,
    0x778a_468f_507a_6034,
    0x0205_5993_1f7f_8103,
]);
#[test]
fn test_sgn0() {
    // sgn0 is the parity of the canonical representation.
    assert!(!bool::from(Fp::zero().sgn0()));
    assert!(bool::from(Fp::one().sgn0()));
    assert!(!bool::from((-Fp::one()).sgn0()));
    assert!(!bool::from((-Fp::zero()).sgn0()));
    assert!(bool::from(P_M1_OVER2.sgn0()));
    // One past the halfway point flips parity...
    let p_p1_over2 = P_M1_OVER2 + Fp::one();
    assert!(!bool::from(p_p1_over2.sgn0()));
    // ...and its (unconditional) negation lands back on (p-1)/2.
    let neg_p_p1_over2 = {
        let mut tmp = p_p1_over2;
        tmp.conditional_negate(Choice::from(1u8));
        tmp
    };
    assert_eq!(neg_p_p1_over2, P_M1_OVER2);
}
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/zkcrypto/bls12_381/src/hash_to_curve/expand_msg.rs | zkcrypto/bls12_381/src/hash_to_curve/expand_msg.rs | //! This module implements message expansion consistent with the
//! hash-to-curve RFC drafts 7 through 10
use core::{
fmt::{self, Debug, Formatter},
marker::PhantomData,
};
use digest::{BlockInput, Digest, ExtendableOutputDirty, Update, XofReader};
use crate::generic_array::{
typenum::{Unsigned, U32},
ArrayLength, GenericArray,
};
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
/// Salt prepended when hashing an oversized (> 255 byte) DST down to a
/// fixed-length one; see `ExpandMsgDst::process_xof` / `process_xmd`.
const OVERSIZE_DST_SALT: &[u8] = b"H2C-OVERSIZE-DST-";
/// The domain separation tag for a message expansion.
///
/// `L` is the output length of the hash used to compress oversized DSTs.
///
/// Implements [section 5.4.3 of `draft-irtf-cfrg-hash-to-curve-12`][dst].
///
/// [dst]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-12#section-5.4.3
#[derive(Debug)]
enum ExpandMsgDst<'x, L: ArrayLength<u8>> {
    /// DST produced by hashing a very long (> 255 chars) input DST.
    Hashed(GenericArray<u8, L>),
    /// A raw input DST (<= 255 chars), borrowed from the caller.
    Raw(&'x [u8]),
}
impl<'x, L: ArrayLength<u8>> ExpandMsgDst<'x, L> {
    /// Produces a DST for use with `expand_message_xof`.
    ///
    /// Short DSTs (<= 255 bytes) are used verbatim; oversized ones are first
    /// compressed with the same XOF, salted by `OVERSIZE_DST_SALT`.
    pub fn process_xof<H>(dst: &'x [u8]) -> Self
    where
        H: Default + Update + ExtendableOutputDirty,
    {
        if dst.len() <= 255 {
            return Self::Raw(dst);
        }
        let mut hashed = GenericArray::<u8, L>::default();
        H::default()
            .chain(OVERSIZE_DST_SALT)
            .chain(&dst)
            .finalize_xof_dirty()
            .read(&mut hashed);
        Self::Hashed(hashed)
    }

    /// Produces a DST for use with `expand_message_xmd`.
    ///
    /// Same policy as `process_xof`, but oversized DSTs are compressed with
    /// the fixed-output digest `H`.
    pub fn process_xmd<H>(dst: &'x [u8]) -> Self
    where
        H: Digest<OutputSize = L>,
    {
        if dst.len() <= 255 {
            Self::Raw(dst)
        } else {
            Self::Hashed(H::new().chain(OVERSIZE_DST_SALT).chain(&dst).finalize())
        }
    }

    /// Returns the raw bytes of the DST.
    pub fn data(&'x self) -> &'x [u8] {
        match self {
            Self::Raw(buf) => buf,
            Self::Hashed(arr) => &arr[..],
        }
    }

    /// Returns the length of the DST.
    pub fn len(&'x self) -> usize {
        match self {
            Self::Raw(buf) => buf.len(),
            Self::Hashed(_) => L::to_usize(),
        }
    }
}
/// A trait for message expansion methods supported by hash-to-curve.
pub trait ExpandMessage: for<'x> InitExpandMessage<'x> {
    // This intermediate is likely only necessary until GATs allow
    // associated types with lifetimes.
}
/// Trait for constructing a new message expander.
pub trait InitExpandMessage<'x> {
    /// The state object used during message expansion.
    type Expander: ExpandMessageState<'x>;
    /// Initializes a message expander.
    fn init_expand(message: &[u8], dst: &'x [u8], len_in_bytes: usize) -> Self::Expander;
}
// Blanket impl: any type implementing `InitExpandMessage` for every lifetime
// is automatically an `ExpandMessage`.
impl<X: for<'x> InitExpandMessage<'x>> ExpandMessage for X {}
/// Trait for types implementing the `expand_message` interface for `hash_to_field`.
pub trait ExpandMessageState<'x> {
    /// Reads bytes from the generated output.
    ///
    /// Returns the number of bytes actually written, which may be fewer than
    /// `output.len()` once the expansion is exhausted.
    fn read_into(&mut self, output: &mut [u8]) -> usize;
    /// Retrieves the number of bytes remaining in the generator.
    fn remain(&self) -> usize;
    #[cfg(feature = "alloc")]
    /// Constructs a `Vec` containing the remaining bytes of the output.
    fn into_vec(mut self) -> Vec<u8>
    where
        Self: Sized,
    {
        // Drain everything that is left into an exactly-sized buffer.
        let mut result = alloc::vec![0u8; self.remain()];
        self.read_into(&mut result[..]);
        result
    }
}
/// A generator for the output of `expand_message_xof` for a given
/// extendable hash function, message, DST, and output length.
///
/// Implements [section 5.4.2 of `draft-irtf-cfrg-hash-to-curve-12`][expand_message_xof]
/// with `k = 128`.
///
/// [expand_message_xof]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-12#section-5.4.2
pub struct ExpandMsgXof<H: ExtendableOutputDirty> {
    /// XOF reader that produces the expanded bytes on demand.
    hash: <H as ExtendableOutputDirty>::Reader,
    /// Number of output bytes not yet read.
    remain: usize,
}
impl<H: ExtendableOutputDirty> Debug for ExpandMsgXof<H> {
    // Manual impl: only `remain` is reported; the XOF reader state is
    // omitted (its type carries no `Debug` requirement here).
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.debug_struct("ExpandMsgXof")
            .field("remain", &self.remain)
            .finish()
    }
}
impl<'x, H> ExpandMessageState<'x> for ExpandMsgXof<H>
where
    H: ExtendableOutputDirty,
{
    /// Reads up to `output.len()` bytes, capped by the bytes still owed,
    /// and returns how many were written.
    fn read_into(&mut self, output: &mut [u8]) -> usize {
        let take = usize::min(self.remain, output.len());
        self.hash.read(&mut output[..take]);
        self.remain -= take;
        take
    }

    fn remain(&self) -> usize {
        self.remain
    }
}
impl<'x, H> InitExpandMessage<'x> for ExpandMsgXof<H>
where
    H: Default + Update + ExtendableOutputDirty,
{
    type Expander = Self;

    /// Initializes `expand_message_xof`: absorbs
    /// `msg || I2OSP(len_in_bytes, 2) || DST || I2OSP(len(DST), 1)`
    /// and keeps the XOF reader for incremental output.
    ///
    /// # Panics
    ///
    /// Panics if `len_in_bytes` does not fit in two bytes (the spec requires
    /// `len_in_bytes < 2^16`).
    fn init_expand(message: &[u8], dst: &[u8], len_in_bytes: usize) -> Self {
        // I2OSP(len_in_bytes, 2) requires len_in_bytes < 2^16; a bare
        // `as u16` cast would silently truncate and frame the wrong length.
        // Mirrors the `ell > 255` panic in the XMD expander.
        if len_in_bytes > u16::MAX as usize {
            panic!("Invalid ExpandMsgXof usage: len_in_bytes > 65535");
        }
        // Use U32 here for k = 128.
        let dst = ExpandMsgDst::<U32>::process_xof::<H>(dst);
        let hash = H::default()
            .chain(message)
            .chain((len_in_bytes as u16).to_be_bytes())
            .chain(dst.data())
            .chain([dst.len() as u8])
            .finalize_xof_dirty();
        Self {
            hash,
            remain: len_in_bytes,
        }
    }
}
/// Constructor for `expand_message_xmd` for a given digest hash function, message, DST,
/// and output length.
///
/// Implements [section 5.4.1 of `draft-irtf-cfrg-hash-to-curve-12`][expand_message_xmd].
///
/// [expand_message_xmd]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-12#section-5.4.1
#[derive(Debug)]
// Zero-sized marker type; the actual expansion state is `ExpandMsgXmdState`.
pub struct ExpandMsgXmd<H: Digest>(PhantomData<H>);
/// A generator for the output of `expand_message_xmd` for a given
/// digest hash function, message, DST, and output length.
///
/// Implements [section 5.4.1 of `draft-irtf-cfrg-hash-to-curve-12`][expand_message_xmd].
///
/// [expand_message_xmd]: https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-hash-to-curve-12#section-5.4.1
pub struct ExpandMsgXmdState<'x, H: Digest> {
    /// Processed (possibly hashed-down) domain separation tag.
    dst: ExpandMsgDst<'x, H::OutputSize>,
    /// b_0 from the spec: H(Z_pad || msg || l_i_b_str || 0x00 || DST').
    b_0: GenericArray<u8, H::OutputSize>,
    /// The current output block b_i (initialized to b_1).
    b_i: GenericArray<u8, H::OutputSize>,
    /// Index of the *next* block to derive.
    i: usize,
    /// Number of bytes of `b_i` already handed out.
    b_offs: usize,
    /// Total output bytes not yet read.
    remain: usize,
}
impl<H: Digest> Debug for ExpandMsgXmdState<'_, H> {
    // Manual impl: only `remain` is reported; the internal hash blocks and
    // DST are omitted from the debug output.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.debug_struct("ExpandMsgXmdState")
            .field("remain", &self.remain)
            .finish()
    }
}
impl<'x, H> InitExpandMessage<'x> for ExpandMsgXmd<H>
where
    H: Digest + BlockInput,
{
    type Expander = ExpandMsgXmdState<'x, H>;
    /// Sets up `expand_message_xmd` (draft section 5.4.1): computes b_0 and
    /// b_1, then returns a state that derives further blocks lazily in
    /// `read_into`.
    fn init_expand(message: &[u8], dst: &'x [u8], len_in_bytes: usize) -> Self::Expander {
        let hash_size = <H as Digest>::OutputSize::to_usize();
        // ell = ceil(len_in_bytes / hash_size); capped at 255 because the
        // block index is serialized as a single byte.
        let ell = (len_in_bytes + hash_size - 1) / hash_size;
        if ell > 255 {
            panic!("Invalid ExpandMsgXmd usage: ell > 255");
        }
        let dst = ExpandMsgDst::process_xmd::<H>(dst);
        // b_0 = H(Z_pad || msg || I2OSP(len_in_bytes, 2) || 0x00 || DST'),
        // where Z_pad is one zeroed input block of the hash.
        let b_0 = H::new()
            .chain(GenericArray::<u8, <H as BlockInput>::BlockSize>::default())
            .chain(message)
            .chain((len_in_bytes as u16).to_be_bytes())
            .chain([0u8])
            .chain(dst.data())
            .chain([dst.len() as u8])
            .finalize();
        // init with b_1 = H(b_0 || 0x01 || DST')
        let b_i = H::new()
            .chain(&b_0)
            .chain([1u8])
            .chain(dst.data())
            .chain([dst.len() as u8])
            .finalize();
        ExpandMsgXmdState {
            dst,
            b_0,
            b_i,
            i: 2, // next block to derive is b_2
            b_offs: 0,
            remain: len_in_bytes,
        }
    }
}
impl<'x, H> ExpandMessageState<'x> for ExpandMsgXmdState<'x, H>
where
    H: Digest + BlockInput,
{
    /// Copies expanded bytes into `output`, deriving new blocks
    /// b_i = H((b_0 xor b_{i-1}) || I2OSP(i, 1) || DST') on demand.
    fn read_into(&mut self, output: &mut [u8]) -> usize {
        let read_len = self.remain.min(output.len());
        let mut offs = 0;
        let hash_size = H::OutputSize::to_usize();
        while offs < read_len {
            let b_offs = self.b_offs;
            let mut copy_len = hash_size - b_offs;
            if copy_len > 0 {
                // Serve bytes still buffered in the current block.
                copy_len = copy_len.min(read_len - offs);
                output[offs..(offs + copy_len)]
                    .copy_from_slice(&self.b_i[b_offs..(b_offs + copy_len)]);
                offs += copy_len;
                self.b_offs = b_offs + copy_len;
            } else {
                // Current block exhausted: derive the next one per the spec.
                let mut b_prev_xor = self.b_0.clone();
                for j in 0..hash_size {
                    b_prev_xor[j] ^= self.b_i[j];
                }
                self.b_i = H::new()
                    .chain(b_prev_xor)
                    .chain([self.i as u8])
                    .chain(self.dst.data())
                    .chain([self.dst.len() as u8])
                    .finalize();
                self.b_offs = 0;
                self.i += 1;
            }
        }
        self.remain -= read_len;
        read_len
    }
    fn remain(&self) -> usize {
        self.remain
    }
}
#[cfg(feature = "alloc")]
#[cfg(test)]
mod tests {
use super::*;
use sha2::{Sha256, Sha512};
use sha3::{Shake128, Shake256};
/// From <https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-12#appendix-K.1>
#[test]
fn expand_message_xmd_works_for_draft12_testvectors_sha256() {
let dst = b"QUUX-V01-CS02-with-expander-SHA256-128";
let msg = b"";
let len_in_bytes = 0x20;
let uniform_bytes = hex::decode(
"68a985b87eb6b46952128911f2a4412bbc302a9d759667f8\
7f7a21d803f07235",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha256>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"abc";
let len_in_bytes = 0x20;
let uniform_bytes = hex::decode(
"d8ccab23b5985ccea865c6c97b6e5b8350e794e603b4b979\
02f53a8a0d605615",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha256>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"abcdef0123456789";
let len_in_bytes = 0x20;
let uniform_bytes = hex::decode(
"eff31487c770a893cfb36f912fbfcbff40d5661771ca4b2c\
b4eafe524333f5c1",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha256>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
qqqqqqqqqqqqqqqqqqqqqqqqq";
let len_in_bytes = 0x20;
let uniform_bytes = hex::decode(
"b23a1d2b4d97b2ef7785562a7e8bac7eed54ed6e97e29aa5\
1bfe3f12ddad1ff9",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha256>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
let len_in_bytes = 0x20;
let uniform_bytes = hex::decode(
"4623227bcc01293b8c130bf771da8c298dede7383243dc09\
93d2d94823958c4c",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha256>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"";
let len_in_bytes = 0x80;
let uniform_bytes = hex::decode(
"af84c27ccfd45d41914fdff5df25293e221afc53d8ad2ac0\
6d5e3e29485dadbee0d121587713a3e0dd4d5e69e93eb7cd4f5df4\
cd103e188cf60cb02edc3edf18eda8576c412b18ffb658e3dd6ec8\
49469b979d444cf7b26911a08e63cf31f9dcc541708d3491184472\
c2c29bb749d4286b004ceb5ee6b9a7fa5b646c993f0ced",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha256>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"abc";
let len_in_bytes = 0x80;
let uniform_bytes = hex::decode(
"abba86a6129e366fc877aab32fc4ffc70120d8996c88aee2\
fe4b32d6c7b6437a647e6c3163d40b76a73cf6a5674ef1d890f95b\
664ee0afa5359a5c4e07985635bbecbac65d747d3d2da7ec2b8221\
b17b0ca9dc8a1ac1c07ea6a1e60583e2cb00058e77b7b72a298425\
cd1b941ad4ec65e8afc50303a22c0f99b0509b4c895f40",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha256>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"abcdef0123456789";
let len_in_bytes = 0x80;
let uniform_bytes = hex::decode(
"ef904a29bffc4cf9ee82832451c946ac3c8f8058ae97d8d6\
29831a74c6572bd9ebd0df635cd1f208e2038e760c4994984ce73f\
0d55ea9f22af83ba4734569d4bc95e18350f740c07eef653cbb9f8\
7910d833751825f0ebefa1abe5420bb52be14cf489b37fe1a72f7d\
e2d10be453b2c9d9eb20c7e3f6edc5a60629178d9478df",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha256>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
qqqqqqqqqqqqqqqqqqqqqqqqq";
let len_in_bytes = 0x80;
let uniform_bytes = hex::decode(
"80be107d0884f0d881bb460322f0443d38bd222db8bd0b0a\
5312a6fedb49c1bbd88fd75d8b9a09486c60123dfa1d73c1cc3169\
761b17476d3c6b7cbbd727acd0e2c942f4dd96ae3da5de368d26b3\
2286e32de7e5a8cb2949f866a0b80c58116b29fa7fabb3ea7d520e\
e603e0c25bcaf0b9a5e92ec6a1fe4e0391d1cdbce8c68a",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha256>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
let len_in_bytes = 0x80;
let uniform_bytes = hex::decode(
"546aff5444b5b79aa6148bd81728704c32decb73a3ba76e9\
e75885cad9def1d06d6792f8a7d12794e90efed817d96920d72889\
6a4510864370c207f99bd4a608ea121700ef01ed879745ee3e4cee\
f777eda6d9e5e38b90c86ea6fb0b36504ba4a45d22e86f6db5dd43\
d98a294bebb9125d5b794e9d2a81181066eb954966a487",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha256>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
}
/// From <https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-12#appendix-K.2>
#[test]
fn expand_message_xmd_works_for_draft12_testvectors_sha256_long_dst() {
let dst = b"QUUX-V01-CS02-with-expander-SHA256-128-long-DST-111111\
111111111111111111111111111111111111111111111111111111\
111111111111111111111111111111111111111111111111111111\
111111111111111111111111111111111111111111111111111111\
1111111111111111111111111111111111111111";
let msg = b"";
let len_in_bytes = 0x20;
let uniform_bytes = hex::decode(
"e8dc0c8b686b7ef2074086fbdd2f30e3f8bfbd3bdf177f73\
f04b97ce618a3ed3",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha256>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"abc";
let len_in_bytes = 0x20;
let uniform_bytes = hex::decode(
"52dbf4f36cf560fca57dedec2ad924ee9c266341d8f3d6af\
e5171733b16bbb12",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha256>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"abcdef0123456789";
let len_in_bytes = 0x20;
let uniform_bytes = hex::decode(
"35387dcf22618f3728e6c686490f8b431f76550b0b2c61cb\
c1ce7001536f4521",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha256>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
qqqqqqqqqqqqqqqqqqqqqqqqq";
let len_in_bytes = 0x20;
let uniform_bytes = hex::decode(
"01b637612bb18e840028be900a833a74414140dde0c4754c\
198532c3a0ba42bc",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha256>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
let len_in_bytes = 0x20;
let uniform_bytes = hex::decode(
"20cce7033cabc5460743180be6fa8aac5a103f56d481cf36\
9a8accc0c374431b",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha256>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"";
let len_in_bytes = 0x80;
let uniform_bytes = hex::decode(
"14604d85432c68b757e485c8894db3117992fc57e0e136f7\
1ad987f789a0abc287c47876978e2388a02af86b1e8d1342e5ce4f\
7aaa07a87321e691f6fba7e0072eecc1218aebb89fb14a0662322d\
5edbd873f0eb35260145cd4e64f748c5dfe60567e126604bcab1a3\
ee2dc0778102ae8a5cfd1429ebc0fa6bf1a53c36f55dfc",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha256>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"abc";
let len_in_bytes = 0x80;
let uniform_bytes = hex::decode(
"1a30a5e36fbdb87077552b9d18b9f0aee16e80181d5b951d\
0471d55b66684914aef87dbb3626eaabf5ded8cd0686567e503853\
e5c84c259ba0efc37f71c839da2129fe81afdaec7fbdc0ccd4c794\
727a17c0d20ff0ea55e1389d6982d1241cb8d165762dbc39fb0cee\
4474d2cbbd468a835ae5b2f20e4f959f56ab24cd6fe267",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha256>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"abcdef0123456789";
let len_in_bytes = 0x80;
let uniform_bytes = hex::decode(
"d2ecef3635d2397f34a9f86438d772db19ffe9924e28a1ca\
f6f1c8f15603d4028f40891044e5c7e39ebb9b31339979ff33a424\
9206f67d4a1e7c765410bcd249ad78d407e303675918f20f26ce6d\
7027ed3774512ef5b00d816e51bfcc96c3539601fa48ef1c07e494\
bdc37054ba96ecb9dbd666417e3de289d4f424f502a982",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha256>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
qqqqqqqqqqqqqqqqqqqqqqqqq";
let len_in_bytes = 0x80;
let uniform_bytes = hex::decode(
"ed6e8c036df90111410431431a232d41a32c86e296c05d42\
6e5f44e75b9a50d335b2412bc6c91e0a6dc131de09c43110d9180d\
0a70f0d6289cb4e43b05f7ee5e9b3f42a1fad0f31bac6a625b3b5c\
50e3a83316783b649e5ecc9d3b1d9471cb5024b7ccf40d41d1751a\
04ca0356548bc6e703fca02ab521b505e8e45600508d32",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha256>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
let len_in_bytes = 0x80;
let uniform_bytes = hex::decode(
"78b53f2413f3c688f07732c10e5ced29a17c6a16f717179f\
fbe38d92d6c9ec296502eb9889af83a1928cd162e845b0d3c5424e\
83280fed3d10cffb2f8431f14e7a23f4c68819d40617589e4c4116\
9d0b56e0e3535be1fd71fbb08bb70c5b5ffed953d6c14bf7618b35\
fc1f4c4b30538236b4b08c9fbf90462447a8ada60be495",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha256>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
}
/// From <https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-12#appendix-K.3>
#[test]
fn expand_message_xmd_works_for_draft12_testvectors_sha512() {
let dst = b"QUUX-V01-CS02-with-expander-SHA512-256";
let msg = b"";
let len_in_bytes = 0x20;
let uniform_bytes = hex::decode(
"6b9a7312411d92f921c6f68ca0b6380730a1a4d982c50721\
1a90964c394179ba",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha512>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"abc";
let len_in_bytes = 0x20;
let uniform_bytes = hex::decode(
"0da749f12fbe5483eb066a5f595055679b976e93abe9be6f\
0f6318bce7aca8dc",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha512>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"abcdef0123456789";
let len_in_bytes = 0x20;
let uniform_bytes = hex::decode(
"087e45a86e2939ee8b91100af1583c4938e0f5fc6c9db4b1\
07b83346bc967f58",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha512>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
qqqqqqqqqqqqqqqqqqqqqqqqq";
let len_in_bytes = 0x20;
let uniform_bytes = hex::decode(
"7336234ee9983902440f6bc35b348352013becd88938d2af\
ec44311caf8356b3",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha512>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
let len_in_bytes = 0x20;
let uniform_bytes = hex::decode(
"57b5f7e766d5be68a6bfe1768e3c2b7f1228b3e4b3134956\
dd73a59b954c66f4",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha512>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"";
let len_in_bytes = 0x80;
let uniform_bytes = hex::decode(
"41b037d1734a5f8df225dd8c7de38f851efdb45c372887be\
655212d07251b921b052b62eaed99b46f72f2ef4cc96bfaf254ebb\
bec091e1a3b9e4fb5e5b619d2e0c5414800a1d882b62bb5cd1778f\
098b8eb6cb399d5d9d18f5d5842cf5d13d7eb00a7cff859b605da6\
78b318bd0e65ebff70bec88c753b159a805d2c89c55961",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha512>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"abc";
let len_in_bytes = 0x80;
let uniform_bytes = hex::decode(
"7f1dddd13c08b543f2e2037b14cefb255b44c83cc397c178\
6d975653e36a6b11bdd7732d8b38adb4a0edc26a0cef4bb4521713\
5456e58fbca1703cd6032cb1347ee720b87972d63fbf232587043e\
d2901bce7f22610c0419751c065922b488431851041310ad659e4b\
23520e1772ab29dcdeb2002222a363f0c2b1c972b3efe1",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha512>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"abcdef0123456789";
let len_in_bytes = 0x80;
let uniform_bytes = hex::decode(
"3f721f208e6199fe903545abc26c837ce59ac6fa45733f1b\
aaf0222f8b7acb0424814fcb5eecf6c1d38f06e9d0a6ccfbf85ae6\
12ab8735dfdf9ce84c372a77c8f9e1c1e952c3a61b7567dd069301\
6af51d2745822663d0c2367e3f4f0bed827feecc2aaf98c949b5ed\
0d35c3f1023d64ad1407924288d366ea159f46287e61ac",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha512>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
qqqqqqqqqqqqqqqqqqqqqqqqq";
let len_in_bytes = 0x80;
let uniform_bytes = hex::decode(
"b799b045a58c8d2b4334cf54b78260b45eec544f9f2fb5bd\
12fb603eaee70db7317bf807c406e26373922b7b8920fa29142703\
dd52bdf280084fb7ef69da78afdf80b3586395b433dc66cde048a2\
58e476a561e9deba7060af40adf30c64249ca7ddea79806ee5beb9\
a1422949471d267b21bc88e688e4014087a0b592b695ed",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha512>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
let len_in_bytes = 0x80;
let uniform_bytes = hex::decode(
"05b0bfef265dcee87654372777b7c44177e2ae4c13a27f10\
3340d9cd11c86cb2426ffcad5bd964080c2aee97f03be1ca18e30a\
1f14e27bc11ebbd650f305269cc9fb1db08bf90bfc79b42a952b46\
daf810359e7bc36452684784a64952c343c52e5124cd1f71d474d5\
197fefc571a92929c9084ffe1112cf5eea5192ebff330b",
)
.unwrap();
assert_eq!(
ExpandMsgXmd::<Sha512>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
}
/// From <https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-12#appendix-K.4>
#[test]
fn expand_message_xof_works_for_draft12_testvectors_shake128() {
let dst = b"QUUX-V01-CS02-with-expander-SHAKE128";
let msg = b"";
let len_in_bytes = 0x20;
let uniform_bytes = hex::decode(
"86518c9cd86581486e9485aa74ab35ba150d1c75c88e26b7\
043e44e2acd735a2",
)
.unwrap();
assert_eq!(
ExpandMsgXof::<Shake128>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"abc";
let len_in_bytes = 0x20;
let uniform_bytes = hex::decode(
"8696af52a4d862417c0763556073f47bc9b9ba43c99b5053\
05cb1ec04a9ab468",
)
.unwrap();
assert_eq!(
ExpandMsgXof::<Shake128>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"abcdef0123456789";
let len_in_bytes = 0x20;
let uniform_bytes = hex::decode(
"912c58deac4821c3509dbefa094df54b34b8f5d01a191d1d\
3108a2c89077acca",
)
.unwrap();
assert_eq!(
ExpandMsgXof::<Shake128>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\
qqqqqqqqqqqqqqqqqqqqqqqqq";
let len_in_bytes = 0x20;
let uniform_bytes = hex::decode(
"1adbcc448aef2a0cebc71dac9f756b22e51839d348e031e6\
3b33ebb50faeaf3f",
)
.unwrap();
assert_eq!(
ExpandMsgXof::<Shake128>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
let len_in_bytes = 0x20;
let uniform_bytes = hex::decode(
"df3447cc5f3e9a77da10f819218ddf31342c310778e0e4ef\
72bbaecee786a4fe",
)
.unwrap();
assert_eq!(
ExpandMsgXof::<Shake128>::init_expand(msg, dst, len_in_bytes).into_vec(),
uniform_bytes
);
let msg = b"";
let len_in_bytes = 0x80;
let uniform_bytes = hex::decode(
"7314ff1a155a2fb99a0171dc71b89ab6e3b2b7d59e38e644\
19b8b6294d03ffee42491f11370261f436220ef787f8f76f5b26bd\
cd850071920ce023f3ac46847744f4612b8714db8f5db83205b2e6\
25d95afd7d7b4d3094d3bdde815f52850bb41ead9822e08f22cf41\
d615a303b0d9dde73263c049a7b9898208003a739a2e57",
)
.unwrap();
assert_eq!(
ExpandMsgXof::<Shake128>::init_expand(msg, dst, len_in_bytes).into_vec(),
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | true |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/zkcrypto/bls12_381/src/notes/serialization.rs | zkcrypto/bls12_381/src/notes/serialization.rs | //! # BLS12-381 serialization
//!
//! * $\mathbb{F}\_p$ elements are encoded in big-endian form. They occupy 48
//! bytes in this form.
//! * $\mathbb{F}\_{p^2}$ elements are encoded in big-endian form, meaning that
//! the $\mathbb{F}\_{p^2}$ element $c\_0 + c\_1 \cdot u$ is represented by the
//! $\mathbb{F}\_p$ element $c\_1$ followed by the $\mathbb{F}\_p$ element $c\_0$.
//! This means $\mathbb{F}_{p^2}$ elements occupy 96 bytes in this form.
//! * The group $\mathbb{G}\_1$ uses $\mathbb{F}\_p$ elements for coordinates. The
//! group $\mathbb{G}\_2$ uses $\mathbb{F}_{p^2}$ elements for coordinates.
//! * $\mathbb{G}\_1$ and $\mathbb{G}\_2$ elements can be encoded in uncompressed
//! form (the x-coordinate followed by the y-coordinate) or in compressed form
//! (just the x-coordinate). $\mathbb{G}\_1$ elements occupy 96 bytes in
//! uncompressed form, and 48 bytes in compressed form. $\mathbb{G}\_2$
//! elements occupy 192 bytes in uncompressed form, and 96 bytes in compressed
//! form.
//!
//! The most-significant three bits of a $\mathbb{G}\_1$ or $\mathbb{G}\_2$
//! encoding should be masked away before the coordinate(s) are interpreted.
//! These bits are used to unambiguously represent the underlying element:
//! * The most significant bit, when set, indicates that the point is in
//! compressed form. Otherwise, the point is in uncompressed form.
//! * The second-most significant bit indicates that the point is at infinity.
//! If this bit is set, the remaining bits of the group element's encoding
//! should be set to zero.
//! * The third-most significant bit is set if (and only if) this point is in
//! compressed form _and_ it is not the point at infinity _and_ its
//! y-coordinate is the lexicographically largest of the two associated with
//! the encoded x-coordinate.
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/zkcrypto/bls12_381/src/notes/design.rs | zkcrypto/bls12_381/src/notes/design.rs | //! # Design of BLS12-381
//! ## Fixed Generators
//!
//! Although any generator produced by hashing to $\mathbb{G}_1$ or $\mathbb{G}_2$ is
//! safe to use in a cryptographic protocol, we specify some simple, fixed generators.
//!
//! In order to derive these generators, we select the lexicographically smallest
//! valid $x$-coordinate and the lexicographically smallest corresponding $y$-coordinate,
//! and then scale the resulting point by the cofactor, such that the result is not the
//! identity. This results in the following fixed generators:
//!
//! 1. $\mathbb{G}_1$
//! * $x = 3685416753713387016781088315183077757961620795782546409894578378688607592378376318836054947676345821548104185464507$
//! * $y = 1339506544944476473020471379941921221584933875938349620426543736416511423956333506472724655353366534992391756441569$
//! 2. $\mathbb{G}_2$
//! * $x = 352701069587466618187139116011060144890029952792775240219908644239793785735715026873347600343865175952761926303160 + 3059144344244213709971259814753781636986470325476647558659373206291635324768958432433509563104347017837885763365758 u$
//! * $y = 1985150602287291935568054521177171638300868978215655730859378665066344726373823718423869104263333984641494340347905 + 927553665492332455747201965776037880757740193453592970025027978793976877002675564980949289727957565575433344219582 u$
//!
//! This can be derived using the following sage script:
//!
//! ```text
//! param = -0xd201000000010000
//! def r(x):
//! return (x**4) - (x**2) + 1
//! def q(x):
//! return (((x - 1) ** 2) * ((x**4) - (x**2) + 1) // 3) + x
//! def g1_h(x):
//! return ((x-1)**2) // 3
//! def g2_h(x):
//! return ((x**8) - (4 * (x**7)) + (5 * (x**6)) - (4 * (x**4)) + (6 * (x**3)) - (4 * (x**2)) - (4*x) + 13) // 9
//! q = q(param)
//! r = r(param)
//! Fq = GF(q)
//! ec = EllipticCurve(Fq, [0, 4])
//! def psqrt(v):
//! assert(not v.is_zero())
//! a = sqrt(v)
//! b = -a
//! if a < b:
//! return a
//! else:
//! return b
//! for x in range(0,100):
//! rhs = Fq(x)^3 + 4
//! if rhs.is_square():
//! y = psqrt(rhs)
//! p = ec(x, y) * g1_h(param)
//! if (not p.is_zero()) and (p * r).is_zero():
//! print("g1 generator: {}".format(p))
//! break
//! Fq2.<i> = GF(q^2, modulus=[1, 0, 1])
//! ec2 = EllipticCurve(Fq2, [0, (4 * (1 + i))])
//! assert(ec2.order() == (r * g2_h(param)))
//! for x in range(0,100):
//! rhs = (Fq2(x))^3 + (4 * (1 + i))
//! if rhs.is_square():
//! y = psqrt(rhs)
//! p = ec2(Fq2(x), y) * g2_h(param)
//! if not p.is_zero() and (p * r).is_zero():
//! print("g2 generator: {}".format(p))
//! break
//! ```
//!
//! ## Nontrivial third root of unity
//!
//! To use the fast subgroup check algorithm for $\mathbb{G_1}$ from https://eprint.iacr.org/2019/814.pdf and
//! https://eprint.iacr.org/2021/1130, it is necessary to find a nontrivial cube root of
//! unity β in Fp to define the endomorphism:
//! $$(x, y) \rightarrow (\beta x, y)$$
//! which is equivalent to
//! $$P \rightarrow \lambda P$$
//! where $\lambda$, a nontrivial cube root of unity in Fr, satisfies $\lambda^2 + \lambda +1 = 0 \pmod{r}.
//!
//! $$\beta = 793479390729215512621379701633421447060886740281060493010456487427281649075476305620758731620350$$
//! can be derived using the following sage commands after running the above sage script:
//!
//! ```text
//! # Prints the given field element in Montgomery form.
//! def print_fq(a):
//! R = 1 << 384
//! tmp = ZZ(Fq(a*R))
//! while tmp > 0:
//! print("0x{:_x}, ".format(tmp % (1 << 64)))
//! tmp >>= 64
//! β = (Fq.multiplicative_generator() ** ((q-1)/3))
//! print_fq(β)
//! ```
//!
//! ## Psi
//!
//! To use the fast subgroup check algorithm for $\mathbb{G_2}$ from https://eprint.iacr.org/2019/814.pdf and
//! https://eprint.iacr.org/2021/1130, it is necessary to find the endomorphism:
//!
//! $$(x, y, z) \rightarrow (x^q \psi_x, y^q \psi_y, z^q)$$
//!
//! where:
//!
//! 1. $\psi_x = 1 / ((i+1) ^ ((q-1)/3)) \in \mathbb{F}_{q^2}$, and
//! 2. $\psi_y = 1 / ((i+1) ^ ((q-1)/2)) \in \mathbb{F}_{q^2}$
//!
//! can be derived using the following sage commands after running the above script and commands:
//! ```text
//! psi_x = (1/((i+1)**((q-1)/3)))
//! psi_y = (1/((i+1)**((q-1)/2)))
//! print_fq(psi_x.polynomial().coefficients()[0])
//! print_fq(psi_y.polynomial().coefficients()[0])
//! print_fq(psi_y.polynomial().coefficients()[1])
//! ```
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
grandinetech/rust-kzg | https://github.com/grandinetech/rust-kzg/blob/d47acbdf587753f466a5e6842395e03930ae1f96/zkcrypto/bls12_381/benches/hash_to_curve.rs | zkcrypto/bls12_381/benches/hash_to_curve.rs | #[macro_use]
extern crate criterion;
extern crate bls12_381;
use bls12_381::hash_to_curve::*;
use bls12_381::*;
use criterion::{black_box, Criterion};
fn criterion_benchmark(c: &mut Criterion) {
// G1Projective
{
let name = "G1Projective";
let message: &[u8] = b"test message";
let dst: &[u8] = b"test DST";
c.bench_function(
&format!("{} encode_to_curve SSWU SHA-256", name),
move |b| {
b.iter(|| {
<G1Projective as HashToCurve<ExpandMsgXmd<sha2::Sha256>>>::encode_to_curve(
black_box(message),
black_box(dst),
)
})
},
);
c.bench_function(&format!("{} hash_to_curve SSWU SHA-256", name), move |b| {
b.iter(|| {
<G1Projective as HashToCurve<ExpandMsgXmd<sha2::Sha256>>>::hash_to_curve(
black_box(message),
black_box(dst),
)
})
});
}
// G2Projective
{
let name = "G2Projective";
let message: &[u8] = b"test message";
let dst: &[u8] = b"test DST";
c.bench_function(
&format!("{} encode_to_curve SSWU SHA-256", name),
move |b| {
b.iter(|| {
<G2Projective as HashToCurve<ExpandMsgXmd<sha2::Sha256>>>::encode_to_curve(
black_box(message),
black_box(dst),
)
})
},
);
c.bench_function(&format!("{} hash_to_curve SSWU SHA-256", name), move |b| {
b.iter(|| {
<G2Projective as HashToCurve<ExpandMsgXmd<sha2::Sha256>>>::hash_to_curve(
black_box(message),
black_box(dst),
)
})
});
}
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
| rust | Apache-2.0 | d47acbdf587753f466a5e6842395e03930ae1f96 | 2026-01-04T20:22:26.256259Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.